Index: sys/vm/swap_pager.c =================================================================== --- sys/vm/swap_pager.c +++ sys/vm/swap_pager.c @@ -155,10 +155,10 @@ static u_long swap_total; static int sysctl_page_shift(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_vm, OID_AUTO, swap_reserved, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, - &swap_reserved, 0, sysctl_page_shift, "A", + &swap_reserved, 0, sysctl_page_shift, "A", "Amount of swap storage needed to back all allocated anonymous memory."); SYSCTL_PROC(_vm, OID_AUTO, swap_total, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, - &swap_total, 0, sysctl_page_shift, "A", + &swap_total, 0, sysctl_page_shift, "A", "Total amount of available swap storage."); static int overcommit = 0; @@ -1650,9 +1650,41 @@ return (nswapdev); } +static void +swp_pager_force_dirty(vm_page_t m) +{ + + vm_object_pip_wakeup(m->object); + vm_page_dirty(m); +#ifdef INVARIANTS + vm_page_lock(m); + if (m->wire_count == 0 && m->queue == PQ_NONE) + panic("page %p is neither wired nor queued", m); + vm_page_unlock(m); +#endif + vm_page_xunbusy(m); + vm_pager_page_unswapped(m); +} + +static void +swp_pager_force_launder(vm_page_t m) +{ + + vm_object_pip_wakeup(m->object); + vm_page_dirty(m); + vm_page_lock(m); + vm_page_launder(m); + vm_page_unlock(m); + vm_page_xunbusy(m); + vm_pager_page_unswapped(m); +} + /* * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in * + * Returns the number of pages that are paged in. The maximum number of + * pages this function can page in at a time is SWB_NPAGES. + * * This routine dissociates the page at the given index within an object * from its backing store, paging it in if it does not reside in memory. * If the page is paged in, it is marked dirty and placed in the laundry @@ -1663,40 +1695,32 @@ * We also attempt to swap in all other pages in the swap block. * However, we only guarantee that the one at the specified index is * paged in. 
- * - * XXX - The code to page the whole block in doesn't work, so we - * revert to the one-by-one behavior for now. Sigh. */ -static inline void +static int swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex) { - vm_page_t m; + vm_page_t ma[SWB_NPAGES]; + int i, j, npages; - vm_object_pip_add(object, 1); - m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); - if (m->valid == VM_PAGE_BITS_ALL) { - vm_object_pip_wakeup(object); - vm_page_dirty(m); -#ifdef INVARIANTS - vm_page_lock(m); - if (m->wire_count == 0 && m->queue == PQ_NONE) - panic("page %p is neither wired nor queued", m); - vm_page_unlock(m); -#endif - vm_page_xunbusy(m); - vm_pager_page_unswapped(m); - return; + if (!swap_pager_haspage(object, pindex, NULL, &npages)) + panic("%s: missing page %ju", __func__, (uintmax_t)pindex); + npages += 1; + npages = vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages); + vm_object_pip_add(object, npages); + for (i = j = 0;; i++) { + if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL) + continue; + if (j < i && (swap_pager_getpages(object, &ma[j], i - j, NULL, + NULL) != VM_PAGER_OK)) + panic("swp_pager_force_pagein: read from swap failed " + "for page %ju", (uintmax_t)ma[j]->pindex); + while (j < i) + swp_pager_force_launder(ma[j++]); + if (i == npages) + break; + swp_pager_force_dirty(ma[j++]); } - - if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK) - panic("swap_pager_force_pagein: read from swap failed");/*XXX*/ - vm_object_pip_wakeup(object); - vm_page_dirty(m); - vm_page_lock(m); - vm_page_launder(m); - vm_page_unlock(m); - vm_page_xunbusy(m); - vm_pager_page_unswapped(m); + return (npages); } /* @@ -1745,16 +1769,22 @@ if (object->type != OBJT_SWAP) goto next_obj; - for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( - &object->un_pager.swp.swp_blks, pi)) != NULL; ) { + for (pi = 0, i = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( + &object->un_pager.swp.swp_blks, pi)) != NULL;) { pi = sb->p + SWAP_META_PAGES; - for (i = 0; i < SWAP_META_PAGES; i++) { - if (sb->d[i] 
== SWAPBLK_NONE) - continue; - if (swp_pager_isondev(sb->d[i], sp)) - swp_pager_force_pagein(object, + /* + * If the previous swp_pager_force_pagein() call paged in + * entries past the end of the previous sb->d[] block, the + * carried-over index i skips those entries here. + */ + while (i < SWAP_META_PAGES) { + if (swp_pager_isondev(sb->d[i], sp)) { + i += swp_pager_force_pagein(object, sb->p + i); + } else + i++; } + i -= SWAP_META_PAGES; } next_obj: VM_OBJECT_WUNLOCK(object); @@ -1807,7 +1837,7 @@ } return (true); } - + /* * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object * Index: sys/vm/vm_swapout.c =================================================================== --- sys/vm/vm_swapout.c +++ sys/vm/vm_swapout.c @@ -570,7 +570,8 @@ { vm_object_t ksobj; vm_page_t ma[KSTACK_MAX_PAGES]; - int a, count, i, j, pages, rv; + int a, count, i, j, pages; + boolean_t rv; pages = td->td_kstack_pages; ksobj = td->td_kstack_obj; @@ -588,8 +589,8 @@ for (j = i + 1; j < pages; j++) if (ma[j]->valid == VM_PAGE_BITS_ALL) break; - rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a); - KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i])); + rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a); + KASSERT(rv, ("%s: missing page %p", __func__, ma[i])); count = min(a + 1, j - i); rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL); KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",