Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -1676,66 +1676,91 @@
 }
 
 /*
- * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
+ * SWP_PAGER_FORCE_PAGEIN() - force swap blocks to be paged in
  *
- * This routine dissociates the page at the given index within an object
- * from its backing store, paging it in if it does not reside in memory.
- * If the page is paged in, it is marked dirty and placed in the laundry
- * queue. The page is marked dirty because it no longer has backing
- * store. It is placed in the laundry queue because it has not been
- * accessed recently. Otherwise, it would already reside in memory.
- *
- * We also attempt to swap in all other pages in the swap block.
- * However, we only guarantee that the one at the specified index is
- * paged in.
- *
- * XXX - The code to page the whole block in doesn't work, so we
- * revert to the one-by-one behavior for now. Sigh.
+ * This routine dissociates pages starting at the given index within an
+ * object from their backing store, paging them in if they do not reside
+ * in memory. Pages that are paged in are marked dirty and placed in the
+ * laundry queue. Pages are marked dirty because they no longer have
+ * backing store. They are placed in the laundry queue because they have
+ * not been accessed recently. Otherwise, they would already reside in
+ * memory.
  */
 static void
-swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
+swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex, int npages)
 {
-	vm_page_t m;
+	vm_page_t ma[npages];
+	int i, j;
 
-	vm_object_pip_add(object, 1);
-	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
-	if (m->valid == VM_PAGE_BITS_ALL) {
+	KASSERT(npages < MAXPHYS/PAGE_SIZE,
+	    ("%s: Too many pages: %d", __func__, npages));
+	if (npages == 0)
+		return;
+	vm_object_pip_add(object, npages);
+	vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages);
+	for (i = j = 0;; i++) {
+		/* Count nonresident pages, to page them in all at once. */
+		if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
+			continue;
+		if (j < i) {
+			/* Page in nonresident pages. Mark for laundering. */
+			if (swap_pager_getpages(object, &ma[j], i - j, NULL,
+			    NULL) != VM_PAGER_OK)
+				panic("%s: read from swap failed", __func__);
+			do {
+				vm_object_pip_wakeup(object);
+				swp_pager_force_launder(ma[j]);
+				vm_pager_page_unswapped(ma[j]);
+			} while (++j < i);
+		}
+		if (i == npages)
+			break;
+		/* Mark a resident page dirty. */
 		vm_object_pip_wakeup(object);
-		swp_pager_force_dirty(m);
-		vm_pager_page_unswapped(m);
-		return;
+		swp_pager_force_dirty(ma[j]);
+		vm_pager_page_unswapped(ma[j++]);
 	}
-
-	if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
-		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
-	vm_object_pip_wakeup(object);
-	swp_pager_force_launder(m);
-	vm_pager_page_unswapped(m);
+	/* vm_object_pip_wakeupn(object, npages); */
 }
 
 /*
  *	swap_pager_swapoff_object:
  *
  *	Page in all of the pages that have been paged out for an object
- *	from a given swap device.
+ *	from a swap device.
  */
 static void
 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
 {
 	struct swblk *sb;
-	vm_pindex_t pi;
+	vm_pindex_t pi, s_pindex;
+	daddr_t blk, n_blks, s_blk;
 	int i;
+	bool ondev;
 
+	s_blk = SWAPBLK_NONE;
+	s_pindex = 0;
+	n_blks = 0;
 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
 	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
-		pi = sb->p + SWAP_META_PAGES;
 		for (i = 0; i < SWAP_META_PAGES; i++) {
-			if (sb->d[i] == SWAPBLK_NONE)
+			blk = sb->d[i];
+			ondev = swp_pager_isondev(blk, sp);
+			if (ondev &&
+			    n_blks < MAXPHYS/PAGE_SIZE &&
+			    s_blk + n_blks == blk &&
+			    s_pindex + n_blks == sb->p + i) {
+				++n_blks;
 				continue;
-			if (swp_pager_isondev(sb->d[i], sp))
-				swp_pager_force_pagein(object, sb->p + i);
+			}
+			swp_pager_force_pagein(object, s_pindex, n_blks);
+			s_blk = ondev ? blk : SWAPBLK_NONE;
+			s_pindex = sb->p + i;
+			n_blks = (s_blk != SWAPBLK_NONE) ? 1 : 0;
 		}
+		pi = sb->p + SWAP_META_PAGES;
 	}
+	swp_pager_force_pagein(object, s_pindex, n_blks);
 }
 
 /*
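
For illustration, here is a minimal standalone userland sketch of the batching idea behind this patch. It is not kernel code: BLK_NONE, MAX_RUN, the resident[] table, and the getpages() stub are hypothetical stand-ins for SWAPBLK_NONE, MAXPHYS/PAGE_SIZE, the page-validity check, and swap_pager_getpages(). The loop in main() mirrors the new swap_pager_swapoff_object() loop, accumulating a maximal run of blocks that are contiguous both on the device and in the object; force_pagein() mirrors the new segmentation loop, issuing one read per maximal nonresident subrange.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	BLK_NONE	UINT64_MAX	/* stands in for SWAPBLK_NONE */
#define	MAX_RUN		8		/* stands in for MAXPHYS/PAGE_SIZE */

/* Pretend page state: true if the page is already resident and valid. */
static bool resident[] = { false, false, true, false, true, true, false };

/* Stub for swap_pager_getpages(): one I/O for a contiguous run. */
static void
getpages(uint64_t pindex, int npages)
{
	printf("  read %d page(s) at index %ju in one I/O\n",
	    npages, (uintmax_t)pindex);
}

/*
 * Sketch of swp_pager_force_pagein(): split [pindex, pindex + npages)
 * into maximal nonresident subranges and read each with a single call,
 * instead of issuing one read per page.
 */
static void
force_pagein(uint64_t pindex, int npages)
{
	int i, j;

	if (npages == 0)
		return;
	printf("force_pagein(%ju, %d)\n", (uintmax_t)pindex, npages);
	for (i = j = 0;; i++) {
		/* Extend the nonresident subrange [j, i). */
		if (i < npages && !resident[pindex + i])
			continue;
		if (j < i)
			getpages(pindex + j, i - j);	/* batched read */
		if (i == npages)
			break;
		j = i + 1;	/* skip the resident page at offset i */
	}
}

int
main(void)
{
	/* Swap block backing each page index; BLK_NONE = not swapped. */
	uint64_t blk[] = { 100, 101, 102, BLK_NONE, 200, 201, 300 };
	int n = (int)(sizeof(blk) / sizeof(blk[0]));
	uint64_t s_blk = BLK_NONE, s_pindex = 0;
	int n_blks = 0;

	for (int i = 0; i < n; i++) {
		/* Run continues while blocks and page indices advance
		   in lockstep and the run stays below MAX_RUN. */
		if (blk[i] != BLK_NONE && n_blks < MAX_RUN &&
		    s_blk + n_blks == blk[i] &&
		    s_pindex + (uint64_t)n_blks == (uint64_t)i) {
			n_blks++;
			continue;
		}
		force_pagein(s_pindex, n_blks);	/* run broken: flush it */
		s_blk = blk[i];
		s_pindex = (uint64_t)i;
		n_blks = (s_blk != BLK_NONE) ? 1 : 0;
	}
	force_pagein(s_pindex, n_blks);	/* flush the final run */
	return (0);
}

As in the kernel loop, a run is flushed when any of three conditions breaks it: the next block is absent or on a different device, the block or page index is not contiguous with the run, or the run has reached the MAXPHYS/PAGE_SIZE cap. The payoff is that swapoff issues a few large reads instead of one read per page.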