Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -1183,6 +1183,21 @@
 	return (TRUE);
 }
 
+static void
+swap_pager_unswapped_acct(vm_page_t m)
+{
+	KASSERT((m->object->flags & OBJ_SWAP) != 0,
+	    ("Free object not swappable"));
+	if ((m->a.flags & PGA_SWAP_FREE) != 0)
+		counter_u64_add(swap_free_completed, 1);
+	vm_page_aflag_clear(m, PGA_SWAP_FREE | PGA_SWAP_SPACE);
+
+	/*
+	 * The meta data only exists if the object is OBJT_SWAP
+	 * and even then might not be allocated yet.
+	 */
+}
+
 /*
  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
  *
@@ -1229,16 +1244,7 @@
 		}
 		return;
 	}
-	if ((m->a.flags & PGA_SWAP_FREE) != 0)
-		counter_u64_add(swap_free_completed, 1);
-	vm_page_aflag_clear(m, PGA_SWAP_FREE | PGA_SWAP_SPACE);
-
-	/*
-	 * The meta data only exists if the object is OBJT_SWAP
-	 * and even then might not be allocated yet.
-	 */
-	KASSERT((m->object->flags & OBJ_SWAP) != 0,
-	    ("Free object not swappable"));
+	swap_pager_unswapped_acct(m);
 
 	sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks,
 	    rounddown(m->pindex, SWAP_META_PAGES));
@@ -1785,15 +1791,6 @@
 	return (nswapdev);
 }
 
-static void
-swp_pager_force_dirty(vm_page_t m)
-{
-
-	vm_page_dirty(m);
-	swap_pager_unswapped(m);
-	vm_page_launder(m);
-}
-
 u_long
 swap_pager_swapped_pages(vm_object_t object)
 {
@@ -1827,15 +1824,21 @@
 static void
 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
 {
+	struct page_range range;
 	struct swblk *sb;
 	vm_page_t m;
 	vm_pindex_t pi;
 	daddr_t blk;
-	int i, nv, rahead, rv;
+	int i, endv, rahead, rv;
+	bool bad_block;
 
+	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("%s: Object not swappable", __func__));
 
+	swp_pager_init_freerange(&range);
+	i = 0;
+	bad_block = false;
 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
 	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
 		if ((object->flags & OBJ_DEAD) != 0) {
@@ -1847,55 +1850,53 @@
 			swp_pager_meta_free_all(object);
 			break;
 		}
-		for (i = 0; i < SWAP_META_PAGES; i++) {
+		for (; i < SWAP_META_PAGES; i++) {
 			/*
 			 * Count the number of contiguous valid blocks.
 			 */
-			for (nv = 0; nv < SWAP_META_PAGES - i; nv++) {
-				blk = sb->d[i + nv];
-				if (!swp_pager_isondev(blk, sp) ||
-				    blk == SWAPBLK_NONE)
-					break;
-			}
-			if (nv == 0)
-				continue;
-
-			/*
-			 * Look for a page corresponding to the first
-			 * valid block and ensure that any pending paging
-			 * operations on it are complete. If the page is valid,
-			 * mark it dirty and free the swap block. Try to batch
-			 * this operation since it may cause sp to be freed,
-			 * meaning that we must restart the scan. Avoid busying
-			 * valid pages since we may block forever on kernel
-			 * stack pages.
-			 */
-			m = vm_page_lookup(object, sb->p + i);
-			if (m == NULL) {
-				m = vm_page_alloc(object, sb->p + i,
-				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
-				if (m == NULL)
-					break;
-			} else {
-				if ((m->oflags & VPO_SWAPINPROG) != 0) {
-					m->oflags |= VPO_SWAPSLEEP;
-					VM_OBJECT_SLEEP(object, &object->handle,
-					    PSWP, "swpoff", 0);
+			for (endv = i; endv < SWAP_META_PAGES; endv++) {
+				blk = sb->d[endv];
+				if (blk == SWAPBLK_NONE)
 					break;
-				}
-				if (vm_page_all_valid(m)) {
-					do {
-						swp_pager_force_dirty(m);
-					} while (--nv > 0 &&
-					    (m = vm_page_next(m)) != NULL &&
-					    vm_page_all_valid(m) &&
-					    (m->oflags & VPO_SWAPINPROG) == 0);
+				if (!swp_pager_isondev(blk, sp)) {
+					bad_block = true;
 					break;
 				}
-			}
-			if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
-				break;
 			}
+			if (i < endv)
+				break;
+		}
+
+		/*
+		 * Look for a page corresponding to the first valid block and
+		 * ensure that any pending paging operations on it are complete.
+		 * If the page is valid, mark it dirty and free the swap block.
+		 * Try to batch this operation since it may cause sp to be
+		 * freed, meaning that we must restart the scan. Avoid busying
+		 * valid pages since we may block forever on kernel stack pages.
+		 */
+		if (i == SWAP_META_PAGES) {
+			/* Do nothing. */
+		} else if ((m = vm_page_lookup(object, sb->p + i)) != NULL &&
+		    (m->oflags & VPO_SWAPINPROG) != 0) {
+			m->oflags |= VPO_SWAPSLEEP;
+			VM_OBJECT_SLEEP(object, &object->handle, PSWP, "swpoff",
+			    0);
+		} else if (m != NULL && vm_page_all_valid(m)) {
+			do {
+				vm_page_dirty(m);
+				swap_pager_unswapped_acct(m);
+				swp_pager_update_freerange(&range, sb->d[i]);
+				sb->d[i] = SWAPBLK_NONE;
+				vm_page_launder(m);
+			} while (++i < endv &&
+			    (m = vm_page_next(m)) != NULL &&
+			    vm_page_all_valid(m) &&
+			    (m->oflags & VPO_SWAPINPROG) == 0);
+		} else if (m != NULL ?
+		    vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) :
+		    (m = vm_page_alloc(object, sb->p + i,
+		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL)) != NULL) {
 			vm_object_pip_add(object, 1);
 			rahead = SWAP_META_PAGES;
 			rv = swap_pager_getpages_locked(object, &m, 1, NULL,
@@ -1912,11 +1913,19 @@
 		 * scan of this swap block.  Pages paged in during this
 		 * iteration will be marked dirty in a future iteration.
 		 */
-			break;
 		}
-		if (i == SWAP_META_PAGES)
+		if (i == SWAP_META_PAGES) {
 			pi = sb->p + SWAP_META_PAGES;
+			if (!bad_block) {
+				SWAP_PCTRIE_REMOVE(
+				    &object->un_pager.swp.swp_blks, sb->p);
+				uma_zfree(swblk_zone, sb);
+			}
+			i = 0;
+			bad_block = false;
+		}
 	}
+	swp_pager_freeswapspace(&range);
 }
 
 /*