Index: kern/subr_blist.c
===================================================================
--- kern/subr_blist.c
+++ kern/subr_blist.c
@@ -769,6 +769,8 @@
 	int next_skip = ((u_int)skip / BLIST_META_RADIX);
 	int nblks = 0;
 
+	if (count > radix)
+		panic("blist_meta_fill: allocation too large");
 	if (count == radix || scan->u.bmu_avail == 0)  {
 		/*
 		 * ALL-ALLOCATED special case
@@ -800,9 +802,6 @@
 		radix /= BLIST_META_RADIX;
 	}
 
-	if (count > radix)
-		panic("blist_meta_fill: allocation too large");
-
 	i = (allocBlk - blk) / radix;
 	blk += i * radix;
 	i = i * next_skip + 1;
Index: vm/swap_pager.c
===================================================================
--- vm/swap_pager.c
+++ vm/swap_pager.c
@@ -2272,7 +2272,7 @@
 static int
 swapoff_one(struct swdevt *sp, struct ucred *cred)
 {
-	u_long nblks, dvbase;
+	u_long nblks;
 #ifdef MAC
 	int error;
 #endif
@@ -2301,14 +2301,7 @@
 	 */
 	mtx_lock(&sw_dev_mtx);
 	sp->sw_flags |= SW_CLOSING;
-	for (dvbase = 0; dvbase < nblks; dvbase += BLIST_BMAP_RADIX) {
-		/*
-		 * blist_fill() cannot allocate more than BLIST_BMAP_RADIX
-		 * blocks per call.
-		 */
-		swap_pager_avail -= blist_fill(sp->sw_blist,
-		    dvbase, ulmin(nblks - dvbase, BLIST_BMAP_RADIX));
-	}
+	swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
 	swap_total -= (vm_ooffset_t)nblks * PAGE_SIZE;
 	mtx_unlock(&sw_dev_mtx);