Index: head/sys/vm/swap_pager.c =================================================================== --- head/sys/vm/swap_pager.c (revision 18892) +++ head/sys/vm/swap_pager.c (revision 18893) @@ -1,1664 +1,1662 @@ /* * Copyright (c) 1994 John S. Dyson * Copyright (c) 1990 University of Utah. * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ * * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 - * $Id: swap_pager.c,v 1.70 1996/07/30 03:08:05 dyson Exp $ + * $Id: swap_pager.c,v 1.71 1996/09/08 20:44:33 dyson Exp $ */ /* * Quick hack to page to dedicated partition(s). 
* TODO: * Add multiprocessor locks * Deal with async writes in a better fashion */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef NPENDINGIO #define NPENDINGIO 10 #endif static int nswiodone; int swap_pager_full; extern int vm_swap_size; static int no_swap_space = 1; struct rlisthdr swaplist; #define MAX_PAGEOUT_CLUSTER 16 TAILQ_HEAD(swpclean, swpagerclean); typedef struct swpagerclean *swp_clean_t; static struct swpagerclean { TAILQ_ENTRY(swpagerclean) spc_list; int spc_flags; struct buf *spc_bp; vm_object_t spc_object; vm_offset_t spc_kva; int spc_count; vm_page_t spc_m[MAX_PAGEOUT_CLUSTER]; } swcleanlist[NPENDINGIO]; /* spc_flags values */ #define SPC_ERROR 0x01 #define SWB_EMPTY (-1) /* list of completed page cleans */ static struct swpclean swap_pager_done; /* list of pending page cleans */ static struct swpclean swap_pager_inuse; /* list of free pager clean structs */ static struct swpclean swap_pager_free; int swap_pager_free_count; /* list of "named" anon region objects */ static struct pagerlst swap_pager_object_list; /* list of "unnamed" anon region objects */ struct pagerlst swap_pager_un_object_list; #define SWAP_FREE_NEEDED 0x1 /* need a swap block */ #define SWAP_FREE_NEEDED_BY_PAGEOUT 0x2 static int swap_pager_needflags; static struct pagerlst *swp_qs[] = { &swap_pager_object_list, &swap_pager_un_object_list, (struct pagerlst *) 0 }; /* * pagerops for OBJT_SWAP - "swap pager". */ static vm_object_t swap_pager_alloc __P((void *handle, vm_size_t size, vm_prot_t prot, vm_ooffset_t offset)); static void swap_pager_dealloc __P((vm_object_t object)); static boolean_t swap_pager_haspage __P((vm_object_t object, vm_pindex_t pindex, int *before, int *after)); static int swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int)); static void swap_pager_init __P((void)); static void swap_pager_sync __P((void)); struct pagerops swappagerops = { swap_pager_init, swap_pager_alloc, swap_pager_dealloc, swap_pager_getpages, swap_pager_putpages, swap_pager_haspage, swap_pager_sync }; static int npendingio = NPENDINGIO; static int dmmin; int dmmax; -static __pure int - swap_pager_block_index __P((vm_pindex_t pindex)) __pure2; -static __pure int - swap_pager_block_offset __P((vm_pindex_t pindex)) __pure2; +static int swap_pager_block_index __P((vm_pindex_t pindex)); +static int swap_pager_block_offset __P((vm_pindex_t pindex)); static daddr_t *swap_pager_diskaddr __P((vm_object_t object, vm_pindex_t pindex, int *valid)); static void swap_pager_finish __P((swp_clean_t spc)); static void swap_pager_freepage __P((vm_page_t m)); static void swap_pager_free_swap __P((vm_object_t object)); static void swap_pager_freeswapspace __P((vm_object_t object, unsigned int from, unsigned int to)); static int swap_pager_getswapspace __P((vm_object_t object, unsigned int amount, daddr_t *rtval)); static void swap_pager_iodone __P((struct buf *)); static void swap_pager_iodone1 __P((struct buf *bp)); static void swap_pager_reclaim __P((void)); static void swap_pager_ridpages __P((vm_page_t *m, int count, int reqpage)); static void swap_pager_setvalid __P((vm_object_t object, vm_offset_t offset, int valid)); static void swapsizecheck __P((void)); #define SWAPLOW (vm_swap_size < (512 * btodb(PAGE_SIZE))) static inline void swapsizecheck() { if (vm_swap_size < 128 * btodb(PAGE_SIZE)) { if (swap_pager_full == 0) printf("swap_pager: out of swap space\n"); swap_pager_full = 1; 
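/*
 * A worked example of the hysteresis in swapsizecheck() (illustrative,
 * assuming PAGE_SIZE == 4096 and DEV_BSIZE == 512, so btodb(PAGE_SIZE)
 * is 8): swap_pager_full is set once vm_swap_size drops below
 * 128 * 8 == 1024 disk blocks (512KB), and is only cleared again above
 * 192 * 8 == 1536 blocks (768KB), so the flag cannot flap while
 * vm_swap_size hovers around a single threshold.
 */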
} else if (vm_swap_size > 192 * btodb(PAGE_SIZE)) swap_pager_full = 0; } static void swap_pager_init() { TAILQ_INIT(&swap_pager_object_list); TAILQ_INIT(&swap_pager_un_object_list); /* * Initialize clean lists */ TAILQ_INIT(&swap_pager_inuse); TAILQ_INIT(&swap_pager_done); TAILQ_INIT(&swap_pager_free); swap_pager_free_count = 0; /* * Calculate the swap allocation constants. */ dmmin = PAGE_SIZE / DEV_BSIZE; dmmax = btodb(SWB_NPAGES * PAGE_SIZE) * 2; } void swap_pager_swap_init() { swp_clean_t spc; struct buf *bp; int i; /* * kva's are allocated here so that we don't need to keep doing * kmem_alloc pageables at runtime */ for (i = 0, spc = swcleanlist; i < npendingio; i++, spc++) { spc->spc_kva = kmem_alloc_pageable(pager_map, PAGE_SIZE * MAX_PAGEOUT_CLUSTER); if (!spc->spc_kva) { break; } spc->spc_bp = malloc(sizeof(*bp), M_TEMP, M_KERNEL); if (!spc->spc_bp) { kmem_free_wakeup(pager_map, spc->spc_kva, PAGE_SIZE); break; } spc->spc_flags = 0; TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list); swap_pager_free_count++; } } int swap_pager_swp_alloc(object, wait) vm_object_t object; int wait; { sw_blk_t swb; int nblocks; int i, j; nblocks = (object->size + SWB_NPAGES - 1) / SWB_NPAGES; swb = malloc(nblocks * sizeof(*swb), M_VMPGDATA, wait); if (swb == NULL) return 1; for (i = 0; i < nblocks; i++) { swb[i].swb_valid = 0; swb[i].swb_locked = 0; for (j = 0; j < SWB_NPAGES; j++) swb[i].swb_block[j] = SWB_EMPTY; } object->un_pager.swp.swp_nblocks = nblocks; object->un_pager.swp.swp_allocsize = 0; object->un_pager.swp.swp_blocks = swb; object->un_pager.swp.swp_poip = 0; if (object->handle != NULL) { TAILQ_INSERT_TAIL(&swap_pager_object_list, object, pager_object_list); } else { TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list); } return 0; } /* * Allocate an object and associated resources. * Note that if we are called from the pageout daemon (handle == NULL) * we should not wait for memory as it could result in deadlock. */ static vm_object_t swap_pager_alloc(handle, size, prot, offset) void *handle; register vm_size_t size; vm_prot_t prot; vm_ooffset_t offset; { vm_object_t object; /* * If this is a "named" anonymous region, look it up and use the * object if it exists, otherwise allocate a new one. */ if (handle) { object = vm_pager_object_lookup(&swap_pager_object_list, handle); if (object != NULL) { vm_object_reference(object); } else { /* * XXX - there is a race condition here. Two processes * can request the same named object simultaneously, * and if one blocks for memory, the result is a disaster. * Probably quite rare, but is yet another reason to just * rip support of "named anonymous regions" out altogether.
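 * An example interleaving (hypothetical): process A misses in the
 * lookup above and then blocks for memory inside
 * swap_pager_swp_alloc(); process B looks up the same handle, also
 * misses (A's object is not yet on swap_pager_object_list), and
 * installs its own object, leaving two swap objects for one handle.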
*/ object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset + PAGE_MASK) + size); object->handle = handle; (void) swap_pager_swp_alloc(object, M_WAITOK); } } else { object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset + PAGE_MASK) + size); (void) swap_pager_swp_alloc(object, M_WAITOK); } return (object); } /* * returns disk block associated with pager and offset * additionally, as a side effect returns a flag indicating * if the block has been written */ inline static daddr_t * swap_pager_diskaddr(object, pindex, valid) vm_object_t object; vm_pindex_t pindex; int *valid; { register sw_blk_t swb; int ix; if (valid) *valid = 0; ix = pindex / SWB_NPAGES; if ((ix >= object->un_pager.swp.swp_nblocks) || (pindex >= object->size)) { return (FALSE); } swb = &object->un_pager.swp.swp_blocks[ix]; ix = pindex % SWB_NPAGES; if (valid) *valid = swb->swb_valid & (1 << ix); return &swb->swb_block[ix]; } /* * Utility routine to set the valid (written) bit for * a block associated with a pager and offset */ static void swap_pager_setvalid(object, offset, valid) vm_object_t object; vm_offset_t offset; int valid; { register sw_blk_t swb; int ix; ix = offset / SWB_NPAGES; if (ix >= object->un_pager.swp.swp_nblocks) return; swb = &object->un_pager.swp.swp_blocks[ix]; ix = offset % SWB_NPAGES; if (valid) swb->swb_valid |= (1 << ix); else swb->swb_valid &= ~(1 << ix); return; } /* * this routine allocates swap space with a fragmentation * minimization policy. */ static int swap_pager_getswapspace(object, amount, rtval) vm_object_t object; unsigned int amount; daddr_t *rtval; { unsigned location; vm_swap_size -= amount; if (!rlist_alloc(&swaplist, amount, &location)) { vm_swap_size += amount; return 0; } else { swapsizecheck(); object->un_pager.swp.swp_allocsize += amount; *rtval = location; return 1; } } /* * this routine frees swap space with a fragmentation * minimization policy. 
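 * The [from, to] range is inclusive at both ends. For illustration
 * (assuming 4K pages and 512-byte disk blocks): releasing the single
 * page at block blk is done as
 *	swap_pager_freeswapspace(object, blk, blk + btodb(PAGE_SIZE) - 1);
 * which returns the eight blocks blk..blk+7 to the swap rlist.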
*/ static void swap_pager_freeswapspace(object, from, to) vm_object_t object; unsigned int from; unsigned int to; { rlist_free(&swaplist, from, to); vm_swap_size += (to - from) + 1; object->un_pager.swp.swp_allocsize -= (to - from) + 1; swapsizecheck(); } /* * this routine frees swap blocks from a specified pager */ void swap_pager_freespace(object, start, size) vm_object_t object; vm_pindex_t start; vm_size_t size; { vm_pindex_t i; int s; s = splbio(); for (i = start; i < start + size; i += 1) { int valid; daddr_t *addr = swap_pager_diskaddr(object, i, &valid); if (addr && *addr != SWB_EMPTY) { swap_pager_freeswapspace(object, *addr, *addr + btodb(PAGE_SIZE) - 1); if (valid) { swap_pager_setvalid(object, i, 0); } *addr = SWB_EMPTY; } } splx(s); } /* * same as freespace, but don't free, just force a DMZ next time */ void swap_pager_dmzspace(object, start, size) vm_object_t object; vm_pindex_t start; vm_size_t size; { vm_pindex_t i; int s; s = splbio(); for (i = start; i < start + size; i += 1) { int valid; daddr_t *addr = swap_pager_diskaddr(object, i, &valid); if (addr && *addr != SWB_EMPTY) { if (valid) { swap_pager_setvalid(object, i, 0); } } } splx(s); } static void swap_pager_free_swap(object) vm_object_t object; { register int i, j; register sw_blk_t swb; int first_block=0, block_count=0; int s; /* * Free left over swap blocks */ s = splbio(); for (i = 0, swb = object->un_pager.swp.swp_blocks; i < object->un_pager.swp.swp_nblocks; i++, swb++) { for (j = 0; j < SWB_NPAGES; j++) { if (swb->swb_block[j] != SWB_EMPTY) { /* * initially the length of the run is zero */ if (block_count == 0) { first_block = swb->swb_block[j]; block_count = btodb(PAGE_SIZE); swb->swb_block[j] = SWB_EMPTY; /* * if the new block can be included into the current run */ } else if (swb->swb_block[j] == first_block + block_count) { block_count += btodb(PAGE_SIZE); swb->swb_block[j] = SWB_EMPTY; /* * terminate the previous run, and start a new one */ } else { swap_pager_freeswapspace(object, first_block, (unsigned) first_block + block_count - 1); first_block = swb->swb_block[j]; block_count = btodb(PAGE_SIZE); swb->swb_block[j] = SWB_EMPTY; } } } } if (block_count) { swap_pager_freeswapspace(object, first_block, (unsigned) first_block + block_count - 1); } splx(s); } /* * swap_pager_reclaim frees up over-allocated space from all pagers; * this eliminates internal fragmentation due to allocation of space * for segments that are never swapped to. It has been written so that * it does not block until the rlist_free operation occurs; it keeps * the queues consistent.
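 * For example, swap_pager_putpages() may preallocate a full
 * SWB_NPAGES cluster for a small object to keep allocations
 * contiguous; the blocks of that cluster that are never actually
 * written (their swb_valid bits stay clear) are exactly what this
 * routine hands back to the rlist.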
*/ /* * Maximum number of blocks (pages) to reclaim per pass */ #define MAXRECLAIM 128 static void swap_pager_reclaim() { vm_object_t object; int i, j, k; int s; int reclaimcount; static struct { int address; vm_object_t object; } reclaims[MAXRECLAIM]; static int in_reclaim; /* * allow only one process to be in the swap_pager_reclaim subroutine */ s = splbio(); if (in_reclaim) { tsleep(&in_reclaim, PSWP, "swrclm", 0); splx(s); return; } in_reclaim = 1; reclaimcount = 0; /* for each pager queue */ for (k = 0; swp_qs[k]; k++) { object = TAILQ_FIRST(swp_qs[k]); while (object && (reclaimcount < MAXRECLAIM)) { /* * see if any blocks associated with a pager have been * allocated but not used (written) */ if ((object->flags & OBJ_DEAD) == 0 && (object->paging_in_progress == 0)) { for (i = 0; i < object->un_pager.swp.swp_nblocks; i++) { sw_blk_t swb = &object->un_pager.swp.swp_blocks[i]; if (swb->swb_locked) continue; for (j = 0; j < SWB_NPAGES; j++) { if (swb->swb_block[j] != SWB_EMPTY && (swb->swb_valid & (1 << j)) == 0) { reclaims[reclaimcount].address = swb->swb_block[j]; reclaims[reclaimcount++].object = object; swb->swb_block[j] = SWB_EMPTY; if (reclaimcount >= MAXRECLAIM) goto rfinished; } } } } object = TAILQ_NEXT(object, pager_object_list); } } rfinished: /* * free the blocks that have been added to the reclaim list */ for (i = 0; i < reclaimcount; i++) { swap_pager_freeswapspace(reclaims[i].object, reclaims[i].address, reclaims[i].address + btodb(PAGE_SIZE) - 1); } splx(s); in_reclaim = 0; wakeup(&in_reclaim); } /* * swap_pager_copy copies blocks from one pager to another and * destroys the source pager */ void swap_pager_copy(srcobject, srcoffset, dstobject, dstoffset, offset) vm_object_t srcobject; vm_pindex_t srcoffset; vm_object_t dstobject; vm_pindex_t dstoffset; vm_pindex_t offset; { vm_pindex_t i; int origsize; int s; if (vm_swap_size) no_swap_space = 0; origsize = srcobject->un_pager.swp.swp_allocsize; /* * remove the source object from the swap_pager internal queue */ if (srcobject->handle == NULL) { TAILQ_REMOVE(&swap_pager_un_object_list, srcobject, pager_object_list); } else { TAILQ_REMOVE(&swap_pager_object_list, srcobject, pager_object_list); } s = splbio(); while (srcobject->un_pager.swp.swp_poip) { tsleep(srcobject, PVM, "spgout", 0); } splx(s); /* * clean all of the pages that are currently active and finished */ swap_pager_sync(); s = splbio(); /* * transfer source to destination */ for (i = 0; i < dstobject->size; i += 1) { int srcvalid, dstvalid; daddr_t *srcaddrp = swap_pager_diskaddr(srcobject, i + offset + srcoffset, &srcvalid); daddr_t *dstaddrp; /* * see if the source has space allocated */ if (srcaddrp && *srcaddrp != SWB_EMPTY) { /* * if the source is valid and the dest has no space, * then copy the allocation from the source to the * dest. */ if (srcvalid) { dstaddrp = swap_pager_diskaddr(dstobject, i + dstoffset, &dstvalid); /* * if the dest already has a valid block, * deallocate the source block without * copying. */ if (!dstvalid && dstaddrp && *dstaddrp != SWB_EMPTY) { swap_pager_freeswapspace(dstobject, *dstaddrp, *dstaddrp + btodb(PAGE_SIZE) - 1); *dstaddrp = SWB_EMPTY; } if (dstaddrp && *dstaddrp == SWB_EMPTY) { *dstaddrp = *srcaddrp; *srcaddrp = SWB_EMPTY; dstobject->un_pager.swp.swp_allocsize += btodb(PAGE_SIZE); srcobject->un_pager.swp.swp_allocsize -= btodb(PAGE_SIZE); swap_pager_setvalid(dstobject, i + dstoffset, 1); } } /* * if the source is not empty at this point, then * deallocate the space.
*/ if (*srcaddrp != SWB_EMPTY) { swap_pager_freeswapspace(srcobject, *srcaddrp, *srcaddrp + btodb(PAGE_SIZE) - 1); *srcaddrp = SWB_EMPTY; } } } splx(s); /* * Free left over swap blocks */ swap_pager_free_swap(srcobject); if (srcobject->un_pager.swp.swp_allocsize) { printf("swap_pager_copy: *warning* pager with %d blocks (orig: %d)\n", srcobject->un_pager.swp.swp_allocsize, origsize); } free(srcobject->un_pager.swp.swp_blocks, M_VMPGDATA); srcobject->un_pager.swp.swp_blocks = NULL; return; } static void swap_pager_dealloc(object) vm_object_t object; { int s; /* * Remove from list right away so lookups will fail if we block for * pageout completion. */ if (object->handle == NULL) { TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list); } else { TAILQ_REMOVE(&swap_pager_object_list, object, pager_object_list); } /* * Wait for all pageouts to finish and remove all entries from * cleaning list. */ s = splbio(); while (object->un_pager.swp.swp_poip) { tsleep(object, PVM, "swpout", 0); } splx(s); swap_pager_sync(); /* * Free left over swap blocks */ swap_pager_free_swap(object); if (object->un_pager.swp.swp_allocsize) { printf("swap_pager_dealloc: *warning* freeing pager with %d blocks\n", object->un_pager.swp.swp_allocsize); } /* * Free swap management resources */ free(object->un_pager.swp.swp_blocks, M_VMPGDATA); object->un_pager.swp.swp_blocks = NULL; } -static inline __pure int +static inline int swap_pager_block_index(pindex) vm_pindex_t pindex; { return (pindex / SWB_NPAGES); } -static inline __pure int +static inline int swap_pager_block_offset(pindex) vm_pindex_t pindex; { return (pindex % SWB_NPAGES); } /* * swap_pager_haspage returns TRUE if the pager has data that has * been written out. */ static boolean_t swap_pager_haspage(object, pindex, before, after) vm_object_t object; vm_pindex_t pindex; int *before; int *after; { register sw_blk_t swb; int ix; if (before != NULL) *before = 0; if (after != NULL) *after = 0; ix = pindex / SWB_NPAGES; if (ix >= object->un_pager.swp.swp_nblocks) { return (FALSE); } swb = &object->un_pager.swp.swp_blocks[ix]; ix = pindex % SWB_NPAGES; if (swb->swb_block[ix] != SWB_EMPTY) { if (swb->swb_valid & (1 << ix)) { int tix; if (before) { for(tix = ix - 1; tix >= 0; --tix) { if ((swb->swb_valid & (1 << tix)) == 0) break; if ((swb->swb_block[tix] + (ix - tix) * (PAGE_SIZE/DEV_BSIZE)) != swb->swb_block[ix]) break; (*before)++; } } if (after) { for(tix = ix + 1; tix < SWB_NPAGES; tix++) { if ((swb->swb_valid & (1 << tix)) == 0) break; if ((swb->swb_block[tix] - (tix - ix) * (PAGE_SIZE/DEV_BSIZE)) != swb->swb_block[ix]) break; (*after)++; } } return TRUE; } } return (FALSE); } /* * swap_pager_freepage is a convenience routine that clears the busy * bit and deallocates a page. */ static void swap_pager_freepage(m) vm_page_t m; { PAGE_WAKEUP(m); vm_page_free(m); } /* * swap_pager_ridpages is a convenience routine that deallocates all * but the required page. this is usually used in error returns that * need to invalidate the "extra" readahead pages.
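 * For example, an 8-page getpages cluster with reqpage == 3 that
 * fails would free m[0..2] and m[4..7] here, leaving only the
 * faulted page m[3] for the caller's error handling.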
*/ static void swap_pager_ridpages(m, count, reqpage) vm_page_t *m; int count; int reqpage; { int i; for (i = 0; i < count; i++) if (i != reqpage) swap_pager_freepage(m[i]); } /* * swap_pager_iodone1 is the completion routine for both reads and async writes */ static void swap_pager_iodone1(bp) struct buf *bp; { bp->b_flags |= B_DONE; bp->b_flags &= ~B_ASYNC; wakeup(bp); } static int swap_pager_getpages(object, m, count, reqpage) vm_object_t object; vm_page_t *m; int count, reqpage; { register struct buf *bp; sw_blk_t swb[count]; register int s; int i; boolean_t rv; vm_offset_t kva, off[count]; swp_clean_t spc; vm_pindex_t paging_offset; int reqaddr[count]; int sequential; int first, last; int failed; int reqdskregion; object = m[reqpage]->object; paging_offset = OFF_TO_IDX(object->paging_offset); sequential = (m[reqpage]->pindex == (object->last_read + 1)); for (i = 0; i < count; i++) { vm_pindex_t fidx = m[i]->pindex + paging_offset; int ix = swap_pager_block_index(fidx); if (ix >= object->un_pager.swp.swp_nblocks) { int j; if (i <= reqpage) { swap_pager_ridpages(m, count, reqpage); return (VM_PAGER_FAIL); } for (j = i; j < count; j++) { swap_pager_freepage(m[j]); } count = i; break; } swb[i] = &object->un_pager.swp.swp_blocks[ix]; off[i] = swap_pager_block_offset(fidx); reqaddr[i] = swb[i]->swb_block[off[i]]; } /* make sure that our required input request exists */ if (reqaddr[reqpage] == SWB_EMPTY || (swb[reqpage]->swb_valid & (1 << off[reqpage])) == 0) { swap_pager_ridpages(m, count, reqpage); return (VM_PAGER_FAIL); } reqdskregion = reqaddr[reqpage] / dmmax; /* * search backwards for the first contiguous page to transfer */ failed = 0; first = 0; for (i = reqpage - 1; i >= 0; --i) { if (sequential || failed || (reqaddr[i] == SWB_EMPTY) || (swb[i]->swb_valid & (1 << off[i])) == 0 || (reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) || ((reqaddr[i] / dmmax) != reqdskregion)) { failed = 1; swap_pager_freepage(m[i]); if (first == 0) first = i + 1; } } /* * search forwards for the last contiguous page to transfer */ failed = 0; last = count; for (i = reqpage + 1; i < count; i++) { if (failed || (reqaddr[i] == SWB_EMPTY) || (swb[i]->swb_valid & (1 << off[i])) == 0 || (reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) || ((reqaddr[i] / dmmax) != reqdskregion)) { failed = 1; swap_pager_freepage(m[i]); if (last == count) last = i; } } count = last; if (first != 0) { for (i = first; i < count; i++) { m[i - first] = m[i]; reqaddr[i - first] = reqaddr[i]; off[i - first] = off[i]; } count -= first; reqpage -= first; } ++swb[reqpage]->swb_locked; /* * at this point: "m" is a pointer to the array of vm_page_t for * paging I/O; "count" is the number of vm_page_t entries represented * by "m"; "object" is the vm_object_t for I/O; "reqpage" is the index * into "m" for the page actually faulted */ spc = NULL; if ((count == 1) && ((spc = TAILQ_FIRST(&swap_pager_free)) != NULL)) { TAILQ_REMOVE(&swap_pager_free, spc, spc_list); swap_pager_free_count--; kva = spc->spc_kva; bp = spc->spc_bp; bzero(bp, sizeof *bp); bp->b_spc = spc; bp->b_vnbufs.le_next = NOLIST; } else { /* * Get a swap buffer header to perform the IO */ bp = getpbuf(); kva = (vm_offset_t) bp->b_data; } /* * map our page(s) into kva for input */ pmap_qenter(kva, m, count); bp->b_flags = B_BUSY | B_READ | B_CALL | B_PAGING; bp->b_iodone = swap_pager_iodone1; bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred; crhold(bp->b_rcred);
crhold(bp->b_wcred); bp->b_un.b_addr = (caddr_t) kva; bp->b_blkno = reqaddr[0]; bp->b_bcount = PAGE_SIZE * count; bp->b_bufsize = PAGE_SIZE * count; pbgetvp(swapdev_vp, bp); cnt.v_swapin++; cnt.v_swappgsin += count; /* * perform the I/O */ VOP_STRATEGY(bp); /* * wait for the sync I/O to complete */ s = splbio(); while ((bp->b_flags & B_DONE) == 0) { if (tsleep(bp, PVM, "swread", hz*20)) { printf("swap_pager: indefinite wait buffer: device: %d, blkno: %d, size: %d\n", bp->b_dev, bp->b_blkno, bp->b_bcount); } } if (bp->b_flags & B_ERROR) { printf("swap_pager: I/O error - pagein failed; blkno %d, size %d, error %d\n", bp->b_blkno, bp->b_bcount, bp->b_error); rv = VM_PAGER_ERROR; } else { rv = VM_PAGER_OK; } /* * relpbuf does this, but we maintain our own buffer list also... */ if (bp->b_vp) pbrelvp(bp); splx(s); swb[reqpage]->swb_locked--; /* * remove the mapping for kernel virtual */ pmap_qremove(kva, count); if (spc) { m[reqpage]->object->last_read = m[reqpage]->pindex; if (bp->b_flags & B_WANTED) wakeup(bp); /* * if we have used an spc, we need to free it. */ if (bp->b_rcred != NOCRED) crfree(bp->b_rcred); if (bp->b_wcred != NOCRED) crfree(bp->b_wcred); TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list); swap_pager_free_count++; if (swap_pager_needflags & SWAP_FREE_NEEDED) { wakeup(&swap_pager_free); } if (swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) pagedaemon_wakeup(); swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT); if (rv == VM_PAGER_OK) { pmap_clear_modify(VM_PAGE_TO_PHYS(m[reqpage])); m[reqpage]->valid = VM_PAGE_BITS_ALL; m[reqpage]->dirty = 0; } } else { /* * release the physical I/O buffer */ relpbuf(bp); /* * finish up input if everything is ok */ if (rv == VM_PAGER_OK) { for (i = 0; i < count; i++) { pmap_clear_modify(VM_PAGE_TO_PHYS(m[i])); m[i]->dirty = 0; m[i]->flags &= ~PG_ZERO; if (i != reqpage) { /* * whether or not to leave the page * activated is up in the air, but we * should put the page on a page queue * somewhere. (it already is in the * object). After some empirical * results, it is best to deactivate * the readahead pages. */ vm_page_deactivate(m[i]); /* * just in case someone was asking for * this page we now tell them that it * is ok to use */ m[i]->valid = VM_PAGE_BITS_ALL; PAGE_WAKEUP(m[i]); } } m[reqpage]->object->last_read = m[count-1]->pindex; /* * If we're out of swap space, then attempt to free * some whenever multiple pages are brought in. We * must set the dirty bits so that the page contents * will be preserved.
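 * (Marking the pages fully dirty before swap_pager_freespace()
 * matters: once the swap copy is released there is no backing copy
 * left, so a clean-looking page could otherwise be reclaimed and
 * its contents lost.)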
*/ if (SWAPLOW) { for (i = 0; i < count; i++) { m[i]->dirty = VM_PAGE_BITS_ALL; } swap_pager_freespace(object, m[0]->pindex + paging_offset, count); } } else { swap_pager_ridpages(m, count, reqpage); } } return (rv); } int swap_pager_putpages(object, m, count, sync, rtvals) vm_object_t object; vm_page_t *m; int count; boolean_t sync; int *rtvals; { register struct buf *bp; sw_blk_t swb[count]; register int s; int i, j, ix; boolean_t rv; vm_offset_t kva, off, fidx; swp_clean_t spc; vm_pindex_t paging_pindex; int reqaddr[count]; int failed; if (vm_swap_size) no_swap_space = 0; if (no_swap_space) { for (i = 0; i < count; i++) rtvals[i] = VM_PAGER_FAIL; return VM_PAGER_FAIL; } spc = NULL; object = m[0]->object; paging_pindex = OFF_TO_IDX(object->paging_offset); failed = 0; for (j = 0; j < count; j++) { fidx = m[j]->pindex + paging_pindex; ix = swap_pager_block_index(fidx); swb[j] = 0; if (ix >= object->un_pager.swp.swp_nblocks) { rtvals[j] = VM_PAGER_FAIL; failed = 1; continue; } else { rtvals[j] = VM_PAGER_OK; } swb[j] = &object->un_pager.swp.swp_blocks[ix]; swb[j]->swb_locked++; if (failed) { rtvals[j] = VM_PAGER_FAIL; continue; } off = swap_pager_block_offset(fidx); reqaddr[j] = swb[j]->swb_block[off]; if (reqaddr[j] == SWB_EMPTY) { daddr_t blk; int tries; int ntoget; tries = 0; s = splbio(); /* * if any other pages have been allocated in this * block, we only try to get one page. */ for (i = 0; i < SWB_NPAGES; i++) { if (swb[j]->swb_block[i] != SWB_EMPTY) break; } ntoget = (i == SWB_NPAGES) ? SWB_NPAGES : 1; /* * this code is a little conservative, but works (the * intent of this code is to allocate small chunks for * small objects) */ if ((off == 0) && ((fidx + ntoget) > object->size)) { ntoget = object->size - fidx; } retrygetspace: if (!swap_pager_full && ntoget > 1 && swap_pager_getswapspace(object, ntoget * btodb(PAGE_SIZE), &blk)) { for (i = 0; i < ntoget; i++) { swb[j]->swb_block[i] = blk + btodb(PAGE_SIZE) * i; swb[j]->swb_valid = 0; } reqaddr[j] = swb[j]->swb_block[off]; } else if (!swap_pager_getswapspace(object, btodb(PAGE_SIZE), &swb[j]->swb_block[off])) { /* * if the allocation has failed, we try to * reclaim space and retry. */ if (++tries == 1) { swap_pager_reclaim(); goto retrygetspace; } rtvals[j] = VM_PAGER_AGAIN; failed = 1; swap_pager_full = 1; } else { reqaddr[j] = swb[j]->swb_block[off]; swb[j]->swb_valid &= ~(1 << off); } splx(s); } } /* * search forwards for the last contiguous page to transfer */ failed = 0; for (i = 0; i < count; i++) { if (failed || (reqaddr[i] != reqaddr[0] + i * btodb(PAGE_SIZE)) || ((reqaddr[i] / dmmax) != (reqaddr[0] / dmmax)) || (rtvals[i] != VM_PAGER_OK)) { failed = 1; if (rtvals[i] == VM_PAGER_OK) rtvals[i] = VM_PAGER_AGAIN; } } for (i = 0; i < count; i++) { if (rtvals[i] != VM_PAGER_OK) { if (swb[i]) --swb[i]->swb_locked; } } for (i = 0; i < count; i++) if (rtvals[i] != VM_PAGER_OK) break; if (i == 0) { return VM_PAGER_AGAIN; } count = i; for (i = 0; i < count; i++) { if (reqaddr[i] == SWB_EMPTY) { printf("I/O to empty block???? -- pindex: %d, i: %d\n", m[i]->pindex, i); } } /* * For synchronous writes, we clean up all completed async pageouts.
*/ if (sync == TRUE) { swap_pager_sync(); } kva = 0; /* * get a swap pager clean data structure, block until we get it */ if (swap_pager_free_count <= 3) { s = splbio(); if (curproc == pageproc) { retryfree: /* * pageout daemon needs a swap control block */ swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT|SWAP_FREE_NEEDED; /* * if it does not get one within a short time, then * there is a potential deadlock, so we go on trying * to free pages. It is important to block here as opposed * to returning, thereby allowing the pageout daemon to continue. * It is likely that the pageout daemon will start suboptimally * reclaiming vnode backed pages if we don't block. Since the * I/O subsystem is probably already fully utilized, might as * well wait. */ if (tsleep(&swap_pager_free, PVM, "swpfre", hz/5)) { swap_pager_sync(); if (swap_pager_free_count <= 3) { splx(s); return VM_PAGER_AGAIN; } } else { /* * we make sure that pageouts aren't taking up all of * the free swap control blocks. */ swap_pager_sync(); if (swap_pager_free_count <= 3) { goto retryfree; } } } else { pagedaemon_wakeup(); while (swap_pager_free_count <= 3) { swap_pager_needflags |= SWAP_FREE_NEEDED; tsleep(&swap_pager_free, PVM, "swpfre", 0); pagedaemon_wakeup(); } } splx(s); } spc = TAILQ_FIRST(&swap_pager_free); if (spc == NULL) panic("swap_pager_putpages: free queue is empty, %d expected\n", swap_pager_free_count); TAILQ_REMOVE(&swap_pager_free, spc, spc_list); swap_pager_free_count--; kva = spc->spc_kva; /* * map our page(s) into kva for I/O */ pmap_qenter(kva, m, count); /* * get the base I/O offset into the swap file */ for (i = 0; i < count; i++) { fidx = m[i]->pindex + paging_pindex; off = swap_pager_block_offset(fidx); /* * set the valid bit */ swb[i]->swb_valid |= (1 << off); /* * and unlock the data structure */ swb[i]->swb_locked--; } /* * Get a swap buffer header and perform the IO */ bp = spc->spc_bp; bzero(bp, sizeof *bp); bp->b_spc = spc; bp->b_vnbufs.le_next = NOLIST; bp->b_flags = B_BUSY | B_PAGING; bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred; if (bp->b_rcred != NOCRED) crhold(bp->b_rcred); if (bp->b_wcred != NOCRED) crhold(bp->b_wcred); bp->b_data = (caddr_t) kva; bp->b_blkno = reqaddr[0]; pbgetvp(swapdev_vp, bp); bp->b_bcount = PAGE_SIZE * count; bp->b_bufsize = PAGE_SIZE * count; swapdev_vp->v_numoutput++; /* * If this is an async write we set up additional buffer fields and * place a "cleaning" entry on the inuse queue.
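 * The clean-list lifecycle, for illustration: an spc starts on
 * swap_pager_free, moves to swap_pager_inuse here for an async
 * write, is shifted to swap_pager_done by swap_pager_iodone() at
 * interrupt time, and finally returns to swap_pager_free when
 * swap_pager_sync() calls swap_pager_finish() on it.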
*/ s = splbio(); if (sync == FALSE) { spc->spc_flags = 0; spc->spc_object = object; for (i = 0; i < count; i++) spc->spc_m[i] = m[i]; spc->spc_count = count; /* * the completion routine for async writes */ bp->b_flags |= B_CALL; bp->b_iodone = swap_pager_iodone; bp->b_dirtyoff = 0; bp->b_dirtyend = bp->b_bcount; object->un_pager.swp.swp_poip++; TAILQ_INSERT_TAIL(&swap_pager_inuse, spc, spc_list); } else { object->un_pager.swp.swp_poip++; bp->b_flags |= B_CALL; bp->b_iodone = swap_pager_iodone1; } cnt.v_swapout++; cnt.v_swappgsout += count; /* * perform the I/O */ VOP_STRATEGY(bp); if (sync == FALSE) { if ((bp->b_flags & B_DONE) == B_DONE) { swap_pager_sync(); } splx(s); for (i = 0; i < count; i++) { rtvals[i] = VM_PAGER_PEND; } return VM_PAGER_PEND; } /* * wait for the sync I/O to complete */ while ((bp->b_flags & B_DONE) == 0) { tsleep(bp, PVM, "swwrt", 0); } if (bp->b_flags & B_ERROR) { printf("swap_pager: I/O error - pageout failed; blkno %d, size %d, error %d\n", bp->b_blkno, bp->b_bcount, bp->b_error); rv = VM_PAGER_ERROR; } else { rv = VM_PAGER_OK; } object->un_pager.swp.swp_poip--; if (object->un_pager.swp.swp_poip == 0) wakeup(object); if (bp->b_vp) pbrelvp(bp); if (bp->b_flags & B_WANTED) wakeup(bp); splx(s); /* * remove the mapping for kernel virtual */ pmap_qremove(kva, count); /* * if we have written the page, then indicate that the page is clean. */ if (rv == VM_PAGER_OK) { for (i = 0; i < count; i++) { if (rtvals[i] == VM_PAGER_OK) { pmap_clear_modify(VM_PAGE_TO_PHYS(m[i])); m[i]->dirty = 0; /* * optimization, if a page has been read * during the pageout process, we activate it. */ if ((m[i]->queue != PQ_ACTIVE) && ((m[i]->flags & (PG_WANTED|PG_REFERENCED)) || pmap_is_referenced(VM_PAGE_TO_PHYS(m[i])))) { vm_page_activate(m[i]); } } } } else { for (i = 0; i < count; i++) { rtvals[i] = rv; } } if (bp->b_rcred != NOCRED) crfree(bp->b_rcred); if (bp->b_wcred != NOCRED) crfree(bp->b_wcred); TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list); swap_pager_free_count++; if (swap_pager_needflags & SWAP_FREE_NEEDED) { wakeup(&swap_pager_free); } if (swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) pagedaemon_wakeup(); swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT); return (rv); } static void swap_pager_sync() { register swp_clean_t spc, tspc; register int s; tspc = NULL; if (TAILQ_FIRST(&swap_pager_done) == NULL) return; for (;;) { s = splbio(); /* * Lookup and removal from the done list must be done at splbio() * to avoid conflicts with swap_pager_iodone. */ while ((spc = TAILQ_FIRST(&swap_pager_done)) != 0) { pmap_qremove(spc->spc_kva, spc->spc_count); swap_pager_finish(spc); TAILQ_REMOVE(&swap_pager_done, spc, spc_list); goto doclean; } /* * No operations done, that's all we can do for now. */ splx(s); break; /* * The desired page was found to be busy earlier in the scan * but has since completed.
*/ doclean: if (tspc && tspc == spc) { tspc = NULL; } spc->spc_flags = 0; TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list); swap_pager_free_count++; if (swap_pager_needflags & SWAP_FREE_NEEDED) { wakeup(&swap_pager_free); } if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) pagedaemon_wakeup(); swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT); splx(s); } return; } void swap_pager_finish(spc) register swp_clean_t spc; { vm_object_t object = spc->spc_m[0]->object; int i; object->paging_in_progress -= spc->spc_count; if ((object->paging_in_progress == 0) && (object->flags & OBJ_PIPWNT)) { object->flags &= ~OBJ_PIPWNT; wakeup(object); } /* * If no error, mark as clean and inform the pmap system. If error, * mark as dirty so we will try again. (XXX could get stuck doing * this, should give up after awhile) */ if (spc->spc_flags & SPC_ERROR) { for (i = 0; i < spc->spc_count; i++) { printf("swap_pager_finish: I/O error, clean of page %lx failed\n", (u_long) VM_PAGE_TO_PHYS(spc->spc_m[i])); } } else { for (i = 0; i < spc->spc_count; i++) { pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i])); spc->spc_m[i]->dirty = 0; if ((spc->spc_m[i]->queue != PQ_ACTIVE) && ((spc->spc_m[i]->flags & PG_WANTED) || pmap_is_referenced(VM_PAGE_TO_PHYS(spc->spc_m[i])))) vm_page_activate(spc->spc_m[i]); } } for (i = 0; i < spc->spc_count; i++) { /* * we wakeup any processes that are waiting on these pages. */ PAGE_WAKEUP(spc->spc_m[i]); } nswiodone -= spc->spc_count; return; } /* * swap_pager_iodone */ static void swap_pager_iodone(bp) register struct buf *bp; { register swp_clean_t spc; int s; s = splbio(); spc = (swp_clean_t) bp->b_spc; TAILQ_REMOVE(&swap_pager_inuse, spc, spc_list); TAILQ_INSERT_TAIL(&swap_pager_done, spc, spc_list); if (bp->b_flags & B_ERROR) { spc->spc_flags |= SPC_ERROR; printf("swap_pager: I/O error - async %s failed; blkno %lu, size %ld, error %d\n", (bp->b_flags & B_READ) ? "pagein" : "pageout", (u_long) bp->b_blkno, bp->b_bcount, bp->b_error); } if (bp->b_vp) pbrelvp(bp); /* if (bp->b_flags & B_WANTED) */ wakeup(bp); if (bp->b_rcred != NOCRED) crfree(bp->b_rcred); if (bp->b_wcred != NOCRED) crfree(bp->b_wcred); nswiodone += spc->spc_count; if (--spc->spc_object->un_pager.swp.swp_poip == 0) { wakeup(spc->spc_object); } if ((swap_pager_needflags & SWAP_FREE_NEEDED) || TAILQ_FIRST(&swap_pager_inuse) == 0) { swap_pager_needflags &= ~SWAP_FREE_NEEDED; wakeup(&swap_pager_free); } if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) { swap_pager_needflags &= ~SWAP_FREE_NEEDED_BY_PAGEOUT; pagedaemon_wakeup(); } if (vm_pageout_pages_needed) { wakeup(&vm_pageout_pages_needed); vm_pageout_pages_needed = 0; } if ((TAILQ_FIRST(&swap_pager_inuse) == NULL) || ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min && nswiodone + cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min)) { pagedaemon_wakeup(); } splx(s); } Index: head/sys/vm/vm_page.c =================================================================== --- head/sys/vm/vm_page.c (revision 18892) +++ head/sys/vm/vm_page.c (revision 18893) @@ -1,1407 +1,1406 @@ /* * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 - * $Id: vm_page.c,v 1.66 1996/09/28 17:53:18 bde Exp $ + * $Id: vm_page.c,v 1.67 1996/10/06 18:27:39 dyson Exp $ */ /* * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Resident memory management module. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void vm_page_queue_init __P((void)); static vm_page_t vm_page_select_free __P((vm_object_t object, vm_pindex_t pindex, int prefqueue)); /* * Associated with page of user-allocatable memory is a * page structure. */ static struct pglist *vm_page_buckets; /* Array of buckets */ static int vm_page_bucket_count; /* How big is array? 
*/ static int vm_page_hash_mask; /* Mask for hash function */ struct pglist vm_page_queue_free[PQ_L2_SIZE]; struct pglist vm_page_queue_zero[PQ_L2_SIZE]; struct pglist vm_page_queue_active; struct pglist vm_page_queue_inactive; struct pglist vm_page_queue_cache[PQ_L2_SIZE]; int no_queue; struct vpgqueues vm_page_queues[PQ_COUNT]; int pqcnt[PQ_COUNT]; static void vm_page_queue_init(void) { int i; vm_page_queues[PQ_NONE].pl = NULL; vm_page_queues[PQ_NONE].cnt = &no_queue; for(i=0;i biggestsize) { biggestone = i; biggestsize = size; } ++nblocks; total += size; } start = phys_avail[biggestone]; /* * Initialize the queue headers for the free queue, the active queue * and the inactive queue. */ vm_page_queue_init(); /* * Allocate (and initialize) the hash table buckets. * * The number of buckets MUST BE a power of 2, and the actual value is * the next power of 2 greater than the number of physical pages in * the system. * * Note: This computation can be tweaked if desired. */ vm_page_buckets = (struct pglist *) vaddr; bucket = vm_page_buckets; if (vm_page_bucket_count == 0) { vm_page_bucket_count = 1; while (vm_page_bucket_count < atop(total)) vm_page_bucket_count <<= 1; } vm_page_hash_mask = vm_page_bucket_count - 1; /* * Validate these addresses. */ new_start = start + vm_page_bucket_count * sizeof(struct pglist); new_start = round_page(new_start); mapped = vaddr; vaddr = pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE); start = new_start; bzero((caddr_t) mapped, vaddr - mapped); mapped = vaddr; for (i = 0; i < vm_page_bucket_count; i++) { TAILQ_INIT(bucket); bucket++; } /* * round (or truncate) the addresses to our page size. */ /* * Pre-allocate maps and map entries that cannot be dynamically * allocated via malloc(). The maps include the kernel_map and * kmem_map which must be initialized before malloc() will work * (obviously). Also could include pager maps which would be * allocated before kmeminit. * * Allow some kernel map entries... this should be plenty since people * shouldn't be cluttering up the kernel map (they should use their * own maps). */ kentry_data_size = MAX_KMAP * sizeof(struct vm_map) + MAX_KMAPENT * sizeof(struct vm_map_entry); kentry_data_size = round_page(kentry_data_size); kentry_data = (vm_offset_t) vaddr; vaddr += kentry_data_size; /* * Validate these zone addresses. */ new_start = start + (vaddr - mapped); pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE); bzero((caddr_t) mapped, (vaddr - mapped)); start = round_page(new_start); /* * Compute the number of pages of memory that will be available for * use (taking into account the overhead of a page structure per * page). */ first_page = phys_avail[0] / PAGE_SIZE; last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE; page_range = last_page - (phys_avail[0] / PAGE_SIZE); npages = (total - (page_range * sizeof(struct vm_page)) - (start - phys_avail[biggestone])) / PAGE_SIZE; /* * Initialize the mem entry structures now, and put them in the free * queue. */ vm_page_array = (vm_page_t) vaddr; mapped = vaddr; /* * Validate these addresses. 
*/ new_start = round_page(start + page_range * sizeof(struct vm_page)); mapped = pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE); start = new_start; first_managed_page = start / PAGE_SIZE; /* * Clear all of the page structures */ bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page)); vm_page_array_size = page_range; cnt.v_page_count = 0; cnt.v_free_count = 0; for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) { if (i == biggestone) pa = ptoa(first_managed_page); else pa = phys_avail[i]; while (pa < phys_avail[i + 1] && npages-- > 0) { ++cnt.v_page_count; ++cnt.v_free_count; m = PHYS_TO_VM_PAGE(pa); m->phys_addr = pa; m->flags = 0; m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK; m->queue = PQ_FREE + m->pc; TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq); ++(*vm_page_queues[m->queue].lcnt); pa += PAGE_SIZE; } } return (mapped); } /* * vm_page_hash: * * Distributes the object/offset key pair among hash buckets. * * NOTE: This macro depends on vm_page_bucket_count being a power of 2. */ static inline int vm_page_hash(object, pindex) vm_object_t object; vm_pindex_t pindex; { return ((((unsigned) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask; } /* * vm_page_insert: [ internal use only ] * * Inserts the given mem entry into the object/object-page * table and object list. * * The object and page must be locked, and must be splhigh. */ void vm_page_insert(m, object, pindex) register vm_page_t m; register vm_object_t object; register vm_pindex_t pindex; { register struct pglist *bucket; if (m->flags & PG_TABLED) panic("vm_page_insert: already inserted"); /* * Record the object/offset pair in this page */ m->object = object; m->pindex = pindex; /* * Insert it into the object_object/offset hash table */ bucket = &vm_page_buckets[vm_page_hash(object, pindex)]; TAILQ_INSERT_TAIL(bucket, m, hashq); /* * Now link into the object's list of backed pages. */ TAILQ_INSERT_TAIL(&object->memq, m, listq); m->flags |= PG_TABLED; m->object->page_hint = m; /* * And show that the object has one more resident page. */ object->resident_page_count++; } /* * vm_page_remove: [ internal use only ] * NOTE: used by device pager as well -wfj * * Removes the given mem entry from the object/offset-page * table and the object page list. * * The object and page must be locked, and at splhigh. */ void vm_page_remove(m) register vm_page_t m; { register struct pglist *bucket; if (!(m->flags & PG_TABLED)) return; if (m->object->page_hint == m) m->object->page_hint = NULL; /* * Remove from the object_object/offset hash table */ bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)]; TAILQ_REMOVE(bucket, m, hashq); /* * Now remove from the object's list of backed pages. */ TAILQ_REMOVE(&m->object->memq, m, listq); /* * And show that the object has one fewer resident page. */ m->object->resident_page_count--; m->flags &= ~PG_TABLED; } /* * vm_page_lookup: * * Returns the page associated with the object/offset * pair specified; if none is found, NULL is returned. * * The object must be locked. No side effects. 
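 * A worked lookup (values hypothetical): with vm_page_bucket_count
 * == 1024 the mask is 0x3ff, so an object at 0xf0211400 and pindex 7
 * hash to (((0xf0211400 >> 5) + (7 >> 1)) & 0x3ff) == 0xa3, and the
 * chain at vm_page_buckets[0xa3] is searched below for the exact
 * object/pindex match.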
*/ vm_page_t vm_page_lookup(object, pindex) register vm_object_t object; register vm_pindex_t pindex; { register vm_page_t m; register struct pglist *bucket; int s; /* * Search the hash table for this object/offset pair */ bucket = &vm_page_buckets[vm_page_hash(object, pindex)]; s = splvm(); for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m,hashq)) { if ((m->object == object) && (m->pindex == pindex)) { splx(s); m->object->page_hint = m; return (m); } } splx(s); return (NULL); } /* * vm_page_rename: * * Move the given memory entry from its * current object to the specified target object/offset. * * The object must be locked. */ void vm_page_rename(m, new_object, new_pindex) register vm_page_t m; register vm_object_t new_object; vm_pindex_t new_pindex; { int s; s = splvm(); vm_page_remove(m); vm_page_insert(m, new_object, new_pindex); splx(s); } /* * vm_page_unqueue without any wakeup */ void vm_page_unqueue_nowakeup(m) vm_page_t m; { int queue = m->queue; struct vpgqueues *pq; if (queue != PQ_NONE) { pq = &vm_page_queues[queue]; m->queue = PQ_NONE; TAILQ_REMOVE(pq->pl, m, pageq); --(*pq->cnt); --(*pq->lcnt); } } /* * vm_page_unqueue must be called at splhigh(); */ void vm_page_unqueue(m) vm_page_t m; { int queue = m->queue; struct vpgqueues *pq; if (queue != PQ_NONE) { m->queue = PQ_NONE; pq = &vm_page_queues[queue]; TAILQ_REMOVE(pq->pl, m, pageq); --(*pq->cnt); --(*pq->lcnt); if ((m->queue - m->pc) == PQ_CACHE) { if ((cnt.v_cache_count + cnt.v_free_count) < (cnt.v_free_reserved + cnt.v_cache_min)) pagedaemon_wakeup(); } } } /* * Find a page on the specified queue with color optimization. */ vm_page_t vm_page_list_find(basequeue, index) int basequeue, index; { #if PQ_L2_SIZE > 1 int i,j; vm_page_t m; int hindex; for(j = 0; j < PQ_L1_SIZE; j++) { for(i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1); i >= 0; i -= PQ_L1_SIZE) { hindex = (index + (i+j)) & PQ_L2_MASK; m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl); if (m) return m; hindex = (index - (i+j)) & PQ_L2_MASK; m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl); if (m) return m; } } return NULL; #else return TAILQ_FIRST(vm_page_queues[basequeue].pl); #endif } /* * Find a page on the specified queue with color optimization. */ vm_page_t vm_page_select(object, pindex, basequeue) vm_object_t object; vm_pindex_t pindex; int basequeue; { #if PQ_L2_SIZE > 1 int index; index = (pindex + object->pg_color) & PQ_L2_MASK; return vm_page_list_find(basequeue, index); #else return TAILQ_FIRST(vm_page_queues[basequeue].pl); #endif } /* * Find a free or zero page, with specified preference. 
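 * The preferred color is computed as (pindex + object->pg_color) &
 * PQ_L2_MASK, so consecutive pages of an object land in consecutive
 * cache colors. A PQ_ZERO request that finds no zeroed page of the
 * preferred color falls back to the PQ_FREE page of that same color
 * (via oqueuediff) before probing neighboring colors outward.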
*/ static vm_page_t vm_page_select_free(object, pindex, prefqueue) vm_object_t object; vm_pindex_t pindex; int prefqueue; { #if PQ_L2_SIZE > 1 int i,j; int index, hindex; #endif vm_page_t m; int oqueuediff; if (prefqueue == PQ_ZERO) oqueuediff = PQ_FREE - PQ_ZERO; else oqueuediff = PQ_ZERO - PQ_FREE; if (object->page_hint) { if (object->page_hint->pindex == (pindex - 1)) { vm_offset_t last_phys; if ((object->page_hint->flags & PG_FICTITIOUS) == 0) { if ((object->page_hint < &vm_page_array[cnt.v_page_count-1]) && (object->page_hint >= &vm_page_array[0])) { int queue; last_phys = VM_PAGE_TO_PHYS(object->page_hint); m = PHYS_TO_VM_PAGE(last_phys + PAGE_SIZE); queue = m->queue - m->pc; if (queue == PQ_FREE || queue == PQ_ZERO) { return m; } } } } } #if PQ_L2_SIZE > 1 index = pindex + object->pg_color; /* * These are special cased because of clock arithmetic */ for(j = 0; j < PQ_L1_SIZE; j++) { for(i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1); (i + j) >= 0; i -= PQ_L1_SIZE) { hindex = prefqueue + ((index + (i+j)) & PQ_L2_MASK); if (m = TAILQ_FIRST(vm_page_queues[hindex].pl)) return m; if (m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) return m; hindex = prefqueue + ((index - (i+j)) & PQ_L2_MASK); if (m = TAILQ_FIRST(vm_page_queues[hindex].pl)) return m; if (m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl)) return m; } } #else if (m = TAILQ_FIRST(vm_page_queues[prefqueue].pl)) return m; else return TAILQ_FIRST(vm_page_queues[prefqueue + oqueuediff].pl); #endif return NULL; } /* * vm_page_alloc: * * Allocate and return a memory cell associated * with this VM object/offset pair. * * page_req classes: * VM_ALLOC_NORMAL normal process request * VM_ALLOC_SYSTEM system *really* needs a page * VM_ALLOC_INTERRUPT interrupt time request * VM_ALLOC_ZERO zero page * * Object must be locked.
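 * Illustrative fallback order for the classes above: VM_ALLOC_NORMAL
 * takes a free page only while v_free_count >= v_free_reserved and
 * otherwise reclaims a cached page; VM_ALLOC_SYSTEM may additionally
 * dig below the reserve (down to v_interrupt_free_min) when the
 * cache is empty; VM_ALLOC_INTERRUPT takes any free page at all and
 * never touches the cache queues.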
*/ vm_page_t vm_page_alloc(object, pindex, page_req) vm_object_t object; vm_pindex_t pindex; int page_req; { register vm_page_t m; struct vpgqueues *pq; int queue; int s; #ifdef DIAGNOSTIC m = vm_page_lookup(object, pindex); if (m) panic("vm_page_alloc: page already allocated"); #endif if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) { page_req = VM_ALLOC_SYSTEM; }; s = splvm(); switch (page_req) { case VM_ALLOC_NORMAL: if (cnt.v_free_count >= cnt.v_free_reserved) { m = vm_page_select_free(object, pindex, PQ_FREE); #if defined(DIAGNOSTIC) if (m == NULL) panic("vm_page_alloc(NORMAL): missing page on free queue\n"); #endif } else { m = vm_page_select(object, pindex, PQ_CACHE); if (m == NULL) { splx(s); #if defined(DIAGNOSTIC) if (cnt.v_cache_count > 0) printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count); #endif pagedaemon_wakeup(); return (NULL); } } break; case VM_ALLOC_ZERO: if (cnt.v_free_count >= cnt.v_free_reserved) { m = vm_page_select_free(object, pindex, PQ_ZERO); #if defined(DIAGNOSTIC) if (m == NULL) panic("vm_page_alloc(ZERO): missing page on free queue\n"); #endif } else { m = vm_page_select(object, pindex, PQ_CACHE); if (m == NULL) { splx(s); #if defined(DIAGNOSTIC) if (cnt.v_cache_count > 0) printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count); #endif pagedaemon_wakeup(); return (NULL); } } break; case VM_ALLOC_SYSTEM: if ((cnt.v_free_count >= cnt.v_free_reserved) || ((cnt.v_cache_count == 0) && (cnt.v_free_count >= cnt.v_interrupt_free_min))) { m = vm_page_select_free(object, pindex, PQ_FREE); #if defined(DIAGNOSTIC) if (m == NULL) panic("vm_page_alloc(SYSTEM): missing page on free queue\n"); #endif } else { m = vm_page_select(object, pindex, PQ_CACHE); if (m == NULL) { splx(s); #if defined(DIAGNOSTIC) if (cnt.v_cache_count > 0) printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count); #endif pagedaemon_wakeup(); return (NULL); } } break; case VM_ALLOC_INTERRUPT: if (cnt.v_free_count > 0) { m = vm_page_select_free(object, pindex, PQ_FREE); #if defined(DIAGNOSTIC) if (m == NULL) panic("vm_page_alloc(INTERRUPT): missing page on free queue\n"); #endif } else { splx(s); pagedaemon_wakeup(); return (NULL); } break; default: panic("vm_page_alloc: invalid allocation class"); } queue = m->queue; if (queue == PQ_ZERO) --vm_page_zero_count; pq = &vm_page_queues[queue]; TAILQ_REMOVE(pq->pl, m, pageq); --(*pq->cnt); --(*pq->lcnt); if ((m->queue - m->pc) == PQ_ZERO) { m->flags = PG_ZERO|PG_BUSY; } else if ((m->queue - m->pc) == PQ_CACHE) { vm_page_remove(m); m->flags = PG_BUSY; } else { m->flags = PG_BUSY; } m->wire_count = 0; m->hold_count = 0; m->act_count = 0; m->busy = 0; m->valid = 0; m->dirty = 0; m->queue = PQ_NONE; /* XXX before splx until vm_page_insert is safe */ vm_page_insert(m, object, pindex); splx(s); /* * Don't wakeup too often - wakeup the pageout daemon when * we would be nearly out of memory. */ if (((cnt.v_free_count + cnt.v_cache_count) < (cnt.v_free_reserved + cnt.v_cache_min)) || (cnt.v_free_count < cnt.v_pageout_free_min)) pagedaemon_wakeup(); return (m); } /* * vm_page_activate: * * Put the specified page on the active list (if appropriate). * * The page queues must be locked. 
*/ void vm_page_activate(m) register vm_page_t m; { int s; s = splvm(); if (m->queue == PQ_ACTIVE) panic("vm_page_activate: already active"); if ((m->queue - m->pc) == PQ_CACHE) cnt.v_reactivated++; vm_page_unqueue(m); if (m->wire_count == 0) { m->queue = PQ_ACTIVE; ++(*vm_page_queues[PQ_ACTIVE].lcnt); TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq); if (m->act_count < ACT_INIT) m->act_count = ACT_INIT; cnt.v_active_count++; } splx(s); } /* * helper routine for vm_page_free and vm_page_free_zero */ static int vm_page_freechk_and_unqueue(m) vm_page_t m; { if (m->busy || (m->flags & PG_BUSY) || ((m->queue - m->pc) == PQ_FREE) || (m->hold_count != 0)) { printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n", m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0, m->hold_count); if ((m->queue - m->pc) == PQ_FREE) panic("vm_page_free: freeing free page"); else panic("vm_page_free: freeing busy page"); } vm_page_remove(m); vm_page_unqueue_nowakeup(m); if ((m->flags & PG_FICTITIOUS) != 0) { return 0; } if (m->wire_count != 0) { if (m->wire_count > 1) { panic("vm_page_free: invalid wire count (%d), pindex: 0x%x", m->wire_count, m->pindex); } m->wire_count = 0; cnt.v_wire_count--; } return 1; } /* * helper routine for vm_page_free and vm_page_free_zero */ static __inline void vm_page_free_wakeup() { /* * if pageout daemon needs pages, then tell it that there are * some free. */ if (vm_pageout_pages_needed) { wakeup(&vm_pageout_pages_needed); vm_pageout_pages_needed = 0; } /* * wakeup processes that are waiting on memory if we hit a * high water mark. And wakeup scheduler process if we have * lots of memory. this process will swapin processes. */ if (vm_pages_needed && ((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) { wakeup(&cnt.v_free_count); vm_pages_needed = 0; } } /* * vm_page_free: * * Returns the given page to the free list, * disassociating it with any VM object. * * Object and page must be locked prior to entry. */ void vm_page_free(m) register vm_page_t m; { int s; struct vpgqueues *pq; s = splvm(); cnt.v_tfree++; if (!vm_page_freechk_and_unqueue(m)) { splx(s); return; } m->queue = PQ_FREE + m->pc; pq = &vm_page_queues[m->queue]; ++(*pq->lcnt); ++(*pq->cnt); /* * If the pageout process is grabbing the page, it is likely * that the page is NOT in the cache. It is more likely that * the page will be partially in the cache if it is being * explicitly freed. */ if (curproc == pageproc) { TAILQ_INSERT_TAIL(pq->pl, m, pageq); } else { TAILQ_INSERT_HEAD(pq->pl, m, pageq); } vm_page_free_wakeup(); splx(s); } void vm_page_free_zero(m) register vm_page_t m; { int s; struct vpgqueues *pq; s = splvm(); cnt.v_tfree++; if (!vm_page_freechk_and_unqueue(m)) { splx(s); return; } m->queue = PQ_ZERO + m->pc; pq = &vm_page_queues[m->queue]; ++(*pq->lcnt); ++(*pq->cnt); TAILQ_INSERT_HEAD(pq->pl, m, pageq); ++vm_page_zero_count; vm_page_free_wakeup(); splx(s); } /* * vm_page_wire: * * Mark this page as wired down by yet * another map, removing it from paging queues * as necessary. * * The page queues must be locked. */ void vm_page_wire(m) register vm_page_t m; { int s; if (m->wire_count == 0) { s = splvm(); vm_page_unqueue(m); splx(s); cnt.v_wire_count++; } ++(*vm_page_queues[PQ_NONE].lcnt); m->wire_count++; m->flags |= PG_MAPPED; } /* * vm_page_unwire: * * Release one wiring of this page, potentially * enabling it to be paged again. * * The page queues must be locked. 
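 * Wirings nest, for example: a page wired by two different maps has
 * wire_count == 2 and needs two vm_page_unwire() calls before it is
 * placed back on the active queue.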
/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(m)
	register vm_page_t m;
{
	int s;

	s = splvm();

	if (m->wire_count > 0)
		m->wire_count--;

	if (m->wire_count == 0) {
		cnt.v_wire_count--;
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->queue = PQ_ACTIVE;
		++(*vm_page_queues[PQ_ACTIVE].lcnt);
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int s;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any
	 * queue - we need to put them on the inactive queue also,
	 * otherwise we lose track of them.
	 * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && m->hold_count == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->queue = PQ_INACTIVE;
		++(*vm_page_queues[PQ_INACTIVE].lcnt);
		cnt.v_inactive_count++;
	}
	splx(s);
}

/*
 *	vm_page_cache:
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	++(*vm_page_queues[m->queue].lcnt);
	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}

/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}

/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);

	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}
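/*
 * Editor's worked example (not part of the original file), assuming a
 * PAGE_SIZE of 4096, a DEV_BSIZE of 512 (eight chunks per page), and the
 * usual definition of vm_page_dev_bsize_chunks[n] as a mask of n low bits:
 *
 *	vm_page_bits(512, 1024)
 *		size rounds to 1024 (already a DEV_BSIZE multiple)
 *		base  = (512 % 4096) / 512 = 1
 *		chunk = vm_page_dev_bsize_chunks[1024 / 512] = 0x0003
 *		result = (0x0003 << 1) & VM_PAGE_BITS_ALL = 0x0006
 *
 * i.e. the second and third 512-byte chunks of the page are selected.
 */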
/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
	unsigned long size;	/* should be size_t here and for malloc() */
	int type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("vm_page_alloc_contig: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_page_alloc_contig: boundary must be a power of 2");

	start = 0;
	s = splvm();
again:
	/*
	 * Find first page in array that is free, within range, aligned, and
	 * such that the boundary won't be crossed.
	 */
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].queue - pga[i].pc) == PQ_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0) &&
		    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) ||
	    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}
	start = i;

	/*
	 * Check successive pages for contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
		    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].queue - pga[i].pc) != PQ_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, unfree and assign the physical pages to it and
	 * return kernel VM pointer.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
	if (addr == 0) {
		splx(s);
		return (NULL);
	}

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
		--(*vm_page_queues[m->queue].lcnt);
		cnt.v_free_count--;
		m->valid = VM_PAGE_BITS_ALL;
		m->flags = 0;
		m->dirty = 0;
		m->wire_count = 0;
		m->busy = 0;
		m->queue = PQ_NONE;
		vm_page_insert(m, kernel_object,
		    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
		vm_page_wire(m);
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return ((void *)addr);
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	return ((vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, low, high,
	    alignment, 0ul));
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>
#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_FREE + i].cnt);
	}
	db_printf("\n");
}
#endif /* DDB */
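/*
 * Editor's note (not part of the original file): with the DDB option
 * compiled in, the two commands defined above are run from the kernel
 * debugger prompt as "show page" and "show pageq"; the first dumps the
 * global vmmeter counters, the second the per-queue page counts.
 */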