vm/vm_reserv.c
[... first 162 lines not shown ...]
  * A partially populated reservation can be broken and reclaimed at any time.
  */
 struct vm_reserv {
         TAILQ_ENTRY(vm_reserv) partpopq;
         LIST_ENTRY(vm_reserv) objq;
         vm_object_t object;             /* containing object */
         vm_pindex_t pindex;             /* offset within object */
         vm_page_t pages;                /* first page of a superpage */
+        int domain;                     /* NUMA domain */
         int popcnt;                     /* # of pages in use */
         char inpartpopq;
         popmap_t popmap[NPOPMAP];       /* bit vector of used pages */
 };
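Note on the new field: it ties an active reservation to the NUMA domain of its pages. The allocation paths below set it from vm_phys_domain(m), the free/break paths reset it to -1, and it indexes the per-domain partially populated queue. A minimal sketch of the invariant the new KASSERTs enforce; the helper name is invented and is not part of this patch:

static __inline void
vm_reserv_domain_check(vm_reserv_t rv)
{

        /* A reservation that belongs to an object names a valid domain. */
        KASSERT(rv->object == NULL ||
            (rv->domain >= 0 && rv->domain < vm_ndomains),
            ("vm_reserv: reserv %p's domain is corrupted %d", rv, rv->domain));
}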
 /*
  * The reservation array
  *
[... 21 lines not shown ...]
  * The partially populated reservation queue
  *
  * This queue enables the fast recovery of an unused free small page from a
  * partially populated reservation. The reservation at the head of this queue
  * is the least recently changed, partially populated reservation.
  *
  * Access to this queue is synchronized by the free page queue lock.
  */
-static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
-    TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);
+static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop[MAXMEMDOM];

 static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

 static long vm_reserv_broken;
 SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
     &vm_reserv_broken, 0, "Cumulative number of broken reservations");

 static long vm_reserv_freed;

[... 52 lines not shown ...]
 /*
  * Describes the current state of the partially populated reservation queue.
  */
 static int
 sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
 {
         struct sbuf sbuf;
         vm_reserv_t rv;
-        int counter, error, level, unused_pages;
+        int counter, error, domain, level, unused_pages;

         error = sysctl_wire_old_buffer(req, 0);
         if (error != 0)
                 return (error);
         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
-        sbuf_printf(&sbuf, "\nLEVEL SIZE NUMBER\n\n");
+        sbuf_printf(&sbuf, "\nDOMAIN LEVEL SIZE NUMBER\n\n");
+        for (domain = 0; domain < vm_ndomains; domain++) {
                 for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                         counter = 0;
                         unused_pages = 0;
                         mtx_lock(&vm_page_queue_free_mtx);
-                        TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
+                        TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
                                 counter++;
                                 unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                         }
                         mtx_unlock(&vm_page_queue_free_mtx);
-                        sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
+                        sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
+                            domain, level,
                             unused_pages * ((int)PAGE_SIZE / 1024), counter);
                 }
+        }
         error = sbuf_finish(&sbuf);
         sbuf_delete(&sbuf);
         return (error);
 }
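For reference, the handler can be exercised from userland. The sysctl registration for this handler is outside the visible hunks, so the MIB name "vm.reserv.partpopq" below is inferred from the handler's name rather than taken from the diff. A self-contained sketch:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        char *buf;
        size_t len;

        /* First call sizes the buffer; second call fetches the table. */
        if (sysctlbyname("vm.reserv.partpopq", NULL, &len, NULL, 0) != 0)
                return (1);
        if ((buf = malloc(len + 1)) == NULL)
                return (1);
        if (sysctlbyname("vm.reserv.partpopq", buf, &len, NULL, 0) != 0)
                return (1);
        buf[len] = '\0';
        /* One row per (domain, level) pair: DOMAIN, LEVEL, SIZE, NUMBER. */
        printf("%s", buf);
        free(buf);
        return (0);
}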
 /*
  * Reduces the given reservation's population count. If the population count
  * becomes zero, the reservation is destroyed. Additionally, moves the
[... 9 lines not shown; the hunk below is in vm_reserv_depopulate(vm_reserv_t rv, int index) ...]
         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
         KASSERT(rv->object != NULL,
             ("vm_reserv_depopulate: reserv %p is free", rv));
         KASSERT(popmap_is_set(rv->popmap, index),
             ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
             index));
         KASSERT(rv->popcnt > 0,
             ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
+        KASSERT(rv->domain >= 0 && rv->domain < vm_ndomains,
+            ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
+            rv, rv->domain));
         if (rv->inpartpopq) {
-                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
+                TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
                 rv->inpartpopq = FALSE;
         } else {
                 KASSERT(rv->pages->psind == 1,
                     ("vm_reserv_depopulate: reserv %p is already demoted",
                     rv));
                 rv->pages->psind = 0;
         }
         popmap_clear(rv->popmap, index);
         rv->popcnt--;
         if (rv->popcnt == 0) {
                 LIST_REMOVE(rv, objq);
                 rv->object = NULL;
+                rv->domain = -1;
                 vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
                 vm_reserv_freed++;
         } else {
                 rv->inpartpopq = TRUE;
-                TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
+                TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
         }
 }

 /*
  * Returns the reservation to which the given page might belong.
  */
 static __inline vm_reserv_t
 vm_reserv_from_page(vm_page_t m)
[... 28 lines not shown; the hunk below is in vm_reserv_populate() ...]
         KASSERT(rv->object != NULL,
("vm_reserv_populate: reserv %p is free", rv)); | ("vm_reserv_populate: reserv %p is free", rv)); | ||||
KASSERT(popmap_is_clear(rv->popmap, index), | KASSERT(popmap_is_clear(rv->popmap, index), | ||||
("vm_reserv_populate: reserv %p's popmap[%d] is set", rv, | ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv, | ||||
index)); | index)); | ||||
KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES, | KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES, | ||||
("vm_reserv_populate: reserv %p is already full", rv)); | ("vm_reserv_populate: reserv %p is already full", rv)); | ||||
KASSERT(rv->pages->psind == 0, | KASSERT(rv->pages->psind == 0, | ||||
("vm_reserv_populate: reserv %p is already promoted", rv)); | ("vm_reserv_populate: reserv %p is already promoted", rv)); | ||||
KASSERT(rv->domain >= 0 && rv->domain < vm_ndomains, | |||||
("vm_reserv_populate: reserv %p's domain is corrupted %d", | |||||
rv, rv->domain)); | |||||
if (rv->inpartpopq) { | if (rv->inpartpopq) { | ||||
TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq); | TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq); | ||||
rv->inpartpopq = FALSE; | rv->inpartpopq = FALSE; | ||||
} | } | ||||
popmap_set(rv->popmap, index); | popmap_set(rv->popmap, index); | ||||
rv->popcnt++; | rv->popcnt++; | ||||
if (rv->popcnt < VM_LEVEL_0_NPAGES) { | if (rv->popcnt < VM_LEVEL_0_NPAGES) { | ||||
rv->inpartpopq = TRUE; | rv->inpartpopq = TRUE; | ||||
TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq); | TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq); | ||||
} else | } else | ||||
rv->pages->psind = 1; | rv->pages->psind = 1; | ||||
} | } | ||||
 /*
  * Allocates a contiguous set of physical pages of the given size "npages"
  * from existing or newly created reservations. All of the physical pages
  * must be at or above the given physical address "low" and below the given
  * physical address "high". The given value "alignment" determines the
  * alignment of the first physical page in the set. If the given value
  * "boundary" is non-zero, then the set of physical pages cannot cross any
  * physical address boundary that is a multiple of that value. Both
  * "alignment" and "boundary" must be a power of two.
  *
  * The page "mpred" must immediately precede the offset "pindex" within the
  * specified object.
  *
  * The object and free page queue must be locked.
  */
 vm_page_t
-vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
-    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
-    vm_page_t mpred)
+vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
+    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
+    vm_paddr_t boundary, vm_page_t mpred)
 {
         vm_paddr_t pa, size;
         vm_page_t m, m_ret, msucc;
         vm_pindex_t first, leftcap, rightcap;
         vm_reserv_t rv;
         u_long allocpages, maxpages, minpages;
         int i, index, n;

[... 103 lines not shown; the hunk below is still in vm_reserv_alloc_contig() ...]
         /*
          * Allocate the physical pages. The alignment and boundary specified
          * for this allocation may be different from the alignment and
          * boundary specified for the requested pages. For instance, the
          * specified index may not be the first page within the first new
          * reservation.
          */
-        m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
+        m = vm_phys_alloc_contig(domain, allocpages, low, high, ulmax(alignment,
             VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
         if (m == NULL)
                 return (NULL);

         /*
          * The allocated physical pages always begin at a reservation
          * boundary, but they do not always end at a reservation boundary.
          * Initialize every reservation that is completely covered by the
          * allocated physical pages.
          */
         m_ret = NULL;
         index = VM_RESERV_INDEX(object, pindex);
         do {
                 rv = vm_reserv_from_page(m);
                 KASSERT(rv->pages == m,
                     ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
                     rv));
                 KASSERT(rv->object == NULL,
                     ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
                 LIST_INSERT_HEAD(&object->rvq, rv, objq);
                 rv->object = object;
                 rv->pindex = first;
+                rv->domain = vm_phys_domain(m);
                 KASSERT(rv->popcnt == 0,
                     ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
                     rv));
                 KASSERT(!rv->inpartpopq,
                     ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
                     rv));
                 for (i = 0; i < NPOPMAP; i++)
                         KASSERT(rv->popmap[i] == 0,
[... 39 lines not shown ...]
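To show the reworked interface in context, here is a hypothetical caller (not part of this change) that asks for a contiguous run within one domain and, if no existing or new reservation can satisfy it, breaks an inactive partially populated reservation from the same domain and retries. The wrapper name and the unconstrained low/high/alignment/boundary values are invented for illustration; the object/free-queue locking and the "mpred" contract documented above still apply.

static vm_page_t
vm_reserv_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_page_t mpred)
{
        vm_page_t m;

        m = vm_reserv_alloc_contig(object, pindex, domain, npages,
            0, ~(vm_paddr_t)0, PAGE_SIZE, 0, mpred);
        if (m == NULL && vm_reserv_reclaim_contig(domain, npages,
            0, ~(vm_paddr_t)0, PAGE_SIZE, 0)) {
                /* The reclaimed pages were freed to this domain's queues. */
                m = vm_reserv_alloc_contig(object, pindex, domain, npages,
                    0, ~(vm_paddr_t)0, PAGE_SIZE, 0, mpred);
        }
        return (m);
}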
  * Allocates a page from an existing or newly created reservation.
  *
  * The page "mpred" must immediately precede the offset "pindex" within the
  * specified object.
  *
  * The object and free page queue must be locked.
  */
 vm_page_t
-vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
+vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
+    vm_page_t mpred)
 {
         vm_page_t m, msucc;
         vm_pindex_t first, leftcap, rightcap;
         vm_reserv_t rv;
         int i, index;

         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
         VM_OBJECT_ASSERT_WLOCKED(object);
[... 62 lines not shown; the hunk below continues the OBJT_VNODE check in vm_reserv_alloc_page() ...]
object->backing_object->type == OBJT_VNODE)) | object->backing_object->type == OBJT_VNODE)) | ||||
return (NULL); | return (NULL); | ||||
/* Speculate that the object may grow. */ | /* Speculate that the object may grow. */ | ||||
} | } | ||||
/* | /* | ||||
* Allocate and populate the new reservation. | * Allocate and populate the new reservation. | ||||
*/ | */ | ||||
m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER); | m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER); | ||||
if (m == NULL) | if (m == NULL) | ||||
return (NULL); | return (NULL); | ||||
rv = vm_reserv_from_page(m); | rv = vm_reserv_from_page(m); | ||||
KASSERT(rv->pages == m, | KASSERT(rv->pages == m, | ||||
("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv)); | ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv)); | ||||
KASSERT(rv->object == NULL, | KASSERT(rv->object == NULL, | ||||
("vm_reserv_alloc_page: reserv %p isn't free", rv)); | ("vm_reserv_alloc_page: reserv %p isn't free", rv)); | ||||
LIST_INSERT_HEAD(&object->rvq, rv, objq); | LIST_INSERT_HEAD(&object->rvq, rv, objq); | ||||
rv->object = object; | rv->object = object; | ||||
rv->pindex = first; | rv->pindex = first; | ||||
rv->domain = vm_phys_domain(m); | |||||
KASSERT(rv->popcnt == 0, | KASSERT(rv->popcnt == 0, | ||||
("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv)); | ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv)); | ||||
KASSERT(!rv->inpartpopq, | KASSERT(!rv->inpartpopq, | ||||
("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv)); | ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv)); | ||||
for (i = 0; i < NPOPMAP; i++) | for (i = 0; i < NPOPMAP; i++) | ||||
KASSERT(rv->popmap[i] == 0, | KASSERT(rv->popmap[i] == 0, | ||||
("vm_reserv_alloc_page: reserv %p's popmap is corrupted", | ("vm_reserv_alloc_page: reserv %p's popmap is corrupted", | ||||
rv)); | rv)); | ||||
Show All 30 Lines | vm_reserv_break(vm_reserv_t rv, vm_page_t m) | ||||
         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
         KASSERT(rv->object != NULL,
             ("vm_reserv_break: reserv %p is free", rv));
         KASSERT(!rv->inpartpopq,
             ("vm_reserv_break: reserv %p's inpartpopq is TRUE", rv));
         LIST_REMOVE(rv, objq);
         rv->object = NULL;
+        rv->domain = -1;
         if (m != NULL) {
                 /*
                  * Since the reservation is being broken, there is no harm in
                  * abusing the population map to stop "m" from being returned
                  * to the physical memory allocator.
                  */
                 i = m - rv->pages;
                 KASSERT(popmap_is_clear(rv->popmap, i),
[... 53 lines not shown ...]
 {
         vm_reserv_t rv;

         mtx_lock(&vm_page_queue_free_mtx);
         while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                 KASSERT(rv->object == object,
                     ("vm_reserv_break_all: reserv %p is corrupted", rv));
                 if (rv->inpartpopq) {
-                        TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
+                        TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
                         rv->inpartpopq = FALSE;
                 }
                 vm_reserv_break(rv, NULL);
         }
         mtx_unlock(&vm_page_queue_free_mtx);
 }

 /*
[... 21 lines not shown ...]
  *
  * Requires that vm_page_array and first_page are initialized!
  */
 void
 vm_reserv_init(void)
 {
         vm_paddr_t paddr;
         struct vm_phys_seg *seg;
-        int segind;
+        int i, segind;

         /*
          * Initialize the reservation array. Specifically, initialize the
          * "pages" field for every element that has an underlying superpage.
          */
         for (segind = 0; segind < vm_phys_nsegs; segind++) {
                 seg = &vm_phys_segs[segind];
                 paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                 while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
                         vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
                             PHYS_TO_VM_PAGE(paddr);
                         paddr += VM_LEVEL_0_SIZE;
                 }
         }
+        for (i = 0; i < MAXMEMDOM; i++)
+                TAILQ_INIT(&vm_rvq_partpop[i]);
 }
 /*
  * Returns true if the given page belongs to a reservation and that page is
  * free. Otherwise, returns false.
  */
 bool
 vm_reserv_is_page_free(vm_page_t m)
[... 41 lines not shown ...]
  */
 static void
 vm_reserv_reclaim(vm_reserv_t rv)
 {

         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
         KASSERT(rv->inpartpopq,
             ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
-        TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
+        KASSERT(rv->domain >= 0 && rv->domain < vm_ndomains,
+            ("vm_reserv_reclaim: reserv %p's domain is corrupted %d",
+            rv, rv->domain));
+        TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
         rv->inpartpopq = FALSE;
         vm_reserv_break(rv, NULL);
         vm_reserv_reclaimed++;
 }
 /*
  * Breaks the reservation at the head of the partially populated reservation
  * queue, releasing its free pages to the physical memory allocator. Returns
  * TRUE if a reservation is broken and FALSE otherwise.
  *
  * The free page queue lock must be held.
  */
 boolean_t
-vm_reserv_reclaim_inactive(void)
+vm_reserv_reclaim_inactive(int domain)
 {
         vm_reserv_t rv;

         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-        if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
+        if ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) {
                 vm_reserv_reclaim(rv);
                 return (TRUE);
         }
         return (FALSE);
 }
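A hypothetical caller sketch, not part of this diff: with the queue now split per domain, a consumer would most likely try the domain it is allocating from first and only then sweep the remaining domains. The helper name is invented; the free page queue lock is held across the calls, as the function requires.

static boolean_t
vm_reserv_reclaim_any(int preferred)
{
        int domain;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        if (vm_reserv_reclaim_inactive(preferred))
                return (TRUE);
        for (domain = 0; domain < vm_ndomains; domain++)
                if (domain != preferred && vm_reserv_reclaim_inactive(domain))
                        return (TRUE);
        return (FALSE);
}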
 /*
  * Searches the partially populated reservation queue for the least recently
  * changed reservation with free pages that satisfy the given request for
  * contiguous physical memory. If a satisfactory reservation is found, it is
  * broken. Returns TRUE if a reservation is broken and FALSE otherwise.
  *
  * The free page queue lock must be held.
  */
 boolean_t
-vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
-    u_long alignment, vm_paddr_t boundary)
+vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
+    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
         vm_paddr_t pa, size;
         vm_reserv_t rv;
         int hi, i, lo, low_index, next_free;

         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
         if (npages > VM_LEVEL_0_NPAGES - 1)
                 return (FALSE);
         size = npages << PAGE_SHIFT;
-        TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
+        TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
                 pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
                 if (pa + PAGE_SIZE - size < low) {
                         /* This entire reservation is too low; go to next. */
                         continue;
                 }
                 pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
                 if (pa + size > high) {
                         /* This entire reservation is too high; go to next. */
[... remaining 155 lines not shown ...]
jeff added an inline comment on vm_reserv_alloc_page():

This could eventually become two functions. One would check the object for an existing reservation at that pindex and satisfy the allocation from it. This would be done without a domain argument and would not create a reservation; it would possibly need the domain free queue for the page free count. The second would possibly populate a new reservation when a page allocation is done. This could be called with the domain free queue already locked.
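A rough sketch of the split being suggested, with invented names (none of this is in the patch): the first routine only consumes an existing reservation at the given pindex and needs no domain argument; the second may create and populate a new reservation and takes the domain explicitly.

/* Satisfy the allocation from an existing reservation, if the object has one. */
vm_page_t       vm_reserv_extend(vm_object_t object, vm_pindex_t pindex,
                    vm_page_t mpred);

/* Possibly create and populate a new reservation in the given domain. */
vm_page_t       vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
                    int domain, vm_page_t mpred);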