Changeset View
Changeset View
Standalone View
Standalone View
head/sys/vm/vm_reserv.c
Show First 20 Lines • Show All 182 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* r - vm_reserv_lock | * r - vm_reserv_lock | ||||
* d - vm_reserv_domain_lock | * d - vm_reserv_domain_lock | ||||
* o - vm_reserv_object_lock | * o - vm_reserv_object_lock | ||||
* c - constant after boot | * c - constant after boot | ||||
*/ | */ | ||||
struct vm_reserv {
	struct mtx lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d, r) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t object;			/* (o, r) containing object */
	vm_pindex_t pindex;			/* (o, r) offset in object */
	vm_page_t pages;			/* (c) first page */
	uint16_t domain;			/* (c) NUMA domain. */
	uint16_t popcnt;			/* (r) # of pages in use */
	char inpartpopq;			/* (d, r) on a partpop queue? */
	int lasttick;				/* (r) last pop update tick. */
	popmap_t popmap[NPOPMAP_MAX];		/* (r) bit vector, used pages */
};
#define vm_reserv_lockptr(rv) (&(rv)->lock) | #define vm_reserv_lockptr(rv) (&(rv)->lock) | ||||
#define vm_reserv_assert_locked(rv) \ | #define vm_reserv_assert_locked(rv) \ | ||||
mtx_assert(vm_reserv_lockptr(rv), MA_OWNED) | mtx_assert(vm_reserv_lockptr(rv), MA_OWNED) | ||||
#define vm_reserv_lock(rv) mtx_lock(vm_reserv_lockptr(rv)) | #define vm_reserv_lock(rv) mtx_lock(vm_reserv_lockptr(rv)) | ||||
#define vm_reserv_trylock(rv) mtx_trylock(vm_reserv_lockptr(rv)) | #define vm_reserv_trylock(rv) mtx_trylock(vm_reserv_lockptr(rv)) | ||||
#define vm_reserv_unlock(rv) mtx_unlock(vm_reserv_lockptr(rv)) | #define vm_reserv_unlock(rv) mtx_unlock(vm_reserv_lockptr(rv)) | ||||
static struct mtx_padalign vm_reserv_domain_locks[MAXMEMDOM]; | |||||
#define vm_reserv_domain_lockptr(d) &vm_reserv_domain_locks[(d)] | |||||
#define vm_reserv_domain_lock(d) mtx_lock(vm_reserv_domain_lockptr(d)) | |||||
#define vm_reserv_domain_unlock(d) mtx_unlock(vm_reserv_domain_lockptr(d)) | |||||
/* | /* | ||||
* The reservation array | * The reservation array | ||||
* | * | ||||
* This array is analogous in function to vm_page_array. It differs in the | * This array is analogous in function to vm_page_array. It differs in the | ||||
* respect that it may contain a greater number of useful reservation | * respect that it may contain a greater number of useful reservation | ||||
* structures than there are (physical) superpages. These "invalid" | * structures than there are (physical) superpages. These "invalid" | ||||
* reservation structures exist to trade-off space for time in the | * reservation structures exist to trade-off space for time in the | ||||
* implementation of vm_reserv_from_page(). Invalid reservation structures are | * implementation of vm_reserv_from_page(). Invalid reservation structures are | ||||
* distinguishable from "valid" reservation structures by inspecting the | * distinguishable from "valid" reservation structures by inspecting the | ||||
* reservation's "pages" field. Invalid reservation structures have a NULL | * reservation's "pages" field. Invalid reservation structures have a NULL | ||||
* "pages" field. | * "pages" field. | ||||
* | * | ||||
* vm_reserv_from_page() maps a small (physical) page to an element of this | * vm_reserv_from_page() maps a small (physical) page to an element of this | ||||
* array by computing a physical reservation number from the page's physical | * array by computing a physical reservation number from the page's physical | ||||
* address. The physical reservation number is used as the array index. | * address. The physical reservation number is used as the array index. | ||||
* | * | ||||
* An "active" reservation is a valid reservation structure that has a non-NULL | * An "active" reservation is a valid reservation structure that has a non-NULL | ||||
* "object" field and a non-zero "popcnt" field. In other words, every active | * "object" field and a non-zero "popcnt" field. In other words, every active | ||||
* reservation belongs to a particular object. Moreover, every active | * reservation belongs to a particular object. Moreover, every active | ||||
* reservation has an entry in the containing object's list of reservations. | * reservation has an entry in the containing object's list of reservations. | ||||
*/ | */ | ||||
static vm_reserv_t vm_reserv_array; | static vm_reserv_t vm_reserv_array; | ||||
/* | /* | ||||
* The partially populated reservation queue | * The per-domain partially populated reservation queues | ||||
* | * | ||||
* This queue enables the fast recovery of an unused free small page from a | * These queues enable the fast recovery of an unused free small page from a | ||||
* partially populated reservation. The reservation at the head of this queue | * partially populated reservation. The reservation at the head of a queue | ||||
* is the least recently changed, partially populated reservation. | * is the least recently changed, partially populated reservation. | ||||
* | * | ||||
* Access to this queue is synchronized by the free page queue lock. | * Access to this queue is synchronized by the per-domain reservation lock. | ||||
*/ | */ | ||||
/*
 * Per-domain queue state: a lock and the domain's partially populated
 * reservation queue.  Cache-line aligned so that adjacent domains do not
 * share a line (NOTE(review): padding presumed to reduce false sharing).
 */
struct vm_reserv_domain {
	struct mtx lock;			/* protects the partpop queue */
	TAILQ_HEAD(, vm_reserv) partpop;	/* (d) partially populated queue */
} __aligned(CACHE_LINE_SIZE);

static struct vm_reserv_domain vm_rvd[MAXMEMDOM];

/* Accessors for a domain's reservation-queue lock. */
#define	vm_reserv_domain_lockptr(d)	(&vm_rvd[(d)].lock)
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))
static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info"); | static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info"); | ||||
static counter_u64_t vm_reserv_broken = EARLY_COUNTER; | static counter_u64_t vm_reserv_broken = EARLY_COUNTER; | ||||
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD, | ||||
&vm_reserv_broken, "Cumulative number of broken reservations"); | &vm_reserv_broken, "Cumulative number of broken reservations"); | ||||
static counter_u64_t vm_reserv_freed = EARLY_COUNTER; | static counter_u64_t vm_reserv_freed = EARLY_COUNTER; | ||||
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD, | ||||
Show All 38 Lines | |||||
static boolean_t vm_reserv_has_pindex(vm_reserv_t rv, | static boolean_t vm_reserv_has_pindex(vm_reserv_t rv, | ||||
vm_pindex_t pindex); | vm_pindex_t pindex); | ||||
static void vm_reserv_populate(vm_reserv_t rv, int index); | static void vm_reserv_populate(vm_reserv_t rv, int index); | ||||
static void vm_reserv_reclaim(vm_reserv_t rv); | static void vm_reserv_reclaim(vm_reserv_t rv); | ||||
/* | /* | ||||
* Returns the current number of full reservations. | * Returns the current number of full reservations. | ||||
* | * | ||||
* Since the number of full reservations is computed without acquiring the | * Since the number of full reservations is computed without acquiring any | ||||
* free page queue lock, the returned value may be inexact. | * locks, the returned value is inexact. | ||||
*/ | */ | ||||
static int | static int | ||||
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS) | sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
vm_paddr_t paddr; | vm_paddr_t paddr; | ||||
struct vm_phys_seg *seg; | struct vm_phys_seg *seg; | ||||
vm_reserv_t rv; | vm_reserv_t rv; | ||||
int fullpop, segind; | int fullpop, segind; | ||||
Show All 27 Lines | if (error != 0) | ||||
return (error); | return (error); | ||||
sbuf_new_for_sysctl(&sbuf, NULL, 128, req); | sbuf_new_for_sysctl(&sbuf, NULL, 128, req); | ||||
sbuf_printf(&sbuf, "\nDOMAIN LEVEL SIZE NUMBER\n\n"); | sbuf_printf(&sbuf, "\nDOMAIN LEVEL SIZE NUMBER\n\n"); | ||||
for (domain = 0; domain < vm_ndomains; domain++) { | for (domain = 0; domain < vm_ndomains; domain++) { | ||||
for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) { | for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) { | ||||
counter = 0; | counter = 0; | ||||
unused_pages = 0; | unused_pages = 0; | ||||
vm_reserv_domain_lock(domain); | vm_reserv_domain_lock(domain); | ||||
TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) { | TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) { | ||||
counter++; | counter++; | ||||
unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt; | unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt; | ||||
} | } | ||||
vm_reserv_domain_unlock(domain); | vm_reserv_domain_unlock(domain); | ||||
sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n", | sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n", | ||||
domain, level, | domain, level, | ||||
unused_pages * ((int)PAGE_SIZE / 1024), counter); | unused_pages * ((int)PAGE_SIZE / 1024), counter); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 86 Lines • ▼ Show 20 Lines | if (rv->popcnt == VM_LEVEL_0_NPAGES) { | ||||
rv->pages->psind = 0; | rv->pages->psind = 0; | ||||
} | } | ||||
popmap_clear(rv->popmap, index); | popmap_clear(rv->popmap, index); | ||||
rv->popcnt--; | rv->popcnt--; | ||||
if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP || | if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP || | ||||
rv->popcnt == 0) { | rv->popcnt == 0) { | ||||
vm_reserv_domain_lock(rv->domain); | vm_reserv_domain_lock(rv->domain); | ||||
if (rv->inpartpopq) { | if (rv->inpartpopq) { | ||||
TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq); | TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); | ||||
rv->inpartpopq = FALSE; | rv->inpartpopq = FALSE; | ||||
} | } | ||||
if (rv->popcnt != 0) { | if (rv->popcnt != 0) { | ||||
rv->inpartpopq = TRUE; | rv->inpartpopq = TRUE; | ||||
TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq); | TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, | ||||
partpopq); | |||||
} | } | ||||
vm_reserv_domain_unlock(rv->domain); | vm_reserv_domain_unlock(rv->domain); | ||||
rv->lasttick = ticks; | rv->lasttick = ticks; | ||||
} | } | ||||
vmd = VM_DOMAIN(rv->domain); | vmd = VM_DOMAIN(rv->domain); | ||||
if (rv->popcnt == 0) { | if (rv->popcnt == 0) { | ||||
vm_reserv_remove(rv); | vm_reserv_remove(rv); | ||||
vm_domain_free_lock(vmd); | vm_domain_free_lock(vmd); | ||||
▲ Show 20 Lines • Show All 60 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0); | return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0); | ||||
} | } | ||||
/* | /* | ||||
* Increases the given reservation's population count. Moves the reservation | * Increases the given reservation's population count. Moves the reservation | ||||
* to the tail of the partially populated reservation queue. | * to the tail of the partially populated reservation queue. | ||||
* | |||||
* The free page queue must be locked. | |||||
*/ | */ | ||||
static void | static void | ||||
vm_reserv_populate(vm_reserv_t rv, int index) | vm_reserv_populate(vm_reserv_t rv, int index) | ||||
{ | { | ||||
vm_reserv_assert_locked(rv); | vm_reserv_assert_locked(rv); | ||||
CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", | CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", | ||||
__FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); | __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); | ||||
Show All 12 Lines | vm_reserv_populate(vm_reserv_t rv, int index) | ||||
popmap_set(rv->popmap, index); | popmap_set(rv->popmap, index); | ||||
rv->popcnt++; | rv->popcnt++; | ||||
if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP && | if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP && | ||||
rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES) | rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES) | ||||
return; | return; | ||||
rv->lasttick = ticks; | rv->lasttick = ticks; | ||||
vm_reserv_domain_lock(rv->domain); | vm_reserv_domain_lock(rv->domain); | ||||
if (rv->inpartpopq) { | if (rv->inpartpopq) { | ||||
TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq); | TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); | ||||
rv->inpartpopq = FALSE; | rv->inpartpopq = FALSE; | ||||
} | } | ||||
if (rv->popcnt < VM_LEVEL_0_NPAGES) { | if (rv->popcnt < VM_LEVEL_0_NPAGES) { | ||||
rv->inpartpopq = TRUE; | rv->inpartpopq = TRUE; | ||||
TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq); | TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq); | ||||
} else { | } else { | ||||
KASSERT(rv->pages->psind == 0, | KASSERT(rv->pages->psind == 0, | ||||
("vm_reserv_populate: reserv %p is already promoted", | ("vm_reserv_populate: reserv %p is already promoted", | ||||
rv)); | rv)); | ||||
rv->pages->psind = 1; | rv->pages->psind = 1; | ||||
} | } | ||||
vm_reserv_domain_unlock(rv->domain); | vm_reserv_domain_unlock(rv->domain); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 341 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
/* | /* | ||||
* Breaks the given reservation. All free pages in the reservation | * Breaks the given reservation. All free pages in the reservation | ||||
* are returned to the physical memory allocator. The reservation's | * are returned to the physical memory allocator. The reservation's | ||||
* population count and map are reset to their initial state. | * population count and map are reset to their initial state. | ||||
* | * | ||||
* The given reservation must not be in the partially populated reservation | * The given reservation must not be in the partially populated reservation | ||||
* queue. The free page queue lock must be held. | * queue. | ||||
*/ | */ | ||||
static void | static void | ||||
vm_reserv_break(vm_reserv_t rv) | vm_reserv_break(vm_reserv_t rv) | ||||
{ | { | ||||
u_long changes; | u_long changes; | ||||
int bitpos, hi, i, lo; | int bitpos, hi, i, lo; | ||||
vm_reserv_assert_locked(rv); | vm_reserv_assert_locked(rv); | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | while ((rv = LIST_FIRST(&object->rvq)) != NULL) { | ||||
vm_reserv_lock(rv); | vm_reserv_lock(rv); | ||||
/* Reclaim race. */ | /* Reclaim race. */ | ||||
if (rv->object != object) { | if (rv->object != object) { | ||||
vm_reserv_unlock(rv); | vm_reserv_unlock(rv); | ||||
continue; | continue; | ||||
} | } | ||||
vm_reserv_domain_lock(rv->domain); | vm_reserv_domain_lock(rv->domain); | ||||
if (rv->inpartpopq) { | if (rv->inpartpopq) { | ||||
TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq); | TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); | ||||
rv->inpartpopq = FALSE; | rv->inpartpopq = FALSE; | ||||
} | } | ||||
vm_reserv_domain_unlock(rv->domain); | vm_reserv_domain_unlock(rv->domain); | ||||
vm_reserv_break(rv); | vm_reserv_break(rv); | ||||
vm_reserv_unlock(rv); | vm_reserv_unlock(rv); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Frees the given page if it belongs to a reservation. Returns TRUE if the | * Frees the given page if it belongs to a reservation. Returns TRUE if the | ||||
* page is freed and FALSE otherwise. | * page is freed and FALSE otherwise. | ||||
* | |||||
* The free page queue lock must be held. | |||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
vm_reserv_free_page(vm_page_t m) | vm_reserv_free_page(vm_page_t m) | ||||
{ | { | ||||
vm_reserv_t rv; | vm_reserv_t rv; | ||||
boolean_t ret; | boolean_t ret; | ||||
rv = vm_reserv_from_page(m); | rv = vm_reserv_from_page(m); | ||||
Show All 37 Lines | while (paddr + VM_LEVEL_0_SIZE > paddr && paddr + | ||||
rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT]; | rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT]; | ||||
rv->pages = PHYS_TO_VM_PAGE(paddr); | rv->pages = PHYS_TO_VM_PAGE(paddr); | ||||
rv->domain = seg->domain; | rv->domain = seg->domain; | ||||
mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF); | mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF); | ||||
paddr += VM_LEVEL_0_SIZE; | paddr += VM_LEVEL_0_SIZE; | ||||
} | } | ||||
} | } | ||||
for (i = 0; i < MAXMEMDOM; i++) { | for (i = 0; i < MAXMEMDOM; i++) { | ||||
mtx_init(&vm_reserv_domain_locks[i], "VM reserv domain", NULL, | mtx_init(&vm_rvd[i].lock, "VM reserv domain", NULL, MTX_DEF); | ||||
MTX_DEF); | TAILQ_INIT(&vm_rvd[i].partpop); | ||||
TAILQ_INIT(&vm_rvq_partpop[i]); | |||||
} | } | ||||
for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++) | for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++) | ||||
mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL, | mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL, | ||||
MTX_DEF); | MTX_DEF); | ||||
} | } | ||||
/* | /* | ||||
Show All 35 Lines | vm_reserv_level_iffullpop(vm_page_t m) | ||||
rv = vm_reserv_from_page(m); | rv = vm_reserv_from_page(m); | ||||
return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1); | return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1); | ||||
} | } | ||||
/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 *
 * The reservation must be locked (vm_reserv_assert_locked); the caller
 * retains that lock.  The per-domain lock is acquired internally only for
 * the queue removal.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_domain_lock(rv->domain);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_reclaim: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	/* Unlink from the per-domain partially populated queue... */
	TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
	vm_reserv_domain_unlock(rv->domain);
	/* ...then break it with only the reservation lock held. */
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}
/* | /* | ||||
* Breaks the reservation at the head of the partially populated reservation | * Breaks the reservation at the head of the partially populated reservation | ||||
* queue, releasing its free pages to the physical memory allocator. Returns | * queue, releasing its free pages to the physical memory allocator. Returns | ||||
* TRUE if a reservation is broken and FALSE otherwise. | * TRUE if a reservation is broken and FALSE otherwise. | ||||
* | |||||
* The free page queue lock must be held. | |||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
vm_reserv_reclaim_inactive(int domain) | vm_reserv_reclaim_inactive(int domain) | ||||
{ | { | ||||
vm_reserv_t rv; | vm_reserv_t rv; | ||||
while ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) { | while ((rv = TAILQ_FIRST(&vm_rvd[domain].partpop)) != NULL) { | ||||
vm_reserv_lock(rv); | vm_reserv_lock(rv); | ||||
if (rv != TAILQ_FIRST(&vm_rvq_partpop[domain])) { | if (rv != TAILQ_FIRST(&vm_rvd[domain].partpop)) { | ||||
vm_reserv_unlock(rv); | vm_reserv_unlock(rv); | ||||
continue; | continue; | ||||
} | } | ||||
vm_reserv_reclaim(rv); | vm_reserv_reclaim(rv); | ||||
vm_reserv_unlock(rv); | vm_reserv_unlock(rv); | ||||
return (TRUE); | return (TRUE); | ||||
} | } | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
/* | /* | ||||
* Determine whether this reservation has free pages that satisfy the given | * Determine whether this reservation has free pages that satisfy the given | ||||
* request for contiguous physical memory. Start searching from the lower | * request for contiguous physical memory. Start searching from the lower | ||||
* bound, defined by low_index. | * bound, defined by low_index. | ||||
* | |||||
* The free page queue lock must be held. | |||||
*/ | */ | ||||
static bool | static bool | ||||
vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low, | vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low, | ||||
vm_paddr_t high, u_long alignment, vm_paddr_t boundary) | vm_paddr_t high, u_long alignment, vm_paddr_t boundary) | ||||
{ | { | ||||
vm_paddr_t pa, size; | vm_paddr_t pa, size; | ||||
u_long changes; | u_long changes; | ||||
int bitpos, bits_left, i, hi, lo, n; | int bitpos, bits_left, i, hi, lo, n; | ||||
▲ Show 20 Lines • Show All 62 Lines • ▼ Show 20 Lines | vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low, | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Searches the partially populated reservation queue for the least recently | * Searches the partially populated reservation queue for the least recently | ||||
* changed reservation with free pages that satisfy the given request for | * changed reservation with free pages that satisfy the given request for | ||||
* contiguous physical memory. If a satisfactory reservation is found, it is | * contiguous physical memory. If a satisfactory reservation is found, it is | ||||
* broken. Returns true if a reservation is broken and false otherwise. | * broken. Returns true if a reservation is broken and false otherwise. | ||||
* | |||||
* The free page queue lock must be held. | |||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low, | vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low, | ||||
vm_paddr_t high, u_long alignment, vm_paddr_t boundary) | vm_paddr_t high, u_long alignment, vm_paddr_t boundary) | ||||
{ | { | ||||
vm_paddr_t pa, size; | vm_paddr_t pa, size; | ||||
vm_reserv_t rv, rvn; | vm_reserv_t rv, rvn; | ||||
if (npages > VM_LEVEL_0_NPAGES - 1) | if (npages > VM_LEVEL_0_NPAGES - 1) | ||||
return (false); | return (false); | ||||
size = npages << PAGE_SHIFT; | size = npages << PAGE_SHIFT; | ||||
vm_reserv_domain_lock(domain); | vm_reserv_domain_lock(domain); | ||||
again: | again: | ||||
for (rv = TAILQ_FIRST(&vm_rvq_partpop[domain]); rv != NULL; rv = rvn) { | for (rv = TAILQ_FIRST(&vm_rvd[domain].partpop); rv != NULL; rv = rvn) { | ||||
rvn = TAILQ_NEXT(rv, partpopq); | rvn = TAILQ_NEXT(rv, partpopq); | ||||
pa = VM_PAGE_TO_PHYS(&rv->pages[0]); | pa = VM_PAGE_TO_PHYS(&rv->pages[0]); | ||||
if (pa + VM_LEVEL_0_SIZE - size < low) { | if (pa + VM_LEVEL_0_SIZE - size < low) { | ||||
/* This entire reservation is too low; go to next. */ | /* This entire reservation is too low; go to next. */ | ||||
continue; | continue; | ||||
} | } | ||||
if (pa + size > high) { | if (pa + size > high) { | ||||
/* This entire reservation is too high; go to next. */ | /* This entire reservation is too high; go to next. */ | ||||
▲ Show 20 Lines • Show All 160 Lines • Show Last 20 Lines |