sys/vm/vm_reserv.c
(175 lines not shown)
* within that object. The reservation's "popcnt" tracks the number of these | * within that object. The reservation's "popcnt" tracks the number of these | ||||
* small physical pages that are in use at any given time. When and if the | * small physical pages that are in use at any given time. When and if the | ||||
* reservation is not fully utilized, it appears in the queue of partially | * reservation is not fully utilized, it appears in the queue of partially | ||||
* populated reservations. The reservation always appears on the containing | * populated reservations. The reservation always appears on the containing | ||||
* object's list of reservations. | * object's list of reservations. | ||||
* | * | ||||
* A partially populated reservation can be broken and reclaimed at any time. | * A partially populated reservation can be broken and reclaimed at any time. | ||||
* | * | ||||
* r - vm_reserv_lock | * c - constant after boot | ||||
* d - vm_reserv_domain_lock | * d - vm_reserv_domain_lock | ||||
* o - vm_reserv_object_lock | * o - vm_reserv_object_lock | ||||
* c - constant after boot | * r - vm_reserv_lock | ||||
* s - vm_reserv_domain_scan_lock | |||||
*/ | */ | ||||
struct vm_reserv { | struct vm_reserv { | ||||
struct mtx lock; /* reservation lock. */ | struct mtx lock; /* reservation lock. */ | ||||
TAILQ_ENTRY(vm_reserv) partpopq; /* (d, r) per-domain queue. */ | TAILQ_ENTRY(vm_reserv) partpopq; /* (d, r) per-domain queue. */ | ||||
LIST_ENTRY(vm_reserv) objq; /* (o, r) object queue */ | LIST_ENTRY(vm_reserv) objq; /* (o, r) object queue */ | ||||
vm_object_t object; /* (o, r) containing object */ | vm_object_t object; /* (o, r) containing object */ | ||||
vm_pindex_t pindex; /* (o, r) offset in object */ | vm_pindex_t pindex; /* (o, r) offset in object */ | ||||
vm_page_t pages; /* (c) first page */ | vm_page_t pages; /* (c) first page */ | ||||
uint16_t popcnt; /* (r) # of pages in use */ | uint16_t popcnt; /* (r) # of pages in use */ | ||||
uint8_t domain; /* (c) NUMA domain. */ | uint8_t domain; /* (c) NUMA domain. */ | ||||
char inpartpopq; /* (d, r) */ | char inpartpopq; /* (d, r) */ | ||||
int lasttick; /* (r) last pop update tick. */ | int lasttick; /* (r) last pop update tick. */ | ||||
popmap_t popmap[NPOPMAP_MAX]; /* (r) bit vector, used pages */ | popmap_t popmap[NPOPMAP_MAX]; /* (r) bit vector, used pages */ | ||||
}; | }; | ||||
TAILQ_HEAD(vm_reserv_queue, vm_reserv); | |||||
#define vm_reserv_lockptr(rv) (&(rv)->lock) | #define vm_reserv_lockptr(rv) (&(rv)->lock) | ||||
#define vm_reserv_assert_locked(rv) \ | #define vm_reserv_assert_locked(rv) \ | ||||
mtx_assert(vm_reserv_lockptr(rv), MA_OWNED) | mtx_assert(vm_reserv_lockptr(rv), MA_OWNED) | ||||
#define vm_reserv_lock(rv) mtx_lock(vm_reserv_lockptr(rv)) | #define vm_reserv_lock(rv) mtx_lock(vm_reserv_lockptr(rv)) | ||||
#define vm_reserv_trylock(rv) mtx_trylock(vm_reserv_lockptr(rv)) | #define vm_reserv_trylock(rv) mtx_trylock(vm_reserv_lockptr(rv)) | ||||
#define vm_reserv_unlock(rv) mtx_unlock(vm_reserv_lockptr(rv)) | #define vm_reserv_unlock(rv) mtx_unlock(vm_reserv_lockptr(rv)) | ||||
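Taken together, the "popcnt" field and the "inpartpopq" flag encode whether a reservation belongs on a per-domain partially populated queue. A minimal sketch of that invariant, using a hypothetical helper that is not part of this change (vm_reserv_is_partpop does not exist in vm_reserv.c):

/*
 * Illustration only: a reservation belongs on its domain's partially
 * populated queue exactly when some, but not all, of its small pages
 * are in use.
 */
static inline bool
vm_reserv_is_partpop(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	return (rv->popcnt > 0 && rv->popcnt < VM_LEVEL_0_NPAGES);
}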
/* | /* | ||||
(22 lines not shown)
/* | /* | ||||
* The per-domain partially populated reservation queues | * The per-domain partially populated reservation queues | ||||
* | * | ||||
* These queues enable the fast recovery of an unused free small page from a | * These queues enable the fast recovery of an unused free small page from a | ||||
* partially populated reservation. The reservation at the head of a queue | * partially populated reservation. The reservation at the head of a queue | ||||
* is the least recently changed, partially populated reservation. | * is the least recently changed, partially populated reservation. | ||||
* | * | ||||
* Access to this queue is synchronized by the per-domain reservation lock. | * Access to this queue is synchronized by the per-domain reservation lock. | ||||
* Threads reclaiming free pages from the queue must hold the per-domain scan | |||||
* lock. | |||||
*/ | */ | ||||
struct vm_reserv_domain { | struct vm_reserv_domain { | ||||
struct mtx lock; | struct mtx lock; | ||||
TAILQ_HEAD(, vm_reserv) partpop; | struct vm_reserv_queue partpop; /* (d) */ | ||||
struct vm_reserv marker; /* (d, s) scan marker/lock */ | |||||
} __aligned(CACHE_LINE_SIZE); | } __aligned(CACHE_LINE_SIZE); | ||||
static struct vm_reserv_domain vm_rvd[MAXMEMDOM]; | static struct vm_reserv_domain vm_rvd[MAXMEMDOM]; | ||||
#define vm_reserv_domain_lockptr(d) (&vm_rvd[(d)].lock) | #define vm_reserv_domain_lockptr(d) (&vm_rvd[(d)].lock) | ||||
#define vm_reserv_domain_assert_locked(d) \ | |||||
mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED) | |||||
#define vm_reserv_domain_lock(d) mtx_lock(vm_reserv_domain_lockptr(d)) | #define vm_reserv_domain_lock(d) mtx_lock(vm_reserv_domain_lockptr(d)) | ||||
#define vm_reserv_domain_unlock(d) mtx_unlock(vm_reserv_domain_lockptr(d)) | #define vm_reserv_domain_unlock(d) mtx_unlock(vm_reserv_domain_lockptr(d)) | ||||
#define vm_reserv_domain_scan_lock(d) mtx_lock(&vm_rvd[(d)].marker.lock) | |||||
#define vm_reserv_domain_scan_unlock(d) mtx_unlock(&vm_rvd[(d)].marker.lock) | |||||
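The split between the domain lock and the new scan lock can be summarized with a small usage sketch. The function below is hypothetical and shown only to illustrate the intended ordering: the scan lock serializes whole traversals that reclaim pages, while the domain lock is held only around queue manipulation, so other threads can keep enqueueing and dequeueing reservations during a scan.

/*
 * Illustration only: intended lock ordering for a reclaiming thread.
 */
static void
vm_reserv_scan_sketch(int domain)
{

	vm_reserv_domain_scan_lock(domain);
	vm_reserv_domain_lock(domain);
	/*
	 * Walk vm_rvd[domain].partpop here; the domain lock may be dropped
	 * and reacquired around blocking operations, but the scan lock is
	 * held for the duration of the traversal.
	 */
	vm_reserv_domain_unlock(domain);
	vm_reserv_domain_scan_unlock(domain);
}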
static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info"); | static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info"); | ||||
static counter_u64_t vm_reserv_broken = EARLY_COUNTER; | static counter_u64_t vm_reserv_broken = EARLY_COUNTER; | ||||
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD, | ||||
&vm_reserv_broken, "Cumulative number of broken reservations"); | &vm_reserv_broken, "Cumulative number of broken reservations"); | ||||
static counter_u64_t vm_reserv_freed = EARLY_COUNTER; | static counter_u64_t vm_reserv_freed = EARLY_COUNTER; | ||||
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD, | ||||
(84 lines not shown within sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS))
sbuf_new_for_sysctl(&sbuf, NULL, 128, req); | sbuf_new_for_sysctl(&sbuf, NULL, 128, req); | ||||
sbuf_printf(&sbuf, "\nDOMAIN LEVEL SIZE NUMBER\n\n"); | sbuf_printf(&sbuf, "\nDOMAIN LEVEL SIZE NUMBER\n\n"); | ||||
for (domain = 0; domain < vm_ndomains; domain++) { | for (domain = 0; domain < vm_ndomains; domain++) { | ||||
for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) { | for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) { | ||||
counter = 0; | counter = 0; | ||||
unused_pages = 0; | unused_pages = 0; | ||||
vm_reserv_domain_lock(domain); | vm_reserv_domain_lock(domain); | ||||
TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) { | TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) { | ||||
if (rv == &vm_rvd[domain].marker) | |||||
continue; | |||||
counter++; | counter++; | ||||
unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt; | unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt; | ||||
} | } | ||||
vm_reserv_domain_unlock(domain); | vm_reserv_domain_unlock(domain); | ||||
sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n", | sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n", | ||||
domain, level, | domain, level, | ||||
unused_pages * ((int)PAGE_SIZE / 1024), counter); | unused_pages * ((int)PAGE_SIZE / 1024), counter); | ||||
} | } | ||||
(671 lines not shown)
* Requires that vm_page_array and first_page are initialized! | * Requires that vm_page_array and first_page are initialized! | ||||
*/ | */ | ||||
void | void | ||||
vm_reserv_init(void) | vm_reserv_init(void) | ||||
{ | { | ||||
vm_paddr_t paddr; | vm_paddr_t paddr; | ||||
struct vm_phys_seg *seg; | struct vm_phys_seg *seg; | ||||
struct vm_reserv *rv; | struct vm_reserv *rv; | ||||
int i, segind; | struct vm_reserv_domain *rvd; | ||||
int i, j, segind; | |||||
/* | /* | ||||
* Initialize the reservation array. Specifically, initialize the | * Initialize the reservation array. Specifically, initialize the | ||||
* "pages" field for every element that has an underlying superpage. | * "pages" field for every element that has an underlying superpage. | ||||
*/ | */ | ||||
for (segind = 0; segind < vm_phys_nsegs; segind++) { | for (segind = 0; segind < vm_phys_nsegs; segind++) { | ||||
seg = &vm_phys_segs[segind]; | seg = &vm_phys_segs[segind]; | ||||
paddr = roundup2(seg->start, VM_LEVEL_0_SIZE); | paddr = roundup2(seg->start, VM_LEVEL_0_SIZE); | ||||
while (paddr + VM_LEVEL_0_SIZE > paddr && paddr + | while (paddr + VM_LEVEL_0_SIZE > paddr && paddr + | ||||
VM_LEVEL_0_SIZE <= seg->end) { | VM_LEVEL_0_SIZE <= seg->end) { | ||||
rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT]; | rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT]; | ||||
rv->pages = PHYS_TO_VM_PAGE(paddr); | rv->pages = PHYS_TO_VM_PAGE(paddr); | ||||
rv->domain = seg->domain; | rv->domain = seg->domain; | ||||
mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF); | mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF); | ||||
paddr += VM_LEVEL_0_SIZE; | paddr += VM_LEVEL_0_SIZE; | ||||
} | } | ||||
} | } | ||||
for (i = 0; i < MAXMEMDOM; i++) { | for (i = 0; i < MAXMEMDOM; i++) { | ||||
mtx_init(&vm_rvd[i].lock, "VM reserv domain", NULL, MTX_DEF); | rvd = &vm_rvd[i]; | ||||
TAILQ_INIT(&vm_rvd[i].partpop); | mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF); | ||||
TAILQ_INIT(&rvd->partpop); | |||||
mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF); | |||||
kib: I think we want to initialize marker into some impossible state so that attempts to use it as a normal reservation trigger asserts.
markj: I guess it would be sufficient to make it look like a fully populated reservation. Such a reservation should never be present in the partpop queue.
/* | |||||
* Fully populated reservations should never be present in the | |||||
* partially populated reservation queues. | |||||
*/ | |||||
rvd->marker.popcnt = VM_LEVEL_0_NPAGES; | |||||
for (j = 0; j < VM_LEVEL_0_NPAGES; j++) |||||
popmap_set(rvd->marker.popmap, j); | |||||
} | } | ||||
for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++) | for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++) | ||||
mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL, | mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL, | ||||
MTX_DEF); | MTX_DEF); | ||||
} | } | ||||
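Following the review exchange above, the marker is initialized to look like a fully populated reservation so that accidental use of it as a real reservation fails loudly rather than corrupting a queue. Below is a sketch of the kind of consistency check this state is meant to trip; the function name, panic messages, and the popmap_is_set() accessor (a counterpart to the popmap_set() used above) are illustrative assumptions, not code from this file.

/*
 * Illustration only: a populate-style path asserts that the target page
 * is not already in use, so the marker's state (popcnt == VM_LEVEL_0_NPAGES
 * and every popmap bit set) triggers a panic instead of silent misuse.
 */
static void
vm_reserv_populate_check(vm_reserv_t rv, int idx)
{

	vm_reserv_assert_locked(rv);
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("reserv %p is already fully populated", rv));
	KASSERT(!popmap_is_set(rv->popmap, idx),
	    ("reserv %p's popmap[%d] is already set", rv, idx));
}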
/* | /* | ||||
(33 lines not shown)
{ | { | ||||
vm_reserv_t rv; | vm_reserv_t rv; | ||||
rv = vm_reserv_from_page(m); | rv = vm_reserv_from_page(m); | ||||
return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1); | return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1); | ||||
} | } | ||||
/* | /* | ||||
* Breaks the given partially populated reservation, releasing its free pages | * Remove a partially populated reservation from the queue. | ||||
* to the physical memory allocator. | |||||
*/ | */ | ||||
static void | static void | ||||
vm_reserv_reclaim(vm_reserv_t rv) | vm_reserv_dequeue(vm_reserv_t rv) | ||||
{ | { | ||||
vm_reserv_domain_assert_locked(rv->domain); | |||||
vm_reserv_assert_locked(rv); | vm_reserv_assert_locked(rv); | ||||
CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", | CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", | ||||
__FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); | __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); | ||||
vm_reserv_domain_lock(rv->domain); | |||||
KASSERT(rv->inpartpopq, | KASSERT(rv->inpartpopq, | ||||
("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv)); | ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv)); | ||||
KASSERT(rv->domain < vm_ndomains, | |||||
("vm_reserv_reclaim: reserv %p's domain is corrupted %d", | |||||
rv, rv->domain)); | |||||
TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); | TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq); | ||||
rv->inpartpopq = FALSE; | rv->inpartpopq = FALSE; | ||||
} | |||||
/* | |||||
* Breaks the given partially populated reservation, releasing its free pages | |||||
* to the physical memory allocator. | |||||
*/ | |||||
static void | |||||
vm_reserv_reclaim(vm_reserv_t rv) | |||||
{ | |||||
vm_reserv_assert_locked(rv); | |||||
CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d", | |||||
__FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq); | |||||
if (rv->inpartpopq) { | |||||
vm_reserv_domain_lock(rv->domain); | |||||
vm_reserv_dequeue(rv); | |||||
vm_reserv_domain_unlock(rv->domain); | vm_reserv_domain_unlock(rv->domain); | ||||
} | |||||
vm_reserv_break(rv); | vm_reserv_break(rv); | ||||
counter_u64_add(vm_reserv_reclaimed, 1); | counter_u64_add(vm_reserv_reclaimed, 1); | ||||
} | } | ||||
/* | /* | ||||
* Breaks the reservation at the head of the partially populated reservation | * Breaks a reservation near the head of the partially populated reservation | ||||
* queue, releasing its free pages to the physical memory allocator. Returns | * queue, releasing its free pages to the physical memory allocator. Returns | ||||
* TRUE if a reservation is broken and FALSE otherwise. | * TRUE if a reservation is broken and FALSE otherwise. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
alc (unsubmitted): Since you are returning true/false below, instead of TRUE/FALSE, change this to "bool" here and in vm_reserv.h.
vm_reserv_reclaim_inactive(int domain) | vm_reserv_reclaim_inactive(int domain) | ||||
{ | { | ||||
vm_reserv_t rv; | vm_reserv_t rv; | ||||
while ((rv = TAILQ_FIRST(&vm_rvd[domain].partpop)) != NULL) { | vm_reserv_domain_lock(domain); | ||||
kib: This is true because you do not drop the scan lock in vm_reserv_reclaim_contig, right? Maybe add a comment.
markj: Indeed, ok.
vm_reserv_lock(rv); | TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) { | ||||
if (rv != TAILQ_FIRST(&vm_rvd[domain].partpop)) { | /* | ||||
vm_reserv_unlock(rv); | * A locked reservation is likely being updated or reclaimed, | ||||
continue; | * so just skip ahead. | ||||
*/ | |||||
if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) { | |||||
vm_reserv_dequeue(rv); | |||||
break; | |||||
} | } | ||||
} | |||||
vm_reserv_domain_unlock(domain); | |||||
if (rv != NULL) { | |||||
vm_reserv_reclaim(rv); | vm_reserv_reclaim(rv); | ||||
vm_reserv_unlock(rv); | vm_reserv_unlock(rv); | ||||
return (TRUE); | return (true); | ||||
} | } | ||||
return (FALSE); | return (false); | ||||
dougm: If I can rewrite to eliminate a continue, a break, and a duplicate statement, I've got to recommend it.
while (!ret && ((rv...))) {
    ...
    ret = rv == TAILQ_FIRST(...);
    if (ret)
        vm_reserv_reclaim(rv);
    vm_reserv_unlock(rv);
markj: Thanks, that's much better.
} | } | ||||
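As a rough usage sketch, a caller under memory pressure can invoke vm_reserv_reclaim_inactive() repeatedly until its shortage is covered. The function below and its shortage accounting are hypothetical; the real callers live in the page allocator.

/*
 * Illustration only: keep breaking the least recently changed partially
 * populated reservations until the shortage is covered or the per-domain
 * queue is exhausted.  VM_LEVEL_0_NPAGES is an upper bound on the number
 * of pages freed per reservation.
 */
static void
reclaim_shortage_sketch(int domain, long shortage)
{

	while (shortage > 0 && vm_reserv_reclaim_inactive(domain))
		shortage -= VM_LEVEL_0_NPAGES;
}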
/* | /* | ||||
* Determine whether this reservation has free pages that satisfy the given | * Determine whether this reservation has free pages that satisfy the given | ||||
* request for contiguous physical memory. Start searching from the lower | * request for contiguous physical memory. Start searching from the lower | ||||
* bound, defined by low_index. | * bound, defined by low_index. | ||||
*/ | */ | ||||
static bool | static bool | ||||
(69 lines not shown)
} | } | ||||
/* | /* | ||||
* Searches the partially populated reservation queue for the least recently | * Searches the partially populated reservation queue for the least recently | ||||
* changed reservation with free pages that satisfy the given request for | * changed reservation with free pages that satisfy the given request for | ||||
* contiguous physical memory. If a satisfactory reservation is found, it is | * contiguous physical memory. If a satisfactory reservation is found, it is | ||||
* broken. Returns true if a reservation is broken and false otherwise. | * broken. Returns true if a reservation is broken and false otherwise. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
alc (unsubmitted): Same here. (This case was already inconsistent, using boolean_t with true/false.)
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low, | vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low, | ||||
vm_paddr_t high, u_long alignment, vm_paddr_t boundary) | vm_paddr_t high, u_long alignment, vm_paddr_t boundary) | ||||
{ | { | ||||
struct vm_reserv_queue *queue; | |||||
vm_paddr_t pa, size; | vm_paddr_t pa, size; | ||||
vm_reserv_t rv, rvn; | vm_reserv_t marker, rv, rvn; | ||||
if (npages > VM_LEVEL_0_NPAGES - 1) | if (npages > VM_LEVEL_0_NPAGES - 1) | ||||
return (false); | return (false); | ||||
marker = &vm_rvd[domain].marker; | |||||
queue = &vm_rvd[domain].partpop; | |||||
size = npages << PAGE_SHIFT; | size = npages << PAGE_SHIFT; | ||||
vm_reserv_domain_scan_lock(domain); | |||||
vm_reserv_domain_lock(domain); | vm_reserv_domain_lock(domain); | ||||
again: | TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) { | ||||
for (rv = TAILQ_FIRST(&vm_rvd[domain].partpop); rv != NULL; rv = rvn) { | |||||
rvn = TAILQ_NEXT(rv, partpopq); | |||||
pa = VM_PAGE_TO_PHYS(&rv->pages[0]); | pa = VM_PAGE_TO_PHYS(&rv->pages[0]); | ||||
if (pa + VM_LEVEL_0_SIZE - size < low) { | if (pa + VM_LEVEL_0_SIZE - size < low) { | ||||
/* This entire reservation is too low; go to next. */ | /* This entire reservation is too low; go to next. */ | ||||
continue; | continue; | ||||
} | } | ||||
if (pa + size > high) { | if (pa + size > high) { | ||||
/* This entire reservation is too high; go to next. */ | /* This entire reservation is too high; go to next. */ | ||||
continue; | continue; | ||||
} | } | ||||
if (vm_reserv_trylock(rv) == 0) { | if (vm_reserv_trylock(rv) == 0) { | ||||
TAILQ_INSERT_AFTER(queue, rv, marker, partpopq); | |||||
vm_reserv_domain_unlock(domain); | vm_reserv_domain_unlock(domain); | ||||
vm_reserv_lock(rv); | vm_reserv_lock(rv); | ||||
if (!rv->inpartpopq) { | if (!rv->inpartpopq || | ||||
TAILQ_NEXT(rv, partpopq) != marker) { | |||||
vm_reserv_unlock(rv); | |||||
vm_reserv_domain_lock(domain); | vm_reserv_domain_lock(domain); | ||||
if (!rvn->inpartpopq) | rvn = TAILQ_NEXT(marker, partpopq); | ||||
goto again; | TAILQ_REMOVE(queue, marker, partpopq); | ||||
continue; | continue; | ||||
} | } | ||||
} else | vm_reserv_domain_lock(domain); | ||||
TAILQ_REMOVE(queue, marker, partpopq); | |||||
} | |||||
vm_reserv_domain_unlock(domain); | vm_reserv_domain_unlock(domain); | ||||
if (vm_reserv_test_contig(rv, npages, low, high, | if (vm_reserv_test_contig(rv, npages, low, high, | ||||
alignment, boundary)) { | alignment, boundary)) { | ||||
vm_reserv_domain_scan_unlock(domain); | |||||
vm_reserv_reclaim(rv); | vm_reserv_reclaim(rv); | ||||
vm_reserv_unlock(rv); | vm_reserv_unlock(rv); | ||||
return (true); | return (true); | ||||
} | } | ||||
vm_reserv_unlock(rv); | vm_reserv_unlock(rv); | ||||
vm_reserv_domain_lock(domain); | vm_reserv_domain_lock(domain); | ||||
if (rvn != NULL && !rvn->inpartpopq) | |||||
goto again; | |||||
} | } | ||||
vm_reserv_domain_unlock(domain); | vm_reserv_domain_unlock(domain); | ||||
vm_reserv_domain_scan_unlock(domain); | |||||
return (false); | return (false); | ||||
} | } | ||||
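For context, here is a simplified sketch of how an allocation path might fall back to vm_reserv_reclaim_contig() when no contiguous run is available. This is an illustration only, assuming the vm_phys_alloc_contig() interface; the real fallback logic, including free-queue locking and retry policy, lives in vm_page.c.

/*
 * Illustration only: break a partially populated reservation covering a
 * suitable range, then retry the physical allocation.  Locking of the
 * physical allocator is omitted for brevity.
 */
static vm_page_t
alloc_contig_sketch(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_page_t m;

	m = vm_phys_alloc_contig(domain, npages, low, high, alignment,
	    boundary);
	if (m == NULL && vm_reserv_reclaim_contig(domain, npages, low, high,
	    alignment, boundary))
		m = vm_phys_alloc_contig(domain, npages, low, high, alignment,
		    boundary);
	return (m);
}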
/* | /* | ||||
* Transfers the reservation underlying the given page to a new object. | * Transfers the reservation underlying the given page to a new object. | ||||
* | * | ||||
* The object must be locked. | * The object must be locked. | ||||
*/ | */ | ||||
(127 lines not shown)