Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/vm_page.c
Show First 20 Lines • Show All 1,957 Lines • ▼ Show 20 Lines | |||||
* VM_ALLOC_SYSTEM system *really* needs a page | * VM_ALLOC_SYSTEM system *really* needs a page | ||||
* VM_ALLOC_INTERRUPT interrupt time request | * VM_ALLOC_INTERRUPT interrupt time request | ||||
* | * | ||||
* optional allocation flags: | * optional allocation flags: | ||||
* VM_ALLOC_COUNT(number) the number of additional pages that the caller | * VM_ALLOC_COUNT(number) the number of additional pages that the caller | ||||
* intends to allocate | * intends to allocate | ||||
* VM_ALLOC_NOBUSY do not exclusive busy the page | * VM_ALLOC_NOBUSY do not exclusive busy the page | ||||
* VM_ALLOC_NODUMP do not include the page in a kernel core dump | * VM_ALLOC_NODUMP do not include the page in a kernel core dump | ||||
* VM_ALLOC_NOOBJ page is not associated with an object and | |||||
* should not be exclusive busy | |||||
* VM_ALLOC_SBUSY shared busy the allocated page | * VM_ALLOC_SBUSY shared busy the allocated page | ||||
* VM_ALLOC_WIRED wire the allocated page | * VM_ALLOC_WIRED wire the allocated page | ||||
* VM_ALLOC_ZERO prefer a zeroed page | * VM_ALLOC_ZERO prefer a zeroed page | ||||
*/ | */ | ||||
/*
 * vm_page_alloc: allocate a page at (object, pindex).  Thin wrapper that
 * looks up the resident page with the largest index below pindex (the
 * "mpred" hint) via vm_radix_lookup_le() and delegates to
 * vm_page_alloc_after().
 *
 * NOTE(review): the right-hand (new) side of this diff drops the
 * object != NULL guard, so the updated function dereferences
 * object->rtree unconditionally — callers may no longer pass a NULL
 * object (consistent with the VM_ALLOC_NOOBJ removal above).
 */
vm_page_t | vm_page_t | ||||
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) | vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) | ||||
{ | { | ||||
return (vm_page_alloc_after(object, pindex, req, object != NULL ? | return (vm_page_alloc_after(object, pindex, req, | ||||
vm_radix_lookup_le(&object->rtree, pindex) : NULL)); | vm_radix_lookup_le(&object->rtree, pindex))); | ||||
} | } | ||||
/*
 * vm_page_alloc_domain: NUMA-domain-aware variant of vm_page_alloc().
 * Computes the same mpred hint via vm_radix_lookup_le() and delegates
 * to vm_page_alloc_domain_after() with the requested domain.
 *
 * NOTE(review): as with vm_page_alloc() above, the new side of this
 * diff removes the object != NULL branch, so a valid object is now
 * required; keep the two wrappers consistent.
 */
vm_page_t | vm_page_t | ||||
vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain, | vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain, | ||||
int req) | int req) | ||||
{ | { | ||||
return (vm_page_alloc_domain_after(object, pindex, domain, req, | return (vm_page_alloc_domain_after(object, pindex, domain, req, | ||||
object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) : | vm_radix_lookup_le(&object->rtree, pindex))); | ||||
NULL)); | |||||
} | } | ||||
/* | /* | ||||
* Allocate a page in the specified object with the given page index. To | * Allocate a page in the specified object with the given page index. To | ||||
* optimize insertion of the page into the object, the caller must also specify | * optimize insertion of the page into the object, the caller must also specify | ||||
* the resident page in the object with largest index smaller than the given | * the resident page in the object with largest index smaller than the given | ||||
* page index, or NULL if no such page exists. | * page index, or NULL if no such page exists. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | |||||
vm_page_t | vm_page_t | ||||
vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, | vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, | ||||
int req, vm_page_t mpred) | int req, vm_page_t mpred) | ||||
{ | { | ||||
struct vm_domain *vmd; | struct vm_domain *vmd; | ||||
vm_page_t m; | vm_page_t m; | ||||
int flags, pool; | int flags, pool; | ||||
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && | KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != | ||||
kib: I wonder if it makes sense to add initial assert that checks that req contains only flags that… | |||||
markjAuthorUnsubmitted Done Inline ActionsYes, I think it's a good idea. markj: Yes, I think it's a good idea. | |||||
(object != NULL || (req & VM_ALLOC_SBUSY) == 0) && | |||||
((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != | |||||
(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), | (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), | ||||
("inconsistent object(%p)/req(%x)", object, req)); | ("invalid request %#x", req)); | ||||
KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, | KASSERT((req & VM_ALLOC_WAITOK) == 0, | ||||
("Can't sleep and retry object insertion.")); | ("Can't sleep and retry object insertion.")); | ||||
KASSERT(mpred == NULL || mpred->pindex < pindex, | KASSERT(mpred == NULL || mpred->pindex < pindex, | ||||
("mpred %p doesn't precede pindex 0x%jx", mpred, | ("mpred %p doesn't precede pindex 0x%jx", mpred, | ||||
(uintmax_t)pindex)); | (uintmax_t)pindex)); | ||||
if (object != NULL) | |||||
VM_OBJECT_ASSERT_WLOCKED(object); | VM_OBJECT_ASSERT_WLOCKED(object); | ||||
flags = 0; | flags = 0; | ||||
m = NULL; | m = NULL; | ||||
pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT; | pool = VM_FREEPOOL_DEFAULT; | ||||
Not Done Inline ActionsWe really don't need this variable any longer. alc: We really don't need this variable any longer. | |||||
again: | again: | ||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
/* | /* | ||||
* Can we allocate the page from a reservation? | * Can we allocate the page from a reservation? | ||||
*/ | */ | ||||
if (vm_object_reserv(object) && | if (vm_object_reserv(object) && | ||||
(m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) != | (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) != | ||||
NULL) { | NULL) { | ||||
Show All 37 Lines | #endif | ||||
*/ | */ | ||||
found: | found: | ||||
vm_page_dequeue(m); | vm_page_dequeue(m); | ||||
vm_page_alloc_check(m); | vm_page_alloc_check(m); | ||||
/* | /* | ||||
* Initialize the page. Only the PG_ZERO flag is inherited. | * Initialize the page. Only the PG_ZERO flag is inherited. | ||||
*/ | */ | ||||
if ((req & VM_ALLOC_ZERO) != 0) | |||||
flags |= (m->flags & PG_ZERO); | flags |= (m->flags & PG_ZERO); | ||||
kibUnsubmitted Done Inline ActionsI do not think () are needed. kib: I do not think () are needed. | |||||
if ((req & VM_ALLOC_NODUMP) != 0) | if ((req & VM_ALLOC_NODUMP) != 0) | ||||
flags |= PG_NODUMP; | flags |= PG_NODUMP; | ||||
m->flags = flags; | m->flags = flags; | ||||
m->a.flags = 0; | m->a.flags = 0; | ||||
m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? | m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0; | ||||
VPO_UNMANAGED : 0; | if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) | ||||
if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) | |||||
m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; | m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; | ||||
else if ((req & VM_ALLOC_SBUSY) != 0) | else if ((req & VM_ALLOC_SBUSY) != 0) | ||||
m->busy_lock = VPB_SHARERS_WORD(1); | m->busy_lock = VPB_SHARERS_WORD(1); | ||||
else | else | ||||
m->busy_lock = VPB_UNBUSIED; | m->busy_lock = VPB_UNBUSIED; | ||||
if (req & VM_ALLOC_WIRED) { | if (req & VM_ALLOC_WIRED) { | ||||
vm_wire_add(1); | vm_wire_add(1); | ||||
m->ref_count = 1; | m->ref_count = 1; | ||||
} | } | ||||
m->a.act_count = 0; | m->a.act_count = 0; | ||||
if (object != NULL) { | |||||
if (vm_page_insert_after(m, object, pindex, mpred)) { | if (vm_page_insert_after(m, object, pindex, mpred)) { | ||||
if (req & VM_ALLOC_WIRED) { | if (req & VM_ALLOC_WIRED) { | ||||
vm_wire_sub(1); | vm_wire_sub(1); | ||||
m->ref_count = 0; | m->ref_count = 0; | ||||
} | } | ||||
KASSERT(m->object == NULL, ("page %p has object", m)); | KASSERT(m->object == NULL, ("page %p has object", m)); | ||||
m->oflags = VPO_UNMANAGED; | m->oflags = VPO_UNMANAGED; | ||||
m->busy_lock = VPB_UNBUSIED; | m->busy_lock = VPB_UNBUSIED; | ||||
/* Don't change PG_ZERO. */ | /* Don't change PG_ZERO. */ | ||||
vm_page_free_toq(m); | vm_page_free_toq(m); | ||||
if (req & VM_ALLOC_WAITFAIL) { | if (req & VM_ALLOC_WAITFAIL) { | ||||
VM_OBJECT_WUNLOCK(object); | VM_OBJECT_WUNLOCK(object); | ||||
vm_radix_wait(); | vm_radix_wait(); | ||||
VM_OBJECT_WLOCK(object); | VM_OBJECT_WLOCK(object); | ||||
} | } | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
/* Ignore device objects; the pager sets "memattr" for them. */ | /* Ignore device objects; the pager sets "memattr" for them. */ | ||||
if (object->memattr != VM_MEMATTR_DEFAULT && | if (object->memattr != VM_MEMATTR_DEFAULT && | ||||
(object->flags & OBJ_FICTITIOUS) == 0) | (object->flags & OBJ_FICTITIOUS) == 0) | ||||
pmap_page_set_memattr(m, object->memattr); | pmap_page_set_memattr(m, object->memattr); | ||||
} else | |||||
m->pindex = pindex; | |||||
return (m); | return (m); | ||||
} | } | ||||
/* | /* | ||||
* vm_page_alloc_contig: | * vm_page_alloc_contig: | ||||
* | * | ||||
* Allocate a contiguous set of physical pages of the given size "npages" | * Allocate a contiguous set of physical pages of the given size "npages" | ||||
▲ Show 20 Lines • Show All 3,421 Lines • Show Last 20 Lines |
I wonder if it makes sense to add initial assert that checks that req contains only flags that the function can handle. For this and all other vm_page_alloc* functions.