Changeset View
Changeset View
Standalone View
Standalone View
head/sys/vm/vm_fault.c
Show First 20 Lines • Show All 789 Lines • ▼ Show 20 Lines | vm_fault_lookup(struct faultstate *fs) | ||||
else | else | ||||
KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0, | KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0, | ||||
("!fs->wired && VM_FAULT_WIRE")); | ("!fs->wired && VM_FAULT_WIRE")); | ||||
fs->lookup_still_valid = true; | fs->lookup_still_valid = true; | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
/*
 * Revalidate the map lookup after the map was left unlocked during fault
 * processing (fs->lookup_still_valid was cleared).  Re-acquires the map
 * read lock and, if the map generation changed, repeats the lookup and
 * checks that the result still matches the saved fault state.
 *
 * Returns:
 *   KERN_SUCCESS - lookup is valid again; the map read lock is held and
 *                  fs->lookup_still_valid is set.
 *   KERN_RESTART - the fault must be retried from the top (lock could not
 *                  be acquired without blocking, the entry now maps a
 *                  different object/pindex, or no access is permitted).
 *   other        - error code propagated from vm_map_lookup_locked().
 *
 * NOTE(review): on non-success returns the caller is expected to release
 * the fault state (fault_deallocate()) — confirm against callers.
 */
static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	/* Do not block on the map lock; restart the fault instead. */
	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	/* If the map has not changed since our lookup, nothing to redo. */
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}

	/* The entry no longer maps the same backing object and page. */
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked. Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault. Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}
/* | |||||
* Wait/Retry if the page is busy. We have to do this if the page is | * Wait/Retry if the page is busy. We have to do this if the page is | ||||
* either exclusive or shared busy because the vm_pager may be using | * either exclusive or shared busy because the vm_pager may be using | ||||
* read busy for pageouts (and even pageins if it is the vnode pager), | * read busy for pageouts (and even pageins if it is the vnode pager), | ||||
* and we could end up trying to pagein and pageout the same page | * and we could end up trying to pagein and pageout the same page | ||||
* simultaneously. | * simultaneously. | ||||
* | * | ||||
* We can theoretically allow the busy case on a read fault if the page | * We can theoretically allow the busy case on a read fault if the page | ||||
* is marked valid, but since such pages are typically already pmap'd, | * is marked valid, but since such pages are typically already pmap'd, | ||||
Show All 25 Lines | |||||
} | } | ||||
int | int | ||||
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, | vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, | ||||
int fault_flags, vm_page_t *m_hold) | int fault_flags, vm_page_t *m_hold) | ||||
{ | { | ||||
struct faultstate fs; | struct faultstate fs; | ||||
struct domainset *dset; | struct domainset *dset; | ||||
vm_object_t next_object, retry_object; | vm_object_t next_object; | ||||
vm_offset_t e_end, e_start; | vm_offset_t e_end, e_start; | ||||
vm_pindex_t retry_pindex; | |||||
vm_prot_t retry_prot; | |||||
int ahead, alloc_req, behind, cluster_offset, faultcount; | int ahead, alloc_req, behind, cluster_offset, faultcount; | ||||
int nera, oom, result, rv; | int nera, oom, result, rv; | ||||
u_char behavior; | u_char behavior; | ||||
bool dead, hardfault, is_first_object_locked; | bool dead, hardfault, is_first_object_locked; | ||||
VM_CNT_INC(v_vm_faults); | VM_CNT_INC(v_vm_faults); | ||||
if ((curthread->td_pflags & TDP_NOFAULTING) != 0) | if ((curthread->td_pflags & TDP_NOFAULTING) != 0) | ||||
▲ Show 20 Lines • Show All 506 Lines • ▼ Show 20 Lines | #endif | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* We must verify that the maps have not changed since our last | * We must verify that the maps have not changed since our last | ||||
* lookup. | * lookup. | ||||
*/ | */ | ||||
if (!fs.lookup_still_valid) { | if (!fs.lookup_still_valid) { | ||||
if (!vm_map_trylock_read(fs.map)) { | result = vm_fault_relookup(&fs); | ||||
fault_deallocate(&fs); | |||||
goto RetryFault; | |||||
} | |||||
fs.lookup_still_valid = true; | |||||
if (fs.map->timestamp != fs.map_generation) { | |||||
result = vm_map_lookup_locked(&fs.map, vaddr, fs.fault_type, | |||||
&fs.entry, &retry_object, &retry_pindex, &retry_prot, | |||||
&fs.wired); | |||||
/* | |||||
* If we don't need the page any longer, put it on the inactive | |||||
* list (the easiest thing to do here). If no one needs it, | |||||
* pageout will grab it eventually. | |||||
*/ | |||||
if (result != KERN_SUCCESS) { | if (result != KERN_SUCCESS) { | ||||
fault_deallocate(&fs); | fault_deallocate(&fs); | ||||
if (result == KERN_RESTART) | |||||
/* | |||||
* If retry of map lookup would have blocked then | |||||
* retry fault from start. | |||||
*/ | |||||
if (result == KERN_FAILURE) | |||||
goto RetryFault; | goto RetryFault; | ||||
return (result); | return (result); | ||||
} | |||||
if ((retry_object != fs.first_object) || | |||||
(retry_pindex != fs.first_pindex)) { | |||||
fault_deallocate(&fs); | |||||
goto RetryFault; | |||||
} | |||||
/* | |||||
* Check whether the protection has changed or the object has | |||||
* been copied while we left the map unlocked. Changing from | |||||
* read to write permission is OK - we leave the page | |||||
* write-protected, and catch the write fault. Changing from | |||||
* write to read permission means that we can't mark the page | |||||
* write-enabled after all. | |||||
*/ | |||||
fs.prot &= retry_prot; | |||||
fs.fault_type &= retry_prot; | |||||
if (fs.prot == 0) { | |||||
fault_deallocate(&fs); | |||||
goto RetryFault; | |||||
} | |||||
/* Reassert because wired may have changed. */ | |||||
KASSERT(fs.wired || (fs.fault_flags & VM_FAULT_WIRE) == 0, | |||||
("!wired && VM_FAULT_WIRE")); | |||||
} | } | ||||
} | } | ||||
VM_OBJECT_ASSERT_UNLOCKED(fs.object); | VM_OBJECT_ASSERT_UNLOCKED(fs.object); | ||||
/* | /* | ||||
* If the page was filled by a pager, save the virtual address that | * If the page was filled by a pager, save the virtual address that | ||||
* should be faulted on next under a sequential access pattern to the | * should be faulted on next under a sequential access pattern to the | ||||
* map entry. A read lock on the map suffices to update this address | * map entry. A read lock on the map suffices to update this address | ||||
▲ Show 20 Lines • Show All 540 Lines • Show Last 20 Lines |