head/sys/vm/vm_fault.c
[... 336 lines not shown (context: #endif) ...]
     rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type |
         PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind);
     if (rv != KERN_SUCCESS)
         goto out;
     if (m_hold != NULL) {
         *m_hold = m;
         vm_page_wire(m);
     }
-    vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags);
     if (psind == 0 && !wired)
         vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
     VM_OBJECT_RUNLOCK(fs->first_object);
+    vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags);
     vm_map_lookup_done(fs->map, fs->entry);
     curthread->td_ru.ru_minflt++;
 out:
     vm_object_unbusy(fs->first_object);
     return (rv);
 }
[... 270 lines not shown (context: default:) ...]
             result));
             break;
         }
     }
     return (result);
 }
 static int
-vm_fault_lock_vnode(struct faultstate *fs)
+vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
 {
     struct vnode *vp;
     int error, locked;

     if (fs->object->type != OBJT_VNODE)
         return (KERN_SUCCESS);
     vp = fs->object->handle;
     if (vp == fs->vp) {
[... 19 lines not shown (in: vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)) ...]
      */
     error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT, curthread);
     if (error == 0) {
         fs->vp = vp;
         return (KERN_SUCCESS);
     }
     vhold(vp);
-    unlock_and_deallocate(fs);
+    if (objlocked)
+        unlock_and_deallocate(fs);
+    else
+        fault_deallocate(fs);
     error = vget(vp, locked | LK_RETRY | LK_CANRECURSE, curthread);
     vdrop(vp);
     fs->vp = vp;
     KASSERT(error == 0, ("vm_fault: vget failed %d", error));
     return (KERN_RESOURCE_SHORTAGE);
 }
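
Note: the failure path above is the usual unlock-everything-then-sleep dance: try the vnode lock without sleeping while other fault state is held; on contention, hold the vnode, drop all fault state, take the lock blocking, and return KERN_RESOURCE_SHORTAGE so the caller restarts. A minimal userspace sketch of the same pattern, using pthreads; every name here is illustrative, not kernel API (the kernel variant additionally caches the locked vnode in fs->vp so the retry finds it already held):

    /* cc -o tryretry tryretry.c -lpthread */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t held = PTHREAD_MUTEX_INITIALIZER;   /* object/map lock analogue */
    static pthread_mutex_t wanted = PTHREAD_MUTEX_INITIALIZER; /* vnode lock analogue */

    /*
     * Take 'wanted' without sleeping while 'held' is locked; on
     * contention, drop 'held' first, block on 'wanted', and tell the
     * caller to retry from scratch (the RetryFault analogue).
     */
    static int
    lock_wanted_or_retry(void)
    {
        if (pthread_mutex_trylock(&wanted) == 0)
            return (0);                /* fast path, nothing dropped */
        pthread_mutex_unlock(&held);   /* "deallocate" held state */
        pthread_mutex_lock(&wanted);   /* now sleeping is safe */
        pthread_mutex_unlock(&wanted); /* held only to wait our turn */
        return (EAGAIN);               /* caller restarts the fault */
    }

    int
    main(void)
    {
        pthread_mutex_lock(&held);
        if (lock_wanted_or_retry() == 0) {
            printf("fast path\n");
            pthread_mutex_unlock(&wanted);
            pthread_mutex_unlock(&held);
        } else {
            printf("contended: state dropped, caller would retry\n");
        }
        return (0);
    }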
         /*
[... 178 lines not shown (context: if (fs.m != NULL) {) ...]
             /*
              * The page is marked busy for other processes and the
              * pagedaemon.  If it still isn't completely valid
              * (readable), jump to readrest, else break out (we
              * found the page).
              */
             if (!vm_page_all_valid(fs.m))
                 goto readrest;
-            break; /* break to PAGE HAS BEEN FOUND */
+            VM_OBJECT_WUNLOCK(fs.object);
+            break; /* break to PAGE HAS BEEN FOUND. */
         }
         KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m));
+        VM_OBJECT_ASSERT_WLOCKED(fs.object);

         /*
          * Page is not resident.  If the pager might contain the page
          * or this is the beginning of the search, allocate a new
          * page.  (Default objects are zero-fill, so there is no real
          * pager for them.)
          */
         if (fs.object->type != OBJT_DEFAULT ||
             fs.object == fs.first_object) {
             if ((fs.object->flags & OBJ_SIZEVNLOCK) != 0) {
-                rv = vm_fault_lock_vnode(&fs);
+                rv = vm_fault_lock_vnode(&fs, true);
                 MPASS(rv == KERN_SUCCESS ||
                     rv == KERN_RESOURCE_SHORTAGE);
                 if (rv == KERN_RESOURCE_SHORTAGE)
                     goto RetryFault;
             }
             if (fs.pindex >= fs.object->size) {
                 unlock_and_deallocate(&fs);
                 return (KERN_OUT_OF_BOUNDS);
[... 63 lines not shown (context: "proc %d (%s) failed to alloc page on fault, starting OOM\n",) ...]
                     curproc->p_pid, curproc->p_comm);
                 vm_pageout_oom(VM_OOM_MEM_PF);
                 goto RetryFault;
             }
         }
 readrest:
         /*
+         * Default objects have no pager so no exclusive busy exists
+         * to protect this page in the chain.  Skip to the next
+         * object without dropping the lock to preserve atomicity of
+         * shadow faults.
+         */
+        if (fs.object->type == OBJT_DEFAULT)
+            goto next;
+
+        /*
          * At this point, we have either allocated a new page or found
          * an existing page that is only partially valid.
          *
          * We hold a reference on the current object and the page is
-         * exclusive busied.
+         * exclusive busied.  The exclusive busy prevents simultaneous
+         * faults and collapses while the object lock is dropped.
          */
+        VM_OBJECT_WUNLOCK(fs.object);
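
Note: the invariant the new comment describes (page exclusive-busied, object lock dropped) is a single-flight fill protocol: the per-page busy marker, not the container lock, serializes concurrent faults on the same page, so the lock need not be held across pager I/O. A minimal userspace model with a one-entry table; every name here is hypothetical:

    /* cc -o busyfill busyfill.c -lpthread */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct entry {
        bool valid;   /* contents are good */
        bool busy;    /* one thread is filling it; others must wait */
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; /* object lock analogue */
    static pthread_cond_t busy_cv = PTHREAD_COND_INITIALIZER;
    static struct entry page;

    static void
    fill_slowly(struct entry *e)
    {
        /* Pager I/O analogue: runs with table_lock dropped. */
        e->valid = true;
    }

    static void
    fault(void)
    {
        pthread_mutex_lock(&table_lock);
        while (page.busy)                    /* someone else is paging it in */
            pthread_cond_wait(&busy_cv, &table_lock);
        if (!page.valid) {
            page.busy = true;                /* xbusy analogue */
            pthread_mutex_unlock(&table_lock);   /* drop the big lock for I/O */
            fill_slowly(&page);
            pthread_mutex_lock(&table_lock);
            page.busy = false;               /* unbusy + wakeup */
            pthread_cond_broadcast(&busy_cv);
        }
        pthread_mutex_unlock(&table_lock);
        printf("page valid: %d\n", page.valid);
    }

    int
    main(void)
    {
        fault();
        fault();   /* second fault takes the fast, already-valid path */
        return (0);
    }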
         /*
          * If the pager for the current object might have the page,
          * then determine the number of additional pages to read and
          * potentially reprioritize previously read pages for earlier
          * reclamation.  These operations should only be performed
          * once per page fault.  Even if the current pager doesn't
          * have the page, the number of additional pages to read will
          * apply to subsequent objects in the shadow chain.
          */
-        if (fs.object->type != OBJT_DEFAULT && nera == -1 &&
-            !P_KILLED(curproc)) {
+        if (nera == -1 && !P_KILLED(curproc)) {
             KASSERT(fs.lookup_still_valid, ("map unlocked"));
             era = fs.entry->read_ahead;
             behavior = vm_map_entry_behavior(fs.entry);
             if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
                 nera = 0;
             } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
                 nera = VM_FAULT_READ_AHEAD_MAX;
                 if (vaddr == fs.entry->next_read)
[... 49 lines not shown (context: if (fs.object->type != OBJT_DEFAULT) {) ...]
             /*
              * Release the map lock before locking the vnode or
              * sleeping in the pager.  (If the current object has
              * a shadow, then an earlier iteration of this loop
              * may have already unlocked the map.)
              */
             unlock_map(&fs);

-            rv = vm_fault_lock_vnode(&fs);
+            rv = vm_fault_lock_vnode(&fs, false);
             MPASS(rv == KERN_SUCCESS ||
                 rv == KERN_RESOURCE_SHORTAGE);
             if (rv == KERN_RESOURCE_SHORTAGE)
                 goto RetryFault;
             KASSERT(fs.vp == NULL || !fs.map->system_map,
                 ("vm_fault: vnode-backed object mapped by system map"));

             /*
[... 24 lines not shown (context: if (fs.object->type != OBJT_DEFAULT) {) ...]
                         VM_FAULT_READ_DEFAULT;
                     behind = ulmin(cluster_offset,
                         atop(vaddr - e_start));
                     ahead = VM_FAULT_READ_DEFAULT - 1 -
                         cluster_offset;
                 }
                 ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
             }
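
Note: the behind/ahead computation is plain window clamping: a fixed-size cluster is placed around the faulting page and truncated at the map entry's bounds. A standalone sketch of the same arithmetic; the constants and helper functions are stand-ins, not the kernel's:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define READ_DEFAULT 8UL              /* desired cluster, in pages */

    static unsigned long atop(unsigned long x) { return (x / PAGE_SIZE); }
    static unsigned long ulmin(unsigned long a, unsigned long b) { return (a < b ? a : b); }

    int
    main(void)
    {
        unsigned long e_start = 0x10000, e_end = 0x30000, vaddr = 0x11000;
        unsigned long cluster_offset = READ_DEFAULT / 2;

        /* Pages before vaddr, limited by the start of the entry. */
        unsigned long behind = ulmin(cluster_offset, atop(vaddr - e_start));
        /* Pages after vaddr; the -1 accounts for the faulting page itself. */
        unsigned long ahead = READ_DEFAULT - 1 - cluster_offset;
        ahead = ulmin(ahead, atop(e_end - vaddr) - 1);

        printf("behind %lu, fault 1, ahead %lu\n", behind, ahead);
        return (0);
    }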
-            VM_OBJECT_WUNLOCK(fs.object);
             rv = vm_pager_get_pages(fs.object, &fs.m, 1,
                 &behind, &ahead);
-            VM_OBJECT_WLOCK(fs.object);
             if (rv == VM_PAGER_OK) {
                 faultcount = behind + 1 + ahead;
                 hardfault = true;
-                break; /* break to PAGE HAS BEEN FOUND */
+                break; /* break to PAGE HAS BEEN FOUND. */
             }
+            VM_OBJECT_WLOCK(fs.object);
             if (rv == VM_PAGER_ERROR)
                 printf("vm_fault: pager read error, pid %d (%s)\n",
                     curproc->p_pid, curproc->p_comm);

             /*
              * If an I/O error occurred or the requested page was
              * outside the range of the pager, clean up and return
              * an error.
              */
             if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
                 fault_page_free(&fs.m);
                 unlock_and_deallocate(&fs);
                 return (KERN_OUT_OF_BOUNDS);
             }
         }
+next:
         /*
          * The requested page does not exist at this object/
          * offset.  Remove the invalid page from the object,
          * waking up anyone waiting for it, and continue on to
          * the next object.  However, if this is the top-level
          * object, we must leave the busy page in place to
          * prevent another process from rushing past us, and
          * inserting the page in that object at the same time
          * that we are.
          */
         if (fs.object == fs.first_object) {
             fs.first_m = fs.m;
             fs.m = NULL;
         } else
             fault_page_free(&fs.m);

         /*
          * Move on to the next object.  Lock the next object before
          * unlocking the current one.
          */
+        VM_OBJECT_ASSERT_WLOCKED(fs.object);
         next_object = fs.object->backing_object;
         if (next_object == NULL) {
             /*
              * If there's no object left, fill the page in the top
              * object with zeros.
              */
+            VM_OBJECT_WUNLOCK(fs.object);
             if (fs.object != fs.first_object) {
                 vm_object_pip_wakeup(fs.object);
-                VM_OBJECT_WUNLOCK(fs.object);
                 fs.object = fs.first_object;
                 fs.pindex = fs.first_pindex;
-                VM_OBJECT_WLOCK(fs.object);
             }
             MPASS(fs.first_m != NULL);
             MPASS(fs.m == NULL);
             fs.m = fs.first_m;
             fs.first_m = NULL;

             /*
              * Zero the page if necessary and mark it valid.
              */
             if ((fs.m->flags & PG_ZERO) == 0) {
                 pmap_zero_page(fs.m);
             } else {
                 VM_CNT_INC(v_ozfod);
             }
             VM_CNT_INC(v_zfod);
             vm_page_valid(fs.m);

             /* Don't try to prefault neighboring pages. */
             faultcount = 1;
-            break; /* break to PAGE HAS BEEN FOUND */
+            break; /* break to PAGE HAS BEEN FOUND. */
         } else {
             MPASS(fs.first_m != NULL);
             KASSERT(fs.object != next_object,
                 ("object loop %p", next_object));
             VM_OBJECT_WLOCK(next_object);
             vm_object_pip_add(next_object, 1);
             if (fs.object != fs.first_object)
                 vm_object_pip_wakeup(fs.object);
             fs.pindex +=
                 OFF_TO_IDX(fs.object->backing_object_offset);
             VM_OBJECT_WUNLOCK(fs.object);
             fs.object = next_object;
         }
     }
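
Note: "Lock the next object before unlocking the current one" is hand-over-hand (lock-coupling) traversal: at no instant is the link being followed unprotected, so the next object cannot be freed or repointed mid-step. A userspace sketch of that discipline over a toy chain; the types here are invented:

    /* cc -o coupling coupling.c -lpthread */
    #include <pthread.h>
    #include <stdio.h>

    struct obj {
        pthread_mutex_t lock;
        struct obj *backing;   /* next object in the shadow chain */
        int id;
    };

    /* Walk to the end of the chain, never holding zero locks mid-step. */
    static struct obj *
    walk(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        for (;;) {
            struct obj *next = o->backing;
            if (next == NULL)
                break;
            pthread_mutex_lock(&next->lock);  /* couple: take next first... */
            pthread_mutex_unlock(&o->lock);   /* ...then release current */
            o = next;
        }
        pthread_mutex_unlock(&o->lock);
        return (o);
    }

    int
    main(void)
    {
        struct obj a = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
        struct obj b = { PTHREAD_MUTEX_INITIALIZER, &a, 1 };
        printf("tail id %d\n", walk(&b)->id);
        return (0);
    }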
-    vm_page_assert_xbusied(fs.m);

     /*
-     * PAGE HAS BEEN FOUND.  [Loop invariant still holds -- the object lock
-     * is held.]
+     * PAGE HAS BEEN FOUND.  A valid page has been found and exclusively
+     * busied.  The object lock must no longer be held.
      */
+    vm_page_assert_xbusied(fs.m);
+    VM_OBJECT_ASSERT_UNLOCKED(fs.object);

     /*
      * If the page is being written, but isn't already owned by the
      * top-level object, we have to copy it into a new page owned by the
      * top-level object.
      */
     if (fs.object != fs.first_object) {
         /*
          * We only really need to copy if we want to write it.
          */
         if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
             /*
              * This allows pages to be virtually copied from a
              * backing_object into the first_object, where the
              * backing object has no other refs to it, and cannot
              * gain any more refs.  Instead of a bcopy, we just
              * move the page from the backing object to the
              * first object.  Note that we must mark the page
              * dirty in the first object so that it will go out
              * to swap when needed.
              */
             is_first_object_locked = false;
             if (
                 /*
                  * Only one shadow object
                  */
-                (fs.object->shadow_count == 1) &&
+                fs.object->shadow_count == 1 &&
                 /*
                  * No COW refs, except us
                  */
-                (fs.object->ref_count == 1) &&
+                fs.object->ref_count == 1 &&
                 /*
                  * No one else can look this object up
                  */
-                (fs.object->handle == NULL) &&
+                fs.object->handle == NULL &&
                 /*
                  * No other ways to look the object up
                  */
-                ((fs.object->flags & OBJ_ANON) != 0) &&
+                (fs.object->flags & OBJ_ANON) != 0 &&
                 (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
                 /*
                  * We don't chase down the shadow chain
                  */
-                fs.object == fs.first_object->backing_object) {
+                fs.object == fs.first_object->backing_object &&
+                VM_OBJECT_TRYWLOCK(fs.object)) {
                 /*
                  * Remove but keep xbusy for replace.  fs.m is
                  * moved into fs.first_object and left busy
                  * while fs.first_m is conditionally freed.
                  */
                 vm_page_remove_xbusy(fs.m);
                 vm_page_replace(fs.m, fs.first_object,
                     fs.first_pindex, fs.first_m);
                 vm_page_dirty(fs.m);
 #if VM_NRESERVLEVEL > 0
                 /*
                  * Rename the reservation.
                  */
                 vm_reserv_rename(fs.m, fs.first_object,
                     fs.object, OFF_TO_IDX(
                     fs.first_object->backing_object_offset));
 #endif
                 VM_OBJECT_WUNLOCK(fs.object);
+                VM_OBJECT_WUNLOCK(fs.first_object);
                 fs.first_m = fs.m;
                 fs.m = NULL;
                 VM_CNT_INC(v_cow_optim);
             } else {
-                VM_OBJECT_WUNLOCK(fs.object);
+                if (is_first_object_locked)
+                    VM_OBJECT_WUNLOCK(fs.first_object);
                 /*
                  * Oh, well, let's copy it.
                  */
                 pmap_copy_page(fs.m, fs.first_m);
                 vm_page_valid(fs.first_m);
                 if (wired && (fault_flags &
                     VM_FAULT_WIRE) == 0) {
                     vm_page_wire(fs.first_m);
[... 22 lines not shown (context: #endif) ...]
             faultcount = 1;

             /*
              * Only use the new page below...
              */
             fs.object = fs.first_object;
             fs.pindex = fs.first_pindex;
             fs.m = fs.first_m;
-            if (!is_first_object_locked)
-                VM_OBJECT_WLOCK(fs.object);
             VM_CNT_INC(v_cow_faults);
             curthread->td_cow++;
         } else {
             prot &= ~VM_PROT_WRITE;
         }
     }
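
Note: the branch above is a classic copy-on-write shortcut: when the backing object is provably private (one reference, one shadow, anonymous, and both trylocks succeed without sleeping), the page is moved rather than copied; otherwise it falls back to a physical copy. A toy model with heap buffers standing in for pages; nothing here is kernel API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
        char *data;
        int refs;       /* how many owners can still reach it */
    };

    /*
     * Give the caller its own copy of 'src'.  If we are the sole owner,
     * just move the pointer (the v_cow_optim analogue); otherwise
     * allocate and copy (the pmap_copy_page analogue).
     */
    static char *
    cow_resolve(struct buf *src)
    {
        if (src->refs == 1) {
            char *p = src->data;     /* transfer ownership, no copy */
            src->data = NULL;
            return (p);
        }
        return (strdup(src->data));  /* shared: must really copy */
    }

    int
    main(void)
    {
        struct buf shared = { strdup("hello"), 2 };
        struct buf private = { strdup("world"), 1 };
        char *a = cow_resolve(&shared);   /* copies */
        char *b = cow_resolve(&private);  /* moves */
        printf("%s %s (shared still has: %s)\n", a, b, shared.data);
        free(a); free(b); free(shared.data);
        return (0);
    }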
     /*
      * We must verify that the maps have not changed since our last
      * lookup.
      */
     if (!fs.lookup_still_valid) {
         if (!vm_map_trylock_read(fs.map)) {
-            unlock_and_deallocate(&fs);
+            fault_deallocate(&fs);
             goto RetryFault;
         }
         fs.lookup_still_valid = true;
         if (fs.map->timestamp != fs.map_generation) {
             result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
                 &fs.entry, &retry_object, &retry_pindex, &retry_prot,
                 &wired);

             /*
              * If we don't need the page any longer, put it on the
              * inactive list (the easiest thing to do here).  If no
              * one needs it, pageout will grab it eventually.
              */
             if (result != KERN_SUCCESS) {
-                unlock_and_deallocate(&fs);
+                fault_deallocate(&fs);

                 /*
                  * If retry of map lookup would have blocked then
                  * retry fault from start.
                  */
                 if (result == KERN_FAILURE)
                     goto RetryFault;
                 return (result);
             }
             if ((retry_object != fs.first_object) ||
                 (retry_pindex != fs.first_pindex)) {
-                unlock_and_deallocate(&fs);
+                fault_deallocate(&fs);
                 goto RetryFault;
             }

             /*
              * Check whether the protection has changed or the object
              * has been copied while we left the map unlocked.
              * Changing from read to write permission is OK - we leave
              * the page write-protected, and catch the write fault.
              * Changing from write to read permission means that we
              * can't mark the page write-enabled after all.
              */
             prot &= retry_prot;
             fault_type &= retry_prot;
             if (prot == 0) {
-                unlock_and_deallocate(&fs);
+                fault_deallocate(&fs);
                 goto RetryFault;
             }

             /* Reassert because wired may have changed. */
             KASSERT(wired || (fault_flags & VM_FAULT_WIRE) == 0,
                 ("!wired && VM_FAULT_WIRE"));
         }
     }
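
Note: the timestamp check is optimistic concurrency control: capture a generation number at lookup time, and after reacquiring the lock compare generations; a mismatch forces a fresh lookup and possibly a full restart. A compact sketch of just that control flow, with invented stand-ins for the map:

    #include <stdbool.h>
    #include <stdio.h>

    struct map {
        unsigned timestamp;   /* bumped on every modification */
        int value;            /* stands in for the lookup result */
    };

    static bool
    lookup_still_valid(const struct map *m, unsigned cached_gen)
    {
        return (m->timestamp == cached_gen);
    }

    int
    main(void)
    {
        struct map m = { 1, 42 };
        unsigned gen = m.timestamp;     /* fs.map_generation analogue */
        int cached = m.value;

        m.timestamp++;                  /* someone changed the map meanwhile */
        m.value = 7;

        if (!lookup_still_valid(&m, gen)) {
            /* Redo the lookup; restart the fault if the result moved. */
            if (m.value != cached) {
                printf("stale: retry fault\n");
                return (0);
            }
        }
        printf("still valid\n");
        return (0);
    }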
+    VM_OBJECT_ASSERT_UNLOCKED(fs.object);

     /*
      * If the page was filled by a pager, save the virtual address that
      * should be faulted on next under a sequential access pattern to the
      * map entry.  A read lock on the map suffices to update this address
      * safely.
      */
     if (hardfault)
         fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;
-    vm_page_assert_xbusied(fs.m);
-    vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags);

     /*
      * Page must be completely valid or it is not fit to
      * map into user space.  vm_pager_get_pages() ensures this.
      */
+    vm_page_assert_xbusied(fs.m);
     KASSERT(vm_page_all_valid(fs.m),
         ("vm_fault: page %p partially invalid", fs.m));
-    VM_OBJECT_WUNLOCK(fs.object);
+
+    vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags);

     /*
      * Put this page into the physical map.  We had to do the unlock above
      * because pmap_enter() may sleep.  We don't put the page
      * back on the active queue until later so that the pageout daemon
      * won't find it (yet).
      */
     pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
         fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
[... 64 lines not shown (in: vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)) ...]
     vm_map_entry_t entry;
     vm_object_t first_object, object;
     vm_offset_t end, start;
     vm_page_t m, m_next;
     vm_pindex_t pend, pstart;
     vm_size_t size;

     object = fs->object;
-    VM_OBJECT_ASSERT_WLOCKED(object);
+    VM_OBJECT_ASSERT_UNLOCKED(object);
     first_object = fs->first_object;
-    if (first_object != object) {
-        if (!VM_OBJECT_TRYWLOCK(first_object)) {
-            VM_OBJECT_WUNLOCK(object);
-            VM_OBJECT_WLOCK(first_object);
-            VM_OBJECT_WLOCK(object);
-        }
-    }
     /* Neither fictitious nor unmanaged pages can be reclaimed. */
     if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
+        VM_OBJECT_RLOCK(first_object);
         size = VM_FAULT_DONTNEED_MIN;
         if (MAXPAGESIZES > 1 && size < pagesizes[1])
             size = pagesizes[1];
         end = rounddown2(vaddr, size);
         if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
             (entry = fs->entry)->start < end) {
             if (end - entry->start < size)
                 start = entry->start;
[... 23 lines not shown (context: if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&) ...]
                  * is in the inactive queue is racy; in the
                  * worst case we will requeue the page
                  * unnecessarily.
                  */
                 if (!vm_page_inactive(m))
                     vm_page_deactivate(m);
             }
         }
+        VM_OBJECT_RUNLOCK(first_object);
     }
-    if (first_object != object)
-        VM_OBJECT_WUNLOCK(first_object);
 }
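
Note: vm_fault_dontneed deactivates the superpage-aligned window just behind a long sequential run so the pagedaemon reclaims those pages first. The window arithmetic (round down to the superpage boundary, then clamp at the entry start) as a standalone program; the sizes and names are illustrative:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define SUPERPAGE (512UL * PAGE_SIZE)   /* pagesizes[1] analogue: 2MB */

    /* Round v down to a power-of-two alignment a (rounddown2 analogue). */
    static unsigned long
    rounddown2(unsigned long v, unsigned long a)
    {
        return (v & ~(a - 1));
    }

    int
    main(void)
    {
        unsigned long entry_start = 3 * SUPERPAGE + 8 * PAGE_SIZE;
        unsigned long vaddr = 5 * SUPERPAGE + 17 * PAGE_SIZE;

        unsigned long end = rounddown2(vaddr, SUPERPAGE);
        /* Don't reach back past the start of the mapping. */
        unsigned long start = (end - entry_start < SUPERPAGE) ?
            entry_start : end - SUPERPAGE;
        printf("deactivate [%#lx, %#lx)\n", start, end);
        return (0);
    }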
 /*
  * vm_fault_prefault provides a quick way of clustering
  * page faults into a process's address space.  It is a "cousin"
  * of vm_map_pmap_enter, except it runs at page fault time instead
  * of mmap time.
  */
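
Note: prefaulting walks a small window around the faulting address and pre-enters mappings only for pages that are already resident, saving one trap per neighbor. A toy model over a boolean residency map; everything here is invented for illustration (the kernel uses pmap_enter_quick() on real pages):

    #include <stdbool.h>
    #include <stdio.h>

    #define PFBAK 4    /* pages to probe behind the fault */
    #define PFFOR 4    /* pages to probe ahead of it */
    #define NPAGES 32

    static bool resident[NPAGES];   /* which pages could be mapped for free */
    static bool mapped[NPAGES];

    static void
    prefault(int fault_page)
    {
        for (int i = fault_page - PFBAK; i <= fault_page + PFFOR; i++) {
            if (i < 0 || i >= NPAGES || i == fault_page)
                continue;
            if (resident[i])        /* only pages already in memory */
                mapped[i] = true;   /* pmap_enter_quick analogue */
        }
    }

    int
    main(void)
    {
        resident[10] = resident[12] = resident[30] = true;
        mapped[11] = true;          /* the faulting page itself */
        prefault(11);
        for (int i = 0; i < NPAGES; i++)
            if (mapped[i])
                printf("page %d mapped\n", i);
        return (0);
    }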
[... 391 lines not shown ...]