Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/vm_fault.c
Show First 20 Lines • Show All 176 Lines • ▼ Show 20 Lines | unlock_vp(struct faultstate *fs) | ||||
if (fs->vp != NULL) { | if (fs->vp != NULL) { | ||||
vput(fs->vp); | vput(fs->vp); | ||||
fs->vp = NULL; | fs->vp = NULL; | ||||
} | } | ||||
} | } | ||||
/*
 * Release all resources held in the faultstate after a fault has been
 * resolved: wake up any threads sleeping on the paging-in-progress
 * counts, free the placeholder first_m page if a shadow chain was
 * traversed, drop the reference on the first object, and unlock the
 * map and any held vnode.
 *
 * NOTE(review): unlike unlock_and_deallocate(), this variant expects
 * the caller to have already dropped the write lock on fs->object;
 * vm_object_pip_wakeup() is invoked here without that lock held.
 */
static void
fault_deallocate(struct faultstate *fs)
{

	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		/*
		 * A backing object was faulted from: the page allocated
		 * in first_object was never used, so free it.  The lock
		 * is taken only around the free and pip wakeup.
		 */
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = NULL;
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}
/*
 * Convenience wrapper for callers that still hold the write lock on
 * fs->object: drop that lock, then release the remaining faultstate
 * resources.
 */
static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}
static void | |||||
vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot, | vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot, | ||||
vm_prot_t fault_type, int fault_flags, bool set_wd) | vm_prot_t fault_type, int fault_flags, bool set_wd) | ||||
{ | { | ||||
bool need_dirty; | bool need_dirty; | ||||
if (((prot & VM_PROT_WRITE) == 0 && | if (((prot & VM_PROT_WRITE) == 0 && | ||||
(fault_flags & VM_FAULT_DIRTY) == 0) || | (fault_flags & VM_FAULT_DIRTY) == 0) || | ||||
(m->oflags & VPO_UNMANAGED) != 0) | (m->oflags & VPO_UNMANAGED) != 0) | ||||
▲ Show 20 Lines • Show All 1,021 Lines • ▼ Show 20 Lines | |||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
/* | /* | ||||
* Rename the reservation. | * Rename the reservation. | ||||
*/ | */ | ||||
vm_reserv_rename(fs.m, fs.first_object, | vm_reserv_rename(fs.m, fs.first_object, | ||||
fs.object, OFF_TO_IDX( | fs.object, OFF_TO_IDX( | ||||
fs.first_object->backing_object_offset)); | fs.first_object->backing_object_offset)); | ||||
#endif | #endif | ||||
VM_OBJECT_WUNLOCK(fs.object); | |||||
fs.first_m = fs.m; | fs.first_m = fs.m; | ||||
fs.m = NULL; | fs.m = NULL; | ||||
VM_CNT_INC(v_cow_optim); | VM_CNT_INC(v_cow_optim); | ||||
} else { | } else { | ||||
VM_OBJECT_WUNLOCK(fs.object); | |||||
/* | /* | ||||
* Oh, well, lets copy it. | * Oh, well, lets copy it. | ||||
*/ | */ | ||||
pmap_copy_page(fs.m, fs.first_m); | pmap_copy_page(fs.m, fs.first_m); | ||||
vm_page_valid(fs.first_m); | vm_page_valid(fs.first_m); | ||||
if (wired && (fault_flags & | if (wired && (fault_flags & | ||||
VM_FAULT_WIRE) == 0) { | VM_FAULT_WIRE) == 0) { | ||||
vm_page_wire(fs.first_m); | vm_page_wire(fs.first_m); | ||||
vm_page_unwire(fs.m, PQ_INACTIVE); | vm_page_unwire(fs.m, PQ_INACTIVE); | ||||
} | } | ||||
/* | /* | ||||
* We no longer need the old page or object. | * We no longer need the old page or object. | ||||
*/ | */ | ||||
release_page(&fs); | release_page(&fs); | ||||
} | } | ||||
/* | /* | ||||
* fs.object != fs.first_object due to above | * fs.object != fs.first_object due to above | ||||
* conditional | * conditional | ||||
*/ | */ | ||||
vm_object_pip_wakeup(fs.object); | vm_object_pip_wakeup(fs.object); | ||||
VM_OBJECT_WUNLOCK(fs.object); | |||||
/* | /* | ||||
* We only try to prefault read-only mappings to the | * We only try to prefault read-only mappings to the | ||||
* neighboring pages when this copy-on-write fault is | * neighboring pages when this copy-on-write fault is | ||||
* a hard fault. In other cases, trying to prefault | * a hard fault. In other cases, trying to prefault | ||||
* is typically wasted effort. | * is typically wasted effort. | ||||
*/ | */ | ||||
if (faultcount == 0) | if (faultcount == 0) | ||||
▲ Show 20 Lines • Show All 103 Lines • ▼ Show 20 Lines | #endif | ||||
*/ | */ | ||||
pmap_enter(fs.map->pmap, vaddr, fs.m, prot, | pmap_enter(fs.map->pmap, vaddr, fs.m, prot, | ||||
fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0); | fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0); | ||||
if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 && | if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 && | ||||
wired == 0) | wired == 0) | ||||
vm_fault_prefault(&fs, vaddr, | vm_fault_prefault(&fs, vaddr, | ||||
faultcount > 0 ? behind : PFBAK, | faultcount > 0 ? behind : PFBAK, | ||||
faultcount > 0 ? ahead : PFFOR, false); | faultcount > 0 ? ahead : PFFOR, false); | ||||
VM_OBJECT_WLOCK(fs.object); | |||||
/* | /* | ||||
* If the page is not wired down, then put it where the pageout daemon | * If the page is not wired down, then put it where the pageout daemon | ||||
* can find it. | * can find it. | ||||
*/ | */ | ||||
if ((fault_flags & VM_FAULT_WIRE) != 0) { | if ((fault_flags & VM_FAULT_WIRE) != 0) { | ||||
vm_page_wire(fs.m); | vm_page_wire(fs.m); | ||||
} else { | } else { | ||||
vm_page_lock(fs.m); | vm_page_lock(fs.m); | ||||
vm_page_activate(fs.m); | vm_page_activate(fs.m); | ||||
vm_page_unlock(fs.m); | vm_page_unlock(fs.m); | ||||
} | } | ||||
if (m_hold != NULL) { | if (m_hold != NULL) { | ||||
*m_hold = fs.m; | *m_hold = fs.m; | ||||
vm_page_wire(fs.m); | vm_page_wire(fs.m); | ||||
} | } | ||||
vm_page_xunbusy(fs.m); | vm_page_xunbusy(fs.m); | ||||
/* | /* | ||||
* Unlock everything, and return | * Unlock everything, and return | ||||
*/ | */ | ||||
unlock_and_deallocate(&fs); | fault_deallocate(&fs); | ||||
if (hardfault) { | if (hardfault) { | ||||
VM_CNT_INC(v_io_faults); | VM_CNT_INC(v_io_faults); | ||||
curthread->td_ru.ru_majflt++; | curthread->td_ru.ru_majflt++; | ||||
#ifdef RACCT | #ifdef RACCT | ||||
if (racct_enable && fs.object->type == OBJT_VNODE) { | if (racct_enable && fs.object->type == OBJT_VNODE) { | ||||
PROC_LOCK(curproc); | PROC_LOCK(curproc); | ||||
if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { | if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { | ||||
racct_add_force(curproc, RACCT_WRITEBPS, | racct_add_force(curproc, RACCT_WRITEBPS, | ||||
▲ Show 20 Lines • Show All 491 Lines • Show Last 20 Lines |
I would suggest faultstate_deallocate.