sys/vm/vm_fault.c
(… 245 lines not shown …)
vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
	if (need_dirty)
		vm_page_dirty(m);
	if (!set_wd)
		vm_page_unlock(m);
	else if (need_dirty)
		vm_pager_page_unswapped(m);
}
-static void
-vm_fault_fill_hold(vm_page_t *m_hold, vm_page_t m)
-{
-	if (m_hold != NULL) {
-		*m_hold = m;
-		vm_page_lock(m);
-		vm_page_wire(m);
-		vm_page_unlock(m);
-	}
-}
/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	vm_page_t m, m_map;
(… 44 lines not shown …)
		if (vm_page_ps_test(m_super, flags, m)) {
				fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type |
	    PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		return (rv);
-	vm_fault_fill_hold(m_hold, m);
+	if (m_hold != NULL) {
+		*m_hold = m;
+		vm_page_wire(m);
+	}
	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
	if (psind == 0 && !wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;
	return (KERN_SUCCESS);
}
(… 159 lines not shown …)
		if (psind > 0 && rv == KERN_FAILURE) {
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		m_mtx = NULL;
		for (i = 0; i < npages; i++) {
-			vm_page_change_lock(&m[i], &m_mtx);
-			if ((fault_flags & VM_FAULT_WIRE) != 0)
+			if ((fault_flags & VM_FAULT_WIRE) != 0) {
				vm_page_wire(&m[i]);
-			else
+			} else {
+				vm_page_change_lock(&m[i], &m_mtx);
				vm_page_activate(&m[i]);
+			}
			if (m_hold != NULL && m[i].pindex == fs->first_pindex) {
				*m_hold = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy_maybelocked(&m[i]);
jeff: vm_page_xunbusy_maybelocked() acquires the page lock if it is not already held. You have to have changed the page lock to match here before calling it.
markj: The condition (fault_flags & VM_FAULT_WIRE) will have the same value throughout the loop. So either we update m_mtx for each page, in which case vm_page_xunbusy_maybelocked() will use the correct page lock, or m_mtx will be NULL, in which case vm_page_xunbusy_maybelocked() will acquire the page lock.
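To make the invariant concrete, here is a minimal annotated sketch of the new loop from this change, with the two cases from the discussion spelled out as comments. The annotations are editorial, not part of the change, and assume the helper behavior described above (vm_page_xunbusy_maybelocked() only acquires the page lock when the caller's m_mtx does not already hold it).

	m_mtx = NULL;
	for (i = 0; i < npages; i++) {
		if ((fault_flags & VM_FAULT_WIRE) != 0) {
			/*
			 * Wiring no longer needs the page lock, so m_mtx
			 * stays NULL on every iteration and
			 * vm_page_xunbusy_maybelocked() takes and drops
			 * the page lock itself.
			 */
			vm_page_wire(&m[i]);
		} else {
			/*
			 * Activation still needs the page lock; m_mtx now
			 * points at m[i]'s lock, which is exactly the lock
			 * vm_page_xunbusy_maybelocked() expects to find held.
			 */
			vm_page_change_lock(&m[i], &m_mtx);
			vm_page_activate(&m[i]);
		}
		vm_page_xunbusy_maybelocked(&m[i]);
	}
	if (m_mtx != NULL)
		mtx_unlock(m_mtx);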
		}
		if (m_mtx != NULL)
			mtx_unlock(m_mtx);
	}
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}
(… 626 lines not shown …)
		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			    fs.object == fs.first_object->backing_object) {
				/*
				 * Keep the page wired to ensure that it is not
				 * freed by another thread, such as the page
				 * daemon, while it is disassociated from an
				 * object.
				 */
+				vm_page_wire(fs.m);
				mtx = NULL;
				vm_page_change_lock(fs.m, &mtx);
-				vm_page_wire(fs.m);
-				vm_page_remove(fs.m);
+				(void)vm_page_remove(fs.m);
				vm_page_change_lock(fs.first_m, &mtx);
				vm_page_replace_checked(fs.m, fs.first_object,
				    fs.first_pindex, fs.first_m);
				vm_page_free(fs.first_m);
				vm_page_change_lock(fs.m, &mtx);
				vm_page_unwire(fs.m, PQ_ACTIVE);
				mtx_unlock(mtx);
				vm_page_dirty(fs.m);
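As a reading aid, the reordered sequence in this hunk can be sketched with the rationale from the comment above attached to each step; the annotations are editorial, not part of the change.

	vm_page_wire(fs.m);			/* pin fs.m before it loses its object */
	mtx = NULL;
	vm_page_change_lock(fs.m, &mtx);	/* the page lock is still needed for remove/replace */
	(void)vm_page_remove(fs.m);		/* fs.m is now associated with no object */
	vm_page_change_lock(fs.first_m, &mtx);
	vm_page_replace_checked(fs.m, fs.first_object,
	    fs.first_pindex, fs.first_m);	/* reattach fs.m under fs.first_object */
	vm_page_free(fs.first_m);
	vm_page_change_lock(fs.m, &mtx);
	vm_page_unwire(fs.m, PQ_ACTIVE);	/* safe to drop the wiring: fs.m is reachable again */
	mtx_unlock(mtx);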
(… 16 lines not shown …)
#endif
			} else {
				/*
				 * Oh, well, lets copy it.
				 */
				pmap_copy_page(fs.m, fs.first_m);
				fs.first_m->valid = VM_PAGE_BITS_ALL;
				if (wired && (fault_flags &
				    VM_FAULT_WIRE) == 0) {
-					vm_page_lock(fs.first_m);
					vm_page_wire(fs.first_m);
-					vm_page_unlock(fs.first_m);
					vm_page_lock(fs.m);
					vm_page_unwire(fs.m, PQ_INACTIVE);
					vm_page_unlock(fs.m);
				}
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
(… 119 lines not shown …)
#endif
	pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 &&
	    wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);
	VM_OBJECT_WLOCK(fs.object);
-	vm_page_lock(fs.m);
	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
-	if ((fault_flags & VM_FAULT_WIRE) != 0)
+	if ((fault_flags & VM_FAULT_WIRE) != 0) {
		vm_page_wire(fs.m);
-	else
+	} else {
+		vm_page_lock(fs.m);
		vm_page_activate(fs.m);
+		vm_page_unlock(fs.m);
+	}
	if (m_hold != NULL) {
		*m_hold = fs.m;
		vm_page_wire(fs.m);
	}
-	vm_page_unlock(fs.m);
	vm_page_xunbusy(fs.m);
	/*
	 * Unlock everything, and return
	 */
	unlock_and_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
(… 254 lines not shown …)
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL) {
			vm_page_lock(*mp);
-			if (vm_page_unwire(*mp, PQ_INACTIVE) &&
-			    (*mp)->object == NULL)
-				vm_page_free(*mp);
+			vm_page_unwire(*mp, PQ_INACTIVE);
			vm_page_unlock(*mp);
		}
	return (-1);
}
/*
 * Routine:
 *	vm_fault_copy_entry
(… 184 lines not shown …)
again:
		 */
		VM_OBJECT_WLOCK(dst_object);
		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_lock(src_m);
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_unlock(src_m);
-				vm_page_lock(dst_m);
				vm_page_wire(dst_m);
-				vm_page_unlock(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_lock(dst_m);
			vm_page_activate(dst_m);
			vm_page_unlock(dst_m);
(… 29 lines not shown …)