Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/vm_fault.c
Show First 20 Lines • Show All 473 Lines • ▼ Show 20 Lines | |||||
#endif | #endif | ||||
npages = atop(pagesizes[psind]); | npages = atop(pagesizes[psind]); | ||||
for (i = 0; i < npages; i++) { | for (i = 0; i < npages; i++) { | ||||
vm_fault_populate_check_page(&m[i]); | vm_fault_populate_check_page(&m[i]); | ||||
vm_fault_dirty(fs->entry, &m[i], prot, fault_type, | vm_fault_dirty(fs->entry, &m[i], prot, fault_type, | ||||
fault_flags, true); | fault_flags, true); | ||||
} | } | ||||
VM_OBJECT_WUNLOCK(fs->first_object); | VM_OBJECT_WUNLOCK(fs->first_object); | ||||
pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type | (wired ? | rv = pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type | | ||||
markj: Doesn't vm_fault_soft_fast() need the same treatment? Maybe this should be handled internally by pmap_enter(). | |||||
kibAuthorUnsubmitted Done Inline Actions: vm_fault_soft_fast() returns an error to vm_fault(), and the fault handler then executes the 'slow' non-superpage pmap_enter(). At first I thought that inserting 4k entries in pmap_enter() and returning success would indeed be fine, but it is actually hard to distinguish other failures from this specific configuration (which is also not a real failure). Moreover, the current contract for pmap_enter() is that the function just does what it is directed to do; if pmap_enter(psind = 1) inserted a non-superpage mapping, it would look somewhat broken. | |||||
markjUnsubmitted Done Inline Actions: Fair enough. I agree that it would be odd for pmap_enter(psind = 1) to implement a 4KB fallback. | |||||
PMAP_ENTER_WIRED : 0), psind); | (wired ? PMAP_ENTER_WIRED : 0), psind); | ||||
#if defined(__amd64__) | |||||
if (psind > 0 && rv == KERN_FAILURE) { | |||||
for (i = 0; i < npages; i++) { | |||||
rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i), | |||||
&m[i], prot, fault_type | | |||||
(wired ? PMAP_ENTER_WIRED : 0), 0); | |||||
MPASS(rv == KERN_SUCCESS); | |||||
} | |||||
} | |||||
#else | |||||
MPASS(rv == KERN_SUCCESS); | |||||
#endif | |||||
VM_OBJECT_WLOCK(fs->first_object); | VM_OBJECT_WLOCK(fs->first_object); | ||||
m_mtx = NULL; | m_mtx = NULL; | ||||
for (i = 0; i < npages; i++) { | for (i = 0; i < npages; i++) { | ||||
vm_page_change_lock(&m[i], &m_mtx); | vm_page_change_lock(&m[i], &m_mtx); | ||||
if ((fault_flags & VM_FAULT_WIRE) != 0) | if ((fault_flags & VM_FAULT_WIRE) != 0) | ||||
vm_page_wire(&m[i]); | vm_page_wire(&m[i]); | ||||
else | else | ||||
vm_page_activate(&m[i]); | vm_page_activate(&m[i]); | ||||
▲ Show 20 Lines • Show All 1,331 Lines • Show Last 20 Lines |
Doesn't vm_fault_soft_fast() need the same treatment? Maybe this should be handled internally by pmap_enter().