Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -2806,12 +2806,15 @@
 gbincore(struct bufobj *bo, daddr_t lblkno)
 {
 	struct buf *bp;
+	struct pctrie ptree;
 
 	ASSERT_BO_LOCKED(bo);
-	bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno);
+	ptree = bo->bo_clean.bv_root;
+	bp = BUF_PCTRIE_LOOKUP(&ptree, lblkno);
 	if (bp != NULL)
 		return (bp);
-	return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno));
+	ptree = bo->bo_dirty.bv_root;
+	return (BUF_PCTRIE_LOOKUP(&ptree, lblkno));
 }
 
 /*
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -1970,7 +1970,7 @@
 	lobject = entry->object.vm_object;
 	if (!obj_locked)
 		VM_OBJECT_RLOCK(lobject);
-	while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
+	while ((m = vm_page_lookup_readonly(lobject, pindex)) == NULL &&
 	    !vm_fault_object_needs_getpages(lobject) &&
 	    (backing_object = lobject->backing_object) != NULL) {
 		KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
@@ -2187,7 +2187,8 @@
 	VM_OBJECT_RLOCK(src_object);
 	object = src_object;
 	pindex = src_pindex + dst_pindex;
-	while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
+	while ((src_m = vm_page_lookup_readonly(object, pindex)) ==
+	    NULL &&
 	    (backing_object = object->backing_object) != NULL) {
 		/*
 		 * Unless the source mapping is read-only or
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -618,6 +618,7 @@
 	    vm_object_t new_object, vm_pindex_t new_pindex);
 void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
+vm_page_t vm_page_lookup_readonly(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
 void vm_page_pqbatch_drain(void);
 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -1742,16 +1742,34 @@
  *	Returns the page associated with the object/offset
  *	pair specified; if none is found, NULL is returned.
  *
- *	The object must be locked.
+ *	The object must be write locked.
  */
 vm_page_t
 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 {
 
-	VM_OBJECT_ASSERT_LOCKED(object);
+	VM_OBJECT_ASSERT_WLOCKED(object);
 	return (vm_radix_lookup(&object->rtree, pindex));
 }
 
+/*
+ *	vm_page_lookup_readonly:
+ *
+ *	Returns the page associated with the object/offset
+ *	pair specified; if none is found, NULL is returned.
+ *
+ *	The object must be locked.
+ */
+vm_page_t
+vm_page_lookup_readonly(vm_object_t object, vm_pindex_t pindex)
+{
+	struct vm_radix copy;
+
+	VM_OBJECT_ASSERT_LOCKED(object);
+	copy = object->rtree;
+	return (vm_radix_lookup(&copy, pindex));
+}
+
 /*
  *	vm_page_iter_init:
  *
Index: sys/x86/iommu/intel_utils.c
===================================================================
--- sys/x86/iommu/intel_utils.c
+++ sys/x86/iommu/intel_utils.c
@@ -271,7 +271,7 @@
 
 	DMAR_ASSERT_LOCKED(unit);
 	VM_OBJECT_RLOCK(unit->ctx_obj);
-	root_entry = vm_page_lookup(unit->ctx_obj, 0);
+	root_entry = vm_page_lookup_readonly(unit->ctx_obj, 0);
 	VM_OBJECT_RUNLOCK(unit->ctx_obj);
 	dmar_write8(unit, DMAR_RTADDR_REG, VM_PAGE_TO_PHYS(root_entry));
 	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_SRTP);
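
Note on the pattern above (reviewer's sketch, not part of the patch): every
converted call site copies the small trie root structure onto the caller's
stack and performs the lookup through the copy, while vm_page_lookup() itself
now asserts the object write lock. Below is a minimal, self-contained C
sketch of that shape; toy_node, toy_trie, toy_lookup, and
toy_lookup_readonly are hypothetical names, and the toy binary trie stands in
for the kernel's path-compressed radix trie.

#include <stddef.h>
#include <stdint.h>

struct toy_node {
	uint64_t key;
	struct toy_node *child[2];
};

/* Small root structure; cheap to copy by value, like struct vm_radix. */
struct toy_trie {
	struct toy_node *root;
};

/*
 * Plain lookup: walks from whatever root structure it is handed.
 * Analogous to vm_radix_lookup()/BUF_PCTRIE_LOOKUP() in the patch, which
 * also take a pointer to a root structure.
 */
static struct toy_node *
toy_lookup(struct toy_trie *t, uint64_t key)
{
	struct toy_node *n;
	uint64_t bits;

	for (n = t->root, bits = key; n != NULL;
	    n = n->child[bits & 1], bits >>= 1) {
		if (n->key == key)
			return (n);
	}
	return (NULL);
}

/*
 * Read-locked variant: snapshot the shared root into a local and look up
 * through the snapshot, mirroring vm_page_lookup_readonly() and the
 * gbincore() hunk.  The lookup then never stores through the shared root;
 * only the caller's stack copy is touched.
 */
static struct toy_node *
toy_lookup_readonly(struct toy_trie *shared, uint64_t key)
{
	struct toy_trie copy;

	copy = *shared;
	return (toy_lookup(&copy, key));
}

int
main(void)
{
	struct toy_node leaf = { .key = 5 };
	struct toy_node root = { .key = 2, .child = { NULL, &leaf } };
	struct toy_trie t = { .root = &root };

	/* Key 5 (binary 101): bit 0 is 1, so the walk goes root -> leaf. */
	return (toy_lookup_readonly(&t, 5) == &leaf ? 0 : 1);
}

The sketch only captures the call-site shape; the diff itself does not state
why the kernel's lookups distinguish the write-locked and read-locked cases.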