Index: sys/kern/subr_pctrie.c =================================================================== --- sys/kern/subr_pctrie.c +++ sys/kern/subr_pctrie.c @@ -338,6 +338,22 @@ return (pctrie_match_value(node, index)); } +/* + * Returns the value stored at the index without mutating the trie and assuming + * access is externally synchronized by a read lock or mutex. + * + * If the index is not present, NULL is returned. + */ +uint64_t * +pctrie_lookup_readonly(struct pctrie *ptree, uint64_t index) +{ + struct pctrie_node *node, *parent; + + node = _pctrie_lookup_node(ptree, NULL, index, &parent, NULL, + PCTRIE_LOCKED); + return (pctrie_match_value(node, index)); +} + /* * Returns the value stored at the index without requiring an external lock. * Index: sys/kern/vfs_subr.c =================================================================== --- sys/kern/vfs_subr.c +++ sys/kern/vfs_subr.c @@ -2808,10 +2808,10 @@ struct buf *bp; ASSERT_BO_LOCKED(bo); - bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); + bp = BUF_PCTRIE_LOOKUP_READONLY(&bo->bo_clean.bv_root, lblkno); if (bp != NULL) return (bp); - return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); + return (BUF_PCTRIE_LOOKUP_READONLY(&bo->bo_dirty.bv_root, lblkno)); } /* Index: sys/sys/pctrie.h =================================================================== --- sys/sys/pctrie.h +++ sys/sys/pctrie.h @@ -215,6 +215,14 @@ return name##_PCTRIE_VAL2PTR(pctrie_lookup(ptree, key)); \ } \ \ +static __inline __unused struct type * \ +name##_PCTRIE_LOOKUP_READONLY(struct pctrie *ptree, uint64_t key) \ +{ \ + \ + return ( \ + name##_PCTRIE_VAL2PTR(pctrie_lookup_readonly(ptree, key))); \ +} \ + \ static __inline __unused int \ name##_PCTRIE_LOOKUP_RANGE(struct pctrie *ptree, uint64_t key, \ struct type *value[], int count) \ @@ -405,6 +413,7 @@ void pctrie_insert_node(uint64_t *val, struct pctrie_node *parent, void *parentp, struct pctrie_node *child); uint64_t *pctrie_lookup(struct pctrie *ptree, uint64_t key); 
+uint64_t *pctrie_lookup_readonly(struct pctrie *ptree, uint64_t key); uint64_t *pctrie_lookup_unlocked(struct pctrie *ptree, uint64_t key, smr_t smr); int pctrie_lookup_range(struct pctrie *ptree, Index: sys/vm/vm_fault.c =================================================================== --- sys/vm/vm_fault.c +++ sys/vm/vm_fault.c @@ -1441,7 +1441,7 @@ } vm_object_pip_wakeup(fs->object); vm_fault_unlock_map(fs); - if (fs->m != vm_page_lookup(fs->object, fs->pindex) || + if (fs->m != vm_page_lookup_readonly(fs->object, fs->pindex) || !vm_page_busy_sleep(fs->m, "vmpfw", 0)) VM_OBJECT_UNLOCK(fs->object); VM_CNT_INC(v_intrans); @@ -1971,7 +1971,7 @@ lobject = entry->object.vm_object; if (!obj_locked) VM_OBJECT_RLOCK(lobject); - while ((m = vm_page_lookup(lobject, pindex)) == NULL && + while ((m = vm_page_lookup_readonly(lobject, pindex)) == NULL && !vm_fault_object_needs_getpages(lobject) && (backing_object = lobject->backing_object) != NULL) { KASSERT((lobject->backing_object_offset & PAGE_MASK) == @@ -2188,7 +2188,8 @@ VM_OBJECT_RLOCK(src_object); object = src_object; pindex = src_pindex + dst_pindex; - while ((src_m = vm_page_lookup(object, pindex)) == NULL && + while ((src_m = vm_page_lookup_readonly(object, pindex)) == + NULL && (backing_object = object->backing_object) != NULL) { /* * Unless the source mapping is read-only or Index: sys/vm/vm_page.h =================================================================== --- sys/vm/vm_page.h +++ sys/vm/vm_page.h @@ -618,6 +618,7 @@ vm_object_t new_object, vm_pindex_t new_pindex); void vm_page_launder(vm_page_t m); vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t); +vm_page_t vm_page_lookup_readonly(vm_object_t, vm_pindex_t); vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t); void vm_page_pqbatch_drain(void); void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue); Index: sys/vm/vm_page.c =================================================================== --- sys/vm/vm_page.c +++ sys/vm/vm_page.c @@ 
-1748,10 +1748,26 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex) { - VM_OBJECT_ASSERT_LOCKED(object); + VM_OBJECT_ASSERT_WLOCKED(object); return (vm_radix_lookup(&object->rtree, pindex)); } +/* + * vm_page_lookup_readonly: + * + * Returns the page associated with the object/offset + * pair specified; if none is found, NULL is returned. + * + * The object must be locked; a read lock is sufficient. + */ +vm_page_t +vm_page_lookup_readonly(vm_object_t object, vm_pindex_t pindex) +{ + + VM_OBJECT_ASSERT_LOCKED(object); + return (vm_radix_lookup_readonly(&object->rtree, pindex)); +} + /* * vm_page_iter_init: * Index: sys/vm/vm_radix.h =================================================================== --- sys/vm/vm_radix.h +++ sys/vm/vm_radix.h @@ -80,7 +80,8 @@ } /* - * Returns the value stored at the index assuming there is an external lock. + * Returns the value stored at the index assuming there is an external write + * lock. * * If the index is not present, NULL is returned. */ @@ -90,6 +91,18 @@ return (VM_RADIX_PCTRIE_LOOKUP(&rtree->rt_trie, index)); } +/* + * Returns the value stored at the index assuming there is an external read + * lock. + * + * If the index is not present, NULL is returned. + */ +static __inline vm_page_t +vm_radix_lookup_readonly(struct vm_radix *rtree, vm_pindex_t index) +{ + return (VM_RADIX_PCTRIE_LOOKUP_READONLY(&rtree->rt_trie, index)); +} + /* * Returns the value stored at the index without requiring an external lock. * Index: sys/x86/iommu/intel_utils.c =================================================================== --- sys/x86/iommu/intel_utils.c +++ sys/x86/iommu/intel_utils.c @@ -271,7 +271,7 @@ DMAR_ASSERT_LOCKED(unit); VM_OBJECT_RLOCK(unit->ctx_obj); - root_entry = vm_page_lookup(unit->ctx_obj, 0); + root_entry = vm_page_lookup_readonly(unit->ctx_obj, 0); VM_OBJECT_RUNLOCK(unit->ctx_obj); dmar_write8(unit, DMAR_RTADDR_REG, VM_PAGE_TO_PHYS(root_entry)); dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_SRTP);