Index: sys/sys/pctrie.h
===================================================================
--- sys/sys/pctrie.h
+++ sys/sys/pctrie.h
@@ -453,6 +453,50 @@
 	/* Synchronize to make changes visible. */			\
 	pctrie_node_store(parentp, child, access);			\
 	return (res);							\
+}									\
+									\
+static __inline __unused int						\
+name##_PCTRIE_TRANSFER_LOOKUP_LT(struct pctrie *src, uint64_t sindex,	\
+    struct pctrie *dst, uint64_t dindex, struct type **found_out)	\
+{									\
+	struct pctrie_node *d_child, *freenode, *nbr, *node, *s_child;	\
+	smr_pctnode_t *dst_p, *src_p;					\
+	uint64_t *found, *val;						\
+									\
+	dst_p = pctrie_insert_lookup_lt(dst, dindex, &found, &nbr);	\
+	if (__predict_false(found != NULL)) {				\
+		*found_out = name##_PCTRIE_VAL2PTR(found);		\
+		return (EEXIST);					\
+	}								\
+	node = pctrie_node_load(dst_p, NULL, PCTRIE_UNSERIALIZED);	\
+	if (node != PCTRIE_NULL &&					\
+	    __predict_false((d_child = allocfn(dst)) == NULL)) {	\
+		if (found_out != NULL)					\
+			*found_out = NULL;				\
+		return (ENOMEM);					\
+	}								\
+	val = pctrie_remove_lookup(src, sindex, &src_p, &s_child);	\
+	if (val == NULL)						\
+		panic("%s: key not found", __func__);			\
+	if (s_child != PCTRIE_NULL) {					\
+		freenode = pctrie_node_load(src_p, NULL,		\
+		    PCTRIE_UNSERIALIZED);				\
+	} else								\
+		freenode = NULL;					\
+	pctrie_node_store(src_p, s_child, PCTRIE_UNSERIALIZED);	\
+	*val = dindex;							\
+	if (node != PCTRIE_NULL)					\
+		pctrie_insert_node(dst_p, node, d_child, val);		\
+	else								\
+		d_child = pctrie_toleaf(val);				\
+	/* Synchronize to make changes visible. */			\
+	pctrie_node_store(dst_p, d_child, access);			\
+	if (freenode != NULL)						\
+		freefn(src, freenode);					\
+	found = pctrie_subtree_lookup_lt(nbr, *val);			\
+	*found_out = name##_PCTRIE_VAL2PTR(found);			\
+	pctrie_subtree_lookup_lt_assert(nbr, *val, dst, found);	\
+	return (0);							\
 }
 
 struct pctrie_iter;
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -1615,7 +1615,7 @@
  * updating the page's fields to reflect this removal.
  */
 static void
-vm_page_object_remove(vm_page_t m)
+vm_page_object_remove(vm_page_t m, bool do_radix_remove)
 {
 	vm_object_t object;
 	vm_page_t mrem __diagused;
@@ -1633,8 +1633,11 @@
 	vm_pager_page_removed(object, m);
 
 	m->object = NULL;
-	mrem = vm_radix_remove(&object->rtree, m->pindex);
-	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
+	if (do_radix_remove) {
+		mrem = vm_radix_remove(&object->rtree, m->pindex);
+		KASSERT(mrem == m,
+		    ("removed page %p, expected page %p", mrem, m));
+	}
 
 	/*
 	 * Now remove from the object's list of backed pages.
@@ -1686,7 +1689,7 @@
 vm_page_remove_xbusy(vm_page_t m)
 {
 
-	vm_page_object_remove(m);
+	vm_page_object_remove(m, true);
 	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
 }
 
@@ -1978,7 +1981,6 @@
 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 {
 	vm_page_t mpred;
-	vm_pindex_t opidx;
 
 	VM_OBJECT_ASSERT_WLOCKED(new_object);
 
@@ -1989,22 +1991,17 @@
 	 * by m_prev and can cheat on the implementation aspects of the
 	 * function.
 	 */
-	opidx = m->pindex;
-	m->pindex = new_pindex;
-	if (vm_radix_insert_lookup_lt(&new_object->rtree, m, &mpred) != 0) {
-		m->pindex = opidx;
+	if (vm_radix_transfer_lookup_lt(&m->object->rtree, m->pindex,
+	    &new_object->rtree, new_pindex, &mpred) != 0)
 		return (1);
-	}
 
 	/*
 	 * The operation cannot fail anymore.  The removal must happen before
 	 * the listq iterator is tainted.
 	 */
-	m->pindex = opidx;
-	vm_page_object_remove(m);
+	vm_page_object_remove(m, false);
 
 	/* Return back to the new pindex to complete vm_page_insert(). */
-	m->pindex = new_pindex;
 	m->object = new_object;
 	vm_page_insert_radixdone(m, new_object, mpred);
 
@@ -4092,7 +4089,7 @@
 	    m->ref_count == VPRC_OBJREF,
 	    ("vm_page_free_prep: page %p has unexpected ref_count %u", m,
 	    m->ref_count));
-		vm_page_object_remove(m);
+		vm_page_object_remove(m, true);
 		m->ref_count -= VPRC_OBJREF;
 	} else
 		vm_page_assert_unbusied(m);
Index: sys/vm/vm_radix.h
===================================================================
--- sys/vm/vm_radix.h
+++ sys/vm/vm_radix.h
@@ -302,5 +302,27 @@
 	return (VM_RADIX_PCTRIE_REPLACE(&rtree->rt_trie, newpage));
 }
 
+/*
+ * Remove the page with index sindex from the src tree, change its
+ * pindex to dindex, and insert it into the dst tree.  Panic if sindex does
+ * not exist in src, or if dindex already exists in dst.  Return zero on success
+ * or a non-zero error on memory allocation failure.  On success, set the out
+ * parameter mpred to the previous page in the dst tree as if found by a
+ * previous call to vm_radix_lookup_le with the new page dindex.
+ */
+static __inline int
+vm_radix_transfer_lookup_lt(struct vm_radix *src, vm_pindex_t sindex,
+    struct vm_radix *dst, vm_pindex_t dindex, vm_page_t *mpred)
+{
+	int error;
+
+	error = VM_RADIX_PCTRIE_TRANSFER_LOOKUP_LT(
+	    &src->rt_trie, sindex, &dst->rt_trie, dindex, mpred);
+	if (__predict_false(error == EEXIST))
+		panic("vm_radix_transfer_lookup_lt: page already present, %p",
+		    *mpred);
+	return (error);
+}
+
 #endif /* _KERNEL */
 #endif /* !_VM_RADIX_H_ */