Page Menu · Home · FreeBSD

D8664.id22546.diff
No OneTemporary

D8664.id22546.diff

Index: vm/_vm_radix.h
===================================================================
--- vm/_vm_radix.h
+++ vm/_vm_radix.h
@@ -36,12 +36,8 @@
*/
struct vm_radix {
uintptr_t rt_root;
- uint8_t rt_flags;
};
-#define RT_INSERT_INPROG 0x01
-#define RT_TRIE_MODIFIED 0x02
-
#ifdef _KERNEL
static __inline boolean_t
Index: vm/vm_object.c
===================================================================
--- vm/vm_object.c
+++ vm/vm_object.c
@@ -205,7 +205,6 @@
object->type = OBJT_DEAD;
object->ref_count = 0;
object->rtree.rt_root = 0;
- object->rtree.rt_flags = 0;
object->paging_in_progress = 0;
object->resident_page_count = 0;
object->shadow_count = 0;
Index: vm/vm_radix.c
===================================================================
--- vm/vm_radix.c
+++ vm/vm_radix.c
@@ -341,8 +341,6 @@
index = page->pindex;
-restart:
-
/*
* The owner of record for root is not really important because it
* will never be used.
@@ -360,32 +358,10 @@
panic("%s: key %jx is already present",
__func__, (uintmax_t)index);
clev = vm_radix_keydiff(m->pindex, index);
-
- /*
- * During node allocation the trie that is being
- * walked can be modified because of recursing radix
- * trie operations.
- * If this is the case, the recursing functions signal
- * such situation and the insert operation must
- * start from scratch again.
- * The freed radix node will then be in the UMA
- * caches very likely to avoid the same situation
- * to happen.
- */
- rtree->rt_flags |= RT_INSERT_INPROG;
tmp = vm_radix_node_get(vm_radix_trimkey(index,
clev + 1), 2, clev);
- rtree->rt_flags &= ~RT_INSERT_INPROG;
- if (tmp == NULL) {
- rtree->rt_flags &= ~RT_TRIE_MODIFIED;
+ if (tmp == NULL)
return (ENOMEM);
- }
- if ((rtree->rt_flags & RT_TRIE_MODIFIED) != 0) {
- rtree->rt_flags &= ~RT_TRIE_MODIFIED;
- tmp->rn_count = 0;
- vm_radix_node_put(tmp);
- goto restart;
- }
*parentp = tmp;
vm_radix_addpage(tmp, index, clev, page);
vm_radix_addpage(tmp, m->pindex, clev, m);
@@ -409,21 +385,9 @@
*/
newind = rnode->rn_owner;
clev = vm_radix_keydiff(newind, index);
-
- /* See the comments above. */
- rtree->rt_flags |= RT_INSERT_INPROG;
tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
- rtree->rt_flags &= ~RT_INSERT_INPROG;
- if (tmp == NULL) {
- rtree->rt_flags &= ~RT_TRIE_MODIFIED;
+ if (tmp == NULL)
return (ENOMEM);
- }
- if ((rtree->rt_flags & RT_TRIE_MODIFIED) != 0) {
- rtree->rt_flags &= ~RT_TRIE_MODIFIED;
- tmp->rn_count = 0;
- vm_radix_node_put(tmp);
- goto restart;
- }
*parentp = tmp;
vm_radix_addpage(tmp, index, clev, page);
slot = vm_radix_slot(newind, clev);
@@ -708,20 +672,6 @@
vm_page_t m;
int i, slot;
- /*
- * Detect if a page is going to be removed from a trie which is
- * already undergoing another trie operation.
- * Right now this is only possible for vm_radix_remove() recursing
- * into vm_radix_insert().
- * If this is the case, the caller must be notified about this
- * situation. It will also takecare to update the RT_TRIE_MODIFIED
- * accordingly.
- * The RT_TRIE_MODIFIED bit is set here because the remove operation
- * will always succeed.
- */
- if ((rtree->rt_flags & RT_INSERT_INPROG) != 0)
- rtree->rt_flags |= RT_TRIE_MODIFIED;
-
rnode = vm_radix_getroot(rtree);
if (vm_radix_isleaf(rnode)) {
m = vm_radix_topage(rnode);
@@ -776,9 +726,6 @@
{
struct vm_radix_node *root;
- KASSERT((rtree->rt_flags & RT_INSERT_INPROG) == 0,
- ("vm_radix_reclaim_allnodes: unexpected trie recursion"));
-
root = vm_radix_getroot(rtree);
if (root == NULL)
return;

File Metadata

Mime Type
text/plain
Expires
Sun, Mar 29, 9:59 PM (4 h, 19 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
30557264
Default Alt Text
D8664.id22546.diff (3 KB)

Event Timeline