D41099.id125326.diff

diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -525,7 +525,7 @@
int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_prot_t new_prot, vm_prot_t new_maxprot, int flags);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
-void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
+vm_map_entry_t vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
vm_map_entry_t entry);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -148,6 +148,8 @@
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
vm_offset_t failed_addr);
+#define CONTAINS_BITS(set, bits) ((~(set) & (bits)) == 0)
+
#define ENTRY_CHARGED(e) ((e)->cred != NULL || \
((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
!((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
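
The CONTAINS_BITS(set, bits) macro added above tests whether every bit of
'bits' is also set in 'set'; the rest of the patch uses it for
protection-mask checks. A minimal standalone sketch of its semantics
(userland, reusing the macro with the sys/mman.h protection constants):

    #include <assert.h>
    #include <sys/mman.h>

    #define CONTAINS_BITS(set, bits) ((~(set) & (bits)) == 0)

    int
    main(void)
    {
            /* PROT_READ|PROT_WRITE contains PROT_READ... */
            assert(CONTAINS_BITS(PROT_READ | PROT_WRITE, PROT_READ));
            /* ...but does not contain PROT_EXEC. */
            assert(!CONTAINS_BITS(PROT_READ | PROT_WRITE, PROT_EXEC));
            /* Any set trivially contains the empty bit set. */
            assert(CONTAINS_BITS(0, 0));
            return (0);
    }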
@@ -1601,20 +1603,16 @@
}
/*
- * vm_map_insert:
- *
- * Inserts the given whole VM object into the target
- * map at the specified address range. The object's
- * size should match that of the address range.
- *
- * Requires that the map be locked, and leaves it so.
- *
- * If object is non-NULL, ref count must be bumped by caller
- * prior to making call to account for the new entry.
+ * vm_map_insert1() is identical to vm_map_insert(), and returns the
+ * newly inserted map entry in '*res'.  In case the new entry is
+ * coalesced with a neighbor or an existing entry was resized, that
+ * entry is returned.  In any case, the returned entry covers the
+ * specified address range.
*/
-int
-vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
- vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
+static int
+vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
+ vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow,
+ vm_map_entry_t *res)
{
vm_map_entry_t new_entry, next_entry, prev_entry;
struct ucred *cred;
@@ -1761,7 +1759,8 @@
map->size += end - prev_entry->end;
vm_map_entry_resize(map, prev_entry,
end - prev_entry->end);
- vm_map_try_merge_entries(map, prev_entry, next_entry);
+ *res = vm_map_try_merge_entries(map, prev_entry,
+ next_entry);
return (KERN_SUCCESS);
}
@@ -1822,7 +1821,7 @@
* other cases, which are less common.
*/
vm_map_try_merge_entries(map, prev_entry, new_entry);
- vm_map_try_merge_entries(map, new_entry, next_entry);
+ *res = vm_map_try_merge_entries(map, new_entry, next_entry);
if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@@ -1832,6 +1831,28 @@
return (KERN_SUCCESS);
}
+/*
+ * vm_map_insert:
+ *
+ * Inserts the given whole VM object into the target
+ * map at the specified address range. The object's
+ * size should match that of the address range.
+ *
+ * Requires that the map be locked, and leaves it so.
+ *
+ * If object is non-NULL, ref count must be bumped by caller
+ * prior to making call to account for the new entry.
+ */
+int
+vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
+ vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
+{
+ vm_map_entry_t res;
+
+ return (vm_map_insert1(map, object, offset, start, end, prot, max,
+ cow, &res));
+}
+
/*
* vm_map_findspace:
*
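
With vm_map_insert() reduced to the wrapper above, existing callers are
unchanged, while callers that need the resulting entry can switch to
vm_map_insert1(). An illustrative fragment of the new calling pattern
inside vm_map.c (vm_map_insert1() is static to this file; the variable
names are illustrative and error handling is elided):

    vm_map_entry_t entry;
    int rv;

    rv = vm_map_insert1(map, NULL, 0, start, end, prot, max, cow,
        &entry);
    if (rv == KERN_SUCCESS) {
            /*
             * 'entry' covers [start, end) even when the range was
             * coalesced into a neighbor, so per-entry state can be set
             * on it directly, as vm_map_stack_locked() does for stack
             * gaps below.
             */
            entry->next_read = 0;
    }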
@@ -2273,7 +2294,8 @@
* another entry.
*/
#define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
- MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
+ MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC | \
+ MAP_ENTRY_STACK_GAP_UP | MAP_ENTRY_STACK_GAP_DN)
static bool
vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
@@ -2325,7 +2347,7 @@
*
* The map must be locked.
*/
-void
+vm_map_entry_t
vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
vm_map_entry_t entry)
{
@@ -2335,7 +2357,9 @@
vm_map_mergeable_neighbors(prev_entry, entry)) {
vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
vm_map_merged_neighbor_dispose(map, prev_entry);
+ return (entry);
}
+ return (prev_entry);
}
/*
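
vm_map_try_merge_entries() now returns the surviving entry: when the
neighbors merge, 'prev_entry' is unlinked and disposed and 'entry'
(extended downward over prev_entry's range by UNLINK_MERGE_NEXT) is
returned; otherwise 'prev_entry' is returned. Either way, the result
covers the range 'prev_entry' covered, which is what lets
vm_map_insert1() report the correct entry for a coalesced insertion.
Illustrative caller fragment:

    /*
     * Try to fold the just-created or just-resized entry into its
     * successor; 'res' ends up pointing at whichever entry now covers
     * the inserted range.
     */
    res = vm_map_try_merge_entries(map, new_entry, next_entry);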
@@ -2701,6 +2725,49 @@
VM_OBJECT_RUNLOCK(object);
}
+static int
+vm_map_protect_guard_phase1(vm_map_entry_t entry, vm_prot_t new_prot,
+ vm_prot_t new_maxprot, int flags)
+{
+ vm_prot_t max_prot;
+
+ MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0);
+ if ((entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
+ MAP_ENTRY_STACK_GAP_UP)) == 0)
+ return (KERN_SUCCESS);
+ max_prot = PROT_MAX_EXTRACT(entry->offset);
+ if ((flags & VM_MAP_PROTECT_SET_PROT) == 0)
+ new_prot = PROT_EXTRACT(entry->offset);
+ if ((flags & VM_MAP_PROTECT_SET_MAXPROT) == 0)
+ new_maxprot = PROT_MAX_EXTRACT(entry->offset);
+ if (!CONTAINS_BITS(max_prot, new_prot) ||
+ !CONTAINS_BITS(max_prot, new_maxprot))
+ return (KERN_PROTECTION_FAILURE);
+ return (KERN_SUCCESS);
+}
+
+static void
+vm_map_protect_guard_phase3(vm_map_entry_t entry, vm_prot_t new_prot,
+ vm_prot_t new_maxprot, int flags)
+{
+ vm_prot_t old_prot;
+
+ MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0);
+ if ((entry->eflags & (MAP_ENTRY_STACK_GAP_UP |
+ MAP_ENTRY_STACK_GAP_DN)) == 0)
+ return;
+
+ old_prot = PROT_EXTRACT(entry->offset);
+ if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
+ entry->offset = PROT_MAX(new_maxprot) |
+ (new_maxprot & old_prot);
+ }
+ if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) {
+ entry->offset = new_prot | PROT_MAX(
+ PROT_MAX_EXTRACT(entry->offset));
+ }
+}
+
/*
* vm_map_protect:
*
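
Stack-gap guard entries have no backing object, so the patch reuses their
'offset' field to stash the protections the stack will receive when the
gap is converted (set in vm_map_stack_locked() below). vm_map_protect()
is split so that phase 1 only validates a request against the stashed
maximum, and phase 3 applies it once every entry in the range has passed
validation. A small userland model of the encoding, assuming the
PROT_MAX(), PROT_EXTRACT() and PROT_MAX_EXTRACT() macros from FreeBSD's
sys/mman.h (the maximum protection is kept 16 bits above the current
one):

    #include <assert.h>
    #include <sys/mman.h>

    int
    main(void)
    {
            /* A gap created with prot = RW and max = RWX. */
            int offset = (PROT_READ | PROT_WRITE) |
                PROT_MAX(PROT_READ | PROT_WRITE | PROT_EXEC);

            /* Phase 1 analogue: both views decode independently. */
            assert(PROT_EXTRACT(offset) == (PROT_READ | PROT_WRITE));
            assert(PROT_MAX_EXTRACT(offset) ==
                (PROT_READ | PROT_WRITE | PROT_EXEC));

            /* Phase 3 analogue: dropping write keeps the stashed max. */
            offset = PROT_READ | PROT_MAX(PROT_MAX_EXTRACT(offset));
            assert(PROT_EXTRACT(offset) == PROT_READ);
            assert(PROT_MAX_EXTRACT(offset) ==
                (PROT_READ | PROT_WRITE | PROT_EXEC));
            return (0);
    }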
@@ -2720,9 +2787,9 @@
if (start == end)
return (KERN_SUCCESS);
- if ((flags & (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT)) ==
- (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT) &&
- (new_prot & new_maxprot) != new_prot)
+ if (CONTAINS_BITS(flags, VM_MAP_PROTECT_SET_PROT |
+ VM_MAP_PROTECT_SET_MAXPROT) &&
+ !CONTAINS_BITS(new_maxprot, new_prot))
return (KERN_OUT_OF_BOUNDS);
again:
@@ -2731,8 +2798,7 @@
if ((map->flags & MAP_WXORX) != 0 &&
(flags & VM_MAP_PROTECT_SET_PROT) != 0 &&
- (new_prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
- VM_PROT_EXECUTE)) {
+ CONTAINS_BITS(new_prot, VM_PROT_WRITE | VM_PROT_EXECUTE)) {
vm_map_unlock(map);
return (KERN_PROTECTION_FAILURE);
}
@@ -2755,21 +2821,40 @@
*/
for (entry = first_entry; entry->start < end;
entry = vm_map_entry_succ(entry)) {
- if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
- continue;
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
vm_map_unlock(map);
return (KERN_INVALID_ARGUMENT);
}
- if ((flags & VM_MAP_PROTECT_SET_PROT) == 0)
- new_prot = entry->protection;
- if ((flags & VM_MAP_PROTECT_SET_MAXPROT) == 0)
- new_maxprot = entry->max_protection;
- if ((new_prot & entry->max_protection) != new_prot ||
- (new_maxprot & entry->max_protection) != new_maxprot) {
+ if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
+ rv = vm_map_protect_guard_phase1(entry, new_prot,
+ new_maxprot, flags);
+ if (rv != KERN_SUCCESS) {
+ vm_map_unlock(map);
+ return (rv);
+ }
+ continue;
+ }
+ if (((flags & VM_MAP_PROTECT_SET_PROT) != 0 &&
+ !CONTAINS_BITS(entry->max_protection, new_prot)) ||
+ ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0 &&
+ !CONTAINS_BITS(entry->max_protection, new_maxprot))) {
vm_map_unlock(map);
return (KERN_PROTECTION_FAILURE);
}
+
+ if ((entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) {
+ prev_entry = vm_map_entry_pred(entry);
+ if (CONTAINS_BITS(prev_entry->eflags, MAP_ENTRY_GUARD |
+ MAP_ENTRY_STACK_GAP_DN)) {
+ rv = vm_map_protect_guard_phase1(prev_entry,
+ new_prot, new_maxprot, flags);
+ if (rv != KERN_SUCCESS) {
+ vm_map_unlock(map);
+ return (rv);
+ }
+ }
+ }
+
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
in_tran = entry;
}
@@ -2864,10 +2949,15 @@
entry->start < end;
vm_map_try_merge_entries(map, prev_entry, entry),
prev_entry = entry, entry = vm_map_entry_succ(entry)) {
- if (rv != KERN_SUCCESS ||
- (entry->eflags & MAP_ENTRY_GUARD) != 0)
+ if (rv != KERN_SUCCESS)
continue;
+ if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
+ vm_map_protect_guard_phase3(entry, new_prot,
+ new_maxprot, flags);
+ continue;
+ }
+
old_prot = entry->protection;
if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
@@ -2900,6 +2990,13 @@
entry->protection & MASK(entry));
#undef MASK
}
+
+ if ((entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0 &&
+ CONTAINS_BITS(prev_entry->eflags, MAP_ENTRY_GUARD |
+ MAP_ENTRY_STACK_GAP_DN)) {
+ vm_map_protect_guard_phase3(prev_entry,
+ new_prot, new_maxprot, flags);
+ }
}
vm_map_try_merge_entries(map, prev_entry, entry);
vm_map_unlock(map);
@@ -4553,10 +4650,10 @@
gap_bot = top;
gap_top = addrbos + max_ssize;
}
- rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
+ rv = vm_map_insert1(map, NULL, 0, bot, top, prot, max, cow,
+ &new_entry);
if (rv != KERN_SUCCESS)
return (rv);
- new_entry = vm_map_entry_succ(prev_entry);
KASSERT(new_entry->end == top || new_entry->start == bot,
("Bad entry start/end for new stack entry"));
KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
@@ -4567,10 +4664,17 @@
("new entry lacks MAP_ENTRY_GROWS_UP"));
if (gap_bot == gap_top)
return (KERN_SUCCESS);
- rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
+ rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
- MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
+ MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP), &gap_entry);
if (rv == KERN_SUCCESS) {
+ KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0,
+ ("entry %p not gap %#x", gap_entry, gap_entry->eflags));
+ KASSERT((gap_entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
+ MAP_ENTRY_STACK_GAP_UP)) != 0,
+ ("entry %p not stack gap %#x", gap_entry,
+ gap_entry->eflags));
+
/*
* Gap can never successfully handle a fault, so
* read-ahead logic is never used for it. Re-use
@@ -4580,10 +4684,8 @@
* store the original stack protections in the
* object offset.
*/
- gap_entry = orient == MAP_STACK_GROWS_DOWN ?
- vm_map_entry_pred(new_entry) : vm_map_entry_succ(new_entry);
gap_entry->next_read = sgp;
- gap_entry->offset = prot;
+ gap_entry->offset = prot | PROT_MAX(max);
} else {
(void)vm_map_delete(map, bot, top);
}
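
Storing PROT_MAX(max) alongside 'prot' in the gap's offset is what allows
vm_map_growstack() (the hunks below) to recreate the stack entry with its
original maximum protection; previously it passed 'prot' as both the
protection and the maximum, silently clipping the grown region's max.
The decode side, matching the fragment below:

    prot = PROT_EXTRACT(gap_entry->offset);
    max = PROT_MAX_EXTRACT(gap_entry->offset);
    rv = vm_map_insert(map, NULL, 0, grow_start,
        grow_start + grow_amount, prot, max, MAP_STACK_GROWS_DOWN);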
@@ -4602,8 +4704,8 @@
struct vmspace *vm;
struct ucred *cred;
vm_offset_t gap_end, gap_start, grow_start;
- vm_size_t grow_amount, guard, max_grow;
- vm_prot_t prot;
+ vm_size_t grow_amount, guard, max_grow, sgp;
+ vm_prot_t prot, max;
rlim_t lmemlim, stacklim, vmemlim;
int rv, rv1 __diagused;
bool gap_deleted, grow_down, is_procstack;
@@ -4748,7 +4850,9 @@
* The gap_entry "offset" field is overloaded. See
* vm_map_stack_locked().
*/
- prot = gap_entry->offset;
+ prot = PROT_EXTRACT(gap_entry->offset);
+ max = PROT_MAX_EXTRACT(gap_entry->offset);
+ sgp = gap_entry->next_read;
grow_start = gap_entry->end - grow_amount;
if (gap_entry->start + grow_amount == gap_entry->end) {
@@ -4762,13 +4866,16 @@
gap_deleted = false;
}
rv = vm_map_insert(map, NULL, 0, grow_start,
- grow_start + grow_amount, prot, prot, MAP_STACK_GROWS_DOWN);
+ grow_start + grow_amount, prot, max, MAP_STACK_GROWS_DOWN);
if (rv != KERN_SUCCESS) {
if (gap_deleted) {
- rv1 = vm_map_insert(map, NULL, 0, gap_start,
+ rv1 = vm_map_insert1(map, NULL, 0, gap_start,
gap_end, VM_PROT_NONE, VM_PROT_NONE,
- MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
+ MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN,
+ &gap_entry);
MPASS(rv1 == KERN_SUCCESS);
+ gap_entry->next_read = sgp;
+ gap_entry->offset = prot | PROT_MAX(max);
} else
vm_map_entry_resize(map, gap_entry,
grow_amount);
