sys/vm/vm_map.c
[... 1,972 lines elided ...]
}

static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
static const int aslr_pages_rnd_32[2] = {0x100, 0x4};

static int cluster_anon = 1;
SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
    &cluster_anon, 0,
-    "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");
+    "Cluster anonymous mappings: "
+    "0 = no, 1 = yes if no hint, 2 = always, 3 = keep anon_loc const");

static bool
clustering_anon_allowed(vm_offset_t addr)
{
	switch (cluster_anon) {
	case 0:
		return (false);
	case 1:
		return (addr == 0);

alc: Consider this diagnostic patch:
```
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index…
```
And, this output from a machine that's been busy for a couple hours:
[…]
In other words, by default we never cluster, because addr is never zero.

kib: Yes it is relevant for the cluster mode 1, which is default.

	case 2:
	default:
		return (true);
	}
}

+static bool
+clustering_anon_loc_const(void)
+{
+	return (cluster_anon == 3);
+}
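
The knob above is exported as vm.cluster_anon, so the modes, including the new 3 = keep anon_loc const, can be inspected and toggled from userspace. Below is a minimal illustrative sketch (not part of the changeset) using sysctlbyname(3); the reporting format and error handling are arbitrary choices:

```
/*
 * Read the current vm.cluster_anon mode and, if a new mode is given as
 * argv[1], write it back (writing requires root; the knob is CTLFLAG_RW).
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	int mode;
	size_t len = sizeof(mode);

	if (sysctlbyname("vm.cluster_anon", &mode, &len, NULL, 0) != 0)
		err(1, "read vm.cluster_anon");
	printf("vm.cluster_anon = %d "
	    "(0 = no, 1 = yes if no hint, 2 = always, 3 = keep anon_loc const)\n",
	    mode);

	if (argc > 1) {
		int new_mode = atoi(argv[1]);

		if (sysctlbyname("vm.cluster_anon", NULL, NULL, &new_mode,
		    sizeof(new_mode)) != 0)
			err(1, "set vm.cluster_anon");
	}
	return (0);
}
```
The same call pattern works for vm.aslr_restarts (the read-only long declared just below) to see how often the ASLR placement search had to be restarted.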

static long aslr_restarts;
SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
    &aslr_restarts, 0,
    "Number of aslr failures");

/*
 * Searches for the specified amount of free space in the given map with the
 * specified alignment.  Performs an address-ordered, first-fit search from
[... 82 lines elided ...]
 * prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, vm_offset_t max_addr, int find_space,
	    vm_prot_t prot, vm_prot_t max, int cow)
{
+	vm_map_entry_t entry;
	vm_offset_t alignment, curr_min_addr, min_addr;
	int gap, pidx, rv, try;
	bool cluster, en_aslr, update_anon;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
[... 18 lines elided ...]
	if (en_aslr && min_addr == 0 && !cluster &&
	    (map->flags & MAP_ASLR_IGNSTART) != 0)
		curr_min_addr = min_addr = vm_map_min(map);
	try = 0;
	vm_map_lock(map);
	if (cluster) {
		curr_min_addr = map->anon_loc;
		if (curr_min_addr == 0)
			cluster = false;
+		else if (!vm_map_lookup_entry(map, curr_min_addr, &entry))
+			curr_min_addr = entry->end;
	}
	if (find_space != VMFS_NO_SPACE) {
		KASSERT(find_space == VMFS_ANY_SPACE ||
		    find_space == VMFS_OPTIMAL_SPACE ||
		    find_space == VMFS_SUPER_SPACE ||
		    alignment != 0, ("unexpected VMFS flag"));
again:
		/*
[... 83 lines elided; still inside the block after "again:" ...]
	}
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
		    max, cow);
	} else {
		rv = vm_map_insert(map, object, offset, *addr, *addr + length,
		    prot, max, cow);
	}
-	if (rv == KERN_SUCCESS && update_anon)
+	if (rv == KERN_SUCCESS && update_anon && !clustering_anon_loc_const())
		map->anon_loc = *addr + length;
done:
	vm_map_unlock(map);
	return (rv);
}
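
To observe the clustering path above from userspace, one can map a few anonymous regions without a placement hint and compare the returned addresses; per the inline discussion, whether they come out adjacent depends on vm.cluster_anon and on whether a non-zero hint reaches vm_map_find(). A rough illustrative sketch (plain mmap(2), not part of the changeset):

```
/*
 * Map several anonymous regions with no placement hint and print where
 * the kernel put them.  Ascending, adjacent results suggest the
 * anon_loc clustering path is in use; scattered results suggest not.
 */
#include <sys/mman.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	const size_t len = 1024 * 1024;		/* 1 MB per mapping */
	void *p, *prev = NULL;
	int i;

	for (i = 0; i < 4; i++) {
		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			err(1, "mmap");
		printf("mapping %d at %p%s\n", i, p,
		    prev != NULL && (char *)p == (char *)prev + len ?
		    " (adjacent to previous)" : "");
		prev = p;
	}
	return (0);
}
```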

/*
 * vm_map_find_min() is a variant of vm_map_find() that takes an
[... 1,723 lines elided; context: for (; entry->start < end; entry = next_entry) { ...]
		/*
		 * Remove mappings for the pages, but only if the
		 * mappings could exist.  For instance, it does not
		 * make sense to call pmap_remove() for guard entries.
		 */
		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
		    entry->object.vm_object != NULL)
			pmap_map_delete(map->pmap, entry->start, entry->end);

-		if (entry->end == map->anon_loc)
+		if (entry->end == map->anon_loc&& !clustering_anon_loc_const())
			map->anon_loc = entry->start;

kib: This is fixed in my repo.

alc: Consider this alternative approach: Introduce an "update anon_loc on removal" flag on map entries. Set it in vm_map_find/insert() on anonymous mappings when cluster_anon is set to 2. Here, when the flag is set and the map entry is beneath the current anon_loc, reset the anon_loc to the end of the previous map entry. This should not only allow the recovery of more address ranges, but also eliminate the need for the new vm_map_lookup_entry() call in vm_map_find().

alc: The flag allows us to recognize cases where a clustered allocation is being freed even though it isn't the last allocation, and reset anon_loc to allow reallocation of that address range. That said, I'm having second thoughts about this approach.

		/*
		 * Delete the entry only after removing all pmap
		 * entries pointing to its pages.  (Otherwise, its
		 * page frames may be reallocated, and any modify bits
		 * will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry);
	}
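
The anon_loc rollback above, like the alternative alc sketches in the comments, is about letting a freed clustered range be handed out again. A small illustrative check from userspace (not part of the changeset): free the most recently created anonymous mapping and see whether the next anonymous mapping lands back in the freed range.

```
/*
 * Create two anonymous mappings, unmap the second (most recent) one,
 * then map again and check whether the freed range was reused.  With
 * an anon_loc rollback in the deletion path this is more likely;
 * without it the anonymous region keeps marching upward.
 */
#include <sys/mman.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	const size_t len = 1024 * 1024;
	void *a, *b, *c;

	a = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	b = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		err(1, "mmap");

	if (munmap(b, len) != 0)
		err(1, "munmap");

	c = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (c == MAP_FAILED)
		err(1, "mmap");

	printf("first %p, freed %p, new %p: %s\n", a, b, c,
	    c == b ? "freed range reused" : "freed range not reused");
	return (0);
}
```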
[... 1,387 lines elided ...]