Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/vm_map.c
Show First 20 Lines • Show All 2,279 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
vm_map_entry_t entry; | vm_map_entry_t entry; | ||||
vm_map_entry_t temp_entry; | vm_map_entry_t temp_entry; | ||||
switch (new_inheritance) { | switch (new_inheritance) { | ||||
case VM_INHERIT_NONE: | case VM_INHERIT_NONE: | ||||
case VM_INHERIT_COPY: | case VM_INHERIT_COPY: | ||||
case VM_INHERIT_SHARE: | case VM_INHERIT_SHARE: | ||||
case VM_INHERIT_ZERO: | |||||
break; | break; | ||||
default: | default: | ||||
return (KERN_INVALID_ARGUMENT); | return (KERN_INVALID_ARGUMENT); | ||||
} | } | ||||
if (start == end) | if (start == end) | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
VM_MAP_RANGE_CHECK(map, start, end); | VM_MAP_RANGE_CHECK(map, start, end); | ||||
▲ Show 20 Lines • Show All 1,141 Lines • ▼ Show 20 Lines | case VM_INHERIT_COPY: | ||||
new_entry->wired_count = 0; | new_entry->wired_count = 0; | ||||
new_entry->object.vm_object = NULL; | new_entry->object.vm_object = NULL; | ||||
new_entry->cred = NULL; | new_entry->cred = NULL; | ||||
vm_map_entry_link(new_map, new_map->header.prev, | vm_map_entry_link(new_map, new_map->header.prev, | ||||
new_entry); | new_entry); | ||||
vmspace_map_entry_forked(vm1, vm2, new_entry); | vmspace_map_entry_forked(vm1, vm2, new_entry); | ||||
vm_map_copy_entry(old_map, new_map, old_entry, | vm_map_copy_entry(old_map, new_map, old_entry, | ||||
new_entry, fork_charge); | new_entry, fork_charge); | ||||
break; | |||||
case VM_INHERIT_ZERO: | |||||
/* | |||||
* Create a new anonymous mapping entry modelled from | |||||
* the old one. | |||||
*/ | |||||
new_entry = vm_map_entry_create(new_map); | |||||
memset(new_entry, 0, sizeof(*new_entry)); | |||||
new_entry->start = old_entry->start; | |||||
new_entry->end = old_entry->end; | |||||
new_entry->avail_ssize = old_entry->avail_ssize; | |||||
new_entry->adj_free = old_entry->adj_free; | |||||
new_entry->max_free = old_entry->max_free; | |||||
new_entry->eflags = old_entry->eflags & | |||||
~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | | |||||
MAP_ENTRY_VN_WRITECNT); | |||||
new_entry->protection = old_entry->protection; | |||||
new_entry->max_protection = old_entry->max_protection; | |||||
new_entry->inheritance = VM_INHERIT_ZERO; | |||||
vm_map_entry_link(new_map, new_map->header.prev, | |||||
new_entry); | |||||
vmspace_map_entry_forked(vm1, vm2, new_entry); | |||||
new_entry->cred = curthread->td_ucred; | |||||
crhold(new_entry->cred); | |||||
*fork_charge += (new_entry->end - new_entry->start); | |||||
break; | break; | ||||
} | } | ||||
old_entry = old_entry->next; | old_entry = old_entry->next; | ||||
} | } | ||||
/* | /* | ||||
* Use inlined vm_map_unlock() to postpone handling the deferred | * Use inlined vm_map_unlock() to postpone handling the deferred | ||||
* map entries, which cannot be done until both old_map and | * map entries, which cannot be done until both old_map and | ||||
* new_map locks are released. | * new_map locks are released. | ||||
*/ | */ | ||||
sx_xunlock(&old_map->lock); | sx_xunlock(&old_map->lock); | ||||
kib: This comment is definitely not true. I suppose that the code was copied from the… | |||||
sx_xunlock(&new_map->lock); | sx_xunlock(&new_map->lock); | ||||
vm_map_process_deferred(); | vm_map_process_deferred(); | ||||
return (vm2); | return (vm2); | ||||
} | } | ||||
int | int | ||||
vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, | vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, | ||||
vm_prot_t prot, vm_prot_t max, int cow) | vm_prot_t prot, vm_prot_t max, int cow) | ||||
Done Inline ActionsHm, you also need avail_ssize, adj_free and max_free copied as well. Does it make sense to set stack to VM_INHERIT_ZERO ? kib: Hm, you also need avail_ssize, adj_free and max_free copied as well. Does it make sense to set… | |||||
{ | { | ||||
vm_size_t growsize, init_ssize; | vm_size_t growsize, init_ssize; | ||||
Not Done Inline ActionsThis is not the correct test. The entry charge may already have been migrated to the backing object. You should use ENTRY_CHARGED() to see if the entry is accounted for swap use. kib: This is not the correct test. The entry charge may already have been migrated to the backing object. | |||||
rlim_t lmemlim, vmemlim; | rlim_t lmemlim, vmemlim; | ||||
int rv; | int rv; | ||||
growsize = sgrowsiz; | growsize = sgrowsiz; | ||||
init_ssize = (max_ssize < growsize) ? max_ssize : growsize; | init_ssize = (max_ssize < growsize) ? max_ssize : growsize; | ||||
vm_map_lock(map); | vm_map_lock(map); | ||||
lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); | lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); | ||||
vmemlim = lim_cur(curthread, RLIMIT_VMEM); | vmemlim = lim_cur(curthread, RLIMIT_VMEM); | ||||
▲ Show 20 Lines • Show All 853 Lines • Show Last 20 Lines |
This comment is definitely not true. I suppose that the code was copied from the VM_INHERIT_COPY case. It might be more reasonable not to do the *new_entry = *old_entry assignment at all, since the only copied fields are start/end and (partially) flags.