Changeset View
Standalone View
sys/kern/kern_exec.c
Show First 20 Lines • Show All 1,104 Lines • ▼ Show 20 Lines | exec_free_abi_mappings(struct proc *p) | ||||
if (!PROC_HAS_SHP(p)) | if (!PROC_HAS_SHP(p)) | ||||
return; | return; | ||||
pmap_remove(vmspace_pmap(vmspace), vmspace->vm_shp_base, | pmap_remove(vmspace_pmap(vmspace), vmspace->vm_shp_base, | ||||
vmspace->vm_shp_base + p->p_sysent->sv_shared_page_len); | vmspace->vm_shp_base + p->p_sysent->sv_shared_page_len); | ||||
} | } | ||||
/* | /* | ||||
* Run down the current address space and install a new one. Map the shared | * Run down the current address space and install a new one. | ||||
* page. | |||||
*/ | */ | ||||
int | int | ||||
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv) | exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv) | ||||
{ | { | ||||
int error; | int error; | ||||
struct proc *p = imgp->proc; | struct proc *p = imgp->proc; | ||||
struct vmspace *vmspace = p->p_vmspace; | struct vmspace *vmspace = p->p_vmspace; | ||||
struct thread *td = curthread; | struct thread *td = curthread; | ||||
vm_object_t obj; | |||||
vm_offset_t sv_minuser; | vm_offset_t sv_minuser; | ||||
vm_map_t map; | vm_map_t map; | ||||
imgp->vmspace_destroyed = true; | imgp->vmspace_destroyed = true; | ||||
imgp->sysent = sv; | imgp->sysent = sv; | ||||
if (p->p_sysent->sv_onexec_old != NULL) | if (p->p_sysent->sv_onexec_old != NULL) | ||||
p->p_sysent->sv_onexec_old(td); | p->p_sysent->sv_onexec_old(td); | ||||
Show All 31 Lines | if (refcount_load(&vmspace->vm_refcnt) == 1 && | ||||
error = vmspace_exec(p, sv_minuser, sv->sv_maxuser); | error = vmspace_exec(p, sv_minuser, sv->sv_maxuser); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
vmspace = p->p_vmspace; | vmspace = p->p_vmspace; | ||||
map = &vmspace->vm_map; | map = &vmspace->vm_map; | ||||
} | } | ||||
map->flags |= imgp->map_flags; | map->flags |= imgp->map_flags; | ||||
/* Map a shared page */ | |||||
obj = sv->sv_shared_page_obj; | |||||
if (obj != NULL) { | |||||
vm_object_reference(obj); | |||||
error = vm_map_fixed(map, obj, 0, | |||||
sv->sv_shared_page_base, sv->sv_shared_page_len, | |||||
VM_PROT_READ | VM_PROT_EXECUTE, | |||||
VM_PROT_READ | VM_PROT_EXECUTE, | |||||
MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE); | |||||
if (error != KERN_SUCCESS) { | |||||
vm_object_deallocate(obj); | |||||
return (vm_mmap_to_errno(error)); | |||||
} | |||||
vmspace->vm_shp_base = sv->sv_shared_page_base; | |||||
} | |||||
return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0); | return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0); | ||||
} | } | ||||
/* | /* | ||||
* Compute the stack size limit and map the main process stack. | * Compute the stack size limit and map the main process stack. | ||||
* Map the shared page. | |||||
kib: Would it be useful to mention in the herald comment that this procedure installs shared page… | |||||
*/ | */ | ||||
int | int | ||||
exec_map_stack(struct image_params *imgp) | exec_map_stack(struct image_params *imgp) | ||||
{ | { | ||||
struct rlimit rlim_stack; | struct rlimit rlim_stack; | ||||
struct sysentvec *sv; | struct sysentvec *sv; | ||||
struct proc *p; | struct proc *p; | ||||
vm_map_t map; | vm_map_t map; | ||||
struct vmspace *vmspace; | struct vmspace *vmspace; | ||||
vm_offset_t stack_addr, stack_top; | vm_offset_t stack_addr, stack_top; | ||||
vm_offset_t sharedpage_addr, guard_addr; | |||||
u_long ssiz; | u_long ssiz; | ||||
int error, find_space, stack_off; | int error, find_space, stack_off; | ||||
vm_prot_t stack_prot; | vm_prot_t stack_prot; | ||||
vm_object_t obj; | |||||
p = imgp->proc; | p = imgp->proc; | ||||
sv = p->p_sysent; | sv = p->p_sysent; | ||||
if (imgp->stack_sz != 0) { | if (imgp->stack_sz != 0) { | ||||
ssiz = trunc_page(imgp->stack_sz); | ssiz = trunc_page(imgp->stack_sz); | ||||
PROC_LOCK(p); | PROC_LOCK(p); | ||||
lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack); | lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack); | ||||
Show All 35 Lines | exec_map_stack(struct image_params *imgp) | ||||
stack_top = stack_addr + ssiz; | stack_top = stack_addr + ssiz; | ||||
if ((map->flags & MAP_ASLR_STACK) != 0) { | if ((map->flags & MAP_ASLR_STACK) != 0) { | ||||
/* Randomize within the first page of the stack. */ | /* Randomize within the first page of the stack. */ | ||||
arc4rand(&stack_off, sizeof(stack_off), 0); | arc4rand(&stack_off, sizeof(stack_off), 0); | ||||
stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *)); | stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *)); | ||||
} | } | ||||
/* Map a shared page */ | |||||
obj = sv->sv_shared_page_obj; | |||||
if (obj == NULL) { | |||||
sharedpage_addr = 0; | |||||
goto out; | |||||
} | |||||
/* | /* | ||||
kib (inline comment, not done): If amd64_lower_shared_page() were applied, this would map the guard one page below the top of the UVA. | |||||
kd (inline comment, done): The problem here is that amd64_lower_shared_page() also lowers sv_maxuser, which is used to determine the top of the UVA. | |||||
* If randomization is disabled the vm logic maps it exactly | |||||
* to the specific address (VMFS_NO_SPACE). | |||||
* Otherwise it can choose any address above .data section. | |||||
* Same logic is used for stack address randomization. | |||||
* If the address randomization is applied map a guard page | |||||
* at the top of UVA. | |||||
*/ | |||||
if ((imgp->imgp_flags & IMGP_ASLR_SHARED_PAGE) != 0) { | |||||
sharedpage_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr + | |||||
lim_max(curthread, RLIMIT_DATA)); | |||||
find_space = VMFS_ANY_SPACE; | |||||
kib (inline comment, done): The guard should be one page long; there is no reason to use sv_shared_page_len there. | |||||
guard_addr = sv->sv_maxuser - PAGE_SIZE; | |||||
error = vm_map_find(map, NULL, 0, | |||||
kib (inline comment, unsubmitted, done): Why not use vm_map_fixed() there? IMO it is the right KPI, instead of _find(). | |||||
&guard_addr, PAGE_SIZE, | |||||
sv->sv_maxuser, VMFS_NO_SPACE, | |||||
VM_PROT_NONE, VM_PROT_NONE, MAP_CREATE_GUARD); | |||||
if (error != KERN_SUCCESS) { | |||||
/* | |||||
* This is not fatal, so let's just print a warning | |||||
* and continue. | |||||
*/ | |||||
uprintf("%s: Mapping guard page at the usual location" | |||||
"of the shared page mach error %d errno %d", | |||||
__func__, error, vm_mmap_to_errno(error)); | |||||
kib (inline comment, done): I suggest uprintf()-ing something in case of an error there, to give the user a hint. See other examples in imgact_elf.c etc. | |||||
} | |||||
} else { | |||||
sharedpage_addr = sv->sv_shared_page_base; | |||||
find_space = VMFS_NO_SPACE; | |||||
kib (inline comment, unsubmitted, not done): Same here — I think vm_map_fixed() is a better choice than _find(). The original code used _fixed(). | |||||
kd (author, unsubmitted, done): I used _find() here since that is how the stack is allocated; IMO it is better to be consistent.
kib (inline comment, unsubmitted, not done): I believe this is a good opportunity to use the proper KPI. vm_map_fixed() translates directly to vm_map_insert(), while vm_map_find() is convoluted even for VMFS_NO_SPACE: it prepares for anonymous clustering, adjusts for ASLR calculations, etc. vm_map_fixed() avoids both the CPU overhead and the cognitive load of recognizing VMFS_NO_SPACE. Other than that, I have no more comments on the patch. | |||||
} | |||||
vm_object_reference(obj); | |||||
error = vm_map_find(map, obj, 0, | |||||
&sharedpage_addr, sv->sv_shared_page_len, | |||||
sv->sv_maxuser, find_space, | |||||
VM_PROT_READ | VM_PROT_EXECUTE, | |||||
VM_PROT_READ | VM_PROT_EXECUTE, | |||||
MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE); | |||||
if (error != KERN_SUCCESS) { | |||||
uprintf("%s: mapping shared page at addr: %p" | |||||
"failed, mach error %d errno %d\n", __func__, | |||||
(void *)sharedpage_addr, error, vm_mmap_to_errno(error)); | |||||
vm_object_deallocate(obj); | |||||
return (vm_mmap_to_errno(error)); | |||||
} | |||||
out: | |||||
/* | |||||
* vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they | * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they | ||||
* are still used to enforce the stack rlimit on the process stack. | * are still used to enforce the stack rlimit on the process stack. | ||||
*/ | */ | ||||
vmspace->vm_maxsaddr = (char *)stack_addr; | vmspace->vm_maxsaddr = (char *)stack_addr; | ||||
vmspace->vm_stacktop = stack_top; | vmspace->vm_stacktop = stack_top; | ||||
vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT; | vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT; | ||||
vmspace->vm_shp_base = sharedpage_addr; | |||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Copy out argument and environment strings from the old process address | * Copy out argument and environment strings from the old process address | ||||
* space into the temporary string buffer. | * space into the temporary string buffer. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 775 Lines • Show Last 20 Lines |
kib: Would it be useful to mention in the herald comment that this procedure installs the shared-page mapping as well? It is less obvious now, given the unchanged function name.