Changeset View
Standalone View
sys/kern/imgact_elf.c
Show First 20 Lines • Show All 195 Lines • ▼ Show 20 Lines | |||||
* cases it will not be used anyway. This setting is valid only | * cases it will not be used anyway. This setting is valid only | ||||
* when ASLR is enabled and allows utilizing the bss grow region. | * when ASLR is enabled and allows utilizing the bss grow region. | ||||
*/ | */ | ||||
static int __elfN(aslr_honor_sbrk) = 0; | static int __elfN(aslr_honor_sbrk) = 0; | ||||
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW, | SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW, | ||||
&__elfN(aslr_honor_sbrk), 0, | &__elfN(aslr_honor_sbrk), 0, | ||||
__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used"); | __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used"); | ||||
static int __elfN(aslr_stack_gap) = 0; | static int __elfN(aslr_stack) = 1; | ||||
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack_gap, CTLFLAG_RW, | SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack, CTLFLAG_RWTUN, | ||||
&__elfN(aslr_stack_gap), 0, | &__elfN(aslr_stack), 0, | ||||
__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) | __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) | ||||
": maximum percentage of main stack to waste on a random gap"); | ": enable stack randomization"); | ||||
kib: This should be edited. | |||||
emasteUnsubmitted Done Inline Actionsstack address randomization emaste: stack address randomization | |||||
static int __elfN(sigfastblock) = 1; | static int __elfN(sigfastblock) = 1; | ||||
Done Inline ActionsWhy is it set to RD? First, COMPAT_43 is really meaningful for a.out binaries. Second, why disable toggling the stack_gap if 43 is configured? I am not questioning default values. kib: Why is it set to RD? First, COMPAT_43 is really meaningful for a.out binaries. Second, why… | |||||
Done Inline ActionsI was looking at i386_setup_lcall_gate(), which references the elf32 sysent, and I couldn't convince myself that it applied only to a.out binaries, so it seemed safer to simply disallow toggling entirely if COMPAT_43 is defined. I'm not sure about this. markj: I was looking at i386_setup_lcall_gate(), which references the elf32 sysent, and I couldn't… | |||||
Done Inline ActionsI think i386_setup_lcall_gate() references elf sysent due to combination of two issues:
I would be curious to see a real case of elf i386 binary using lcall $7,$0 instead of of int $0x80. Anyway, this cannot be an argument for keeping the control read-only. kib: I think i386_setup_lcall_gate() references elf sysent due to combination of two issues:
- a.out… | |||||
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock, | SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock, | ||||
CTLFLAG_RWTUN, &__elfN(sigfastblock), 0, | CTLFLAG_RWTUN, &__elfN(sigfastblock), 0, | ||||
"enable sigfastblock for new processes"); | "enable sigfastblock for new processes"); | ||||
static bool __elfN(allow_wx) = true; | static bool __elfN(allow_wx) = true; | ||||
SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx, | SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx, | ||||
CTLFLAG_RWTUN, &__elfN(allow_wx), 0, | CTLFLAG_RWTUN, &__elfN(allow_wx), 0, | ||||
"Allow pages to be mapped simultaneously writable and executable"); | "Allow pages to be mapped simultaneously writable and executable"); | ||||
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; | static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; | ||||
Not Done Inline ActionsShould it be renamed? E.g. stack_loc_randomize kib: Should it be renamed? E.g. stack_loc_randomize | |||||
Not Done Inline ActionsI think it should yes. We have a .pie_enable already, maybe .stack_enable? emaste: I think it should yes. We have a `.pie_enable` already, maybe `.stack_enable`? | |||||
Done Inline ActionsSo I wonder about the purpose of having a separate sysctl. When is it useful to disable only stack randomization, aside from debugging? If debugging is the only reason, then I would add a new sysctl under the debug OID instead, in kern_exec.c. Right now the value of stack_gap is actually ignored: exec_map_stack() only looks at MAP_ASLR and the image activator is not involved in the decision. To maintain this sysctl, we need some interface for image activators to indicate the desired policy, e.g., we could add a MAP_ASLR_STACK flag. markj: So I wonder about the purpose of having a separate sysctl. When is it useful to disable only… | |||||
Not Done Inline ActionsThe knob allows to ease diagnostic, for instance if the new stack randomization appears to have some rough edges. I do not like moving it under debug subtree, kern.elfXX.aslr provides good structuring and grouping of all related knobs. kib: The knob allows to ease diagnostic, for instance if the new stack randomization appears to have… | |||||
Done Inline ActionsOk, I will pass it along with a new map flag then. markj: Ok, I will pass it along with a new map flag then. | |||||
#define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a)) | #define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a)) | ||||
Elf_Brandnote __elfN(freebsd_brandnote) = { | Elf_Brandnote __elfN(freebsd_brandnote) = { | ||||
.hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), | .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), | ||||
.hdr.n_descsz = sizeof(int32_t), | .hdr.n_descsz = sizeof(int32_t), | ||||
.hdr.n_type = NT_FREEBSD_ABI_TAG, | .hdr.n_type = NT_FREEBSD_ABI_TAG, | ||||
.vendor = FREEBSD_ABI_VENDOR, | .vendor = FREEBSD_ABI_VENDOR, | ||||
▲ Show 20 Lines • Show All 1,067 Lines • ▼ Show 20 Lines | if ((sv->sv_flags & SV_ASLR) == 0 || | ||||
* If user does not care about sbrk, utilize the bss | * If user does not care about sbrk, utilize the bss | ||||
* grow region for mappings as well. We can select | * grow region for mappings as well. We can select | ||||
* the base for the image anywhere and still not suffer | * the base for the image anywhere and still not suffer | ||||
* from the fragmentation. | * from the fragmentation. | ||||
*/ | */ | ||||
if (!__elfN(aslr_honor_sbrk) || | if (!__elfN(aslr_honor_sbrk) || | ||||
(imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) | (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) | ||||
imgp->map_flags |= MAP_ASLR_IGNSTART; | imgp->map_flags |= MAP_ASLR_IGNSTART; | ||||
if (__elfN(aslr_stack)) | |||||
imgp->map_flags |= MAP_ASLR_STACK; | |||||
} | } | ||||
if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 && | if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 && | ||||
(imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) || | (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) || | ||||
(imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0) | (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0) | ||||
imgp->map_flags |= MAP_WXORX; | imgp->map_flags |= MAP_WXORX; | ||||
error = exec_new_vmspace(imgp, sv); | error = exec_new_vmspace(imgp, sv); | ||||
vmspace = imgp->proc->p_vmspace; | |||||
map = &vmspace->vm_map; | |||||
imgp->proc->p_sysent = sv; | imgp->proc->p_sysent = sv; | ||||
imgp->proc->p_elf_brandinfo = brand_info; | imgp->proc->p_elf_brandinfo = brand_info; | ||||
maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK); | vmspace = imgp->proc->p_vmspace; | ||||
if (mapsz >= maxv - vm_map_min(map)) { | map = &vmspace->vm_map; | ||||
maxv = sv->sv_usrstack; | |||||
Done Inline ActionsDid you intended to write maxv = sv->sv_usrstack - lim_max(td, RLIMIT_STACK) ? kib: Did you intended to write `maxv = sv->sv_usrstack - lim_max(td, RLIMIT_STACK)` ? | |||||
Done Inline ActionsI removed it on purpose, since with stack randomization there is no reason in principle to exclude that region(?). But it should at least be conditional on MAP_ASLR_STACK. markj: I removed it on purpose, since with stack randomization there is no reason in principle to… | |||||
if ((imgp->map_flags & MAP_ASLR_STACK) == 0) | |||||
maxv -= lim_max(td, RLIMIT_STACK); | |||||
if (error == 0 && mapsz >= maxv - vm_map_min(map)) { | |||||
uprintf("Excessive mapping size\n"); | uprintf("Excessive mapping size\n"); | ||||
error = ENOEXEC; | error = ENOEXEC; | ||||
} | } | ||||
if (error == 0 && et_dyn_addr == ET_DYN_ADDR_RAND) { | if (error == 0 && et_dyn_addr == ET_DYN_ADDR_RAND) { | ||||
KASSERT((map->flags & MAP_ASLR) != 0, | KASSERT((map->flags & MAP_ASLR) != 0, | ||||
("ET_DYN_ADDR_RAND but !MAP_ASLR")); | ("ET_DYN_ADDR_RAND but !MAP_ASLR")); | ||||
error = __CONCAT(rnd_, __elfN(base))(map, | error = __CONCAT(rnd_, __elfN(base))(map, | ||||
Show All 9 Lines | __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) | ||||
error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL); | error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL); | ||||
if (error != 0) | if (error != 0) | ||||
goto ret; | goto ret; | ||||
error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr); | error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr); | ||||
if (error != 0) | if (error != 0) | ||||
goto ret; | goto ret; | ||||
entry = (u_long)hdr->e_entry + et_dyn_addr; | |||||
/* | /* | ||||
* We load the dynamic linker where a userland call | * We load the dynamic linker where a userland call | ||||
* to mmap(0, ...) would put it. The rationale behind this | * to mmap(0, ...) would put it. The rationale behind this | ||||
* calculation is that it leaves room for the heap to grow to | * calculation is that it leaves room for the heap to grow to | ||||
* its maximum allowed size. | * its maximum allowed size. | ||||
*/ | */ | ||||
addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td, | addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td, | ||||
RLIMIT_DATA)); | RLIMIT_DATA)); | ||||
if ((map->flags & MAP_ASLR) != 0) { | if ((map->flags & MAP_ASLR) != 0) { | ||||
maxv1 = maxv / 2 + addr / 2; | maxv1 = maxv / 2 + addr / 2; | ||||
error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, | error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, | ||||
(MAXPAGESIZES > 1 && pagesizes[1] != 0) ? | (MAXPAGESIZES > 1 && pagesizes[1] != 0) ? | ||||
pagesizes[1] : pagesizes[0], &anon_loc); | pagesizes[1] : pagesizes[0], &anon_loc); | ||||
if (error != 0) | if (error != 0) | ||||
goto ret; | goto ret; | ||||
map->anon_loc = anon_loc; | map->anon_loc = anon_loc; | ||||
} else { | } else { | ||||
map->anon_loc = addr; | map->anon_loc = addr; | ||||
} | } | ||||
entry = (u_long)hdr->e_entry + et_dyn_addr; | |||||
imgp->entry_addr = entry; | imgp->entry_addr = entry; | ||||
if (interp != NULL) { | if (interp != NULL) { | ||||
VOP_UNLOCK(imgp->vp); | VOP_UNLOCK(imgp->vp); | ||||
if ((map->flags & MAP_ASLR) != 0) { | if ((map->flags & MAP_ASLR) != 0) { | ||||
/* Assume that interpreter fits into 1/4 of AS */ | /* Assume that interpreter fits into 1/4 of AS */ | ||||
maxv1 = maxv / 2 + addr / 2; | maxv1 = maxv / 2 + addr / 2; | ||||
error = __CONCAT(rnd_, __elfN(base))(map, addr, | error = __CONCAT(rnd_, __elfN(base))(map, addr, | ||||
maxv1, PAGE_SIZE, &addr); | maxv1, PAGE_SIZE, &addr); | ||||
} | } | ||||
if (error == 0) { | if (error == 0) { | ||||
error = __elfN(load_interp)(imgp, brand_info, interp, | error = __elfN(load_interp)(imgp, brand_info, interp, | ||||
&addr, &imgp->entry_addr); | &addr, &imgp->entry_addr); | ||||
} | } | ||||
vn_lock(imgp->vp, LK_SHARED | LK_RETRY); | vn_lock(imgp->vp, LK_SHARED | LK_RETRY); | ||||
if (error != 0) | if (error != 0) | ||||
goto ret; | goto ret; | ||||
} else | } else | ||||
addr = et_dyn_addr; | addr = et_dyn_addr; | ||||
error = exec_map_stack(imgp); | |||||
if (error != 0) | |||||
goto ret; | |||||
/* | /* | ||||
* Construct auxargs table (used by the copyout_auxargs routine) | * Construct auxargs table (used by the copyout_auxargs routine) | ||||
*/ | */ | ||||
elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT); | elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT); | ||||
if (elf_auxargs == NULL) { | if (elf_auxargs == NULL) { | ||||
VOP_UNLOCK(imgp->vp); | VOP_UNLOCK(imgp->vp); | ||||
elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); | elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); | ||||
vn_lock(imgp->vp, LK_SHARED | LK_RETRY); | vn_lock(imgp->vp, LK_SHARED | LK_RETRY); | ||||
▲ Show 20 Lines • Show All 1,116 Lines • ▼ Show 20 Lines | __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep) | ||||
int structsize; | int structsize; | ||||
p = arg; | p = arg; | ||||
size = sizeof(structsize) + sizeof(ps_strings); | size = sizeof(structsize) + sizeof(ps_strings); | ||||
if (sb != NULL) { | if (sb != NULL) { | ||||
KASSERT(*sizep == size, ("invalid size")); | KASSERT(*sizep == size, ("invalid size")); | ||||
structsize = sizeof(ps_strings); | structsize = sizeof(ps_strings); | ||||
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 | #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 | ||||
ps_strings = PTROUT(p->p_sysent->sv_psstrings); | ps_strings = PTROUT(PROC_PS_STRINGS(p)); | ||||
#else | #else | ||||
ps_strings = p->p_sysent->sv_psstrings; | ps_strings = PROC_PS_STRINGS(p); | ||||
#endif | #endif | ||||
sbuf_bcat(sb, &structsize, sizeof(structsize)); | sbuf_bcat(sb, &structsize, sizeof(structsize)); | ||||
sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); | sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); | ||||
} | } | ||||
*sizep = size; | *sizep = size; | ||||
} | } | ||||
static void | static void | ||||
▲ Show 20 Lines • Show All 221 Lines • ▼ Show 20 Lines | __elfN(untrans_prot)(vm_prot_t prot) | ||||
flags = 0; | flags = 0; | ||||
if (prot & VM_PROT_EXECUTE) | if (prot & VM_PROT_EXECUTE) | ||||
flags |= PF_X; | flags |= PF_X; | ||||
if (prot & VM_PROT_READ) | if (prot & VM_PROT_READ) | ||||
flags |= PF_R; | flags |= PF_R; | ||||
if (prot & VM_PROT_WRITE) | if (prot & VM_PROT_WRITE) | ||||
flags |= PF_W; | flags |= PF_W; | ||||
return (flags); | return (flags); | ||||
} | |||||
vm_size_t | |||||
__elfN(stackgap)(struct image_params *imgp, uintptr_t *stack_base) | |||||
{ | |||||
uintptr_t range, rbase, gap; | |||||
int pct; | |||||
pct = __elfN(aslr_stack_gap); | |||||
if (pct == 0) | |||||
return (0); | |||||
if (pct > 50) | |||||
pct = 50; | |||||
range = imgp->eff_stack_sz * pct / 100; | |||||
arc4rand(&rbase, sizeof(rbase), 0); | |||||
gap = rbase % range; | |||||
gap &= ~(sizeof(u_long) - 1); | |||||
*stack_base -= gap; | |||||
return (gap); | |||||
} | } |
This should be edited.