Index: share/man/man9/malloc.9
===================================================================
--- share/man/man9/malloc.9
+++ share/man/man9/malloc.9
@@ -29,7 +29,7 @@
 .\" $NetBSD: malloc.9,v 1.3 1996/11/11 00:05:11 lukem Exp $
 .\" $FreeBSD$
 .\"
-.Dd January 24, 2018
+.Dd June 7, 2018
 .Dt MALLOC 9
 .Os
 .Sh NAME
@@ -281,6 +281,9 @@
 For larger requests, one or more pages is allocated.
 While it should not be relied upon, this information may be useful for
 optimizing the efficiency of memory use.
+.Pp
+The memory that these allocation calls return is not guaranteed to be
+executable on all platforms.
 .Sh RETURN VALUES
 The
 .Fn malloc ,
Index: share/man/man9/zone.9
===================================================================
--- share/man/man9/zone.9
+++ share/man/man9/zone.9
@@ -25,7 +25,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd April 26, 2017
+.Dd June 7, 2018
 .Dt ZONE 9
 .Os
 .Sh NAME
@@ -375,6 +375,8 @@
 and
 .Dv M_NOWAIT
 was specified.
+The memory that these allocation calls return is not guaranteed to be
+executable on all platforms.
 .Sh SEE ALSO
 .Xr malloc 9
 .Sh HISTORY
Index: sys/amd64/amd64/bpf_jit_machdep.c
===================================================================
--- sys/amd64/amd64/bpf_jit_machdep.c
+++ sys/amd64/amd64/bpf_jit_machdep.c
@@ -609,7 +609,7 @@
 		 * We cannot use malloc(9) because DMAP is mapped as NX.
 		 */
 		stream.ibuf = (void *)kmem_malloc(kernel_arena, *size,
-		    M_NOWAIT);
+		    M_EXEC | M_NOWAIT);
 		if (stream.ibuf == NULL)
 			break;
 #else
Index: sys/kern/kern_malloc.c
===================================================================
--- sys/kern/kern_malloc.c
+++ sys/kern/kern_malloc.c
@@ -558,6 +558,8 @@
 	unsigned long osize = size;
 #endif
 
+	KASSERT((flags & M_EXEC) == 0, ("malloc: called with M_EXEC"));
+
 #ifdef MALLOC_DEBUG
 	va = NULL;
 	if (malloc_dbg(&va, &size, mtp, flags) != 0)
Index: sys/sys/malloc.h
===================================================================
--- sys/sys/malloc.h
+++ sys/sys/malloc.h
@@ -49,7 +49,7 @@
 #define	MINALLOCSIZE	UMA_SMALLEST_UNIT
 
 /*
- * flags to malloc.
+ * Flags to memory allocation functions.
  */
 #define	M_NOWAIT	0x0001		/* do not block */
 #define	M_WAITOK	0x0002		/* ok to block */
@@ -59,6 +59,7 @@
 #define	M_NODUMP	0x0800		/* don't dump pages in this allocation */
 #define	M_FIRSTFIT	0x1000		/* Only for vmem, fast fit. */
 #define	M_BESTFIT	0x2000		/* Only for vmem, low fragmentation. */
+#define	M_EXEC		0x4000		/* allocate executable space. */
 
 #define	M_MAGIC		877983977	/* time when first defined :-) */
 
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -2217,6 +2217,7 @@
 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
 	}
+	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
 
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -175,6 +175,7 @@
 	vm_object_t object = kernel_object;
 	vm_offset_t addr, i, offset;
 	vm_page_t m;
+	vm_prot_t prot;
 	int pflags, tries;
 
 	size = round_page(size);
@@ -185,6 +186,7 @@
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	pflags |= VM_ALLOC_NOWAIT;
+	prot = (flags & M_EXEC) ? VM_PROT_ALL : VM_PROT_RW;
 	VM_OBJECT_WLOCK(object);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		tries = 0;
@@ -212,8 +214,8 @@
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		m->valid = VM_PAGE_BITS_ALL;
-		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
-		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
+		pmap_enter(kernel_pmap, addr + i, m, prot,
+		    prot | PMAP_ENTER_WIRED, 0);
 	}
 	VM_OBJECT_WUNLOCK(object);
 	return (addr);
@@ -258,6 +260,7 @@
 	vm_object_t object = kernel_object;
 	vm_offset_t addr, offset, tmp;
 	vm_page_t end_m, m;
+	vm_prot_t prot;
 	u_long npages;
 	int pflags, tries;
 
@@ -294,12 +297,13 @@
 	    vm_phys_domain(m), domain));
 	end_m = m + npages;
 	tmp = addr;
+	prot = (flags & M_EXEC) ? VM_PROT_ALL : VM_PROT_RW;
 	for (; m < end_m; m++) {
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		m->valid = VM_PAGE_BITS_ALL;
-		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
-		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
+		pmap_enter(kernel_pmap, tmp, m, prot,
+		    prot | PMAP_ENTER_WIRED, 0);
 		tmp += PAGE_SIZE;
 	}
 	VM_OBJECT_WUNLOCK(object);
@@ -422,6 +426,7 @@
 {
 	vm_offset_t offset, i;
 	vm_page_t m, mpred;
+	vm_prot_t prot;
 	int pflags;
 
 	KASSERT(object == kernel_object,
@@ -432,6 +437,7 @@
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	if (flags & M_WAITOK)
 		pflags |= VM_ALLOC_WAITFAIL;
+	prot = (flags & M_EXEC) ? VM_PROT_ALL : VM_PROT_RW;
 
 	i = 0;
 	VM_OBJECT_WLOCK(object);
@@ -461,8 +467,8 @@
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
 		m->valid = VM_PAGE_BITS_ALL;
-		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
-		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
+		pmap_enter(kernel_pmap, addr + i, m, prot,
+		    prot | PMAP_ENTER_WIRED, 0);
 	}
 	VM_OBJECT_WUNLOCK(object);
 
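
Note for reviewers: a minimal sketch of how a consumer would request executable kernel
memory once this change lands. Plain malloc(9) and uma_zalloc(9) now assert against
M_EXEC, so callers go through kmem_malloc() with the flag set, which maps the pages
with VM_PROT_ALL instead of VM_PROT_RW. The sketch assumes the arena-based
kmem_malloc()/kmem_free() signatures used in the bpf_jit_machdep.c hunk above;
alloc_executable_buf() and free_executable_buf() are hypothetical helper names and
are not part of this patch.

/*
 * Hypothetical helpers showing M_EXEC usage by a JIT-style consumer.
 */
#include <sys/param.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

static void *
alloc_executable_buf(vm_size_t size)
{

	/* M_EXEC | M_NOWAIT mirrors the bpf_jit_machdep.c hunk above. */
	return ((void *)kmem_malloc(kernel_arena, size, M_EXEC | M_NOWAIT));
}

static void
free_executable_buf(void *buf, vm_size_t size)
{

	kmem_free(kernel_arena, (vm_offset_t)buf, size);
}

Without M_EXEC the same pages are now entered with VM_PROT_RW, so any code that
previously relied on kernel allocations being executable must pass the flag explicitly.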