Index: share/man/man9/malloc.9
===================================================================
--- share/man/man9/malloc.9
+++ share/man/man9/malloc.9
@@ -29,7 +29,7 @@
 .\" $NetBSD: malloc.9,v 1.3 1996/11/11 00:05:11 lukem Exp $
 .\" $FreeBSD$
 .\"
-.Dd August 28, 2020
+.Dd October 30, 2020
 .Dt MALLOC 9
 .Os
 .Sh NAME
@@ -57,6 +57,8 @@
 .Fn reallocf "void *addr" "size_t size" "struct malloc_type *type" "int flags"
 .Ft size_t
 .Fn malloc_usable_size "const void *addr"
+.Ft void *
+.Fn malloc_exec "size_t size" "struct malloc_type *type" "int flags"
 .Fn MALLOC_DECLARE type
 .In sys/param.h
 .In sys/malloc.h
@@ -66,6 +68,8 @@
 .In sys/domainset.h
 .Ft void *
 .Fn malloc_domainset "size_t size" "struct malloc_type *type" "struct domainset *ds" "int flags"
+.Ft void *
+.Fn malloc_domainset_exec "size_t size" "struct malloc_type *type" "struct domainset *ds" "int flags"
 .Sh DESCRIPTION
 The
 .Fn malloc
@@ -82,6 +86,14 @@
 .Xr domainset 9
 for some example policies.
 .Pp
+Both
+.Fn malloc_exec
+and
+.Fn malloc_domainset_exec
+can be used to allocate executable memory.
+Not all platforms enforce a distinction between executable and
+non-executable memory.
+.Pp
 The
 .Fn mallocarray
 function allocates uninitialized memory in kernel address space for an
@@ -214,11 +226,6 @@
 .Dv M_NOWAIT
 when an allocation failure cannot be tolerated by the caller without
 catastrophic effects on the system.
-.It Dv M_EXEC
-Indicates that the system should allocate executable memory.
-If this flag is not set, the system will not allocate executable memory.
-Not all platforms enforce a distinction between executable and
-non-executable memory.
 .El
 .Pp
 Exactly one of either
Index: sys/amd64/amd64/bpf_jit_machdep.c
===================================================================
--- sys/amd64/amd64/bpf_jit_machdep.c
+++ sys/amd64/amd64/bpf_jit_machdep.c
@@ -602,7 +602,7 @@
                 *size = stream.cur_ip;
 
 #ifdef _KERNEL
-                stream.ibuf = malloc(*size, M_BPFJIT, M_EXEC | M_NOWAIT);
+                stream.ibuf = malloc_exec(*size, M_BPFJIT, M_NOWAIT);
                 if (stream.ibuf == NULL)
                         break;
 #else
Index: sys/i386/i386/bpf_jit_machdep.c
===================================================================
--- sys/i386/i386/bpf_jit_machdep.c
+++ sys/i386/i386/bpf_jit_machdep.c
@@ -632,7 +632,7 @@
                 *size = stream.cur_ip;
 
 #ifdef _KERNEL
-                stream.ibuf = malloc(*size, M_BPFJIT, M_EXEC | M_NOWAIT);
+                stream.ibuf = malloc_exec(*size, M_BPFJIT, M_NOWAIT);
                 if (stream.ibuf == NULL)
                         break;
 #else
Index: sys/kern/kern_malloc.c
===================================================================
--- sys/kern/kern_malloc.c
+++ sys/kern/kern_malloc.c
@@ -618,13 +618,14 @@
         unsigned long osize = size;
 #endif
 
+        MPASS((flags & M_EXEC) == 0);
 #ifdef MALLOC_DEBUG
         va = NULL;
         if (malloc_dbg(&va, &size, mtp, flags) != 0)
                 return (va);
 #endif
 
-        if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
+        if (size <= kmem_zmax) {
                 if (size & KMEM_ZMASK)
                         size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
                 indx = kmemsize[size >> KMEM_ZSHIFT];
@@ -640,10 +641,11 @@
                 va = malloc_large(&size, DOMAINSET_RR(), flags);
                 malloc_type_allocated(mtp, va == NULL ? 0 : size);
         }
-        if (flags & M_WAITOK)
-                KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
-        else if (va == NULL)
+        if (__predict_false(va == NULL)) {
+                KASSERT((flags & M_WAITOK) == 0,
+                    ("malloc(M_WAITOK) returned NULL"));
                 t_malloc_fail = time_uptime;
+        }
 #ifdef DEBUG_REDZONE
         if (va != NULL)
                 va = redzone_setup(va, osize);
 #endif
@@ -682,40 +684,102 @@
     int flags)
 {
         struct vm_domainset_iter di;
-        caddr_t ret;
+        caddr_t va;
         int domain;
         int indx;
 #if defined(DEBUG_REDZONE)
         unsigned long osize = size;
 #endif
 
+        MPASS((flags & M_EXEC) == 0);
 #ifdef MALLOC_DEBUG
-        ret= NULL;
-        if (malloc_dbg(&ret, &size, mtp, flags) != 0)
-                return (ret);
+        va = NULL;
+        if (malloc_dbg(&va, &size, mtp, flags) != 0)
+                return (va);
 #endif
-        if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
+        if (size <= kmem_zmax) {
                 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
                 do {
-                        ret = malloc_domain(&size, &indx, mtp, domain, flags);
-                } while (ret == NULL &&
+                        va = malloc_domain(&size, &indx, mtp, domain, flags);
+                } while (va == NULL &&
                     vm_domainset_iter_policy(&di, &domain) == 0);
-                malloc_type_zone_allocated(mtp, ret == NULL ? 0 : size, indx);
+                malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
         } else {
                 /* Policy is handled by kmem. */
-                ret = malloc_large(&size, ds, flags);
-                malloc_type_allocated(mtp, ret == NULL ? 0 : size);
+                va = malloc_large(&size, ds, flags);
+                malloc_type_allocated(mtp, va == NULL ? 0 : size);
         }
+        if (__predict_false(va == NULL)) {
+                KASSERT((flags & M_WAITOK) == 0,
+                    ("malloc(M_WAITOK) returned NULL"));
+                t_malloc_fail = time_uptime;
+        }
+#ifdef DEBUG_REDZONE
+        if (va != NULL)
+                va = redzone_setup(va, osize);
+#endif
+        return (va);
+}
 
-        if (flags & M_WAITOK)
-                KASSERT(ret != NULL, ("malloc(M_WAITOK) returned NULL"));
-        else if (ret == NULL)
+/*
+ * Allocate an executable area.
+ */
+void *
+malloc_exec(size_t size, struct malloc_type *mtp, int flags)
+{
+        caddr_t va;
+#if defined(DEBUG_REDZONE)
+        unsigned long osize = size;
+#endif
+
+        flags |= M_EXEC;
+#ifdef MALLOC_DEBUG
+        va = NULL;
+        if (malloc_dbg(&va, &size, mtp, flags) != 0)
+                return (va);
+#endif
+        va = malloc_large(&size, DOMAINSET_RR(), flags);
+        malloc_type_allocated(mtp, va == NULL ? 0 : size);
+        if (__predict_false(va == NULL)) {
+                KASSERT((flags & M_WAITOK) == 0,
+                    ("malloc(M_WAITOK) returned NULL"));
                 t_malloc_fail = time_uptime;
+        }
 #ifdef DEBUG_REDZONE
-        if (ret != NULL)
-                ret = redzone_setup(ret, osize);
+        if (va != NULL)
+                va = redzone_setup(va, osize);
 #endif
-        return (ret);
+        return ((void *) va);
+}
+
+void *
+malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
+    int flags)
+{
+        caddr_t va;
+#if defined(DEBUG_REDZONE)
+        unsigned long osize = size;
+#endif
+
+        flags |= M_EXEC;
+#ifdef MALLOC_DEBUG
+        va = NULL;
+        if (malloc_dbg(&va, &size, mtp, flags) != 0)
+                return (va);
+#endif
+        /* Policy is handled by kmem. */
+        va = malloc_large(&size, ds, flags);
+        malloc_type_allocated(mtp, va == NULL ? 0 : size);
+        if (__predict_false(va == NULL)) {
+                KASSERT((flags & M_WAITOK) == 0,
+                    ("malloc(M_WAITOK) returned NULL"));
+                t_malloc_fail = time_uptime;
+        }
+#ifdef DEBUG_REDZONE
+        if (va != NULL)
+                va = redzone_setup(va, osize);
+#endif
+        return (va);
 }
 
 void *
Index: sys/kern/link_elf.c
===================================================================
--- sys/kern/link_elf.c
+++ sys/kern/link_elf.c
@@ -1129,7 +1129,7 @@
                 goto out;
         }
 #else
-        mapbase = malloc(mapsize, M_LINKER, M_EXEC | M_WAITOK);
+        mapbase = malloc_exec(mapsize, M_LINKER, M_WAITOK);
 #endif
         ef->address = mapbase;
 
Index: sys/sys/malloc.h
===================================================================
--- sys/sys/malloc.h
+++ sys/sys/malloc.h
@@ -239,6 +239,11 @@
 void    *mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
             int flags) __malloc_like __result_use_check
             __alloc_size2(1, 2);
+void    *malloc_exec(size_t size, struct malloc_type *type, int flags) __malloc_like
+            __result_use_check __alloc_size(1);
+void    *malloc_domainset_exec(size_t size, struct malloc_type *type,
+            struct domainset *ds, int flags) __malloc_like __result_use_check
+            __alloc_size(1);
 void    malloc_init(void *);
 int     malloc_last_fail(void);
 void    malloc_type_allocated(struct malloc_type *type, unsigned long size);
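
For reviewers, a minimal usage sketch of the new KPI follows.  It is not part
of the patch: the M_JITBUF malloc type and the jit_* helpers are made up for
illustration, and only the signatures introduced above (malloc_exec,
malloc_domainset_exec) plus the existing free(9), DOMAINSET_PREF(), M_NOWAIT
and M_WAITOK interfaces are assumed.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/domainset.h>

static MALLOC_DEFINE(M_JITBUF, "jitbuf", "JIT code buffers");

/* Executable buffer, round-robin across NUMA domains; may return NULL. */
static void *
jit_alloc(size_t len)
{

        return (malloc_exec(len, M_JITBUF, M_NOWAIT));
}

/* Executable buffer preferring one domain; M_WAITOK sleeps and never fails. */
static void *
jit_alloc_domain(size_t len, int domain)
{

        return (malloc_domainset_exec(len, M_JITBUF, DOMAINSET_PREF(domain),
            M_WAITOK));
}

static void
jit_free(void *buf)
{

        free(buf, M_JITBUF);
}

The bpf_jit_machdep.c and link_elf.c hunks above are the in-tree equivalents
of jit_alloc() with M_NOWAIT and M_WAITOK respectively.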