diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -53,6 +53,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/asan.h>
 #include <sys/kdb.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
@@ -110,7 +111,7 @@
 #define	MALLOC_DEBUG	1
 #endif
 
-#ifdef DEBUG_REDZONE
+#if defined(KASAN) || defined(DEBUG_REDZONE)
 #define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
 #define	DEBUG_REDZONE_ARG	, osize
 #else
@@ -603,11 +604,12 @@
 	if (__predict_false(va == NULL)) {
 		KASSERT((flags & M_WAITOK) == 0,
 		    ("malloc(M_WAITOK) returned NULL"));
-	}
+	} else {
 #ifdef DEBUG_REDZONE
-	if (va != NULL)
 		va = redzone_setup(va, osize);
 #endif
+		kasan_mark((void *)va, osize, sz, KASAN_MALLOC_REDZONE);
+	}
 
 	return (va);
 }
@@ -633,7 +635,7 @@
 	int indx;
 	caddr_t va;
 	uma_zone_t zone;
-#ifdef DEBUG_REDZONE
+#if defined(DEBUG_REDZONE) || defined(KASAN)
 	unsigned long osize = size;
 #endif
 
@@ -664,6 +666,10 @@
 #ifdef DEBUG_REDZONE
 	if (va != NULL)
 		va = redzone_setup(va, osize);
+#endif
+#ifdef KASAN
+	if (va != NULL)
+		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
 #endif
 	return ((void *) va);
 }
@@ -699,7 +705,7 @@
 	caddr_t va;
 	int domain;
 	int indx;
-#ifdef DEBUG_REDZONE
+#if defined(KASAN) || defined(DEBUG_REDZONE)
 	unsigned long osize = size;
 #endif
 
@@ -727,6 +733,10 @@
 #ifdef DEBUG_REDZONE
 	if (va != NULL)
 		va = redzone_setup(va, osize);
+#endif
+#ifdef KASAN
+	if (va != NULL)
+		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
 #endif
 	return (va);
 }
@@ -745,7 +755,7 @@
 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
     int flags)
 {
-#ifdef DEBUG_REDZONE
+#if defined(DEBUG_REDZONE) || defined(KASAN)
 	unsigned long osize = size;
 #endif
 #ifdef MALLOC_DEBUG
@@ -815,7 +825,7 @@
 	return (malloc_domainset(size * nmemb, type, ds, flags));
 }
 
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
 static void
 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
 {
@@ -896,7 +906,7 @@
 	if (__predict_true(!malloc_large_slab(slab))) {
 		size = zone->uz_size;
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
 		free_save_type(addr, mtp, size);
 #endif
 		uma_zfree_arg(zone, addr, slab);
@@ -936,13 +946,15 @@
 	if (__predict_true(!malloc_large_slab(slab))) {
 		size = zone->uz_size;
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
 		free_save_type(addr, mtp, size);
 #endif
+		kasan_mark(addr, size, size, 0);
 		explicit_bzero(addr, size);
 		uma_zfree_arg(zone, addr, slab);
 	} else {
 		size = malloc_large_size(slab);
+		kasan_mark(addr, size, size, 0);
 		explicit_bzero(addr, size);
 		free_large(addr, size);
 	}
@@ -997,16 +1009,22 @@
 		alloc = malloc_large_size(slab);
 
 	/* Reuse the original block if appropriate */
-	if (size <= alloc
-	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
+	if (size <= alloc &&
+	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
+		kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
 		return (addr);
+	}
 #endif /* !DEBUG_REDZONE */
 
 	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
 		return (NULL);
 
-	/* Copy over original contents */
+	/*
+	 * Copy over original contents. For KASAN, the redzone must be marked
+	 * valid before performing the copy.
+	 */
+	kasan_mark(addr, size, size, 0);
 	bcopy(addr, newaddr, min(size, alloc));
 	free(addr, mtp);
 	return (newaddr);
 }
@@ -1207,7 +1225,7 @@
 		for (subzone = 0; subzone < numzones; subzone++) {
 			kmemzones[indx].kz_zone[subzone] =
 			    uma_zcreate(name, size,
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
 			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
 #else
 			    NULL, NULL, NULL, NULL,
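
Note on the kasan_mark() convention used throughout this change: kasan_mark(addr, size, redzsize, code) keeps the first size bytes at addr accessible and poisons the tail [addr + size, addr + redzsize) with the given redzone code. The allocation paths therefore pass the caller-requested size, the full allocation size, and KASAN_MALLOC_REDZONE, while zfree() and realloc() call kasan_mark(addr, size, size, 0) to make the whole buffer addressable again before explicit_bzero() or bcopy() touches it. Below is a minimal sketch of the allocation-side pattern, assuming only the sys/asan.h interface used by the patch; mark_malloc_buffer is a hypothetical helper for illustration and is not part of the diff.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>

/*
 * Hypothetical helper (illustration only): after allocating allocsize
 * bytes to satisfy a request for reqsize bytes, keep the usable prefix
 * addressable and turn the slack at the end into a KASAN redzone.
 */
static inline void *
mark_malloc_buffer(void *va, size_t reqsize, size_t allocsize)
{
	if (va != NULL) {
		/*
		 * [va, va + reqsize) stays accessible;
		 * accesses to [va + reqsize, va + allocsize) are
		 * reported as KASAN_MALLOC_REDZONE violations.
		 */
		kasan_mark(va, reqsize, allocsize, KASAN_MALLOC_REDZONE);
	}
	return (va);
}

The inverse step on the free path is kasan_mark(addr, size, size, 0), which the patch performs before explicit_bzero() and before the realloc() copy so that those internal accesses do not trip the redzone that was installed at allocation time.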