Index: sys/kern/kern_malloc.c
===================================================================
--- sys/kern/kern_malloc.c
+++ sys/kern/kern_malloc.c
@@ -53,6 +53,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/asan.h>
 #include <sys/kdb.h>
 #include <sys/boot.h>
 #include <sys/busdma_bufalloc.h>
 #include <sys/eventhandler.h>
@@ -110,7 +111,7 @@
 #define	MALLOC_DEBUG	1
 #endif
 
-#ifdef DEBUG_REDZONE
+#if defined(KASAN) || defined(DEBUG_REDZONE)
 #define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
 #define	DEBUG_REDZONE_ARG	, osize
 #else
@@ -603,11 +604,12 @@
 	if (__predict_false(va == NULL)) {
 		KASSERT((flags & M_WAITOK) == 0,
 		    ("malloc(M_WAITOK) returned NULL"));
-	}
+	} else {
 #ifdef DEBUG_REDZONE
-	if (va != NULL)
 		va = redzone_setup(va, osize);
 #endif
+		kasan_mark((void *)va, osize, sz, KASAN_MALLOC_REDZONE);
+	}
 	return (va);
 }
 
@@ -633,7 +635,7 @@
 	int indx;
 	caddr_t va;
 	uma_zone_t zone;
-#ifdef DEBUG_REDZONE
+#if defined(DEBUG_REDZONE) || defined(KASAN)
 	unsigned long osize = size;
 #endif
 
@@ -664,6 +666,10 @@
 #ifdef DEBUG_REDZONE
 	if (va != NULL)
 		va = redzone_setup(va, osize);
+#endif
+#ifdef KASAN
+	if (va != NULL)
+		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
 #endif
 	return ((void *) va);
 }
@@ -699,7 +705,7 @@
 	caddr_t va;
 	int domain;
 	int indx;
-#ifdef DEBUG_REDZONE
+#if defined(KASAN) || defined(DEBUG_REDZONE)
 	unsigned long osize = size;
 #endif
 
@@ -727,6 +733,10 @@
 #ifdef DEBUG_REDZONE
 	if (va != NULL)
 		va = redzone_setup(va, osize);
+#endif
+#ifdef KASAN
+	if (va != NULL)
+		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
 #endif
 	return (va);
 }
@@ -745,7 +755,7 @@
 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
     int flags)
 {
-#ifdef DEBUG_REDZONE
+#if defined(DEBUG_REDZONE) || defined(KASAN)
 	unsigned long osize = size;
 #endif
 #ifdef MALLOC_DEBUG
@@ -815,7 +825,7 @@
 	return (malloc_domainset(size * nmemb, type, ds, flags));
 }
 
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
 static void
 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
 {
@@ -896,7 +906,7 @@
 
 	if (__predict_true(!malloc_large_slab(slab))) {
 		size = zone->uz_size;
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
 		free_save_type(addr, mtp, size);
 #endif
 		uma_zfree_arg(zone, addr, slab);
@@ -936,13 +946,15 @@
 
 	if (__predict_true(!malloc_large_slab(slab))) {
 		size = zone->uz_size;
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
 		free_save_type(addr, mtp, size);
 #endif
+		kasan_mark(addr, size, size, 0);
 		explicit_bzero(addr, size);
 		uma_zfree_arg(zone, addr, slab);
 	} else {
 		size = malloc_large_size(slab);
+		kasan_mark(addr, size, size, 0);
 		explicit_bzero(addr, size);
 		free_large(addr, size);
 	}
@@ -997,16 +1009,22 @@
 		alloc = malloc_large_size(slab);
 
 	/* Reuse the original block if appropriate */
-	if (size <= alloc
-	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
+	if (size <= alloc &&
+	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
+		kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
 		return (addr);
+	}
 #endif /* !DEBUG_REDZONE */
 
 	/* Allocate a new, bigger (or smaller) block */
 	if ((newaddr = malloc(size, mtp, flags)) == NULL)
 		return (NULL);
 
-	/* Copy over original contents */
+	/*
+	 * Copy over original contents. For KASAN, the redzone must be marked
+	 * valid before performing the copy.
+	 */
+	kasan_mark(addr, size, size, 0);
 	bcopy(addr, newaddr, min(size, alloc));
 	free(addr, mtp);
 	return (newaddr);
@@ -1207,7 +1225,7 @@
 		for (subzone = 0; subzone < numzones; subzone++) {
 			kmemzones[indx].kz_zone[subzone] =
 			    uma_zcreate(name, size,
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
 			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
 #else
 			    NULL, NULL, NULL, NULL,
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -171,6 +171,17 @@
 	vmem_free(kernel_arena, addr, size);
 }
 
+static vm_size_t
+kmem_alloc_size(vm_size_t size)
+{
+	size = round_page(size);
+#ifdef KASAN
+	/* Provide a red zone. */
+	size += PAGE_SIZE;
+#endif
+	return (size);
+}
+
 static vm_page_t
 kmem_alloc_contig_pages(vm_object_t object, vm_pindex_t pindex, int domain,
     int pflags, u_long npages, vm_paddr_t low, vm_paddr_t high,
@@ -226,7 +237,7 @@
 	vm_prot_t prot;
 
 	object = kernel_object;
-	asize = round_page(size);
+	asize = kmem_alloc_size(size);
 	vmem = vm_dom[domain].vmd_kernel_arena;
 	if (vmem_alloc(vmem, asize, M_BESTFIT | flags, &addr))
 		return (0);
@@ -307,7 +318,7 @@
 	int pflags;
 
 	object = kernel_object;
-	asize = round_page(size);
+	asize = kmem_alloc_size(size);
 	vmem = vm_dom[domain].vmd_kernel_arena;
 	if (vmem_alloc(vmem, asize, flags | M_BESTFIT, &addr))
 		return (0);
@@ -419,7 +430,7 @@
 		arena = vm_dom[domain].vmd_kernel_arena;
 	else
 		arena = vm_dom[domain].vmd_kernel_rwx_arena;
-	asize = round_page(size);
+	asize = kmem_alloc_size(size);
 	if (vmem_alloc(arena, asize, flags | M_BESTFIT, &addr))
 		return (0);
 
@@ -619,7 +630,7 @@
 {
 	struct vmem *arena;
 
-	size = round_page(size);
+	size = kmem_alloc_size(size);
 	kasan_mark((void *)addr, size, size, 0);
 	arena = _kmem_unback(kernel_object, addr, size);
 	if (arena != NULL)