diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -763,6 +763,87 @@
 	return (malloc_large(&size, mtp, ds, flags DEBUG_REDZONE_ARG));
 }
 
+/*
+ * Allocate 'size' bytes aligned to 'align' (which must be a non-zero
+ * power of 2) from the given domainset.  The pointer returned by the
+ * underlying malloc_domainset() is stashed in the word immediately
+ * preceding the returned address so that free_aligned() can recover
+ * it; under INVARIANTS the requested alignment is stashed below that
+ * word as well, for consistency checking at free time.
+ */
+void *
+malloc_domainset_aligned(size_t size, size_t align,
+    struct malloc_type *mtp, struct domainset *ds, int flags)
+{
+	char *mem, *res;
+	size_t addend;
+	uintptr_t x;
+
+	MPASS(align > 0 && (align & (align - 1)) == 0);
+
+	/* The stored back-pointer itself needs pointer alignment. */
+	if (align < sizeof(void *))
+		align = sizeof(void *);
+
+	/* Header (back-pointer [+ align word]) plus worst-case slack. */
+	addend = sizeof(void *) + align;
+#ifdef INVARIANTS
+	addend += sizeof(size_t);
+#endif
+	mem = malloc_domainset(size + addend, mtp, ds, flags);
+	if (mem == NULL)
+		return (NULL);
+
+	/*
+	 * Round up past the whole header (addend - align bytes), not
+	 * just past the back-pointer.  Rounding up from only
+	 * mem + sizeof(void *) would, under INVARIANTS, place the
+	 * stored alignment word below 'mem' whenever
+	 * mem + sizeof(void *) is already 'align'-aligned (always the
+	 * case for align == sizeof(void *)), corrupting memory that
+	 * precedes the allocation.
+	 */
+	x = roundup2((uintptr_t)mem + addend - align, align);
+	res = (void *)x;
+	x -= sizeof(void *);
+	memcpy((void *)x, &mem, sizeof(mem));
+#ifdef INVARIANTS
+	x -= sizeof(size_t);
+	memcpy((void *)x, &align, sizeof(size_t));
+#endif
+	return (res);
+}
+
+/*
+ * Free memory obtained from malloc_domainset_aligned().  Recovers the
+ * original malloc_domainset() pointer stored just below 'addr' and,
+ * under INVARIANTS, sanity-checks the stored alignment and offset.
+ */
+void
+free_aligned(void *addr, struct malloc_type *mtp)
+{
+	void *mem;
+	uintptr_t x;
+
+	if (addr == NULL)
+		return;
+	x = (uintptr_t)addr;
+	x -= sizeof(void *);
+	memcpy(&mem, (void *)x, sizeof(mem));
+#ifdef INVARIANTS
+	{
+		size_t align;
+
+		x -= sizeof(size_t);
+		memcpy(&align, (void *)x, sizeof(size_t));
+		if (align == 0 || (align & (align - 1)) != 0)
+			panic("free_aligned: bad align "
+			    "addr %p alloc %p align %#zx", addr, mem, align);
+		if ((uintptr_t)addr - (uintptr_t)mem > sizeof(void *) +
+		    sizeof(size_t) + align)
+			panic("free_aligned: addresses too far "
+			    "addr %p alloc %p align %#zx", addr, mem, align);
+	}
+#endif
+	free(mem, mtp);
+}
+
 void *
 mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
 {
diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -261,6 +261,10 @@
 	    __result_use_check __alloc_size(2);
 void	*reallocf(void *addr, size_t size, struct malloc_type *type,
 	    int flags) __result_use_check __alloc_size(2);
+void	*malloc_domainset_aligned(size_t size, size_t align,
+	    struct malloc_type *mtp, struct domainset *ds, int flags)
+	    __malloc_like __result_use_check __alloc_size(1);
+void	free_aligned(void *addr, struct malloc_type *mtp);
 
 struct malloc_type *malloc_desc2type(const char *desc);
 
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c
@@ -445,18 +445,13 @@
 	 * else allocate a block of contiguous pages because one or more of the
 	 * constraints is something that only the contig allocator can fulfill.
 	 *
-	 * NOTE: The (dmat->common.alignment <= dmat->maxsize) check
-	 * below is just a quick hack. The exact alignment guarantees
-	 * of malloc(9) need to be nailed down, and the code below
-	 * should be rewritten to take that into account.
-	 *
-	 * In the meantime warn the user if malloc gets it wrong.
+	 * Warn the user if we get it wrong.
 	 */
 	if (dmat->common.maxsize <= PAGE_SIZE &&
-	    dmat->common.alignment <= dmat->common.maxsize &&
 	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
 	    attr == VM_MEMATTR_DEFAULT) {
-		*vaddr = malloc_domainset(dmat->common.maxsize, M_DEVBUF,
+		*vaddr = malloc_domainset_aligned(dmat->common.maxsize,
+		    dmat->common.alignment, M_DEVBUF,
 		    DOMAINSET_PREF(dmat->common.domain), mflags);
 	} else if (dmat->common.nsegments >=
 	    howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
@@ -503,7 +503,7 @@
 	if (map != NULL)
 		panic("bus_dmamem_free: Invalid map freed\n");
 	if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
-		free(vaddr, M_DEVBUF);
+		free_aligned(vaddr, M_DEVBUF);
 	else
 		kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,