diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -768,20 +768,31 @@
     struct malloc_type *mtp, struct domainset *ds, int flags)
 {
 	void *res;
+	size_t asize;
 
 	KASSERT(align != 0 && powerof2(align),
 	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
 	    align, size));
-	KASSERT(align <= kmemzones[nitems(kmemzones) - 2].kz_size,
+	KASSERT(align <= PAGE_SIZE,
 	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
 	    align, size));
 
-	if (size < align)
-		size = align;
-	res = malloc_domainset(size, mtp, ds, flags);
+	/*
+	 * Round the allocation size up to the next power of 2,
+	 * because we can only guarantee alignment for
+	 * power-of-2-sized allocations.  Further increase the
+	 * allocation size to align if the rounded size is less than
+	 * the align, since malloc zones provide alignment equal to
+	 * size.
+	 */
+	asize = size == 0 ? align : 1UL << flsl(size - 1);
+	if (asize < align)
+		asize = align;
+
+	res = malloc_domainset(asize, mtp, ds, flags);
 	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
 	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
-	    "align %#zx", res, size, align));
+	    "allocsize %#zx align %#zx", res, size, asize, align));
 	return (res);
 }
 
@@ -1173,7 +1184,7 @@
 
 		align = UMA_ALIGN_PTR;
 		if (powerof2(size) && size > sizeof(void *))
-			align = size - 1;
+			align = MIN(size, PAGE_SIZE) - 1;
 		for (subzone = 0; subzone < numzones; subzone++) {
 			kmemzones[indx].kz_zone[subzone] =
 			    uma_zcreate(name, size,
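
For reference, a minimal userland sketch of the new sizing logic, assuming the BSD flsl() from <strings.h> (available in FreeBSD libc); the helper name aligned_alloc_size() and the main() driver are illustrative only and are not part of the patch:

/*
 * Sketch: compute the size that malloc_domainset_aligned() would pass
 * to malloc_domainset() after this change.  flsl(x) returns the 1-based
 * index of the most significant set bit, so for size > 1,
 * 1UL << flsl(size - 1) is the smallest power of 2 >= size.
 */
#include <strings.h>	/* flsl() */
#include <stdio.h>

static size_t
aligned_alloc_size(size_t size, size_t align)	/* hypothetical helper */
{
	size_t asize;

	asize = size == 0 ? align : 1UL << flsl(size - 1);
	if (asize < align)
		asize = align;
	return (asize);
}

int
main(void)
{
	printf("%zu\n", aligned_alloc_size(100, 64));	/* 128 */
	printf("%zu\n", aligned_alloc_size(8, 64));	/* 64 */
	printf("%zu\n", aligned_alloc_size(0, 32));	/* 32 */
	return (0);
}

The second hunk caps the malloc zone alignment at PAGE_SIZE - 1, which is consistent with the new KASSERT that only accepts align values up to PAGE_SIZE.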