Index: ObsoleteFiles.inc =================================================================== --- ObsoleteFiles.inc +++ ObsoleteFiles.inc @@ -38,6 +38,8 @@ # xargs -n1 | sort | uniq -d; # done +# 20181028: malloc_domain(9) KPI change +OLD_FILES+=usr/share/man/man9/malloc_domain.9.gz # 20181026: joy(4) removal OLD_FILES+=usr/share/man/man4/joy.4.gz # 20181025: OpenSSL libraries version bump to avoid conflict with ports Index: share/man/man9/Makefile =================================================================== --- share/man/man9/Makefile +++ share/man/man9/Makefile @@ -812,7 +812,8 @@ MLINKS+=config_intrhook.9 config_intrhook_disestablish.9 \ config_intrhook.9 config_intrhook_establish.9 \ config_intrhook.9 config_intrhook_oneshot.9 -MLINKS+=contigmalloc.9 contigfree.9 +MLINKS+=contigmalloc.9 contigmalloc_domainset.9 \ + contigmalloc.9 contigfree.9 MLINKS+=casuword.9 casueword.9 \ casuword.9 casueword32.9 \ casuword.9 casuword32.9 @@ -1289,7 +1290,7 @@ make_dev.9 make_dev_p.9 \ make_dev.9 make_dev_s.9 MLINKS+=malloc.9 free.9 \ - malloc.9 malloc_domain.9 \ + malloc.9 malloc_domainset.9 \ malloc.9 free_domain.9 \ malloc.9 mallocarray.9 \ malloc.9 MALLOC_DECLARE.9 \ Index: share/man/man9/contigmalloc.9 =================================================================== --- share/man/man9/contigmalloc.9 +++ share/man/man9/contigmalloc.9 @@ -50,6 +50,19 @@ .Fa "unsigned long size" .Fa "struct malloc_type *type" .Fc +.In sys/param.h +.In sys/domainset.h +.Ft "void *" +.Fo contigmalloc_domainset +.Fa "unsigned long size" +.Fa "struct malloc_type *type" +.Fa "struct domainset *ds" +.Fa "int flags" +.Fa "vm_paddr_t low" +.Fa "vm_paddr_t high" +.Fa "unsigned long alignment" +.Fa "vm_paddr_t boundary" +.Fc .Sh DESCRIPTION The .Fn contigmalloc @@ -70,6 +83,15 @@ bytes allocated from the kernel virtual address (KVA) map. .Pp The +.Fn contigmalloc_domainset +variant allows the caller to additionally specify a +.Xr numa 4 +domain selection policy. 
+See +.Xr domainset 9 +for some example policies. +.Pp +The .Fa flags parameter modifies .Fn contigmalloc Ns 's @@ -90,7 +112,9 @@ The .Fn contigfree function deallocates memory allocated by a previous call to -.Fn contigmalloc . +.Fn contigmalloc +or +.Fn contigmalloc_domainset . .Sh IMPLEMENTATION NOTES The .Fn contigmalloc Index: share/man/man9/domainset.9 =================================================================== --- share/man/man9/domainset.9 +++ share/man/man9/domainset.9 @@ -24,7 +24,7 @@ .\" .\" $FreeBSD$ .\" -.Dd October 20, 2018 +.Dd October 28, 2018 .Dt DOMAINSET 9 .Os .Sh NAME @@ -43,7 +43,11 @@ }; .Ed .Pp +.Ft struct domainset * +.Fn DOMAINSET_FIXED domain +.Ft struct domainset * .Fn DOMAINSET_RR +.Ft struct domainset * .Fn DOMAINSET_PREF domain .Ft struct domainset * .Fn domainset_create "const struct domainset *key" @@ -98,11 +102,26 @@ .El .Pp The +.Fn DOMAINSET_FIXED , .Fn DOMAINSET_RR and .Fn DOMAINSET_PREF -provide pointers to global pre-defined policies for use when the +macros provide pointers to global pre-defined policies for use when the desired policy is known at compile time. +.Fn DOMAINSET_FIXED +is a policy which only permits allocations from the specified domain. +.Fn DOMAINSET_RR +provides round-robin selection among all domains in the system. +The +.Fn DOMAINSET_PREF +policies attempt allocation from the specified domain, but unlike +.Fn DOMAINSET_FIXED +will fall back to other domains to satisfy the request. +These policies should be used in preference to +.Fn DOMAINSET_FIXED +to avoid blocking indefinitely on a +.Dv M_WAITOK +request. 
The .Fn domainset_create function takes a partially filled in domainset as a key and returns a Index: share/man/man9/malloc.9 =================================================================== --- share/man/man9/malloc.9 +++ share/man/man9/malloc.9 @@ -29,7 +29,7 @@ .\" $NetBSD: malloc.9,v 1.3 1996/11/11 00:05:11 lukem Exp $ .\" $FreeBSD$ .\" -.Dd June 13, 2018 +.Dd October 28, 2018 .Dt MALLOC 9 .Os .Sh NAME @@ -46,13 +46,9 @@ .Ft void * .Fn malloc "size_t size" "struct malloc_type *type" "int flags" .Ft void * -.Fn malloc_domain "size_t size" "struct malloc_type *type" "int domain" "int flags" -.Ft void * .Fn mallocarray "size_t nmemb" "size_t size" "struct malloc_type *type" "int flags" .Ft void .Fn free "void *addr" "struct malloc_type *type" -.Ft void -.Fn free_domain "void *addr" "struct malloc_type *type" .Ft void * .Fn realloc "void *addr" "size_t size" "struct malloc_type *type" "int flags" .Ft void * @@ -62,6 +58,12 @@ .In sys/malloc.h .In sys/kernel.h .Fn MALLOC_DEFINE type shortdesc longdesc +.In sys/param.h +.In sys/domainset.h +.Ft void * +.Fn malloc_domainset "size_t size" "struct malloc_type *type" "struct domainset *ds" "int flags" +.Ft void +.Fn free_domain "void *addr" "struct malloc_type *type" .Sh DESCRIPTION The .Fn malloc @@ -70,12 +72,15 @@ .Fa size . .Pp The -.Fn malloc_domain -variant allocates the object from the specified memory domain. Memory allocated -with this function should be returned with -.Fn free_domain . +.Fn malloc_domainset +variant allocates memory from a specific +.Xr numa 4 +domain using the specified domain selection policy. See -.Xr numa 9 for more details. +.Xr domainset 9 +for some example policies. +Memory allocated with this function should be returned with +.Fn free_domain . .Pp The .Fn mallocarray @@ -310,7 +315,9 @@ Failing consistency checks will cause a panic or a system console message. 
.Sh SEE ALSO +.Xr numa 4 , .Xr vmstat 8 , .Xr contigmalloc 9 , +.Xr domainset 9 , .Xr memguard 9 , .Xr vnode 9 Index: sys/dev/hwpmc/hwpmc_logging.c =================================================================== --- sys/dev/hwpmc/hwpmc_logging.c +++ sys/dev/hwpmc/hwpmc_logging.c @@ -41,6 +41,7 @@ #include #include +#include #include #include #include @@ -1231,8 +1232,8 @@ void pmclog_initialize() { - int domain; struct pmclog_buffer *plb; + int domain, ncpus, total; if (pmclog_buffer_size <= 0 || pmclog_buffer_size > 16*1024) { (void) printf("hwpmc: tunable logbuffersize=%d must be " @@ -1253,16 +1254,17 @@ pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; } for (domain = 0; domain < vm_ndomains; domain++) { - int ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus; - int total = ncpus*pmc_nlogbuffers_pcpu; + ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus; + total = ncpus * pmc_nlogbuffers_pcpu; - plb = malloc_domain(sizeof(struct pmclog_buffer)*total, M_PMC, domain, M_WAITOK|M_ZERO); + plb = malloc_domainset(sizeof(struct pmclog_buffer) * total, + M_PMC, DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); pmc_dom_hdrs[domain]->pdbh_plbs = plb; - for (int i = 0; i < total; i++, plb++) { + for (; total > 0; total--, plb++) { void *buf; - buf = malloc_domain(1024 * pmclog_buffer_size, M_PMC, domain, - M_WAITOK|M_ZERO); + buf = malloc_domainset(1024 * pmclog_buffer_size, M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); PMCLOG_INIT_BUFFER_DESCRIPTOR(plb, buf, domain); pmc_plb_rele_unlocked(plb); } Index: sys/dev/hwpmc/hwpmc_mod.c =================================================================== --- sys/dev/hwpmc/hwpmc_mod.c +++ sys/dev/hwpmc/hwpmc_mod.c @@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$"); #include +#include #include #include #include @@ -78,14 +79,6 @@ #include "hwpmc_soft.h" -#ifdef NUMA -#define NDOMAINS vm_ndomains -#else -#define NDOMAINS 1 -#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags)) -#define free_domain(addr, type) free(addr, type) -#endif - 
#define PMC_EPOCH_ENTER() struct epoch_tracker pmc_et; epoch_enter_preempt(global_epoch_preempt, &pmc_et) #define PMC_EPOCH_EXIT() epoch_exit_preempt(global_epoch_preempt, &pmc_et) @@ -5643,15 +5636,16 @@ continue; pc = pcpu_find(cpu); domain = pc->pc_domain; - sb = malloc_domain(sizeof(struct pmc_samplebuffer) + - pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain, - M_WAITOK|M_ZERO); + sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + + pmc_nsamples * sizeof(struct pmc_sample), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); KASSERT(pmc_pcpu[cpu] != NULL, ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); - sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples * - sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO); + sb->ps_callchains = malloc_domainset(pmc_callchaindepth * + pmc_nsamples * sizeof(uintptr_t), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) ps->ps_pc = sb->ps_callchains + @@ -5659,35 +5653,27 @@ pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb; - sb = malloc_domain(sizeof(struct pmc_samplebuffer) + - pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain, - M_WAITOK|M_ZERO); - - KASSERT(pmc_pcpu[cpu] != NULL, - ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); - - sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples * - sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO); + sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + + pmc_nsamples * sizeof(struct pmc_sample), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); + sb->ps_callchains = malloc_domainset(pmc_callchaindepth * + pmc_nsamples * sizeof(uintptr_t), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) ps->ps_pc = sb->ps_callchains + (n * pmc_callchaindepth); pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb; - sb = malloc_domain(sizeof(struct pmc_samplebuffer) + - pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain, - 
M_WAITOK|M_ZERO); - - KASSERT(pmc_pcpu[cpu] != NULL, - ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); - - sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples * - sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO); - + sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + + pmc_nsamples * sizeof(struct pmc_sample), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); + sb->ps_callchains = malloc_domainset(pmc_callchaindepth * + pmc_nsamples * sizeof(uintptr_t), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) - ps->ps_pc = sb->ps_callchains + - (n * pmc_callchaindepth); + ps->ps_pc = sb->ps_callchains + n * pmc_callchaindepth; pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb; } Index: sys/i386/i386/pmap.c =================================================================== --- sys/i386/i386/pmap.c +++ sys/i386/i386/pmap.c @@ -864,8 +864,8 @@ /* Inform UMA that this allocator uses kernel_map/object. */ *flags = UMA_SLAB_KERNEL; - return ((void *)kmem_alloc_contig_domain(domain, bytes, wait, 0x0ULL, - 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); + return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain), + bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); } #endif Index: sys/kern/kern_cpuset.c =================================================================== --- sys/kern/kern_cpuset.c +++ sys/kern/kern_cpuset.c @@ -119,6 +119,7 @@ */ LIST_HEAD(domainlist, domainset); +struct domainset __read_mostly domainset_fixed[MAXMEMDOM]; struct domainset __read_mostly domainset_prefer[MAXMEMDOM]; struct domainset __read_mostly domainset_roundrobin; @@ -1402,6 +1403,12 @@ _domainset_create(dset, NULL); for (i = 0; i < vm_ndomains; i++) { + dset = &domainset_fixed[i]; + DOMAINSET_ZERO(&dset->ds_mask); + DOMAINSET_SET(i, &dset->ds_mask); + dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; + _domainset_create(dset, NULL); + dset = &domainset_prefer[i]; DOMAINSET_COPY(&all_domains, 
&dset->ds_mask); dset->ds_policy = DOMAINSET_POLICY_PREFER; Index: sys/kern/kern_malloc.c =================================================================== --- sys/kern/kern_malloc.c +++ sys/kern/kern_malloc.c @@ -68,6 +68,7 @@ #include #include +#include #include #include #include @@ -453,13 +454,13 @@ } void * -contigmalloc_domain(unsigned long size, struct malloc_type *type, - int domain, int flags, vm_paddr_t low, vm_paddr_t high, +contigmalloc_domainset(unsigned long size, struct malloc_type *type, + struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high, unsigned long alignment, vm_paddr_t boundary) { void *ret; - ret = (void *)kmem_alloc_contig_domain(domain, size, flags, low, high, + ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high, alignment, boundary, VM_MEMATTR_DEFAULT); if (ret != NULL) malloc_type_allocated(type, round_page(size)); @@ -595,9 +596,8 @@ return ((void *) va); } -void * -malloc_domain(size_t size, struct malloc_type *mtp, int domain, - int flags) +static void * +malloc_domain(size_t size, struct malloc_type *mtp, int domain, int flags) { int indx; caddr_t va; @@ -640,6 +640,24 @@ return ((void *) va); } +void * +malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds, + int flags) +{ + struct vm_domainset_iter di; + void *ret; + int domain; + + vm_domainset_iter_policy_init(&di, ds, &domain, &flags); + do { + ret = malloc_domain(size, mtp, domain, flags); + if (ret != NULL) + break; + } while (vm_domainset_iter_policy(&di, &domain) == 0); + + return (ret); +} + void * mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags) { Index: sys/kern/kern_mbuf.c =================================================================== --- sys/kern/kern_mbuf.c +++ sys/kern/kern_mbuf.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include @@ -590,8 +591,9 @@ /* Inform UMA that this allocator uses kernel_map/object. 
*/ *flags = UMA_SLAB_KERNEL; - return ((void *)kmem_alloc_contig_domain(domain, bytes, wait, - (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT)); + return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain), + bytes, wait, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, + VM_MEMATTR_DEFAULT)); } /* Index: sys/kern/kern_pmc.c =================================================================== --- sys/kern/kern_pmc.c +++ sys/kern/kern_pmc.c @@ -35,8 +35,9 @@ #include "opt_hwpmc_hooks.h" -#include +#include #include +#include #include #include #include @@ -330,22 +331,6 @@ mtx_unlock_spin(&pmc_softs_mtx); } -#ifdef NUMA -#define NDOMAINS vm_ndomains - -static int -getdomain(int cpu) -{ - struct pcpu *pc; - - pc = pcpu_find(cpu); - return (pc->pc_domain); -} -#else -#define NDOMAINS 1 -#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags)) -#define getdomain(cpu) 0 -#endif /* * Initialise hwpmc. */ @@ -363,14 +348,15 @@ pmc_softs = malloc(pmc_softevents * sizeof(struct pmc_soft *), M_PMCHOOKS, M_NOWAIT|M_ZERO); KASSERT(pmc_softs != NULL, ("cannot allocate soft events table")); - for (domain = 0; domain < NDOMAINS; domain++) { - pmc_dom_hdrs[domain] = malloc_domain(sizeof(struct pmc_domain_buffer_header), M_PMC, domain, - M_WAITOK|M_ZERO); + for (domain = 0; domain < vm_ndomains; domain++) { + pmc_dom_hdrs[domain] = malloc_domainset( + sizeof(struct pmc_domain_buffer_header), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); mtx_init(&pmc_dom_hdrs[domain]->pdbh_mtx, "pmc_bufferlist_mtx", "pmc-leaf", MTX_SPIN); TAILQ_INIT(&pmc_dom_hdrs[domain]->pdbh_head); } CPU_FOREACH(cpu) { - domain = getdomain(cpu); + domain = pcpu_find(cpu)->pc_domain; KASSERT(pmc_dom_hdrs[domain] != NULL, ("no mem allocated for domain: %d", domain)); pmc_dom_hdrs[domain]->pdbh_ncpus++; } Index: sys/sys/domainset.h =================================================================== --- sys/sys/domainset.h +++ sys/sys/domainset.h @@ -96,7 +96,8 @@ domainid_t 
ds_order[MAXMEMDOM]; /* nth domain table. */ }; -extern struct domainset domainset_prefer[MAXMEMDOM]; +extern struct domainset domainset_fixed[MAXMEMDOM], domainset_prefer[MAXMEMDOM]; +#define DOMAINSET_FIXED(domain) (&domainset_fixed[(domain)]) #define DOMAINSET_PREF(domain) (&domainset_prefer[(domain)]) extern struct domainset domainset_roundrobin; #define DOMAINSET_RR() (&domainset_roundrobin) Index: sys/sys/malloc.h =================================================================== --- sys/sys/malloc.h +++ sys/sys/malloc.h @@ -160,6 +160,7 @@ */ MALLOC_DECLARE(M_IOV); +struct domainset; extern struct mtx malloc_mtx; /* @@ -172,8 +173,8 @@ vm_paddr_t low, vm_paddr_t high, unsigned long alignment, vm_paddr_t boundary) __malloc_like __result_use_check __alloc_size(1) __alloc_align(6); -void *contigmalloc_domain(unsigned long size, struct malloc_type *type, - int domain, int flags, vm_paddr_t low, vm_paddr_t high, +void *contigmalloc_domainset(unsigned long size, struct malloc_type *type, + struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high, unsigned long alignment, vm_paddr_t boundary) __malloc_like __result_use_check __alloc_size(1) __alloc_align(6); void free(void *addr, struct malloc_type *type); @@ -230,8 +231,9 @@ _malloc_item; \ }) -void *malloc_domain(size_t size, struct malloc_type *type, int domain, - int flags) __malloc_like __result_use_check __alloc_size(1); +void *malloc_domainset(size_t size, struct malloc_type *type, + struct domainset *ds, int flags) __malloc_like __result_use_check + __alloc_size(1); void *mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags) __malloc_like __result_use_check __alloc_size2(1, 2); Index: sys/vm/uma_core.c =================================================================== --- sys/vm/uma_core.c +++ sys/vm/uma_core.c @@ -1172,7 +1172,7 @@ void *p; /* Returned page */ *pflag = UMA_SLAB_KERNEL; - p = (void *) kmem_malloc_domain(domain, bytes, wait); + p = (void 
*)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait); return (p); } @@ -3718,6 +3718,7 @@ void * uma_large_malloc_domain(vm_size_t size, int domain, int wait) { + struct domainset *policy; vm_offset_t addr; uma_slab_t slab; @@ -3729,10 +3730,9 @@ slab = zone_alloc_item(slabzone, NULL, domain, wait); if (slab == NULL) return (NULL); - if (domain == UMA_ANYDOMAIN) - addr = kmem_malloc(size, wait); - else - addr = kmem_malloc_domain(domain, size, wait); + policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() : + DOMAINSET_FIXED(domain); + addr = kmem_malloc_domainset(policy, size, wait); if (addr != 0) { vsetslab(addr, slab); slab->us_data = (void *)addr; Index: sys/vm/vm_extern.h =================================================================== --- sys/vm/vm_extern.h +++ sys/vm/vm_extern.h @@ -44,6 +44,7 @@ #ifdef _KERNEL struct cdev; struct cdevsw; +struct domainset; /* These operate on kernel virtual addresses only. */ vm_offset_t kva_alloc(vm_size_t); @@ -56,16 +57,17 @@ /* These operate on virtual addresses backed by memory. 
*/ vm_offset_t kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr); -vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, - vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr); +vm_offset_t kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, + int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr); vm_offset_t kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr); -vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, - vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, - vm_memattr_t memattr); +vm_offset_t kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, + int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, + vm_paddr_t boundary, vm_memattr_t memattr); vm_offset_t kmem_malloc(vm_size_t size, int flags); -vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags); +vm_offset_t kmem_malloc_domainset(struct domainset *ds, vm_size_t size, + int flags); void kmem_free(vm_offset_t addr, vm_size_t size); /* This provides memory for previously allocated address space. */ Index: sys/vm/vm_kern.c =================================================================== --- sys/vm/vm_kern.c +++ sys/vm/vm_kern.c @@ -175,7 +175,7 @@ * necessarily physically contiguous. If M_ZERO is specified through the * given flags, then the pages are zeroed before they are mapped. 
*/ -vm_offset_t +static vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr) { @@ -231,11 +231,20 @@ kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr) { + + return (kmem_alloc_attr_domainset(DOMAINSET_RR(), size, flags, low, + high, memattr)); +} + +vm_offset_t +kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags, + vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr) +{ struct vm_domainset_iter di; vm_offset_t addr; int domain; - vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags); + vm_domainset_iter_policy_init(&di, ds, &domain, &flags); do { addr = kmem_alloc_attr_domain(domain, size, flags, low, high, memattr); @@ -254,7 +263,7 @@ * through the given flags, then the pages are zeroed before they are * mapped. */ -vm_offset_t +static vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr) @@ -315,11 +324,21 @@ kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr) { + + return (kmem_alloc_contig_domainset(DOMAINSET_RR(), size, flags, low, + high, alignment, boundary, memattr)); +} + +vm_offset_t +kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags, + vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, + vm_memattr_t memattr) +{ struct vm_domainset_iter di; vm_offset_t addr; int domain; - vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags); + vm_domainset_iter_policy_init(&di, ds, &domain, &flags); do { addr = kmem_alloc_contig_domain(domain, size, flags, low, high, alignment, boundary, memattr); @@ -368,11 +387,11 @@ } /* - * kmem_malloc: + * kmem_malloc_domain: * * Allocate wired-down pages in the kernel's address space. 
*/ -vm_offset_t +static vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags) { vmem_t *arena; @@ -401,12 +420,19 @@ vm_offset_t kmem_malloc(vm_size_t size, int flags) +{ + + return (kmem_malloc_domainset(DOMAINSET_RR(), size, flags)); +} + +vm_offset_t +kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags) { struct vm_domainset_iter di; vm_offset_t addr; int domain; - vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags); + vm_domainset_iter_policy_init(&di, ds, &domain, &flags); do { addr = kmem_malloc_domain(domain, size, flags); if (addr != 0) Index: sys/x86/iommu/busdma_dmar.c =================================================================== --- sys/x86/iommu/busdma_dmar.c +++ sys/x86/iommu/busdma_dmar.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include @@ -373,16 +374,16 @@ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__); tag = (struct bus_dma_tag_dmar *)dmat; - map = malloc_domain(sizeof(*map), M_DMAR_DMAMAP, - tag->common.domain, M_NOWAIT | M_ZERO); + map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP, + DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO); if (map == NULL) { *mapp = NULL; return (ENOMEM); } if (tag->segments == NULL) { - tag->segments = malloc_domain(sizeof(bus_dma_segment_t) * + tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) * tag->common.nsegments, M_DMAR_DMAMAP, - tag->common.domain, M_NOWAIT); + DOMAINSET_PREF(tag->common.domain), M_NOWAIT); if (tag->segments == NULL) { free_domain(map, M_DMAR_DMAMAP); *mapp = NULL; @@ -447,13 +448,13 @@ if (tag->common.maxsize < PAGE_SIZE && tag->common.alignment <= tag->common.maxsize && attr == VM_MEMATTR_DEFAULT) { - *vaddr = malloc_domain(tag->common.maxsize, M_DEVBUF, - tag->common.domain, mflags); + *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF, + DOMAINSET_PREF(tag->common.domain), mflags); map->flags |= BUS_DMAMAP_DMAR_MALLOC; } else { - *vaddr = (void 
*)kmem_alloc_attr_domain(tag->common.domain, - tag->common.maxsize, mflags, 0ul, BUS_SPACE_MAXADDR, - attr); + *vaddr = (void *)kmem_alloc_attr_domainset( + DOMAINSET_PREF(tag->common.domain), tag->common.maxsize, + mflags, 0ul, BUS_SPACE_MAXADDR, attr); map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC; } if (*vaddr == NULL) { Index: sys/x86/x86/busdma_bounce.c =================================================================== --- sys/x86/x86/busdma_bounce.c +++ sys/x86/x86/busdma_bounce.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -294,9 +295,9 @@ error = 0; if (dmat->segments == NULL) { - dmat->segments = (bus_dma_segment_t *)malloc_domain( + dmat->segments = (bus_dma_segment_t *)malloc_domainset( sizeof(bus_dma_segment_t) * dmat->common.nsegments, - M_DEVBUF, dmat->common.domain, M_NOWAIT); + M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT); if (dmat->segments == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); @@ -317,8 +318,8 @@ } bz = dmat->bounce_zone; - *mapp = (bus_dmamap_t)malloc_domain(sizeof(**mapp), M_DEVBUF, - dmat->common.domain, M_NOWAIT | M_ZERO); + *mapp = (bus_dmamap_t)malloc_domainset(sizeof(**mapp), M_DEVBUF, + DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO); if (*mapp == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); @@ -411,9 +412,9 @@ *mapp = NULL; if (dmat->segments == NULL) { - dmat->segments = (bus_dma_segment_t *)malloc_domain( + dmat->segments = (bus_dma_segment_t *)malloc_domainset( sizeof(bus_dma_segment_t) * dmat->common.nsegments, - M_DEVBUF, dmat->common.domain, mflags); + M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags); if (dmat->segments == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, ENOMEM); @@ -452,20 +453,21 @@ (dmat->common.alignment <= dmat->common.maxsize) && dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) && attr == VM_MEMATTR_DEFAULT) { - *vaddr = 
malloc_domain(dmat->common.maxsize, M_DEVBUF, - dmat->common.domain, mflags); + *vaddr = malloc_domainset(dmat->common.maxsize, M_DEVBUF, + DOMAINSET_PREF(dmat->common.domain), mflags); } else if (dmat->common.nsegments >= howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) && dmat->common.alignment <= PAGE_SIZE && (dmat->common.boundary % PAGE_SIZE) == 0) { /* Page-based multi-segment allocations allowed */ - *vaddr = (void *)kmem_alloc_attr_domain(dmat->common.domain, - dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr, - attr); + *vaddr = (void *)kmem_alloc_attr_domainset( + DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize, + mflags, 0ul, dmat->common.lowaddr, attr); dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC; } else { - *vaddr = (void *)kmem_alloc_contig_domain(dmat->common.domain, - dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr, + *vaddr = (void *)kmem_alloc_contig_domainset( + DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize, + mflags, 0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ? dmat->common.alignment : 1ul, dmat->common.boundary, attr); dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC; @@ -1149,14 +1151,14 @@ while (numpages > 0) { struct bounce_page *bpage; - bpage = (struct bounce_page *)malloc_domain(sizeof(*bpage), - M_DEVBUF, dmat->common.domain, M_NOWAIT | M_ZERO); + bpage = malloc_domainset(sizeof(*bpage), M_DEVBUF, + DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO); if (bpage == NULL) break; - bpage->vaddr = (vm_offset_t)contigmalloc_domain(PAGE_SIZE, - M_DEVBUF, dmat->common.domain, M_NOWAIT, 0ul, - bz->lowaddr, PAGE_SIZE, 0); + bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE, + M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT, + 0ul, bz->lowaddr, PAGE_SIZE, 0); if (bpage->vaddr == 0) { free_domain(bpage, M_DEVBUF); break;