diff --git a/sys/dev/netmap/netmap_kern.h b/sys/dev/netmap/netmap_kern.h
--- a/sys/dev/netmap/netmap_kern.h
+++ b/sys/dev/netmap/netmap_kern.h
@@ -81,6 +81,7 @@
 
 #if defined(__FreeBSD__)
 #include <sys/selinfo.h>
+#include <vm/vm.h>
 
 #define likely(x)	__builtin_expect((long)!!(x), 1L)
 #define unlikely(x)	__builtin_expect((long)!!(x), 0L)
@@ -1727,10 +1728,27 @@
 #define NM_IS_NATIVE(ifp)	(NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor)
 
 #if defined(__FreeBSD__)
+/*
+ * On FreeBSD, the group ID of a memory pool refers to the NUMA domain rather
+ * than an IOMMU domain.
+ */
+static inline int
+nm_iommu_group_id(struct netmap_adapter *na)
+{
+	int domain;
 
-/* Assigns the device IOMMU domain to an allocator.
- * Returns -ENOMEM in case the domain is different */
-#define nm_iommu_group_id(dev) (-1)
+	/*
+	 * If the system has only one NUMA domain, don't bother distinguishing
+	 * between -1 and domain 0.  Otherwise we may end up needlessly
+	 * allocating new memory pools.
+	 */
+	if (vm_ndomains == 1)
+		return (-1);
+	domain = if_getnumadomain(na->ifp);
+	if (domain == IF_NODOM)
+		return (-1);
+	return (domain);
+}
 
 /* Callback invoked by the dma machinery after a successful dmamap_load */
 static void netmap_dmamap_cb(__unused void *arg,
diff --git a/sys/dev/netmap/netmap_mem2.c b/sys/dev/netmap/netmap_mem2.c
--- a/sys/dev/netmap/netmap_mem2.c
+++ b/sys/dev/netmap/netmap_mem2.c
@@ -37,8 +37,8 @@
 #endif /* __APPLE__ */
 
 #ifdef __FreeBSD__
-#include <sys/cdefs.h> /* prerequisite */
 #include <sys/types.h>
+#include <sys/domainset.h>
 #include <sys/malloc.h>
 #include <sys/kernel.h>	/* MALLOC_DEFINE */
 #include <sys/proc.h>
@@ -174,7 +174,7 @@
 	struct netmap_obj_pool pools[NETMAP_POOLS_NR];
 
 	nm_memid_t nm_id;	/* allocator identifier */
-	int nm_grp;	/* iommu group id */
+	int nm_grp;		/* iommu group id */
 
 	/* list of all existing allocators, sorted by nm_id */
 	struct netmap_mem_d *prev, *next;
@@ -310,7 +310,7 @@
 
 static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
 static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
-static int nm_mem_check_group(struct netmap_mem_d *, bus_dma_tag_t);
+static int nm_mem_check_group(struct netmap_mem_d *, void *);
 static void nm_mem_release_id(struct netmap_mem_d *);
 
 nm_memid_t
@@ -728,7 +728,7 @@
 }
 
 static int
-nm_mem_check_group(struct netmap_mem_d *nmd, bus_dma_tag_t dev)
+nm_mem_check_group(struct netmap_mem_d *nmd, void *dev)
 {
 	int err = 0, id;
 
@@ -1399,7 +1399,7 @@
 
 /* call with NMA_LOCK held */
 static int
-netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
+netmap_finalize_obj_allocator(struct netmap_mem_d *nmd, struct netmap_obj_pool *p)
 {
 	int i; /* must be signed */
 	size_t n;
@@ -1440,8 +1440,16 @@
 		 * can live with standard malloc, because the hardware will not
 		 * access the pages directly.
 		 */
-		clust = contigmalloc(p->_clustsize, M_NETMAP, M_NOWAIT | M_ZERO,
-		    (size_t)0, -1UL, PAGE_SIZE, 0);
+		if (nmd->nm_grp == -1) {
+			clust = contigmalloc(p->_clustsize, M_NETMAP,
+			    M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
+		} else {
+			struct domainset *ds;
+
+			ds = DOMAINSET_PREF(nmd->nm_grp);
+			clust = contigmalloc_domainset(p->_clustsize, M_NETMAP,
+			    ds, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
+		}
 		if (clust == NULL) {
 			/*
 			 * If we get here, there is a severe memory shortage,
@@ -1634,7 +1642,7 @@
 	nmd->lasterr = 0;
 	nmd->nm_totalsize = 0;
 	for (i = 0; i < NETMAP_POOLS_NR; i++) {
-		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
+		nmd->lasterr = netmap_finalize_obj_allocator(nmd, &nmd->pools[i]);
 		if (nmd->lasterr)
 			goto error;
 		nmd->nm_totalsize += nmd->pools[i].memtotal;
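
A note for readers borrowing this pattern: the change above maps netmap's "IOMMU group"
concept onto FreeBSD NUMA domains and then steers pool allocations toward the NIC's
domain. The sketch below is not part of the patch; the malloc type M_MYDRV and the
helper nic_alloc_dma_buf() are hypothetical. It shows the same idiom in a self-contained
form, assuming a recent FreeBSD where contigmalloc_domainset(9) and if_getnumadomain()
are available:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/domainset.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <net/if_var.h>

    MALLOC_DEFINE(M_MYDRV, "mydrv", "example NUMA-aware driver buffers");

    /*
     * Allocate a zeroed, page-aligned, physically contiguous buffer,
     * preferring the NUMA domain that "ifp" is attached to.
     */
    static void *
    nic_alloc_dma_buf(if_t ifp, unsigned long size)
    {
    	int domain;

    	domain = if_getnumadomain(ifp);
    	if (domain == IF_NODOM) {
    		/* No known affinity: any domain will do. */
    		return (contigmalloc(size, M_MYDRV, M_NOWAIT | M_ZERO,
    		    (size_t)0, -1UL, PAGE_SIZE, 0));
    	}
    	/*
    	 * DOMAINSET_PREF() prefers the given domain but may fall back
    	 * to others under memory pressure; DOMAINSET_FIXED() would
    	 * fail instead of falling back.
    	 */
    	return (contigmalloc_domainset(size, M_MYDRV,
    	    DOMAINSET_PREF(domain), M_NOWAIT | M_ZERO,
    	    (size_t)0, -1UL, PAGE_SIZE, 0));
    }

Choosing DOMAINSET_PREF() over DOMAINSET_FIXED() matches the patch's intent: locality
is a performance optimization, so allocations should still succeed (remotely) when the
local domain has no contiguous memory left.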