Index: sys/kern/kern_malloc.c
===================================================================
--- sys/kern/kern_malloc.c
+++ sys/kern/kern_malloc.c
@@ -60,6 +60,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -78,6 +79,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -551,6 +554,50 @@
 }
 #endif
 
+/*
+ * Handle large allocations and frees by using kmem_malloc directly.
+ */
+static inline bool
+malloc_large_slab(uma_slab_t slab)
+{
+	uintptr_t va;
+
+	va = (uintptr_t)slab;
+	return ((va & 1) != 0);
+}
+
+static inline size_t
+malloc_large_size(uma_slab_t slab)
+{
+	uintptr_t va;
+
+	va = (uintptr_t)slab;
+	return (va >> 1);
+}
+
+static caddr_t
+malloc_large(size_t size, struct domainset *policy, int flags)
+{
+	vm_offset_t va;
+
+	size = roundup(size, PAGE_SIZE);
+	va = kmem_malloc_domainset(policy, size, flags);
+	if (va != 0) {
+		/* The low bit is unused for slab pointers. */
+		vsetslab(va, (void *)((size << 1) | 1));
+		uma_total_inc(size);
+	}
+	return ((caddr_t)va);
+}
+
+static void
+free_large(void *addr, size_t size)
+{
+
+	kmem_free((vm_offset_t)addr, size);
+	uma_total_dec(size);
+}
+
 /*
  * malloc:
  *
@@ -588,9 +635,7 @@
 		size = zone->uz_size;
 		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
 	} else {
-		size = roundup(size, PAGE_SIZE);
-		zone = NULL;
-		va = uma_large_malloc(size, flags);
+		va = malloc_large(size, DOMAINSET_RR(), flags);
 		malloc_type_allocated(mtp, va == NULL ? 0 : size);
 	}
 	if (flags & M_WAITOK)
@@ -610,6 +655,7 @@
 	int indx;
 	caddr_t va;
 	uma_zone_t zone;
+	struct domainset *policy;
 #if defined(DEBUG_REDZONE)
 	unsigned long osize = size;
 #endif
@@ -632,9 +678,11 @@
 		size = zone->uz_size;
 		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
 	} else {
-		size = roundup(size, PAGE_SIZE);
-		zone = NULL;
-		va = uma_large_malloc_domain(size, domain, flags);
+		if (VM_DOMAIN_EMPTY(domain))
+			policy = DOMAINSET_RR();
+		else
+			policy = DOMAINSET_FIXED(domain);
+		va = malloc_large(size, policy, flags);
 		malloc_type_allocated(mtp, va == NULL ? 0 : size);
 	}
 	if (flags & M_WAITOK)
@@ -754,15 +802,15 @@
 		panic("free: address %p(%p) has not been allocated.\n",
 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
-	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
+	if (__predict_true(!malloc_large_slab(slab))) {
 		size = slab->us_keg->uk_size;
 #ifdef INVARIANTS
 		free_save_type(addr, mtp, size);
 #endif
 		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
 	} else {
-		size = slab->us_size;
-		uma_large_free(slab);
+		size = malloc_large_size(slab);
+		free_large(addr, size);
 	}
 	malloc_type_freed(mtp, size);
 }
@@ -787,7 +835,7 @@
 		panic("free_domain: address %p(%p) has not been allocated.\n",
 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
-	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
+	if (__predict_true(!malloc_large_slab(slab))) {
 		size = slab->us_keg->uk_size;
 #ifdef INVARIANTS
 		free_save_type(addr, mtp, size);
@@ -795,8 +843,8 @@
 		uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones),
 		    addr, slab);
 	} else {
-		size = slab->us_size;
-		uma_large_free(slab);
+		size = malloc_large_size(slab);
+		free_large(addr, size);
 	}
 	malloc_type_freed(mtp, size);
 }
@@ -841,10 +889,10 @@
 	    ("realloc: address %p out of range", (void *)addr));
 
 	/* Get the size of the original block */
-	if (!(slab->us_flags & UMA_SLAB_MALLOC))
+	if (!malloc_large_slab(slab))
 		alloc = slab->us_keg->uk_size;
 	else
-		alloc = slab->us_size;
+		alloc = malloc_large_size(slab);
 
 	/* Reuse the original block if appropriate */
 	if (size <= alloc
Index: sys/vm/memguard.c
===================================================================
--- sys/vm/memguard.c
+++ sys/vm/memguard.c
@@ -311,7 +311,7 @@
 	 * When we pass our memory limit, reject sub-page allocations.
 	 * Page-size and larger allocations will use the same amount
 	 * of physical memory whether we allocate or hand off to
-	 * uma_large_alloc(), so keep those.
+	 * malloc_large(), so keep those.
 	 */
 	if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
 	    req_size < PAGE_SIZE) {
Index: sys/vm/uma.h
===================================================================
--- sys/vm/uma.h
+++ sys/vm/uma.h
@@ -640,7 +640,6 @@
 #define	UMA_SLAB_KERNEL	0x04		/* Slab alloced from kmem */
 #define	UMA_SLAB_PRIV	0x08		/* Slab alloced from priv allocator */
 #define	UMA_SLAB_OFFP	0x10		/* Slab is managed separately */
-#define	UMA_SLAB_MALLOC	0x20		/* Slab is a large malloc slab */
 /* 0x02, 0x40, and 0x80 are available */
 
 /*
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -150,10 +150,10 @@
  * kmem soft limit, initialized by uma_set_limit(). Ensure that early
  * allocations don't trigger a wakeup of the reclaim thread.
  */
-static unsigned long uma_kmem_limit = LONG_MAX;
+unsigned long uma_kmem_limit = LONG_MAX;
 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
     "UMA kernel memory soft limit");
-static unsigned long uma_kmem_total;
+unsigned long uma_kmem_total;
 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
     "UMA kernel memory usage");
 
@@ -329,22 +329,6 @@
 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
     "Warn when UMA zones becomes full");
 
-/* Adjust bytes under management by UMA. */
-static inline void
-uma_total_dec(unsigned long size)
-{
-
-	atomic_subtract_long(&uma_kmem_total, size);
-}
-
-static inline void
-uma_total_inc(unsigned long size)
-{
-
-	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
-		uma_reclaim_wakeup();
-}
-
 /*
  * This routine checks to see whether or not it's safe to enable buckets.
  */
@@ -4185,57 +4169,6 @@
 	return (zone->uz_sleepers > 0);
 }
 
-void *
-uma_large_malloc_domain(vm_size_t size, int domain, int wait)
-{
-	struct domainset *policy;
-	vm_offset_t addr;
-	uma_slab_t slab;
-
-	if (domain != UMA_ANYDOMAIN) {
-		/* avoid allocs targeting empty domains */
-		if (VM_DOMAIN_EMPTY(domain))
-			domain = UMA_ANYDOMAIN;
-	}
-	slab = zone_alloc_item(slabzone, NULL, domain, wait);
-	if (slab == NULL)
-		return (NULL);
-	policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
-	    DOMAINSET_FIXED(domain);
-	addr = kmem_malloc_domainset(policy, size, wait);
-	if (addr != 0) {
-		vsetslab(addr, slab);
-		slab->us_data = (void *)addr;
-		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
-		slab->us_size = size;
-		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
-		    pmap_kextract(addr)));
-		uma_total_inc(size);
-	} else {
-		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
-	}
-
-	return ((void *)addr);
-}
-
-void *
-uma_large_malloc(vm_size_t size, int wait)
-{
-
-	return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait);
-}
-
-void
-uma_large_free(uma_slab_t slab)
-{
-
-	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
-	    ("uma_large_free: Memory not allocated with uma_large_malloc."));
-	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
-	uma_total_dec(slab->us_size);
-	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
-}
-
 static void
 uma_zero_item(void *item, uma_zone_t zone)
 {
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h
+++ sys/vm/uma_int.h
@@ -283,10 +283,7 @@
  */
 struct uma_slab {
 	uma_keg_t	us_keg;			/* Keg we live in */
-	union {
-		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
-		unsigned long	_us_size;	/* Size of allocation */
-	} us_type;
+	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
 	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
 	uint8_t		*us_data;		/* First item */
 	struct slabbits	us_free;		/* Free bitmask. */
@@ -298,9 +295,6 @@
 	uint8_t		us_domain;		/* Backing NUMA domain. */
 };
 
-#define	us_link	us_type._us_link
-#define	us_size	us_type._us_size
-
 #if MAXMEMDOM >= 255
 #error "Slab domain type insufficient"
 #endif
@@ -404,9 +398,6 @@
 #ifdef _KERNEL
 /* Internal prototypes */
 static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
-void *uma_large_malloc(vm_size_t size, int wait);
-void *uma_large_malloc_domain(vm_size_t size, int domain, int wait);
-void uma_large_free(uma_slab_t slab);
 
 /* Lock Macros */
 
@@ -491,6 +482,25 @@
 	p->plinks.s.pv = slab;
 }
 
+extern unsigned long uma_kmem_limit;
+extern unsigned long uma_kmem_total;
+
+/* Adjust bytes under management by UMA. */
+static inline void
+uma_total_dec(unsigned long size)
+{
+
+	atomic_subtract_long(&uma_kmem_total, size);
+}
+
+static inline void
+uma_total_inc(unsigned long size)
+{
+
+	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
+		uma_reclaim_wakeup();
+}
+
 /*
  * The following two functions may be defined by architecture specific code
  * if they can provide more efficient allocation functions.  This is useful
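
Note on the slab-pointer tagging used by the patch: malloc_large() no longer allocates a uma_slab to describe a large allocation. It stores (size << 1) | 1 in the per-page slab slot via vsetslab(), relying on real uma_slab_t pointers being at least word-aligned, so bit 0 of a genuine slab pointer is never set. free() and realloc() test that bit with malloc_large_slab() and recover the byte count with malloc_large_size(). The standalone sketch below (not part of the patch; fake_slab_t and the helper names are hypothetical stand-ins for the kernel types) shows the encode/decode round trip under that alignment assumption:

/*
 * Illustrative userspace sketch of the low-bit tag used by malloc_large().
 * A page-rounded size is encoded as (size << 1) | 1 and stored where a slab
 * pointer would normally live; bit 0 distinguishes the two cases.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void *fake_slab_t;		/* stand-in for uma_slab_t */

/* Mirrors malloc_large(): encode a page-rounded size as a tagged pointer. */
static fake_slab_t
encode_large(size_t size)
{
	return ((fake_slab_t)(uintptr_t)((size << 1) | 1));
}

/* Mirrors malloc_large_slab(): bit 0 set means "large allocation". */
static int
is_large(fake_slab_t slab)
{
	return (((uintptr_t)slab & 1) != 0);
}

/* Mirrors malloc_large_size(): strip the tag to recover the byte count. */
static size_t
large_size(fake_slab_t slab)
{
	return ((uintptr_t)slab >> 1);
}

int
main(void)
{
	fake_slab_t tagged = encode_large(3 * 4096);

	assert(is_large(tagged));
	assert(large_size(tagged) == 3 * 4096);
	printf("large alloc of %zu bytes round-trips through the tag\n",
	    large_size(tagged));
	return (0);
}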