diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h
--- a/sys/powerpc/include/vmparam.h
+++ b/sys/powerpc/include/vmparam.h
@@ -111,8 +111,6 @@
 
 #define	KERNBASE		0x00100100	/* start of kernel virtual */
 
-#define	UMA_MD_SMALL_ALLOC
-
 #ifdef AIM
 #ifndef __powerpc64__
 #define	VM_MIN_KERNEL_ADDRESS	((vm_offset_t)KERNEL_SR << ADDR_SR_SHFT)
@@ -125,12 +123,14 @@
  * takes pressure off the small amount of available KVA.
  */
 #define	UMA_USE_DMAP
+#define	UMA_MD_DMAP_HOOK
 
 #else /* Book-E */
 
 /* Use the direct map for UMA small allocs on powerpc64. */
 #ifdef __powerpc64__
 #define	UMA_USE_DMAP
+#define	UMA_MD_DMAP_HOOK
 #else
 #define	VM_MIN_KERNEL_ADDRESS	0xc0000000
 #define	VM_MAX_KERNEL_ADDRESS	0xffffefff
diff --git a/sys/powerpc/powerpc/uma_machdep.c b/sys/powerpc/powerpc/uma_machdep.c
--- a/sys/powerpc/powerpc/uma_machdep.c
+++ b/sys/powerpc/powerpc/uma_machdep.c
@@ -38,44 +38,32 @@
 #include
 #include
 
+#ifdef UMA_MD_DMAP_HOOK
 static int hw_uma_mdpages;
 SYSCTL_INT(_hw, OID_AUTO, uma_mdpages, CTLFLAG_RD, &hw_uma_mdpages, 0,
     "UMA MD pages in use");
 
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
-    int wait)
+vm_offset_t
+uma_vm_page_to_dmap(vm_page_t m)
 {
-	void *va;
+	vm_offset_t va;
 	vm_paddr_t pa;
-	vm_page_t m;
-
-	*flags = UMA_SLAB_PRIV;
-
-	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
-	    VM_ALLOC_WIRED);
-	if (m == NULL)
-		return (NULL);
 
 	pa = VM_PAGE_TO_PHYS(m);
-#ifdef __powerpc64__
-	if ((wait & M_NODUMP) == 0)
-		dump_add_page(pa);
-#endif
 	if (!hw_direct_map) {
 		pmap_kenter(pa, pa);
-		va = (void *)(vm_offset_t)pa;
+		va = (vm_offset_t)pa;
 	} else {
-		va = (void *)(vm_offset_t)PHYS_TO_DMAP(pa);
+		va = (vm_offset_t)PHYS_TO_DMAP(pa);
 	}
 	atomic_add_int(&hw_uma_mdpages, 1);
 	return (va);
 }
 
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
+vm_page_t
+uma_dmap_to_vm_page(void *mem)
 {
 	vm_page_t m;
 
@@ -85,13 +73,11 @@
 		m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)mem));
 		pmap_kremove((vm_offset_t)mem);
 	}
 	KASSERT(m != NULL,
-	    ("Freeing UMA block at %p with no associated page", mem));
-#ifdef __powerpc64__
-	dump_drop_page(VM_PAGE_TO_PHYS(m));
-#endif
-	vm_page_unwire_noq(m);
-	vm_page_free(m);
+	    ("Releasing UMA block at %p with no associated page", mem));
 	atomic_subtract_int(&hw_uma_mdpages, 1);
+
+	return (m);
 }
+#endif /* UMA_MD_DMAP_HOOK */
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -93,6 +93,7 @@
 #include
 #include
 #include
+#include <vm/vm_reserv.h>
 #include
 #include
 #include
@@ -281,11 +282,13 @@
 void	uma_startup1(vm_offset_t);
 void	uma_startup2(void);
 
+static void *small_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
 static void *contig_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
+static void small_free(void *, vm_size_t, uint8_t);
 static void page_free(void *, vm_size_t, uint8_t);
 static void pcpu_page_free(void *, vm_size_t, uint8_t);
 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
@@ -1868,6 +1871,25 @@
 	return (NULL);
 }
 
+/*
+ * Architectures that use the direct map to speed up UMA allocations may
+ * provide their own uma_vm_page_to_dmap() and uma_dmap_to_vm_page()
+ * implementations by defining UMA_MD_DMAP_HOOK (see powerpc/uma_machdep.c).
+ */
+#if defined(UMA_USE_DMAP) && !defined(UMA_MD_DMAP_HOOK)
+vm_offset_t
+uma_vm_page_to_dmap(vm_page_t m)
+{
+	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+}
+
+vm_page_t
+uma_dmap_to_vm_page(void *mem)
+{
+	return (PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)mem)));
+}
+#endif
+
 /*
  * This function is intended to be used early on in place of page_alloc().  It
  * performs contiguous physical memory allocations and uses a bump allocator for
@@ -2005,6 +2027,38 @@
 	return (NULL);
 }
 
+/*
+ * Allocates a single page not belonging to a VM object.
+ *
+ * Arguments:
+ *	bytes  The number of bytes requested
+ *	wait   Shall we wait?
+ *
+ * Returns:
+ *	A pointer to the alloced memory or possibly
+ *	NULL if M_NOWAIT is set.
+ */
+static void *
+small_alloc(uma_zone_t zone, vm_size_t bytes __unused, int domain,
+    uint8_t *flags, int wait)
+{
+	vm_page_t m;
+
+	*flags = UMA_SLAB_PRIV;
+#if VM_NRESERVLEVEL > 0
+	m = vm_reserv_uma_small_alloc(domain, wait);
+#else
+	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+	    VM_ALLOC_WIRED);
+#endif
+	if (m == NULL)
+		return (NULL);
+	if ((wait & M_NODUMP) == 0)
+		dump_add_page(VM_PAGE_TO_PHYS(m));
+
+	return ((void *)uma_vm_page_to_dmap(m));
+}
+
 /*
  * Allocates a number of pages not belonging to a VM object
  *
@@ -2080,27 +2134,34 @@
 	bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
 }
 
-#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
-    int wait)
+/*
+ * Frees a single unmanaged page to the system.
+ *
+ * Arguments:
+ *	mem    A pointer to the memory to be freed
+ *	size   The size of the memory being freed
+ *	flags  The original p->us_flags field
+ *
+ * Returns:
+ *	Nothing
+ */
+static void
+small_free(void *mem, vm_size_t size __unused, uint8_t flags)
 {
 	vm_page_t m;
-	vm_paddr_t pa;
-	void *va;
 
-	*flags = UMA_SLAB_PRIV;
-	m = vm_page_alloc_noobj_domain(domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_WIRED);
-	if (m == NULL)
-		return (NULL);
-	pa = m->phys_addr;
-	if ((wait & M_NODUMP) == 0)
-		dump_add_page(pa);
-	va = (void *)PHYS_TO_DMAP(pa);
-	return (va);
-}
+	KASSERT(((vm_offset_t)mem) % PAGE_SIZE == 0,
+	    ("%s: unaligned address %p", __func__, mem));
+
+	m = uma_dmap_to_vm_page(mem);
+	dump_drop_page(VM_PAGE_TO_PHYS(m));
+#if VM_NRESERVLEVEL > 0
+	vm_reserv_uma_small_free(m);
+#else
+	vm_page_unwire_noq(m);
+	vm_page_free(m);
 #endif
+}
 
 /*
  * Frees a number of pages to the system
@@ -2164,21 +2225,6 @@
 	kva_free(sva, size);
 }
 
-#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
-void
-uma_small_free(void *mem, vm_size_t size, uint8_t flags)
-{
-	vm_page_t m;
-	vm_paddr_t pa;
-
-	pa = DMAP_TO_PHYS((vm_offset_t)mem);
-	dump_drop_page(pa);
-	m = PHYS_TO_VM_PAGE(pa);
-	vm_page_unwire_noq(m);
-	vm_page_free(m);
-}
-#endif
-
 /*
  * Zero fill initializer
  *
@@ -2528,7 +2574,7 @@
 	 */
 #ifdef UMA_USE_DMAP
 	if (keg->uk_ppera == 1)
-		keg->uk_allocf = uma_small_alloc;
+		keg->uk_allocf = small_alloc;
 	else
 #endif
 	if (booted < BOOT_KVA)
@@ -2541,7 +2587,7 @@
 		keg->uk_allocf = page_alloc;
 #ifdef UMA_USE_DMAP
 	if (keg->uk_ppera == 1)
-		keg->uk_freef = uma_small_free;
+		keg->uk_freef = small_free;
 	else
 #endif
 	if (keg->uk_flags & UMA_ZONE_PCPU)
@@ -5225,7 +5271,7 @@
 	keg->uk_offset = 0;
 	zone->uz_max_items = pages * keg->uk_ipers;
 #ifdef UMA_USE_DMAP
-	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
+	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : small_alloc;
 #else
 	keg->uk_allocf = noobj_alloc;
 #endif
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -665,12 +665,11 @@
 
 /*
  * The following two functions may be defined by architecture specific code
- * if they can provide more efficient allocation functions.  This is useful
- * for using direct mapped addresses.
+ * if additional operations must be performed when converting between
+ * vm_page_t and direct map addresses.
  */
-void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
-    uint8_t *pflag, int wait);
-void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
+vm_offset_t uma_vm_page_to_dmap(vm_page_t m);
+vm_page_t uma_dmap_to_vm_page(void *mem);
 
 /* Set a global soft limit on UMA managed memory. */
 void uma_set_limit(unsigned long limit);
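
For reference, a minimal sketch of what an architecture-specific implementation of the new hook pair could look like. It simply mirrors the generic fallback added to uma_core.c; a real port would also define UMA_MD_DMAP_HOOK in its <machine/vmparam.h> next to UMA_USE_DMAP and add whatever MD bookkeeping it needs (the powerpc version above keeps a page counter and handles the !hw_direct_map case). Everything here other than the two hook functions and the standard PHYS_TO_DMAP()/DMAP_TO_PHYS() macros is an illustrative assumption, not part of this patch.

/* Hypothetical <arch>/uma_machdep.c sketch; not part of this patch. */
#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/vmparam.h>

#ifdef UMA_MD_DMAP_HOOK
/* Convert a freshly allocated, wired page into the address UMA hands out. */
vm_offset_t
uma_vm_page_to_dmap(vm_page_t m)
{
	/* Per-page MD bookkeeping (counters, cache maintenance) would go here. */
	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

/* Recover the vm_page backing an address produced by the function above. */
vm_page_t
uma_dmap_to_vm_page(void *mem)
{
	/* small_free() asserts that mem is page aligned before calling this. */
	return (PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)mem)));
}
#endif /* UMA_MD_DMAP_HOOK */

Under this split, small_alloc()/small_free() in uma_core.c own the policy (wiring, dump bookkeeping, reservation use) and the MD hooks are reduced to address translation plus any architecture-specific side effects.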