diff --git a/sys/amd64/amd64/uma_machdep.c b/sys/amd64/amd64/uma_machdep.c
deleted file mode 100644
--- a/sys/amd64/amd64/uma_machdep.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2003 Alan L. Cox
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
-    int wait)
-{
-        vm_page_t m;
-        vm_paddr_t pa;
-        void *va;
-
-        *flags = UMA_SLAB_PRIV;
-        m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
-            VM_ALLOC_WIRED);
-        if (m == NULL)
-                return (NULL);
-        pa = m->phys_addr;
-        if ((wait & M_NODUMP) == 0)
-                dump_add_page(pa);
-        va = (void *)PHYS_TO_DMAP(pa);
-        return (va);
-}
-
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
-{
-        vm_page_t m;
-        vm_paddr_t pa;
-
-        pa = DMAP_TO_PHYS((vm_offset_t)mem);
-        dump_drop_page(pa);
-        m = PHYS_TO_VM_PAGE(pa);
-        vm_page_unwire_noq(m);
-        vm_page_free(m);
-}
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -72,12 +72,12 @@
 #endif
 
 /*
- * We provide a machine specific single page allocator through the use
- * of the direct mapped segment.  This uses 2MB pages for reduced
+ * We provide a single page allocator through the use of the
+ * direct mapped segment.  This uses 2MB pages for reduced
  * TLB pressure.
  */
 #if !defined(KASAN) && !defined(KMSAN)
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
 #endif
 
 /*
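The arm64 and riscv uma_machdep.c deletions below are line-for-line copies of the amd64 file above. For orientation while reviewing, here is a minimal sketch of the consolidated allocation path the three files collapse into. It assumes the generic side of the merge mirrors the removed per-arch code; the name small_alloc and its exact placement are hypothetical, since this excerpt only shows the small_free() half of the uma_core.c changes.

static void *
small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
    int wait)
{
        vm_page_t m;

        *flags = UMA_SLAB_PRIV;
        m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
            VM_ALLOC_WIRED);
        if (m == NULL)
                return (NULL);
        /* Centralized dumpset bookkeeping (see uma_dump_add_page() below). */
        uma_dump_add_page(VM_PAGE_TO_PHYS(m), wait);
        /* Per-arch, or default, page-to-DMAP translation hook. */
        return ((void *)uma_vm_page_to_dmap(m));
}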
diff --git a/sys/arm64/arm64/uma_machdep.c b/sys/arm64/arm64/uma_machdep.c
deleted file mode 100644
--- a/sys/arm64/arm64/uma_machdep.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*-
- * Copyright (c) 2003 Alan L. Cox
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
-    int wait)
-{
-        vm_page_t m;
-        vm_paddr_t pa;
-        void *va;
-
-        *flags = UMA_SLAB_PRIV;
-        m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
-            VM_ALLOC_WIRED);
-        if (m == NULL)
-                return (NULL);
-        pa = m->phys_addr;
-        if ((wait & M_NODUMP) == 0)
-                dump_add_page(pa);
-        va = (void *)PHYS_TO_DMAP(pa);
-        return (va);
-}
-
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
-{
-        vm_page_t m;
-        vm_paddr_t pa;
-
-        pa = DMAP_TO_PHYS((vm_offset_t)mem);
-        dump_drop_page(pa);
-        m = PHYS_TO_VM_PAGE(pa);
-        vm_page_unwire_noq(m);
-        vm_page_free(m);
-}
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -293,7 +293,7 @@
 #endif
 
 #if !defined(KASAN) && !defined(KMSAN)
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
 #endif
 
 #ifndef LOCORE
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -92,7 +92,6 @@
 amd64/amd64/sys_machdep.c       standard
 amd64/amd64/trap.c              standard
 amd64/amd64/uio_machdep.c       standard
-amd64/amd64/uma_machdep.c       standard
 amd64/amd64/vm_machdep.c        standard
 amd64/pci/pci_cfgreg.c          optional        pci
 cddl/dev/dtrace/amd64/dtrace_asm.S      optional        dtrace compile-with "${DTRACE_S}"
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -78,7 +78,6 @@
 arm64/arm64/sys_machdep.c       standard
 arm64/arm64/trap.c              standard
 arm64/arm64/uio_machdep.c       standard
-arm64/arm64/uma_machdep.c       standard
 arm64/arm64/undefined.c         standard
 arm64/arm64/unwind.c            optional        ddb | kdtrace_hooks | stack \
        compile-with "${NORMAL_C:N-fsanitize*:N-fno-sanitize*}"
diff --git a/sys/conf/files.riscv b/sys/conf/files.riscv
--- a/sys/conf/files.riscv
+++ b/sys/conf/files.riscv
@@ -67,7 +67,6 @@
 riscv/riscv/trap.c              standard
 riscv/riscv/timer.c             standard
 riscv/riscv/uio_machdep.c       standard
-riscv/riscv/uma_machdep.c       standard
 riscv/riscv/unwind.c            optional        ddb | kdtrace_hooks | stack
 riscv/riscv/vm_machdep.c        standard
diff --git a/sys/kern/subr_vmem.c b/sys/kern/subr_vmem.c
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -624,14 +624,14 @@
                 uma_zone_reclaim(vm->vm_qcache[i].qc_cache,
                     UMA_RECLAIM_DRAIN);
 }
 
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
 static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
 
 /*
  * vmem_bt_alloc:  Allocate a new page of boundary tags.
  *
- * On architectures with uma_small_alloc there is no recursion; no address
+ * On architectures with UMA_USE_DMAP there is no recursion; no address
  * space need be allocated to allocate boundary tags.  For the others, we
  * must handle recursion.  Boundary tags are necessary to allocate new
  * boundary tags.
@@ -640,7 +640,7 @@
  * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
  * when allocating the page to hold new boundary tags.  In this way the
  * reserve is automatically filled by the allocation that uses the reserve.
- 
+ *
  * We still have to guarantee that the new tags are allocated atomically since
  * many threads may try concurrently.  The bt_lock provides this guarantee.
  * We convert WAITOK allocations to NOWAIT and then handle the blocking here
@@ -707,7 +707,7 @@
         vmem_bt_zone = uma_zcreate("vmem btag", sizeof(struct vmem_btag),
             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
         mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
         uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
         /*
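The patched comment above compresses a subtle protocol: on platforms without UMA_USE_DMAP, allocating a boundary tag may itself require address space, which requires boundary tags. A condensed sketch of the discipline the comment describes follows — not the verbatim vmem_bt_alloc() body, and the retry mechanics here are illustrative only:

static void *
bt_alloc_sketch(int flags)
{
        void *bt;

        for (;;) {
                /* Serialize dips into the preallocated tag reserve. */
                mtx_lock(&vmem_bt_lock);
                bt = uma_zalloc(vmem_bt_zone,
                    M_NOWAIT | M_NOVM | M_USE_RESERVE);
                mtx_unlock(&vmem_bt_lock);
                if (bt != NULL || (flags & M_WAITOK) == 0)
                        return (bt);
                /* WAITOK was converted to NOWAIT; block out here, retry. */
                pause("btalloc", 1);
        }
}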
diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h
--- a/sys/powerpc/include/vmparam.h
+++ b/sys/powerpc/include/vmparam.h
@@ -122,13 +122,15 @@
  * Use the direct-mapped BAT registers for UMA small allocs.  This
  * takes pressure off the small amount of available KVA.
  */
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
+#define UMA_MD_DMAP_HOOK
 #else
 
 /* Book-E */
 
 /* Use the direct map for UMA small allocs on powerpc64. */
 #ifdef __powerpc64__
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
+#define UMA_MD_DMAP_HOOK
 #else
 #define VM_MIN_KERNEL_ADDRESS   0xc0000000
 #define VM_MAX_KERNEL_ADDRESS   0xffffefff
diff --git a/sys/powerpc/powerpc/uma_machdep.c b/sys/powerpc/powerpc/uma_machdep.c
--- a/sys/powerpc/powerpc/uma_machdep.c
+++ b/sys/powerpc/powerpc/uma_machdep.c
@@ -38,44 +38,32 @@
 #include
 #include
 
+#ifdef UMA_MD_DMAP_HOOK
 static int hw_uma_mdpages;
 SYSCTL_INT(_hw, OID_AUTO, uma_mdpages, CTLFLAG_RD, &hw_uma_mdpages, 0,
            "UMA MD pages in use");
 
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
-    int wait)
+vm_offset_t
+uma_vm_page_to_dmap(vm_page_t m)
 {
-        void *va;
+        vm_offset_t va;
         vm_paddr_t pa;
-        vm_page_t m;
-
-        *flags = UMA_SLAB_PRIV;
-
-        m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
-            VM_ALLOC_WIRED);
-        if (m == NULL)
-                return (NULL);
 
         pa = VM_PAGE_TO_PHYS(m);
-#ifdef __powerpc64__
-        if ((wait & M_NODUMP) == 0)
-                dump_add_page(pa);
-#endif
         if (!hw_direct_map) {
                 pmap_kenter(pa, pa);
-                va = (void *)(vm_offset_t)pa;
+                va = (vm_offset_t)pa;
         } else {
-                va = (void *)(vm_offset_t)PHYS_TO_DMAP(pa);
+                va = (vm_offset_t)PHYS_TO_DMAP(pa);
         }
 
         atomic_add_int(&hw_uma_mdpages, 1);
 
         return (va);
 }
 
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
+vm_page_t
+uma_dmap_to_vm_page(void *mem)
 {
         vm_page_t m;
 
@@ -85,13 +73,10 @@
                 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)mem));
                 pmap_kremove((vm_offset_t)mem);
         }
         KASSERT(m != NULL,
-            ("Freeing UMA block at %p with no associated page", mem));
-#ifdef __powerpc64__
-        dump_drop_page(VM_PAGE_TO_PHYS(m));
-#endif
-        vm_page_unwire_noq(m);
-        vm_page_free(m);
+            ("Releasing UMA block at %p with no associated page", mem));
         atomic_subtract_int(&hw_uma_mdpages, 1);
+
+        return (m);
 }
+#endif /* UMA_MD_DMAP_HOOK */
diff --git a/sys/riscv/include/vmparam.h b/sys/riscv/include/vmparam.h
--- a/sys/riscv/include/vmparam.h
+++ b/sys/riscv/include/vmparam.h
@@ -234,7 +234,7 @@
 #define VM_INITIAL_PAGEIN       16
 #endif
 
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
 
 #ifndef LOCORE
 extern vm_paddr_t dmap_phys_base;
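powerpc is the one architecture that keeps a machine-dependent file: 32-bit AIM kernels may run with hw_direct_map false, so its hooks must pmap_kenter()/pmap_kremove() pages on the fly. Sketched from the changes above, the full opt-in contract for an architecture that needs MD translation is two defines plus two functions. The bodies shown here are the generic defaults added to uma_core.c below; a hooked architecture substitutes its own logic:

/* <machine>/include/vmparam.h: take the DMAP path, with MD translation. */
#define UMA_USE_DMAP
#define UMA_MD_DMAP_HOOK

/* <machine>/<machine>/uma_machdep.c: the two required translations. */
vm_offset_t
uma_vm_page_to_dmap(vm_page_t m)
{
        return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

vm_page_t
uma_dmap_to_vm_page(void *mem)
{
        return (PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)mem)));
}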
diff --git a/sys/riscv/riscv/uma_machdep.c b/sys/riscv/riscv/uma_machdep.c
deleted file mode 100644
--- a/sys/riscv/riscv/uma_machdep.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*-
- * Copyright (c) 2003 Alan L. Cox
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
-    int wait)
-{
-        vm_page_t m;
-        vm_paddr_t pa;
-        void *va;
-
-        *flags = UMA_SLAB_PRIV;
-        m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
-            VM_ALLOC_WIRED);
-        if (m == NULL)
-                return (NULL);
-        pa = m->phys_addr;
-        if ((wait & M_NODUMP) == 0)
-                dump_add_page(pa);
-        va = (void *)PHYS_TO_DMAP(pa);
-        return (va);
-}
-
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
-{
-        vm_page_t m;
-        vm_paddr_t pa;
-
-        pa = DMAP_TO_PHYS((vm_offset_t)mem);
-        dump_drop_page(pa);
-        m = PHYS_TO_VM_PAGE(pa);
-        vm_page_unwire_noq(m);
-        vm_page_free(m);
-}
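The uma_core.c diff below supplies the generic halves of the merge: inline uma_dump_add_page()/uma_dump_drop_page() helpers that absorb the per-arch #if defined(...) dumpset lists, and default implementations of the two translation hooks for every UMA_USE_DMAP architecture that does not define UMA_MD_DMAP_HOOK. With the defaults, translation is a pure address computation and therefore round-trips exactly; a hypothetical assertion (not part of the patch) makes the invariant explicit:

static void
uma_dmap_roundtrip_check(vm_page_t m)
{
        void *va;

        /* Default hook: va == PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)). */
        va = (void *)uma_vm_page_to_dmap(m);
        KASSERT(uma_dmap_to_vm_page(va) == m,
            ("DMAP translation of page %p does not round-trip", m));
}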
It * performs contiguous physical memory allocations and uses a bump allocator for @@ -1890,13 +1928,8 @@ pa = VM_PAGE_TO_PHYS(m); for (i = 0; i < pages; i++, pa += PAGE_SIZE) { -#if defined(__aarch64__) || defined(__amd64__) || \ - defined(__riscv) || defined(__powerpc64__) - if ((wait & M_NODUMP) == 0) - dump_add_page(pa); -#endif + uma_dump_add_page(pa, wait); } - /* Allocate KVA and indirectly advance bootmem. */ return ((void *)pmap_map(&bootmem, m->phys_addr, m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE)); @@ -1918,10 +1951,7 @@ if (va >= bootstart && va + bytes <= bootmem) pmap_remove(kernel_pmap, va, va + bytes); for (; bytes != 0; bytes -= PAGE_SIZE, m++) { -#if defined(__aarch64__) || defined(__amd64__) || \ - defined(__riscv) || defined(__powerpc64__) - dump_drop_page(VM_PAGE_TO_PHYS(m)); -#endif + uma_dump_drop_page(VM_PAGE_TO_PHYS(m)); vm_page_unwire_noq(m); vm_page_free(m); } @@ -2079,6 +2109,35 @@ bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT)); } +/* + * Frees a single unmanaged page to the system. + * + * Arguments: + * mem A pointer to the memory to be freed + * size The size of the memory being freed + * flags The original p->us_flags field + * + * Returns: + * Nothing + */ +static void +small_free(void *mem, vm_size_t size __unused, uint8_t flags) +{ + vm_page_t m; + + KASSERT(((vm_offset_t)mem) % PAGE_SIZE == 0, + ("%s: unaligned address %p", __func__, mem)); + + m = uma_dmap_to_vm_page(mem); + uma_dump_drop_page(VM_PAGE_TO_PHYS(m)); +#if VM_NRESERVLEVEL > 0 + vm_reserv_uma_small_free(m); +#else + vm_page_unwire_noq(m); + vm_page_free(m); +#endif +} + /* * Frees a number of pages to the system * @@ -3154,7 +3213,7 @@ smr_init(); } -#ifndef UMA_MD_SMALL_ALLOC +#ifndef UMA_USE_DMAP extern void vm_radix_reserve_kva(void); #endif @@ -3174,7 +3233,7 @@ vm_map_unlock(kernel_map); } -#ifndef UMA_MD_SMALL_ALLOC +#ifndef UMA_USE_DMAP /* Set up radix zone to use noobj_alloc. */ vm_radix_reserve_kva(); #endif @@ -5171,7 +5230,7 @@ pages = howmany(count, keg->uk_ipers) * keg->uk_ppera; -#ifdef UMA_MD_SMALL_ALLOC +#ifdef UMA_USE_DMAP if (keg->uk_ppera > 1) { #else if (1) { diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h --- a/sys/vm/uma_int.h +++ b/sys/vm/uma_int.h @@ -665,12 +665,11 @@ /* * The following two functions may be defined by architecture specific code - * if they can provide more efficient allocation functions. This is useful - * for using direct mapped addresses. + * if additional operations have to be performed before using + * direct map addresses. */ -void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, - uint8_t *pflag, int wait); -void uma_small_free(void *mem, vm_size_t size, uint8_t flags); +vm_offset_t uma_vm_page_to_dmap(vm_page_t m); +vm_page_t uma_dmap_to_vm_page(void *mem); /* Set a global soft limit on UMA managed memory. */ void uma_set_limit(unsigned long limit); diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -172,7 +172,7 @@ start = end; \ } -#ifndef UMA_MD_SMALL_ALLOC +#ifndef UMA_USE_DMAP /* * Allocate a new slab for kernel map entries. The kernel map may be locked or @@ -264,7 +264,7 @@ kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOBUCKET); -#ifndef UMA_MD_SMALL_ALLOC +#ifndef UMA_USE_DMAP /* Reserve an extra map entry for use when replenishing the reserve. 
         uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1);
         uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1);
@@ -660,7 +660,7 @@
         VM_MAP_UNLOCK_CONSISTENT(map);
         if (map->system_map) {
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
                 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
                         uma_prealloc(kmapentzone, 1);
                         map->flags &= ~MAP_REPLENISH;
@@ -937,7 +937,7 @@
 {
         vm_map_entry_t new_entry;
 
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
         if (map == kernel_map) {
                 VM_MAP_ASSERT_LOCKED(map);
diff --git a/sys/vm/vm_radix.c b/sys/vm/vm_radix.c
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -82,7 +82,7 @@
         uma_zfree_smr(vm_radix_node_zone, node);
 }
 
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
 void vm_radix_reserve_kva(void);
 /*
  * Reserve the KVA necessary to satisfy the node allocation.
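One last reading note: the final uma_core.c hunk above (the keg->uk_ppera test) reduces to a single predicate. A hypothetical helper, not part of the patch, spells it out — with a direct map, only multi-page slabs need dedicated kernel VA:

static bool
keg_needs_kva(uma_keg_t keg)
{
#ifdef UMA_USE_DMAP
        /* Single-page slabs are served straight from the direct map. */
        return (keg->uk_ppera > 1);
#else
        /* Without a DMAP, every keg must be backed by kernel VA. */
        return (true);
#endif
}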