Index: amd64/amd64/mp_machdep.c
===================================================================
--- amd64/amd64/mp_machdep.c
+++ amd64/amd64/mp_machdep.c
@@ -402,18 +402,14 @@
 	apic_id = cpu_apic_ids[cpu];
 
 	/* allocate and set up an idle stack data page */
-	bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
-	    kstack_pages * PAGE_SIZE, M_WAITOK | M_ZERO);
-	doublefault_stack = (char *)kmem_malloc(kernel_arena,
-	    PAGE_SIZE, M_WAITOK | M_ZERO);
-	mce_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
+	bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
 	    M_WAITOK | M_ZERO);
-	nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
-	    M_WAITOK | M_ZERO);
-	dbg_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
-	    M_WAITOK | M_ZERO);
-	dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-	    M_WAITOK | M_ZERO);
+	doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK |
+	    M_ZERO);
+	mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	nmi_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	dbg_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 
 	bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
 	bootAP = cpu;
Index: amd64/amd64/pmap.c
===================================================================
--- amd64/amd64/pmap.c
+++ amd64/amd64/pmap.c
@@ -1412,8 +1412,7 @@
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
-	    M_WAITOK | M_ZERO);
+	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);
 	TAILQ_INIT(&pv_dummy.pv_list);
Index: amd64/amd64/sys_machdep.c
===================================================================
--- amd64/amd64/sys_machdep.c
+++ amd64/amd64/sys_machdep.c
@@ -361,8 +361,8 @@
 	 */
 	pcb = td->td_pcb;
 	if (pcb->pcb_tssp == NULL) {
-		tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
-		    ctob(IOPAGES + 1), M_WAITOK);
+		tssp = (struct amd64tss *)kmem_malloc(ctob(IOPAGES + 1),
+		    M_WAITOK);
 		pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
 		    ctob(IOPAGES + 1), false);
 		iomap = (char *)&tssp[1];
@@ -463,7 +463,7 @@
 	mtx_unlock(&dt_lock);
 	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
 	sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
-	sva = kmem_malloc(kernel_arena, sz, M_WAITOK | M_ZERO);
+	sva = kmem_malloc(sz, M_WAITOK | M_ZERO);
 	new_ldt->ldt_base = (caddr_t)sva;
 	pmap_pti_add_kva(sva, sva + sz, false);
 	new_ldt->ldt_refcnt = 1;
Index: arm/arm/mp_machdep.c
===================================================================
--- arm/arm/mp_machdep.c
+++ arm/arm/mp_machdep.c
@@ -119,8 +119,7 @@
 
 	/* Reserve memory for application processors */
 	for(i = 0; i < (mp_ncpus - 1); i++)
-		dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-		    M_WAITOK | M_ZERO);
+		dpcpu[i] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 
 	dcache_wbinv_poc_all();
Index: arm/arm/pmap-v6.c
===================================================================
--- arm/arm/pmap-v6.c
+++ arm/arm/pmap-v6.c
@@ -1782,8 +1782,7 @@
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
-	    M_WAITOK | M_ZERO);
+	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);
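The hunks above (and all the machine-dependent hunks that follow) are purely mechanical: each caller drops the now-ignored arena argument and reflows the call. A minimal before/after sketch of the calling convention, assuming a kernel-module compilation context; the helper name alloc_percpu_page() is illustrative, not from the tree:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>		/* DPCPU_SIZE */
#include <vm/vm.h>
#include <vm/vm_extern.h>	/* kmem_malloc() */

/* Hypothetical helper showing the old vs. new convention. */
static void *
alloc_percpu_page(void)
{
	void *p;

	/*
	 * Before this change, the first argument named an arena, but
	 * kmem_malloc() ignored it; callers passed kernel_arena,
	 * kmem_arena, or even NULL interchangeably:
	 *
	 *	p = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
	 *	    M_WAITOK | M_ZERO);
	 *
	 * After: size and flags only.
	 */
	p = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
	return (p);
}

With M_WAITOK the allocation sleeps until it succeeds, which is why the boot-time callers in these hunks do not check the return value.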
Index: arm64/arm64/mp_machdep.c
===================================================================
--- arm64/arm64/mp_machdep.c
+++ arm64/arm64/mp_machdep.c
@@ -484,8 +484,7 @@
 	pcpup = &__pcpu[cpuid];
 	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
 
-	dpcpu[cpuid - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-	    M_WAITOK | M_ZERO);
+	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 	dpcpu_init(dpcpu[cpuid - 1], cpuid);
 
 	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
Index: arm64/arm64/pmap.c
===================================================================
--- arm64/arm64/pmap.c
+++ arm64/arm64/pmap.c
@@ -923,8 +923,7 @@
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
-	    M_WAITOK | M_ZERO);
+	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);
 	TAILQ_INIT(&pv_dummy.pv_list);
Index: compat/linuxkpi/common/src/linux_page.c
===================================================================
--- compat/linuxkpi/common/src/linux_page.c
+++ compat/linuxkpi/common/src/linux_page.c
@@ -165,7 +165,7 @@
 	vm_offset_t addr;
 
 	if ((flags & GFP_DMA32) == 0) {
-		addr = kmem_malloc(kmem_arena, size, flags & GFP_NATIVE_MASK);
+		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
 	} else {
 		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
 		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
Index: dev/hyperv/vmbus/hyperv.c
===================================================================
--- dev/hyperv/vmbus/hyperv.c
+++ dev/hyperv/vmbus/hyperv.c
@@ -283,8 +283,8 @@
 	 *   the NX bit.
 	 * - Assume kmem_malloc() returns properly aligned memory.
 	 */
-	hypercall_context.hc_addr = (void *)kmem_malloc(kernel_arena, PAGE_SIZE,
-	    M_EXEC | M_WAITOK);
+	hypercall_context.hc_addr = (void *)kmem_malloc(PAGE_SIZE, M_EXEC |
+	    M_WAITOK);
 	hypercall_context.hc_paddr = vtophys(hypercall_context.hc_addr);
 
 	/* Get the 'reserved' bits, which requires preservation. */
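The hyperv.c hunk is the one call site that passes M_EXEC, which requests a mapping without the NX bit so the page can hold hypercall code; the flag is unaffected by the signature change. A hedged sketch of that pattern under the new API; alloc_exec_page() and its parameter are made up for illustration, and the include set is approximate:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/pmap.h>		/* vtophys() */
#include <vm/vm_extern.h>

/* Illustrative only: mirrors the hypercall-page setup in hyperv.c. */
static void *
alloc_exec_page(vm_paddr_t *pap)
{
	vm_offset_t va;

	/* M_EXEC: map the page executable; M_WAITOK sleeps until success. */
	va = kmem_malloc(PAGE_SIZE, M_EXEC | M_WAITOK);

	/* The hypervisor is handed the physical address, as hyperv.c does. */
	*pap = vtophys(va);
	return ((void *)va);
}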
Index: i386/i386/mp_machdep.c
===================================================================
--- i386/i386/mp_machdep.c
+++ i386/i386/mp_machdep.c
@@ -328,11 +328,9 @@
 	apic_id = cpu_apic_ids[cpu];
 
 	/* allocate and set up a boot stack data page */
-	bootstacks[cpu] =
-	    (char *)kmem_malloc(kernel_arena, kstack_pages * PAGE_SIZE,
+	bootstacks[cpu] = (char *)kmem_malloc(kstack_pages * PAGE_SIZE,
 	    M_WAITOK | M_ZERO);
-	dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-	    M_WAITOK | M_ZERO);
+	dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 	/* setup a vector to our boot code */
 	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
 	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
Index: i386/i386/pmap.c
===================================================================
--- i386/i386/pmap.c
+++ i386/i386/pmap.c
@@ -998,8 +998,7 @@
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
-	    M_WAITOK | M_ZERO);
+	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);
Index: mips/mips/mp_machdep.c
===================================================================
--- mips/mips/mp_machdep.c
+++ mips/mips/mp_machdep.c
@@ -185,7 +185,7 @@
 	int cpus, ms;
 
 	cpus = mp_naps;
-	dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);
+	dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 
 	mips_sync();
Index: powerpc/powerpc/mp_machdep.c
===================================================================
--- powerpc/powerpc/mp_machdep.c
+++ powerpc/powerpc/mp_machdep.c
@@ -168,8 +168,8 @@
 		void *dpcpu;
 
 		pc = &__pcpu[cpu.cr_cpuid];
-		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-		    M_WAITOK | M_ZERO);
+		dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK |
+		    M_ZERO);
 		pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
 		dpcpu_init(dpcpu, cpu.cr_cpuid);
 	} else {
Index: riscv/riscv/mp_machdep.c
===================================================================
--- riscv/riscv/mp_machdep.c
+++ riscv/riscv/mp_machdep.c
@@ -391,8 +391,7 @@
 
 	pcpu_init(pcpup, id, sizeof(struct pcpu));
 
-	dpcpu[id - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-	    M_WAITOK | M_ZERO);
+	dpcpu[id - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 	dpcpu_init(dpcpu[id - 1], id);
 
 	printf("Starting CPU %u (%lx)\n", id, target_cpu);
Index: sparc64/sparc64/mp_machdep.c
===================================================================
--- sparc64/sparc64/mp_machdep.c
+++ sparc64/sparc64/mp_machdep.c
@@ -342,12 +342,10 @@
 		cpuid_to_mid[cpuid] = mid;
 		cpu_identify(csa->csa_ver, clock, cpuid);
 
-		va = kmem_malloc(kernel_arena, PCPU_PAGES * PAGE_SIZE,
-		    M_WAITOK | M_ZERO);
+		va = kmem_malloc(PCPU_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
 		pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
 		pcpu_init(pc, cpuid, sizeof(*pc));
-		dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
-		    M_WAITOK | M_ZERO), cpuid);
+		dpcpu_init((void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO), cpuid);
 		pc->pc_addr = va;
 		pc->pc_clock = clock;
 		pc->pc_impl = cpu_impl;
Index: vm/uma_core.c
===================================================================
--- vm/uma_core.c
+++ vm/uma_core.c
@@ -3687,7 +3687,7 @@
 	if (slab == NULL)
 		return (NULL);
 	if (domain == UMA_ANYDOMAIN)
-		addr = kmem_malloc(NULL, size, wait);
+		addr = kmem_malloc(size, wait);
 	else
 		addr = kmem_malloc_domain(domain, size, wait);
 	if (addr != 0) {
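uma_core.c is the only caller that passed NULL for the arena, underscoring that the argument carried no information. The same hunk also shows how the arena-less kmem_malloc() sits next to the NUMA-aware path: UMA_ANYDOMAIN requests go through kmem_malloc(), domain-pinned ones through kmem_malloc_domain(), which this diff leaves untouched. A sketch of that dispatch; the wrapper name is hypothetical:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>		/* UMA_ANYDOMAIN */

/* Hypothetical wrapper mirroring the dispatch in the uma_core.c hunk. */
static vm_offset_t
kmem_malloc_anywhere(int domain, vm_size_t size, int wait)
{
	vm_offset_t addr;

	if (domain == UMA_ANYDOMAIN)
		addr = kmem_malloc(size, wait);			/* any domain */
	else
		addr = kmem_malloc_domain(domain, size, wait);	/* pinned */
	return (addr);	/* 0 on failure, hence the addr != 0 check above */
}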
Index: vm/vm_extern.h
===================================================================
--- vm/vm_extern.h
+++ vm/vm_extern.h
@@ -64,7 +64,7 @@
 vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
-vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
+vm_offset_t kmem_malloc(vm_size_t size, int flags);
 vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
 void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
Index: vm/vm_init.c
===================================================================
--- vm/vm_init.c
+++ vm/vm_init.c
@@ -278,8 +278,7 @@
 		    (vm_paddr_t)1 << 32, ~(vm_paddr_t)0, VM_MEMATTR_DEFAULT);
 		if (firstaddr == 0)
 #endif
-		firstaddr = kmem_malloc(kernel_arena, size,
-		    M_ZERO | M_WAITOK);
+		firstaddr = kmem_malloc(size, M_ZERO | M_WAITOK);
 		if (firstaddr == 0)
 			panic("startup: no room for tables");
 		goto again;
Index: vm/vm_kern.c
===================================================================
--- vm/vm_kern.c
+++ vm/vm_kern.c
@@ -392,7 +392,7 @@
 }
 
 vm_offset_t
-kmem_malloc(struct vmem *vmem __unused, vm_size_t size, int flags)
+kmem_malloc(vm_size_t size, int flags)
 {
 	struct vm_domainset_iter di;
 	vm_offset_t addr;
Index: x86/xen/pv.c
===================================================================
--- x86/xen/pv.c
+++ x86/xen/pv.c
@@ -350,18 +350,12 @@
 	const size_t stacksize = kstack_pages * PAGE_SIZE;
 
 	/* allocate and set up an idle stack data page */
-	bootstacks[cpu] =
-	    (void *)kmem_malloc(kernel_arena, stacksize, M_WAITOK | M_ZERO);
-	doublefault_stack =
-	    (char *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
-	mce_stack =
-	    (char *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
-	nmi_stack =
-	    (char *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
-	dbg_stack =
-	    (void *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
-	dpcpu =
-	    (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);
+	bootstacks[cpu] = (void *)kmem_malloc(stacksize, M_WAITOK | M_ZERO);
+	doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	nmi_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	dbg_stack = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
+	dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 
 	bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
 	bootAP = cpu;
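Note the asymmetry the vm_extern.h hunk leaves behind: kmem_malloc() loses its vmem parameter while kmem_free() keeps it, so frees still name the arena explicitly. A minimal sketch of a matched allocate/free pair under the new API, assuming kernel_arena is the arena to free to (as it was for every caller converted above); the function is illustrative:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>		/* kernel_arena */

/* Hypothetical round-trip: allocate, use, release. */
static void
kmem_roundtrip(vm_size_t size)
{
	vm_offset_t addr;

	/* Allocations are page-granular; pmap.c rounds the same way. */
	size = round_page(size);
	addr = kmem_malloc(size, M_WAITOK | M_ZERO);

	/* ... use [addr, addr + size) ... */

	/* kmem_free() still takes the arena, per the vm_extern.h hunk. */
	kmem_free(kernel_arena, addr, size);
}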