Index: arm/allwinner/a10_fb.c
===================================================================
--- arm/allwinner/a10_fb.c
+++ arm/allwinner/a10_fb.c
@@ -178,8 +178,8 @@
 static int
 a10fb_allocfb(struct a10fb_softc *sc)
 {
-	sc->vaddr = kmem_alloc_contig(kernel_arena, sc->fbsize,
-	    M_NOWAIT | M_ZERO, 0, ~0, FB_ALIGN, 0, VM_MEMATTR_WRITE_COMBINING);
+	sc->vaddr = kmem_alloc_contig(sc->fbsize, M_NOWAIT | M_ZERO, 0, ~0,
+	    FB_ALIGN, 0, VM_MEMATTR_WRITE_COMBINING);
 	if (sc->vaddr == 0) {
 		device_printf(sc->dev, "failed to allocate FB memory\n");
 		return (ENOMEM);
Index: arm/arm/busdma_machdep-v4.c
===================================================================
--- arm/arm/busdma_machdep-v4.c
+++ arm/arm/busdma_machdep-v4.c
@@ -750,9 +750,8 @@
 		*vaddr = (void *)kmem_alloc_attr(dmat->maxsize, mflags, 0,
 		    dmat->lowaddr, memattr);
 	} else {
-		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
-		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
-		    memattr);
+		*vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0,
+		    dmat->lowaddr, dmat->alignment, dmat->boundary, memattr);
 	}
 	if (*vaddr == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
Index: arm/arm/busdma_machdep-v6.c
===================================================================
--- arm/arm/busdma_machdep-v6.c
+++ arm/arm/busdma_machdep-v6.c
@@ -815,9 +815,8 @@
 		*vaddr = (void *)kmem_alloc_attr(dmat->maxsize, mflags, 0,
 		    dmat->lowaddr, memattr);
 	} else {
-		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
-		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
-		    memattr);
+		*vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0,
+		    dmat->lowaddr, dmat->alignment, dmat->boundary, memattr);
 	}
 	if (*vaddr == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
Index: arm/arm/pmap-v6.c
===================================================================
--- arm/arm/pmap-v6.c
+++ arm/arm/pmap-v6.c
@@ -2219,9 +2219,8 @@
 	 */
 
 	if (pmap->pm_pt1 == NULL) {
-		pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(kernel_arena,
-		    NB_IN_PT1, M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0,
-		    pt_memattr);
+		pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(NB_IN_PT1,
+		    M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, pt_memattr);
 		if (pmap->pm_pt1 == NULL)
 			return (0);
 	}
Index: arm/freescale/imx/imx6_sdma.c
===================================================================
--- arm/freescale/imx/imx6_sdma.c
+++ arm/freescale/imx/imx6_sdma.c
@@ -179,9 +179,8 @@
 	chn = i;
 
 	/* Allocate area for buffer descriptors */
-	channel->bd = (void *)kmem_alloc_contig(kernel_arena,
-	    PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE, 0,
-	    VM_MEMATTR_UNCACHEABLE);
+	channel->bd = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
+	    PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
 
 	return (chn);
 }
@@ -392,8 +391,8 @@
 	sz = SDMA_N_CHANNELS * sizeof(struct sdma_channel_control) + \
 	    sizeof(struct sdma_context_data);
 
-	sc->ccb = (void *)kmem_alloc_contig(kernel_arena,
-	    sz, M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
+	sc->ccb = (void *)kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
+	    VM_MEMATTR_UNCACHEABLE);
 	sc->ccb_phys = vtophys(sc->ccb);
 
 	sc->context = (void *)((char *)sc->ccb + \
@@ -411,9 +410,8 @@
 
 	/* Channel 0 is used for booting firmware */
 	chn = 0;
-	sc->bd0 = (void *)kmem_alloc_contig(kernel_arena,
-	    PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE, 0,
-	    VM_MEMATTR_UNCACHEABLE);
+	sc->bd0 = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
+	    0, VM_MEMATTR_UNCACHEABLE);
 	bd0 = sc->bd0;
 	sc->ccb[chn].base_bd_ptr = vtophys(bd0);
 	sc->ccb[chn].current_bd_ptr = vtophys(bd0);
Index: arm/nvidia/drm2/tegra_dc.c
===================================================================
--- arm/nvidia/drm2/tegra_dc.c
+++ arm/nvidia/drm2/tegra_dc.c
@@ -1234,9 +1234,9 @@
 	}
 
 	/* allocate memory for cursor cache */
-	sc->tegra_crtc.cursor_vbase = kmem_alloc_contig(kernel_arena,
-	    256 * 256 * 4, M_WAITOK | M_ZERO,
-	    0, -1UL, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
+	sc->tegra_crtc.cursor_vbase = kmem_alloc_contig(256 * 256 * 4,
+	    M_WAITOK | M_ZERO, 0, -1UL, PAGE_SIZE, 0,
+	    VM_MEMATTR_WRITE_COMBINING);
 	sc->tegra_crtc.cursor_pbase = vtophys(sc->tegra_crtc.cursor_vbase);
 	return (0);
 }
Index: arm/nvidia/tegra_pcie.c
===================================================================
--- arm/nvidia/tegra_pcie.c
+++ arm/nvidia/tegra_pcie.c
@@ -1395,8 +1395,8 @@
 
 	sc = device_get_softc(dev);
 
-	sc->msi_page = kmem_alloc_contig(kernel_arena, PAGE_SIZE, M_WAITOK,
-	    0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
+	sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
+	    BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 
 	/* MSI BAR */
 	tegra_pcib_set_bar(sc, 9, vtophys(sc->msi_page), vtophys(sc->msi_page),
Index: arm/nvidia/tegra_xhci.c
===================================================================
--- arm/nvidia/tegra_xhci.c
+++ arm/nvidia/tegra_xhci.c
@@ -808,8 +808,8 @@
 	fw_hdr = (const struct tegra_xusb_fw_hdr *)fw->data;
 	fw_size = fw_hdr->fwimg_len;
 
-	fw_vaddr = kmem_alloc_contig(kernel_arena, fw_size,
-	    M_WAITOK, 0, -1UL, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
+	fw_vaddr = kmem_alloc_contig(fw_size, M_WAITOK, 0, -1UL, PAGE_SIZE, 0,
+	    VM_MEMATTR_UNCACHEABLE);
 	fw_paddr = vtophys(fw_vaddr);
 	fw_hdr = (const struct tegra_xusb_fw_hdr *)fw_vaddr;
 	memcpy((void *)fw_vaddr, fw->data, fw_size);
Index: arm/samsung/exynos/exynos5_fimd.c
===================================================================
--- arm/samsung/exynos/exynos5_fimd.c
+++ arm/samsung/exynos/exynos5_fimd.c
@@ -356,8 +356,8 @@
 	sc->sc_info.fb_stride = sc->sc_info.fb_width * 2;
 	sc->sc_info.fb_bpp = sc->sc_info.fb_depth = 16;
 	sc->sc_info.fb_size = sc->sc_info.fb_height * sc->sc_info.fb_stride;
-	sc->sc_info.fb_vbase = (intptr_t)kmem_alloc_contig(kernel_arena,
-	    sc->sc_info.fb_size, M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
+	sc->sc_info.fb_vbase = (intptr_t)kmem_alloc_contig(sc->sc_info.fb_size,
+	    M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
 	sc->sc_info.fb_pbase = (intptr_t)vtophys(sc->sc_info.fb_vbase);
 
 #if 0
Index: arm64/arm64/busdma_bounce.c
===================================================================
--- arm64/arm64/busdma_bounce.c
+++ arm64/arm64/busdma_bounce.c
@@ -495,10 +495,9 @@
 		    0ul, dmat->common.lowaddr, attr);
 		dmat->bounce_flags |= BF_KMEM_ALLOC;
 	} else {
-		*vaddr = (void *)kmem_alloc_contig(kernel_arena,
-		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
-		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
-		    dmat->common.boundary, attr);
+		*vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags,
+		    0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ?
+		    dmat->common.alignment : 1ul, dmat->common.boundary, attr);
 		dmat->bounce_flags |= BF_KMEM_ALLOC;
 	}
 	if (*vaddr == NULL) {
Index: compat/linuxkpi/common/include/linux/dma-mapping.h
===================================================================
--- compat/linuxkpi/common/include/linux/dma-mapping.h
+++ compat/linuxkpi/common/include/linux/dma-mapping.h
@@ -134,8 +134,8 @@
 	else
 		high = BUS_SPACE_MAXADDR;
 	align = PAGE_SIZE << get_order(size);
-	mem = (void *)kmem_alloc_contig(kmem_arena, size, flag, 0, high, align,
-	    0, VM_MEMATTR_DEFAULT);
+	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
+	    VM_MEMATTR_DEFAULT);
 	if (mem)
 		*dma_handle = vtophys(mem);
 	else
Index: compat/linuxkpi/common/src/linux_page.c
===================================================================
--- compat/linuxkpi/common/src/linux_page.c
+++ compat/linuxkpi/common/src/linux_page.c
@@ -167,9 +167,8 @@
 	if ((flags & GFP_DMA32) == 0) {
 		addr = kmem_malloc(kmem_arena, size, flags & GFP_NATIVE_MASK);
 	} else {
-		addr = kmem_alloc_contig(kmem_arena, size,
-		    flags & GFP_NATIVE_MASK, 0, BUS_SPACE_MAXADDR_32BIT,
-		    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
+		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
+		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 	}
 	return (addr);
 }
Index: compat/ndis/subr_ntoskrnl.c
===================================================================
--- compat/ndis/subr_ntoskrnl.c
+++ compat/ndis/subr_ntoskrnl.c
@@ -2491,8 +2491,8 @@
 		break;
 	}
 
-	ret = (void *)kmem_alloc_contig(kernel_arena, size, M_ZERO | M_NOWAIT,
-	    lowest, highest, PAGE_SIZE, boundary, memattr);
+	ret = (void *)kmem_alloc_contig(size, M_ZERO | M_NOWAIT, lowest,
+	    highest, PAGE_SIZE, boundary, memattr);
 	if (ret != NULL)
 		malloc_type_allocated(M_DEVBUF, round_page(size));
 	return (ret);
Index: dev/agp/agp.c
===================================================================
--- dev/agp/agp.c
+++ dev/agp/agp.c
@@ -154,9 +154,9 @@
 		return 0;
 
 	gatt->ag_entries = entries;
-	gatt->ag_virtual = (void *)kmem_alloc_contig(kernel_arena,
-	    entries * sizeof(u_int32_t), M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE,
-	    0, VM_MEMATTR_WRITE_COMBINING);
+	gatt->ag_virtual = (void *)kmem_alloc_contig(entries *
+	    sizeof(u_int32_t), M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0,
+	    VM_MEMATTR_WRITE_COMBINING);
 	if (!gatt->ag_virtual) {
 		if (bootverbose)
 			device_printf(dev, "contiguous allocation failed\n");
Index: dev/agp/agp_i810.c
===================================================================
--- dev/agp/agp_i810.c
+++ dev/agp/agp_i810.c
@@ -1189,9 +1189,8 @@
 		sc->dcache_size = 0;
 
 	/* According to the specs the gatt on the i810 must be 64k. */
-	sc->gatt->ag_virtual = (void *)kmem_alloc_contig(kernel_arena,
-	    64 * 1024, M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE,
-	    0, VM_MEMATTR_WRITE_COMBINING);
+	sc->gatt->ag_virtual = (void *)kmem_alloc_contig(64 * 1024, M_NOWAIT |
+	    M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
 	if (sc->gatt->ag_virtual == NULL) {
 		if (bootverbose)
 			device_printf(dev, "contiguous allocation failed\n");
Index: dev/liquidio/lio_network.h
===================================================================
--- dev/liquidio/lio_network.h
+++ dev/liquidio/lio_network.h
@@ -198,8 +198,8 @@
 	void *mem;
 
 	align = PAGE_SIZE << lio_get_order(size);
-	mem = (void *)kmem_alloc_contig(kmem_arena, size, M_WAITOK, 0, ~0ul,
-	    align, 0, VM_MEMATTR_DEFAULT);
+	mem = (void *)kmem_alloc_contig(size, M_WAITOK, 0, ~0ul, align, 0,
+	    VM_MEMATTR_DEFAULT);
 	if (mem != NULL)
 		*dma_handle = vtophys(mem);
 	else
Index: dev/xdma/controller/pl330.c
===================================================================
--- dev/xdma/controller/pl330.c
+++ dev/xdma/controller/pl330.c
@@ -392,8 +392,8 @@
 			chan->sc = sc;
 			chan->used = 1;
 
-			chan->ibuf = (void *)kmem_alloc_contig(kernel_arena,
-			    PAGE_SIZE*8, M_ZERO, 0, ~0, PAGE_SIZE, 0,
+			chan->ibuf = (void *)kmem_alloc_contig(PAGE_SIZE * 8,
+			    M_ZERO, 0, ~0, PAGE_SIZE, 0,
 			    VM_MEMATTR_UNCACHEABLE);
 			chan->ibuf_phys = vtophys(chan->ibuf);
 
Index: kern/kern_malloc.c
===================================================================
--- kern/kern_malloc.c
+++ kern/kern_malloc.c
@@ -443,8 +443,8 @@
 {
 	void *ret;
 
-	ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
-	    alignment, boundary, VM_MEMATTR_DEFAULT);
+	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
+	    boundary, VM_MEMATTR_DEFAULT);
 	if (ret != NULL)
 		malloc_type_allocated(type, round_page(size));
 	return (ret);
Index: mips/ingenic/jz4780_lcd.c
===================================================================
--- mips/ingenic/jz4780_lcd.c
+++ mips/ingenic/jz4780_lcd.c
@@ -115,8 +115,8 @@
 static int
 jzlcd_allocfb(struct jzlcd_softc *sc)
 {
-	sc->vaddr = kmem_alloc_contig(kernel_arena, sc->fbsize,
-	    M_NOWAIT | M_ZERO, 0, ~0, FB_ALIGN, 0, VM_MEMATTR_WRITE_COMBINING);
+	sc->vaddr = kmem_alloc_contig(sc->fbsize, M_NOWAIT | M_ZERO, 0, ~0,
+	    FB_ALIGN, 0, VM_MEMATTR_WRITE_COMBINING);
 	if (sc->vaddr == 0) {
 		device_printf(sc->dev, "failed to allocate FB memory\n");
 		return (ENOMEM);
Index: mips/mips/busdma_machdep.c
===================================================================
--- mips/mips/busdma_machdep.c
+++ mips/mips/busdma_machdep.c
@@ -717,9 +717,8 @@
 		vaddr = (void *)kmem_alloc_attr(dmat->maxsize, mflags, 0,
 		    dmat->lowaddr, memattr);
 	} else {
-		vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
-		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
-		    memattr);
+		vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0,
+		    dmat->lowaddr, dmat->alignment, dmat->boundary, memattr);
 	}
 	if (vaddr == NULL) {
 		_busdma_free_dmamap(newmap);
Index: powerpc/powerpc/busdma_machdep.c
===================================================================
--- powerpc/powerpc/busdma_machdep.c
+++ powerpc/powerpc/busdma_machdep.c
@@ -542,9 +542,9 @@
 		 * multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
-		*vaddr = (void *)kmem_alloc_contig(kmem_arena, dmat->maxsize,
-		    mflags, 0ul, dmat->lowaddr, dmat->alignment ?
-		    dmat->alignment : 1ul, dmat->boundary, attr);
+		*vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
+		    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
+		    dmat->boundary, attr);
 		(*mapp)->contigalloc = 1;
 	}
 	if (*vaddr == NULL) {
Index: vm/vm_extern.h
===================================================================
--- vm/vm_extern.h
+++ vm/vm_extern.h
@@ -58,7 +58,7 @@
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
 vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
-vm_offset_t kmem_alloc_contig(struct vmem *, vm_size_t size, int flags,
+vm_offset_t kmem_alloc_contig(vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
 vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
Index: vm/vm_kern.c
===================================================================
--- vm/vm_kern.c
+++ vm/vm_kern.c
@@ -304,17 +304,13 @@
 }
 
 vm_offset_t
-kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
-    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
-    vm_memattr_t memattr)
+kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
+    u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
 {
 	struct vm_domainset_iter di;
 	vm_offset_t addr;
 	int domain;
 
-	KASSERT(vmem == kernel_arena,
-	    ("kmem_alloc_contig: Only kernel_arena is supported."));
-
 	vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
 	do {
 		addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
Index: x86/iommu/intel_intrmap.c
===================================================================
--- x86/iommu/intel_intrmap.c
+++ x86/iommu/intel_intrmap.c
@@ -338,7 +338,7 @@
 		return (0);
 	}
 	unit->irte_cnt = clp2(NUM_IO_INTS);
-	unit->irt = (dmar_irte_t *)(uintptr_t)kmem_alloc_contig(kernel_arena,
+	unit->irt = (dmar_irte_t *)(uintptr_t)kmem_alloc_contig(
 	    unit->irte_cnt * sizeof(dmar_irte_t), M_ZERO | M_WAITOK, 0,
 	    dmar_high, PAGE_SIZE, 0, DMAR_IS_COHERENT(unit) ?
 	    VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
Index: x86/iommu/intel_qi.c
===================================================================
--- x86/iommu/intel_qi.c
+++ x86/iommu/intel_qi.c
@@ -398,8 +398,8 @@
 	unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;
 
 	/* The invalidation queue reads by DMARs are always coherent. */
-	unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size,
-	    M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
+	unit->inv_queue = kmem_alloc_contig(unit->inv_queue_size, M_WAITOK |
+	    M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 	unit->inv_waitd_seq_hw_phys = pmap_kextract(
 	    (vm_offset_t)&unit->inv_waitd_seq_hw);
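
Note for out-of-tree callers: the sketch below is not part of the patch; it only illustrates the calling convention after the arena argument is dropped. The function name example_msi_page_alloc and the below-4GB constraint are hypothetical; only kmem_alloc_contig(), vtophys(), and the listed headers are assumed from the tree. The remaining parameters keep their order: size, malloc flags, low and high physical address bounds, alignment, boundary, and memory attribute.

/*
 * Hypothetical example (not from this patch): allocate one wired, zeroed,
 * physically contiguous, page-aligned page below 4 GB with the new
 * kmem_alloc_contig() signature and return its kernel VA and PA.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <machine/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

static void *
example_msi_page_alloc(vm_paddr_t *pap)
{
	vm_offset_t va;

	/* size, flags, low, high, alignment, boundary, memattr */
	va = kmem_alloc_contig(PAGE_SIZE, M_WAITOK | M_ZERO, 0,
	    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	if (va == 0)
		return (NULL);	/* no contiguous range met the constraints */
	*pap = vtophys(va);
	return ((void *)va);
}

Even with M_WAITOK the return value is checked, since a contiguous range satisfying the physical-address and alignment constraints may not exist.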