Index: sys/kern/sys_pipe.c
===================================================================
--- sys/kern/sys_pipe.c
+++ sys/kern/sys_pipe.c
@@ -749,19 +749,19 @@
 		/*
 		 * Direct copy, bypassing a kernel buffer.
 		 */
-		} else if ((size = rpipe->pipe_map.cnt) != 0) {
+		} else if ((size = rpipe->pipe_pages.cnt) != 0) {
 			if (size > uio->uio_resid)
 				size = (u_int) uio->uio_resid;
 
 			PIPE_UNLOCK(rpipe);
-			error = uiomove_fromphys(rpipe->pipe_map.ms,
-			    rpipe->pipe_map.pos, size, uio);
+			error = uiomove_fromphys(rpipe->pipe_pages.ms,
+			    rpipe->pipe_pages.pos, size, uio);
 			PIPE_LOCK(rpipe);
 			if (error)
 				break;
 			nread += size;
-			rpipe->pipe_map.pos += size;
-			rpipe->pipe_map.cnt -= size;
-			if (rpipe->pipe_map.cnt == 0) {
+			rpipe->pipe_pages.pos += size;
+			rpipe->pipe_pages.cnt -= size;
+			if (rpipe->pipe_pages.cnt == 0) {
 				rpipe->pipe_state &= ~PIPE_WANTW;
 				wakeup(rpipe);
 			}
@@ -865,7 +865,7 @@
 	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
 	KASSERT((wpipe->pipe_state & PIPE_DIRECTW) == 0,
 	    ("%s: PIPE_DIRECTW set on %p", __func__, wpipe));
-	KASSERT(wpipe->pipe_map.cnt == 0,
+	KASSERT(wpipe->pipe_pages.cnt == 0,
 	    ("%s: pipe map for %p contains residual data", __func__, wpipe));
 
 	if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
@@ -877,17 +877,17 @@
 	PIPE_UNLOCK(wpipe);
 	i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 	    (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
-	    wpipe->pipe_map.ms, PIPENPAGES);
+	    wpipe->pipe_pages.ms, PIPENPAGES);
 	PIPE_LOCK(wpipe);
 	if (i < 0) {
 		wpipe->pipe_state &= ~PIPE_DIRECTW;
 		return (EFAULT);
 	}
 
-	wpipe->pipe_map.npages = i;
-	wpipe->pipe_map.pos =
+	wpipe->pipe_pages.npages = i;
+	wpipe->pipe_pages.pos =
 	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
-	wpipe->pipe_map.cnt = size;
+	wpipe->pipe_pages.cnt = size;
 
 	uio->uio_iov->iov_len -= size;
 	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
@@ -908,12 +908,12 @@
 	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
 	KASSERT((wpipe->pipe_state & PIPE_DIRECTW) != 0,
 	    ("%s: PIPE_DIRECTW not set on %p", __func__, wpipe));
-	KASSERT(wpipe->pipe_map.cnt == 0,
+	KASSERT(wpipe->pipe_pages.cnt == 0,
 	    ("%s: pipe map for %p contains residual data", __func__, wpipe));
 
 	wpipe->pipe_state &= ~PIPE_DIRECTW;
-	vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
-	wpipe->pipe_map.npages = 0;
+	vm_page_unhold_pages(wpipe->pipe_pages.ms, wpipe->pipe_pages.npages);
+	wpipe->pipe_pages.npages = 0;
 }
 
 /*
@@ -933,9 +933,9 @@
 	KASSERT((wpipe->pipe_state & PIPE_DIRECTW) != 0,
 	    ("%s: PIPE_DIRECTW not set on %p", __func__, wpipe));
 
-	size = wpipe->pipe_map.cnt;
-	pos = wpipe->pipe_map.pos;
-	wpipe->pipe_map.cnt = 0;
+	size = wpipe->pipe_pages.cnt;
+	pos = wpipe->pipe_pages.pos;
+	wpipe->pipe_pages.cnt = 0;
 
 	wpipe->pipe_buffer.in = size;
 	wpipe->pipe_buffer.out = 0;
@@ -951,7 +951,7 @@
 	uio.uio_segflg = UIO_SYSSPACE;
 	uio.uio_rw = UIO_READ;
 	uio.uio_td = curthread;
-	uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
+	uiomove_fromphys(wpipe->pipe_pages.ms, pos, size, &uio);
 	PIPE_LOCK(wpipe);
 	pipe_destroy_write_buffer(wpipe);
 }
@@ -1015,7 +1015,7 @@
 		goto error1;
 	}
 
-	while (wpipe->pipe_map.cnt != 0 &&
+	while (wpipe->pipe_pages.cnt != 0 &&
 	    (wpipe->pipe_state & PIPE_EOF) == 0) {
 		if (wpipe->pipe_state & PIPE_WANTR) {
 			wpipe->pipe_state &= ~PIPE_WANTR;
 			wakeup(wpipe);
 		}
@@ -1032,7 +1032,7 @@
 	}
 
 	if ((wpipe->pipe_state & PIPE_EOF) != 0) {
-		wpipe->pipe_map.cnt = 0;
+		wpipe->pipe_pages.cnt = 0;
 		pipe_destroy_write_buffer(wpipe);
 		pipeselwakeup(wpipe);
 		error = EPIPE;
@@ -1157,7 +1157,7 @@
 		 * pipe buffer.  We break out if a signal occurs or the
 		 * reader goes away.
 		 */
-		if (wpipe->pipe_map.cnt != 0) {
+		if (wpipe->pipe_pages.cnt != 0) {
 			if (wpipe->pipe_state & PIPE_WANTR) {
 				wpipe->pipe_state &= ~PIPE_WANTR;
 				wakeup(wpipe);
 			}
@@ -1375,8 +1375,8 @@
 			PIPE_UNLOCK(mpipe);
 			return (0);
 		}
-		if (mpipe->pipe_map.cnt != 0)
-			*(int *)data = mpipe->pipe_map.cnt;
+		if (mpipe->pipe_pages.cnt != 0)
+			*(int *)data = mpipe->pipe_pages.cnt;
 		else
 			*(int *)data = mpipe->pipe_buffer.cnt;
 		break;
@@ -1431,7 +1431,7 @@
 		goto locked_error;
 #endif
 	if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
-		if (rpipe->pipe_map.cnt > 0 || rpipe->pipe_buffer.cnt > 0)
+		if (rpipe->pipe_pages.cnt > 0 || rpipe->pipe_buffer.cnt > 0)
 			revents |= events & (POLLIN | POLLRDNORM);
 
 	if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
@@ -1513,8 +1513,8 @@
 	bzero(ub, sizeof(*ub));
 	ub->st_mode = S_IFIFO;
 	ub->st_blksize = PAGE_SIZE;
-	if (pipe->pipe_map.cnt != 0)
-		ub->st_size = pipe->pipe_map.cnt;
+	if (pipe->pipe_pages.cnt != 0)
+		ub->st_size = pipe->pipe_pages.cnt;
 	else
 		ub->st_size = pipe->pipe_buffer.cnt;
 	ub->st_blocks = howmany(ub->st_size, ub->st_blksize);
@@ -1604,9 +1604,9 @@
 	}
 #ifndef PIPE_NODIRECT
 	{
-		cpipe->pipe_map.cnt = 0;
-		cpipe->pipe_map.pos = 0;
-		cpipe->pipe_map.npages = 0;
+		cpipe->pipe_pages.cnt = 0;
+		cpipe->pipe_pages.pos = 0;
+		cpipe->pipe_pages.npages = 0;
 	}
#endif
 }
@@ -1752,7 +1752,7 @@
 	PIPE_LOCK_ASSERT(rpipe, MA_OWNED);
 	kn->kn_data = rpipe->pipe_buffer.cnt;
 	if (kn->kn_data == 0)
-		kn->kn_data = rpipe->pipe_map.cnt;
+		kn->kn_data = rpipe->pipe_pages.cnt;
 
 	if ((rpipe->pipe_state & PIPE_EOF) != 0 &&
 	    ((rpipe->pipe_state & PIPE_NAMED) == 0 ||
Index: sys/sys/pipe.h
===================================================================
--- sys/sys/pipe.h
+++ sys/sys/pipe.h
@@ -103,7 +103,7 @@
  */
 struct pipe {
 	struct	pipebuf pipe_buffer;	/* data storage */
-	struct	pipemapping pipe_map;	/* pipe mapping for direct I/O */
+	struct	pipemapping pipe_pages;	/* wired pages for direct I/O */
 	struct	selinfo pipe_sel;	/* for compat with select */
 	struct	timespec pipe_atime;	/* time of last access */
 	struct	timespec pipe_mtime;	/* time of last modify */
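Note on the rename above: `struct pipemapping` never described a VM map. It tracks the user pages wired down by `vm_fault_quick_hold_pages()` for the direct-copy (page-flipping) write path, and the old `pipe_map` member name collided with the global `pipe_map` submap that the later hunks of this patch touch. For reference, a sketch of the structure as the code above consumes it; the authoritative definition lives in sys/sys/pipe.h, and the field types here are best-effort:

```c
struct pipemapping {
	vm_size_t	cnt;		/* bytes of data available to the reader */
	vm_size_t	pos;		/* offset of the data within the first page */
	int		npages;		/* number of wired pages held in ms[] */
	vm_page_t	ms[PIPENPAGES];	/* wired pages of the writer's buffer */
};
```

Every member access in the hunks above (`cnt`, `pos`, `npages`, `ms`) maps onto these fields; only the member name on `struct pipe` changes.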
Index: sys/vm/vm_extern.h
===================================================================
--- sys/vm/vm_extern.h
+++ sys/vm/vm_extern.h
@@ -77,8 +77,8 @@
 
 /* Bootstrapping. */
 void kmem_bootstrap_free(vm_offset_t, vm_size_t);
-vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
-    boolean_t);
+void kmem_subinit(vm_map_t, vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
+    bool);
 void kmem_init(vm_offset_t, vm_offset_t);
 void kmem_init_zero_region(void);
 void kmeminit(void);
Index: sys/vm/vm_init.c
===================================================================
--- sys/vm/vm_init.c
+++ sys/vm/vm_init.c
@@ -253,8 +253,8 @@
 	exec_map_entries = 2 * mp_ncpus + 4;
 #endif
 	exec_map_entry_size = round_page(PATH_MAX + ARG_MAX);
-	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
-	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, FALSE);
-	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
-	    FALSE);
+	kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
+	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, false);
+	kmem_subinit(pipe_map, kernel_map, &minaddr, &maxaddr, maxpipekva,
+	    false);
 }
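Interface note: `kmem_suballoc()` allocated a `struct vm_map` internally and returned it, so callers stored a pointer. `kmem_subinit()` instead initializes map storage the caller provides, which is why it returns `void`, takes the submap as a new first parameter, and trades `boolean_t` for plain `bool`. A minimal conversion sketch for a hypothetical caller — `foo_map_store`, `foo_map`, and `FOO_MAP_SIZE` are illustrative names, not part of this patch:

```c
static struct vm_map foo_map_store;	/* storage now lives with the caller */
#define	foo_map	(&foo_map_store)

static void
foo_init(void)
{
	vm_offset_t minaddr, maxaddr;

	/*
	 * Previously:
	 *	foo_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	 *	    FOO_MAP_SIZE, FALSE);
	 */
	kmem_subinit(foo_map, kernel_map, &minaddr, &maxaddr, FOO_MAP_SIZE,
	    false);
}
```

The `exec_map` and `pipe_map` conversions above follow exactly this shape, with the backing storage declared in vm_kern.c (next file).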
Index: sys/vm/vm_kern.h
===================================================================
--- sys/vm/vm_kern.h
+++ sys/vm/vm_kern.h
@@ -66,9 +66,12 @@
 #define	_VM_VM_KERN_H_
 
 /* Kernel memory management definitions. */
-extern vm_map_t kernel_map;
-extern vm_map_t exec_map;
-extern vm_map_t pipe_map;
+extern struct vm_map kernel_map_store;
+#define	kernel_map	(&kernel_map_store)
+extern struct vm_map exec_map_store;
+#define	exec_map	(&exec_map_store)
+extern struct vm_map pipe_map_store;
+#define	pipe_map	(&pipe_map_store)
 extern struct vmem *kernel_arena;
 extern struct vmem *kmem_arena;
 extern struct vmem *buffer_arena;
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -97,9 +97,9 @@
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 
-vm_map_t kernel_map;
-vm_map_t exec_map;
-vm_map_t pipe_map;
+struct vm_map kernel_map_store;
+struct vm_map exec_map_store;
+struct vm_map pipe_map_store;
 
 const void *zero_region;
 CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
@@ -360,9 +360,9 @@
 }
 
 /*
- *	kmem_suballoc:
+ *	kmem_subinit:
  *
- *	Allocates a map to manage a subrange
+ *	Initializes a map to manage a subrange
  *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
@@ -372,12 +372,11 @@
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
-vm_map_t
-kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
-    vm_size_t size, boolean_t superpage_align)
+void
+kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
+    vm_size_t size, bool superpage_align)
 {
 	int ret;
-	vm_map_t result;
 
 	size = round_page(size);
@@ -386,14 +385,11 @@
 	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
 	    MAP_ACC_NO_CHARGE);
 	if (ret != KERN_SUCCESS)
-		panic("kmem_suballoc: bad status return of %d", ret);
+		panic("kmem_subinit: bad status return of %d", ret);
 	*max = *min + size;
-	result = vm_map_create(vm_map_pmap(parent), *min, *max);
-	if (result == NULL)
-		panic("kmem_suballoc: cannot create submap");
-	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
-		panic("kmem_suballoc: unable to change range to submap");
-	return (result);
+	vm_map_init(map, vm_map_pmap(parent), *min, *max);
+	if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
+		panic("kmem_subinit: unable to change range to submap");
 }
 
 /*
@@ -750,16 +746,14 @@
 void
 kmem_init(vm_offset_t start, vm_offset_t end)
 {
-	vm_map_t m;
 	vm_size_t quantum;
 	int domain;
 
-	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
-	m->system_map = 1;
-	vm_map_lock(m);
+	vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
+	kernel_map->system_map = 1;
+	vm_map_lock(kernel_map);
 	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
-	kernel_map = m;
-	(void)vm_map_insert(m, NULL, 0,
+	(void)vm_map_insert(kernel_map, NULL, 0,
 #ifdef __amd64__
 	    KERNBASE,
 #else
@@ -774,12 +768,12 @@
 	 * that handle vm_page_array allocation can simply adjust virtual_avail
 	 * instead.
 	 */
-	(void)vm_map_insert(m, NULL, 0, (vm_offset_t)vm_page_array,
+	(void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array,
 	    (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *
 	    sizeof(struct vm_page)),
 	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
 #endif
-	vm_map_unlock(m);
+	vm_map_unlock(kernel_map);
 
 	/*
 	 * Use a large import quantum on NUMA systems.  This helps minimize
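The `*_store` object plus macro pattern keeps this change source-compatible: existing consumers treat `kernel_map`, `exec_map`, and `pipe_map` as pointer-valued expressions, and each macro expands to the address of the static storage, so no call sites need to change. It also removes the early-boot window in which `kernel_map` was a NULL pointer until `kmem_init()` assigned it. Illustration only (hypothetical consumer; the expansion in the comment is the macro defined in vm_kern.h above):

```c
static void
example_consumer(void)
{
	/* Compiles unchanged: expands to vm_map_lock(&kernel_map_store). */
	vm_map_lock(kernel_map);
	vm_map_unlock(kernel_map);
}
```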
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -455,7 +455,6 @@
 #ifdef _KERNEL
 boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
     vm_prot_t);
-vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
 int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t);
 int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t,
     vm_offset_t, int, vm_prot_t, vm_prot_t, int);
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -128,7 +128,6 @@
 static struct mtx map_sleep_mtx;
 static uma_zone_t mapentzone;
 static uma_zone_t kmapentzone;
-static uma_zone_t mapzone;
 static uma_zone_t vmspace_zone;
 static int vmspace_zinit(void *mem, int size, int flags);
 static int vm_map_zinit(void *mem, int size, int flags);
@@ -198,13 +197,6 @@
 vm_map_startup(void)
 {
 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
-	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
-#ifdef INVARIANTS
-	    vm_map_zdtor,
-#else
-	    NULL,
-#endif
-	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
@@ -869,24 +861,6 @@
 	return pmap_resident_count(vmspace_pmap(vmspace));
 }
 
-/*
- *	vm_map_create:
- *
- *	Creates and returns a new empty VM map with
- *	the given physical map structure, and having
- *	the given lower and upper address bounds.
- */
-vm_map_t
-vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
-{
-	vm_map_t result;
-
-	result = uma_zalloc(mapzone, M_WAITOK);
-	CTR1(KTR_VM, "vm_map_create: %p", result);
-	_vm_map_init(result, pmap, min, max);
-	return (result);
-}
-
 /*
  *	Initialize an existing vm_map structure
  *	such as that in the vmspace structure.
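With `kmem_subinit()` and `kmem_init()` now calling `vm_map_init()` on storage they are handed, `vm_map_create()` has no remaining consumers, so the function, its declaration, and the `mapzone` UMA zone backing it can all be deleted. Code that still wants a dynamically allocated map would allocate the storage itself and initialize it in place. A hedged sketch of such a replacement — this helper is not part of the patch, and `M_TEMP` is a stand-in malloc type chosen only for illustration:

```c
/* Assumes <sys/malloc.h> and <vm/vm_map.h>. */
static vm_map_t
vm_map_create_compat(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t map;

	/* Caller-owned storage replaces the old mapzone allocation. */
	map = malloc(sizeof(*map), M_TEMP, M_WAITOK);
	vm_map_init(map, pmap, min, max);
	return (map);
}
```

This mirrors what the patch does for the three global maps, except that their storage is static rather than heap-allocated.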