Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -3094,8 +3094,8 @@
 		return;
 
 	addr = roundup2(addr, NBPDR);
-	if (addr - 1 >= kernel_map->max_offset)
-		addr = kernel_map->max_offset;
+	if (addr - 1 >= vm_map_max(kernel_map))
+		addr = vm_map_max(kernel_map);
 	while (kernel_vm_end < addr) {
 		pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
 		if ((*pdpe & X86_PG_V) == 0) {
@@ -3115,8 +3115,8 @@
 		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
 		if ((*pde & X86_PG_V) != 0) {
 			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-				kernel_vm_end = kernel_map->max_offset;
+			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+				kernel_vm_end = vm_map_max(kernel_map);
 				break;
 			}
 			continue;
@@ -3134,8 +3134,8 @@
 		pde_store(pde, newpdir);
 
 		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-			kernel_vm_end = kernel_map->max_offset;
+		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+			kernel_vm_end = vm_map_max(kernel_map);
 			break;
 		}
 	}
Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c
+++ sys/arm/arm/pmap-v6.c
@@ -2043,21 +2043,21 @@
	 *     not called, it could be first unused KVA (which is not
	 *     rounded up to PTE1_SIZE),
	 *
-	 * (2) when all KVA space is mapped and kernel_map->max_offset
+	 * (2) when all KVA space is mapped and vm_map_max(kernel_map)
	 *     address is not rounded up to PTE1_SIZE. (For example,
	 *     it could be 0xFFFFFFFF.)
	 */
 	kernel_vm_end = pte1_roundup(kernel_vm_end);
 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 	addr = roundup2(addr, PTE1_SIZE);
-	if (addr - 1 >= kernel_map->max_offset)
-		addr = kernel_map->max_offset;
+	if (addr - 1 >= vm_map_max(kernel_map))
+		addr = vm_map_max(kernel_map);
 	while (kernel_vm_end < addr) {
 		pte1 = pte1_load(kern_pte1(kernel_vm_end));
 		if (pte1_is_valid(pte1)) {
 			kernel_vm_end += PTE1_SIZE;
-			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-				kernel_vm_end = kernel_map->max_offset;
+			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+				kernel_vm_end = vm_map_max(kernel_map);
 				break;
 			}
 			continue;
@@ -2099,8 +2099,8 @@
 		pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa));
 
 		kernel_vm_end = kernel_vm_end_new;
-		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-			kernel_vm_end = kernel_map->max_offset;
+		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+			kernel_vm_end = vm_map_max(kernel_map);
 			break;
 		}
 	}
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1744,8 +1744,8 @@
 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 
 	addr = roundup2(addr, L2_SIZE);
-	if (addr - 1 >= kernel_map->max_offset)
-		addr = kernel_map->max_offset;
+	if (addr - 1 >= vm_map_max(kernel_map))
+		addr = vm_map_max(kernel_map);
 	while (kernel_vm_end < addr) {
 		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
 		KASSERT(pmap_load(l0) != 0,
@@ -1768,8 +1768,8 @@
 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
 		if ((pmap_load(l2) & ATTR_AF) != 0) {
 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
-			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-				kernel_vm_end = kernel_map->max_offset;
+			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+				kernel_vm_end = vm_map_max(kernel_map);
 				break;
 			}
 			continue;
@@ -1787,8 +1787,8 @@
 		pmap_invalidate_page(kernel_pmap, kernel_vm_end);
 
 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
-		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-			kernel_vm_end = kernel_map->max_offset;
+		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+			kernel_vm_end = vm_map_max(kernel_map);
 			break;
 		}
 	}
Index: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_context.h
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_context.h
+++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_context.h
@@ -104,13 +104,6 @@
 #include
 #include
 #include
-/* There is clash. vm_map.h defines the two below and vdev_cache.c use them. */
-#ifdef min_offset
-#undef min_offset
-#endif
-#ifdef max_offset
-#undef max_offset
-#endif
 #include
 #include
 
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -2229,13 +2229,13 @@
 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 
 	addr = roundup2(addr, NBPDR);
-	if (addr - 1 >= kernel_map->max_offset)
-		addr = kernel_map->max_offset;
+	if (addr - 1 >= vm_map_max(kernel_map))
+		addr = vm_map_max(kernel_map);
 	while (kernel_vm_end < addr) {
 		if (pdir_pde(PTD, kernel_vm_end)) {
 			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-				kernel_vm_end = kernel_map->max_offset;
+			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+				kernel_vm_end = vm_map_max(kernel_map);
 				break;
 			}
 			continue;
@@ -2257,8 +2257,8 @@
 		pmap_kenter_pde(kernel_vm_end, newpdir);
 
 		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-			kernel_vm_end = kernel_map->max_offset;
+		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+			kernel_vm_end = vm_map_max(kernel_map);
 			break;
 		}
 	}
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c
+++ sys/mips/mips/pmap.c
@@ -1255,8 +1255,8 @@
 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 	req_class = VM_ALLOC_INTERRUPT;
 	addr = roundup2(addr, NBSEG);
-	if (addr - 1 >= kernel_map->max_offset)
-		addr = kernel_map->max_offset;
+	if (addr - 1 >= vm_map_max(kernel_map))
+		addr = vm_map_max(kernel_map);
 	while (kernel_vm_end < addr) {
 		pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
 #ifdef __mips_n64
@@ -1272,8 +1272,8 @@
 		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
 		if (*pde != 0) {
 			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-				kernel_vm_end = kernel_map->max_offset;
+			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+				kernel_vm_end = vm_map_max(kernel_map);
 				break;
 			}
 			continue;
@@ -1305,8 +1305,8 @@
 			pte[i] = PTE_G;
 
 		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-			kernel_vm_end = kernel_map->max_offset;
+		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+			kernel_vm_end = vm_map_max(kernel_map);
 			break;
 		}
 	}
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -1424,8 +1424,8 @@
 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 
 	addr = roundup2(addr, L2_SIZE);
-	if (addr - 1 >= kernel_map->max_offset)
-		addr = kernel_map->max_offset;
+	if (addr - 1 >= vm_map_max(kernel_map))
+		addr = vm_map_max(kernel_map);
 	while (kernel_vm_end < addr) {
 		l1 = pmap_l1(kernel_pmap, kernel_vm_end);
 		if (pmap_load(l1) == 0) {
@@ -1452,8 +1452,8 @@
 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
 		if ((pmap_load(l2) & PTE_A) != 0) {
 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
-			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-				kernel_vm_end = kernel_map->max_offset;
+			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+				kernel_vm_end = vm_map_max(kernel_map);
 				break;
 			}
 			continue;
@@ -1478,8 +1478,8 @@
 		pmap_invalidate_page(kernel_pmap, kernel_vm_end);
 
 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
-		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-			kernel_vm_end = kernel_map->max_offset;
+		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+			kernel_vm_end = vm_map_max(kernel_map);
 			break;
 		}
 	}
Index: sys/vm/vm_glue.c
===================================================================
--- sys/vm/vm_glue.c
+++ sys/vm/vm_glue.c
@@ -122,7 +122,7 @@
 	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
 
-	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
+	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
 		return (FALSE);
 
Index: sys/vm/vm_init.c
===================================================================
--- sys/vm/vm_init.c
+++ sys/vm/vm_init.c
@@ -259,8 +259,8 @@
	 * Discount the physical memory larger than the size of kernel_map
	 * to avoid eating up all of KVA space.
	 */
-	physmem_est = lmin(physmem, btoc(kernel_map->max_offset -
-	    kernel_map->min_offset));
+	physmem_est = lmin(physmem, btoc(vm_map_max(kernel_map) -
+	    vm_map_min(kernel_map)));
 
 	v = kern_vfs_bio_buffer_alloc(v, physmem_est);
 
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -173,19 +173,25 @@
 *	A map is a set of map entries.  These map entries are
 *	organized both as a binary search tree and as a doubly-linked
 *	list.  Both structures are ordered based upon the start and
- *	end addresses contained within each map entry.  The list
- *	header has max start value and min end value to act as
- *	sentinels for sequential search of the doubly-linked list.
+ *	end addresses contained within each map entry.
+ *
+ *	The map's min offset value is stored in map->header.end,
+ *	and its max offset value is stored in map->header.start.
+ *
+ *	The list header has max start value and min end value to act
+ *	as sentinels for sequential search of the doubly-linked list.
 *	Sleator and Tarjan's top-down splay algorithm is employed to
 *	control height imbalance in the binary search tree.
 *
- * List of locks
+ *	List of locks
 *	(c)	const until freed
 */
 struct vm_map {
	struct vm_map_entry header;	/* List of entries */
-#define	min_offset	header.end	/* (c) */
-#define	max_offset	header.start	/* (c) */
+/*
+	map min_offset	header.end	(c)
+	map max_offset	header.start	(c)
+*/
	struct sx lock;			/* Lock for map data */
	struct mtx system_mtx;
	int nentries;			/* Number of entries */
@@ -214,13 +220,15 @@
 static __inline vm_offset_t
 vm_map_max(const struct vm_map *map)
 {
-	return (map->max_offset);
+
+	return (map->header.start);
 }
 
 static __inline vm_offset_t
 vm_map_min(const struct vm_map *map)
 {
-	return (map->min_offset);
+
+	return (map->header.end);
 }
 
 static __inline pmap_t
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -339,8 +339,8 @@
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
-	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
-	    vm->vm_map.max_offset);
+	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
+	    vm_map_max(&vm->vm_map));
 
 	pmap_release(vmspace_pmap(vm));
 	vm->vm_map.pmap = NULL;
@@ -799,8 +799,8 @@
 	map->needs_wakeup = FALSE;
 	map->system_map = 0;
 	map->pmap = pmap;
-	map->min_offset = min;
-	map->max_offset = max;
+	map->header.end = min;
+	map->header.start = max;
 	map->flags = 0;
 	map->root = NULL;
 	map->timestamp = 0;
@@ -1198,7 +1198,8 @@
	/*
	 * Check that the start and end points are not bogus.
	 */
-	if (start < map->min_offset || end > map->max_offset || start >= end)
+	if (start < vm_map_min(map) || end > vm_map_max(map) ||
+	    start >= end)
 		return (KERN_INVALID_ADDRESS);
 
	/*
@@ -1401,9 +1402,8 @@
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
-	if (start < map->min_offset)
-		start = map->min_offset;
-	if (start + length > map->max_offset || start + length < start)
+	start = MAX(start, vm_map_min(map));
+	if (start + length > vm_map_max(map) || start + length < start)
 		return (1);
 
	/* Empty tree means wide open address space. */
@@ -3429,7 +3429,7 @@
 	old_map = &vm1->vm_map;
 
	/* Copy immutable fields of vm1 to vm2. */
-	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
+	vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), NULL);
 	if (vm2 == NULL)
 		return (NULL);
 	vm2->vm_taddr = vm1->vm_taddr;
@@ -4329,14 +4329,14 @@
 vm_map_max_KBI(const struct vm_map *map)
 {
 
-	return (map->max_offset);
+	return (vm_map_max(map));
 }
 
 vm_offset_t
 vm_map_min_KBI(const struct vm_map *map)
 {
 
-	return (map->min_offset);
+	return (vm_map_min(map));
 }
 
 pmap_t
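
For reference, a minimal sketch of the accessor pattern this patch converts callers to, assuming the post-patch sys/vm/vm_map.h shown above. clamp_to_kernel_map() is a hypothetical helper, not part of the patch:

#include <sys/param.h>		/* MAX() */
#include <vm/vm.h>
#include <vm/vm_kern.h>		/* kernel_map */
#include <vm/vm_map.h>		/* vm_map_min()/vm_map_max() */

/*
 * Illustrative only: clamp an address to the kernel map bounds the
 * way the pmap_growkernel() hunks above do.  vm_map_max() now reads
 * map->header.start and vm_map_min() reads map->header.end, so
 * callers no longer depend on the min_offset/max_offset macros,
 * which leaked into and clashed with identifiers in the ZFS sources.
 */
static vm_offset_t
clamp_to_kernel_map(vm_offset_t addr)
{

	/* addr - 1 keeps the test correct if roundup2() wrapped to 0. */
	if (addr - 1 >= vm_map_max(kernel_map))
		addr = vm_map_max(kernel_map);
	return (MAX(addr, vm_map_min(kernel_map)));
}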