Index: vm_map.c
===================================================================
--- vm_map.c
+++ vm_map.c
@@ -2186,17 +2186,22 @@
 	VM_MAP_ASSERT_LOCKED(map);
 	KASSERT(entry->end > start && entry->start < start,
 	    ("_vm_map_clip_start: invalid clip of entry %p", entry));
+	vm_map_simplify_entry(map, entry);
 
 	/*
-	 * Split off the front portion -- note that we must insert the new
-	 * entry BEFORE this one, so that this entry has the specified
-	 * starting address.
+	 * Create a backing object now, if none exists, so that more individual
+	 * objects won't be created after the map entry is split.
 	 */
-	vm_map_simplify_entry(map, entry);
 	vm_map_entry_charge_object(map, entry);
+
+	/* Clone the entry. */
 	new_entry = vm_map_entry_create(map);
 	*new_entry = *entry;
 
+	/*
+	 * Split off the front portion.  Insert the new entry BEFORE this one,
+	 * so that this entry has the specified starting address.
+	 */
 	new_entry->end = start;
 	entry->offset += (start - entry->start);
 	entry->start = start;
@@ -2244,14 +2249,19 @@
 
 	KASSERT(entry->start < end && entry->end > end,
 	    ("_vm_map_clip_end: invalid clip of entry %p", entry));
-
 	/*
-	 * Create a new entry and insert it AFTER the specified entry
+	 * Create a backing object now, if none exists, so that more individual
+	 * objects won't be created after the map entry is split.
 	 */
 	vm_map_entry_charge_object(map, entry);
+
+	/* Clone the entry. */
 	new_entry = vm_map_entry_create(map);
 	*new_entry = *entry;
 
+	/*
+	 * Insert new entry AFTER the specified entry.
+	 */
 	new_entry->start = entry->end = end;
 	new_entry->offset += (end - entry->start);
 	if (new_entry->cred != NULL)
Index: vm_reserv.c
===================================================================
--- vm_reserv.c
+++ vm_reserv.c
@@ -1171,25 +1171,29 @@
  * request for contiguous physical memory.  Start searching from the lower
  * bound, defined by low_index.
  *
- * The free page queue lock must be held.
+ * The reservation must be locked.
  */
 static bool
 vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
 	vm_paddr_t pa, size;
-	u_long changes;
-	int bitpos, bits_left, i, hi, lo, n;
+	u_long changes, hi, lo;
+	int bitpos, bits_left, i, n;
 
 	vm_reserv_assert_locked(rv);
 	size = npages << PAGE_SHIFT;
 	pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
+	KASSERT(pa + VM_LEVEL_0_SIZE - size >= low,
+	    ("%s: reservation is too low", __func__));
 	lo = (pa < low) ?
 	    ((low + PAGE_MASK - pa) >> PAGE_SHIFT) : 0;
 	i = lo / NBPOPMAP;
 	changes = rv->popmap[i] | ((1UL << (lo % NBPOPMAP)) - 1);
+	KASSERT(pa + size <= high,
+	    ("%s: reservation is too high", __func__));
 	hi = (pa + VM_LEVEL_0_SIZE > high) ?
-	    ((high + PAGE_MASK - pa) >> PAGE_SHIFT) : VM_LEVEL_0_NPAGES;
+	    ((high - pa) >> PAGE_SHIFT) : VM_LEVEL_0_NPAGES;
 	n = hi / NBPOPMAP;
 	bits_left = hi % NBPOPMAP;
 	hi = lo = -1;
@@ -1231,7 +1235,7 @@
 				return (false);
 			pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
 		}
-		if (lo * PAGE_SIZE + size <= hi * PAGE_SIZE)
+		if (lo < hi && npages <= hi - lo)
 			return (true);
 		lo = hi;
 	}
@@ -1250,8 +1254,6 @@
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns true if a reservation is broken and false otherwise.
- *
- * The free page queue lock must be held.
 */
 boolean_t
 vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
@@ -1261,7 +1263,7 @@
 	vm_reserv_t rv, rvn;
 
 	if (npages > VM_LEVEL_0_NPAGES - 1)
-		return (false);
+		return (FALSE);
 	size = npages << PAGE_SHIFT;
 	vm_reserv_domain_lock(domain);
again:
@@ -1291,7 +1293,7 @@
 		    alignment, boundary)) {
 			vm_reserv_reclaim(rv);
 			vm_reserv_unlock(rv);
-			return (true);
+			return (TRUE);
 		}
 		vm_reserv_unlock(rv);
 		vm_reserv_domain_lock(domain);
@@ -1299,7 +1301,7 @@
 		goto again;
 	}
 	vm_reserv_domain_unlock(domain);
-	return (false);
+	return (FALSE);
 }
 
 /*