Index: vm/vm_page.c
===================================================================
--- vm/vm_page.c
+++ vm/vm_page.c
@@ -423,7 +423,7 @@
 vm_page_startup(vm_offset_t vaddr)
 {
 	vm_offset_t mapped;
-	vm_paddr_t page_range;
+	vm_paddr_t high_avail, low_avail, page_range, size;
 	vm_paddr_t new_end;
 	int i;
 	vm_paddr_t pa;
@@ -431,7 +431,6 @@
 	char *list, *listend;
 	vm_paddr_t end;
 	vm_paddr_t biggestsize;
-	vm_paddr_t low_water, high_water;
 	int biggestone;
 	int pages_per_zone;
 
@@ -443,27 +442,12 @@
 		phys_avail[i] = round_page(phys_avail[i]);
 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
 	}
-
-	low_water = phys_avail[0];
-	high_water = phys_avail[1];
-
-	for (i = 0; i < vm_phys_nsegs; i++) {
-		if (vm_phys_segs[i].start < low_water)
-			low_water = vm_phys_segs[i].start;
-		if (vm_phys_segs[i].end > high_water)
-			high_water = vm_phys_segs[i].end;
-	}
 
 	for (i = 0; phys_avail[i + 1]; i += 2) {
-		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
-
+		size = phys_avail[i + 1] - phys_avail[i];
 		if (size > biggestsize) {
 			biggestone = i;
 			biggestsize = size;
 		}
-		if (phys_avail[i] < low_water)
-			low_water = phys_avail[i];
-		if (phys_avail[i + 1] > high_water)
-			high_water = phys_avail[i + 1];
 	}
 	end = phys_avail[biggestone+1];
@@ -529,6 +513,16 @@
 	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
 	bzero((void *)vm_page_dump, vm_page_dump_size);
 #endif
+#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
+	/*
+	 * Include the UMA bootstrap pages and vm_page_dump in a crash dump.
+	 * When pmap_map() uses the direct map, they are not automatically
+	 * included.
+	 */
+	for (pa = new_end; pa < end; pa += PAGE_SIZE)
+		dump_add_page(pa);
+#endif
+	phys_avail[biggestone + 1] = new_end;
 #ifdef __amd64__
 	/*
 	 * Request that the physical pages underlying the message buffer be
@@ -547,17 +541,42 @@
 	 * use (taking into account the overhead of a page structure per
 	 * page).
 	 */
-	first_page = low_water / PAGE_SIZE;
-#ifdef VM_PHYSSEG_SPARSE
-	page_range = 0;
+	low_avail = phys_avail[0];
+	high_avail = phys_avail[1];
 	for (i = 0; i < vm_phys_nsegs; i++) {
-		page_range += atop(vm_phys_segs[i].end -
-		    vm_phys_segs[i].start);
+		if (vm_phys_segs[i].start < low_avail)
+			low_avail = vm_phys_segs[i].start;
+		if (vm_phys_segs[i].end > high_avail)
+			high_avail = vm_phys_segs[i].end;
 	}
+	/* Skip the first chunk.  It is already accounted for. */
+	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
+		if (phys_avail[i] < low_avail)
+			low_avail = phys_avail[i];
+		if (phys_avail[i + 1] > high_avail)
+			high_avail = phys_avail[i + 1];
+	}
+	first_page = low_avail / PAGE_SIZE;
+#ifdef VM_PHYSSEG_SPARSE
+	size = 0;
+	for (i = 0; i < vm_phys_nsegs; i++)
+		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
-		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
+		size += phys_avail[i + 1] - phys_avail[i];
+	page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
 #elif defined(VM_PHYSSEG_DENSE)
-	page_range = high_water / PAGE_SIZE - first_page;
+	/*
+	 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
+	 * the overhead of a page structure per page only if vm_page_array is
+	 * allocated from the last physical memory chunk.  Otherwise, we must
+	 * allocate page structures representing the physical memory
+	 * underlying vm_page_array, even though they will not be used.
+	 */
+	if (new_end != high_avail)
+		page_range = high_avail / PAGE_SIZE - first_page;
+	else
+		page_range = (high_avail - low_avail) / (PAGE_SIZE +
+		    sizeof(struct vm_page));
 #else
 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
 #endif
@@ -565,12 +584,13 @@
 
 	/*
 	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
+	 * However, because this page is allocated from KVM, out-of-bounds
+	 * accesses using the direct map will not be trapped.
 	 */
 	vaddr += PAGE_SIZE;
 
 	/*
-	 * Initialize the mem entry structures now, and put them in the free
-	 * queue.
+	 * Allocate physical memory for the page structures, and map it.
 	 */
 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
 	mapped = pmap_map(&vaddr, new_end, end,
@@ -578,19 +598,18 @@
 	vm_page_array = (vm_page_t) mapped;
 #if VM_NRESERVLEVEL > 0
 	/*
-	 * Allocate memory for the reservation management system's data
-	 * structures.
+	 * Allocate physical memory for the reservation management system's
+	 * data structures, and map it.
 	 */
-	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
+	if (high_avail == end)
+		high_avail = new_end;
+	new_end = vm_reserv_startup(&vaddr, new_end, high_avail);
 #endif
 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
 	/*
-	 * pmap_map on arm64, amd64, and mips can come out of the direct-map,
-	 * not kvm like i386, so the pages must be tracked for a crashdump to
-	 * include this data. This includes the vm_page_array and the early
-	 * UMA bootstrap pages.
+	 * Include vm_page_array and vm_reserv_array in a crash dump.
 	 */
-	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
+	for (pa = new_end; pa < end; pa += PAGE_SIZE)
 		dump_add_page(pa);
 #endif
 	phys_avail[biggestone + 1] = new_end;
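
Note (illustration only, not part of the patch): in the VM_PHYSSEG_DENSE case above, dividing by PAGE_SIZE + sizeof(struct vm_page) sizes the array so that every managed page also pays for its own page structure when vm_page_array is carved out of the same (last) chunk. A minimal standalone sketch of that arithmetic follows; the page size, structure size, and address range are assumed values chosen purely for illustration.

/*
 * Standalone sketch of the VM_PHYSSEG_DENSE sizing above: find the largest
 * page_range such that
 *     page_range * PAGE_SIZE + page_range * sizeof(struct vm_page)
 * fits within [low_avail, high_avail).  All constants below are assumptions
 * for illustration, not values taken from the patch.
 */
#include <stdio.h>

int
main(void)
{
	const unsigned long long page_size = 4096;	/* assumed 4 KB pages */
	const unsigned long long vm_page_sz = 104;	/* assumed sizeof(struct vm_page) */
	unsigned long long low_avail = 0x100000ULL;	/* 1 MB, assumed */
	unsigned long long high_avail = 0x100000000ULL;	/* 4 GB, assumed */
	unsigned long long page_range;

	/* Same division the patch performs when new_end == high_avail. */
	page_range = (high_avail - low_avail) / (page_size + vm_page_sz);
	printf("page_range = %llu pages, vm_page_array needs %llu bytes\n",
	    page_range, page_range * vm_page_sz);
	return (0);
}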