Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -1034,13 +1034,37 @@
 }
 
 /*
- * Free a contiguous, arbitrarily sized set of physical pages.
+ * Return the largest possible order of a page starting at m.
+ */
+static int
+max_order(vm_page_t m)
+{
+	/*
+	 * Unsigned "min" is used here so that "order" is assigned
+	 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
+	 * or the low-order bits of its physical address are zero
+	 * because the size of a physical address exceeds the size of
+	 * a long.
+	 */
+	return min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
+	    VM_NFREEORDER - 1);
+}
+
+/*
+ * Free a contiguous, arbitrarily sized set of physical pages,
+ * without block merging.
+ *
+ * The pages just before and after the freed pages must be
+ * allocated, because there is no check here for buddy-
+ * system merges.
  *
  * The free page queues must be locked.
  */
-void
-vm_phys_free_contig(vm_page_t m, u_long npages)
+static void
+vm_phys_nomerge_free_contig(vm_page_t m, u_long npages)
 {
+	struct vm_freelist *fl;
+	struct vm_phys_seg *seg;
 	u_int n;
 	int order;
 
@@ -1049,32 +1073,75 @@
 	 * possible power-of-two-sized subsets.
 	 */
 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
+	seg = &vm_phys_segs[m->segind];
+	fl = (*seg->free_queues)[m->pool];
 	for (;; npages -= n) {
-		/*
-		 * Unsigned "min" is used here so that "order" is assigned
-		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
-		 * or the low-order bits of its physical address are zero
-		 * because the size of a physical address exceeds the size of
-		 * a long.
-		 */
-		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
-		    VM_NFREEORDER - 1);
+		order = max_order(m);
 		n = 1 << order;
 		if (npages < n)
 			break;
-		vm_phys_free_pages(m, order);
+		vm_freelist_add(fl, m, order, 1);
 		m += n;
 	}
 	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
 	for (; npages > 0; npages -= n) {
 		order = flsl(npages) - 1;
 		n = 1 << order;
-		vm_phys_free_pages(m, order);
+		vm_freelist_add(fl, m, order, 1);
 		m += n;
 	}
 }
 
 /*
+ * Free a contiguous, arbitrarily sized set of physical pages.
+ *
+ * The free page queues must be locked.
+ */
+void
+vm_phys_free_contig(vm_page_t m, u_long npages)
+{
+	int order, order_start, order_end;
+	vm_page_t m_start, m_end;
+
+	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
+
+	m_start = m;
+	order_start = max_order(m_start);
+	if (order_start < VM_NFREEORDER - 1)
+		m_start += 1 << order_start;
+	m_end = m + npages;
+	order_end = max_order(m_end);
+	if (order_end < VM_NFREEORDER - 1)
+		m_end -= 1 << order_end;
+	/*
+	 * If the head and tail blocks computed above would overlap,
+	 * i.e., "npages" is too small to contain both of them, then
+	 * decomposing the range into head, middle, and tail would
+	 * free pages outside of the given range.  Fall back on
+	 * freeing maximal power-of-two-sized, properly aligned
+	 * blocks, with merging, as the previous implementation did.
+	 */
+	if (m_start > m_end) {
+		while (npages > 0) {
+			order = min(max_order(m), flsl(npages) - 1);
+			vm_phys_free_pages(m, order);
+			m += 1 << order;
+			npages -= 1 << order;
+		}
+		return;
+	}
+	/*
+	 * Avoid unnecessary coalescing by freeing the pages at the start and
+	 * end of the range last.
+	 */
+	if (m_start < m_end)
+		vm_phys_nomerge_free_contig(m_start, m_end - m_start);
+	if (order_start < VM_NFREEORDER - 1)
+		vm_phys_free_pages(m, order_start);
+	if (order_end < VM_NFREEORDER - 1)
+		vm_phys_free_pages(m_end, order_end);
+}
+
+/*
  * Scan physical memory between the specified addresses "low" and "high" for a
  * run of contiguous physical pages that satisfy the specified conditions, and
  * return the lowest page in the run.  The specified "alignment" determines