Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -1034,13 +1034,20 @@
 }
 
 /*
- * Free a contiguous, arbitrarily sized set of physical pages.
+ * Free a contiguous, arbitrarily sized set of physical pages,
+ * without block merging.
+ *
+ * The pages just before and after the freed pages must be
+ * allocated, because there is no check here for buddy-
+ * system merges.
  *
  * The free page queues must be locked.
  */
-void
-vm_phys_free_contig(vm_page_t m, u_long npages)
+static void
+vm_phys_nomerge_free_contig(vm_page_t m, u_long npages)
 {
+	struct vm_freelist *fl;
+	struct vm_phys_seg *seg;
 	u_int n;
 	int order;
 
@@ -1049,6 +1056,8 @@
 	 * possible power-of-two-sized subsets.
 	 */
 	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
+	seg = &vm_phys_segs[m->segind];
+	fl = (*seg->free_queues)[m->pool];
 	for (;; npages -= n) {
 		/*
 		 * Unsigned "min" is used here so that "order" is assigned
@@ -1062,19 +1071,67 @@
 		n = 1 << order;
 		if (npages < n)
 			break;
-		vm_phys_free_pages(m, order);
+		vm_freelist_add(fl, m, order, 1);
 		m += n;
 	}
 	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
 	for (; npages > 0; npages -= n) {
 		order = flsl(npages) - 1;
 		n = 1 << order;
-		vm_phys_free_pages(m, order);
+		vm_freelist_add(fl, m, order, 1);
 		m += n;
 	}
 }
 
 /*
+ * Free a contiguous, arbitrarily sized set of physical pages.
+ *
+ * The free page queues must be locked.
+ */
+void
+vm_phys_free_contig(vm_page_t m, u_long npages)
+{
+	int order_beg, order_end;
+	vm_page_t m_beg, m_end;
+
+	/*
+	 * Avoid unnecessary coalescing by freeing the pages at the start and
+	 * end of the range last.
+	 */
+	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
+
+	/*
+	 * Unsigned "min" is used here so that "order_beg" (and, below,
+	 * "order_end") is assigned "VM_NFREEORDER - 1" when the
+	 * corresponding physical address is zero or the low-order bits
+	 * of that address are zero because the size of a physical
+	 * address exceeds the size of a long.
+	 */
+	m_beg = m;
+	order_beg = min(ffsl(VM_PAGE_TO_PHYS(m_beg) >> PAGE_SHIFT) - 1,
+	    VM_NFREEORDER - 1);
+	if (order_beg < VM_NFREEORDER - 1)
+		m_beg += 1 << order_beg;
+	m_end = m + npages;
+	order_end = min(ffsl(VM_PAGE_TO_PHYS(m_end) >> PAGE_SHIFT) - 1,
+	    VM_NFREEORDER - 1);
+	if (order_end < VM_NFREEORDER - 1)
+		m_end -= 1 << order_end;
+	/*
+	 * NOTE(review): when "npages" is smaller than the boundary
+	 * blocks ("1 << order_beg" + "1 << order_end"), "m_beg" can
+	 * pass "m_end" and the frees below touch pages outside the
+	 * range; confirm callers never pass such small ranges.
+	 */
+	if (m_beg < m_end)
+		vm_phys_nomerge_free_contig(m_beg, m_end - m_beg);
+	if (order_beg < VM_NFREEORDER - 1)
+		vm_phys_free_pages(m, order_beg);
+	if (order_end < VM_NFREEORDER - 1)
+		vm_phys_free_pages(m_end, order_end);
+}
+
+/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run. The specified "alignment" determines