diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -72,6 +72,8 @@
 
 _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
     "Too many physsegs.");
+_Static_assert(sizeof(long long) >= sizeof(vm_paddr_t),
+    "vm_paddr_t too big for ffsll, flsll.");
 
 #ifdef NUMA
 struct mem_affinity __read_mostly *mem_affinity;
@@ -690,18 +692,16 @@
  *
  * The physical page m's buddy must not be free.
  */
-static void
+static vm_page_t
 vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
 {
-	u_int n;
 	int order;
 
-	KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
 	KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
 	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
 	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
 	    m, npages));
-	do {
+	while (npages > 0) {
 		KASSERT(m->order == VM_NFREEORDER,
 		    ("vm_phys_enq_range: page %p has unexpected order %d",
 		    m, m->order));
@@ -709,10 +709,10 @@
 		KASSERT(order < VM_NFREEORDER,
 		    ("vm_phys_enq_range: order %d is out of range", order));
 		vm_freelist_add(fl, m, order, tail);
-		n = 1 << order;
-		m += n;
-		npages -= n;
-	} while (npages > 0);
+		m += 1 << order;
+		npages -= 1 << order;
+	}
+	return (m);
 }
 
 /*
@@ -744,7 +744,7 @@
 {
 	struct vm_freelist *alt, *fl;
 	vm_page_t m;
-	int avail, end, flind, freelist, i, need, oind, pind;
+	int avail, end, flind, freelist, i, oind, pind;
 
 	KASSERT(domain >= 0 && domain < vm_ndomains,
 	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
@@ -762,20 +762,18 @@
 	for (oind = 0; oind < VM_NFREEORDER; oind++) {
 		while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
 			vm_freelist_rem(fl, m, oind);
-			avail = 1 << oind;
-			need = imin(npages - i, avail);
-			for (end = i + need; i < end;)
+			avail = i + (1 << oind);
+			end = imin(npages, avail);
+			while (i < end)
 				ma[i++] = m++;
-			if (need < avail) {
+			if (i == npages) {
 				/*
-				 * Return excess pages to fl. Its
-				 * order [0, oind) queues are empty.
+				 * Return excess pages to fl. Its order
+				 * [0, oind) queues are empty.
 				 */
-				vm_phys_enq_range(m, avail - need, fl,
-				    1);
-				return (npages);
-			} else if (i == npages)
+				vm_phys_enq_range(m, avail - i, fl, 1);
 				return (npages);
+			}
 		}
 	}
 	for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
@@ -785,21 +783,20 @@
 			    NULL) {
 				vm_freelist_rem(alt, m, oind);
 				vm_phys_set_pool(pool, m, oind);
-				avail = 1 << oind;
-				need = imin(npages - i, avail);
-				for (end = i + need; i < end;)
+				avail = i + (1 << oind);
+				end = imin(npages, avail);
+				while (i < end)
 					ma[i++] = m++;
-				if (need < avail) {
+				if (i == npages) {
 					/*
 					 * Return excess pages to fl.
 					 * Its order [0, oind) queues
 					 * are empty.
 					 */
-					vm_phys_enq_range(m, avail -
-					    need, fl, 1);
-					return (npages);
-				} else if (i == npages)
+					vm_phys_enq_range(m, avail - i,
+					    fl, 1);
 					return (npages);
+				}
 			}
 		}
 	}
@@ -1146,7 +1143,7 @@
 	 * because the size of a physical address exceeds the size of
 	 * a long.
 	 */
-	return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
+	return (min(ffsll(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
 	    VM_NFREEORDER - 1));
 }
 
@@ -1162,6 +1159,7 @@
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
 	vm_page_t m_end;
+	vm_paddr_t diff, lo;
 	int order;
 
 	/*
@@ -1173,15 +1171,15 @@
 	fl = (*seg->free_queues)[m->pool];
 	m_end = m + npages;
 	/* Free blocks of increasing size. */
-	while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
-	    m + (1 << order) <= m_end) {
-		KASSERT(seg == &vm_phys_segs[m->segind],
-		    ("%s: page range [%p,%p) spans multiple segments",
-		    __func__, m_end - npages, m));
-		vm_freelist_add(fl, m, order, 1);
-		m += 1 << order;
+	lo = VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT;
+	if (m < m_end &&
+	    (diff = lo ^ (lo + npages - 1)) != 0) {
+		order = min(flsll(diff) - 1, VM_NFREEORDER - 1);
+		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
 	}
+	/* Free blocks of maximum size. */
+	order = VM_NFREEORDER - 1;
 	while (m + (1 << order) <= m_end) {
 		KASSERT(seg == &vm_phys_segs[m->segind],
 		    ("%s: page range [%p,%p) spans multiple segments",
 		    __func__, m_end - npages, m));
@@ -1560,10 +1558,8 @@
 			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
 	}
 	/* Return excess pages to the free lists. */
-	if (&m_run[npages] < m) {
-		fl = (*queues)[VM_FREEPOOL_DEFAULT];
-		vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
-	}
+	fl = (*queues)[VM_FREEPOOL_DEFAULT];
+	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
 	return (m_run);
 }
 
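A note for readers following the new arithmetic in vm_phys_enqueue_contig(): the XOR of the first and last page frame numbers, lo ^ (lo + npages - 1), has its highest set bit at the largest block boundary the range crosses, so flsll() of that value bounds the order of the first naturally aligned block. The userspace sketch below models the resulting decomposition: blocks of increasing size up to an alignment boundary, then maximum-size blocks, then a diminishing tail. It is an illustration only, not kernel code; MAX_ORDER, enq_range(), and enqueue_contig() are stand-in names, and it assumes that vm_phys_enq_range() picks each block's order from the low set bit of the remaining page count, which is what the alignment KASSERT in the patch requires of its caller.

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffsll() and flsll(); both are in FreeBSD libc */

#define	MAX_ORDER	12	/* stand-in for VM_NFREEORDER - 1 */
#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	ROUNDUP2(x, y)	(((x) + ((y) - 1)) & ~(uint64_t)((y) - 1))

/*
 * Model of vm_phys_enq_range(): emit naturally aligned blocks of
 * increasing size.  The low set bit of the remaining page count picks
 * each block's order, so every block start stays aligned to its size.
 */
static uint64_t
enq_range(uint64_t pfn, uint64_t npages)
{
	int order;

	while (npages > 0) {
		order = ffsll(npages) - 1;
		printf("block at frame %#jx, order %d\n", (uintmax_t)pfn,
		    order);
		pfn += (uint64_t)1 << order;
		npages -= (uint64_t)1 << order;
	}
	return (pfn);
}

/* Model of the decomposition in the patched vm_phys_enqueue_contig(). */
static void
enqueue_contig(uint64_t pfn, uint64_t npages)
{
	uint64_t diff, end;
	int order;

	end = pfn + npages;
	/* Phase 1: increasing sizes, up to the first big block boundary. */
	if (npages > 0 && (diff = pfn ^ (pfn + npages - 1)) != 0) {
		order = MIN(flsll(diff) - 1, MAX_ORDER);
		pfn = enq_range(pfn, ROUNDUP2(pfn, (uint64_t)1 << order) -
		    pfn);
	}
	/* Phase 2: maximum-size blocks. */
	while (pfn + ((uint64_t)1 << MAX_ORDER) <= end) {
		printf("block at frame %#jx, order %d\n", (uintmax_t)pfn,
		    MAX_ORDER);
		pfn += (uint64_t)1 << MAX_ORDER;
	}
	/* Phase 3: a diminishing tail (handled past the hunks shown above). */
	while (pfn < end) {
		order = flsll(end - pfn) - 1;
		printf("block at frame %#jx, order %d\n", (uintmax_t)pfn,
		    order);
		pfn += (uint64_t)1 << order;
	}
}

int
main(void)
{
	enqueue_contig(3, 22);	/* frames [3, 25) */
	return (0);
}

Running it for 22 frames starting at frame 3 yields blocks of order 0, 2, and 3 at frames 3, 4, and 8, then order 3 at frame 16 and order 0 at frame 24: every block is a power of two in size and naturally aligned, which is the invariant the buddy free lists require.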