Page MenuHomeFreeBSD

D33947.id101819.diff
No OneTemporary

D33947.id101819.diff

Index: sys/vm/_vm_phys.h
===================================================================
--- sys/vm/_vm_phys.h
+++ sys/vm/_vm_phys.h
@@ -53,6 +53,8 @@
int lcnt;
};
+typedef struct vm_freelist (*vm_freelist_tbl)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
+
struct vm_phys_seg {
vm_paddr_t start;
vm_paddr_t end;
@@ -64,7 +66,7 @@
void *md_first;
#endif
int domain;
- struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
+ vm_freelist_tbl free_queues;
};
extern struct vm_phys_seg vm_phys_segs[];
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -173,7 +173,7 @@
SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
&vm_ndomains, 0, "Number of physical memory domains available.");
-static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
+static vm_page_t vm_phys_alloc_queues_contig(vm_freelist_tbl queues,
u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
@@ -1367,6 +1367,7 @@
vm_paddr_t pa_end, pa_start;
vm_page_t m_run;
struct vm_phys_seg *seg;
+ vm_freelist_tbl prev_tbl;
int segind;
KASSERT(npages > 0, ("npages is 0"));
@@ -1375,6 +1376,7 @@
vm_domain_free_assert_locked(VM_DOMAIN(domain));
if (low >= high)
return (NULL);
+ prev_tbl = NULL;
m_run = NULL;
for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
seg = &vm_phys_segs[segind];
@@ -1392,8 +1394,17 @@
pa_end = seg->end;
if (pa_end - pa_start < ptoa(npages))
continue;
- m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
- alignment, boundary);
+		/*
+		 * If a previous segment uses the same free lists as
+		 * this segment, then a search of that segment has
+		 * already covered this one.  Skip it.
+		 */
+ if (seg->free_queues == prev_tbl)
+ continue;
+ prev_tbl = seg->free_queues;
+ m_run = vm_phys_alloc_queues_contig(prev_tbl, npages,
+ low, high, alignment, boundary);
if (m_run != NULL)
break;
}
@@ -1401,31 +1412,137 @@
}
/*
- * Allocate a run of contiguous physical pages from the free list for the
- * specified segment.
+ * Allocate a run of contiguous physical pages from the specified page list.
*/
static vm_page_t
-vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
+vm_phys_alloc_freelist_contig(struct vm_freelist *fl, int oind, u_long npages,
+ vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
+{
+ struct vm_phys_seg *seg;
+ vm_paddr_t lbound, pa, pa_end, pa_prev, size;
+ vm_page_t m, m_listed, m_ret;
+ int order;
+
+ KASSERT(npages > 0, ("npages is 0"));
+ KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
+ KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
+ /* Search for a run satisfying the specified conditions. */
+ size = npages << PAGE_SHIFT;
+ TAILQ_FOREACH(m_listed, &fl[oind].pl, listq) {
+ /*
+ * Determine if the address range starting at pa is
+ * too low.
+ */
+ pa = VM_PAGE_TO_PHYS(m_listed);
+ if (pa < low)
+ continue;
+
+ /*
+ * Back up to the first free oind-block in this range, without
+ * moving below low. Put backed-over blocks in the list before
+ * m_listed.
+ */
+ m_ret = m_listed;
+ seg = &vm_phys_segs[m_listed->segind];
+ lbound = low > seg->start ? low : seg->start;
+ for (;;) {
+ pa_prev = pa - (1 << (PAGE_SHIFT + oind));
+ m = &seg->first_page[atop(pa_prev - seg->start)];
+ if (pa_prev < lbound || m->order != oind)
+ break;
+ TAILQ_REMOVE(&fl[oind].pl, m, listq);
+ TAILQ_INSERT_BEFORE(m_ret, m, listq);
+ m_ret = m;
+ pa = pa_prev;
+ }
+
+ /*
+		 * Back up to the first free block in this range, without
+		 * moving below low.
+ */
+ for (order = oind - 1; order >= 0; order--) {
+ pa_prev = pa - (1 << (PAGE_SHIFT + order));
+ m = &seg->first_page[atop(pa_prev - seg->start)];
+ if (pa_prev >= lbound && m->order == order)
+ pa = pa_prev;
+ }
+
+ /*
+ * Advance as necessary to satisfy alignment, boundary
+ * conditions.
+ */
+ if (!vm_addr_align_ok(pa, alignment))
+ pa = roundup2(pa, alignment);
+ if (!vm_addr_bound_ok(pa, size, boundary))
+ pa = roundup2(pa + 1, boundary);
+ pa_end = pa + size;
+
+ /*
+ * Determine if the address range is valid (without overflow in
+ * pa_end calculation), and fits within the segment.
+ */
+ if (pa_end < pa || seg->end < pa_end)
+ continue;
+
+ m_ret = &seg->first_page[atop(pa - seg->start)];
+
+ /*
+ * Determine if a sufficient number of subsequent oind-blocks to
+ * satisfy the allocation request are free. Put walked-over
+ * blocks in the list after m_listed, advancing m_listed.
+ */
+ pa = VM_PAGE_TO_PHYS(m_listed);
+ for (;;) {
+ pa += 1 << (PAGE_SHIFT + oind);
+ if (pa >= pa_end)
+ return (m_ret);
+ m = &seg->first_page[atop(pa - seg->start)];
+ if (m->order != oind)
+ break;
+ TAILQ_REMOVE(&fl[oind].pl, m, listq);
+ TAILQ_INSERT_AFTER(&fl[oind].pl, m_listed, m, listq);
+ m_listed = m;
+ }
+
+ /*
+ * Determine whether enough smaller blocks can be found at the
+ * end to satisfy the request.
+ */
+ while (m->order < oind &&
+ pa + (2 << (PAGE_SHIFT + m->order)) > pa_end) {
+ pa += 1 << (PAGE_SHIFT + m->order);
+ if (pa >= pa_end)
+ return (m_ret);
+ m = &seg->first_page[atop(pa - seg->start)];
+ }
+ }
+ return (NULL);
+}
+
+/*
+ * Allocate a run of contiguous physical pages from the specified free list
+ * table.
+ */
+static vm_page_t
+vm_phys_alloc_queues_contig(vm_freelist_tbl queues, u_long npages,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
struct vm_freelist *fl;
- vm_paddr_t pa, pa_end, size;
vm_page_t m, m_ret;
+ vm_paddr_t pa, pa_end, size;
u_long npages_end;
int oind, order, pind;
KASSERT(npages > 0, ("npages is 0"));
KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
- vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
/* Compute the queue that is the best fit for npages. */
order = flsl(npages - 1);
- /* Search for a run satisfying the specified conditions. */
+ /* Search for a large enough free block. */
size = npages << PAGE_SHIFT;
- for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
- oind++) {
+ for (oind = order; oind < VM_NFREEORDER; oind++) {
for (pind = 0; pind < VM_NFREEPOOL; pind++) {
- fl = (*seg->free_queues)[pind];
+ fl = (*queues)[pind];
TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
/*
* Determine if the address range starting at pa
@@ -1435,45 +1552,26 @@
*/
pa = VM_PAGE_TO_PHYS(m_ret);
pa_end = pa + size;
- if (pa < low || pa_end > high ||
- !vm_addr_ok(pa, size, alignment, boundary))
- continue;
-
- /*
- * Is the size of this allocation request
- * no more than the largest block size?
- */
- if (order < VM_NFREEORDER)
+ if (low <= pa && pa_end <= high &&
+ vm_addr_ok(pa, size, alignment, boundary))
goto done;
-
- /*
- * Determine if the address range is valid
- * (without overflow in pa_end calculation)
- * and fits within the segment.
- */
- if (pa_end < pa ||
- pa < seg->start || seg->end < pa_end)
- continue;
-
- /*
- * Determine if a sufficient number of
- * subsequent blocks to satisfy the
- * allocation request are free.
- */
- do {
- pa += 1 <<
- (PAGE_SHIFT + VM_NFREEORDER - 1);
- if (pa >= pa_end)
- goto done;
- } while (VM_NFREEORDER - 1 == seg->first_page[
- atop(pa - seg->start)].order);
}
}
}
+ /* Search for a long-enough sequence of small blocks. */
+ oind = min(order - 1, VM_NFREEORDER - 1);
+ for (pind = 0; pind < VM_NFREEPOOL; pind++) {
+ fl = (*queues)[pind];
+ m_ret = vm_phys_alloc_freelist_contig(fl, oind, npages,
+ low, high, alignment, boundary);
+ if (m_ret != NULL)
+ goto done;
+ }
return (NULL);
done:
for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
- fl = (*seg->free_queues)[m->pool];
+ fl = (*queues)[m->pool];
+ oind = m->order;
vm_freelist_rem(fl, m, oind);
if (m->pool != VM_FREEPOOL_DEFAULT)
vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
@@ -1481,7 +1579,7 @@
/* Return excess pages to the free lists. */
npages_end = roundup2(npages, 1 << oind);
if (npages < npages_end) {
- fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
+ fl = (*queues)[VM_FREEPOOL_DEFAULT];
vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
}
return (m_ret);

File Metadata

Mime Type
text/plain
Expires
Thu, Apr 16, 11:19 AM (44 m, 34 s)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
31598879
Default Alt Text
D33947.id101819.diff (8 KB)

Event Timeline