D45409.id139455.diff

Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -509,7 +509,7 @@
m->psind = 0;
m->segind = segind;
m->order = VM_NFREEORDER;
- m->pool = VM_FREEPOOL_DEFAULT;
+ m->pool = VM_NFREEPOOL;
m->valid = m->dirty = 0;
pmap_page_init(m);
}
@@ -785,7 +785,8 @@
m = seg->first_page + atop(startp - seg->start);
vmd = VM_DOMAIN(seg->domain);
vm_domain_free_lock(vmd);
- vm_phys_enqueue_contig(m, pagecount);
+ vm_phys_enqueue_contig(m, VM_FREEPOOL_DEFAULT,
+ pagecount);
vm_domain_free_unlock(vmd);
vm_domain_freecnt_inc(vmd, pagecount);
vm_cnt.v_page_count += (u_int)pagecount;
Index: sys/vm/vm_phys.h
===================================================================
--- sys/vm/vm_phys.h
+++ sys/vm/vm_phys.h
@@ -66,7 +66,7 @@
int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
-void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
+void vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages);
int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
vm_memattr_t memattr);
void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -669,6 +669,7 @@
int tail)
{
vm_page_t m_buddy;
+ int pool = m->pool;
while (oind > order) {
oind--;
@@ -676,6 +677,10 @@
KASSERT(m_buddy->order == VM_NFREEORDER,
("vm_phys_split_pages: page %p has unexpected order %d",
m_buddy, m_buddy->order));
+ KASSERT(m_buddy->pool == VM_NFREEPOOL,
+ ("vm_phys_split_pages: page %p has unexpected pool %d",
+ m_buddy, m_buddy->pool));
+ m_buddy->pool = pool;
vm_freelist_add(fl, m_buddy, oind, tail);
}
}
@@ -693,7 +698,8 @@
* The physical page m's buddy must not be free.
*/
static void
-vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
+ int tail)
{
int order;
@@ -709,6 +715,7 @@
order = ilog2(npages);
KASSERT(order < VM_NFREEORDER,
("%s: order %d is out of range", __func__, order));
+ m->pool = pool;
vm_freelist_add(fl, m, order, tail);
m += 1 << order;
npages -= 1 << order;
@@ -729,7 +736,8 @@
* parameter m. Otherwise, the physical page m's buddy must not be free.
*/
static vm_page_t
-vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
+ int tail)
{
int order;
@@ -745,6 +753,7 @@
order = ffs(npages) - 1;
KASSERT(order < VM_NFREEORDER,
("vm_phys_enq_range: order %d is out of range", order));
+ m->pool = pool;
vm_freelist_add(fl, m, order, tail);
m += 1 << order;
npages -= 1 << order;
@@ -752,18 +761,6 @@
return (m);
}
-/*
- * Set the pool for a contiguous, power of two-sized set of physical pages.
- */
-static void
-vm_phys_set_pool(int pool, vm_page_t m, int order)
-{
- vm_page_t m_tmp;
-
- for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
- m_tmp->pool = pool;
-}
-
/*
* Tries to allocate the specified number of pages from the specified pool
* within the specified domain. Returns the actual number of allocated pages
@@ -772,7 +769,8 @@
* The returned pages may not be physically contiguous. However, in contrast
* to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
* calling this function once to allocate the desired number of pages will
- * avoid wasted time in vm_phys_split_pages().
+ * avoid wasted time in vm_phys_split_pages(). Sets the pool field for
+ * every allocated page.
*
* The free page queues for the specified domain must be locked.
*/
@@ -801,14 +799,18 @@
vm_freelist_rem(fl, m, oind);
avail = i + (1 << oind);
end = imin(npages, avail);
- while (i < end)
+ ma[i++] = m++;
+ while (i < end) {
+ m->pool = pool;
ma[i++] = m++;
+ }
if (i == npages) {
/*
* Return excess pages to fl. Its order
* [0, oind) queues are empty.
*/
- vm_phys_enq_range(m, avail - i, fl, 1);
+ vm_phys_enq_range(m, avail - i, fl,
+ pool, 1);
return (npages);
}
}
@@ -819,11 +821,12 @@
while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
NULL) {
vm_freelist_rem(alt, m, oind);
- vm_phys_set_pool(pool, m, oind);
avail = i + (1 << oind);
end = imin(npages, avail);
- while (i < end)
+ do {
+ m->pool = pool;
ma[i++] = m++;
+ } while (i < end);
if (i == npages) {
/*
* Return excess pages to fl.
@@ -831,7 +834,7 @@
* are empty.
*/
vm_phys_enq_range(m, avail - i,
- fl, 1);
+ fl, pool, 1);
return (npages);
}
}
@@ -843,7 +846,7 @@
/*
* Allocate a contiguous, power of two-sized set of physical pages
- * from the free lists.
+ * from the free lists. Sets the pool field in the first page only.
*
* The free page queues must be locked.
*/
@@ -864,7 +867,8 @@
/*
* Allocate a contiguous, power of two-sized set of physical pages from the
* specified free list. The free list must be specified using one of the
- * manifest constants VM_FREELIST_*.
+ * manifest constants VM_FREELIST_*. Sets the pool field in the first page
+ * only.
*
* The free page queues must be locked.
*/
@@ -915,7 +919,7 @@
m = TAILQ_FIRST(&alt[oind].pl);
if (m != NULL) {
vm_freelist_rem(alt, m, oind);
- vm_phys_set_pool(pool, m, oind);
+ m->pool = pool;
/* The order [order, oind) queues are empty. */
vm_phys_split_pages(m, oind, fl, order, 1);
return (m);
@@ -1122,7 +1126,8 @@
}
/*
- * Free a contiguous, power of two-sized set of physical pages.
+ * Free a contiguous, power of two-sized set of physical pages. Assumes that
+ * only the first page has a valid pool field.
*
* The free page queues must be locked.
*/
@@ -1133,18 +1138,19 @@
struct vm_phys_seg *seg;
vm_paddr_t pa;
vm_page_t m_buddy;
+ int pool = m->pool;
KASSERT(m->order == VM_NFREEORDER,
("vm_phys_free_pages: page %p has unexpected order %d",
m, m->order));
- KASSERT(m->pool < VM_NFREEPOOL,
- ("vm_phys_free_pages: page %p has unexpected pool %d",
- m, m->pool));
+ KASSERT(pool < VM_NFREEPOOL,
+ ("vm_phys_free_pages: page %p has unexpected pool %d", m, pool));
KASSERT(order < VM_NFREEORDER,
("vm_phys_free_pages: order %d is out of range", order));
seg = &vm_phys_segs[m->segind];
vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
if (order < VM_NFREEORDER - 1) {
+ vm_page_t m_start = m;
pa = VM_PAGE_TO_PHYS(m);
do {
pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
@@ -1155,25 +1161,28 @@
break;
fl = (*seg->free_queues)[m_buddy->pool];
vm_freelist_rem(fl, m_buddy, order);
- if (m_buddy->pool != m->pool)
- vm_phys_set_pool(m->pool, m_buddy, order);
+ m_buddy->pool = VM_NFREEPOOL;
order++;
pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
m = &seg->first_page[atop(pa - seg->start)];
} while (order < VM_NFREEORDER - 1);
+ if (m != m_start) {
+ m_start->pool = VM_NFREEPOOL;
+ m->pool = pool;
+ }
}
- fl = (*seg->free_queues)[m->pool];
+ fl = (*seg->free_queues)[pool];
vm_freelist_add(fl, m, order, 1);
}
/*
- * Free a contiguous, arbitrarily sized set of physical pages, without
- * merging across set boundaries.
+ * Free a contiguous, arbitrarily sized set of physical pages, without merging
+ * across set boundaries. Assumes no pages have a valid pool field.
*
* The free page queues must be locked.
*/
void
-vm_phys_enqueue_contig(vm_page_t m, u_long npages)
+vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
@@ -1187,14 +1196,15 @@
*/
vm_domain_free_assert_locked(vm_pagequeue_domain(m));
seg = &vm_phys_segs[m->segind];
- fl = (*seg->free_queues)[m->pool];
+ fl = (*seg->free_queues)[pool];
m_end = m + npages;
/* Free blocks of increasing size. */
lo = atop(VM_PAGE_TO_PHYS(m));
if (m < m_end &&
(diff = lo ^ (lo + npages - 1)) != 0) {
order = min(ilog2(diff), VM_NFREEORDER - 1);
- m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
+ m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl,
+ pool, 1);
}
/* Free blocks of maximum size. */
@@ -1203,15 +1213,17 @@
KASSERT(seg == &vm_phys_segs[m->segind],
("%s: page range [%p,%p) spans multiple segments",
__func__, m_end - npages, m));
+ m->pool = pool;
vm_freelist_add(fl, m, order, 1);
m += 1 << order;
}
/* Free blocks of diminishing size. */
- vm_phys_enq_beg(m, m_end - m, fl, 1);
+ vm_phys_enq_beg(m, m_end - m, fl, pool, 1);
}
/*
* Free a contiguous, arbitrarily sized set of physical pages.
+ * Assumes that every page has the same, valid, pool field value.
*
* The free page queues must be locked.
*/
@@ -1221,17 +1233,22 @@
vm_paddr_t lo;
vm_page_t m_start, m_end;
unsigned max_order, order_start, order_end;
+ int pool = m->pool;
+
+ KASSERT(pool < VM_NFREEPOOL,
+ ("%s: pool %d is out of range", __func__, pool));
vm_domain_free_assert_locked(vm_pagequeue_domain(m));
lo = atop(VM_PAGE_TO_PHYS(m));
max_order = min(ilog2(lo ^ (lo + npages)), VM_NFREEORDER - 1);
-
- m_start = m;
+ m_end = m + npages;
+ for (m_start = m; m < m_end; m++)
+ m->pool = VM_NFREEPOOL;
+ m = m_start;
order_start = ffsll(lo) - 1;
if (order_start < max_order)
m_start += 1 << order_start;
- m_end = m + npages;
order_end = ffsll(lo + npages) - 1;
if (order_end < max_order)
m_end -= 1 << order_end;
@@ -1240,11 +1257,15 @@
* end of the range last.
*/
if (m_start < m_end)
- vm_phys_enqueue_contig(m_start, m_end - m_start);
- if (order_start < max_order)
+ vm_phys_enqueue_contig(m_start, pool, m_end - m_start);
+ if (order_start < max_order) {
+ m->pool = pool;
vm_phys_free_pages(m, order_start);
- if (order_end < max_order)
+ }
+ if (order_end < max_order) {
+ m_end->pool = pool;
vm_phys_free_pages(m_end, order_end);
+ }
}
/*
@@ -1292,7 +1313,7 @@
struct vm_phys_seg *seg;
vm_paddr_t pa, pa_half;
vm_page_t m_set, m_tmp;
- int order;
+ int order, pool;
/*
* First, find the contiguous, power of two-sized set of free
@@ -1324,7 +1345,8 @@
* is larger than a page, shrink "m_set" by returning the half
* of "m_set" that does not contain "m" to the free lists.
*/
- fl = (*seg->free_queues)[m_set->pool];
+ pool = m_set->pool;
+ fl = (*seg->free_queues)[pool];
order = m_set->order;
vm_freelist_rem(fl, m_set, order);
while (order > 0) {
@@ -1336,8 +1358,10 @@
m_tmp = m_set;
m_set = &seg->first_page[atop(pa_half - seg->start)];
}
+ m_tmp->pool = pool;
vm_freelist_add(fl, m_tmp, order, 0);
}
+ m_set->pool = pool;
KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
return (true);
}
@@ -1477,7 +1501,8 @@
* alignment of the first physical page in the set. If the given value
* "boundary" is non-zero, then the set of physical pages cannot cross
* any physical address boundary that is a multiple of that value. Both
- * "alignment" and "boundary" must be a power of two.
+ * "alignment" and "boundary" must be a power of two. Sets the pool
+ * field in every allocated page.
*/
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
@@ -1536,14 +1561,16 @@
fl = (*queues)[m->pool];
oind = m->order;
vm_freelist_rem(fl, m, oind);
- if (m->pool != VM_FREEPOOL_DEFAULT)
- vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
}
/* Return excess pages to the free lists. */
fl = (*queues)[VM_FREEPOOL_DEFAULT];
- vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
+ vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl,
+ VM_FREEPOOL_DEFAULT, 0);
/* Return page verified to satisfy conditions of request. */
+ for (m = m_run; m < &m_run[npages]; m++)
+ m->pool = VM_FREEPOOL_DEFAULT;
+
pa_start = VM_PAGE_TO_PHYS(m_run);
KASSERT(low <= pa_start,
("memory allocated below minimum requested range"));
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -889,30 +889,35 @@
static void
vm_reserv_break(vm_reserv_t rv)
{
- int hi, lo, pos;
+ int pool, pos, pos0, pos1;
vm_reserv_assert_locked(rv);
CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
__FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
vm_reserv_remove(rv);
rv->pages->psind = 0;
- hi = lo = -1;
- pos = 0;
- for (;;) {
- bit_ff_at(rv->popmap, pos, VM_LEVEL_0_NPAGES, lo != hi, &pos);
- if (lo == hi) {
- if (pos == -1)
- break;
- lo = pos;
- continue;
- }
+ pool = rv->pages->pool;
+ rv->pages->pool = VM_NFREEPOOL;
+ pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
+ pos1 = -1 - pos0;
+ for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
+ /* Find the first different bit after pos. */
+ bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
+ pos1 < pos0, &pos);
if (pos == -1)
pos = VM_LEVEL_0_NPAGES;
- hi = pos;
+ if (pos0 <= pos1) {
+ /* Set pool for pages from pos1 to pos. */
+ pos0 = pos1;
+ while (pos0 < pos)
+ rv->pages[pos0++].pool = pool;
+ continue;
+ }
+ /* Free unused pages from pos0 to pos. */
+ pos1 = pos;
vm_domain_free_lock(VM_DOMAIN(rv->domain));
- vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
+ vm_phys_enqueue_contig(&rv->pages[pos0], pool, pos1 - pos0);
vm_domain_free_unlock(VM_DOMAIN(rv->domain));
- lo = hi;
}
bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
rv->popcnt = 0;
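
Note on the change: this diff alters the invariant on the vm_page "pool" field for free pages. Previously every free page carried a valid pool index, kept consistent by vm_phys_set_pool(), which the diff deletes; afterwards only the first page of each free block does, and every other free page holds the sentinel VM_NFREEPOOL. Callers that free or enqueue pages therefore pass the pool explicitly (vm_phys_enqueue_contig(), vm_phys_enq_range(), vm_phys_enq_beg()), and buddy coalescing in vm_phys_free_pages() clears the absorbed buddy's pool and stamps the surviving block head.

The following is a minimal userspace sketch of that bookkeeping, not FreeBSD code: the names (toy_page, toy_free, NPAGES, and so on) are hypothetical, and the real allocator additionally maintains per-pool free lists, segments, and locking, all of which this sketch omits.

	/*
	 * Toy buddy bookkeeping: only the head page of each free block
	 * keeps a valid pool index; all other pages hold NFREEPOOL.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define	NPAGES		16	/* toy physical memory: 16 pages */
	#define	NFREEORDER	5	/* orders 0..4, i.e. blocks of 1..16 pages */
	#define	NFREEPOOL	2	/* two pools, like DEFAULT and DIRECT */

	struct toy_page {
		int	order;		/* NFREEORDER unless this page heads a free block */
		int	pool;		/* NFREEPOOL unless this page heads a free block */
	};

	static struct toy_page pages[NPAGES];

	/* Free the 2^order pages starting at index i into the given pool. */
	static void
	toy_free(int i, int order, int pool)
	{
		int buddy;

		assert(pool < NFREEPOOL);
		/* Coalesce with the buddy while it is free at the same order. */
		while (order < NFREEORDER - 1) {
			buddy = i ^ (1 << order);
			if (buddy >= NPAGES || pages[buddy].order != order)
				break;
			/* The buddy stops heading a block: clear its pool. */
			pages[buddy].order = NFREEORDER;
			pages[buddy].pool = NFREEPOOL;
			i &= ~(1 << order);	/* merged block starts at the lower buddy */
			order++;
		}
		/* Only the head of the merged block carries a valid pool. */
		pages[i].order = order;
		pages[i].pool = pool;
	}

	int
	main(void)
	{
		for (int i = 0; i < NPAGES; i++) {
			pages[i].order = NFREEORDER;	/* "allocated" */
			pages[i].pool = NFREEPOOL;
		}
		toy_free(0, 1, 0);	/* free pages 0-1 */
		toy_free(2, 1, 0);	/* free pages 2-3: merges into an order-2 block */
		printf("head: page 0 order %d pool %d\n", pages[0].order, pages[0].pool);
		printf("tail: page 2 order %d pool %d\n", pages[2].order, pages[2].pool);
		return (0);
	}

Freeing pages 0-1 and then 2-3 merges them into one order-2 block: page 0, the block head, ends up with order 2 and pool 0, while pages 1-3 keep the sentinel. That is the state vm_phys_free_pages() now leaves behind, and why vm_phys_split_pages() has to re-stamp each buddy it splits off before adding it to a free list.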
