Page MenuHomeFreeBSD

D45409.id139219.diff
No OneTemporary

D45409.id139219.diff

Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -509,7 +509,7 @@
m->psind = 0;
m->segind = segind;
m->order = VM_NFREEORDER;
- m->pool = VM_FREEPOOL_DEFAULT;
+ m->pool = VM_NFREEPOOL;
m->valid = m->dirty = 0;
pmap_page_init(m);
}
@@ -785,7 +785,7 @@
m = seg->first_page + atop(startp - seg->start);
vmd = VM_DOMAIN(seg->domain);
vm_domain_free_lock(vmd);
- vm_phys_enqueue_contig(m, pagecount);
+ vm_phys_enqueue_contig(m, VM_FREEPOOL_DEFAULT, pagecount);
vm_domain_free_unlock(vmd);
vm_domain_freecnt_inc(vmd, pagecount);
vm_cnt.v_page_count += (u_int)pagecount;
Index: sys/vm/vm_phys.h
===================================================================
--- sys/vm/vm_phys.h
+++ sys/vm/vm_phys.h
@@ -66,7 +66,7 @@
int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
-void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
+void vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages);
int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
vm_memattr_t memattr);
void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -182,7 +182,7 @@
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
- int order, int tail);
+ int pool, int order, int tail);
/*
* Red-black tree helpers for vm fictitious range management.
@@ -376,9 +376,11 @@
#endif
static void
-vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
+vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int pool, int order,
+ int tail)
{
+ m->pool = pool;
m->order = order;
if (tail)
TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
@@ -394,6 +396,7 @@
TAILQ_REMOVE(&fl[order].pl, m, listq);
fl[order].lcnt--;
m->order = VM_NFREEORDER;
+ m->pool = VM_NFREEPOOL;
}
/*
@@ -665,8 +668,8 @@
* (hopefully) deallocation.
*/
static __inline void
-vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
- int tail)
+vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int pool,
+ int order, int tail)
{
vm_page_t m_buddy;
@@ -676,7 +679,7 @@
KASSERT(m_buddy->order == VM_NFREEORDER,
("vm_phys_split_pages: page %p has unexpected order %d",
m_buddy, m_buddy->order));
- vm_freelist_add(fl, m_buddy, oind, tail);
+ vm_freelist_add(fl, m_buddy, pool, oind, tail);
}
}
@@ -693,7 +696,8 @@
* The physical page m's buddy must not be free.
*/
static void
-vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
+ int tail)
{
int order;
@@ -709,7 +713,7 @@
order = fls(npages) - 1;
KASSERT(order < VM_NFREEORDER,
("%s: order %d is out of range", __func__, order));
- vm_freelist_add(fl, m, order, tail);
+ vm_freelist_add(fl, m, pool, order, tail);
m += 1 << order;
npages -= 1 << order;
}
@@ -729,7 +733,8 @@
* parameter m. Otherwise, the physical page m's buddy must not be free.
*/
static vm_page_t
-vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
+ int tail)
{
int order;
@@ -745,25 +750,13 @@
order = ffs(npages) - 1;
KASSERT(order < VM_NFREEORDER,
("vm_phys_enq_range: order %d is out of range", order));
- vm_freelist_add(fl, m, order, tail);
+ vm_freelist_add(fl, m, pool, order, tail);
m += 1 << order;
npages -= 1 << order;
}
return (m);
}
-/*
- * Set the pool for a contiguous, power of two-sized set of physical pages.
- */
-static void
-vm_phys_set_pool(int pool, vm_page_t m, int order)
-{
- vm_page_t m_tmp;
-
- for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
- m_tmp->pool = pool;
-}
-
/*
* Tries to allocate the specified number of pages from the specified pool
* within the specified domain. Returns the actual number of allocated pages
@@ -772,7 +765,8 @@
* The returned pages may not be physically contiguous. However, in contrast
* to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
* calling this function once to allocate the desired number of pages will
- * avoid wasted time in vm_phys_split_pages().
+ * avoid wasted time in vm_phys_split_pages(). Sets the pool field for
+ * every allocated page.
*
* The free page queues for the specified domain must be locked.
*/
@@ -801,14 +795,20 @@
vm_freelist_rem(fl, m, oind);
avail = i + (1 << oind);
end = imin(npages, avail);
- while (i < end)
+ while (i < end) {
+ KASSERT(m->pool == VM_NFREEPOOL,
+ ("%s: pool %d is wrongly valid",
+ __func__, m->pool));
+ m->pool = pool;
ma[i++] = m++;
+ }
if (i == npages) {
/*
* Return excess pages to fl. Its order
* [0, oind) queues are empty.
*/
- vm_phys_enq_range(m, avail - i, fl, 1);
+ vm_phys_enq_range(m, avail - i, fl,
+ pool, 1);
return (npages);
}
}
@@ -819,11 +819,15 @@
while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
NULL) {
vm_freelist_rem(alt, m, oind);
- vm_phys_set_pool(pool, m, oind);
avail = i + (1 << oind);
end = imin(npages, avail);
- while (i < end)
+ while (i < end) {
+ KASSERT(m->pool == VM_NFREEPOOL,
+ ("%s: pool %d is wrongly "
+ "valid", __func__, m->pool));
+ m->pool = pool;
ma[i++] = m++;
+ }
if (i == npages) {
/*
* Return excess pages to fl.
@@ -831,7 +835,7 @@
* are empty.
*/
vm_phys_enq_range(m, avail - i,
- fl, 1);
+ fl, pool, 1);
return (npages);
}
}
@@ -843,7 +847,7 @@
/*
* Allocate a contiguous, power of two-sized set of physical pages
- * from the free lists.
+ * from the free lists. Sets the pool field in the first page only.
*
* The free page queues must be locked.
*/
@@ -864,7 +868,8 @@
/*
* Allocate a contiguous, power of two-sized set of physical pages from the
* specified free list. The free list must be specified using one of the
- * manifest constants VM_FREELIST_*.
+ * manifest constants VM_FREELIST_*. Sets the pool field in the first page
+ * only.
*
* The free page queues must be locked.
*/
@@ -898,7 +903,11 @@
if (m != NULL) {
vm_freelist_rem(fl, m, oind);
/* The order [order, oind) queues are empty. */
- vm_phys_split_pages(m, oind, fl, order, 1);
+ vm_phys_split_pages(m, oind, fl, pool, order, 1);
+ KASSERT(m->pool == VM_NFREEPOOL,
+ ("%s: pool %d is wrongly valid",
+ __func__, m->pool));
+ m->pool = pool;
return (m);
}
}
@@ -915,9 +924,13 @@
m = TAILQ_FIRST(&alt[oind].pl);
if (m != NULL) {
vm_freelist_rem(alt, m, oind);
- vm_phys_set_pool(pool, m, oind);
/* The order [order, oind) queues are empty. */
- vm_phys_split_pages(m, oind, fl, order, 1);
+ vm_phys_split_pages(m, oind, fl, pool, order,
+ 1);
+ KASSERT(m->pool == VM_NFREEPOOL,
+ ("%s: pool %d is wrongly valid",
+ __func__, m->pool));
+ m->pool = pool;
return (m);
}
}
@@ -1122,12 +1135,13 @@
}
/*
- * Free a contiguous, power of two-sized set of physical pages.
+ * Free a contiguous, power of two-sized set of physical pages. Assumes that no
+ * pages have a valid pool field.
*
* The free page queues must be locked.
*/
-void
-vm_phys_free_pages(vm_page_t m, int order)
+static void
+vm_phys_free_pool_pages(vm_page_t m, int pool, int order)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
@@ -1137,9 +1151,8 @@
KASSERT(m->order == VM_NFREEORDER,
("vm_phys_free_pages: page %p has unexpected order %d",
m, m->order));
- KASSERT(m->pool < VM_NFREEPOOL,
- ("vm_phys_free_pages: page %p has unexpected pool %d",
- m, m->pool));
+ KASSERT(pool < VM_NFREEPOOL,
+ ("vm_phys_free_pages: unexpected pool %d", pool));
KASSERT(order < VM_NFREEORDER,
("vm_phys_free_pages: order %d is out of range", order));
seg = &vm_phys_segs[m->segind];
@@ -1155,25 +1168,41 @@
break;
fl = (*seg->free_queues)[m_buddy->pool];
vm_freelist_rem(fl, m_buddy, order);
- if (m_buddy->pool != m->pool)
- vm_phys_set_pool(m->pool, m_buddy, order);
order++;
pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
m = &seg->first_page[atop(pa - seg->start)];
} while (order < VM_NFREEORDER - 1);
}
- fl = (*seg->free_queues)[m->pool];
- vm_freelist_add(fl, m, order, 1);
+ fl = (*seg->free_queues)[pool];
+ vm_freelist_add(fl, m, pool, order, 1);
}
/*
- * Free a contiguous, arbitrarily sized set of physical pages, without
- * merging across set boundaries.
+ * Free a contiguous, power of two-sized set of physical pages. Assumes that
+ * only the first page has a valid pool field.
*
* The free page queues must be locked.
*/
void
-vm_phys_enqueue_contig(vm_page_t m, u_long npages)
+vm_phys_free_pages(vm_page_t m, int order)
+{
+ int pool = m->pool;
+
+ KASSERT(pool < VM_NFREEPOOL,
+ ("%s: page %p has unexpected pool %d",
+ __func__, m, m->pool));
+ m->pool = VM_NFREEPOOL;
+ vm_phys_free_pool_pages(m, pool, order);
+}
+
+/*
+ * Free a contiguous, arbitrarily sized set of physical pages, without merging
+ * across set boundaries. Assumes no pages have a valid pool field.
+ *
+ * The free page queues must be locked.
+ */
+void
+vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
@@ -1181,20 +1210,24 @@
vm_paddr_t diff, lo;
int order;
+ KASSERT(pool < VM_NFREEPOOL,
+ ("%s: pool %d is out of range", __func__, pool));
+
/*
* Avoid unnecessary coalescing by freeing the pages in the largest
* possible power-of-two-sized subsets.
*/
vm_domain_free_assert_locked(vm_pagequeue_domain(m));
seg = &vm_phys_segs[m->segind];
- fl = (*seg->free_queues)[m->pool];
+ fl = (*seg->free_queues)[pool];
m_end = m + npages;
/* Free blocks of increasing size. */
lo = atop(VM_PAGE_TO_PHYS(m));
if (m < m_end &&
(diff = lo ^ (lo + npages - 1)) != 0) {
order = min(flsll(diff) - 1, VM_NFREEORDER - 1);
- m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
+ m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl,
+ pool, 1);
}
/* Free blocks of maximum size. */
@@ -1203,15 +1236,16 @@
KASSERT(seg == &vm_phys_segs[m->segind],
("%s: page range [%p,%p) spans multiple segments",
__func__, m_end - npages, m));
- vm_freelist_add(fl, m, order, 1);
+ vm_freelist_add(fl, m, pool, order, 1);
m += 1 << order;
}
/* Free blocks of diminishing size. */
- vm_phys_enq_beg(m, m_end - m, fl, 1);
+ vm_phys_enq_beg(m, m_end - m, fl, pool, 1);
}
/*
* Free a contiguous, arbitrarily sized set of physical pages.
+ * Assumes that every page has the same, valid, pool field value.
*
* The free page queues must be locked.
*/
@@ -1221,17 +1255,26 @@
vm_paddr_t lo;
vm_page_t m_start, m_end;
unsigned max_order, order_start, order_end;
+ int pool = m->pool;
+
+ KASSERT(pool < VM_NFREEPOOL,
+ ("%s: pool %d is out of range", __func__, pool));
vm_domain_free_assert_locked(vm_pagequeue_domain(m));
lo = atop(VM_PAGE_TO_PHYS(m));
max_order = min(flsll(lo ^ (lo + npages)) - 1, VM_NFREEORDER - 1);
-
- m_start = m;
+ m_end = m + npages;
+ for (m_start = m; m < m_end; m++) {
+ KASSERT(m->pool == pool,
+ ("%s: pool %d mismatches first pool %d",
+ __func__, m->pool, pool));
+ m->pool = VM_NFREEPOOL;
+ }
+ m = m_start;
order_start = ffsll(lo) - 1;
if (order_start < max_order)
m_start += 1 << order_start;
- m_end = m + npages;
order_end = ffsll(lo + npages) - 1;
if (order_end < max_order)
m_end -= 1 << order_end;
@@ -1240,11 +1283,11 @@
* end of the range last.
*/
if (m_start < m_end)
- vm_phys_enqueue_contig(m_start, m_end - m_start);
+ vm_phys_enqueue_contig(m_start, pool, m_end - m_start);
if (order_start < max_order)
- vm_phys_free_pages(m, order_start);
+ vm_phys_free_pool_pages(m, pool, order_start);
if (order_end < max_order)
- vm_phys_free_pages(m_end, order_end);
+ vm_phys_free_pool_pages(m_end, pool, order_end);
}
/*
@@ -1292,7 +1335,7 @@
struct vm_phys_seg *seg;
vm_paddr_t pa, pa_half;
vm_page_t m_set, m_tmp;
- int order;
+ int order, pool;
/*
* First, find the contiguous, power of two-sized set of free
@@ -1324,7 +1367,8 @@
* is larger than a page, shrink "m_set" by returning the half
* of "m_set" that does not contain "m" to the free lists.
*/
- fl = (*seg->free_queues)[m_set->pool];
+ pool = m_set->pool;
+ fl = (*seg->free_queues)[pool];
order = m_set->order;
vm_freelist_rem(fl, m_set, order);
while (order > 0) {
@@ -1336,7 +1380,7 @@
m_tmp = m_set;
m_set = &seg->first_page[atop(pa_half - seg->start)];
}
- vm_freelist_add(fl, m_tmp, order, 0);
+ vm_freelist_add(fl, m_tmp, pool, order, 0);
}
KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
return (true);
@@ -1477,7 +1521,8 @@
* alignment of the first physical page in the set. If the given value
* "boundary" is non-zero, then the set of physical pages cannot cross
* any physical address boundary that is a multiple of that value. Both
- * "alignment" and "boundary" must be a power of two.
+ * "alignment" and "boundary" must be a power of two. Sets the pool
+ * field in every allocated page.
*/
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
@@ -1536,14 +1581,16 @@
fl = (*queues)[m->pool];
oind = m->order;
vm_freelist_rem(fl, m, oind);
- if (m->pool != VM_FREEPOOL_DEFAULT)
- vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
}
/* Return excess pages to the free lists. */
fl = (*queues)[VM_FREEPOOL_DEFAULT];
- vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
+ vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl,
+ VM_FREEPOOL_DEFAULT, 0);
/* Return page verified to satisfy conditions of request. */
+ for (m = m_run; m < &m_run[npages]; m++)
+ m->pool = VM_FREEPOOL_DEFAULT;
+
pa_start = VM_PAGE_TO_PHYS(m_run);
KASSERT(low <= pa_start,
("memory allocated below minimum requested range"));
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -889,30 +889,35 @@
static void
vm_reserv_break(vm_reserv_t rv)
{
- int hi, lo, pos;
+ int pool, pos, pos0, pos1;
vm_reserv_assert_locked(rv);
CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
__FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
vm_reserv_remove(rv);
rv->pages->psind = 0;
- hi = lo = -1;
- pos = 0;
- for (;;) {
- bit_ff_at(rv->popmap, pos, VM_LEVEL_0_NPAGES, lo != hi, &pos);
- if (lo == hi) {
- if (pos == -1)
- break;
- lo = pos;
- continue;
- }
+ pool = rv->pages->pool;
+ rv->pages->pool = VM_NFREEPOOL;
+ pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
+ pos1 = -1 - pos0;
+ for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
+ /* Find the first different bit after pos. */
+ bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
+ pos1 < pos0, &pos);
if (pos == -1)
pos = VM_LEVEL_0_NPAGES;
- hi = pos;
+ if (pos0 <= pos1) {
+ /* Set pool for pages from pos1 to pos. */
+ pos0 = pos1;
+ while (pos0 < pos)
+ rv->pages[pos0++].pool = pool;
+ continue;
+ }
+ /* Free unused pages from pos0 to pos. */
+ pos1 = pos;
vm_domain_free_lock(VM_DOMAIN(rv->domain));
- vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
+ vm_phys_enqueue_contig(&rv->pages[pos0], pool, pos1 - pos0);
vm_domain_free_unlock(VM_DOMAIN(rv->domain));
- lo = hi;
}
bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
rv->popcnt = 0;

File Metadata

Mime Type
text/plain
Expires
Tue, Apr 21, 2:45 AM (53 m, 44 s)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
31881901
Default Alt Text
D45409.id139219.diff (16 KB)

Event Timeline