D11784.diff

Index: vm/uma_core.c
===================================================================
--- vm/uma_core.c
+++ vm/uma_core.c
@@ -1197,8 +1197,6 @@
 keg_small_init(uma_keg_t keg)
 {
         u_int rsize;
-        u_int memused;
-        u_int wastedspace;
         u_int shsize;
         u_int slabsize;
@@ -1231,16 +1229,13 @@
         if (keg->uk_flags & UMA_ZONE_OFFPAGE)
                 shsize = 0;
-        else
+        else {
                 shsize = sizeof(struct uma_slab);
+                if (shsize & UMA_ALIGN_PTR)
+                        shsize = (shsize & ~UMA_ALIGN_PTR) +
+                            (UMA_ALIGN_PTR + 1);
+        }
-        keg->uk_ipers = (slabsize - shsize) / rsize;
-        KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
-            ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
-
-        memused = keg->uk_ipers * rsize + shsize;
-        wastedspace = slabsize - memused;
-
         /*
          * We can't do OFFPAGE if we're internal or if we've been
          * asked to not go to the VM for buckets. If we do this we
@@ -1249,35 +1244,43 @@
          * of UMA_ZONE_VM, which clearly forbids it.
          */
         if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
-            (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
-                return;
-
-        /*
-         * See if using an OFFPAGE slab will limit our waste. Only do
-         * this if it permits more items per-slab.
-         *
-         * XXX We could try growing slabsize to limit max waste as well.
-         * Historically this was not done because the VM could not
-         * efficiently handle contiguous allocations.
-         */
-        if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
-            (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
-                keg->uk_ipers = slabsize / keg->uk_rsize;
+            (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) {
+                keg->uk_ipers = (slabsize - shsize) / rsize;
                 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
                     ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
-                CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
-                    "keg: %s(%p), calculated wastedspace = %d, "
-                    "maximum wasted space allowed = %d, "
-                    "calculated ipers = %d, "
-                    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
-                    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
-                    slabsize - keg->uk_ipers * keg->uk_rsize);
-                keg->uk_flags |= UMA_ZONE_OFFPAGE;
+                return;
         }
+        if (keg->uk_flags & UMA_ZONE_PCPU) {
+                KASSERT(keg->uk_flags & UMA_ZONE_OFFPAGE,
+                    ("%s: keg->uk_flags %u", __func__, keg->uk_flags));
+        } else {
+                /*
+                 * Grow the slab size to limit wasted memory.
+                 */
+                while (slabsize - rsize * (slabsize / rsize) > slabsize /
+                    UMA_MAX_WASTE) {
+                        slabsize += UMA_SLAB_SIZE;
+                        keg->uk_ppera++;
+                }
+                if (keg->uk_ppera > 1)
+                        keg->uk_flags |= UMA_ZONE_VTOSLAB;
+        }
+        keg->uk_ipers = slabsize / rsize;
+        KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
+            ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
+
+        if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
+            slabsize - (keg->uk_rsize * keg->uk_ipers) < shsize)
+                keg->uk_flags |= UMA_ZONE_OFFPAGE;
+
         if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
             (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
                 keg->uk_flags |= UMA_ZONE_HASH;
+
+        if (keg->uk_ppera > 1 && (keg->uk_flags & UMA_ZONE_PCPU) == 0)
+printf(" --> %s: uk_ppera=%d, uk_ipers=%d, uk_size=%d, uk_rsize=%d\n",
+    keg->uk_name, keg->uk_ppera, keg->uk_ipers, keg->uk_size, keg->uk_rsize);
 }
 /*
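
The second uma_core.c hunk rounds the in-line slab header size up to a pointer boundary before it is subtracted from the usable slab space, so the items placed after the header stay pointer-aligned. Below is a minimal user-space sketch of that rounding, assuming UMA_ALIGN_PTR is the usual (sizeof(void *) - 1) mask; the helper name and the sample value are invented for illustration only.

#include <stddef.h>
#include <stdio.h>

#define ALIGN_PTR_MASK  (sizeof(void *) - 1)    /* stand-in for UMA_ALIGN_PTR */

/* Round a slab header size up to the next pointer boundary, as the new
 * else branch does before charging the header against the slab. */
static size_t
round_shsize(size_t shsize)
{
        if (shsize & ALIGN_PTR_MASK)
                shsize = (shsize & ~ALIGN_PTR_MASK) + (ALIGN_PTR_MASK + 1);
        return (shsize);
}

int
main(void)
{
        /* A 90-byte header becomes 96 bytes with 8-byte pointers. */
        printf("%zu -> %zu\n", (size_t)90, round_shsize(90));
        return (0);
}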
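
The core of the patch replaces the old heuristic that switched to off-page slab headers when waste exceeded slabsize / UMA_MAX_WASTE: keg_small_init() now grows the slab in whole pages (bumping uk_ppera and marking multi-page kegs UMA_ZONE_VTOSLAB) until the space left after packing whole items drops below that threshold, and it only falls back to UMA_ZONE_OFFPAGE when the leftover space cannot even hold the in-line header. The stand-alone sketch below mirrors just the sizing loop, assuming UMA_SLAB_SIZE is one 4 KB page and UMA_MAX_WASTE is 10; the function and constant names are illustrative, not the kernel's.

#include <stdio.h>

#define SLAB_STEP       4096    /* stand-in for UMA_SLAB_SIZE (assumed 4 KB) */
#define MAX_WASTE       10      /* stand-in for UMA_MAX_WASTE (assumed 10) */

/* Mirror of the new sizing loop: grow the slab a page at a time until the
 * space left after packing whole items is at most slabsize / MAX_WASTE. */
static void
slab_layout(unsigned int rsize)
{
        unsigned int slabsize = SLAB_STEP;
        unsigned int ppera = 1;

        while (slabsize - rsize * (slabsize / rsize) > slabsize / MAX_WASTE) {
                slabsize += SLAB_STEP;
                ppera++;
        }
        printf("rsize=%u: pages=%u ipers=%u wasted=%u\n", rsize, ppera,
            slabsize / rsize, slabsize - rsize * (slabsize / rsize));
}

int
main(void)
{
        slab_layout(736);       /* odd-sized item: one page would waste 416 bytes */
        slab_layout(256);       /* power of two: fits a single page exactly */
        return (0);
}

For a 736-byte item, for example, the loop settles on a two-page slab holding 11 items with 96 bytes wasted, where a single page would hold only 5 items and waste 416 bytes.
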
Index: vm/vm_page.c
===================================================================
--- vm/vm_page.c
+++ vm/vm_page.c
@@ -496,6 +496,14 @@
         bzero((void *)mapped, end - new_end);
         uma_startup((void *)mapped, boot_pages);
+        /*
+         * Add a physical memory segment (vm_phys_seg) corresponding to the
+         * preallocated boot pages so that vm_page structures representing
+         * these pages will be created. The vm_page structures are required
+         * when a zone uses UMA_ZONE_VTOSLAB to find its slab headers.
+         */
+        vm_phys_add_seg(new_end, end);
+
 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
     defined(__i386__) || defined(__mips__)
         /*
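
The comment added to vm_page_startup() explains the dependency the uma_core.c change creates: once a keg uses multi-page slabs, UMA locates the slab header through the vm_page structures behind the slab's pages (UMA_ZONE_VTOSLAB) rather than at a fixed offset within a single page, so every page that can back such a slab, including the preallocated boot pages, needs a vm_page. Registering that range with vm_phys_add_seg() is what causes those vm_page structures to be created. The user-space sketch below illustrates only the lookup idea; every name in it is invented and it makes no claim to match the kernel's vtoslab()/vsetslab() internals.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12                      /* assume 4 KB pages */
#define NPAGES          8

/* Toy stand-in for a vm_page: only the field this sketch cares about. */
struct page_stub {
        void    *slab;                          /* where the slab pointer lives */
};

static struct page_stub page_array[NPAGES];     /* stand-in for vm_page_array */
static uintptr_t         base;                  /* address covered by slot 0 */

/* Record the owning slab for the page backing va (cf. vsetslab()). */
static void
setslab(uintptr_t va, void *slab)
{
        page_array[(va - base) >> PAGE_SHIFT].slab = slab;
}

/* Map any address inside a slab back to its header (cf. vtoslab()). */
static void *
toslab(uintptr_t va)
{
        return (page_array[(va - base) >> PAGE_SHIFT].slab);
}

int
main(void)
{
        static char backing[2 << PAGE_SHIFT];   /* pretend two-page slab */
        int slab_header;                        /* pretend slab header */

        base = (uintptr_t)backing;              /* treat the start as page 0 */
        setslab(base, &slab_header);
        setslab(base + (1 << PAGE_SHIFT), &slab_header);
        printf("found header: %d\n", toslab(base + 5000) == &slab_header);
        return (0);
}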
