Page MenuHomeFreeBSD

D14054.diff
No OneTemporary

D14054.diff

Index: head/sys/kern/kern_malloc.c
===================================================================
--- head/sys/kern/kern_malloc.c
+++ head/sys/kern/kern_malloc.c
@@ -96,6 +96,8 @@
dtrace_malloc_probe_func_t dtrace_malloc_probe;
#endif
+extern void uma_startup2(void);
+
#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define MALLOC_DEBUG 1
Index: head/sys/vm/uma.h
===================================================================
--- head/sys/vm/uma.h
+++ head/sys/vm/uma.h
@@ -431,40 +431,6 @@
typedef void (*uma_free)(void *item, vm_size_t size, uint8_t pflag);
/*
- * Sets up the uma allocator. (Called by vm_mem_init)
- *
- * Arguments:
- * bootmem A pointer to memory used to bootstrap the system.
- *
- * Returns:
- * Nothing
- *
- * Discussion:
- * This memory is used for zones which allocate things before the
- * backend page supplier can give us pages. It should be
- * UMA_SLAB_SIZE * boot_pages bytes. (see uma_int.h)
- *
- */
-
-void uma_startup(void *bootmem, int boot_pages);
-
-/*
- * Finishes starting up the allocator. This should
- * be called when kva is ready for normal allocs.
- *
- * Arguments:
- * None
- *
- * Returns:
- * Nothing
- *
- * Discussion:
- * uma_startup2 is called by kmeminit() to enable us of uma for malloc.
- */
-
-void uma_startup2(void);
-
-/*
* Reclaims unused memory for all zones
*
* Arguments:
Index: head/sys/vm/uma_core.c
===================================================================
--- head/sys/vm/uma_core.c
+++ head/sys/vm/uma_core.c
@@ -149,9 +149,8 @@
static volatile unsigned long uma_kmem_total;
/* Is the VM done starting up? */
-static int booted = 0;
-#define UMA_STARTUP 1
-#define UMA_STARTUP2 2
+static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
+ BOOT_RUNNING } booted = BOOT_COLD;
/*
* This is the handle used to schedule events that need to happen
@@ -226,6 +225,11 @@
/* Prototypes.. */
+int uma_startup_count(int);
+void uma_startup(void *, int);
+void uma_startup1(void);
+void uma_startup2(void);
+
static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
@@ -1084,6 +1088,11 @@
* Check our small startup cache to see if it has pages remaining.
*/
mtx_lock(&uma_boot_pages_mtx);
+#ifdef DIAGNOSTIC
+ if (booted < BOOT_PAGEALLOC)
+ printf("%s from \"%s\", %d boot pages left\n", __func__,
+ zone->uz_name, boot_pages);
+#endif
if (pages <= boot_pages) {
mem = bootmem;
boot_pages -= pages;
@@ -1093,7 +1102,7 @@
return (mem);
}
mtx_unlock(&uma_boot_pages_mtx);
- if (booted < UMA_STARTUP2)
+ if (booted < BOOT_PAGEALLOC)
panic("UMA: Increase vm.boot_pages");
/*
* Now that we've booted reset these users to their real allocator.
@@ -1472,7 +1481,7 @@
* If we haven't booted yet we need allocations to go through the
* startup cache until the vm is ready.
*/
- if (booted < UMA_STARTUP2)
+ if (booted < BOOT_PAGEALLOC)
keg->uk_allocf = startup_alloc;
#ifdef UMA_MD_SMALL_ALLOC
else if (keg->uk_ppera == 1)
@@ -1770,25 +1779,63 @@
rw_runlock(&uma_rwlock);
}
-/* Public functions */
-/* See uma.h */
+/*
+ * Count how many pages we need to bootstrap. The VM supplies
+ * its need for early zones in the argument; we add up our own zones,
+ * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The
+ * zone of zones and zone of kegs are accounted for separately.
+ */
+#define UMA_BOOT_ZONES 11
+static int zsize, ksize;
+int
+uma_startup_count(int zones)
+{
+ int pages;
+
+ ksize = sizeof(struct uma_keg) +
+ (sizeof(struct uma_domain) * vm_ndomains);
+ zsize = sizeof(struct uma_zone) +
+ (sizeof(struct uma_cache) * (mp_maxid + 1)) +
+ (sizeof(struct uma_zone_domain) * vm_ndomains);
+
+ /* Memory for the zone of zones and zone of kegs. */
+ pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
+ roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
+
+ zones += UMA_BOOT_ZONES;
+
+ /* Memory for startup zones, UMA and VM, ... */
+ if (zsize > UMA_SLAB_SIZE)
+ pages += zones * howmany(zsize, UMA_SLAB_SIZE);
+ else
+ pages += howmany(zones, UMA_SLAB_SIZE / zsize);
+
+ /* ... and their kegs. */
+ pages += howmany(ksize * zones, UMA_SLAB_SIZE);
+
+ /*
+	 * Take the conservative approach that every zone
+	 * is going to allocate a hash.
+ */
+ pages += howmany(sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT *
+ zones, UMA_SLAB_SIZE);
+
+ return (pages);
+}
+
void
uma_startup(void *mem, int npages)
{
struct uma_zctor_args args;
uma_keg_t masterkeg;
uintptr_t m;
- int zsize;
- int ksize;
+#ifdef DIAGNOSTIC
+ printf("Entering %s with %d boot pages configured\n", __func__, npages);
+#endif
+
rw_init(&uma_rwlock, "UMA lock");
- ksize = sizeof(struct uma_keg) +
- (sizeof(struct uma_domain) * vm_ndomains);
- zsize = sizeof(struct uma_zone) +
- (sizeof(struct uma_cache) * mp_ncpus) +
- (sizeof(struct uma_zone_domain) * vm_ndomains);
-
/* Use bootpages memory for the zone of zones and zone of kegs. */
m = (uintptr_t)mem;
zones = (uma_zone_t)m;
@@ -1819,9 +1866,7 @@
boot_pages = npages;
args.name = "UMA Zones";
- args.size = sizeof(struct uma_zone) +
- (sizeof(struct uma_cache) * (mp_maxid + 1)) +
- (sizeof(struct uma_zone_domain) * vm_ndomains);
+ args.size = zsize;
args.ctor = zone_ctor;
args.dtor = zone_dtor;
args.uminit = zero_init;
@@ -1844,27 +1889,37 @@
bucket_init();
- booted = UMA_STARTUP;
+ booted = BOOT_STRAPPED;
}
-/* see uma.h */
void
+uma_startup1(void)
+{
+
+#ifdef DIAGNOSTIC
+ printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
+#endif
+ booted = BOOT_PAGEALLOC;
+}
+
+void
uma_startup2(void)
{
- booted = UMA_STARTUP2;
- bucket_enable();
+
+ booted = BOOT_BUCKETS;
sx_init(&uma_drain_lock, "umadrain");
+ bucket_enable();
}
/*
* Initialize our callout handle
*
*/
-
static void
uma_startup3(void)
{
+ booted = BOOT_RUNNING;
callout_init(&uma_callout, 1);
callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}
@@ -1884,6 +1939,7 @@
return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
}
+/* Public functions */
/* See uma.h */
void
uma_set_align(int align)
@@ -1932,7 +1988,7 @@
args.flags = flags;
args.keg = NULL;
- if (booted < UMA_STARTUP2) {
+ if (booted < BOOT_BUCKETS) {
locked = false;
} else {
sx_slock(&uma_drain_lock);
@@ -1966,7 +2022,7 @@
args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
args.keg = keg;
- if (booted < UMA_STARTUP2) {
+ if (booted < BOOT_BUCKETS) {
locked = false;
} else {
sx_slock(&uma_drain_lock);
Index: head/sys/vm/uma_int.h
===================================================================
--- head/sys/vm/uma_int.h
+++ head/sys/vm/uma_int.h
@@ -134,10 +134,6 @@
#define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */
#define UMA_SLAB_SHIFT PAGE_SHIFT /* Number of bits PAGE_MASK */
-#define UMA_BOOT_PAGES 64 /* Pages allocated for startup */
-#define UMA_BOOT_PAGES_ZONES 32 /* Multiplier for pages to reserve */
- /* if uma_zone > PAGE_SIZE */
-
/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE 10
Index: head/sys/vm/vm_page.c
===================================================================
--- head/sys/vm/vm_page.c
+++ head/sys/vm/vm_page.c
@@ -125,6 +125,10 @@
#include <machine/md_var.h>
+extern int uma_startup_count(int);
+extern void uma_startup(void *, int);
+extern void uma_startup1(void);
+
/*
* Associated with page of user-allocatable memory is a
* page structure.
@@ -145,7 +149,7 @@
long vm_page_array_size;
long first_page;
-static int boot_pages = UMA_BOOT_PAGES;
+static int boot_pages;
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&boot_pages, 0,
"number of pages allocated for bootstrapping the VM system");
@@ -466,7 +470,7 @@
vm_paddr_t end, high_avail, low_avail, new_end, page_range, size;
vm_paddr_t biggestsize, last_pa, pa;
u_long pagecount;
- int biggestone, i, pages_per_zone, segind;
+ int biggestone, i, segind;
biggestsize = 0;
biggestone = 0;
@@ -496,26 +500,13 @@
vm_page_domain_init(&vm_dom[i]);
/*
- * Almost all of the pages needed for bootstrapping UMA are used
- * for zone structures, so if the number of CPUs results in those
- * structures taking more than one page each, we set aside more pages
- * in proportion to the zone structure size.
- */
- pages_per_zone = howmany(sizeof(struct uma_zone) +
- sizeof(struct uma_cache) * (mp_maxid + 1) +
- roundup2(sizeof(struct uma_slab), sizeof(void *)), UMA_SLAB_SIZE);
- if (pages_per_zone > 1) {
- /* Reserve more pages so that we don't run out. */
- boot_pages = UMA_BOOT_PAGES_ZONES * pages_per_zone;
- }
-
- /*
* Allocate memory for use when boot strapping the kernel memory
* allocator.
*
* CTFLAG_RDTUN doesn't work during the early boot process, so we must
* manually fetch the value.
*/
+ boot_pages = uma_startup_count(0);
TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
new_end = end - (boot_pages * UMA_SLAB_SIZE);
new_end = trunc_page(new_end);
@@ -748,6 +739,9 @@
* can work.
*/
domainset_zero();
+
+ /* Announce page availability to UMA. */
+ uma_startup1();
return (vaddr);
}

File Metadata

Mime Type
text/plain
Expires
Mon, Nov 17, 10:37 AM (2 h, 44 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
25413673
Default Alt Text
D14054.diff (9 KB)

Event Timeline