D22830.id65698.diff

Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -1088,8 +1088,11 @@
zdom->uzd_imin);
while (zdom->uzd_nitems > target) {
bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist);
- if (bucket == NULL)
- break;
+ if (bucket == NULL) {
+ bucket = zdom->uzd_cross;
+ if (bucket == NULL)
+ break;
+ }
tofree = bucket->ub_cnt;
TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
zdom->uzd_nitems -= tofree;
@@ -2254,6 +2257,7 @@
zone_foreach(zone_count, &cnt);
zone->uz_namecnt = cnt.count;
ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
+ ZONE_CROSS_LOCK_INIT(zone);
for (i = 0; i < vm_ndomains; i++)
TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
@@ -2439,6 +2443,7 @@
counter_u64_free(zone->uz_fails);
free(zone->uz_ctlname, M_UMA);
ZONE_LOCK_FINI(zone);
+ ZONE_CROSS_LOCK_FINI(zone);
}
/*
@@ -3705,6 +3710,68 @@
zone_free_item(zone, item, udata, SKIP_DTOR);
}
+#ifdef UMA_XDOMAIN
+/*
+ * Sort cross-domain free buckets into the correct per-domain buckets and
+ * cache them.
+ */
+static void
+zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
+{
+ struct uma_bucketlist fullbuckets;
+ uma_zone_domain_t zdom;
+ uma_bucket_t b;
+ void *item;
+ int domain;
+
+ CTR3(KTR_UMA,
+ "uma_zfree: zone %s(%p) draining cross bucket %p",
+ zone->uz_name, zone, bucket);
+
+ TAILQ_INIT(&fullbuckets);
+
+ /*
+ * To avoid having ndomain * ndomain buckets for sorting we have a
+ * lock on the current crossfree bucket. A full matrix with
+ * per-domain locking could be used if necessary.
+ */
+ ZONE_CROSS_LOCK(zone);
+ while (bucket->ub_cnt > 0) {
+ item = bucket->ub_bucket[bucket->ub_cnt - 1];
+ domain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
+ zdom = &zone->uz_domain[domain];
+ if (zdom->uzd_cross == NULL) {
+ zdom->uzd_cross = bucket_alloc(zone, udata, M_NOWAIT);
+ if (zdom->uzd_cross == NULL)
+ break;
+ }
+ zdom->uzd_cross->ub_bucket[zdom->uzd_cross->ub_cnt++] = item;
+ if (zdom->uzd_cross->ub_cnt == zdom->uzd_cross->ub_entries) {
+ TAILQ_INSERT_HEAD(&fullbuckets, zdom->uzd_cross,
+ ub_link);
+ zdom->uzd_cross = NULL;
+ }
+ bucket->ub_cnt--;
+ }
+ ZONE_CROSS_UNLOCK(zone);
+ if (!TAILQ_EMPTY(&fullbuckets)) {
+ ZONE_LOCK(zone);
+ while ((b = TAILQ_FIRST(&fullbuckets)) != NULL) {
+ TAILQ_REMOVE(&fullbuckets, b, ub_link);
+ domain = _vm_phys_domain(pmap_kextract((vm_offset_t)
+ b->ub_bucket[0]));
+ zdom = &zone->uz_domain[domain];
+ zone_put_bucket(zone, zdom, b, true);
+ }
+ ZONE_UNLOCK(zone);
+ }
+ if (bucket->ub_cnt != 0)
+ bucket_drain(zone, bucket);
+ bucket_free(zone, bucket, udata);
+ return;
+}
+#endif
+
static void
zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
int domain, int itemdomain)
@@ -3716,17 +3783,14 @@
* Buckets coming from the wrong domain will be entirely for the
* only other domain on two domain systems. In this case we can
* simply cache them. Otherwise we need to sort them back to
- * correct domains by freeing the contents to the slab layer.
+ * correct domains.
*/
if (domain != itemdomain && vm_ndomains > 2) {
- CTR3(KTR_UMA,
- "uma_zfree: zone %s(%p) draining cross bucket %p",
- zone->uz_name, zone, bucket);
- bucket_drain(zone, bucket);
- bucket_free(zone, bucket, udata);
+ zone_free_cross(zone, bucket, udata);
return;
}
#endif
+
/*
* Attempt to save the bucket in the zone's domain bucket cache.
*
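
The zone_free_cross() hunk above sorts a bucket of cross-domain frees back into per-domain buckets instead of pushing every item down to the slab layer, and a single per-zone cross lock (rather than an ndomain x ndomain matrix of staging buckets) serializes the sorting. Below is a minimal userspace sketch of that sort-and-cache idea; it is not part of the diff. item_domain() stands in for _vm_phys_domain(pmap_kextract()), cache_full_bucket() for zone_put_bucket(), the trailing drain loop for bucket_drain(), and NDOMAINS/BUCKET_SIZE are illustrative constants.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	NDOMAINS	2
#define	BUCKET_SIZE	8

struct bucket {
	void	*items[BUCKET_SIZE];
	int	cnt;
};

/* Stand-in for _vm_phys_domain(pmap_kextract(addr)): map an address to a domain. */
static int
item_domain(void *item)
{

	return ((uintptr_t)item >> 6) % NDOMAINS;
}

/* Stand-in for zone_put_bucket(): here the full bucket is simply consumed. */
static void
cache_full_bucket(int domain, struct bucket *b)
{

	printf("caching full bucket of %d items for domain %d\n", b->cnt, domain);
	while (b->cnt > 0)
		free(b->items[--b->cnt]);
	free(b);
}

/* One staging bucket per domain; the zone keeps these in uzd_cross. */
static struct bucket *cross[NDOMAINS];

static void
sort_cross_bucket(struct bucket *mixed)
{
	void *item;
	int domain;

	/* The real code holds ZONE_CROSS_LOCK(zone) around this loop. */
	while (mixed->cnt > 0) {
		item = mixed->items[mixed->cnt - 1];
		domain = item_domain(item);
		if (cross[domain] == NULL &&
		    (cross[domain] = calloc(1, sizeof(struct bucket))) == NULL)
			break;		/* No staging bucket; stop sorting. */
		cross[domain]->items[cross[domain]->cnt++] = item;
		if (cross[domain]->cnt == BUCKET_SIZE) {
			/* A full, single-domain bucket goes to that domain's cache. */
			cache_full_bucket(domain, cross[domain]);
			cross[domain] = NULL;
		}
		mixed->cnt--;
	}
	/* Whatever could not be staged is freed directly, as bucket_drain() does. */
	while (mixed->cnt > 0)
		free(mixed->items[--mixed->cnt]);
}

int
main(void)
{
	struct bucket mixed;
	int round;

	/* Several rounds of mixed frees; partly filled staging buckets persist. */
	for (round = 0; round < 4; round++) {
		mixed.cnt = 0;
		while (mixed.cnt < BUCKET_SIZE)
			mixed.items[mixed.cnt++] = malloc(64);
		sort_cross_bucket(&mixed);
	}
	return (0);
}

Partially filled staging buckets deliberately stay behind between calls, just as uzd_cross carries over between frees until the trim path in the first hunk reclaims it.
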
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h
+++ sys/vm/uma_int.h
@@ -400,6 +400,7 @@
struct uma_zone_domain {
struct uma_bucketlist uzd_buckets; /* full buckets */
+ uma_bucket_t uzd_cross; /* Fills from cross buckets. */
long uzd_nitems; /* total item count */
long uzd_imax; /* maximum item count this period */
long uzd_imin; /* minimum item count this period */
@@ -450,6 +451,8 @@
struct task uz_maxaction; /* Task to run when at limit */
uint16_t uz_bucket_size_min; /* Min number of items in bucket */
+ struct mtx_padalign uz_cross_lock; /* Cross domain free lock */
+
/* Offset 256+, stats and misc. */
counter_u64_t uz_allocs; /* Total number of allocations */
counter_u64_t uz_frees; /* Total number of frees */
@@ -573,6 +576,12 @@
#define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock)
#define ZONE_LOCK_ASSERT(z) mtx_assert(&(z)->uz_lock, MA_OWNED)
+#define ZONE_CROSS_LOCK_INIT(z) \
+ mtx_init(&(z)->uz_cross_lock, "UMA Cross", NULL, MTX_DEF)
+#define ZONE_CROSS_LOCK(z) mtx_lock(&(z)->uz_cross_lock)
+#define ZONE_CROSS_UNLOCK(z) mtx_unlock(&(z)->uz_cross_lock)
+#define ZONE_CROSS_LOCK_FINI(z) mtx_destroy(&(z)->uz_cross_lock)
+
/*
* Find a slab within a hash table. This is used for OFFPAGE zones to lookup
* the slab structure.
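
On the header side, each uma_zone_domain gains a uzd_cross staging pointer and the zone gains a dedicated, cache-line-padded cross-free mutex with its own init/lock/unlock/destroy macros, so sorting cross-domain frees never holds the main zone lock. A rough userspace analogue of the four ZONE_CROSS_* macros, assuming pthreads and a 64-byte cache line (struct xzone and the xzone_cross_* names are hypothetical):

#include <pthread.h>

#define	CACHE_LINE	64

/* Analogue of struct mtx_padalign: keep the lock on its own cache line. */
struct padded_mtx {
	pthread_mutex_t	mtx;
} __attribute__((aligned(CACHE_LINE)));

struct xzone {
	/* ... other per-zone fields ... */
	struct padded_mtx	cross_lock;	/* like uz_cross_lock */
};

/* Counterparts of ZONE_CROSS_LOCK_INIT/_LOCK/_UNLOCK/_LOCK_FINI. */
static inline void
xzone_cross_lock_init(struct xzone *z)
{

	pthread_mutex_init(&z->cross_lock.mtx, NULL);
}

static inline void
xzone_cross_lock(struct xzone *z)
{

	pthread_mutex_lock(&z->cross_lock.mtx);
}

static inline void
xzone_cross_unlock(struct xzone *z)
{

	pthread_mutex_unlock(&z->cross_lock.mtx);
}

static inline void
xzone_cross_lock_fini(struct xzone *z)
{

	pthread_mutex_destroy(&z->cross_lock.mtx);
}

In the diff itself the cross lock is always released before ZONE_LOCK() is taken in zone_free_cross(), so the two mutexes never nest and no new lock-order constraint is introduced.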
