diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -225,6 +225,19 @@
 #define	SLABZONE1_SIZE	SLABZONE_SIZE(SLABZONE1_SETSIZE)
 static uma_zone_t slabzones[2];
 
+#ifdef UMA_USE_DMAP
+/*
+ * How many items can we fit into a bitmap embedded in a vm_page header?
+ */
+#define	SLAB_EMBEDDED_SETSIZE						\
+	(((__offsetof(struct vm_page, vm_page_end_opaque) -		\
+	    __offsetof(struct uma_slab, us_free)) /			\
+	    (SLAB_BITSETS * sizeof(int))) * NBBY * sizeof(int))
+_Static_assert(__offsetof(struct vm_page, vm_page_end_opaque) >=
+    sizeof(struct uma_slab),
+    "vm_page header is not large enough to embed a UMA slab");
+#endif
+
 /*
  * The initial hash tables come out of this zone so they can be allocated
  * prior to malloc coming up.
@@ -1703,7 +1716,8 @@
 			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
 	}
 	flags = slab->us_flags;
-	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
+	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0 &&
+	    (keg->uk_flags & UMA_ZFLAG_EMBEDSLAB) == 0) {
 		zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
 		    NULL, SKIP_NONE);
 	}
@@ -1849,7 +1863,8 @@
 	slab = NULL;
 	mem = NULL;
-	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
+	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0 &&
+	    (keg->uk_flags & UMA_ZFLAG_EMBEDSLAB) == 0) {
 		uma_hash_slab_t hslab;
 
 		hslab = zone_alloc_item(slabzone(keg->uk_ipers), NULL,
 		    domain, aflags);
@@ -1880,7 +1895,7 @@
 	size = keg->uk_ppera * PAGE_SIZE;
 	mem = keg->uk_allocf(zone, size, domain, &sflags, aflags);
 	if (mem == NULL) {
-		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
+		if (slab != NULL)
 			zone_free_item(slabzone(keg->uk_ipers),
 			    slab_tohashslab(slab), NULL, SKIP_NONE);
 		goto fail;
@@ -1897,6 +1912,9 @@
 	/* Point the slab into the allocated memory */
 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
 		slab = (uma_slab_t)(mem + keg->uk_pgoff);
+	else if ((keg->uk_flags & UMA_ZFLAG_EMBEDSLAB) != 0)
+		slab = (uma_slab_t)
+		    PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)mem));
 	else
 		slab_tohashslab(slab)->uhs_data = mem;
 
@@ -2176,6 +2194,10 @@
 	if ((wait & M_NODUMP) == 0)
 		dump_add_page(pa);
 	va = (void *)PHYS_TO_DMAP(pa);
+	KASSERT(m->ref_count == 1,
+	    ("%s: unexpected ref_count %u", __func__, m->ref_count));
+	KASSERT(m->busy_lock == VPB_UNBUSIED,
+	    ("%s: unexpected busy_lock %d", __func__, m->busy_lock));
 	return (va);
 }
 #endif
@@ -2251,7 +2273,11 @@
 	pa = DMAP_TO_PHYS((vm_offset_t)mem);
 	dump_drop_page(pa);
+
 	m = PHYS_TO_VM_PAGE(pa);
+	/* Restore state that may have been clobbered. */
+	m->ref_count = 1;
+	m->busy_lock = VPB_UNBUSIED;
 	vm_page_unwire_noq(m);
 	vm_page_free(m);
 }
@@ -2357,9 +2383,19 @@
 	kl->ipers = slab_ipers_hdr(keg->uk_size, rsize, kl->slabsize,
 	    (fmt & UMA_ZFLAG_OFFPAGE) == 0);
 
+#ifdef UMA_USE_DMAP
+	/* Can we embed a slab header in the vm_page structure? */
+	if ((fmt & UMA_ZFLAG_EMBEDSLAB) != 0 &&
+	    (slabsize != UMA_SLAB_SIZE || kl->ipers > SLAB_EMBEDDED_SETSIZE)) {
+		printf("%s:%d %lu\n", __func__, __LINE__, SLAB_EMBEDDED_SETSIZE);
+		kl->eff = 0;
+		return;
+	}
+#endif
+
 	/* Account for memory used by an offpage slab header. */
 	total = kl->slabsize;
-	if ((fmt & UMA_ZFLAG_OFFPAGE) != 0)
+	if ((fmt & UMA_ZFLAG_OFFPAGE) != 0 && (fmt & UMA_ZFLAG_EMBEDSLAB) == 0)
 		total += slabzone(kl->ipers)->uz_keg->uk_rsize;
 
 	kl->eff = UMA_FRAC_FIXPT(kl->ipers * rsize, total);
@@ -2379,7 +2415,7 @@
 keg_layout(uma_keg_t keg)
 {
 	struct keg_layout_result kl = {}, kl_tmp;
-	u_int fmts[2];
+	u_int fmts[3];
 	u_int alignsize;
 	u_int nfmt;
 	u_int pages;
@@ -2441,12 +2477,20 @@
 	/* Build a list of all of the available formats for this keg. */
 	nfmt = 0;
 
+#ifdef UMA_USE_DMAP
+	/*
+	 * See if we can embed the slab header directly in the vm_page
+	 * structure.  This works only when the slab is a single page
+	 * and accessed via the direct map.
+	 */
+	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
+		fmts[nfmt++] = UMA_ZFLAG_EMBEDSLAB | UMA_ZFLAG_OFFPAGE;
+#endif
+
 	/* Evaluate an inline slab layout. */
 	if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0)
 		fmts[nfmt++] = 0;
 
-	/* TODO: vm_page-embedded slab. */
-
 	/*
 	 * We can't do OFFPAGE if we're internal or if we've been
 	 * asked to not go to the VM for buckets.  If we do this we
@@ -2526,7 +2570,7 @@
 	    (keg->uk_ipers - 1) * rsize >= PAGE_SIZE) {
 		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
 			keg->uk_flags |= UMA_ZFLAG_HASH;
-		else
+		else if ((keg->uk_flags & UMA_ZFLAG_EMBEDSLAB) == 0)
 			keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
 	}
 
@@ -4961,8 +5005,11 @@
 	lock = KEG_LOCK(keg, 0);
 	for (i = 0; i < cnt; i++) {
 		item = bucket[i];
-		if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) {
+		if ((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0) {
 			slab = vtoslab((vm_offset_t)item);
+		} else if ((zone->uz_flags & UMA_ZFLAG_EMBEDSLAB) != 0) {
+			slab = (uma_slab_t)
+			    PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)item));
 		} else {
 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
 			if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0)
@@ -5803,7 +5850,8 @@
 	int avail, effpct, total;
 
 	total = keg->uk_ppera * PAGE_SIZE;
-	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
+	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0 &&
+	    (keg->uk_flags & UMA_ZFLAG_EMBEDSLAB) == 0)
 		total += slabzone(keg->uk_ipers)->uz_keg->uk_rsize;
 	/*
 	 * We consider the client's requested size and alignment here, not the
@@ -5845,6 +5893,9 @@
 		return (NULL);
 	if (zone->uz_flags & UMA_ZFLAG_VTOSLAB)
 		return (vtoslab((vm_offset_t)mem));
+	if ((zone->uz_flags & UMA_ZFLAG_EMBEDSLAB) != 0)
+		return ((uma_slab_t)
+		    PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)mem)));
 	keg = zone->uz_keg;
 	if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0)
 		return ((uma_slab_t)(mem + keg->uk_pgoff));
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -162,12 +162,17 @@
 #define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
 #define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
 #define	UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */
+#define	UMA_ZFLAG_EMBEDSLAB	0x80000000	/*
+						 * Slab is embedded in the
+						 * vm_page structure.
+						 */
 
 #define	UMA_ZFLAG_INHERIT						\
     (UMA_ZFLAG_OFFPAGE | UMA_ZFLAG_HASH | UMA_ZFLAG_VTOSLAB |		\
-     UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL)
+     UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL | UMA_ZFLAG_EMBEDSLAB)
 
 #define	PRINT_UMA_ZFLAGS	"\20"	\
+    "\40EMBEDSLAB"	\
     "\37TRASH"	\
     "\36INTERNAL"	\
     "\35BUCKET"	\
@@ -390,6 +395,7 @@
 
 typedef struct uma_hash_slab * uma_hash_slab_t;
 
+#ifdef _KERNEL
 static inline uma_hash_slab_t
 slab_tohashslab(uma_slab_t slab)
 {
@@ -403,6 +409,10 @@
 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0)
 		return ((void *)((uintptr_t)slab - keg->uk_pgoff));
+#ifdef UMA_USE_DMAP
+	else if ((keg->uk_flags & UMA_ZFLAG_EMBEDSLAB) != 0)
+		return ((void *)PHYS_TO_DMAP(((vm_page_t)slab)->phys_addr));
+#endif
 	else
 		return (slab_tohashslab(slab)->uhs_data);
 }
 
@@ -424,6 +434,7 @@
 	data = (uintptr_t)slab_data(slab, keg);
 	return (((uintptr_t)item - data) / keg->uk_rsize);
 }
+#endif
 
 STAILQ_HEAD(uma_bucketlist, uma_bucket);
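
For reviewers who want the EMBEDSLAB address translations in one place, the following is a minimal stand-alone sketch, not part of the patch. The helper names embedslab_from_item() and embedslab_data() are hypothetical; they only restate what the hunks above do inline with pmap_kextract(), PHYS_TO_VM_PAGE() and PHYS_TO_DMAP(), and they assume a kernel built with UMA_USE_DMAP (e.g. amd64), where each EMBEDSLAB slab is a single direct-mapped page backed by a vm_page.

/*
 * Illustrative sketch only, not part of the patch.  With the EMBEDSLAB
 * format the slab header is the vm_page backing the single-page slab,
 * so no separate hash-slab allocation is needed.  The include list
 * roughly mirrors uma_core.c and is abbreviated.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

/* Item KVA -> embedded slab header (the vm_page of the page it sits in). */
static inline uma_slab_t
embedslab_from_item(void *item)		/* hypothetical helper */
{
	return ((uma_slab_t)PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)item)));
}

/* Embedded slab header -> slab data, back through the direct map. */
static inline void *
embedslab_data(uma_slab_t slab)		/* hypothetical helper */
{
	return ((void *)PHYS_TO_DMAP(((vm_page_t)slab)->phys_addr));
}

Since the slab header apparently overlaps vm_page fields that UMA does not need while it owns the page, the uma_small_free() hunk restores m->ref_count and m->busy_lock before the page is returned, and the uma_small_alloc() hunk asserts those expected values on the way out.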