Index: head/sys/vm/uma_core.c
===================================================================
--- head/sys/vm/uma_core.c
+++ head/sys/vm/uma_core.c
@@ -117,6 +117,16 @@
 static uma_zone_t zones;
 
 /*
+ * On INVARIANTS builds, the slab contains a second bitset of the same size,
+ * "dbg_bits", which is laid out immediately after us_free.
+ */
+#ifdef INVARIANTS
+#define	SLAB_BITSETS	2
+#else
+#define	SLAB_BITSETS	1
+#endif
+
+/*
  * These are the two zones from which all offpage uma_slab_ts are allocated.
  *
  * One zone is for slab headers that can represent a larger number of items,
@@ -1898,7 +1908,7 @@
 }
 
 #ifdef INVARIANTS
-struct noslabbits *
+static struct noslabbits *
 slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
 {
 
@@ -1909,7 +1919,7 @@
 /*
  * Actual size of embedded struct slab (!OFFPAGE).
  */
-size_t
+static size_t
 slab_sizeof(int nitems)
 {
 	size_t s;
@@ -1918,15 +1928,6 @@
 	return (roundup(s, UMA_ALIGN_PTR + 1));
 }
 
-/*
- * Size of memory for embedded slabs (!OFFPAGE).
- */
-size_t
-slab_space(int nitems)
-{
-	return (UMA_SLAB_SIZE - slab_sizeof(nitems));
-}
-
 #define	UMA_FIXPT_SHIFT	31
 #define	UMA_FRAC_FIXPT(n, d)						\
 	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
@@ -1965,18 +1966,6 @@
 	}
 
 	return (ipers);
-}
-
-/*
- * Compute the number of items that will fit in a slab for a startup zone.
- */
-int
-slab_ipers(size_t size, int align)
-{
-	int rsize;
-
-	rsize = roundup(size, align + 1);	/* Assume no CACHESPREAD */
-	return (slab_ipers_hdr(size, rsize, UMA_SLAB_SIZE, true));
 }
 
 struct keg_layout_result {
Index: head/sys/vm/uma_int.h
===================================================================
--- head/sys/vm/uma_int.h
+++ head/sys/vm/uma_int.h
@@ -368,11 +368,6 @@
 };
 typedef struct uma_keg	* uma_keg_t;
 
-#ifdef _KERNEL
-#define	KEG_ASSERT_COLD(k)						\
-	KASSERT(uma_keg_get_allocs((k)) == 0,				\
-	    ("keg %s initialization after use.", (k)->uk_name))
-
 /*
  * Free bits per-slab.
  */
@@ -391,30 +386,14 @@
 	uint8_t		us_domain;		/* Backing NUMA domain. */
 	struct noslabbits us_free;		/* Free bitmask, flexible. */
 };
-_Static_assert(sizeof(struct uma_slab) == offsetof(struct uma_slab, us_free),
+_Static_assert(sizeof(struct uma_slab) == __offsetof(struct uma_slab, us_free),
     "us_free field must be last");
-#if MAXMEMDOM >= 255
-#error "Slab domain type insufficient"
-#endif
+_Static_assert(MAXMEMDOM < 255,
+    "us_domain field is not wide enough");
 
 typedef struct uma_slab * uma_slab_t;
 
 /*
- * On INVARIANTS builds, the slab contains a second bitset of the same size,
- * "dbg_bits", which is laid out immediately after us_free.
- */
-#ifdef INVARIANTS
-#define	SLAB_BITSETS	2
-#else
-#define	SLAB_BITSETS	1
-#endif
-
-/* These three functions are for embedded (!OFFPAGE) use only. */
-size_t slab_sizeof(int nitems);
-size_t slab_space(int nitems);
-int slab_ipers(size_t size, int align);
-
-/*
  * Slab structure with a full sized bitset and hash link for both
  * HASH and OFFPAGE zones.
  */
@@ -460,7 +439,6 @@
 	data = (uintptr_t)slab_data(slab, keg);
 	return (((uintptr_t)item - data) / keg->uk_rsize);
 }
-#endif /* _KERNEL */
 
 STAILQ_HEAD(uma_bucketlist, uma_bucket);
 
@@ -578,6 +556,10 @@
 	KASSERT((void *)(keg) != NULL,					\
 	    ("%s: Invalid zone %p type", __func__, (zone)));	\
 	} while (0)
+
+#define	KEG_ASSERT_COLD(k)						\
+	KASSERT(uma_keg_get_allocs((k)) == 0,				\
+	    ("keg %s initialization after use.", (k)->uk_name))
 
 /* Domains are contiguous after the last CPU */
 #define	ZDOM_GET(z, n)							\