Index: head/sys/vm/uma_core.c
===================================================================
--- head/sys/vm/uma_core.c
+++ head/sys/vm/uma_core.c
@@ -292,6 +292,8 @@
 static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
 
 #ifdef INVARIANTS
+static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
+
 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
@@ -1201,7 +1203,7 @@
 	slab->us_domain = domain;
 	BIT_FILL(keg->uk_ipers, &slab->us_free);
 #ifdef INVARIANTS
-	BIT_ZERO(SLAB_MAX_SETSIZE, &slab->us_debugfree);
+	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
 #endif
 
 	if (keg->uk_init != NULL) {
@@ -1484,6 +1486,15 @@
 	return (0);
 }
 
+#ifdef INVARIANTS
+struct noslabbits *
+slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
+{
+
+	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
+}
+#endif
+
 /*
  * Actual size of embedded struct slab (!OFFPAGE).
  */
@@ -1492,7 +1503,7 @@
 {
 	size_t s;
 
-	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems);
+	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
 	return (roundup(s, UMA_ALIGN_PTR + 1));
 }
 
@@ -4514,12 +4525,10 @@
 	keg = zone->uz_keg;
 	freei = slab_item_index(slab, keg, item);
 
-	if (BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
+	if (BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
 		    item, zone, zone->uz_name, slab, freei);
 
-	BIT_SET_ATOMIC(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree);
-
-	return;
+	BIT_SET_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
 }
 
 /*
@@ -4550,11 +4559,11 @@
 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
 		    item, zone, zone->uz_name, slab, freei);
 
-	if (!BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
+	if (!BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
 		    item, zone, zone->uz_name, slab, freei);
 
-	BIT_CLR_ATOMIC(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree);
+	BIT_CLR_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
 }
 
 #endif /* INVARIANTS */
Index: head/sys/vm/uma_int.h
===================================================================
--- head/sys/vm/uma_int.h
+++ head/sys/vm/uma_int.h
@@ -271,17 +271,26 @@
 	uint16_t	us_freecount;		/* How many are free? */
 	uint8_t		us_flags;		/* Page flags see uma.h */
 	uint8_t		us_domain;		/* Backing NUMA domain. */
-#ifdef INVARIANTS
-	struct slabbits	us_debugfree;		/* Debug bitmask. */
-#endif
-	struct noslabbits us_free;		/* Free bitmask. */
+	struct noslabbits us_free;		/* Free bitmask, flexible. */
 };
+_Static_assert(sizeof(struct uma_slab) == offsetof(struct uma_slab, us_free),
+    "us_free field must be last");
#if MAXMEMDOM >= 255
 #error "Slab domain type insufficient"
 #endif
 
 typedef struct uma_slab * uma_slab_t;
 
+/*
+ * On INVARIANTS builds, the slab contains a second bitset of the same size,
+ * "dbg_bits", which is laid out immediately after us_free.
+ */
+#ifdef INVARIANTS
+#define	SLAB_BITSETS	2
+#else
+#define	SLAB_BITSETS	1
+#endif
+
 /* These three functions are for embedded (!OFFPAGE) use only. */
 size_t slab_sizeof(int nitems);
 size_t slab_space(int nitems);
@@ -293,7 +302,10 @@
  */
 struct uma_hash_slab {
 	struct uma_slab		uhs_slab;	/* Must be first. */
-	struct slabbits		uhs_bits;	/* Must be second. */
+	struct slabbits		uhs_bits1;	/* Must be second. */
+#ifdef INVARIANTS
+	struct slabbits		uhs_bits2;	/* Must be third. */
+#endif
 	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
 	uint8_t			*uhs_data;	/* First item */
 };
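
For readers following the pointer arithmetic above, here is a minimal userspace sketch of the trailing-bitset layout this change introduces, assuming nothing beyond standard C11. The names slab_hdr, slab_hdr_sizeof, and dbg_bits are illustrative stand-ins rather than kernel identifiers; BITSET_SIZE approximates the macro of the same name in sys/bitset.h, and INVARIANTS is defined unconditionally here to model a debug build.

/* Userspace sketch of the two-trailing-bitsets layout; not kernel code. */
#define	INVARIANTS		/* model an INVARIANTS (debug) build */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Rough stand-in for sys/bitset.h's BITSET_SIZE(): bytes for n bits. */
#define	LONG_BITS	(8 * sizeof(long))
#define	BITSET_SIZE(n)	((((n) + LONG_BITS - 1) / LONG_BITS) * sizeof(long))

#ifdef INVARIANTS
#define	SLAB_BITSETS	2	/* the free bitset plus the debug bitset */
#else
#define	SLAB_BITSETS	1	/* the free bitset only */
#endif

/* Header ending in a zero-length bitset, like struct uma_slab. */
struct slab_hdr {
	uint16_t	freecount;
	uint8_t		flags;
	uint8_t		domain;
	unsigned long	free_bits[];	/* like us_free: sized at runtime */
};
_Static_assert(sizeof(struct slab_hdr) == offsetof(struct slab_hdr, free_bits),
    "free_bits must be last");

/* Analogue of slab_sizeof(): header plus SLAB_BITSETS runtime-sized sets. */
static size_t
slab_hdr_sizeof(int nitems)
{

	return (sizeof(struct slab_hdr) + BITSET_SIZE(nitems) * SLAB_BITSETS);
}

/* Analogue of slab_dbg_bits(): the debug set starts right after free_bits. */
static unsigned long *
dbg_bits(struct slab_hdr *hdr, int nitems)
{

	return ((void *)((char *)hdr->free_bits + BITSET_SIZE(nitems)));
}

int
main(void)
{
	int nitems = 100;	/* items per slab, i.e. keg->uk_ipers */
	struct slab_hdr *hdr;

	hdr = calloc(1, slab_hdr_sizeof(nitems));
	assert(hdr != NULL);

	/* Record item 7 as allocated in the debug set, then check it. */
	dbg_bits(hdr, nitems)[7 / LONG_BITS] |= 1UL << (7 % LONG_BITS);
	assert((dbg_bits(hdr, nitems)[7 / LONG_BITS] >> 7) & 1);

	printf("header %zu bytes, each %d-item bitset %zu bytes\n",
	    sizeof(struct slab_hdr), nitems, BITSET_SIZE(nitems));
	free(hdr);
	return (0);
}

The saving mirrored by slab_hdr_sizeof() is the point of the patch: the old fixed-size us_debugfree reserved SLAB_MAX_SETSIZE bits in every INVARIANTS slab no matter how few items it held, whereas a second uk_ipers-sized set costs exactly as much as us_free and keeps slab_sizeof() honest about the slab's real footprint. The _Static_assert in the uma_int.h hunk enforces the one invariant the pointer arithmetic depends on: nothing may be laid out after us_free.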