D50640.id156395.diff

diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -49,7 +49,6 @@
* - Investigate cache size adjustments
*/
-#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"
@@ -57,7 +56,6 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
-#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
@@ -126,6 +124,89 @@
#define SLAB_BITSETS 1
#endif
+/*
+ * How many bytes do we need for a bitset of the given size?
+ */
+#define SLAB_BITSET_WORDS(setsize) \
+ howmany((setsize), NBBY * sizeof(int))
+#define SLAB_BITSET_SIZE(setsize) \
+ (SLAB_BITSET_WORDS(setsize) * sizeof(int))
+
+/*
+ * Operations on slab bitsets. These would ideally be implemented using
+ * bitset.h, but that requires each bitset word to be a long, which is excessive
+ * on 64-bit systems.
+ */
+static void
+slab_bitset_fill(size_t setsize, int *set)
+{
+ for (size_t i = 0; i < SLAB_BITSET_WORDS(setsize); i++)
+ set[i] = -1;
+}
+
+static void __diagused
+slab_bitset_zero(size_t setsize, int *set)
+{
+ for (size_t i = 0; i < SLAB_BITSET_WORDS(setsize); i++)
+ set[i] = 0;
+}
+
+static int
+slab_bitset_ffs(size_t setsize, int *set)
+{
+ for (size_t i = 0; i < SLAB_BITSET_WORDS(setsize); i++) {
+ if (set[i] != 0) {
+ int bit;
+
+ bit = ffs(set[i]);
+ if (bit != 0)
+ return (i * NBBY * sizeof(int) + bit);
+ }
+ }
+ return (0);
+}
+
+static void
+slab_bitset_set(size_t setsize, int index, int *set)
+{
+ int word;
+
+ word = index / (NBBY * sizeof(int));
+ set[word] |= (1U << (index % (NBBY * sizeof(int))));
+}
+
+static void
+slab_bitset_clear(size_t setsize, int index, int *set)
+{
+ int word;
+
+ word = index / (NBBY * sizeof(int));
+ set[word] &= ~(1U << (index % (NBBY * sizeof(int))));
+}
+
+static int __diagused
+slab_bitset_atomic_testandset(size_t setsize, int index, int *set)
+{
+ int word;
+
+ word = index / (NBBY * sizeof(int));
+ return (atomic_testandset_int(&set[word], index));
+}
+
+static int __diagused
+slab_bitset_atomic_testandclear(size_t setsize, int index, int *set)
+{
+ int word;
+
+ word = index / (NBBY * sizeof(int));
+ return (atomic_testandclear_int(&set[word], index));
+}
+
+/*
+ * Free bits per-slab.
+ */
+#define SLAB_MAX_SETSIZE (PAGE_SIZE / UMA_SMALLEST_UNIT)
+
/*
* These are the two zones from which all offpage uma_slab_ts are allocated.
*
@@ -134,8 +215,10 @@
* headers that are smaller and represent fewer items, making the headers more
* efficient.
*/
-#define SLABZONE_SIZE(setsize) \
- (sizeof(struct uma_hash_slab) + BITSET_SIZE(setsize) * SLAB_BITSETS)
+#define SLABZONE_SIZE(setsize) \
+ (roundup2(__offsetof(struct uma_hash_slab, uhs_slab.us_free) + \
+ SLAB_BITSET_SIZE(setsize) * SLAB_BITSETS, \
+ _Alignof(struct uma_hash_slab)))
#define SLABZONE0_SETSIZE (PAGE_SIZE / 16)
#define SLABZONE1_SETSIZE SLAB_MAX_SETSIZE
#define SLABZONE0_SIZE SLABZONE_SIZE(SLABZONE0_SETSIZE)
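
The sizing arithmetic above can be exercised in a short userspace sketch. The structures toy_slab and toy_hash_slab below are hypothetical stand-ins for struct uma_slab and struct uma_hash_slab (whose full layouts are not part of this diff), PAGE_SIZE is assumed to be 4096, and TOY_BITSETS stands in for SLAB_BITSETS; only the word/byte math mirrors the macros above.

/*
 * Userspace sketch of the bitset sizing arithmetic.  toy_slab and
 * toy_hash_slab are hypothetical stand-ins for the kernel structures,
 * and PAGE_SIZE is assumed to be 4096.
 */
#include <stddef.h>
#include <stdio.h>

#define NBBY            8
#define PAGE_SIZE       4096
#define howmany(x, y)   (((x) + ((y) - 1)) / (y))
#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))  /* y: power of 2 */

#define SLAB_BITSET_WORDS(setsize)  howmany((setsize), NBBY * sizeof(int))
#define SLAB_BITSET_SIZE(setsize)   (SLAB_BITSET_WORDS(setsize) * sizeof(int))
#define TOY_BITSETS     1           /* 2 when a debug bitset is also carried */

struct toy_slab {
        void            *ts_link;
        unsigned short   ts_freecount;
        unsigned char    ts_flags;
        unsigned char    ts_domain;
        int              ts_free[0];    /* trailing free bitset */
};

struct toy_hash_slab {
        void            *ths_link;
        unsigned char   *ths_data;
        struct toy_slab  ths_slab;      /* must be last */
};

/* Same shape as SLABZONE_SIZE(): bytes up to the bitset, plus the bitset. */
#define TOY_SLABZONE_SIZE(setsize)                                      \
        roundup2(offsetof(struct toy_hash_slab, ths_slab.ts_free) +     \
            SLAB_BITSET_SIZE(setsize) * TOY_BITSETS,                    \
            _Alignof(struct toy_hash_slab))

int
main(void)
{
        printf("%d bits -> %zu words, %zu bytes\n", PAGE_SIZE / 16,
            SLAB_BITSET_WORDS(PAGE_SIZE / 16),
            SLAB_BITSET_SIZE(PAGE_SIZE / 16));
        printf("toy slab zone item size: %zu bytes\n",
            (size_t)TOY_SLABZONE_SIZE(PAGE_SIZE / 16));
        return (0);
}
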
@@ -345,7 +428,7 @@
#ifdef INVARIANTS
static uint64_t uma_keg_get_allocs(uma_keg_t zone);
-static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
+static int *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
@@ -1826,9 +1909,9 @@
slab->us_flags = sflags;
slab->us_domain = domain;
- BIT_FILL(keg->uk_ipers, &slab->us_free);
+ slab_bitset_fill(keg->uk_ipers, slab->us_free);
#ifdef INVARIANTS
- BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
+ slab_bitset_zero(keg->uk_ipers, slab_dbg_bits(slab, keg));
#endif
if (keg->uk_init != NULL) {
@@ -2187,11 +2270,12 @@
}
#ifdef INVARIANTS
-static struct noslabbits *
+static int *
slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
{
- return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
+ return ((void *)((char *)&slab->us_free +
+ SLAB_BITSET_SIZE(keg->uk_ipers)));
}
#endif
@@ -2203,7 +2287,8 @@
{
size_t s;
- s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
+ s = __offsetof(struct uma_slab, us_free) +
+ SLAB_BITSET_SIZE(nitems) * SLAB_BITSETS;
return (roundup(s, UMA_ALIGN_PTR + 1));
}
@@ -4082,8 +4167,8 @@
KEG_LOCK_ASSERT(keg, slab->us_domain);
dom = &keg->uk_domain[slab->us_domain];
- freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
- BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
+ freei = slab_bitset_ffs(keg->uk_ipers, slab->us_free) - 1;
+ slab_bitset_clear(keg->uk_ipers, freei, slab->us_free);
item = slab_item(slab, keg, freei);
slab->us_freecount--;
dom->ud_free_items--;
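
The hunk above allocates an item by taking the first set bit of the slab's free map, converting the 1-based ffs result to an item index, and clearing that bit. Below is a minimal userspace analogue of that pattern; the bitset helpers are reimplemented locally and the items-per-slab count is made up for the example.

/*
 * Userspace analogue of the allocation path: fill the free map, find the
 * first set bit (1-based), convert it to an item index, clear it.
 */
#include <assert.h>
#include <stdio.h>
#include <strings.h>            /* ffs() */

#define NBBY            8
#define BITS_PER_WORD   (NBBY * (int)sizeof(int))
#define NWORDS(nbits)   (((nbits) + BITS_PER_WORD - 1) / BITS_PER_WORD)

static void
bitset_fill(int nbits, int *set)
{
        for (int i = 0; i < NWORDS(nbits); i++)
                set[i] = -1;
}

static int
bitset_ffs(int nbits, int *set)
{
        for (int i = 0; i < NWORDS(nbits); i++)
                if (set[i] != 0)
                        return (i * BITS_PER_WORD + ffs(set[i]));
        return (0);             /* no bit set */
}

static void
bitset_clear(int index, int *set)
{
        set[index / BITS_PER_WORD] &= ~(1U << (index % BITS_PER_WORD));
}

int
main(void)
{
        int free_map[2];        /* room for up to 64 items */
        int ipers = 40;         /* hypothetical items per slab */

        bitset_fill(ipers, free_map);
        for (int n = 0; n < 3; n++) {
                int freei = bitset_ffs(ipers, free_map) - 1;

                assert(freei >= 0);
                bitset_clear(freei, free_map);
                printf("allocated item %d\n", freei);
        }
        return (0);
}
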
@@ -4851,7 +4936,7 @@
/* Slab management. */
freei = slab_item_index(slab, keg, item);
- BIT_SET(keg->uk_ipers, freei, &slab->us_free);
+ slab_bitset_set(keg->uk_ipers, freei, slab->us_free);
slab->us_freecount++;
/* Keg statistics. */
@@ -5825,7 +5910,7 @@
keg = zone->uz_keg;
freei = slab_item_index(slab, keg, item);
- if (BIT_TEST_SET_ATOMIC(keg->uk_ipers, freei,
+ if (slab_bitset_atomic_testandset(keg->uk_ipers, freei,
slab_dbg_bits(slab, keg)))
panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)",
item, zone, zone->uz_name, slab, freei);
@@ -5859,7 +5944,7 @@
panic("Unaligned free of %p from zone %p(%s) slab %p(%d)",
item, zone, zone->uz_name, slab, freei);
- if (!BIT_TEST_CLR_ATOMIC(keg->uk_ipers, freei,
+ if (!slab_bitset_atomic_testandclear(keg->uk_ipers, freei,
slab_dbg_bits(slab, keg)))
panic("Duplicate free of %p from zone %p(%s) slab %p(%d)",
item, zone, zone->uz_name, slab, freei);
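
The INVARIANTS hunks above catch duplicate allocations and frees by atomically test-and-setting (or test-and-clearing) the item's bit in the per-slab debug bitset. The sketch below shows the same check built on C11 atomics; it is an analogue for illustration only, not the kernel's atomic_testandset_int()/atomic_testandclear_int() API.

/*
 * Userspace analogue of the duplicate-alloc/free checks: atomically set
 * (or clear) a bit and report whether it was already set.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NBBY            8
#define BITS_PER_WORD   (NBBY * (int)sizeof(unsigned int))

static int
dbg_bitset_testandset(int index, atomic_uint *set)
{
        unsigned int mask, prev;

        mask = 1U << (index % BITS_PER_WORD);
        prev = atomic_fetch_or(&set[index / BITS_PER_WORD], mask);
        return ((prev & mask) != 0);
}

static int
dbg_bitset_testandclear(int index, atomic_uint *set)
{
        unsigned int mask, prev;

        mask = 1U << (index % BITS_PER_WORD);
        prev = atomic_fetch_and(&set[index / BITS_PER_WORD], ~mask);
        return ((prev & mask) != 0);
}

int
main(void)
{
        atomic_uint dbg[2] = { 0, 0 };

        if (dbg_bitset_testandset(5, dbg))
                printf("duplicate alloc of item 5\n");  /* not reached */
        if (dbg_bitset_testandset(5, dbg))
                printf("duplicate alloc of item 5\n");  /* reached: already set */
        if (!dbg_bitset_testandclear(5, dbg))
                printf("duplicate free of item 5\n");   /* not reached */
        if (!dbg_bitset_testandclear(5, dbg))
                printf("duplicate free of item 5\n");   /* reached: already clear */
        return (0);
}
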
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -29,7 +29,6 @@
*/
#include <sys/counter.h>
-#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>
@@ -363,13 +362,6 @@
};
typedef struct uma_keg * uma_keg_t;
-/*
- * Free bits per-slab.
- */
-#define SLAB_MAX_SETSIZE (PAGE_SIZE / UMA_SMALLEST_UNIT)
-#define SLAB_MIN_SETSIZE _BITSET_BITS
-BITSET_DEFINE(noslabbits, 0);
-
/*
* The slab structure manages a single contiguous allocation from backing
* store and subdivides it into individually allocatable items.
@@ -379,10 +371,8 @@
uint16_t us_freecount; /* How many are free? */
uint8_t us_flags; /* Page flags see uma.h */
uint8_t us_domain; /* Backing NUMA domain. */
- struct noslabbits us_free; /* Free bitmask, flexible. */
+ int us_free[0]; /* Free bitmask, flexible. */
};
-_Static_assert(sizeof(struct uma_slab) == __offsetof(struct uma_slab, us_free),
- "us_free field must be last");
_Static_assert(MAXMEMDOM < 255,
"us_domain field is not wide enough");
