Index: sys/kern/subr_blist.c
===================================================================
--- sys/kern/subr_blist.c
+++ sys/kern/subr_blist.c
@@ -90,6 +90,7 @@
 #include
 #include
 #include
+#include <sys/sbuf.h>
 #include
 #include
 
@@ -101,6 +102,7 @@
 #include
 #include
+#include <sys/sbuf.h>
 #include
 #include
 #include
 
@@ -170,6 +172,35 @@
 }
 
 /*
+ * Use binary search to find the 1 bit in a u_daddr_t.
+ * Assumes that the argument has only one bit set. ffs(x)
+ * could be implemented as return (x==0? 0: 1+bitpos(x&-x)).
+ */
+static inline int
+bitpos(u_daddr_t mask)
+{
+	int hi, lo, mid;
+
+	switch (sizeof(u_daddr_t)) {
+#ifdef HAVE_INLINE_FFSLL
+	case sizeof(long long):
+		return (ffsll(mask) - 1);
+#endif
+	default:
+		lo = 0;
+		hi = BLIST_BMAP_RADIX;
+		while (lo + 1 < hi) {
+			mid = (lo + hi) >> 1;
+			if ((mask >> mid) != 0)
+				lo = mid;
+			else
+				hi = mid;
+		}
+		return (lo);
+	}
+}
+
+/*
  * blist_create() - create a blist capable of handling up to the specified
  * number of blocks
  *
@@ -340,6 +371,150 @@
 
 #endif
 
+struct gap_stats {
+	int	num;		/* number of gaps observed */
+	daddr_t	max;		/* largest gap size */
+	daddr_t	avg;		/* average gap size */
+	daddr_t	err;		/* sum - num * avg */
+	int	histo[127];	/* counts for gaps of various sizes */
+	int	max_bucket;
+	u_daddr_t start;
+};
+
+/*
+ * gap_stats_counting() - is the state 'counting 1 bits'?
+ *	or 'skipping 0 bits'?
+ */
+static inline int
+gap_stats_counting(struct gap_stats *stats)
+{
+
+	return (stats->start != -1);
+}
+
+/*
+ * init_gap_stats() - initialize stats on gap sizes
+ */
+static inline void
+init_gap_stats(struct gap_stats *stats)
+{
+
+	bzero(stats, sizeof(*stats));
+	stats->start = -1;
+}
+
+/*
+ * update_gap_stats() - update stats on gap sizes
+ */
+static void
+update_gap_stats(struct gap_stats *stats, u_daddr_t posn)
+{
+	u_daddr_t size;
+	int id, logsize;
+
+	if (!gap_stats_counting(stats)) {
+		stats->start = posn;
+		return;
+	}
+	size = posn - stats->start;
+	stats->start = -1;
+
+	if (size > stats->max)
+		stats->max = size;
+	if (size == 1) {
+		id = 0;
+	} else {
+		logsize = 1;
+		while ((size >> logsize) > 1)
+			logsize++;
+		id = 2 * logsize - 1;
+		if (size >> (logsize - 1) >= 3)
+			id++;
+	}
+	stats->histo[id]++;
+	if (id > stats->max_bucket)
+		stats->max_bucket = id;
+	stats->err += (daddr_t)size - stats->avg;
+	stats->num++;
+	stats->avg += stats->err / stats->num;
+	stats->err %= stats->num;
+}
+
+/*
+ * dump_gap_stats() - print stats on gap sizes
+ */
+static inline void
+dump_gap_stats(struct sbuf *s, struct gap_stats *stats)
+{
+	int i, val;
+
+	sbuf_printf(s, "number of gaps: %d\n", stats->num);
+	sbuf_printf(s, "largest gap: %jd\n", (intmax_t)stats->max);
+	sbuf_printf(s, "average gap size: %jd\n", (intmax_t)stats->avg);
+	sbuf_printf(s, "histogram:\n");
+	for (i = 0; i <= stats->max_bucket; i++) {
+		val = (i == 0) ? 1 : ((3 - i % 2) << ((i - 1) / 2));
+		sbuf_printf(s, "%d\t%d\n", val, stats->histo[i]);
+	}
+}
+
+/*
+ * blist_stats() - dump radix tree stats
+ */
+void
+blist_stats(struct sbuf *s, blist_t bl)
+{
+	daddr_t bit, diff, i, last_block, nodes, radix;
+	u_daddr_t mask;
+	struct gap_stats stats;
+
+	init_gap_stats(&stats);
+	nodes = 0;
+	i = bl->bl_radix;
+	while (i < bl->bl_radix + bl->bl_blocks) {
+		last_block = i / BLIST_BMAP_RADIX;
+		radix = BLIST_BMAP_RADIX;
+		while ((last_block & (BLIST_META_RADIX - 1)) == 0) {
+			/*
+			 * Skip meta node.
+			 */
+			last_block /= BLIST_META_RADIX;
+			radix *= BLIST_META_RADIX;
+		}
+		while (radix > BLIST_BMAP_RADIX) {
+			if (bl->bl_root[nodes].u.bmu_avail == 0) {
+				if (gap_stats_counting(&stats))
+					update_gap_stats(&stats, i);
+				break;
+			}
+			if (bl->bl_root[nodes].u.bmu_avail == radix) {
+				if (!gap_stats_counting(&stats))
+					update_gap_stats(&stats, i);
+				break;
+			}
+			nodes++;
+			radix /= BLIST_META_RADIX;
+		}
+		if (radix == BLIST_BMAP_RADIX) {
+			/*
+			 * Scan leaf.
+			 */
+			mask = bl->bl_root[nodes].u.bmu_bitmap;
+			diff = mask ^ (mask << 1);
+			diff ^= gap_stats_counting(&stats);
+			while (diff) {
+				bit = diff & -diff;
+				update_gap_stats(&stats, i + bitpos(bit));
+				diff ^= bit;
+			}
+		}
+		nodes += radix_to_skip(radix);
+		i += radix;
+	}
+	update_gap_stats(&stats, i);
+	dump_gap_stats(s, &stats);
+}
+
 /************************************************************************
  *			ALLOCATION SUPPORT FUNCTIONS			*
  ************************************************************************
@@ -355,13 +530,13 @@
  *
  *	This is the core of the allocator and is optimized for the
  *	BLIST_BMAP_RADIX block allocation case. Otherwise, execution
- *	time is proportional to log2(count) + log2(BLIST_BMAP_RADIX).
+ *	time is proportional to log2(count) + bitpos time.
  */
 static daddr_t
 blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count, daddr_t cursor)
 {
 	u_daddr_t mask;
-	int count1, hi, lo, mid, num_shifts, range1, range_ext;
+	int count1, lo, num_shifts, range1, range_ext;
 
 	if (count == BLIST_BMAP_RADIX) {
 		/*
@@ -419,17 +594,10 @@
 	/*
 	 * The least significant set bit in mask marks the start of the first
 	 * available range of sufficient size. Clear all the bits but that one,
-	 * and then perform a binary search to find its position.
+	 * and then find its position.
 	 */
 	mask &= -mask;
-	hi = BLIST_BMAP_RADIX - count1;
-	while (lo + 1 < hi) {
-		mid = (lo + hi) >> 1;
-		if ((mask >> mid) != 0)
-			lo = mid;
-		else
-			hi = mid;
-	}
+	lo = bitpos(mask);
 
 	/*
 	 * Set in mask exactly the bits being allocated, and clear them from
@@ -980,6 +1148,7 @@
 	int size = 1024;
 	int i;
 	blist_t bl;
+	struct sbuf *s;
 
 	for (i = 1; i < ac; ++i) {
 		const char *ptr = av[i];
@@ -1014,6 +1183,13 @@
 		case 'p':
 			blist_print(bl);
 			break;
+		case 's':
+			s = sbuf_new_auto();
+			blist_stats(s, bl);
+			sbuf_finish(s);
+			printf("%s", sbuf_data(s));
+			sbuf_delete(s);
+			break;
 		case 'a':
 			if (sscanf(buf + 1, "%lld", &count) == 1) {
 				daddr_t blk = blist_alloc(bl, count);
@@ -1041,6 +1217,7 @@
 		case 'h':
 			puts(
 			    "p    -print\n"
+			    "s    -stats\n"
 			    "a %d -allocate\n"
 			    "f %x %d -free\n"
 			    "l %x %d -fill\n"
Index: sys/sys/blist.h
===================================================================
--- sys/sys/blist.h
+++ sys/sys/blist.h
@@ -97,6 +97,8 @@
 daddr_t	blist_fill(blist_t bl, daddr_t blkno, daddr_t count);
 void	blist_free(blist_t blist, daddr_t blkno, daddr_t count);
 void	blist_print(blist_t blist);
+struct sbuf;
+void	blist_stats(struct sbuf *s, blist_t bl);
 void	blist_resize(blist_t *pblist, daddr_t count, int freenew, int flags);
 
 #endif	/* _SYS_BLIST_H_ */
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -92,6 +92,7 @@
 #include
 #include
 #include
+#include <sys/sbuf.h>
 #include
 #include
 #include
@@ -323,6 +324,9 @@
 SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
     CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
     "Maximum running async swap ops");
+static int sysctl_swap_pager_dumpstats(SYSCTL_HANDLER_ARGS);
+SYSCTL_OID(_vm, OID_AUTO, swap_pager_dumpstats, CTLTYPE_STRING | CTLFLAG_RD,
+    NULL, 0, sysctl_swap_pager_dumpstats, "A", "Swap Pager Info");
 
 static struct sx sw_alloc_sx;
 
@@ -780,6 +784,30 @@
 }
 
 /*
+ * SYSCTL_SWAP_PAGER_DUMPSTATS() - produce raw swap space stats
+ */
+static int
+sysctl_swap_pager_dumpstats(SYSCTL_HANDLER_ARGS)
+{
+	struct sbuf sbuf;
+	int error;
+	struct swdevt *sp;
+
+	error = sysctl_wire_old_buffer(req, 0);
+	if (error != 0)
+		return (error);
+	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
+
+	mtx_lock(&sw_dev_mtx);
+	TAILQ_FOREACH(sp, &swtailq, sw_list)
+		blist_stats(&sbuf, sp->sw_blist);
+	mtx_unlock(&sw_dev_mtx);
+	error = sbuf_finish(&sbuf);
+	sbuf_delete(&sbuf);
+	return (error);
+}
+
+/*
  * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
  * range within an object.
  *