diff --git a/sys/kern/subr_unit.c b/sys/kern/subr_unit.c
index 0f4d25257553..ee67e5db01d6 100644
--- a/sys/kern/subr_unit.c
+++ b/sys/kern/subr_unit.c
@@ -1,1139 +1,1260 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 *
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces in a very memory efficient manner.
 *
 * Allocation policy is always lowest free number first.
 *
 * A return value of -1 signals that no more unit numbers are available.
 *
 * There is no cost associated with the range of unit numbers, so unless
 * the resource really is finite, specify INT_MAX to new_unrhdr() and
 * forget about checking the return value.
 *
 * If a mutex is not provided when the unit number space is created, a
 * default global mutex is used.  The advantage of passing a mutex in is
 * that the alloc_unrl() function can be called with the mutex already
 * held (it will not be released by alloc_unrl()).
 *
 * The allocation function alloc_unr{l}() never sleeps (but it may block
 * on the mutex, of course).
 *
 * Freeing a unit number may require allocating memory, and can therefore
 * sleep, so the free_unr() function does not come in a pre-locked variant.
 *
 * A userland test program is included.
 *
 * Memory usage is a very complex function of the exact allocation
 * pattern, but it is always very compact:
 *
 *	For the very typical case where a single unbroken run of unit
 *	numbers is allocated, 44 bytes are used on i386.
 *
 *	For a unit number space of 1000 units and the random pattern
 *	in the usermode test program included, the worst case usage
 *	was 252 bytes on i386 for 500 allocated and 500 free units.
 *
 *	For a unit number space of 10000 units and the random pattern
 *	in the usermode test program included, the worst case usage
 *	was 798 bytes on i386 for 5000 allocated and 5000 free units.
 *
 * The worst case is where every other unit number is allocated and
 * the rest are free.  In that case 44 + N/4 bytes are used, where
 * N is the number of the highest unit allocated.
 */
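/*
 * Illustrative sketch (editorial, not part of this change): a typical
 * consumer of this API.  The name "md_units" is hypothetical.
 *
 *	static struct unrhdr *md_units;
 *
 *	md_units = new_unrhdr(0, INT_MAX, NULL);	// global mutex
 *	int unit = alloc_unr(md_units);			// lowest free, or -1
 *	if (unit != -1)
 *		free_unr(md_units, unit);		// may sleep
 *	delete_unrhdr(md_units);			// all units must be free
 */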
#include <sys/param.h>
#include <sys/types.h>
#include <sys/_unrhdr.h>

#ifdef _KERNEL

#include <sys/bitstring.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define	Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define	Free(foo) free(foo, M_UNIT)

static struct mtx unitmtx;

MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);

#ifdef UNR64_LOCKED
uint64_t
alloc_unr64(struct unrhdr64 *unr64)
{
	uint64_t item;

	mtx_lock(&unitmtx);
	item = unr64->counter++;
	mtx_unlock(&unitmtx);
	return (item);
}
#endif

#else /* ...USERLAND */

#include <bitstring.h>
#include <err.h>
#include <errno.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)

static int no_alloc;
#define	Malloc(foo) _Malloc(foo, __LINE__)
static void *
_Malloc(size_t foo, int line)
{

	KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
	return (calloc(foo, 1));
}
#define	Free(foo) free(foo)

struct unrhdr;

#define	UNR_NO_MTX	((void *)(uintptr_t)-1)

struct mtx {
	int	state;
} unitmtx;

static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}

static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}

#define	MA_OWNED	9

static void
mtx_assert(struct mtx *mp, int flag)
{
	if (flag == MA_OWNED) {
		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
	}
}

#define CTASSERT(foo)
#define WITNESS_WARN(flags, lock, fmt, ...)	(void)0

#endif /* USERLAND */

/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 *     If ptr is NULL, it represents a run of free items.
 *     If ptr points to the unrhdr it represents a run of allocated items.
 *     Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field represents the number of allocated items.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 *
 * Two special ranges are not covered by unrs:
 * - at the start of the allocator space, all elements in [low, low + first)
 *   are allocated;
 * - at the end of the allocator space, all elements in [high - last + 1, high]
 *   are free.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

struct unrb {
	bitstr_t		map[sizeof(struct unr) / sizeof(bitstr_t)];
};

CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);

/* Number of bits we can store in the bitmap */
#define	NBITS	(NBBY * sizeof(((struct unrb *)NULL)->map))

static inline bool
is_bitmap(struct unrhdr *uh, struct unr *up)
{
	return (up->ptr != uh && up->ptr != NULL);
}

/* Is the unrb empty in at least the first len bits? */
static inline bool
ub_empty(struct unrb *ub, int len)
{
	int first_set;

	bit_ffs(ub->map, len, &first_set);
	return (first_set == -1);
}
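/*
 * Worked example (editorial sketch): after allocating units 0-9 from a
 * fresh [0, 1000] space and then taking unit 20 via alloc_unr_specific(),
 * the state could be summarized as:
 *
 *	first = 10			// [0, 9] allocated, folded into first
 *	head: { ptr == NULL, len 10 }	// [10, 19] free
 *	      { ptr == uh,   len 1 }	// [20] allocated
 *	last = 980			// [21, 1000] free, folded into last
 *
 * A heavily fragmented region is instead stored as one unr whose ptr
 * points at a struct unrb, one bit per unit.
 */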
/* Is the unrb full?  That is, is the number of set elements equal to len? */
static inline bool
ub_full(struct unrb *ub, int len)
{
	int first_clear;

	bit_ffc(ub->map, len, &first_clear);
	return (first_clear == -1);
}

+/*
+ * start: ipos = -1, upos = NULL;
+ * end:   ipos = -1, upos = uh
+ */
+struct unrhdr_iter {
+	struct unrhdr *uh;
+	int ipos;
+	int upos_first_item;
+	void *upos;
+};
+
+void *
+create_iter_unr(struct unrhdr *uh)
+{
+	struct unrhdr_iter *iter;
+
+	iter = Malloc(sizeof(*iter));
+	iter->ipos = -1;
+	iter->uh = uh;
+	iter->upos = NULL;
+	iter->upos_first_item = -1;
+	return (iter);
+}
+
+static void
+next_iter_unrl(struct unrhdr *uh, struct unrhdr_iter *iter)
+{
+	struct unr *up;
+	struct unrb *ub;
+	u_int y;
+	int c;
+
+	if (iter->ipos == -1) {
+		if (iter->upos == uh)
+			return;
+		y = uh->low - 1;
+		if (uh->first == 0) {
+			up = TAILQ_FIRST(&uh->head);
+			if (up == NULL) {
+				iter->upos = uh;
+				return;
+			}
+			iter->upos = up;
+			if (up->ptr == NULL)
+				iter->upos = NULL;
+			else
+				iter->upos_first_item = uh->low;
+		}
+	} else {
+		y = iter->ipos;
+	}
+
+	up = iter->upos;
+
+	/* Special case for the compacted [low, first) run. */
+	if (up == NULL) {
+		if (y + 1 < uh->low + uh->first) {
+			iter->ipos = y + 1;
+			return;
+		}
+		up = iter->upos = TAILQ_FIRST(&uh->head);
+		iter->upos_first_item = uh->low + uh->first;
+	}
+
+	for (;;) {
+		if (y + 1 < iter->upos_first_item + up->len) {
+			if (up->ptr == uh) {
+				iter->ipos = y + 1;
+				return;
+			} else if (is_bitmap(uh, up)) {
+				ub = up->ptr;
+				bit_ffs_at(&ub->map[0],
+				    y + 1 - iter->upos_first_item,
+				    up->len, &c);
+				if (c != -1) {
+					iter->ipos = iter->upos_first_item + c;
+					return;
+				}
+			}
+		}
+		iter->upos_first_item += up->len;
+		y = iter->upos_first_item - 1;
+		up = iter->upos = TAILQ_NEXT((struct unr *)iter->upos, list);
+		if (iter->upos == NULL) {
+			iter->ipos = -1;
+			iter->upos = uh;
+			return;
+		}
+	}
+}
+
+/*
+ * Returns -1 at the end of the iteration, otherwise the next allocated
+ * element.
+ */
+int
+next_iter_unr(void *handle)
+{
+	struct unrhdr *uh;
+	struct unrhdr_iter *iter;
+
+	iter = handle;
+	uh = iter->uh;
+	if (uh->mtx != NULL)
+		mtx_lock(uh->mtx);
+	next_iter_unrl(uh, iter);
+	if (uh->mtx != NULL)
+		mtx_unlock(uh->mtx);
+	return (iter->ipos);
+}
+
+void
+free_iter_unr(void *handle)
+{
+	Free(handle);
+}
+
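/*
 * Illustrative sketch (editorial): walking every allocated unit.  The
 * iterator yields allocated unit numbers in ascending order and -1 at
 * the end; the handle must be released with free_iter_unr().
 *
 *	void *ih;
 *	int u;
 *
 *	ih = create_iter_unr(uh);
 *	while ((u = next_iter_unr(ih)) != -1)
 *		printf("unit %d is allocated\n", u);
 *	free_iter_unr(ih);
 */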
#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	struct unrb *ub;
	int w;
	u_int y, z;

	y = uh->first;
	z = 0;
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (is_bitmap(uh, up)) {
			ub = up->ptr;
			KASSERT(up->len <= NBITS,
			    ("UNR inconsistency: len %u max %zd (line %d)\n",
			    up->len, NBITS, line));
			z++;
			w = 0;
			bit_count(ub->map, 0, up->len, &w);
			y += w;
		} else if (up->ptr != NULL)
			y += up->len;
	}
	KASSERT(y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT(z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}

#else

static __inline void
check_unrhdr(struct unrhdr *uh __unused, int line __unused)
{
}

#endif

/*
 * Userland memory management.  Just use calloc and keep track of how
 * many elements we have allocated for check_unrhdr().
 */
static __inline void *
new_unr(struct unrhdr *uh, void **p1, void **p2)
{
	void *p;

	uh->alloc++;
	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
	if (*p1 != NULL) {
		p = *p1;
		*p1 = NULL;
		return (p);
	} else {
		p = *p2;
		*p2 = NULL;
		return (p);
	}
}

static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	struct unr *up;

	uh->alloc--;
	up = ptr;
	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
}

void
clean_unrhdrl(struct unrhdr *uh)
{
	struct unr *up;

	if (uh->mtx != NULL)
		mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		TAILQ_REMOVE(&uh->ppfree, up, list);
		if (uh->mtx != NULL)
			mtx_unlock(uh->mtx);
		Free(up);
		if (uh->mtx != NULL)
			mtx_lock(uh->mtx);
	}
}

void
clean_unrhdr(struct unrhdr *uh)
{

	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	clean_unrhdrl(uh);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);
}

void
init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
{

	KASSERT(low >= 0 && low <= high,
	    ("UNR: use error: new_unrhdr(%d, %d)", low, high));
	if (mutex == UNR_NO_MTX)
		uh->mtx = NULL;
	else if (mutex != NULL)
		uh->mtx = mutex;
	else
		uh->mtx = &unitmtx;
	TAILQ_INIT(&uh->head);
	TAILQ_INIT(&uh->ppfree);
	uh->low = low;
	uh->high = high;
	uh->first = 0;
	uh->last = 1 + (high - low);
	uh->busy = 0;
	uh->alloc = 0;
	check_unrhdr(uh, __LINE__);
}

/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */
struct unrhdr *
new_unrhdr(int low, int high, struct mtx *mutex)
{
	struct unrhdr *uh;

	uh = Malloc(sizeof *uh);
	init_unrhdr(uh, low, high, mutex);
	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));
	Free(uh);
}

void
clear_unrhdr(struct unrhdr *uh)
{
	struct unr *up, *uq;

	KASSERT(TAILQ_EMPTY(&uh->ppfree),
	    ("unrhdr has postponed item for free"));
	TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
		if (up->ptr != uh) {
			Free(up->ptr);
		}
		Free(up);
	}
	uh->busy = 0;
	uh->alloc = 0;
	init_unrhdr(uh, uh->low, uh->high, uh->mtx);

	check_unrhdr(uh, __LINE__);
}
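/*
 * Editorial note on the mutex argument (sketch, names hypothetical):
 *
 *	uh = new_unrhdr(0, 99, NULL);		// serialized by the shared
 *						// global unitmtx
 *	uh = new_unrhdr(0, 99, &my_mtx);	// caller's mutex; alloc_unrl()
 *						// may then be called with
 *						// my_mtx already held
 *	uh = new_unrhdr(0, 99, UNR_NO_MTX);	// no locking; the caller
 *						// guarantees serialization
 */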
/*
 * Look for sequence of items which can be combined into a bitmap, if
 * multiple are present, take the one which saves most memory.
 *
 * Return (1) if a sequence was found to indicate that another call
 * might be able to do more.  Return (0) if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, ba;

	/*
	 * Look for the run of items (if any) which when collapsed into
	 * a bitmap would save most memory.
	 */
	us = NULL;
	ba = 0;
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		a = 1;
		if (is_bitmap(uh, uf))
			a++;
		l = uf->len;
		up = uf;
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;
			if (is_bitmap(uh, up))
				a++;
			l += up->len;
		}
		if (a > ba) {
			ba = a;
			us = uf;
		}
	}
	if (ba < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 * a bit.
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		l = us->ptr == uh ? 1 : 0;
		ub = (void *)us;
		bit_nclear(ub->map, 0, NBITS - 1);
		if (l)
			bit_nset(ub->map, 0, a);
		if (!is_bitmap(uh, uf)) {
			if (uf->ptr == NULL)
				bit_nclear(ub->map, a, a + uf->len - 1);
			else
				bit_nset(ub->map, a, a + uf->len - 1);
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, a);
				else
					bit_clear(ub->map, a);
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, us->len);
				else
					bit_clear(ub->map, us->len);
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}

/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub_full(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = uh;
		} else if (ub_empty(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
		}
	}

	/* If nothing left in runlength, delete it */
	if (up->len == 0) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/* If we have "hot-spot" still, merge with neighbor if possible */
	if (up != NULL) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}
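/*
 * Editorial worked example for the "ba < 3" cutoff above: collapsing a
 * candidate run reclaims its chunk headers (one per unr, plus one per
 * absorbed bitmap) but consumes one struct unr reused as the bitstring.
 * The merged result costs two allocations (the unr and its bitmap), so
 * with a = 2 nothing is saved; the optimizer only acts when the run
 * accounts for at least three allocations.
 */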
/*
 * Allocate a free unr.
 */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	if (uh->mtx != NULL)
		mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL)
		return (-1);

	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		x += y;
	}
	uh->busy++;
	collapse_unr(uh, up);
	return (x);
}

int
alloc_unr(struct unrhdr *uh)
{
	int i;

	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	i = alloc_unrl(uh);
	clean_unrhdrl(uh);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);
	return (i);
}

static int
alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upn;
	struct unrb *ub;
	u_int i, last, tl;

	if (uh->mtx != NULL)
		mtx_assert(uh->mtx, MA_OWNED);

	if (item < uh->low + uh->first || item > uh->high)
		return (-1);

	up = TAILQ_FIRST(&uh->head);
	/* Ideal split. */
	if (up == NULL && item - uh->low == uh->first) {
		uh->first++;
		uh->last--;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	}

	i = item - uh->low - uh->first;

	if (up == NULL) {
		up = new_unr(uh, p1, p2);
		up->ptr = NULL;
		up->len = i;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		uh->last = uh->high - uh->low - i;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	} else {
		/* Find the item which contains the unit we want to allocate. */
		TAILQ_FOREACH(up, &uh->head, list) {
			if (up->len > i)
				break;
			i -= up->len;
		}
	}

	if (up == NULL) {
		if (i > 0) {
			up = new_unr(uh, p1, p2);
			up->ptr = NULL;
			up->len = i;
			TAILQ_INSERT_TAIL(&uh->head, up, list);
		}
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		goto done;
	}

	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (bit_test(ub->map, i) == 0) {
			bit_set(ub->map, i);
			goto done;
		} else
			return (-1);
	} else if (up->ptr == uh)
		return (-1);

	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	if (tl > 0) {
		upn = new_unr(uh, p1, p2);
		upn->ptr = NULL;
		upn->len = tl;
		TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
	}

	/* Split off head end, if any */
	if (i > 0) {
		upn = new_unr(uh, p1, p2);
		upn->len = i;
		upn->ptr = NULL;
		TAILQ_INSERT_BEFORE(up, upn, list);
	}
	up->len = 1;
	up->ptr = uh;

done:
	last = uh->high - uh->low - (item - uh->low);
	if (uh->last > last)
		uh->last = last;
	uh->busy++;
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);
	return (item);
}

int
alloc_unr_specific(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;
	int i;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");

	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));

	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	i = alloc_unr_specificl(uh, item, &p1, &p2);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);

	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);

	return (i);
}
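/*
 * Illustrative sketch (editorial): reserving a well-known unit before
 * handing out dynamic ones.
 *
 *	if (alloc_unr_specific(uh, 0) == -1)	// claim unit 0, e.g. for a
 *		panic("unit 0 already taken");	// default/primary instance
 *	dyn = alloc_unr(uh);			// dynamic units from 1 up
 */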
/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	    item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	item -= uh->first;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run)\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off head end, if any */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}

void
free_unr(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	free_unrl(uh, item, &p1, &p2);
	clean_unrhdrl(uh);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);
	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);
}
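/*
 * Editorial note: free_unr() and alloc_unr_specific() pre-allocate two
 * struct unr buffers before taking the mutex because splitting one run
 * can create at most a head piece and a tail piece (the two "Split off"
 * cases above).  Doing the M_WAITOK allocations up front keeps the locked
 * section non-sleeping; unused buffers are simply freed afterwards.
 */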
#ifdef _KERNEL
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#endif
#endif

#if (defined(_KERNEL) && defined(DDB)) || !defined(_KERNEL)

#if !defined(_KERNEL)
#define db_printf printf
#endif

static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;
	struct unrb *ub;

	db_printf(" %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		db_printf("free\n");
	else if (up->ptr == uh)
		db_printf("alloc\n");
	else {
		ub = up->ptr;
		db_printf("bitmap [");
		for (x = 0; x < up->len; x++) {
			if (bit_test(ub->map, x))
				db_printf("#");
			else
				db_printf(" ");
		}
		db_printf("]\n");
	}
}

static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	db_printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		db_printf(" from = %5u", x);
		print_unr(uh, up);
		if (up->ptr == NULL || up->ptr == uh)
			x += up->len;
		else
			x += NBITS;
	}
}

#endif

#if defined(_KERNEL) && defined(DDB)
DB_SHOW_COMMAND(unrhdr, unrhdr_print_unrhdr)
{
	if (!have_addr) {
		db_printf("show unrhdr addr\n");
		return;
	}

	print_unrhdr((struct unrhdr *)addr);
}
#endif

#ifndef _KERNEL	/* USERLAND test driver */

/*
 * Simple stochastic test driver for the above functions.  The code resides
 * here so that it can access static functions and structures.
 */

static bool verbose;
#define	VPRINTF(...)	{if (verbose) printf(__VA_ARGS__);}

static void
test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	if (a[i]) {
		VPRINTF("F %u\n", i);
		free_unr(uh, i);
		a[i] = 0;
	} else {
		no_alloc = 1;
		j = alloc_unr(uh);
		if (j != -1) {
			a[j] = 1;
			VPRINTF("A %d\n", j);
		}
		no_alloc = 0;
	}
}

static void
test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	j = alloc_unr_specific(uh, i);
	if (j == -1) {
		VPRINTF("F %u\n", i);
		a[i] = 0;
		free_unr(uh, i);
	} else {
		a[i] = 1;
		VPRINTF("A %d\n", j);
	}
}

+static void
+test_iter(void)
+{
+}
+
static void
usage(char **argv)
{
	printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
}

int
main(int argc, char **argv)
{
	struct unrhdr *uh;
	char *a;
	long count = 10000;	/* Number of unrs to test */
	long reps = 1, m;
	int ch;
	u_int i;

	verbose = false;

	while ((ch = getopt(argc, argv, "hr:v")) != -1) {
		switch (ch) {
		case 'r':
			errno = 0;
			reps = strtol(optarg, NULL, 0);
			if (errno == ERANGE || errno == EINVAL) {
				usage(argv);
				exit(2);
			}
			break;
		case 'v':
			verbose = true;
			break;
		case 'h':
		default:
			usage(argv);
			exit(2);
		}
	}

	setbuf(stdout, NULL);
	uh = new_unrhdr(0, count - 1, NULL);
	print_unrhdr(uh);

	a = calloc(count, sizeof(char));
	if (a == NULL)
		err(1, "calloc failed");

	printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
	printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
	printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	printf("NBITS %lu\n", (unsigned long)NBITS);
	for (m = 0; m < count * reps; m++) {
		i = arc4random_uniform(count);
#if 0
		if (a[i] && (j & 1))
			continue;
#endif
		if ((arc4random() & 1) != 0)
			test_alloc_unr(uh, i, a);
		else
			test_alloc_unr_specific(uh, i, a);
		if (verbose)
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}
	for (i = 0; i < (u_int)count; i++) {
		if (a[i]) {
			if (verbose) {
				printf("C %u\n", i);
				print_unrhdr(uh);
			}
			free_unr(uh, i);
		}
	}
	print_unrhdr(uh);
	delete_unrhdr(uh);
	free(a);
	return (0);
}
#endif
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 828297e5b948..4b479e95aa2f 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -1,541 +1,544 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)systm.h	8.7 (Berkeley) 3/29/95
 * $FreeBSD$
 */

#ifndef _SYS_SYSTM_H_
#define	_SYS_SYSTM_H_

#include <sys/cdefs.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <sys/callout.h>
#include <sys/queue.h>
#include <sys/stdint.h>
#include <sys/types.h>		/* for people using printf mainly */

__NULLABILITY_PRAGMA_PUSH

#ifdef _KERNEL
extern int cold;		/* nonzero if we are doing a cold boot */
extern int suspend_blocked;	/* block suspend due to pending shutdown */
extern int rebooting;		/* kern_reboot() has been called. */
extern char version[];		/* system version */
extern char compiler_version[];	/* compiler version */
extern char copyright[];	/* system copyright */
extern int kstack_pages;	/* number of kernel stack pages */

extern u_long pagesizes[];	/* supported page sizes */
extern long physmem;		/* physical memory */
extern long realmem;		/* 'real' memory */

extern char *rootdevnames[2];	/* names of possible root devices */

extern int boothowto;		/* reboot flags, from console subsystem */
extern int bootverbose;		/* nonzero to print verbose messages */

extern int maxusers;		/* system tune hint */
extern int ngroups_max;		/* max # of supplemental groups */
extern int vm_guest;		/* Running as virtual machine guest? */

extern u_long maxphys;		/* max raw I/O transfer size */

/*
 * Detected virtual machine guest types.  The intention is to expand
 * and/or add to the VM_GUEST_VM type if specific VM functionality is
 * ever implemented (e.g. vendor-specific paravirtualization features).
 * Keep in sync with vm_guest_sysctl_names[].
 */
enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_GUEST_XEN, VM_GUEST_HV,
		VM_GUEST_VMWARE, VM_GUEST_KVM, VM_GUEST_BHYVE, VM_GUEST_VBOX,
		VM_GUEST_PARALLELS, VM_LAST };
#endif /* KERNEL */

/*
 * Align variables.
 */
#define	__read_mostly		__section(".data.read_mostly")
#define	__read_frequently	__section(".data.read_frequently")
#define	__exclusive_cache_line	__aligned(CACHE_LINE_SIZE) \
				    __section(".data.exclusive_cache_line")

#if defined(_STANDALONE)
struct ucred;
#endif

#ifdef _KERNEL
#include <sys/param.h>		/* MAXCPU */
#include <sys/pcpu.h>		/* curthread */
#include <sys/kpilite.h>

extern int osreldate;

extern const void *zero_region;	/* address space maps to a zeroed page */

extern int unmapped_buf_allowed;

#ifdef __LP64__
#define	IOSIZE_MAX	iosize_max()
#define	DEVFS_IOSIZE_MAX	devfs_iosize_max()
#else
#define	IOSIZE_MAX	SSIZE_MAX
#define	DEVFS_IOSIZE_MAX	SSIZE_MAX
#endif

/*
 * General function declarations.
 */
struct inpcb;
struct lock_object;
struct malloc_type;
struct mtx;
struct proc;
struct socket;
struct thread;
struct tty;
struct ucred;
struct uio;
struct _jmp_buf;
struct trapframe;
struct eventtimer;

int	setjmp(struct _jmp_buf *) __returns_twice;
void	longjmp(struct _jmp_buf *, int) __dead2;
int	dumpstatus(vm_offset_t addr, off_t count);
int	nullop(void);
int	eopnotsupp(void);
int	ureadc(int, struct uio *);
void	hashdestroy(void *, struct malloc_type *, u_long);
void	*hashinit(int count, struct malloc_type *type, u_long *hashmask);
void	*hashinit_flags(int count, struct malloc_type *type,
    u_long *hashmask, int flags);
#define	HASH_NOWAIT	0x00000001
#define	HASH_WAITOK	0x00000002

void	*phashinit(int count, struct malloc_type *type, u_long *nentries);
void	*phashinit_flags(int count, struct malloc_type *type, u_long *nentries,
    int flags);

void	g_waitidle(void);

void	cpu_flush_dcache(void *, size_t);
void	cpu_rootconf(void);
void	critical_enter_KBI(void);
void	critical_exit_KBI(void);
void	critical_exit_preempt(void);
void	init_param1(void);
void	init_param2(long physpages);
void	init_static_kenv(char *, size_t);
void	tablefull(const char *);

/*
 * Allocate per-thread "current" state in the linuxkpi
 */
extern int (*lkpi_alloc_current)(struct thread *, int);
int linux_alloc_current_noop(struct thread *, int);

#if defined(KLD_MODULE) || defined(KTR_CRITICAL) || !defined(_KERNEL) || defined(GENOFFSET)
#define	critical_enter() critical_enter_KBI()
#define	critical_exit() critical_exit_KBI()
#else
static __inline void
critical_enter(void)
{
	struct thread_lite *td;

	td = (struct thread_lite *)curthread;
	td->td_critnest++;
	atomic_interrupt_fence();
}

static __inline void
critical_exit(void)
{
	struct thread_lite *td;

	td = (struct thread_lite *)curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	atomic_interrupt_fence();
	td->td_critnest--;
	atomic_interrupt_fence();
	if (__predict_false(td->td_owepreempt))
		critical_exit_preempt();
}
#endif

#ifdef EARLY_PRINTF
typedef void early_putc_t(int ch);
extern early_putc_t *early_putc;
#endif
int	kvprintf(char const *, void (*)(int, void*), void *, int,
	    __va_list) __printflike(1, 0);
void	log(int, const char *, ...) __printflike(2, 3);
void	log_console(struct uio *);
void	vlog(int, const char *, __va_list) __printflike(2, 0);
int	asprintf(char **ret, struct malloc_type *mtp, const char *format,
	    ...) __printflike(3, 4);
int	printf(const char *, ...) __printflike(1, 2);
int	snprintf(char *, size_t, const char *, ...) __printflike(3, 4);
int	sprintf(char *buf, const char *, ...) __printflike(2, 3);
int	uprintf(const char *, ...) __printflike(1, 2);
int	vprintf(const char *, __va_list) __printflike(1, 0);
int	vasprintf(char **ret, struct malloc_type *mtp, const char *format,
	    __va_list ap) __printflike(3, 0);
int	vsnprintf(char *, size_t, const char *, __va_list) __printflike(3, 0);
int	vsnrprintf(char *, size_t, int, const char *, __va_list)
	    __printflike(4, 0);
int	vsprintf(char *buf, const char *, __va_list) __printflike(2, 0);
int	sscanf(const char *, char const * _Nonnull, ...) __scanflike(2, 3);
int	vsscanf(const char * _Nonnull, char const * _Nonnull, __va_list)
	    __scanflike(2, 0);
long	strtol(const char *, char **, int);
u_long	strtoul(const char *, char **, int);
quad_t	strtoq(const char *, char **, int);
u_quad_t strtouq(const char *, char **, int);
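/*
 * Editorial sketch: critical_enter()/critical_exit() bracket code that
 * must not be preempted on the current CPU; the nesting counter makes
 * the pair recursion-safe.
 *
 *	critical_enter();
 *	// per-CPU state manipulation; no voluntary sleep allowed here
 *	critical_exit();	// may preempt if td_owepreempt was set
 */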
void	tprintf(struct proc *p, int pri, const char *, ...) __printflike(3, 4);
void	vtprintf(struct proc *, int, const char *, __va_list) __printflike(3, 0);
void	hexdump(const void *ptr, int length, const char *hdr, int flags);
#define	HD_COLUMN_MASK	0xff
#define	HD_DELIM_MASK	0xff00
#define	HD_OMIT_COUNT	(1 << 16)
#define	HD_OMIT_HEX	(1 << 17)
#define	HD_OMIT_CHARS	(1 << 18)

#define	ovbcopy(f, t, l) bcopy((f), (t), (l))
void	bcopy(const void * _Nonnull from, void * _Nonnull to, size_t len);
void	bzero(void * _Nonnull buf, size_t len);
void	explicit_bzero(void * _Nonnull, size_t);
int	bcmp(const void *b1, const void *b2, size_t len);

void	*memset(void * _Nonnull buf, int c, size_t len);
void	*memcpy(void * _Nonnull to, const void * _Nonnull from, size_t len);
void	*memmove(void * _Nonnull dest, const void * _Nonnull src, size_t n);
int	memcmp(const void *b1, const void *b2, size_t len);

#ifdef SAN_NEEDS_INTERCEPTORS
#define	SAN_INTERCEPTOR(func)	\
	__CONCAT(SAN_INTERCEPTOR_PREFIX, __CONCAT(_, func))
void	*SAN_INTERCEPTOR(memset)(void *, int, size_t);
void	*SAN_INTERCEPTOR(memcpy)(void *, const void *, size_t);
void	*SAN_INTERCEPTOR(memmove)(void *, const void *, size_t);
int	SAN_INTERCEPTOR(memcmp)(const void *, const void *, size_t);
#ifndef SAN_RUNTIME
#define	bcopy(from, to, len)	SAN_INTERCEPTOR(memmove)((to), (from), (len))
#define	bzero(buf, len)		SAN_INTERCEPTOR(memset)((buf), 0, (len))
#define	bcmp(b1, b2, len)	SAN_INTERCEPTOR(memcmp)((b1), (b2), (len))
#define	memset(buf, c, len)	SAN_INTERCEPTOR(memset)((buf), (c), (len))
#define	memcpy(to, from, len)	SAN_INTERCEPTOR(memcpy)((to), (from), (len))
#define	memmove(dest, src, n)	SAN_INTERCEPTOR(memmove)((dest), (src), (n))
#define	memcmp(b1, b2, len)	SAN_INTERCEPTOR(memcmp)((b1), (b2), (len))
#endif /* !SAN_RUNTIME */
#else /* !SAN_NEEDS_INTERCEPTORS */
#define	bcopy(from, to, len)	__builtin_memmove((to), (from), (len))
#define	bzero(buf, len)		__builtin_memset((buf), 0, (len))
#define	bcmp(b1, b2, len)	__builtin_memcmp((b1), (b2), (len))
#define	memset(buf, c, len)	__builtin_memset((buf), (c), (len))
#define	memcpy(to, from, len)	__builtin_memcpy((to), (from), (len))
#define	memmove(dest, src, n)	__builtin_memmove((dest), (src), (n))
#define	memcmp(b1, b2, len)	__builtin_memcmp((b1), (b2), (len))
#endif /* SAN_NEEDS_INTERCEPTORS */

void	*memset_early(void * _Nonnull buf, int c, size_t len);
#define	bzero_early(buf, len) memset_early((buf), 0, (len))
void	*memcpy_early(void * _Nonnull to, const void * _Nonnull from,
	    size_t len);
void	*memmove_early(void * _Nonnull dest, const void * _Nonnull src,
	    size_t n);
#define	bcopy_early(from, to, len) memmove_early((to), (from), (len))

#define	copystr(src, dst, len, outlen)	({			\
	size_t __r, __len, *__outlen;				\
								\
	__len = (len);						\
	__outlen = (outlen);					\
	__r = strlcpy((dst), (src), __len);			\
	if (__outlen != NULL)					\
		*__outlen = ((__r >= __len) ? __len : __r + 1);	\
	((__r >= __len) ? ENAMETOOLONG : 0);			\
})
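/*
 * Editorial worked example: with len = 4, copystr("abcdef", dst, 4, &done)
 * truncates to "abc\0", sets done = 4, and evaluates to ENAMETOOLONG;
 * copystr("ab", dst, 4, &done) copies fully, sets done = 3 (including the
 * NUL), and evaluates to 0.
 */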
int	copyinstr(const void * __restrict udaddr,
	    void * _Nonnull __restrict kaddr, size_t len,
	    size_t * __restrict lencopied);
int	copyin(const void * __restrict udaddr,
	    void * _Nonnull __restrict kaddr, size_t len);
int	copyin_nofault(const void * __restrict udaddr,
	    void * _Nonnull __restrict kaddr, size_t len);
int	copyout(const void * _Nonnull __restrict kaddr,
	    void * __restrict udaddr, size_t len);
int	copyout_nofault(const void * _Nonnull __restrict kaddr,
	    void * __restrict udaddr, size_t len);

#ifdef SAN_NEEDS_INTERCEPTORS
int	SAN_INTERCEPTOR(copyin)(const void *, void *, size_t);
int	SAN_INTERCEPTOR(copyinstr)(const void *, void *, size_t, size_t *);
int	SAN_INTERCEPTOR(copyout)(const void *, void *, size_t);
#ifndef SAN_RUNTIME
#define	copyin(u, k, l)		SAN_INTERCEPTOR(copyin)((u), (k), (l))
#define	copyinstr(u, k, l, lc)	SAN_INTERCEPTOR(copyinstr)((u), (k), (l), (lc))
#define	copyout(k, u, l)	SAN_INTERCEPTOR(copyout)((k), (u), (l))
#endif /* !SAN_RUNTIME */
#endif /* SAN_NEEDS_INTERCEPTORS */

int	fubyte(volatile const void *base);
long	fuword(volatile const void *base);
int	fuword16(volatile const void *base);
int32_t	fuword32(volatile const void *base);
int64_t	fuword64(volatile const void *base);
int	fueword(volatile const void *base, long *val);
int	fueword32(volatile const void *base, int32_t *val);
int	fueword64(volatile const void *base, int64_t *val);
int	subyte(volatile void *base, int byte);
int	suword(volatile void *base, long word);
int	suword16(volatile void *base, int word);
int	suword32(volatile void *base, int32_t word);
int	suword64(volatile void *base, int64_t word);
uint32_t casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval);
u_long	casuword(volatile u_long *p, u_long oldval, u_long newval);
int	casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
	    uint32_t newval);
int	casueword(volatile u_long *p, u_long oldval, u_long *oldvalp,
	    u_long newval);

int	sysbeep(int hertz, sbintime_t duration);

void	hardclock(int cnt, int usermode);
void	hardclock_sync(int cpu);
void	softclock(void *);
void	statclock(int cnt, int usermode);
void	profclock(int cnt, int usermode, uintfptr_t pc);

int	hardclockintr(void);

void	startprofclock(struct proc *);
void	stopprofclock(struct proc *);
void	cpu_startprofclock(void);
void	cpu_stopprofclock(void);
void	suspendclock(void);
void	resumeclock(void);
sbintime_t 	cpu_idleclock(void);
void	cpu_activeclock(void);
void	cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt);
void	cpu_et_frequency(struct eventtimer *et, uint64_t newfreq);
extern int	cpu_disable_c2_sleep;
extern int	cpu_disable_c3_sleep;

char	*kern_getenv(const char *name);
void	freeenv(char *env);
int	getenv_int(const char *name, int *data);
int	getenv_uint(const char *name, unsigned int *data);
int	getenv_long(const char *name, long *data);
int	getenv_ulong(const char *name, unsigned long *data);
int	getenv_string(const char *name, char *data, int size);
int	getenv_int64(const char *name, int64_t *data);
int	getenv_uint64(const char *name, uint64_t *data);
int	getenv_quad(const char *name, quad_t *data);
int	getenv_bool(const char *name, bool *data);
bool	getenv_is_true(const char *name);
bool	getenv_is_false(const char *name);
int	kern_setenv(const char *name, const char *value);
int	kern_unsetenv(const char *name);
int	testenv(const char *name);
int	getenv_array(const char *name, void *data, int size, int *psize,
	    int type_size, bool allow_signed);
#define	GETENV_UNSIGNED	false	/* negative numbers not allowed */
#define	GETENV_SIGNED	true	/* negative numbers allowed */

typedef uint64_t (cpu_tick_f)(void);
void set_cputicker(cpu_tick_f *func, uint64_t freq, bool isvariable);
extern cpu_tick_f *cpu_ticks;
uint64_t cpu_tickrate(void);
uint64_t cputick2usec(uint64_t tick);

#include <sys/libkern.h>

/* Initialize the world */
void	consinit(void);
void	cpu_initclocks(void);
void	cpu_initclocks_bsp(void);
void	cpu_initclocks_ap(void);
void	usrinfoinit(void);

/* Finalize the world */
void	kern_reboot(int) __dead2;
void	shutdown_nice(int);

/* Stubs for obsolete functions that used to be for interrupt management */
static __inline intrmask_t	splhigh(void)		{ return 0; }
static __inline intrmask_t	splimp(void)		{ return 0; }
static __inline intrmask_t	splnet(void)		{ return 0; }
static __inline intrmask_t	spltty(void)		{ return 0; }
static __inline void		splx(intrmask_t ipl __unused)	{ return; }

/*
 * Common `proc' functions are declared here so that proc.h can be included
 * less often.
 */
int	_sleep(const void * _Nonnull chan, struct lock_object *lock, int pri,
	    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags);
#define	msleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)
#define	msleep_sbt(chan, mtx, pri, wmesg, bt, pr, flags)		\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (bt), (pr),	\
	    (flags))
int	msleep_spin_sbt(const void * _Nonnull chan, struct mtx *mtx,
	    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags);
#define	msleep_spin(chan, mtx, wmesg, timo)				\
	msleep_spin_sbt((chan), (mtx), (wmesg), tick_sbt * (timo),	\
	    0, C_HARDCLOCK)
int	pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr,
	    int flags);
static __inline int
pause(const char *wmesg, int timo)
{
	return (pause_sbt(wmesg, tick_sbt * timo, 0, C_HARDCLOCK));
}
#define	pause_sig(wmesg, timo)						\
	pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK | C_CATCH)
#define	tsleep(chan, pri, wmesg, timo)					\
	_sleep((chan), NULL, (pri), (wmesg), tick_sbt * (timo),		\
	    0, C_HARDCLOCK)
#define	tsleep_sbt(chan, pri, wmesg, bt, pr, flags)			\
	_sleep((chan), NULL, (pri), (wmesg), (bt), (pr), (flags))
void	wakeup(const void *chan);
void	wakeup_one(const void *chan);
void	wakeup_any(const void *chan);

/*
 * Common `struct cdev *' stuff are declared here to avoid #include poisoning
 */

struct cdev;
dev_t dev2udev(struct cdev *x);
const char *devtoname(struct cdev *cdev);

#ifdef __LP64__
size_t	devfs_iosize_max(void);
size_t	iosize_max(void);
#endif

int poll_no_poll(int events);

/* XXX: Should be void nanodelay(u_int nsec); */
void	DELAY(int usec);

/* Root mount holdback API */
struct root_hold_token {
	int				flags;
	const char			*who;
	TAILQ_ENTRY(root_hold_token)	list;
};

struct root_hold_token *root_mount_hold(const char *identifier);
void root_mount_hold_token(const char *identifier, struct root_hold_token *h);
void root_mount_rel(struct root_hold_token *h);
int root_mounted(void);
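/*
 * Editorial sketch of the classic sleep/wakeup pattern (the channel is any
 * stable kernel address; "sc" is hypothetical; real callers usually hold a
 * lock across the test to avoid a lost wakeup):
 *
 *	while (!sc->ready)
 *		tsleep(&sc->ready, PRIBIO, "scwait", hz);	// 1 s timeout
 *	...
 *	sc->ready = 1;
 *	wakeup(&sc->ready);		// wake all sleepers on the channel
 */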
/*
 * Unit number allocation API. (kern/subr_unit.c)
 */
struct unrhdr;
#define	UNR_NO_MTX	((void *)(uintptr_t)-1)
struct unrhdr *new_unrhdr(int low, int high, struct mtx *mutex);
void init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex);
void delete_unrhdr(struct unrhdr *uh);
void clear_unrhdr(struct unrhdr *uh);
void clean_unrhdr(struct unrhdr *uh);
void clean_unrhdrl(struct unrhdr *uh);
int alloc_unr(struct unrhdr *uh);
int alloc_unr_specific(struct unrhdr *uh, u_int item);
int alloc_unrl(struct unrhdr *uh);
void free_unr(struct unrhdr *uh, u_int item);
+void *create_iter_unr(struct unrhdr *uh);
+int next_iter_unr(void *handle);
+void free_iter_unr(void *handle);

#ifndef __LP64__
#define	UNR64_LOCKED
#endif

struct unrhdr64 {
	uint64_t	counter;
};

static __inline void
new_unrhdr64(struct unrhdr64 *unr64, uint64_t low)
{

	unr64->counter = low;
}

#ifdef UNR64_LOCKED
uint64_t alloc_unr64(struct unrhdr64 *);
#else
static __inline uint64_t
alloc_unr64(struct unrhdr64 *unr64)
{

	return (atomic_fetchadd_64(&unr64->counter, 1));
}
#endif

void	intr_prof_stack_use(struct thread *td, struct trapframe *frame);

void counted_warning(unsigned *counter, const char *msg);

/*
 * APIs to manage deprecation and obsolescence.
 */
void _gone_in(int major, const char *msg);
void _gone_in_dev(device_t dev, int major, const char *msg);
#ifdef NO_OBSOLETE_CODE
#define	__gone_ok(m, msg)					\
	_Static_assert(m < P_OSREL_MAJOR(__FreeBSD_version),	\
	    "Obsolete code: " msg);
#else
#define	__gone_ok(m, msg)
#endif
#define	gone_in(major, msg)	__gone_ok(major, msg) _gone_in(major, msg)
#define	gone_in_dev(dev, major, msg)	\
	__gone_ok(major, msg) _gone_in_dev(dev, major, msg)

#if defined(INVARIANTS) || defined(WITNESS)
#define	__diagused
#else
#define	__diagused	__unused
#endif

#endif /* _KERNEL */
__NULLABILITY_PRAGMA_POP

#endif /* !_SYS_SYSTM_H_ */
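/*
 * Editorial sketch: unrhdr64 is the cheap variant for 64-bit spaces that
 * never wrap in practice, so numbers are handed out monotonically and
 * never reused.
 *
 *	static struct unrhdr64 ino_unr;		// hypothetical name
 *
 *	new_unrhdr64(&ino_unr, 3);		// first id handed out is 3
 *	ino = alloc_unr64(&ino_unr);		// lock-free on LP64
 */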