Changeset View
Standalone View
sys/kern/subr_vmem.c
Show All 37 Lines | |||||
* - Magazines and Vmem: Extending the Slab Allocator | * - Magazines and Vmem: Extending the Slab Allocator | ||||
* to Many CPUs and Arbitrary Resources | * to Many CPUs and Arbitrary Resources | ||||
* http://www.usenix.org/event/usenix01/bonwick.html | * http://www.usenix.org/event/usenix01/bonwick.html | ||||
*/ | */ | ||||
#include <sys/cdefs.h> | #include <sys/cdefs.h> | ||||
__FBSDID("$FreeBSD$"); | __FBSDID("$FreeBSD$"); | ||||
#ifdef _KERNEL | |||||
#include "opt_ddb.h" | #include "opt_ddb.h" | ||||
#include <sys/param.h> | #include <sys/param.h> | ||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#include <sys/kernel.h> | #include <sys/kernel.h> | ||||
#include <sys/queue.h> | #include <sys/queue.h> | ||||
#include <sys/callout.h> | #include <sys/callout.h> | ||||
#include <sys/hash.h> | #include <sys/hash.h> | ||||
Show All 18 Lines | |||||
#include <vm/vm_extern.h> | #include <vm/vm_extern.h> | ||||
#include <vm/vm_param.h> | #include <vm/vm_param.h> | ||||
#include <vm/vm_page.h> | #include <vm/vm_page.h> | ||||
#include <vm/vm_pageout.h> | #include <vm/vm_pageout.h> | ||||
#include <vm/vm_phys.h> | #include <vm/vm_phys.h> | ||||
#include <vm/vm_pagequeue.h> | #include <vm/vm_pagequeue.h> | ||||
#include <vm/uma_int.h> | #include <vm/uma_int.h> | ||||
#else /* _KERNEL */ | |||||
#include <sys/types.h> | |||||
#include <sys/queue.h> | |||||
#include <sys/hash.h> | |||||
#include <sys/vmem.h> | |||||
#include <assert.h> | |||||
#include <errno.h> | |||||
#include <pthread.h> | |||||
#include <pthread_np.h> | |||||
#include <stdbool.h> | |||||
#include <stdlib.h> | |||||
#include <string.h> | |||||
#include <strings.h> | |||||
#define KASSERT(a, b) | |||||
#define MPASS(a) | |||||
#define WITNESS_WARN(a, b, c) | |||||
#define panic(...) assert(0) | |||||
#endif /* _KERNEL */ | |||||
#define VMEM_OPTORDER 5 | #define VMEM_OPTORDER 5 | ||||
#define VMEM_OPTVALUE (1 << VMEM_OPTORDER) | #define VMEM_OPTVALUE (1 << VMEM_OPTORDER) | ||||
#define VMEM_MAXORDER \ | #define VMEM_MAXORDER \ | ||||
(VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER) | (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER) | ||||
#define VMEM_HASHSIZE_MIN 16 | #define VMEM_HASHSIZE_MIN 16 | ||||
#define VMEM_HASHSIZE_MAX 131072 | #define VMEM_HASHSIZE_MAX 131072 | ||||
#define VMEM_QCACHE_IDX_MAX 16 | #define VMEM_QCACHE_IDX_MAX 16 | ||||
#define VMEM_FITMASK (M_BESTFIT | M_FIRSTFIT | M_NEXTFIT) | #define VMEM_FITMASK (M_BESTFIT | M_FIRSTFIT | M_NEXTFIT) | ||||
#define VMEM_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | \ | |||||
M_BESTFIT | M_FIRSTFIT | M_NEXTFIT) | |||||
#define BT_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM) | |||||
#define QC_NAME_MAX 16 | #define QC_NAME_MAX 16 | ||||
/* | /* | ||||
* Data structures private to vmem. | * Data structures private to vmem. | ||||
*/ | */ | ||||
#ifdef _KERNEL | |||||
#define VMEM_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | \ | |||||
M_BESTFIT | M_FIRSTFIT | M_NEXTFIT) | |||||
#define BT_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM) | |||||
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures"); | MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures"); | ||||
#else /* _KERNEL */ | |||||
/* bit-compat with kernel */ | |||||
#define M_NOWAIT 0x0001 | |||||
#define M_WAITOK 0x0002 | |||||
#define M_ZERO 0 | |||||
#define M_NOVM 0 | |||||
#define M_USE_RESERVE 0 | |||||
#define VMEM_FLAGS (M_BESTFIT | M_FIRSTFIT | M_NEXTFIT) | |||||
#define BT_FLAGS 0 | |||||
#endif /* _KERNEL */ | |||||
typedef struct vmem_btag bt_t; | typedef struct vmem_btag bt_t; | ||||
TAILQ_HEAD(vmem_seglist, vmem_btag); | TAILQ_HEAD(vmem_seglist, vmem_btag); | ||||
LIST_HEAD(vmem_freelist, vmem_btag); | LIST_HEAD(vmem_freelist, vmem_btag); | ||||
LIST_HEAD(vmem_hashlist, vmem_btag); | LIST_HEAD(vmem_hashlist, vmem_btag); | ||||
#ifdef _KERNEL | |||||
struct qcache { | struct qcache { | ||||
uma_zone_t qc_cache; | uma_zone_t qc_cache; | ||||
vmem_t *qc_vmem; | vmem_t *qc_vmem; | ||||
vmem_size_t qc_size; | vmem_size_t qc_size; | ||||
char qc_name[QC_NAME_MAX]; | char qc_name[QC_NAME_MAX]; | ||||
}; | }; | ||||
typedef struct qcache qcache_t; | typedef struct qcache qcache_t; | ||||
#define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache)) | #define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache)) | ||||
#endif | |||||
#define VMEM_NAME_MAX 16 | #define VMEM_NAME_MAX 16 | ||||
/*
 * Boundary tag: describes one contiguous segment of the arena.  Every
 * tag sits on the arena's segment list; depending on bt_type it is
 * additionally linked on either a freelist or the busy hash table,
 * hence the union below.
 */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;	/* arena segment list */
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;	/* first address of the segment */
	vmem_size_t	bt_size;	/* segment size */
	int		bt_type;	/* BT_TYPE_* constant */
};
/* vmem arena */ | /* vmem arena */ | ||||
struct vmem { | struct vmem { | ||||
#ifdef _KERNEL | |||||
struct mtx_padalign vm_lock; | struct mtx_padalign vm_lock; | ||||
struct cv vm_cv; | struct cv vm_cv; | ||||
#else | |||||
pthread_mutex_t vm_lock; | |||||
pthread_cond_t vm_cv; | |||||
#endif | |||||
char vm_name[VMEM_NAME_MAX+1]; | char vm_name[VMEM_NAME_MAX+1]; | ||||
LIST_ENTRY(vmem) vm_alllist; | LIST_ENTRY(vmem) vm_alllist; | ||||
struct vmem_hashlist vm_hash0[VMEM_HASHSIZE_MIN]; | struct vmem_hashlist vm_hash0[VMEM_HASHSIZE_MIN]; | ||||
struct vmem_freelist vm_freelist[VMEM_MAXORDER]; | struct vmem_freelist vm_freelist[VMEM_MAXORDER]; | ||||
struct vmem_seglist vm_seglist; | struct vmem_seglist vm_seglist; | ||||
struct vmem_hashlist *vm_hashlist; | struct vmem_hashlist *vm_hashlist; | ||||
vmem_size_t vm_hashsize; | vmem_size_t vm_hashsize; | ||||
Show All 15 Lines | #endif | ||||
/* Used on import. */ | /* Used on import. */ | ||||
vmem_import_t *vm_importfn; | vmem_import_t *vm_importfn; | ||||
vmem_release_t *vm_releasefn; | vmem_release_t *vm_releasefn; | ||||
void *vm_arg; | void *vm_arg; | ||||
/* Space exhaustion callback. */ | /* Space exhaustion callback. */ | ||||
vmem_reclaim_t *vm_reclaimfn; | vmem_reclaim_t *vm_reclaimfn; | ||||
#ifdef _KERNEL | |||||
/* quantum cache */ | /* quantum cache */ | ||||
qcache_t vm_qcache[VMEM_QCACHE_IDX_MAX]; | qcache_t vm_qcache[VMEM_QCACHE_IDX_MAX]; | ||||
#endif | |||||
}; | }; | ||||
#define BT_TYPE_SPAN 1 /* Allocated from importfn */ | #define BT_TYPE_SPAN 1 /* Allocated from importfn */ | ||||
#define BT_TYPE_SPAN_STATIC 2 /* vmem_add() or create. */ | #define BT_TYPE_SPAN_STATIC 2 /* vmem_add() or create. */ | ||||
#define BT_TYPE_FREE 3 /* Available space. */ | #define BT_TYPE_FREE 3 /* Available space. */ | ||||
#define BT_TYPE_BUSY 4 /* Used space. */ | #define BT_TYPE_BUSY 4 /* Used space. */ | ||||
#define BT_TYPE_CURSOR 5 /* Cursor for nextfit allocations. */ | #define BT_TYPE_CURSOR 5 /* Cursor for nextfit allocations. */ | ||||
#define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC) | #define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC) | ||||
#define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1) | #define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1) | ||||
#ifdef _KERNEL | |||||
#if defined(DIAGNOSTIC) | #if defined(DIAGNOSTIC) | ||||
static int enable_vmem_check = 1; | static int enable_vmem_check = 1; | ||||
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN, | SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN, | ||||
&enable_vmem_check, 0, "Enable vmem check"); | &enable_vmem_check, 0, "Enable vmem check"); | ||||
static void vmem_check(vmem_t *); | static void vmem_check(vmem_t *); | ||||
#endif | #endif | ||||
static struct callout vmem_periodic_ch; | static struct callout vmem_periodic_ch; | ||||
static int vmem_periodic_interval; | static int vmem_periodic_interval; | ||||
static struct task vmem_periodic_wk; | static struct task vmem_periodic_wk; | ||||
static struct mtx_padalign __exclusive_cache_line vmem_list_lock; | static struct mtx_padalign __exclusive_cache_line vmem_list_lock; | ||||
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list); | |||||
static uma_zone_t vmem_zone; | static uma_zone_t vmem_zone; | ||||
#else /* _KERNEL */ | |||||
static pthread_mutex_t vmem_list_lock = PTHREAD_MUTEX_INITIALIZER; | |||||
#endif /* _KERNEL */ | |||||
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list); | |||||
/* ---- misc */ | /* ---- misc */ | ||||
#ifdef _KERNEL | |||||
#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan) | #define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan) | ||||
#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv) | #define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv) | ||||
#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock) | #define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock) | ||||
#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv) | #define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv) | ||||
#define VMEM_LOCK(vm) mtx_lock(&vm->vm_lock) | #define VMEM_LOCK(vm) mtx_lock(&vm->vm_lock) | ||||
#define VMEM_TRYLOCK(vm) mtx_trylock(&vm->vm_lock) | #define VMEM_TRYLOCK(vm) mtx_trylock(&vm->vm_lock) | ||||
#define VMEM_UNLOCK(vm) mtx_unlock(&vm->vm_lock) | #define VMEM_UNLOCK(vm) mtx_unlock(&vm->vm_lock) | ||||
#define VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF) | #define VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF) | ||||
#define VMEM_LOCK_DESTROY(vm) mtx_destroy(&vm->vm_lock) | #define VMEM_LOCK_DESTROY(vm) mtx_destroy(&vm->vm_lock) | ||||
#define VMEM_ASSERT_LOCKED(vm) mtx_assert(&vm->vm_lock, MA_OWNED); | #define VMEM_ASSERT_LOCKED(vm) mtx_assert(&vm->vm_lock, MA_OWNED); | ||||
#else /* _KERNEL */ | |||||
#define VMEM_CONDVAR_INIT(vm, wchan) pthread_cond_init(&vm->vm_cv, NULL) | |||||
#define VMEM_CONDVAR_DESTROY(vm) pthread_cond_destroy(&vm->vm_cv) | |||||
#define VMEM_CONDVAR_WAIT(vm) pthread_cond_wait(&vm->vm_cv, &vm->vm_lock) | |||||
#define VMEM_CONDVAR_BROADCAST(vm) pthread_cond_broadcast(&vm->vm_cv) | |||||
#define VMEM_LOCK(vm) pthread_mutex_lock(&vm->vm_lock) | |||||
#define VMEM_TRYLOCK(vm) pthread_mutex_trylock(&vm->vm_lock) | |||||
#define VMEM_UNLOCK(vm) pthread_mutex_unlock(&vm->vm_lock) | |||||
#define VMEM_LOCK_INIT(vm, name) pthread_mutex_init(&vm->vm_lock, NULL) | |||||
#define VMEM_LOCK_DESTROY(vm) pthread_mutex_destroy(&vm->vm_lock) | |||||
#define VMEM_ASSERT_LOCKED(vm) pthread_mutex_isowned_np(&vm->vm_lock) | |||||
#endif /* _KERNEL */ | |||||
#define VMEM_ALIGNUP(addr, align) (-(-(addr) & -(align))) | #define VMEM_ALIGNUP(addr, align) (-(-(addr) & -(align))) | ||||
#define VMEM_CROSS_P(addr1, addr2, boundary) \ | #define VMEM_CROSS_P(addr1, addr2, boundary) \ | ||||
((((addr1) ^ (addr2)) & -(boundary)) != 0) | ((((addr1) ^ (addr2)) & -(boundary)) != 0) | ||||
#define ORDER2SIZE(order) ((order) < VMEM_OPTVALUE ? ((order) + 1) : \ | #define ORDER2SIZE(order) ((order) < VMEM_OPTVALUE ? ((order) + 1) : \ | ||||
(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1))) | (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1))) | ||||
#define SIZE2ORDER(size) ((size) <= VMEM_OPTVALUE ? ((size) - 1) : \ | #define SIZE2ORDER(size) ((size) <= VMEM_OPTVALUE ? ((size) - 1) : \ | ||||
(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2))) | (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2))) | ||||
/* | /* | ||||
* Maximum number of boundary tags that may be required to satisfy an | * Maximum number of boundary tags that may be required to satisfy an | ||||
* allocation. Two may be required to import. Another two may be | * allocation. Two may be required to import. Another two may be | ||||
* required to clip edges. | * required to clip edges. | ||||
*/ | */ | ||||
#define BT_MAXALLOC 4 | #define BT_MAXALLOC 4 | ||||
/* | /* | ||||
* Max free limits the number of locally cached boundary tags. We | * Max free limits the number of locally cached boundary tags. We | ||||
* just want to avoid hitting the zone allocator for every call. | * just want to avoid hitting the zone allocator for every call. | ||||
*/ | */ | ||||
#define BT_MAXFREE (BT_MAXALLOC * 8) | #define BT_MAXFREE (BT_MAXALLOC * 8) | ||||
#ifdef _KERNEL | |||||
/* Allocator for boundary tags. */ | /* Allocator for boundary tags. */ | ||||
static uma_zone_t vmem_bt_zone; | static uma_zone_t vmem_bt_zone; | ||||
/* boot time arena storage. */ | /* boot time arena storage. */ | ||||
static struct vmem kernel_arena_storage; | static struct vmem kernel_arena_storage; | ||||
static struct vmem buffer_arena_storage; | static struct vmem buffer_arena_storage; | ||||
static struct vmem transient_arena_storage; | static struct vmem transient_arena_storage; | ||||
/* kernel and kmem arenas are aliased for backwards KPI compat. */ | /* kernel and kmem arenas are aliased for backwards KPI compat. */ | ||||
vmem_t *kernel_arena = &kernel_arena_storage; | vmem_t *kernel_arena = &kernel_arena_storage; | ||||
vmem_t *kmem_arena = &kernel_arena_storage; | vmem_t *kmem_arena = &kernel_arena_storage; | ||||
vmem_t *buffer_arena = &buffer_arena_storage; | vmem_t *buffer_arena = &buffer_arena_storage; | ||||
vmem_t *transient_arena = &transient_arena_storage; | vmem_t *transient_arena = &transient_arena_storage; | ||||
#ifdef DEBUG_MEMGUARD | #ifdef DEBUG_MEMGUARD | ||||
static struct vmem memguard_arena_storage; | static struct vmem memguard_arena_storage; | ||||
vmem_t *memguard_arena = &memguard_arena_storage; | vmem_t *memguard_arena = &memguard_arena_storage; | ||||
#endif | #endif /* DEBUG_MEMGUARD */ | ||||
#endif /* _KERNEL */ | |||||
/* Return true iff the tag describes an allocated (busy) segment. */
static bool
bt_isbusy(bt_t *bt)
{
	return (bt->bt_type == BT_TYPE_BUSY);
}
static bool | static bool | ||||
Show All 14 Lines | _bt_fill(vmem_t *vm, int flags) | ||||
VMEM_ASSERT_LOCKED(vm); | VMEM_ASSERT_LOCKED(vm); | ||||
/* | /* | ||||
* Only allow the kernel arena and arenas derived from kernel arena to | * Only allow the kernel arena and arenas derived from kernel arena to | ||||
* dip into reserve tags. They are where new tags come from. | * dip into reserve tags. They are where new tags come from. | ||||
*/ | */ | ||||
flags &= BT_FLAGS; | flags &= BT_FLAGS; | ||||
#ifdef _KERNEL | |||||
if (vm != kernel_arena && vm->vm_arg != kernel_arena) | if (vm != kernel_arena && vm->vm_arg != kernel_arena) | ||||
flags &= ~M_USE_RESERVE; | flags &= ~M_USE_RESERVE; | ||||
#endif | |||||
/* | /* | ||||
* Loop until we meet the reserve. To minimize the lock shuffle | * Loop until we meet the reserve. To minimize the lock shuffle | ||||
* and prevent simultaneous fills we first try a NOWAIT regardless | * and prevent simultaneous fills we first try a NOWAIT regardless | ||||
* of the caller's flags. Specify M_NOVM so we don't recurse while | * of the caller's flags. Specify M_NOVM so we don't recurse while | ||||
* holding a vmem lock. | * holding a vmem lock. | ||||
*/ | */ | ||||
while (vm->vm_nfreetags < BT_MAXALLOC) { | while (vm->vm_nfreetags < BT_MAXALLOC) { | ||||
#ifdef _KERNEL | |||||
bt = uma_zalloc(vmem_bt_zone, | bt = uma_zalloc(vmem_bt_zone, | ||||
(flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM); | (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM); | ||||
#else | |||||
bt = malloc(sizeof(struct vmem_btag)); | |||||
#endif | |||||
if (bt == NULL) { | if (bt == NULL) { | ||||
#ifdef _KERNEL | |||||
VMEM_UNLOCK(vm); | VMEM_UNLOCK(vm); | ||||
bt = uma_zalloc(vmem_bt_zone, flags); | bt = uma_zalloc(vmem_bt_zone, flags); | ||||
VMEM_LOCK(vm); | VMEM_LOCK(vm); | ||||
#endif | |||||
if (bt == NULL) | if (bt == NULL) | ||||
break; | break; | ||||
} | } | ||||
LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist); | LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist); | ||||
vm->vm_nfreetags++; | vm->vm_nfreetags++; | ||||
} | } | ||||
if (vm->vm_nfreetags < BT_MAXALLOC) | if (vm->vm_nfreetags < BT_MAXALLOC) | ||||
▲ Show 20 Lines • Show All 43 Lines • ▼ Show 20 Lines | while (vm->vm_nfreetags > freelimit) { | ||||
bt = LIST_FIRST(&vm->vm_freetags); | bt = LIST_FIRST(&vm->vm_freetags); | ||||
LIST_REMOVE(bt, bt_freelist); | LIST_REMOVE(bt, bt_freelist); | ||||
vm->vm_nfreetags--; | vm->vm_nfreetags--; | ||||
LIST_INSERT_HEAD(&freetags, bt, bt_freelist); | LIST_INSERT_HEAD(&freetags, bt, bt_freelist); | ||||
} | } | ||||
VMEM_UNLOCK(vm); | VMEM_UNLOCK(vm); | ||||
while ((bt = LIST_FIRST(&freetags)) != NULL) { | while ((bt = LIST_FIRST(&freetags)) != NULL) { | ||||
LIST_REMOVE(bt, bt_freelist); | LIST_REMOVE(bt, bt_freelist); | ||||
#ifdef _KERNEL | |||||
uma_zfree(vmem_bt_zone, bt); | uma_zfree(vmem_bt_zone, bt); | ||||
#else | |||||
free(bt); | |||||
#endif | |||||
} | } | ||||
} | } | ||||
static inline void | static inline void | ||||
bt_free(vmem_t *vm, bt_t *bt) | bt_free(vmem_t *vm, bt_t *bt) | ||||
{ | { | ||||
VMEM_ASSERT_LOCKED(vm); | VMEM_ASSERT_LOCKED(vm); | ||||
▲ Show 20 Lines • Show All 151 Lines • ▼ Show 20 Lines | |||||
/* Append a boundary tag to the tail of the arena's segment list. */
static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{
	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}
/*
 * Unlink a free tag from its freelist.  Only the freelist linkage is
 * undone; the tag remains on the arena's segment list.
 */
static void
bt_remfree(vmem_t *vm __unused, bt_t *bt)
{
	MPASS(bt->bt_type == BT_TYPE_FREE);
	LIST_REMOVE(bt, bt_freelist);
}
static void | static void | ||||
bt_insfree(vmem_t *vm, bt_t *bt) | bt_insfree(vmem_t *vm, bt_t *bt) | ||||
{ | { | ||||
struct vmem_freelist *list; | struct vmem_freelist *list; | ||||
list = bt_freehead_tofree(vm, bt->bt_size); | list = bt_freehead_tofree(vm, bt->bt_size); | ||||
LIST_INSERT_HEAD(list, bt, bt_freelist); | LIST_INSERT_HEAD(list, bt, bt_freelist); | ||||
} | } | ||||
/* ---- vmem internal functions */ | /* ---- vmem internal functions */ | ||||
#ifdef _KERNEL | |||||
/* | /* | ||||
* Import from the arena into the quantum cache in UMA. | * Import from the arena into the quantum cache in UMA. | ||||
* | * | ||||
* We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate | * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate | ||||
* failure, so UMA can't be used to cache a resource with value 0. | * failure, so UMA can't be used to cache a resource with value 0. | ||||
*/ | */ | ||||
static int | static int | ||||
qc_import(void *arg, void **store, int cnt, int domain, int flags) | qc_import(void *arg, void **store, int cnt, int domain, int flags) | ||||
▲ Show 20 Lines • Show All 172 Lines • ▼ Show 20 Lines | #else | ||||
* arena, which may involve importing a range from the kernel arena, | * arena, which may involve importing a range from the kernel arena, | ||||
* so we need to keep at least 2 * BT_MAXALLOC tags reserved. | * so we need to keep at least 2 * BT_MAXALLOC tags reserved. | ||||
*/ | */ | ||||
uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus); | uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus); | ||||
uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc); | uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc); | ||||
#endif | #endif | ||||
} | } | ||||
/* ---- rehash */ | |||||
static int | static int | ||||
markj (done): I would delete the `rehash` comment and merge the two blocks. All of the functions below can arguably be called "vmem internal functions".
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize) | vmem_rehash(vmem_t *vm, vmem_size_t newhashsize) | ||||
{ | { | ||||
bt_t *bt; | bt_t *bt; | ||||
struct vmem_hashlist *newhashlist; | struct vmem_hashlist *newhashlist; | ||||
struct vmem_hashlist *oldhashlist; | struct vmem_hashlist *oldhashlist; | ||||
vmem_size_t i, oldhashsize; | vmem_size_t i, oldhashsize; | ||||
Not Done Inline ActionsThis can be a separate commit. markj: This can be a separate commit. | |||||
MPASS(newhashsize > 0); | MPASS(newhashsize > 0); | ||||
newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize, | newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize, | ||||
M_VMEM, M_NOWAIT); | M_VMEM, M_NOWAIT); | ||||
if (newhashlist == NULL) | if (newhashlist == NULL) | ||||
return ENOMEM; | return ENOMEM; | ||||
for (i = 0; i < newhashsize; i++) { | for (i = 0; i < newhashsize; i++) { | ||||
Show All 13 Lines | for (i = 0; i < oldhashsize; i++) { | ||||
while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) { | while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) { | ||||
bt_rembusy(vm, bt); | bt_rembusy(vm, bt); | ||||
bt_insbusy(vm, bt); | bt_insbusy(vm, bt); | ||||
} | } | ||||
} | } | ||||
VMEM_UNLOCK(vm); | VMEM_UNLOCK(vm); | ||||
if (oldhashlist != vm->vm_hash0) | if (oldhashlist != vm->vm_hash0) | ||||
free(oldhashlist, M_VMEM); | free(oldhashlist, M_VMEM); | ||||
Not Done Inline ActionsThis can be a separate commit. markj: This can be a separate commit. | |||||
return 0; | return 0; | ||||
} | } | ||||
static void | static void | ||||
vmem_periodic_kick(void *dummy) | vmem_periodic_kick(void *dummy) | ||||
{ | { | ||||
taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk); | taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk); | ||||
▲ Show 20 Lines • Show All 43 Lines • ▼ Show 20 Lines | vmem_start_callout(void *unused) | ||||
TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL); | TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL); | ||||
vmem_periodic_interval = hz * 10; | vmem_periodic_interval = hz * 10; | ||||
callout_init(&vmem_periodic_ch, 1); | callout_init(&vmem_periodic_ch, 1); | ||||
callout_reset(&vmem_periodic_ch, vmem_periodic_interval, | callout_reset(&vmem_periodic_ch, vmem_periodic_interval, | ||||
vmem_periodic_kick, NULL); | vmem_periodic_kick, NULL); | ||||
} | } | ||||
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL); | SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL); | ||||
#endif /* _KERNEL */ | |||||
static void | static void | ||||
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type) | vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type) | ||||
{ | { | ||||
bt_t *btfree, *btprev, *btspan; | bt_t *btfree, *btprev, *btspan; | ||||
VMEM_ASSERT_LOCKED(vm); | VMEM_ASSERT_LOCKED(vm); | ||||
MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC); | MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC); | ||||
Show All 39 Lines | vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type) | ||||
vm->vm_size += size; | vm->vm_size += size; | ||||
} | } | ||||
/*
 * vmem_destroy1: release everything held by an arena and free the arena
 * storage itself.  Shared by vmem_destroy() and the vmem_init() failure
 * path.  The arena must contain no busy allocations on entry.
 */
static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

#ifdef _KERNEL
	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);
#endif

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	/* The static cursor tag is not bt_free()able; unlink it first. */
	TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	/* Free the hash table only if it was grown past the inline hash0. */
	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0) {
#ifdef _KERNEL
		free(vm->vm_hashlist, M_VMEM);
#else
		free(vm->vm_hashlist);
#endif
	}

	/* Return all cached boundary tags (limit 0 == trim everything). */
	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
#ifdef _KERNEL
	uma_zfree(vmem_zone, vm);
#else
	free(vm);
#endif
}
static int | static int | ||||
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags) | vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags) | ||||
{ | { | ||||
vmem_addr_t addr; | vmem_addr_t addr; | ||||
int error; | int error; | ||||
▲ Show 20 Lines • Show All 140 Lines • ▼ Show 20 Lines | vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags) | ||||
/* | /* | ||||
* Try to free some space from the quantum cache or reclaim | * Try to free some space from the quantum cache or reclaim | ||||
* functions if available. | * functions if available. | ||||
*/ | */ | ||||
if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) { | if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) { | ||||
avail = vm->vm_size - vm->vm_inuse; | avail = vm->vm_size - vm->vm_inuse; | ||||
bt_save(vm); | bt_save(vm); | ||||
VMEM_UNLOCK(vm); | VMEM_UNLOCK(vm); | ||||
#ifdef _KERNEL | |||||
if (vm->vm_qcache_max != 0) | if (vm->vm_qcache_max != 0) | ||||
qc_drain(vm); | qc_drain(vm); | ||||
#endif | |||||
if (vm->vm_reclaimfn != NULL) | if (vm->vm_reclaimfn != NULL) | ||||
vm->vm_reclaimfn(vm, flags); | vm->vm_reclaimfn(vm, flags); | ||||
VMEM_LOCK(vm); | VMEM_LOCK(vm); | ||||
bt_restore(vm); | bt_restore(vm); | ||||
/* If we were successful retry even NOWAIT. */ | /* If we were successful retry even NOWAIT. */ | ||||
if (vm->vm_size - vm->vm_inuse > avail) | if (vm->vm_size - vm->vm_inuse > avail) | ||||
return (1); | return (1); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 163 Lines • ▼ Show 20 Lines | |||||
* vmem_init: Initializes vmem arena. | * vmem_init: Initializes vmem arena. | ||||
*/ | */ | ||||
vmem_t * | vmem_t * | ||||
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size, | vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size, | ||||
vmem_size_t quantum, vmem_size_t qcache_max, int flags) | vmem_size_t quantum, vmem_size_t qcache_max, int flags) | ||||
{ | { | ||||
vmem_size_t i; | vmem_size_t i; | ||||
#ifdef _KERNEL | |||||
MPASS(quantum > 0); | MPASS(quantum > 0); | ||||
MPASS((quantum & (quantum - 1)) == 0); | MPASS((quantum & (quantum - 1)) == 0); | ||||
#else | |||||
assert(quantum == 0); | |||||
assert(qcache_max == 0); | |||||
#endif | |||||
bzero(vm, sizeof(*vm)); | bzero(vm, sizeof(*vm)); | ||||
VMEM_CONDVAR_INIT(vm, name); | VMEM_CONDVAR_INIT(vm, name); | ||||
VMEM_LOCK_INIT(vm, name); | VMEM_LOCK_INIT(vm, name); | ||||
vm->vm_nfreetags = 0; | vm->vm_nfreetags = 0; | ||||
LIST_INIT(&vm->vm_freetags); | LIST_INIT(&vm->vm_freetags); | ||||
strlcpy(vm->vm_name, name, sizeof(vm->vm_name)); | strlcpy(vm->vm_name, name, sizeof(vm->vm_name)); | ||||
vm->vm_quantum_mask = quantum - 1; | vm->vm_quantum_mask = quantum - 1; | ||||
vm->vm_quantum_shift = flsl(quantum) - 1; | vm->vm_quantum_shift = flsl(quantum) - 1; | ||||
vm->vm_nbusytag = 0; | vm->vm_nbusytag = 0; | ||||
vm->vm_size = 0; | vm->vm_size = 0; | ||||
vm->vm_limit = 0; | vm->vm_limit = 0; | ||||
vm->vm_inuse = 0; | vm->vm_inuse = 0; | ||||
#ifdef _KERNEL | |||||
qc_init(vm, qcache_max); | qc_init(vm, qcache_max); | ||||
#else | |||||
qcache_max++; | |||||
Done Inline ActionsWhy? markj: Why? | |||||
Done Inline ActionsTo not add __unused declaration, I think it is logically wrong. kib: To not add __unused declaration, I think it is logically wrong. | |||||
Not Done Inline ActionsI think (void)qcache_max; is more common for this scenario. markj: I think `(void)qcache_max;` is more common for this scenario. | |||||
#endif | |||||
TAILQ_INIT(&vm->vm_seglist); | TAILQ_INIT(&vm->vm_seglist); | ||||
vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0; | vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0; | ||||
vm->vm_cursor.bt_type = BT_TYPE_CURSOR; | vm->vm_cursor.bt_type = BT_TYPE_CURSOR; | ||||
TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist); | TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist); | ||||
for (i = 0; i < VMEM_MAXORDER; i++) | for (i = 0; i < VMEM_MAXORDER; i++) | ||||
LIST_INIT(&vm->vm_freelist[i]); | LIST_INIT(&vm->vm_freelist[i]); | ||||
memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0)); | memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0)); | ||||
vm->vm_hashsize = VMEM_HASHSIZE_MIN; | vm->vm_hashsize = VMEM_HASHSIZE_MIN; | ||||
vm->vm_hashlist = vm->vm_hash0; | vm->vm_hashlist = vm->vm_hash0; | ||||
if (size != 0) { | if (size != 0) { | ||||
if (vmem_add(vm, base, size, flags) != 0) { | if (vmem_add(vm, base, size, flags) != 0) { | ||||
vmem_destroy1(vm); | vmem_destroy1(vm); | ||||
return NULL; | return NULL; | ||||
} | } | ||||
} | } | ||||
#ifdef _KERNEL | |||||
mtx_lock(&vmem_list_lock); | mtx_lock(&vmem_list_lock); | ||||
#else | |||||
pthread_mutex_lock(&vmem_list_lock); | |||||
#endif | |||||
LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist); | LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist); | ||||
#ifdef _KERNEL | |||||
mtx_unlock(&vmem_list_lock); | mtx_unlock(&vmem_list_lock); | ||||
#else | |||||
pthread_mutex_unlock(&vmem_list_lock); | |||||
#endif | |||||
return vm; | return vm; | ||||
} | } | ||||
/* | /* | ||||
* vmem_create: create an arena. | * vmem_create: create an arena. | ||||
*/ | */ | ||||
/*
 * vmem_create: create an arena.
 *
 * Allocates arena storage and hands it to vmem_init().  Returns NULL if
 * either step fails; when vmem_init() fails it frees the storage itself
 * (via vmem_destroy1()), so no cleanup is needed here.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	vmem_t *vm;

#ifdef _KERNEL
	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
#else
	/* The userspace build has no quantum cache. */
	assert(quantum == 0);
	assert(qcache_max == 0);
	vm = malloc(sizeof(vmem_t));
#endif
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}
/*
 * vmem_destroy: unlink the arena from the global arena list and release
 * all of its resources (including the arena storage itself, freed by
 * vmem_destroy1()).
 */
void
vmem_destroy(vmem_t *vm)
{

#ifdef _KERNEL
	mtx_lock(&vmem_list_lock);
#else
	pthread_mutex_lock(&vmem_list_lock);
#endif
	LIST_REMOVE(vm, vm_alllist);
#ifdef _KERNEL
	mtx_unlock(&vmem_list_lock);
#else
	pthread_mutex_unlock(&vmem_list_lock);
#endif

	vmem_destroy1(vm);
}
vmem_size_t | vmem_size_t | ||||
vmem_roundup_size(vmem_t *vm, vmem_size_t size) | vmem_roundup_size(vmem_t *vm, vmem_size_t size) | ||||
{ | { | ||||
return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask; | return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask; | ||||
} | } | ||||
/* | /* | ||||
* vmem_alloc: allocate resource from the arena. | * vmem_alloc: allocate resource from the arena. | ||||
*/ | */ | ||||
int | int | ||||
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp) | vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp) | ||||
{ | { | ||||
const int strat __unused = flags & VMEM_FITMASK; | const int strat __unused = flags & VMEM_FITMASK; | ||||
qcache_t *qc; | |||||
Done Inline ActionsI really dislike conditionally defined top-level local variables, so I'd prefer to move the definition into the if block below. I don't insist on it though. markj: I really dislike conditionally defined top-level local variables, so I'd prefer to move the… | |||||
Done Inline ActionsI agree in general, and if this were some special case (e.g. INVARIANTS) instead of the usual case I'd argue strongly for markj's take. emaste: I agree in general, and if this were some special case (e.g. INVARIANTS) instead of the usual… | |||||
flags &= VMEM_FLAGS; | flags &= VMEM_FLAGS; | ||||
MPASS(size > 0); | MPASS(size > 0); | ||||
MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT); | MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT); | ||||
if ((flags & M_NOWAIT) == 0) | if ((flags & M_NOWAIT) == 0) | ||||
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc"); | WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc"); | ||||
#ifdef _KERNEL | |||||
if (size <= vm->vm_qcache_max) { | if (size <= vm->vm_qcache_max) { | ||||
qcache_t *qc; | |||||
/* | /* | ||||
* Resource 0 cannot be cached, so avoid a blocking allocation | * Resource 0 cannot be cached, so avoid a blocking allocation | ||||
* in qc_import() and give the vmem_xalloc() call below a chance | * in qc_import() and give the vmem_xalloc() call below a chance | ||||
* to return 0. | * to return 0. | ||||
*/ | */ | ||||
qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift]; | qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift]; | ||||
*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, | *addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, | ||||
(flags & ~M_WAITOK) | M_NOWAIT); | (flags & ~M_WAITOK) | M_NOWAIT); | ||||
if (__predict_true(*addrp != 0)) | if (__predict_true(*addrp != 0)) | ||||
return (0); | return (0); | ||||
} | } | ||||
#endif | |||||
return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, | return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, | ||||
flags, addrp)); | flags, addrp)); | ||||
} | } | ||||
int | int | ||||
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align, | vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align, | ||||
const vmem_size_t phase, const vmem_size_t nocross, | const vmem_size_t phase, const vmem_size_t nocross, | ||||
▲ Show 20 Lines • Show All 94 Lines • ▼ Show 20 Lines | if (!vmem_try_fetch(vm, size, align, flags)) { | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
out: | out: | ||||
VMEM_UNLOCK(vm); | VMEM_UNLOCK(vm); | ||||
if (error != 0 && (flags & M_NOWAIT) == 0) | if (error != 0 && (flags & M_NOWAIT) == 0) | ||||
panic("failed to allocate waiting allocation\n"); | panic("failed to allocate waiting allocation\n"); | ||||
return (error); | return (error); | ||||
Done Inline ActionsYou could eliminate some inline ifdefs with #define panic(...) assert(0). markj: You could eliminate some inline ifdefs with `#define panic(...) assert(0)`. | |||||
Done Inline ActionsThere were only two of them so I did not bother. kib: There were only two of them so I did not bother. | |||||
} | } | ||||
/* | /* | ||||
* vmem_free: free the resource to the arena. | * vmem_free: free the resource to the arena. | ||||
*/ | */ | ||||
void | void | ||||
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size) | vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size) | ||||
{ | { | ||||
qcache_t *qc; | |||||
MPASS(size > 0); | MPASS(size > 0); | ||||
Done Inline ActionsSame comment as above wrt local variables. markj: Same comment as above wrt local variables. | |||||
#ifdef _KERNEL | |||||
if (size <= vm->vm_qcache_max && | if (size <= vm->vm_qcache_max && | ||||
__predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) { | __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) { | ||||
qcache_t *qc; | |||||
qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift]; | qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift]; | ||||
uma_zfree(qc->qc_cache, (void *)addr); | uma_zfree(qc->qc_cache, (void *)addr); | ||||
} else | } else | ||||
#endif | |||||
vmem_xfree(vm, addr, size); | vmem_xfree(vm, addr, size); | ||||
} | } | ||||
void | void | ||||
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size __unused) | vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size __unused) | ||||
Not Done Inline ActionsThis can be a separate commit. markj: This can be a separate commit. | |||||
{ | { | ||||
bt_t *bt; | bt_t *bt; | ||||
bt_t *t; | bt_t *t; | ||||
MPASS(size > 0); | MPASS(size > 0); | ||||
VMEM_LOCK(vm); | VMEM_LOCK(vm); | ||||
bt = bt_lookupbusy(vm, addr); | bt = bt_lookupbusy(vm, addr); | ||||
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | for (i = VMEM_MAXORDER - 1; i >= 0; i--) { | ||||
VMEM_UNLOCK(vm); | VMEM_UNLOCK(vm); | ||||
return ((vmem_size_t)ORDER2SIZE(i) << | return ((vmem_size_t)ORDER2SIZE(i) << | ||||
vm->vm_quantum_shift); | vm->vm_quantum_shift); | ||||
} | } | ||||
VMEM_UNLOCK(vm); | VMEM_UNLOCK(vm); | ||||
return (0); | return (0); | ||||
default: | default: | ||||
panic("vmem_size"); | panic("vmem_size"); | ||||
return (0); | |||||
} | } | ||||
} | } | ||||
/* ---- debug */ | /* ---- debug */ | ||||
#ifdef _KERNEL | |||||
#if defined(DDB) || defined(DIAGNOSTIC) | #if defined(DDB) || defined(DIAGNOSTIC) | ||||
Done Inline ActionsThis block should probably be formally protected with ifdef _KERNEL as well. markj: This block should probably be formally protected with ifdef _KERNEL as well. | |||||
static void bt_dump(const bt_t *, int (*)(const char *, ...) | static void bt_dump(const bt_t *, int (*)(const char *, ...) | ||||
__printflike(1, 2)); | __printflike(1, 2)); | ||||
static const char * | static const char * | ||||
bt_type_string(int type) | bt_type_string(int type) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 234 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
if (!vmem_check_sanity(vm)) { | if (!vmem_check_sanity(vm)) { | ||||
panic("insanity vmem %p", vm); | panic("insanity vmem %p", vm); | ||||
} | } | ||||
} | } | ||||
#endif /* defined(DIAGNOSTIC) */ | #endif /* defined(DIAGNOSTIC) */ | ||||
#endif /* _KERNEL */ |
Commit separately?