Changeset View
Standalone View
sys/vm/uma_int.h
Show First 20 Lines • Show All 277 Lines • ▼ Show 20 Lines | |||||
BITSET_DEFINE(slabbits, SLAB_SETSIZE); | BITSET_DEFINE(slabbits, SLAB_SETSIZE); | ||||
/* | /* | ||||
* The slab structure manages a single contiguous allocation from backing | * The slab structure manages a single contiguous allocation from backing | ||||
* store and subdivides it into individually allocatable items. | * store and subdivides it into individually allocatable items. | ||||
*/ | */ | ||||
struct uma_slab { | struct uma_slab { | ||||
uma_keg_t us_keg; /* Keg we live in */ | uma_keg_t us_keg; /* Keg we live in */ | ||||
union { | LIST_ENTRY(uma_slab) us_link; /* slabs in zone */ | ||||
LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */ | |||||
unsigned long _us_size; /* Size of allocation */ | |||||
} us_type; | |||||
SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */ | SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */ | ||||
uint8_t *us_data; /* First item */ | uint8_t *us_data; /* First item */ | ||||
struct slabbits us_free; /* Free bitmask. */ | struct slabbits us_free; /* Free bitmask. */ | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
struct slabbits us_debugfree; /* Debug bitmask. */ | struct slabbits us_debugfree; /* Debug bitmask. */ | ||||
#endif | #endif | ||||
uint16_t us_freecount; /* How many are free? */ | uint16_t us_freecount; /* How many are free? */ | ||||
uint8_t us_flags; /* Page flags see uma.h */ | uint8_t us_flags; /* Page flags see uma.h */ | ||||
uint8_t us_domain; /* Backing NUMA domain. */ | uint8_t us_domain; /* Backing NUMA domain. */ | ||||
}; | }; | ||||
#define us_link us_type._us_link | |||||
#define us_size us_type._us_size | |||||
#if MAXMEMDOM >= 255 | #if MAXMEMDOM >= 255 | ||||
#error "Slab domain type insufficient" | #error "Slab domain type insufficient" | ||||
#endif | #endif | ||||
typedef struct uma_slab * uma_slab_t; | typedef struct uma_slab * uma_slab_t; | ||||
TAILQ_HEAD(uma_bucketlist, uma_bucket); | TAILQ_HEAD(uma_bucketlist, uma_bucket); | ||||
▲ Show 20 Lines • Show All 88 Lines • ▼ Show 20 Lines | |||||
/* Internal flags a secondary zone inherits from its master keg's zone. */
#define	UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
/* Lock Macros */ | /* Lock Macros */ | ||||
#define KEG_LOCK_INIT(k, lc) \ | #define KEG_LOCK_INIT(k, lc) \ | ||||
do { \ | do { \ | ||||
if ((lc)) \ | if ((lc)) \ | ||||
mtx_init(&(k)->uk_lock, (k)->uk_name, \ | mtx_init(&(k)->uk_lock, (k)->uk_name, \ | ||||
(k)->uk_name, MTX_DEF | MTX_DUPOK); \ | (k)->uk_name, MTX_DEF | MTX_DUPOK); \ | ||||
▲ Show 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | |||||
static __inline void | static __inline void | ||||
vsetslab(vm_offset_t va, uma_slab_t slab) | vsetslab(vm_offset_t va, uma_slab_t slab) | ||||
{ | { | ||||
vm_page_t p; | vm_page_t p; | ||||
p = PHYS_TO_VM_PAGE(pmap_kextract(va)); | p = PHYS_TO_VM_PAGE(pmap_kextract(va)); | ||||
p->plinks.s.pv = slab; | p->plinks.s.pv = slab; | ||||
} | |||||
extern unsigned long uma_kmem_limit; | |||||
extern unsigned long uma_kmem_total; | |||||
/* Adjust bytes under management by UMA. */ | |||||
static inline void | |||||
uma_total_dec(unsigned long size) | |||||
{ | |||||
atomic_subtract_long(&uma_kmem_total, size); | |||||
} | |||||
static inline void | |||||
uma_total_inc(unsigned long size) | |||||
{ | |||||
if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit) | |||||
uma_reclaim_wakeup(); | |||||
} | } | ||||
/* | /* | ||||
* The following two functions may be defined by architecture specific code | * The following two functions may be defined by architecture specific code | ||||
* if they can provide more efficient allocation functions. This is useful | * if they can provide more efficient allocation functions. This is useful | ||||
* for using direct mapped addresses. | * for using direct mapped addresses. | ||||
*/ | */ | ||||
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, | void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, | ||||
uint8_t *pflag, int wait); | uint8_t *pflag, int wait); | ||||
void uma_small_free(void *mem, vm_size_t size, uint8_t flags); | void uma_small_free(void *mem, vm_size_t size, uint8_t flags); | ||||
/* Set a global soft limit on UMA managed memory. */ | /* Set a global soft limit on UMA managed memory. */ | ||||
void uma_set_limit(unsigned long limit); | void uma_set_limit(unsigned long limit); | ||||
#endif /* _KERNEL */ | #endif /* _KERNEL */ | ||||
#endif /* VM_UMA_INT_H */ | #endif /* VM_UMA_INT_H */ |