Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/uma_core.c
Show First 20 Lines • Show All 3,488 Lines • ▼ Show 20 Lines | item_domain(void *item) | ||||
domain = vm_phys_domain(vtophys(item)); | domain = vm_phys_domain(vtophys(item)); | ||||
KASSERT(domain >= 0 && domain < vm_ndomains, | KASSERT(domain >= 0 && domain < vm_ndomains, | ||||
("%s: unknown domain for item %p", __func__, item)); | ("%s: unknown domain for item %p", __func__, item)); | ||||
return (domain); | return (domain); | ||||
} | } | ||||
#endif | #endif | ||||
#if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS) | #if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS) | ||||
#if defined(INVARIANTS) && (defined(DDB) || defined(STACK)) | |||||
#include <sys/stack.h> | |||||
#endif | |||||
#define UMA_ZALLOC_DEBUG | #define UMA_ZALLOC_DEBUG | ||||
static int | static int | ||||
uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags) | uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags) | ||||
{ | { | ||||
int error; | int error; | ||||
error = 0; | error = 0; | ||||
#ifdef WITNESS | #ifdef WITNESS | ||||
if (flags & M_WAITOK) { | if (flags & M_WAITOK) { | ||||
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | ||||
"uma_zalloc_debug: zone \"%s\"", zone->uz_name); | "uma_zalloc_debug: zone \"%s\"", zone->uz_name); | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
KASSERT((flags & M_EXEC) == 0, | KASSERT((flags & M_EXEC) == 0, | ||||
("uma_zalloc_debug: called with M_EXEC")); | ("uma_zalloc_debug: called with M_EXEC")); | ||||
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), | KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), | ||||
("uma_zalloc_debug: called within spinlock or critical section")); | ("uma_zalloc_debug: called within spinlock or critical section")); | ||||
KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0, | KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0, | ||||
("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO")); | ("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO")); | ||||
_Static_assert(M_NOWAIT != 0 && M_WAITOK != 0, | |||||
"M_NOWAIT and M_WAITOK must be non-zero for this assertion:"); | |||||
#if 0 | |||||
/* | |||||
* Give the #elif clause time to find problems, then remove it | |||||
* and enable this. (Remove <sys/stack.h> above, too.) | |||||
*/ | |||||
KASSERT((flags & (M_NOWAIT|M_WAITOK)) == M_NOWAIT || | |||||
(flags & (M_NOWAIT|M_WAITOK)) == M_WAITOK, | |||||
("uma_zalloc_debug: must pass one of M_NOWAIT or M_WAITOK")); | |||||
#elif defined(DDB) || defined(STACK) | |||||
if (__predict_false((flags & (M_NOWAIT|M_WAITOK)) != M_NOWAIT && | |||||
(flags & (M_NOWAIT|M_WAITOK)) != M_WAITOK)) { | |||||
static int stack_count; | |||||
struct stack st; | |||||
if (stack_count < 10) { | |||||
++stack_count; | |||||
printf("uma_zalloc* called with bad WAIT flags:\n"); | |||||
stack_save(&st); | |||||
stack_print(&st); | |||||
} | |||||
} | |||||
#endif | |||||
#endif | #endif | ||||
#ifdef DEBUG_MEMGUARD | #ifdef DEBUG_MEMGUARD | ||||
if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) { | if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) { | ||||
void *item; | void *item; | ||||
item = memguard_alloc(zone->uz_size, flags); | item = memguard_alloc(zone->uz_size, flags); | ||||
if (item != NULL) { | if (item != NULL) { | ||||
error = EJUSTRETURN; | error = EJUSTRETURN; | ||||
▲ Show 20 Lines • Show All 127 Lines • ▼ Show 20 Lines | KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, | ||||
("uma_zalloc_arg: called with SMR zone.")); | ("uma_zalloc_arg: called with SMR zone.")); | ||||
if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN) | if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN) | ||||
return (item); | return (item); | ||||
#endif | #endif | ||||
/* | /* | ||||
* If possible, allocate from the per-CPU cache. There are two | * If possible, allocate from the per-CPU cache. There are two | ||||
* requirements for safe access to the per-CPU cache: (1) the thread | * requirements for safe access to the per-CPU cache: (1) the thread | ||||
* accessing the cache must not be preempted or yield during access, | * accessing the cache must not be preempted or yield during access, | ||||
vangyzen (inline comment): I love the irony of hard-coding M_WAITOK here.
Done — Inline comment (rstone): Do we actually have to allocate one here? I thought that struct stack was small enough to be safe to allocate directly on the stack.
Done — Inline comment (vangyzen): No, we don't. Contrary to the little voice in my head, I tried using this KPI by reading _only the man page_, which clearly says stack_create is required. The src tree tells a different story, where only 1 out of 24 instances uses stack_create. From now on, I'll listen to the voices in my head. No... wait... D34461 updates the man page.
* and (2) the thread must not migrate CPUs without switching which | * and (2) the thread must not migrate CPUs without switching which | ||||
* cache it accesses. We rely on a critical section to prevent | * cache it accesses. We rely on a critical section to prevent | ||||
* preemption and migration. We release the critical section in | * preemption and migration. We release the critical section in | ||||
* order to acquire the zone mutex if we are unable to allocate from | * order to acquire the zone mutex if we are unable to allocate from | ||||
* the current cache; when we re-acquire the critical section, we | * the current cache; when we re-acquire the critical section, we | ||||
* must detect and handle migration if it has occurred. | * must detect and handle migration if it has occurred. | ||||
*/ | */ | ||||
critical_enter(); | critical_enter(); | ||||
▲ Show 20 Lines • Show All 2,206 Lines • Show Last 20 Lines |
I love the irony of hard-coding M_WAITOK here.