sys/kern/subr_vmem.c
[... first 243 lines not shown ...]
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

static bool
bt_isbusy(bt_t *bt)
{
    return (bt->bt_type == BT_TYPE_BUSY);
}

static bool
bt_isfree(bt_t *bt)
{
    return (bt->bt_type == BT_TYPE_FREE);
}

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
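    /*
     * The body is collapsed in this view.  Conceptually, the refill loop
     * looks like the sketch below; this is only a sketch, reusing the
     * file's vmem_bt_zone UMA zone, BT_MAXALLOC tag reserve, and
     * vm_freetags list, and is not the verbatim body:
     *
     *    bt_t *bt;
     *
     *    while (vm->vm_nfreetags < BT_MAXALLOC) {
     *        bt = uma_zalloc(vmem_bt_zone, flags);
     *        if (bt == NULL)
     *            return (ENOMEM);
     *        LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
     *        vm->vm_nfreetags++;
     *    }
     *    return (0);
     */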
[... 526 lines not shown; the next hunk is in vmem_start_callout() ...]
    callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
        vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
    bt_t *btfree, *btprev, *btspan;

    VMEM_ASSERT_LOCKED(vm);
    MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
    MPASS((size & vm->vm_quantum_mask) == 0);

    if (vm->vm_releasefn == NULL) {
        /*
         * The new segment will never be released, so see if it is
         * contiguous with respect to an existing segment.  In this case
         * a span tag is not needed, and it may be possible now or in
         * the future to coalesce the new segment with an existing free
         * segment.
         */
        btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
        if ((!bt_isbusy(btprev) && !bt_isfree(btprev)) ||
            btprev->bt_start + btprev->bt_size != addr)
            btprev = NULL;
    } else {
        btprev = NULL;
    }

    if (btprev == NULL || bt_isbusy(btprev)) {
        if (btprev == NULL) {
            btspan = bt_alloc(vm);
            btspan->bt_type = type;
            btspan->bt_start = addr;
            btspan->bt_size = size;
            bt_insseg_tail(vm, btspan);
        }
        btfree = bt_alloc(vm);
        btfree->bt_type = BT_TYPE_FREE;
        btfree->bt_start = addr;
        btfree->bt_size = size;
        bt_insseg_tail(vm, btfree);
        bt_insfree(vm, btfree);
    } else {
        bt_remfree(vm, btprev);
        btprev->bt_size += size;
        bt_insfree(vm, btprev);
    }

    vm->vm_size += size;
}
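For illustration, here is a hypothetical caller-side sketch of the new behavior; the arena name, addresses, and sizes are made up, and vmem_create() and vmem_add() are the public interfaces that reach this function. When the arena has no release function, a contiguous second addition now extends the existing free tag instead of creating a second span:

    vmem_t *vm;

    /* Arena with a quantum of 1 and no import/release callbacks. */
    vm = vmem_create("example", 0, 0, 1, 0, M_WAITOK);
    vmem_add(vm, 0x1000, 0x1000, M_WAITOK);  /* creates a span tag and a free tag */
    vmem_add(vm, 0x2000, 0x1000, M_WAITOK);  /* coalesced into the previous free tag */
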
static void
vmem_destroy1(vmem_t *vm)
{
    bt_t *bt;
[... 570 lines not shown; the next hunk is in the for (;;) retry loop of vmem_xalloc() ...]
         */
        if (!vmem_try_fetch(vm, size, align, flags)) {
            error = ENOMEM;
            break;
        }
    }
out:
    VMEM_UNLOCK(vm);
    KASSERT(error == 0 || (flags & M_NOWAIT) != 0,
        ("%s: M_WAITOK allocation failed", __func__));

    return (error);
}
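The KASSERT documents the contract the old panic enforced: a sleeping allocation retries until it succeeds, so only M_NOWAIT callers can observe an error. A hypothetical caller relying on that contract (the arena, size, and alignment are placeholders):

    vmem_addr_t addr;
    int error;

    error = vmem_xalloc(vm, size, align, 0, 0, VMEM_ADDR_MIN,
        VMEM_ADDR_MAX, M_WAITOK, &addr);
    MPASS(error == 0);  /* an M_WAITOK allocation cannot fail */
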
/*
 * vmem_free: free the resource to the arena.
 */
void
[... last 360 lines not shown ...]
markj (inline comment on bt_insseg_tail(vm, btspan)): The segment list is supposed to be sorted, but here we are assuming that a newly imported range always sorts to the end of the list, which was surprising to me. The vmem implementation in illumos seems to do the same thing. I can't see a cheap way to ensure that the new segment is sorted.
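One way to make the assumption explicit would be a cheap assertion at the top of vmem_add1(). This is only a hypothetical sketch, not part of the change, reusing the file's MPASS macro and the new tag-type helpers; it checks only against the tail segment, which is exactly the position the import path relies on:

    bt_t *bttail;

    /*
     * The imported range must start at or after the end of the current
     * tail segment, unless the tail is a non-address tag such as the
     * nextfit cursor.
     */
    bttail = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
    MPASS(bttail == NULL || (!bt_isbusy(bttail) && !bt_isfree(bttail)) ||
        bttail->bt_start + bttail->bt_size <= addr);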