Index: sys/kern/subr_vmem.c
===================================================================
--- sys/kern/subr_vmem.c
+++ sys/kern/subr_vmem.c
@@ -266,8 +266,8 @@
  * allocation will not fail once bt_fill() passes. To do so we cache
  * at least the maximum possible tag allocations in the arena.
  */
-static int
-bt_fill(vmem_t *vm, int flags)
+static __noinline int
+_bt_fill(vmem_t *vm, int flags)
 {
 	bt_t *bt;
 
@@ -307,6 +307,14 @@
 	return 0;
 }
 
+static inline int
+bt_fill(vmem_t *vm, int flags)
+{
+	if (vm->vm_nfreetags >= BT_MAXALLOC)
+		return (0);
+	return (_bt_fill(vm, flags));
+}
+
 /*
  * Pop a tag off of the freetag stack.
  */
@@ -1104,7 +1112,7 @@
 	/*
 	 * Make sure we have enough tags to complete the operation.
 	 */
-	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
+	if (bt_fill(vm, flags) != 0)
 		goto out;
 
 	/*
@@ -1387,8 +1395,7 @@
 		 * Make sure we have enough tags to complete the
 		 * operation.
 		 */
-		if (vm->vm_nfreetags < BT_MAXALLOC &&
-		    bt_fill(vm, flags) != 0) {
+		if (bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
@@ -1513,10 +1520,9 @@
 	error = 0;
 	flags &= VMEM_FLAGS;
 	VMEM_LOCK(vm);
-	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
+	error = bt_fill(vm, flags);
+	if (error == 0)
 		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
-	else
-		error = ENOMEM;
 	VMEM_UNLOCK(vm);
 
 	return (error);
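
Note for reviewers: the patch splits bt_fill() into a tiny inline wrapper that checks the
per-arena tag cache (vm_nfreetags >= BT_MAXALLOC) and an out-of-line __noinline slow path,
so callers no longer open-code the cache check and the common case costs only a compare.
The standalone sketch below illustrates that fast-path/slow-path structure outside the
kernel; the names (struct arena, CACHE_MIN, cache_fill, cache_fill_slow) are invented for
the example and only the shape mirrors the patch.

/*
 * Minimal illustration of the inline fast path / __noinline slow path split.
 * Compiles with GCC or Clang: cc -O2 -o demo demo.c
 */
#include <stdio.h>

#define CACHE_MIN	4	/* stand-in for BT_MAXALLOC */

struct arena {
	int	nfree;		/* stand-in for vm_nfreetags */
};

/* Slow path: kept out of line so the inline wrapper stays small. */
static int __attribute__((noinline))
cache_fill_slow(struct arena *a)
{
	/* A real implementation would allocate here and could fail. */
	while (a->nfree < CACHE_MIN)
		a->nfree++;
	return (0);
}

/*
 * Fast path: when the cache is already full this is a single compare at
 * the call site; only the uncommon refill case pays for a function call.
 */
static inline int
cache_fill(struct arena *a)
{
	if (a->nfree >= CACHE_MIN)
		return (0);
	return (cache_fill_slow(a));
}

int
main(void)
{
	struct arena a = { .nfree = 0 };

	/* First call takes the slow path; the second is just a compare. */
	printf("fill #1: %d (nfree=%d)\n", cache_fill(&a), a.nfree);
	printf("fill #2: %d (nfree=%d)\n", cache_fill(&a), a.nfree);
	return (0);
}

Because the wrapper performs the cache check itself, the last three hunks can drop the
duplicated vm_nfreetags comparisons at the call sites, and vmem_add() can return the
error from bt_fill() directly instead of translating failure into ENOMEM on its own.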