diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -127,10 +127,6 @@
 void vm_imgact_unmap_page(struct sf_buf *sf);
 void vm_thread_dispose(struct thread *td);
 int vm_thread_new(struct thread *td, int pages);
-vm_pindex_t vm_kstack_pindex(vm_offset_t ks, int npages);
-vm_object_t vm_thread_kstack_size_to_obj(int npages);
-int vm_thread_stack_back(vm_offset_t kaddr, vm_page_t ma[], int npages,
-    int req_class, int domain);
 u_int vm_active_count(void);
 u_int vm_inactive_count(void);
 u_int vm_laundry_count(void);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -56,7 +56,6 @@
  * rights to redistribute these changes.
  */
 
-#include <sys/cdefs.h>
 #include "opt_vm.h"
 #include "opt_kstack_pages.h"
 #include "opt_kstack_max_pages.h"
@@ -101,7 +100,6 @@
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_pager.h>
-#include <vm/swap_pager.h>
 #include <vm/vm_phys.h>
 
 #include <machine/cpu.h>
@@ -280,6 +278,11 @@
 static int kstack_cache_size;
 static vmem_t *vmd_kstack_arena[MAXMEMDOM];
 
+static vm_pindex_t vm_kstack_pindex(vm_offset_t ks, int npages);
+static vm_object_t vm_thread_kstack_size_to_obj(int npages);
+static int vm_thread_stack_back(vm_offset_t kaddr, vm_page_t ma[], int npages,
+    int req_class, int domain);
+
 static int
 sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
 {
@@ -577,7 +580,7 @@
  * Uses a non-identity mapping if guard pages are
  * active to avoid pindex holes in the kstack object.
  */
-vm_pindex_t
+static vm_pindex_t
 vm_kstack_pindex(vm_offset_t ks, int kpages)
 {
 	vm_pindex_t pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);
@@ -604,7 +607,7 @@
  * Allocate physical pages, following the specified NUMA policy, to back a
  * kernel stack.
  */
-int
+static int
 vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages,
     int req_class, int domain)
 {
@@ -643,7 +646,7 @@
 	return (ENOMEM);
 }
 
-vm_object_t
+static vm_object_t
 vm_thread_kstack_size_to_obj(int npages)
 {
 	return (npages == kstack_pages ? kstack_object : kstack_alt_object);
@@ -686,7 +689,7 @@
 	vm_size_t kstack_quantum;
 	int domain;
 
-	kstack_object = vm_object_allocate(OBJT_SWAP,
+	kstack_object = vm_object_allocate(OBJT_PHYS,
 	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
 	kstack_cache = uma_zcache_create("kstack_cache", kstack_pages * PAGE_SIZE,
 	    NULL, NULL, NULL, NULL,
@@ -695,7 +698,7 @@
 	kstack_cache_size = imax(128, mp_ncpus * 4);
 	uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
 
-	kstack_alt_object = vm_object_allocate(OBJT_SWAP,
+	kstack_alt_object = vm_object_allocate(OBJT_PHYS,
 	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
 
 	kstack_quantum = vm_thread_kstack_import_quantum();