diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -79,7 +79,6 @@
     int *locality);
 vm_page_t vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary, int options);
-void vm_phys_set_pool(int pool, vm_page_t m, int order);
 boolean_t vm_phys_unfree_page(vm_page_t m);
 int vm_phys_mem_affinity(int f, int t);
 void vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end);
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -179,7 +179,6 @@
 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
     int order, int tail);
-
 /*
  * Red-black tree helpers for vm fictitious range management.
  */
@@ -711,6 +710,18 @@
 	} while (npages > 0);
 }
 
+/*
+ * Set the pool for a contiguous, power of two-sized set of physical pages.
+ */
+static void
+vm_phys_set_pool(int pool, vm_page_t m, int order)
+{
+	vm_page_t m_tmp;
+
+	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
+		m_tmp->pool = pool;
+}
+
 /*
  * Tries to allocate the specified number of pages from the specified pool
  * within the specified domain.  Returns the actual number of allocated pages
@@ -1273,18 +1284,6 @@
 	return (NULL);
 }
 
-/*
- * Set the pool for a contiguous, power of two-sized set of physical pages.
- */
-void
-vm_phys_set_pool(int pool, vm_page_t m, int order)
-{
-	vm_page_t m_tmp;
-
-	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
-		m_tmp->pool = pool;
-}
-
 /*
  * Search for the given physical page "m" in the free lists.  If the search
  * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return