Index: sys/amd64/amd64/mp_machdep.c =================================================================== --- sys/amd64/amd64/mp_machdep.c +++ sys/amd64/amd64/mp_machdep.c @@ -380,7 +380,7 @@ vm_offset_t oa, na; oa = (vm_offset_t)&__pcpu[cpuid]; - if (_vm_phys_domain(pmap_kextract(oa)) == domain) + if (vm_phys_domain(pmap_kextract(oa)) == domain) return; m = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ); Index: sys/amd64/amd64/pmap.c =================================================================== --- sys/amd64/amd64/pmap.c +++ sys/amd64/amd64/pmap.c @@ -452,7 +452,7 @@ pc_to_domain(struct pv_chunk *pc) { - return (_vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc))); + return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc))); } #else static __inline int @@ -4611,7 +4611,7 @@ end = start + pages * sizeof(struct vm_page); for (va = start; va < end; va += NBPDR) { pfn = first_page + (va - start) / sizeof(struct vm_page); - domain = _vm_phys_domain(ptoa(pfn)); + domain = vm_phys_domain(ptoa(pfn)); pdpe = pmap_pdpe(kernel_pmap, va); if ((*pdpe & X86_PG_V) == 0) { pa = vm_phys_early_alloc(domain, PAGE_SIZE); @@ -5147,7 +5147,7 @@ pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ pc->pc_map[1] = PC_FREE1; pc->pc_map[2] = PC_FREE2; - pvc = &pv_chunks[_vm_phys_domain(m->phys_addr)]; + pvc = &pv_chunks[vm_phys_domain(m->phys_addr)]; mtx_lock(&pvc->pvc_lock); TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru); mtx_unlock(&pvc->pvc_lock); Index: sys/dev/mem/memdev.c =================================================================== --- sys/dev/mem/memdev.c +++ sys/dev/mem/memdev.c @@ -111,7 +111,7 @@ &td->td_proc->p_vmspace->vm_pmap, me->me_vaddr); if (me->me_paddr != 0) { me->me_state = ME_STATE_MAPPED; - me->me_domain = _vm_phys_domain(me->me_paddr); + me->me_domain = vm_phys_domain(me->me_paddr); } else { me->me_state = ME_STATE_VALID; } Index: sys/powerpc/aim/mmu_oea64.c =================================================================== --- 
sys/powerpc/aim/mmu_oea64.c +++ sys/powerpc/aim/mmu_oea64.c @@ -3471,7 +3471,7 @@ } for (i = 0; phys_avail[i + 1] != 0; i+= 2) { - domain = _vm_phys_domain(phys_avail[i]); + domain = vm_phys_domain(phys_avail[i]); KASSERT(domain < MAXMEMDOM, ("Invalid phys_avail NUMA domain %d!\n", domain)); size = btoc(phys_avail[i + 1] - phys_avail[i]); Index: sys/powerpc/aim/mmu_radix.c =================================================================== --- sys/powerpc/aim/mmu_radix.c +++ sys/powerpc/aim/mmu_radix.c @@ -6292,7 +6292,7 @@ /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */ for (va = start; va < end; va += L3_PAGE_SIZE) { pfn = first_page + (va - start) / sizeof(struct vm_page); - domain = _vm_phys_domain(ptoa(pfn)); + domain = vm_phys_domain(ptoa(pfn)); l2e = pmap_pml2e(kernel_pmap, va); if ((*l2e & PG_V) == 0) { pa = vm_phys_early_alloc(domain, PAGE_SIZE); Index: sys/vm/uma_core.c =================================================================== --- sys/vm/uma_core.c +++ sys/vm/uma_core.c @@ -3254,7 +3254,7 @@ { int domain; - domain = _vm_phys_domain(vtophys(item)); + domain = vm_phys_domain(vtophys(item)); KASSERT(domain >= 0 && domain < vm_ndomains, ("%s: unknown domain for item %p", __func__, item)); return (domain); Index: sys/vm/vm_kern.c =================================================================== --- sys/vm/vm_kern.c +++ sys/vm/vm_kern.c @@ -236,9 +236,9 @@ vmem_free(vmem, addr, size); return (0); } - KASSERT(vm_phys_domain(m) == domain, + KASSERT(vm_phys_page_domain(m) == domain, ("kmem_alloc_attr_domain: Domain mismatch %d != %d", - vm_phys_domain(m), domain)); + vm_phys_page_domain(m), domain)); if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); vm_page_valid(m); @@ -313,9 +313,9 @@ vmem_free(vmem, addr, size); return (0); } - KASSERT(vm_phys_domain(m) == domain, + KASSERT(vm_phys_page_domain(m) == domain, ("kmem_alloc_contig_domain: Domain mismatch %d != %d", - vm_phys_domain(m), domain)); + 
vm_phys_page_domain(m), domain)); end_m = m + npages; tmp = addr; for (; m < end_m; m++) { @@ -489,9 +489,9 @@ kmem_unback(object, addr, i); return (KERN_NO_SPACE); } - KASSERT(vm_phys_domain(m) == domain, + KASSERT(vm_phys_page_domain(m) == domain, ("kmem_back_domain: Domain mismatch %d != %d", - vm_phys_domain(m), domain)); + vm_phys_page_domain(m), domain)); if (flags & M_ZERO && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); KASSERT((m->oflags & VPO_UNMANAGED) != 0, @@ -573,7 +573,7 @@ end = offset + size; VM_OBJECT_WLOCK(object); m = vm_page_lookup(object, atop(offset)); - domain = vm_phys_domain(m); + domain = vm_phys_page_domain(m); if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0)) arena = vm_dom[domain].vmd_kernel_arena; else Index: sys/vm/vm_page.c =================================================================== --- sys/vm/vm_page.c +++ sys/vm/vm_page.c @@ -2892,7 +2892,7 @@ unlock: VM_OBJECT_WUNLOCK(object); } else { - MPASS(vm_phys_domain(m) == domain); + MPASS(vm_phys_page_domain(m) == domain); vmd = VM_DOMAIN(domain); vm_domain_free_lock(vmd); order = m->order; @@ -2923,7 +2923,7 @@ cnt = 0; vm_domain_free_lock(vmd); do { - MPASS(vm_phys_domain(m) == domain); + MPASS(vm_phys_page_domain(m) == domain); SLIST_REMOVE_HEAD(&free, plinks.s.ss); vm_phys_free_pages(m, 0); cnt++; @@ -3597,7 +3597,7 @@ ("page %p is unmanaged", m)); KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue)); - domain = vm_phys_domain(m); + domain = vm_phys_page_domain(m); pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue]; critical_enter(); Index: sys/vm/vm_pagequeue.h =================================================================== --- sys/vm/vm_pagequeue.h +++ sys/vm/vm_pagequeue.h @@ -389,7 +389,7 @@ vm_pagequeue_domain(vm_page_t m) { - return (VM_DOMAIN(vm_phys_domain(m))); + return (VM_DOMAIN(vm_phys_page_domain(m))); } /* Index: sys/vm/vm_phys.h =================================================================== --- sys/vm/vm_phys.h +++ sys/vm/vm_phys.h @@ -42,11 
+42,13 @@ #ifdef _KERNEL +#include <sys/queue.h> + #ifndef VM_NFREEORDER_MAX #define VM_NFREEORDER_MAX VM_NFREEORDER #endif -extern vm_paddr_t phys_avail[PHYS_AVAIL_COUNT]; +extern vm_paddr_t phys_avail[]; /* Domains must be dense (non-sparse) and zero-based. */ struct mem_affinity { @@ -59,6 +61,12 @@ extern int *mem_locality; #endif +struct vm_page; +#ifndef VM_PAGE_HAVE_PGLIST +TAILQ_HEAD(pglist, vm_page); +#define VM_PAGE_HAVE_PGLIST +#endif + struct vm_freelist { struct pglist pl; int lcnt; @@ -114,14 +122,15 @@ int vm_phys_avail_largest(void); vm_paddr_t vm_phys_avail_size(int i); +#ifdef _VM_PAGE_ /* * - * vm_phys_domain: + * vm_phys_page_domain: * * Return the index of the domain the page belongs to. */ static inline int -vm_phys_domain(vm_page_t m) +vm_phys_page_domain(vm_page_t m) { #ifdef NUMA int domn, segind; @@ -130,13 +139,31 @@ segind = m->segind; KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m)); domn = vm_phys_segs[segind].domain; - KASSERT(domn < vm_ndomains, ("domain %d m %p", domn, m)); + KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m)); return (domn); #else return (0); #endif } -int _vm_phys_domain(vm_paddr_t pa); +#endif + +static inline int +vm_phys_domain(vm_paddr_t pa) +{ +#ifdef NUMA + int i; + + if (vm_ndomains == 1) + return (0); + for (i = 0; mem_affinity[i].end != 0; i++) + if (mem_affinity[i].start <= pa && + mem_affinity[i].end >= pa) + return (mem_affinity[i].domain); + return (-1); +#else + return (0); +#endif +} #endif /* _KERNEL */ #endif /* !_VM_PHYS_H_ */ Index: sys/vm/vm_phys.c =================================================================== --- sys/vm/vm_phys.c +++ sys/vm/vm_phys.c @@ -649,24 +649,6 @@ #endif } -int -_vm_phys_domain(vm_paddr_t pa) -{ -#ifdef NUMA - int i; - - if (vm_ndomains == 1) - return (0); - for (i = 0; mem_affinity[i].end != 0; i++) - if (mem_affinity[i].start <= pa && - mem_affinity[i].end >= pa) - return (mem_affinity[i].domain); - return (-1); -#else - return (0); -#endif -} - 
/* * Split a contiguous, power of two-sized set of physical pages. * Index: sys/vm/vm_reserv.c =================================================================== --- sys/vm/vm_reserv.c +++ sys/vm/vm_reserv.c @@ -780,7 +780,7 @@ } } else return (NULL); - KASSERT(vm_phys_domain(m) == domain, + KASSERT(vm_phys_page_domain(m) == domain, ("vm_reserv_alloc_contig: Page domain does not match requested.")); /*