D27207.id79685.diff

Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c
+++ sys/amd64/amd64/mp_machdep.c
@@ -380,7 +380,7 @@
vm_offset_t oa, na;
oa = (vm_offset_t)&__pcpu[cpuid];
- if (_vm_phys_domain(pmap_kextract(oa)) == domain)
+ if (vm_phys_domain(pmap_kextract(oa)) == domain)
return;
m = vm_page_alloc_domain(NULL, 0, domain,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -452,7 +452,7 @@
pc_to_domain(struct pv_chunk *pc)
{
- return (_vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
+ return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
}
#else
static __inline int
@@ -4611,7 +4611,7 @@
end = start + pages * sizeof(struct vm_page);
for (va = start; va < end; va += NBPDR) {
pfn = first_page + (va - start) / sizeof(struct vm_page);
- domain = _vm_phys_domain(ptoa(pfn));
+ domain = vm_phys_domain(ptoa(pfn));
pdpe = pmap_pdpe(kernel_pmap, va);
if ((*pdpe & X86_PG_V) == 0) {
pa = vm_phys_early_alloc(domain, PAGE_SIZE);
@@ -5147,7 +5147,7 @@
pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
pc->pc_map[1] = PC_FREE1;
pc->pc_map[2] = PC_FREE2;
- pvc = &pv_chunks[_vm_phys_domain(m->phys_addr)];
+ pvc = &pv_chunks[vm_phys_domain(m->phys_addr)];
mtx_lock(&pvc->pvc_lock);
TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
mtx_unlock(&pvc->pvc_lock);
Index: sys/dev/mem/memdev.c
===================================================================
--- sys/dev/mem/memdev.c
+++ sys/dev/mem/memdev.c
@@ -111,7 +111,7 @@
&td->td_proc->p_vmspace->vm_pmap, me->me_vaddr);
if (me->me_paddr != 0) {
me->me_state = ME_STATE_MAPPED;
- me->me_domain = _vm_phys_domain(me->me_paddr);
+ me->me_domain = vm_phys_domain(me->me_paddr);
} else {
me->me_state = ME_STATE_VALID;
}
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -3471,7 +3471,7 @@
}
for (i = 0; phys_avail[i + 1] != 0; i+= 2) {
- domain = _vm_phys_domain(phys_avail[i]);
+ domain = vm_phys_domain(phys_avail[i]);
KASSERT(domain < MAXMEMDOM,
("Invalid phys_avail NUMA domain %d!\n", domain));
size = btoc(phys_avail[i + 1] - phys_avail[i]);
Index: sys/powerpc/aim/mmu_radix.c
===================================================================
--- sys/powerpc/aim/mmu_radix.c
+++ sys/powerpc/aim/mmu_radix.c
@@ -6292,7 +6292,7 @@
/* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */
for (va = start; va < end; va += L3_PAGE_SIZE) {
pfn = first_page + (va - start) / sizeof(struct vm_page);
- domain = _vm_phys_domain(ptoa(pfn));
+ domain = vm_phys_domain(ptoa(pfn));
l2e = pmap_pml2e(kernel_pmap, va);
if ((*l2e & PG_V) == 0) {
pa = vm_phys_early_alloc(domain, PAGE_SIZE);
Index: sys/vm/_vm_phys.h
===================================================================
--- /dev/null
+++ sys/vm/_vm_phys.h
@@ -0,0 +1,73 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2002-2006 Rice University
+ * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Alan L. Cox,
+ * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+ * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __VM_PHYS_H_
+#define __VM_PHYS_H_
+
+#include <machine/vmparam.h>
+
+#ifndef VM_NFREEORDER_MAX
+#define VM_NFREEORDER_MAX VM_NFREEORDER
+#endif
+
+struct vm_page;
+#ifndef VM_PAGE_HAVE_PGLIST
+TAILQ_HEAD(pglist, vm_page);
+#define VM_PAGE_HAVE_PGLIST
+#endif
+
+struct vm_freelist {
+ struct pglist pl;
+ int lcnt;
+};
+
+struct vm_phys_seg {
+ vm_paddr_t start;
+ vm_paddr_t end;
+ vm_page_t first_page;
+#if VM_NRESERVLEVEL > 0
+ vm_reserv_t first_reserv;
+#endif
+#ifdef __aarch64__
+ void *md_first;
+#endif
+ int domain;
+ struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
+};
+
+extern struct vm_phys_seg vm_phys_segs[];
+extern int vm_phys_nsegs;
+
+#endif /* !__VM_PHYS_H_ */
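
The new sys/vm/_vm_phys.h above carries only the physical-segment definitions
(struct vm_phys_seg, vm_phys_segs[], vm_phys_nsegs), so that headers such as
sys/vm/vm_page.h below can reference the segment table without pulling in all
of vm_phys.h. A minimal sketch of a hypothetical consumer walking that table;
example_seg_index() is illustrative only and is not part of this diff.

#include <sys/param.h>
#include <vm/_vm_phys.h>

/* Hypothetical helper: find the vm_phys segment covering a physical address. */
static int
example_seg_index(vm_paddr_t pa)
{
        int i;

        for (i = 0; i < vm_phys_nsegs; i++)
                if (pa >= vm_phys_segs[i].start && pa < vm_phys_segs[i].end)
                        return (i);
        return (-1);
}
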
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -3254,7 +3254,7 @@
{
int domain;
- domain = _vm_phys_domain(vtophys(item));
+ domain = vm_phys_domain(vtophys(item));
KASSERT(domain >= 0 && domain < vm_ndomains,
("%s: unknown domain for item %p", __func__, item));
return (domain);
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -91,8 +91,8 @@
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
-#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
+#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
@@ -236,9 +236,9 @@
vmem_free(vmem, addr, size);
return (0);
}
- KASSERT(vm_phys_domain(m) == domain,
+ KASSERT(vm_page_domain(m) == domain,
("kmem_alloc_attr_domain: Domain mismatch %d != %d",
- vm_phys_domain(m), domain));
+ vm_page_domain(m), domain));
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
vm_page_valid(m);
@@ -313,9 +313,9 @@
vmem_free(vmem, addr, size);
return (0);
}
- KASSERT(vm_phys_domain(m) == domain,
+ KASSERT(vm_page_domain(m) == domain,
("kmem_alloc_contig_domain: Domain mismatch %d != %d",
- vm_phys_domain(m), domain));
+ vm_page_domain(m), domain));
end_m = m + npages;
tmp = addr;
for (; m < end_m; m++) {
@@ -489,9 +489,9 @@
kmem_unback(object, addr, i);
return (KERN_NO_SPACE);
}
- KASSERT(vm_phys_domain(m) == domain,
+ KASSERT(vm_page_domain(m) == domain,
("kmem_back_domain: Domain mismatch %d != %d",
- vm_phys_domain(m), domain));
+ vm_page_domain(m), domain));
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
@@ -573,7 +573,7 @@
end = offset + size;
VM_OBJECT_WLOCK(object);
m = vm_page_lookup(object, atop(offset));
- domain = vm_phys_domain(m);
+ domain = vm_page_domain(m);
if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0))
arena = vm_dom[domain].vmd_kernel_arena;
else
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -70,6 +70,7 @@
#define _VM_PAGE_
#include <vm/pmap.h>
+#include <vm/_vm_phys.h>
/*
* Management of resident (logical) pages.
@@ -995,5 +996,21 @@
return (m->valid == 0);
}
+static inline int
+vm_page_domain(vm_page_t m)
+{
+#ifdef NUMA
+ int domn, segind;
+
+ segind = m->segind;
+ KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
+ domn = vm_phys_segs[segind].domain;
+ KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
+ return (domn);
+#else
+ return (0);
+#endif
+}
+
#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */
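
A minimal sketch relating the two accessors touched by this change:
vm_page_domain(), added above, resolves a page's domain through its segment
index, while vm_phys_domain() (see the sys/vm/vm_phys.h hunk below) now takes
a raw physical address. The helper is hypothetical and only illustrates that
the two should agree for a page in the page array.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

/* Hypothetical check: both lookup paths should report the same NUMA domain. */
static int
example_page_domain(vm_page_t m)
{
        int domain;

        domain = vm_page_domain(m);
        KASSERT(domain == vm_phys_domain(VM_PAGE_TO_PHYS(m)),
            ("example_page_domain: domain mismatch for page %p", m));
        return (domain);
}
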
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -2892,7 +2892,7 @@
unlock:
VM_OBJECT_WUNLOCK(object);
} else {
- MPASS(vm_phys_domain(m) == domain);
+ MPASS(vm_page_domain(m) == domain);
vmd = VM_DOMAIN(domain);
vm_domain_free_lock(vmd);
order = m->order;
@@ -2923,7 +2923,7 @@
cnt = 0;
vm_domain_free_lock(vmd);
do {
- MPASS(vm_phys_domain(m) == domain);
+ MPASS(vm_page_domain(m) == domain);
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
vm_phys_free_pages(m, 0);
cnt++;
@@ -3597,7 +3597,7 @@
("page %p is unmanaged", m));
KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
- domain = vm_phys_domain(m);
+ domain = vm_page_domain(m);
pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
critical_enter();
Index: sys/vm/vm_pagequeue.h
===================================================================
--- sys/vm/vm_pagequeue.h
+++ sys/vm/vm_pagequeue.h
@@ -389,7 +389,7 @@
vm_pagequeue_domain(vm_page_t m)
{
- return (VM_DOMAIN(vm_phys_domain(m)));
+ return (VM_DOMAIN(vm_page_domain(m)));
}
/*
Index: sys/vm/vm_phys.h
===================================================================
--- sys/vm/vm_phys.h
+++ sys/vm/vm_phys.h
@@ -42,11 +42,7 @@
#ifdef _KERNEL
-#ifndef VM_NFREEORDER_MAX
-#define VM_NFREEORDER_MAX VM_NFREEORDER
-#endif
-
-extern vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
+extern vm_paddr_t phys_avail[];
/* Domains must be dense (non-sparse) and zero-based. */
struct mem_affinity {
@@ -59,28 +55,6 @@
extern int *mem_locality;
#endif
-struct vm_freelist {
- struct pglist pl;
- int lcnt;
-};
-
-struct vm_phys_seg {
- vm_paddr_t start;
- vm_paddr_t end;
- vm_page_t first_page;
-#if VM_NRESERVLEVEL > 0
- vm_reserv_t first_reserv;
-#endif
-#ifdef __aarch64__
- void *md_first;
-#endif
- int domain;
- struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
-};
-
-extern struct vm_phys_seg vm_phys_segs[];
-extern int vm_phys_nsegs;
-
/*
* The following functions are only to be used by the virtual memory system.
*/
@@ -114,29 +88,23 @@
int vm_phys_avail_largest(void);
vm_paddr_t vm_phys_avail_size(int i);
-/*
- *
- * vm_phys_domain:
- *
- * Return the index of the domain the page belongs to.
- */
static inline int
-vm_phys_domain(vm_page_t m)
+vm_phys_domain(vm_paddr_t pa)
{
#ifdef NUMA
- int domn, segind;
+ int i;
- /* XXXKIB try to assert that the page is managed */
- segind = m->segind;
- KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
- domn = vm_phys_segs[segind].domain;
- KASSERT(domn < vm_ndomains, ("domain %d m %p", domn, m));
- return (domn);
+ if (vm_ndomains == 1)
+ return (0);
+ for (i = 0; mem_affinity[i].end != 0; i++)
+ if (mem_affinity[i].start <= pa &&
+ mem_affinity[i].end >= pa)
+ return (mem_affinity[i].domain);
+ return (-1);
#else
return (0);
#endif
}
-int _vm_phys_domain(vm_paddr_t pa);
#endif /* _KERNEL */
#endif /* !_VM_PHYS_H_ */
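
With the signature change above, vm_phys_domain() is now keyed by physical
address, matching the converted callers (pmap_kextract() in mp_machdep.c,
vtophys() in uma_core.c). A short hypothetical sketch of the new usage;
example_kva_domain() is illustrative and not part of this diff.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_phys.h>

/* Hypothetical helper: NUMA domain of a kernel virtual address. */
static int
example_kva_domain(vm_offset_t va)
{
        vm_paddr_t pa;

        pa = pmap_kextract(va);
        /* Under NUMA, returns -1 if pa falls outside every mem_affinity range. */
        return (vm_phys_domain(pa));
}
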
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -649,24 +649,6 @@
#endif
}
-int
-_vm_phys_domain(vm_paddr_t pa)
-{
-#ifdef NUMA
- int i;
-
- if (vm_ndomains == 1)
- return (0);
- for (i = 0; mem_affinity[i].end != 0; i++)
- if (mem_affinity[i].start <= pa &&
- mem_affinity[i].end >= pa)
- return (mem_affinity[i].domain);
- return (-1);
-#else
- return (0);
-#endif
-}
-
/*
* Split a contiguous, power of two-sized set of physical pages.
*
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -63,8 +63,8 @@
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
-#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
+#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
@@ -780,7 +780,7 @@
}
} else
return (NULL);
- KASSERT(vm_phys_domain(m) == domain,
+ KASSERT(vm_page_domain(m) == domain,
("vm_reserv_alloc_contig: Page domain does not match requested."));
/*
