D29151.id85406.diff
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -114,7 +114,9 @@
#include <sys/param.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
+#include <sys/types.h>
#include <sys/systm.h>
+#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
@@ -535,10 +537,6 @@
static int pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
static void pmap_pkru_deassign_all(pmap_t pmap);
-static COUNTER_U64_DEFINE_EARLY(pcid_save_cnt);
-SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLFLAG_RD,
- &pcid_save_cnt, "Count of saved TLB context on switch");
-
static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
static struct mtx invl_gen_mtx;
@@ -761,9 +759,15 @@
SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_page_count, CTLFLAG_RD,
&pv_page_count, "Current number of allocated pv pages");
-static COUNTER_U64_DEFINE_EARLY(pt_page_count);
-SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pt_page_count, CTLFLAG_RD,
- &pt_page_count, "Current number of allocated page table pages");
+static COUNTER_U64_DEFINE_EARLY(user_pt_page_count);
+SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, user_pt_page_count, CTLFLAG_RD,
+ &user_pt_page_count,
+ "Current number of allocated page table pages for userspace");
+
+static COUNTER_U64_DEFINE_EARLY(kernel_pt_page_count);
+SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, kernel_pt_page_count, CTLFLAG_RD,
+ &kernel_pt_page_count,
+ "Current number of allocated page table pages for the kernel");
#ifdef PV_STATS
@@ -1290,6 +1294,9 @@
struct spglist *free);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
+static vm_page_t pmap_alloc_pt_page(pmap_t, vm_pindex_t, int);
+static void pmap_free_pt_page(pmap_t, vm_page_t, bool);
+
/********************/
/* Inline functions */
/********************/
@@ -4018,7 +4025,10 @@
pmap_unwire_ptp(pmap, va, pml4pg, free);
}
- counter_u64_add(pt_page_count, -1);
+ if (pmap == kernel_pmap)
+ counter_u64_add(kernel_pt_page_count, -1);
+ else
+ counter_u64_add(user_pt_page_count, -1);
/*
* Put page on a list so that it is released after
@@ -4183,6 +4193,49 @@
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
}
+/* Allocate a page table page and do related bookkeeping */
+static vm_page_t
+pmap_alloc_pt_page(pmap_t pmap, vm_pindex_t pindex, int flags)
+{
+ vm_page_t m;
+
+ m = vm_page_alloc(NULL, pindex, flags | VM_ALLOC_NOOBJ);
+ if (__predict_false(m == NULL))
+ return (NULL);
+
+ if (pmap == kernel_pmap)
+ counter_u64_add(kernel_pt_page_count, 1);
+ else
+ counter_u64_add(user_pt_page_count, 1);
+
+ if ((flags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+
+ return (m);
+}
+
+static void
+pmap_free_pt_page(pmap_t pmap, vm_page_t m, bool zerofilled)
+{
+ /*
+ * This function assumes the page will need to be unwired,
+ * even though the counterpart allocation in pmap_alloc_pt_page()
+ * doesn't enforce VM_ALLOC_WIRED. However, all current uses
+ * of pmap_free_pt_page() require unwiring. The case in which
+ * a PT page doesn't require unwiring because its ref_count has
+ * naturally reached 0 is handled through _pmap_unwire_ptp().
+ */
+ vm_page_unwire_noq(m);
+ if (zerofilled)
+ vm_page_free_zero(m);
+ else
+ vm_page_free(m);
+ if (pmap == kernel_pmap)
+ counter_u64_add(kernel_pt_page_count, -1);
+ else
+ counter_u64_add(user_pt_page_count, -1);
+}
+
/*
* Initialize a preallocated and zeroed pmap structure,
* such as one in a vmspace structure.
@@ -4197,11 +4250,9 @@
/*
* allocate the page directory page
*/
- pmltop_pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ pmltop_pg = pmap_alloc_pt_page(pmap, 0, VM_ALLOC_NORMAL |
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
- counter_u64_add(pt_page_count, 1);
-
pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg);
pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys);
@@ -4214,8 +4265,6 @@
pmap->pm_pmltopu = NULL;
pmap->pm_type = pm_type;
- if ((pmltop_pg->flags & PG_ZERO) == 0)
- pagezero(pmap->pm_pmltop);
/*
* Do not install the host kernel mappings in the nested page
@@ -4231,9 +4280,9 @@
else
pmap_pinit_pml4(pmltop_pg);
if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
- pmltop_pgu = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
- VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
- counter_u64_add(pt_page_count, 1);
+ pmltop_pgu = pmap_alloc_pt_page(pmap, 0,
+ VM_ALLOC_WIRED | VM_ALLOC_NORMAL |
+ VM_ALLOC_WAITOK);
pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP(
VM_PAGE_TO_PHYS(pmltop_pgu));
if (pmap_is_la57(pmap))
@@ -4418,13 +4467,11 @@
/*
* Allocate a page table page.
*/
- if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
- VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
+ m = pmap_alloc_pt_page(pmap, ptepindex,
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if (m == NULL)
return (NULL);
- if ((m->flags & PG_ZERO) == 0)
- pmap_zero_page(m);
-
/*
* Map the pagetable page into the process address space, if
* it isn't already there.
@@ -4451,8 +4498,7 @@
/* Wire up a new PDPE page */
pml4 = pmap_allocpte_getpml4(pmap, lockp, va, true);
if (pml4 == NULL) {
- vm_page_unwire_noq(m);
- vm_page_free_zero(m);
+ pmap_free_pt_page(pmap, m, true);
return (NULL);
}
KASSERT((*pml4 & PG_V) == 0,
@@ -4479,8 +4525,7 @@
/* Wire up a new PDE page */
pdp = pmap_allocpte_getpdp(pmap, lockp, va, true);
if (pdp == NULL) {
- vm_page_unwire_noq(m);
- vm_page_free_zero(m);
+ pmap_free_pt_page(pmap, m, true);
return (NULL);
}
KASSERT((*pdp & PG_V) == 0,
@@ -4490,8 +4535,7 @@
/* Wire up a new PTE page */
pdp = pmap_allocpte_getpdp(pmap, lockp, va, false);
if (pdp == NULL) {
- vm_page_unwire_noq(m);
- vm_page_free_zero(m);
+ pmap_free_pt_page(pmap, m, true);
return (NULL);
}
if ((*pdp & PG_V) == 0) {
@@ -4500,8 +4544,7 @@
lockp, va) == NULL) {
pmap_allocpte_free_unref(pmap, va,
pmap_pml4e(pmap, va));
- vm_page_unwire_noq(m);
- vm_page_free_zero(m);
+ pmap_free_pt_page(pmap, m, true);
return (NULL);
}
} else {
@@ -4519,7 +4562,6 @@
}
pmap_resident_count_inc(pmap, 1);
- counter_u64_add(pt_page_count, 1);
return (m);
}
@@ -4681,16 +4723,12 @@
pmap->pm_pmltop[LMSPML4I + i] = 0;
}
- vm_page_unwire_noq(m);
- vm_page_free_zero(m);
- counter_u64_add(pt_page_count, -1);
+ pmap_free_pt_page(pmap, m, true);
if (pmap->pm_pmltopu != NULL) {
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->
pm_pmltopu));
- vm_page_unwire_noq(m);
- vm_page_free(m);
- counter_u64_add(pt_page_count, -1);
+ pmap_free_pt_page(pmap, m, false);
}
if (pmap->pm_type == PT_X86 &&
(cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
@@ -4799,14 +4837,11 @@
pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
if ((*pdpe & X86_PG_V) == 0) {
/* We need a new PDP entry */
- nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
- VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
- VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ nkpg = pmap_alloc_pt_page(kernel_pmap,
+ kernel_vm_end >> PDPSHIFT, VM_ALLOC_WIRED |
+ VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
- if ((nkpg->flags & PG_ZERO) == 0)
- pmap_zero_page(nkpg);
- counter_u64_add(pt_page_count, 1);
paddr = VM_PAGE_TO_PHYS(nkpg);
*pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
X86_PG_A | X86_PG_M);
@@ -4822,14 +4857,11 @@
continue;
}
- nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
- VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
- VM_ALLOC_ZERO);
+ nkpg = pmap_alloc_pt_page(kernel_pmap,
+ pmap_pde_pindex(kernel_vm_end), VM_ALLOC_WIRED |
+ VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
- if ((nkpg->flags & PG_ZERO) == 0)
- pmap_zero_page(nkpg);
- counter_u64_add(pt_page_count, 1);
paddr = VM_PAGE_TO_PHYS(nkpg);
newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
pde_store(pde, newpdir);
@@ -5746,9 +5778,9 @@
* priority (VM_ALLOC_INTERRUPT). Otherwise, the
* priority is normal.
*/
- mpte = vm_page_alloc(NULL, pmap_pde_pindex(va),
- (in_kernel ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
- VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+ mpte = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va),
+ VM_ALLOC_WIRED |
+ (in_kernel ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL));
/*
* If the allocation of the new page table page fails,
@@ -5759,8 +5791,6 @@
return (FALSE);
}
- counter_u64_add(pt_page_count, 1);
-
if (!in_kernel) {
mpte->ref_count = NPTEPG;
pmap_resident_count_inc(pmap, 1);
@@ -9097,13 +9127,13 @@
oldpdpe = *pdpe;
KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
- if ((pdpg = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
- VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+ pdpg = pmap_alloc_pt_page(pmap, va >> PDPSHIFT,
+ VM_ALLOC_WIRED | VM_ALLOC_INTERRUPT);
+ if (pdpg == NULL) {
CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
}
- counter_u64_add(pt_page_count, 1);
pdpgpa = VM_PAGE_TO_PHYS(pdpg);
firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
@@ -9652,8 +9682,6 @@
PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
- if (cached)
- counter_u64_add(pcid_save_cnt, 1);
pmap_activate_sw_pti_post(td, pmap);
}
@@ -9673,8 +9701,6 @@
load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
cached);
PCPU_SET(curpmap, pmap);
- if (cached)
- counter_u64_add(pcid_save_cnt, 1);
}
static void
@@ -10115,16 +10141,8 @@
static vm_page_t
pmap_large_map_getptp_unlocked(void)
{
- vm_page_t m;
-
- m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
- VM_ALLOC_ZERO);
- if (m != NULL) {
- if ((m->flags & PG_ZERO) == 0)
- pmap_zero_page(m);
- counter_u64_add(pt_page_count, 1);
- }
- return (m);
+ return (pmap_alloc_pt_page(kernel_pmap, 0,
+ VM_ALLOC_NORMAL | VM_ALLOC_ZERO));
}
static vm_page_t

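Not part of the diff, but for illustration: the two new counters are exported read-only under vm.pmap, so on a kernel built with this change they can be read from userspace with sysctlbyname(3). The program below is a minimal sketch; the helper name read_counter() is invented for the example.

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Read a counter(9)-backed sysctl, exported as a 64-bit unsigned value. */
static uint64_t
read_counter(const char *name)
{
	uint64_t val;
	size_t len = sizeof(val);

	if (sysctlbyname(name, &val, &len, NULL, 0) != 0) {
		perror(name);
		exit(1);
	}
	return (val);
}

int
main(void)
{
	printf("kernel PT pages: %ju\n",
	    (uintmax_t)read_counter("vm.pmap.kernel_pt_page_count"));
	printf("user PT pages:   %ju\n",
	    (uintmax_t)read_counter("vm.pmap.user_pt_page_count"));
	return (0);
}

From the shell, sysctl(8) reports the same values:

sysctl vm.pmap.kernel_pt_page_count vm.pmap.user_pt_page_count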