D44575: arm64 pmap: Add ATTR_CONTIGUOUS support [Part 2]
D44575.id136398.diff
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -472,6 +472,8 @@
vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
u_int flags, vm_page_t m, struct rwlock **lockp);
+static int pmap_enter_l3c(pmap_t pmap, vm_offset_t va, pt_entry_t l3e, u_int flags,
+ vm_page_t m, vm_page_t *ml3p, struct rwlock **lockp);
static bool pmap_every_pte_zero(vm_paddr_t pa);
static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
bool all_l3e_AF_set);
@@ -1692,6 +1694,22 @@
SYSCTL_ULONG(_vm_pmap_l3c, OID_AUTO, mappings, CTLFLAG_RD,
&pmap_l3c_mappings, 0, "64KB page mappings");
+static u_long pmap_l3c_removes; // XXX
+SYSCTL_ULONG(_vm_pmap_l3c, OID_AUTO, removes, CTLFLAG_RD,
+ &pmap_l3c_removes, 0, "64KB page removes XXX");
+
+static u_long pmap_l3c_protects; // XXX
+SYSCTL_ULONG(_vm_pmap_l3c, OID_AUTO, protects, CTLFLAG_RD,
+ &pmap_l3c_protects, 0, "64KB page protects XXX");
+
+static u_long pmap_l3c_copies; // XXX
+SYSCTL_ULONG(_vm_pmap_l3c, OID_AUTO, copies, CTLFLAG_RD,
+ &pmap_l3c_copies, 0, "64KB page copies XXX");
+
+static u_long pmap_l2_fills; // XXX
+SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, fills, CTLFLAG_RD,
+ &pmap_l2_fills, 0, "XXX");
+
/*
* If the given value for "final_only" is false, then any cached intermediate-
* level entries, i.e., L{0,1,2}_TABLE entries, are invalidated in addition to
@@ -3766,6 +3784,7 @@
vm_offset_t tva;
vm_page_t m, mt;
+ atomic_add_long(&pmap_l3c_removes, 1); // XXX
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT(((uintptr_t)l3p & ((L3C_ENTRIES * sizeof(pt_entry_t)) - 1)) ==
0, ("pmap_remove_l3c: l3p is not aligned"));
@@ -4236,6 +4255,7 @@
vm_page_t m, mt;
bool dirty;
+ atomic_add_long(&pmap_l3c_protects, 1); // XXX
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT(((uintptr_t)l3p & ((L3C_ENTRIES * sizeof(pt_entry_t)) - 1)) ==
0, ("pmap_mask_set_l3c: l3p is not aligned"));
@@ -5269,8 +5289,11 @@
* and let vm_fault() cope. Check after l2 allocation, since
* it could sleep.
*/
- if (!pmap_bti_same(pmap, va, va + L2_SIZE))
+ if (!pmap_bti_same(pmap, va, va + L2_SIZE)) {
+ KASSERT(l2pg != NULL, ("pmap_enter_l2: missing L2 PTP"));
+ pmap_abort_ptp(pmap, va, l2pg);
return (KERN_PROTECTION_FAILURE);
+ }
/*
* If there are existing mappings, either abort or remove them.
@@ -5402,6 +5425,230 @@
return (KERN_SUCCESS);
}
+static int
+pmap_enter_64kpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *ml3p,
+ vm_prot_t prot, struct rwlock **lockp)
+{
+ pt_entry_t l3e;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
+ KASSERT(ADDR_IS_CANONICAL(va),
+ ("%s: Address not in canonical form: %lx", __func__, va));
+
+ l3e = PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | ATTR_DEFAULT |
+ ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
+ ATTR_CONTIGUOUS | L3_PAGE;
+ l3e |= pmap_pte_bti(pmap, va);
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ l3e |= ATTR_SW_MANAGED;
+ l3e &= ~ATTR_AF;
+ }
+ if ((prot & VM_PROT_EXECUTE) == 0 ||
+ m->md.pv_memattr == VM_MEMATTR_DEVICE)
+ l3e |= ATTR_S1_XN;
+ if (!ADDR_IS_KERNEL(va))
+ l3e |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
+ else
+ l3e |= ATTR_S1_UXN;
+ if (pmap != kernel_pmap)
+ l3e |= ATTR_S1_nG;
+ return (pmap_enter_l3c(pmap, va, l3e, PMAP_ENTER_NOSLEEP |
+ PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m, ml3p, lockp));
+}
+
+static int
+pmap_enter_l3c(pmap_t pmap, vm_offset_t va, pt_entry_t l3e, u_int flags,
+ vm_page_t m, vm_page_t *ml3p, struct rwlock **lockp)
+{
+ pd_entry_t *l2p, *pde;
+ pt_entry_t *l3p, old_l3e, *tl3p;
+ vm_page_t mt;
+ vm_paddr_t pa;
+ vm_pindex_t l2pindex;
+ int lvl;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((va & L3C_OFFSET) == 0,
+ ("pmap_enter_l3c: va is not aligned"));
+ KASSERT(!VA_IS_CLEANMAP(va) || (l3e & ATTR_SW_MANAGED) == 0,
+ ("pmap_enter_l3c: managed mapping within the clean submap"));
+
+ /*
+ * If the L3 PTP is not resident, we attempt to create it here.
+ */
+ if (!ADDR_IS_KERNEL(va)) {
+ /*
+ * Were we given the correct L3 PTP? If so, we can simply
+ * increment its ref count.
+ */
+ l2pindex = pmap_l2_pindex(va);
+ if (*ml3p != NULL && (*ml3p)->pindex == l2pindex) {
+ (*ml3p)->ref_count += L3C_ENTRIES;
+ } else {
+retry:
+ /*
+ * Get the L2 entry.
+ */
+ pde = pmap_pde(pmap, va, &lvl);
+
+ /*
+ * If the L2 entry is a superpage, we either abort or
+ * demote depending on the given flags.
+ */
+ if (lvl == 1) {
+ l2p = pmap_l1_to_l2(pde, va);
+ if ((pmap_load(l2p) & ATTR_DESCR_MASK) ==
+ L2_BLOCK) {
+ if ((flags & PMAP_ENTER_NOREPLACE) != 0)
+ return (KERN_FAILURE);
+ l3p = pmap_demote_l2_locked(pmap, l2p,
+ va, lockp);
+ if (l3p != NULL) {
+ *ml3p = PHYS_TO_VM_PAGE(
+ PTE_TO_PHYS(pmap_load(
+ l2p)));
+ (*ml3p)->ref_count +=
+ L3C_ENTRIES;
+ goto have_l3p;
+ }
+ }
+ /* We need to allocate an L3 PTP. */
+ }
+
+ /*
+ * If the L3 PTP is mapped, we just increment its ref
+ * count. Otherwise, we attempt to allocate it.
+ */
+ if (lvl == 2 && pmap_load(pde) != 0) {
+ *ml3p = PHYS_TO_VM_PAGE(PTE_TO_PHYS(
+ pmap_load(pde)));
+ (*ml3p)->ref_count += L3C_ENTRIES;
+ } else {
+ *ml3p = _pmap_alloc_l3(pmap, l2pindex, (flags &
+ PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp);
+ if (*ml3p == NULL) {
+ if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+ return (KERN_FAILURE);
+
+ /*
+ * The page table may have changed
+ * while we slept.
+ */
+ goto retry;
+ }
+ (*ml3p)->ref_count += L3C_ENTRIES - 1;
+ }
+ }
+ l3p = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ml3p));
+ } else {
+ *ml3p = NULL;
+
+ /*
+ * If the L2 entry is a superpage, we either abort or demote
+ * depending on the given flags.
+ */
+ pde = pmap_pde(kernel_pmap, va, &lvl);
+ if (lvl == 1) {
+ l2p = pmap_l1_to_l2(pde, va);
+ KASSERT((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK,
+ ("pmap_enter_l3c: missing L2 block"));
+ if ((flags & PMAP_ENTER_NOREPLACE) != 0)
+ return (KERN_FAILURE);
+ l3p = pmap_demote_l2_locked(pmap, l2p, va, lockp);
+ } else {
+ KASSERT(lvl == 2,
+ ("pmap_enter_l3c: Invalid level %d", lvl));
+ l3p = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(
+ pmap_load(pde)));
+ }
+ }
+have_l3p:
+ l3p = &l3p[pmap_l3_index(va)];
+
+ /*
+ * If bti is not the same for the whole L3C range, return failure
+ * and let vm_fault() cope. Check after L3 allocation, since
+ * it could sleep.
+ */
+ if (!pmap_bti_same(pmap, va, va + L3C_SIZE)) {
+ KASSERT(*ml3p != NULL, ("pmap_enter_l3c: missing L3 PTP"));
+ (*ml3p)->ref_count -= L3C_ENTRIES - 1;
+ pmap_abort_ptp(pmap, va, *ml3p);
+ *ml3p = NULL;
+ return (KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * If there are existing mappings, either abort or remove them.
+ */
+ if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
+ for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
+ if ((old_l3e = pmap_load(tl3p)) != 0) {
+ if (*ml3p != NULL)
+ (*ml3p)->ref_count -= L3C_ENTRIES;
+ return (KERN_FAILURE);
+ }
+ }
+ } else {
+ /*
+ * Because we increment the L3 page's reference count above,
+ * it is guaranteed not to be freed here and we can pass NULL
+ * instead of a valid free list.
+ */
+ pmap_remove_l3_range(pmap, pmap_load(pmap_l2(pmap, va)), va,
+ va + L3C_SIZE, NULL, lockp);
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((l3e & ATTR_SW_MANAGED) != 0) {
+ if (!pmap_pv_insert_l3c(pmap, va, m, lockp)) {
+ if (*ml3p != NULL) {
+ (*ml3p)->ref_count -= L3C_ENTRIES - 1;
+ pmap_abort_ptp(pmap, va, *ml3p);
+ *ml3p = NULL;
+ }
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ if ((l3e & ATTR_SW_DBM) != 0)
+ for (mt = m; mt < &m[L3C_ENTRIES]; mt++)
+ vm_page_aflag_set(mt, PGA_WRITEABLE);
+ }
+
+ /*
+ * Increment counters.
+ */
+ if ((l3e & ATTR_SW_WIRED) != 0)
+ pmap->pm_stats.wired_count += L3C_ENTRIES;
+ pmap_resident_count_inc(pmap, L3C_ENTRIES);
+
+ pa = VM_PAGE_TO_PHYS(m);
+ KASSERT((pa & L3C_OFFSET) == 0, ("pmap_enter_l3c: pa is not aligned"));
+
+ /*
+ * Sync the icache before the mapping is stored.
+ */
+ if ((l3e & ATTR_S1_XN) == 0 && pmap != kernel_pmap &&
+ m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
+ cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), L3C_SIZE);
+
+ /*
+ * Map the superpage.
+ */
+ for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
+ pmap_store(tl3p, l3e);
+ l3e += L3_SIZE;
+ }
+ dsb(ishst);
+
+ atomic_add_long(&pmap_l3c_mappings, 1);
+ CTR2(KTR_PMAP, "pmap_enter_l3c: success for va %#lx in pmap %p",
+ va, pmap);
+ return (KERN_SUCCESS);
+}
+
/*
* Maps a sequence of resident pages belonging to the same object.
* The sequence begins with the given page m_start. This page is
@@ -5438,6 +5685,13 @@
((rv = pmap_enter_2mpage(pmap, va, m, prot, &lock)) ==
KERN_SUCCESS || rv == KERN_NO_SPACE))
m = &m[L2_SIZE / PAGE_SIZE - 1];
+ else if ((va & L3C_OFFSET) == 0 && va + L3C_SIZE <= end &&
+ (VM_PAGE_TO_PHYS(m) & L3C_OFFSET) == 0 &&
+ vm_reserv_is_populated(m, L3C_ENTRIES) &&
+ pmap_ps_enabled(pmap) &&
+ ((rv = pmap_enter_64kpage(pmap, va, m, &mpte, prot,
+ &lock)) == KERN_SUCCESS || rv == KERN_NO_SPACE))
+ m = &m[L3C_ENTRIES - 1];
else
mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
&lock);
@@ -5543,7 +5797,7 @@
pde = pmap_pde(kernel_pmap, va, &lvl);
KASSERT(pde != NULL,
("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
- va));
+ va));
KASSERT(lvl == 2,
("pmap_enter_quick_locked: Invalid level %d", lvl));
l3 = pmap_l2_to_l3(pde, va);
@@ -5801,6 +6055,7 @@
l3e += L3_SIZE;
}
pmap_resident_count_inc(pmap, L3C_ENTRIES);
+ atomic_add_long(&pmap_l3c_copies, 1); // XXX
atomic_add_long(&pmap_l3c_mappings, 1);
CTR2(KTR_PMAP, "pmap_copy_l3c: success for va %#lx in pmap %p",
va, pmap);
@@ -7635,6 +7890,8 @@
*l3 = newl3;
newl3 += L3_SIZE;
}
+ /* XXX */
+ atomic_add_long(&pmap_l2_fills, 1);
}
static void
Index: sys/vm/vm_reserv.h
===================================================================
--- sys/vm/vm_reserv.h
+++ sys/vm/vm_reserv.h
@@ -55,6 +55,7 @@
boolean_t vm_reserv_free_page(vm_page_t m);
void vm_reserv_init(void);
bool vm_reserv_is_page_free(vm_page_t m);
+bool vm_reserv_is_populated(vm_page_t m, int npages);
int vm_reserv_level(vm_page_t m);
int vm_reserv_level_iffullpop(vm_page_t m);
vm_page_t vm_reserv_reclaim_contig(int domain, u_long npages,
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -1058,6 +1058,25 @@
return (!bit_test(rv->popmap, m - rv->pages));
}
+/*
+ * Returns true if the given page is part of a block of npages, starting at a
+ * multiple of npages, that are all allocated. Otherwise, returns false.
+ */
+bool
+vm_reserv_is_populated(vm_page_t m, int npages)
+{
+ vm_reserv_t rv;
+ int index;
+
+ KASSERT(powerof2(npages),
+ ("%s: npages %d is not a power of 2", __func__, npages));
+ rv = vm_reserv_from_page(m);
+ if (rv->object == NULL)
+ return (false);
+ index = rounddown2(m - rv->pages, npages);
+ return (bit_ntest(rv->popmap, index, index + npages - 1, 1));
+}
+
/*
* If the given page belongs to a reservation, returns the level of that
* reservation. Otherwise, returns -1.