D23830.id68835.diff

Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -150,6 +150,8 @@
#include <arm/include/physmem.h>
+#define PMAP_ASSERT_STAGE1(pmap) MPASS((pmap)->pm_stage == PM_STAGE1)
+
#define NL0PG (PAGE_SIZE/(sizeof (pd_entry_t)))
#define NL1PG (PAGE_SIZE/(sizeof (pd_entry_t)))
#define NL2PG (PAGE_SIZE/(sizeof (pd_entry_t)))
@@ -586,9 +588,10 @@
* Checks if the PTE is dirty.
*/
static inline int
-pmap_pte_dirty(pt_entry_t pte)
+pmap_pte_dirty(pmap_t pmap, pt_entry_t pte)
{
+ PMAP_ASSERT_STAGE1(pmap);
KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
@@ -845,6 +848,7 @@
PMAP_LOCK_INIT(kernel_pmap);
kernel_pmap->pm_l0_paddr = l0pt - kern_delta;
kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
+ kernel_pmap->pm_stage = PM_STAGE1;
/* Assume the address we were loaded to is a valid physical address */
min_pa = KERNBASE - kern_delta;
@@ -1040,6 +1044,8 @@
{
uint64_t r;
+ PMAP_ASSERT_STAGE1(pmap);
+
dsb(ishst);
if (pmap == kernel_pmap) {
r = atop(va);
@@ -1057,6 +1063,8 @@
{
uint64_t end, r, start;
+ PMAP_ASSERT_STAGE1(pmap);
+
dsb(ishst);
if (pmap == kernel_pmap) {
start = atop(sva);
@@ -1079,6 +1087,8 @@
{
uint64_t r;
+ PMAP_ASSERT_STAGE1(pmap);
+
dsb(ishst);
if (pmap == kernel_pmap) {
__asm __volatile("tlbi vmalle1is");
@@ -1153,6 +1163,8 @@
vm_page_t m;
int lvl;
+ PMAP_ASSERT_STAGE1(pmap);
+
m = NULL;
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, va, &lvl);
@@ -1539,6 +1551,7 @@
pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
pmap->pm_root.rt_root = 0;
pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
+ pmap->pm_stage = PM_STAGE1;
PCPU_SET(curpmap, pmap);
}
@@ -1564,6 +1577,7 @@
pmap->pm_root.rt_root = 0;
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
+ pmap->pm_stage = PM_STAGE1;
/* XXX Temporarily disable deferred ASID allocation. */
pmap_alloc_asid(pmap);
@@ -1828,6 +1842,7 @@
pmap->pm_stats.resident_count));
KASSERT(vm_radix_is_empty(&pmap->pm_root),
("pmap_release: pmap has reserved page table page(s)"));
+ PMAP_ASSERT_STAGE1(pmap);
mtx_lock_spin(&asid_set_mutex);
if (COOKIE_TO_EPOCH(pmap->pm_cookie) == asid_epoch) {
@@ -2089,7 +2104,7 @@
continue;
tpte = pmap_load_clear(pte);
m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
- if (pmap_pte_dirty(tpte))
+ if (pmap_pte_dirty(pmap, tpte))
vm_page_dirty(m);
if ((tpte & ATTR_AF) != 0) {
pmap_invalidate_page(pmap, va);
@@ -2588,7 +2603,7 @@
eva = sva + L2_SIZE;
for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
va < eva; va += PAGE_SIZE, m++) {
- if (pmap_pte_dirty(old_l2))
+ if (pmap_pte_dirty(pmap, old_l2))
vm_page_dirty(m);
if (old_l2 & ATTR_AF)
vm_page_aflag_set(m, PGA_REFERENCED);
@@ -2633,7 +2648,7 @@
pmap_resident_count_dec(pmap, 1);
if (old_l3 & ATTR_SW_MANAGED) {
m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
- if (pmap_pte_dirty(old_l3))
+ if (pmap_pte_dirty(pmap, old_l3))
vm_page_dirty(m);
if (old_l3 & ATTR_AF)
vm_page_aflag_set(m, PGA_REFERENCED);
@@ -2683,7 +2698,7 @@
pmap_resident_count_dec(pmap, 1);
if ((old_l3 & ATTR_SW_MANAGED) != 0) {
m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
- if (pmap_pte_dirty(old_l3))
+ if (pmap_pte_dirty(pmap, old_l3))
vm_page_dirty(m);
if ((old_l3 & ATTR_AF) != 0)
vm_page_aflag_set(m, PGA_REFERENCED);
@@ -2880,6 +2895,7 @@
}
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
if (!PMAP_TRYLOCK(pmap)) {
pvh_gen = pvh->pv_gen;
md_gen = m->md.pv_gen;
@@ -2913,7 +2929,7 @@
/*
* Update the vm_page_t clean and reference bits.
*/
- if (pmap_pte_dirty(tpte))
+ if (pmap_pte_dirty(pmap, tpte))
vm_page_dirty(m);
pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
@@ -2937,6 +2953,7 @@
vm_page_t m, mt;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
KASSERT((sva & L2_OFFSET) == 0,
("pmap_protect_l2: sva is not 2mpage aligned"));
old_l2 = pmap_load(l2);
@@ -2958,7 +2975,7 @@
*/
if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
(nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
- pmap_pte_dirty(old_l2)) {
+ pmap_pte_dirty(pmap, old_l2)) {
m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
vm_page_dirty(mt);
@@ -2985,6 +3002,7 @@
pd_entry_t *l0, *l1, *l2;
pt_entry_t *l3p, l3, mask, nbits;
+ PMAP_ASSERT_STAGE1(pmap);
KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
if (prot == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
@@ -3070,7 +3088,7 @@
*/
if ((l3 & ATTR_SW_MANAGED) != 0 &&
(nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
- pmap_pte_dirty(l3))
+ pmap_pte_dirty(pmap, l3))
vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
@@ -3208,6 +3226,7 @@
vm_offset_t sva;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
sva = va & ~L2_OFFSET;
firstl3 = pmap_l2_to_l3(l2, sva);
@@ -3307,6 +3326,8 @@
boolean_t nosleep;
int lvl, rv;
+ PMAP_ASSERT_STAGE1(pmap);
+
va = trunc_page(va);
if ((m->oflags & VPO_UNMANAGED) == 0)
VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -3455,7 +3476,7 @@
* concurrent calls to pmap_page_test_mappings() and
* pmap_ts_referenced().
*/
- if (pmap_pte_dirty(orig_l3))
+ if (pmap_pte_dirty(pmap, orig_l3))
vm_page_dirty(om);
if ((orig_l3 & ATTR_AF) != 0) {
pmap_invalidate_page(pmap, va);
@@ -3526,7 +3547,7 @@
orig_l3 = pmap_load_store(l3, new_l3);
pmap_invalidate_page(pmap, va);
if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
- pmap_pte_dirty(orig_l3))
+ pmap_pte_dirty(pmap, orig_l3))
vm_page_dirty(m);
} else {
/*
@@ -3581,6 +3602,7 @@
pd_entry_t new_l2;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
@@ -3809,6 +3831,7 @@
(m->oflags & VPO_UNMANAGED) != 0,
("pmap_enter_quick_locked: managed mapping within the clean submap"));
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
/*
@@ -4044,6 +4067,9 @@
vm_offset_t addr, end_addr, va_next;
vm_page_t dst_l2pg, dstmpte, srcmpte;
+ PMAP_ASSERT_STAGE1(dst_pmap);
+ PMAP_ASSERT_STAGE1(src_pmap);
+
if (dst_addr != src_addr)
return;
end_addr = src_addr + len;
@@ -4500,7 +4526,7 @@
/*
* Update the vm_page_t clean/reference bits.
*/
- if (pmap_pte_dirty(tpte)) {
+ if (pmap_pte_dirty(pmap, tpte)) {
switch (lvl) {
case 1:
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
@@ -4598,6 +4624,7 @@
restart:
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
if (!PMAP_TRYLOCK(pmap)) {
md_gen = m->md.pv_gen;
rw_runlock(lock);
@@ -4630,6 +4657,7 @@
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
if (!PMAP_TRYLOCK(pmap)) {
md_gen = m->md.pv_gen;
pvh_gen = pvh->pv_gen;
@@ -4752,6 +4780,7 @@
rw_wlock(lock);
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
if (!PMAP_TRYLOCK(pmap)) {
pvh_gen = pvh->pv_gen;
rw_wunlock(lock);
@@ -4774,6 +4803,7 @@
}
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
if (!PMAP_TRYLOCK(pmap)) {
pvh_gen = pvh->pv_gen;
md_gen = m->md.pv_gen;
@@ -4872,7 +4902,7 @@
("pmap_ts_referenced: found an invalid l1 table"));
pte = pmap_l1_to_l2(pde, pv->pv_va);
tpte = pmap_load(pte);
- if (pmap_pte_dirty(tpte)) {
+ if (pmap_pte_dirty(pmap, tpte)) {
/*
* Although "tpte" is mapping a 2MB page, because
* this function is called at a 4KB page granularity,
@@ -4947,7 +4977,7 @@
("pmap_ts_referenced: found an invalid l2 table"));
pte = pmap_l2_to_l3(pde, pv->pv_va);
tpte = pmap_load(pte);
- if (pmap_pte_dirty(tpte))
+ if (pmap_pte_dirty(pmap, tpte))
vm_page_dirty(m);
if ((tpte & ATTR_AF) != 0) {
if ((tpte & ATTR_SW_WIRED) == 0) {
@@ -4986,6 +5016,8 @@
pd_entry_t *l0, *l1, *l2, oldl2;
pt_entry_t *l3, oldl3;
+ PMAP_ASSERT_STAGE1(pmap);
+
if (advice != MADV_DONTNEED && advice != MADV_FREE)
return;
@@ -5062,7 +5094,7 @@
if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
(ATTR_SW_MANAGED | L3_PAGE))
goto maybe_invlrng;
- else if (pmap_pte_dirty(oldl3)) {
+ else if (pmap_pte_dirty(pmap, oldl3)) {
if (advice == MADV_DONTNEED) {
/*
* Future calls to pmap_is_modified()
@@ -5123,6 +5155,7 @@
restart:
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
if (!PMAP_TRYLOCK(pmap)) {
pvh_gen = pvh->pv_gen;
rw_wunlock(lock);
@@ -5157,6 +5190,7 @@
}
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
pmap = PV_PMAP(pv);
+ PMAP_ASSERT_STAGE1(pmap);
if (!PMAP_TRYLOCK(pmap)) {
md_gen = m->md.pv_gen;
pvh_gen = pvh->pv_gen;
@@ -5595,6 +5629,7 @@
vm_page_t ml3;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PMAP_ASSERT_STAGE1(pmap);
l3 = NULL;
oldl2 = pmap_load(l2);
KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
@@ -5755,6 +5790,7 @@
int lvl, val;
bool managed;
+ PMAP_ASSERT_STAGE1(pmap);
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, addr, &lvl);
if (pte != NULL) {
@@ -5778,7 +5814,7 @@
val = MINCORE_INCORE;
if (lvl != 3)
val |= MINCORE_SUPER;
- if ((managed && pmap_pte_dirty(tpte)) || (!managed &&
+ if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
(tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
if ((tpte & ATTR_AF) == ATTR_AF)
@@ -5826,6 +5862,7 @@
if (cpuid == curcpu)
continue;
pmap = pcpu_find(cpuid)->pc_curpmap;
+ PMAP_ASSERT_STAGE1(pmap);
asid = COOKIE_TO_ASID(pmap->pm_cookie);
if (asid == -1)
continue;
@@ -5842,6 +5879,7 @@
{
int new_asid;
+ PMAP_ASSERT_STAGE1(pmap);
mtx_lock_spin(&asid_set_mutex);
/*
@@ -5879,6 +5917,7 @@
pmap_to_ttbr0(pmap_t pmap)
{
+ PMAP_ASSERT_STAGE1(pmap);
return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) |
pmap->pm_l0_paddr);
}
@@ -5888,6 +5927,7 @@
{
int epoch;
+ PMAP_ASSERT_STAGE1(pmap);
KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
if (pmap == PCPU_GET(curpmap)) {
@@ -5925,6 +5965,7 @@
pmap_t pmap;
pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ PMAP_ASSERT_STAGE1(pmap);
critical_enter();
(void)pmap_activate_int(pmap);
critical_exit();
@@ -5970,6 +6011,7 @@
pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
{
+ PMAP_ASSERT_STAGE1(pmap);
if (va >= VM_MIN_KERNEL_ADDRESS) {
cpu_icache_sync_range(va, sz);
} else {
@@ -6003,6 +6045,7 @@
uint64_t ec, par;
int lvl, rv;
+ PMAP_ASSERT_STAGE1(pmap);
rv = KERN_FAILURE;
ec = ESR_ELx_EXCEPTION(esr);
Index: head/sys/arm64/include/pmap.h
===================================================================
--- head/sys/arm64/include/pmap.h
+++ head/sys/arm64/include/pmap.h
@@ -76,6 +76,11 @@
vm_paddr_t pv_pa;
};
+enum pmap_stage {
+ PM_INVALID,
+ PM_STAGE1,
+ PM_STAGE2,
+};
struct pmap {
struct mtx pm_mtx;
@@ -85,6 +90,7 @@
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
struct vm_radix pm_root; /* spare page table pages */
long pm_cookie; /* encodes the pmap's ASID */
+ enum pmap_stage pm_stage;
};
typedef struct pmap *pmap_t;
