Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -112,7 +112,12 @@
 
 /* Get physical address from PVO. */
 #define	PVO_PADDR(pvo)		((pvo)->pvo_pte.pa & LPTE_RPGN)
-
+#define	MOEA64_GET_PVO_PAGE(pvo) \
+	({ \
+		if (__predict_false(pvo->pvo_page == NULL)) \
+			pvo->pvo_page = PHYS_TO_VM_PAGE(PVO_PADDR(pvo)); \
+		(pvo->pvo_page); \
+	})
 /*
  * Locking semantics:
  *
@@ -418,8 +423,6 @@
 	uint64_t hash;
 	int shift;
 
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-
 	pvo->pvo_pmap = pmap;
 	va &= ~ADDR_POFF;
 	pvo->pvo_vaddr |= va;
@@ -664,6 +667,10 @@
 		pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
 		init_pvo_entry(pvo, kernel_pmap, va);
 
+		/*
+		 * XXX: When large pages can be remapped and permissioned, need to set
+		 * pvo_page here.
+		 */
 		pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
 		    VM_PROT_EXECUTE;
 		pvo->pvo_pte.pa = pa | pte_lo;
@@ -1212,7 +1219,7 @@
 	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
 		if (refchg < 0)
 			refchg = LPTE_CHG;
-		m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
+		m = MOEA64_GET_PVO_PAGE(pvo);
 
 		refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
 		if (refchg & LPTE_CHG)
@@ -1240,7 +1247,7 @@
 	pvo = moea64_pvo_find_va(pmap, addr);
 	if (pvo != NULL) {
 		pa = PVO_PADDR(pvo);
-		m = PHYS_TO_VM_PAGE(pa);
+		m = MOEA64_GET_PVO_PAGE(pvo);
 		managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED;
 		val = MINCORE_INCORE;
 	} else {
@@ -1453,6 +1460,7 @@
 	mtx_lock(PCPU_PTR(aim.qmap_lock));
 	pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
 	    (uint64_t)pa;
+	pvo->pvo_page = m;
 	moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
 	isync();
 
@@ -1519,10 +1527,11 @@
 		pvo->pvo_vaddr |= PVO_MANAGED;
 	}
 
+	init_pvo_entry(pvo, pmap, va);
+	pvo->pvo_page = m;
+
 	PV_PAGE_LOCK(m);
 	PMAP_LOCK(pmap);
-	if (pvo->pvo_pmap == NULL)
-		init_pvo_entry(pvo, pmap, va);
 	if (prot & VM_PROT_WRITE)
 		if (pmap_bootstrapped &&
 		    (m->oflags & VPO_UNMANAGED) == 0)
@@ -1686,7 +1695,7 @@
 	PMAP_LOCK(pmap);
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
 	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
-		m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
+		m = MOEA64_GET_PVO_PAGE(pvo);
 		if (!vm_page_wire_mapped(m))
 			m = NULL;
 	}
@@ -1730,6 +1739,7 @@
 	init_pvo_entry(pvo, kernel_pmap, va);
 	pvo->pvo_vaddr |= PVO_WIRED;
+	pvo->pvo_page = m;
 
 	moea64_pvo_enter(pvo, NULL, NULL);
 
@@ -1945,11 +1955,13 @@
 	pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
 	pvo->pvo_vaddr |= PVO_WIRED;
+	pvo->pvo_page = 0;
+
+	init_pvo_entry(pvo, kernel_pmap, va);
 
 	PMAP_LOCK(kernel_pmap);
 	oldpvo = moea64_pvo_find_va(kernel_pmap, va);
 	if (oldpvo != NULL)
 		moea64_pvo_remove_from_pmap(oldpvo);
-	init_pvo_entry(pvo, kernel_pmap, va);
 	error = moea64_pvo_enter(pvo, NULL, NULL);
 	PMAP_UNLOCK(kernel_pmap);
@@ -2319,7 +2331,7 @@
 	 */
 	oldprot = pvo->pvo_pte.prot;
 	pvo->pvo_pte.prot = prot;
-	pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
+	pg = MOEA64_GET_PVO_PAGE(pvo);
 
 	/*
 	 * If the PVO is in the page table, update mapping
@@ -2701,7 +2713,7 @@
 	/* Send RC bits to VM */
 	if ((pvo->pvo_vaddr & PVO_MANAGED) &&
 	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
-		pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
+		pg = MOEA64_GET_PVO_PAGE(pvo);
 		if (pg != NULL) {
 			refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
 			if (refchg & LPTE_CHG)
@@ -2747,7 +2759,7 @@
 	vm_page_t pg = NULL;
 
 	if (pvo->pvo_vaddr & PVO_MANAGED)
-		pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
+		pg = MOEA64_GET_PVO_PAGE(pvo);
 
 	PV_LOCK(PVO_PADDR(pvo));
 	moea64_pvo_remove_from_page_locked(pvo, pg);
Index: sys/powerpc/aim/slb.c
===================================================================
--- sys/powerpc/aim/slb.c
+++ sys/powerpc/aim/slb.c
@@ -295,10 +295,10 @@
 	struct slbtnode *ua, *next, *inter;
 	struct slb *slb;
 	int idx;
+	bool locked;
 
 	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));
-	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 
 	vsid = moea64_get_unique_vsid();
 	slbv = vsid << SLBV_VSID_SHIFT;
@@ -307,6 +307,17 @@
 
 	ua = pm->pm_slb_tree_root;
 
+	locked = PMAP_LOCKED(pm);
+
+	if (!locked) {
+		PMAP_LOCK(pm);
+		slb = user_va_to_slb_entry(pm, esid << ADDR_SR_SHFT);
+		if (slb != NULL) {
+			vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
+			goto out;
+		}
+	}
+
 	/* Descend to the correct leaf or NULL pointer. */
 	for (;;) {
 		KASSERT(uad_baseok(ua),
@@ -347,8 +358,12 @@
 	 * SLB mapping, so pre-spill this entry.
 	 */
 	eieio();
+out:
 	slb_insert_user(pm, slb);
 
+	if (!locked)
+		PMAP_UNLOCK(pm);
+
 	return (vsid);
 }
Index: sys/powerpc/include/pmap.h
===================================================================
--- sys/powerpc/include/pmap.h
+++ sys/powerpc/include/pmap.h
@@ -130,6 +130,7 @@
 	} pvo_pte;
 	pmap_t		pvo_pmap;	/* Owning pmap */
 	vm_offset_t	pvo_vaddr;	/* VA of entry */
+	vm_page_t	pvo_page;	/* vm_page */
 	uint64_t	pvo_vpn;	/* Virtual page number */
 };
 LIST_HEAD(pvo_head, pvo_entry);
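Illustrative note (not part of the patch): the new MOEA64_GET_PVO_PAGE macro uses a GCC/Clang statement expression to cache the result of PHYS_TO_VM_PAGE() in the new pvo_page field the first time a PVO's vm_page is needed, so later callers skip the physical-to-vm_page translation. Below is a minimal standalone sketch of the same lazy-caching pattern; struct entry, lookup_page(), and ENTRY_GET_PAGE are hypothetical stand-ins for the PVO, PHYS_TO_VM_PAGE(), and the macro added above, not code from the tree.

/*
 * Sketch only: lazy caching of a derived pointer via a statement
 * expression (a GCC/Clang extension), mirroring MOEA64_GET_PVO_PAGE.
 */
#include <stddef.h>

struct page {
	unsigned long	pa;		/* physical address of this page */
};

static struct page pages[16];		/* toy stand-in for the vm_page array */

/* Stand-in for PHYS_TO_VM_PAGE(): translate a physical address to a page. */
static struct page *
lookup_page(unsigned long pa)
{
	return (&pages[(pa >> 12) & 15]);
}

struct entry {
	unsigned long	pa;		/* like pvo_pte.pa */
	struct page	*page;		/* cached pointer, like pvo_page */
};

/* First use fills the cache; later uses return the cached pointer. */
#define	ENTRY_GET_PAGE(e)						\
	({								\
		if (__builtin_expect((e)->page == NULL, 0))		\
			(e)->page = lookup_page((e)->pa);		\
		((e)->page);						\
	})

/* Usage: the second call returns the pointer cached by the first. */
static struct page *
example(struct entry *e)
{
	(void)ENTRY_GET_PAGE(e);
	return (ENTRY_GET_PAGE(e));
}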