Index: head/sys/powerpc/aim/mmu_oea64.c
===================================================================
--- head/sys/powerpc/aim/mmu_oea64.c
+++ head/sys/powerpc/aim/mmu_oea64.c
@@ -108,6 +108,9 @@
 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
 #define	VSID_HASH_MASK		0x0000007fffffffffULL
 
+/* Get physical address from PVO. */
+#define	PVO_PADDR(pvo)		((pvo)->pvo_pte.pa & LPTE_RPGN)
+
 /*
  * Locking semantics:
  *
@@ -1207,7 +1210,7 @@
 		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
 			if (refchg < 0)
 				refchg = LPTE_CHG;
-			m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+			m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
 
 			refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
 			if (refchg & LPTE_CHG)
@@ -1615,7 +1618,7 @@
 	if (pvo == NULL)
 		pa = 0;
 	else
-		pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
+		pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo));
 	PMAP_UNLOCK(pm);
 
 	return (pa);
@@ -1636,7 +1639,7 @@
 	PMAP_LOCK(pmap);
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
 	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
-		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+		m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
 		if (!vm_page_wire_mapped(m))
 			m = NULL;
 	}
@@ -1943,7 +1946,7 @@
 	pvo = moea64_pvo_find_va(kernel_pmap, va);
 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
 	    va));
-	pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
+	pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo));
 	PMAP_UNLOCK(kernel_pmap);
 	return (pa);
 }
@@ -2269,7 +2272,7 @@
 	 */
 	oldprot = pvo->pvo_pte.prot;
 	pvo->pvo_pte.prot = prot;
-	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+	pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
 
 	/*
 	 * If the PVO is in the page table, update mapping
@@ -2284,7 +2287,7 @@
 		if ((pg->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(pg, PGA_EXECUTABLE);
 		moea64_syncicache(pm, PVO_VADDR(pvo),
-		    pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
+		    PVO_PADDR(pvo), PAGE_SIZE);
 	}
 
 	/*
@@ -2648,7 +2651,7 @@
 	/* Send RC bits to VM */
 	if ((pvo->pvo_vaddr & PVO_MANAGED) &&
 	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
-		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+		pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
 		if (pg != NULL) {
 			refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
 			if (refchg & LPTE_CHG)
@@ -2674,7 +2677,7 @@
 	/*
 	 * Update vm about page writeability/executability if managed
 	 */
-	PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
+	PV_LOCKASSERT(PVO_PADDR(pvo));
 	if (pvo->pvo_vaddr & PVO_MANAGED) {
 		if (m != NULL) {
 			LIST_REMOVE(pvo, pvo_vlink);
@@ -2694,11 +2697,11 @@
 	vm_page_t pg = NULL;
 
 	if (pvo->pvo_vaddr & PVO_MANAGED)
-		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
+		pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
 
-	PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
+	PV_LOCK(PVO_PADDR(pvo));
 	moea64_pvo_remove_from_page_locked(pvo, pg);
-	PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
+	PV_UNLOCK(PVO_PADDR(pvo));
 }
 
 static struct pvo_entry *
@@ -2810,7 +2813,7 @@
 	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
 	    ppa < pa + size; ppa += PAGE_SIZE,
 	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
-		if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
+		if (pvo == NULL || PVO_PADDR(pvo) != ppa) {
			error = EFAULT;
			break;
		}
@@ -2886,7 +2889,7 @@
 		len = MIN(lim - va, sz);
 		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
 		if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
-			pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
+			pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
 			moea64_syncicache(pm, va, pa, len);
 		}
 		va += len;
@@ -3005,7 +3008,7 @@
 			}
 		}
 
-		pa = pvo->pvo_pte.pa & LPTE_RPGN;
+		pa = PVO_PADDR(pvo);
 		if (va & PVO_LARGE) {
 			pa_end = pa + lpsize;