Index: sys/amd64/acpica/acpi_wakeup.c
===================================================================
--- sys/amd64/acpica/acpi_wakeup.c
+++ sys/amd64/acpica/acpi_wakeup.c
@@ -345,8 +345,7 @@
     for (i = 0; i < ACPI_WAKEPT_PAGES - (la57 ? 0 : 1); i++) {
         wakept_m[i] = pmap_page_alloc_below_4g(true);
-        wakept_pages[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
-            wakept_m[i]));
+        ASSIGN_VM_PAGE_TO_DMAP(wakept_pages[i], wakept_m[i]);
     }
     if (EVENTHANDLER_REGISTER(power_resume, acpi_stop_beep, NULL,
         EVENTHANDLER_PRI_LAST) == NULL) {
Index: sys/amd64/amd64/efirt_machdep.c
===================================================================
--- sys/amd64/amd64/efirt_machdep.c
+++ sys/amd64/amd64/efirt_machdep.c
@@ -186,7 +186,7 @@
     VM_OBJECT_WLOCK(obj_1t1_pt);
     efi_pmltop_page = efi_1t1_page();
     VM_OBJECT_WUNLOCK(obj_1t1_pt);
-    pml = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_pmltop_page));
+    ASSIGN_VM_PAGE_TO_DMAP(pml, efi_pmltop_page);
     if (la57) {
         efi_pml5 = pml;
         pmap_pinit_pml5(efi_pmltop_page);
Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c
+++ sys/amd64/amd64/mp_machdep.c
@@ -340,29 +340,29 @@
     /* Create a transient 1:1 mapping of low 4G */
     if (la57) {
         m_pml4 = pmap_page_alloc_below_4g(true);
-        v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
+        ASSIGN_VM_PAGE_TO_DMAP(v_pml4, m_pml4);
     } else {
         v_pml4 = &kernel_pmap->pm_pmltop[0];
     }
     m_pdp = pmap_page_alloc_below_4g(true);
-    v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pdp, m_pdp);
     m_pd[0] = pmap_page_alloc_below_4g(false);
-    v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pd, m_pd[0]);
     for (i = 0; i < NPDEPG; i++)
         v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
             X86_PG_M | PG_PS;
     m_pd[1] = pmap_page_alloc_below_4g(false);
-    v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pd, m_pd[1]);
     for (i = 0; i < NPDEPG; i++)
         v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
             X86_PG_A | X86_PG_M | PG_PS;
     m_pd[2] = pmap_page_alloc_below_4g(false);
-    v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pd, m_pd[2]);
     for (i = 0; i < NPDEPG; i++)
         v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
             X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
     m_pd[3] = pmap_page_alloc_below_4g(false);
-    v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pd, m_pd[3]);
     for (i = 0; i < NPDEPG; i++)
         v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
             X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -2185,18 +2185,18 @@
     r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
 
     m_code = pmap_page_alloc_below_4g(true);
-    v_code = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_code));
+    ASSIGN_VM_PAGE_TO_DMAP(v_code, m_code);
     m_pml5 = pmap_page_alloc_below_4g(true);
     KPML5phys = VM_PAGE_TO_PHYS(m_pml5);
     v_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(KPML5phys);
     m_pml4 = pmap_page_alloc_below_4g(true);
-    v_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pml4, m_pml4);
     m_pdp = pmap_page_alloc_below_4g(true);
-    v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pdp, m_pdp);
     m_pd = pmap_page_alloc_below_4g(true);
-    v_pd = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pd, m_pd);
     m_pt = pmap_page_alloc_below_4g(true);
-    v_pt = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pt));
+    ASSIGN_VM_PAGE_TO_DMAP(v_pt, m_pt);
 
     /*
      * Map m_code 1:1, it appears below 4G in KVA due to physical
@@ -3756,7 +3756,7 @@
     else if (cpu_vendor_id != CPU_VENDOR_INTEL)
         mfence();
     for (i = 0; i < count; i++) {
-        daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
+        ASSIGN_VM_PAGE_TO_DMAP(daddr, pages[i]);
         eva = daddr + PAGE_SIZE;
         for (; daddr < eva; daddr += cpu_clflush_line_size) {
             if (useclflushopt)
@@ -4312,7 +4312,7 @@
     pml4_entry_t *pm_pml4;
     int i;
 
-    pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
+    ASSIGN_VM_PAGE_TO_DMAP(pm_pml4, pml4pg);
 
     /* Wire in kernel global address entries. */
     for (i = 0; i < NKPML4E; i++) {
@@ -4354,7 +4354,7 @@
 {
     pml5_entry_t *pm_pml5;
 
-    pm_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pg));
+    ASSIGN_VM_PAGE_TO_DMAP(pm_pml5, pml5pg);
 
     /*
      * Add pml5 entry at top of KVA pointing to existing pml4 table,
@@ -4378,7 +4378,7 @@
     pml4_entry_t *pm_pml4u;
     int i;
 
-    pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pgu));
+    ASSIGN_VM_PAGE_TO_DMAP(pm_pml4u, pml4pgu);
     for (i = 0; i < NPML4EPG; i++)
         pm_pml4u[i] = pti_pml4[i];
 }
@@ -4388,7 +4388,7 @@
 {
     pml5_entry_t *pm_pml5u;
 
-    pm_pml5u = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pgu));
+    ASSIGN_VM_PAGE_TO_DMAP(pm_pml5u, pml5pgu);
     pagezero(pm_pml5u);
 
     /*
@@ -4504,8 +4504,7 @@
     pmltop_pgu = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_WIRED |
         VM_ALLOC_WAITOK);
     pmap_pt_page_count_pinit(pmap, 1);
-    pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP(
-        VM_PAGE_TO_PHYS(pmltop_pgu));
+    ASSIGN_VM_PAGE_TO_DMAP(pmap->pm_pmltopu, pmltop_pgu);
     if (pmap_is_la57(pmap))
         pmap_pinit_pml5_pti(pmltop_pgu);
     else
@@ -4841,7 +4840,7 @@
             else
                 return (NULL);
         }
-        pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
+        ASSIGN_VM_PAGE_TO_DMAP(pde, pdpg);
         pde = &pde[pmap_pde_index(va)];
     } else
         panic("pmap_alloc_pde: missing page table page for va %#lx",
@@ -4923,7 +4922,7 @@
     KASSERT(CPU_EMPTY(&pmap->pm_active),
         ("releasing active pmap %p", pmap));
 
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pmltop));
+    m = DMAP_TO_VM_PAGE(pmap->pm_pmltop);
 
     if (pmap_is_la57(pmap)) {
         pmap->pm_pmltop[pmap_pml5e_index(UPT_MAX_ADDRESS)] = 0;
@@ -4952,8 +4951,7 @@
     pmap_pt_page_count_pinit(pmap, -1);
 
     if (pmap->pm_pmltopu != NULL) {
-        m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->
-            pm_pmltopu));
+        m = DMAP_TO_VM_PAGE(pmap->pm_pmltopu);
         pmap_free_pt_page(NULL, m, false);
         pmap_pt_page_count_pinit(pmap, -1);
     }
@@ -5418,7 +5416,7 @@
         PV_STAT(counter_u64_add(pc_chunk_count, -1));
         PV_STAT(counter_u64_add(pc_chunk_frees, 1));
         /* Entire chunk is free; return it. */
-        m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+        m_pc = DMAP_TO_VM_PAGE(pc);
         dump_drop_page(m_pc->phys_addr);
         mtx_lock(&pvc->pvc_lock);
         TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
@@ -5519,7 +5517,7 @@
         PV_STAT(counter_u64_add(pc_chunk_frees, 1));
         counter_u64_add(pv_page_count, -1);
         /* entire chunk is free, return it */
-        m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+        m = DMAP_TO_VM_PAGE(pc);
         dump_drop_page(m->phys_addr);
         vm_page_unwire_noq(m);
         vm_page_free(m);
@@ -7071,7 +7069,7 @@
                 NULL, va);
             if (mp == NULL)
                 goto allocf;
-            pdpe = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
+            ASSIGN_VM_PAGE_TO_DMAP(pdpe, mp);
             pdpe = &pdpe[pmap_pdpe_index(va)];
             origpte = *pdpe;
             MPASS(origpte == 0);
@@ -7092,7 +7090,7 @@
                 NULL, va);
             if (mp == NULL)
                 goto allocf;
-            pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
+            ASSIGN_VM_PAGE_TO_DMAP(pde, mp);
             pde = &pde[pmap_pde_index(va)];
             origpte = *pde;
             MPASS(origpte == 0);
@@ -7775,7 +7773,7 @@
                 return (NULL);
             }
         }
-        pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
+        ASSIGN_VM_PAGE_TO_DMAP(pte, mpte);
         pte = &pte[pmap_pte_index(va)];
     } else {
         mpte = NULL;
@@ -8208,8 +8206,7 @@
             } else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
                 NULL)) == NULL)
                 goto out;
-            dst_pte = (pt_entry_t *)
-                PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
+            ASSIGN_VM_PAGE_TO_DMAP(dst_pte, dstmpte);
             dst_pte = &dst_pte[pmap_pte_index(addr)];
             if (*dst_pte == 0 &&
                 pmap_try_insert_pv_entry(dst_pmap, addr,
@@ -8272,13 +8269,13 @@
 void
 pmap_zero_page(vm_page_t m)
 {
-    vm_offset_t va;
+    void *addr;
 
 #ifdef TSLOG_PAGEZERO
     TSENTER();
 #endif
-    va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-    pagezero((void *)va);
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
+    pagezero(addr);
 #ifdef TSLOG_PAGEZERO
     TSEXIT();
 #endif
@@ -8291,12 +8288,13 @@
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
-    vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+    void *addr;
 
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
     if (off == 0 && size == PAGE_SIZE)
-        pagezero((void *)va);
+        pagezero(addr);
     else
-        bzero((char *)va + off, size);
+        bzero((char *)addr + off, size);
 }
 
 /*
@@ -8305,10 +8303,12 @@
 void
 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
-    vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
-    vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
+    void *src, *dst;
+
+    ASSIGN_VM_PAGE_TO_DMAP(src, msrc);
+    ASSIGN_VM_PAGE_TO_DMAP(dst, mdst);
 
-    pagecopy((void *)src, (void *)dst);
+    pagecopy(src, dst);
 }
 
 int unmapped_buf_allowed = 1;
@@ -9607,6 +9607,7 @@
 void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
+    vm_offset_t va;
 
     m->md.pat_mode = ma;
 
@@ -9616,14 +9617,15 @@
      * required for data coherence.
      */
     if ((m->flags & PG_FICTITIOUS) == 0 &&
-        pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
-        m->md.pat_mode))
+        (ASSIGN_VM_PAGE_TO_DMAP(va, m),
+        pmap_change_attr(va, PAGE_SIZE, m->md.pat_mode)))
         panic("memory attribute change on the direct map failed");
 }
 
 void
 pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma)
 {
+    vm_offset_t va;
     int error;
 
     m->md.pat_mode = ma;
@@ -9631,7 +9633,8 @@
     if ((m->flags & PG_FICTITIOUS) != 0)
         return;
     PMAP_LOCK(kernel_pmap);
-    error = pmap_change_props_locked(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
+    error = pmap_change_props_locked(va,
         PAGE_SIZE, PROT_NONE, m->md.pat_mode, 0);
     PMAP_UNLOCK(kernel_pmap);
     if (error != 0)
@@ -10683,7 +10686,7 @@
             goto retry;
         mphys = VM_PAGE_TO_PHYS(m);
         *pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
-        PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->ref_count++;
+        DMAP_TO_VM_PAGE(pde)->ref_count++;
     } else {
         MPASS((*pde & X86_PG_PS) == 0);
         mphys = *pde & PG_FRAME;
@@ -10802,8 +10805,7 @@
             *pde = pa | pg_g | X86_PG_PS | X86_PG_RW |
                 X86_PG_V | X86_PG_A | pg_nx |
                 pmap_cache_bits(kernel_pmap, mattr, true);
-            PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
-                ref_count++;
+            DMAP_TO_VM_PAGE(pde)->ref_count++;
             inc = NBPDR;
         } else {
             pte = pmap_large_map_pte(va);
@@ -10811,8 +10813,7 @@
             *pte = pa | pg_g | X86_PG_RW | X86_PG_V |
                 X86_PG_A | pg_nx |
                 pmap_cache_bits(kernel_pmap, mattr, false);
-            PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
-                ref_count++;
+            DMAP_TO_VM_PAGE(pte)->ref_count++;
             inc = PAGE_SIZE;
         }
     }
@@ -10880,7 +10881,7 @@
                 pd, len));
             pde_store(pde, 0);
             inc = NBPDR;
-            m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
+            m = DMAP_TO_VM_PAGE(pde);
             m->ref_count--;
             if (m->ref_count == 0) {
                 *pdpe = 0;
@@ -10894,12 +10895,12 @@
                 (u_long)pte, *pte));
             pte_clear(pte);
             inc = PAGE_SIZE;
-            m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pte));
+            m = DMAP_TO_VM_PAGE(pte);
             m->ref_count--;
             if (m->ref_count == 0) {
                 *pde = 0;
                 SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
-                m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
+                m = DMAP_TO_VM_PAGE(pde);
                 m->ref_count--;
                 if (m->ref_count == 0) {
                     *pdpe = 0;
@@ -11126,7 +11127,7 @@
     pti_obj = vm_pager_allocate(OBJT_PHYS, NULL, 0, VM_PROT_ALL, 0, NULL);
     VM_OBJECT_WLOCK(pti_obj);
     pml4_pg = pmap_pti_alloc_page();
-    pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
+    ASSIGN_VM_PAGE_TO_DMAP(pti_pml4, pml4_pg);
     for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
         va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
         pdpe = pmap_pti_pdpe(va);
@@ -11200,11 +11201,8 @@
 static void
 pmap_pti_wire_pte(void *pte)
 {
-    vm_page_t m;
-
     VM_OBJECT_ASSERT_WLOCKED(pti_obj);
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
-    m->ref_count++;
+    DMAP_TO_VM_PAGE(pte)->ref_count++;
 }
 
 static void
@@ -11213,7 +11211,7 @@
     vm_page_t m;
 
     VM_OBJECT_ASSERT_WLOCKED(pti_obj);
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
+    m = DMAP_TO_VM_PAGE(pde);
     MPASS(only_ref || m->ref_count > 1);
     pmap_pti_free_page(m);
 }
@@ -11225,7 +11223,7 @@
     pd_entry_t *pde;
 
     VM_OBJECT_ASSERT_WLOCKED(pti_obj);
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
+    m = DMAP_TO_VM_PAGE(pte);
     if (pmap_pti_free_page(m)) {
         pde = pmap_pti_pde(va);
         MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);
@@ -12220,7 +12218,7 @@
     pd_entry_t *pd;
     int i4, i3, i2;
 
-    pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg4));
+    ASSIGN_VM_PAGE_TO_DMAP(pml4, pg4);
     for (i4 = 0; i4 < num_entries; i4++) {
         if ((pml4[i4] & PG_V) == 0)
             continue;
@@ -12230,7 +12228,7 @@
             continue;
         }
         ptpages_show_page(3, i4, pg3);
-        pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg3));
+        ASSIGN_VM_PAGE_TO_DMAP(pdp, pg3);
         for (i3 = 0; i3 < NPDPEPG; i3++) {
             if ((pdp[i3] & PG_V) == 0)
                 continue;
@@ -12240,7 +12238,7 @@
                 continue;
             }
             ptpages_show_page(2, i3, pg2);
-            pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg2));
+            ASSIGN_VM_PAGE_TO_DMAP(pd, pg2);
             for (i2 = 0; i2 < NPDEPG; i2++) {
                 if ((pd[i2] & PG_V) == 0)
                     continue;
@@ -12284,8 +12282,8 @@
             ptpages_show_pml4(pg, NPML4EPG, PG_V);
         }
     } else {
-        ptpages_show_pml4(PHYS_TO_VM_PAGE(DMAP_TO_PHYS(
-            (vm_offset_t)pmap->pm_pmltop)), NUP4ML4E, PG_V);
+        ptpages_show_pml4(DMAP_TO_VM_PAGE(pmap->pm_pmltop),
+            NUP4ML4E, PG_V);
     }
 }
 #endif
Index: sys/amd64/include/sf_buf.h
===================================================================
--- sys/amd64/include/sf_buf.h
+++ sys/amd64/include/sf_buf.h
@@ -39,8 +39,10 @@
 static inline vm_offset_t
 sf_buf_kva(struct sf_buf *sf)
 {
+    vm_offset_t va;
 
-    return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS((vm_page_t)sf)));
+    ASSIGN_VM_PAGE_TO_DMAP(va, (vm_page_t)sf);
+    return (va);
 }
 
 static inline vm_page_t
Index: sys/amd64/vmm/vmm.c
===================================================================
--- sys/amd64/vmm/vmm.c
+++ sys/amd64/vmm/vmm.c
@@ -1179,8 +1179,10 @@
     }
 
     if (count == 1) {
+        vm_offset_t va;
         *cookie = m;
-        return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
+        ASSIGN_VM_PAGE_TO_DMAP(va, m);
+        return ((void *)(va + pageoff));
     } else {
         *cookie = NULL;
         return (NULL);
Index: sys/arm64/arm64/efirt_machdep.c
===================================================================
--- sys/arm64/arm64/efirt_machdep.c
+++ sys/arm64/arm64/efirt_machdep.c
@@ -172,7 +172,7 @@
     VM_OBJECT_WLOCK(obj_1t1_pt);
     efi_l0_page = efi_1t1_page();
     VM_OBJECT_WUNLOCK(obj_1t1_pt);
-    efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
+    ASSIGN_VM_PAGE_TO_DMAP(efi_l0, efi_l0_page);
     efi_ttbr0 = ASID_TO_OPERAND(ASID_RESERVED_FOR_EFI) |
         VM_PAGE_TO_PHYS(efi_l0_page);
 
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -283,6 +283,8 @@
 
 #define PTE_TO_VM_PAGE(pte) PHYS_TO_VM_PAGE(PTE_TO_PHYS(pte))
 #define VM_PAGE_TO_PTE(m)   PHYS_TO_PTE(VM_PAGE_TO_PHYS(m))
+/* Return the DMAP address of the page table page that "pte" references. */
+#define PTE_TO_DMAP(pte)    ((pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pte)))
 
 /*
  * The presence of this flag indicates that the mapping is writeable.
@@ -550,10 +551,8 @@
 static __inline pd_entry_t *
 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
 {
-    pd_entry_t *l1;
 
-    l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l0)));
-    return (&l1[pmap_l1_index(va)]);
+    return (&PTE_TO_DMAP(pmap_load(l0))[pmap_l1_index(va)]);
 }
 
 static __inline pd_entry_t *
@@ -571,7 +570,7 @@
 static __inline pd_entry_t *
 pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
 {
-    pd_entry_t l1, *l2p;
+    pd_entry_t l1;
 
     l1 = pmap_load(l1p);
 
@@ -585,8 +584,7 @@
         ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
     KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
         ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
-    l2p = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(l1));
-    return (&l2p[pmap_l2_index(va)]);
+    return (&PTE_TO_DMAP(l1)[pmap_l2_index(va)]);
 }
 
 static __inline pd_entry_t *
@@ -605,7 +603,6 @@
 pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
 {
     pd_entry_t l2;
-    pt_entry_t *l3p;
 
     l2 = pmap_load(l2p);
 
@@ -619,8 +616,7 @@
         ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
     KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
         ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
-    l3p = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(l2));
-    return (&l3p[pmap_l3_index(va)]);
+    return (&PTE_TO_DMAP(l2)[pmap_l3_index(va)]);
 }
 
 /*
@@ -2648,8 +2644,7 @@
             l1pg->ref_count++;
         }
 
-        l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l0)));
-        l1 = &l1[ptepindex & Ln_ADDR_MASK];
+        l1 = &PTE_TO_DMAP(pmap_load(l0))[ptepindex & Ln_ADDR_MASK];
         KASSERT((pmap_load(l1) & ATTR_DESCR_VALID) == 0,
             ("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));
         pmap_store(l1, VM_PAGE_TO_PTE(m) | L1_TABLE);
@@ -2672,11 +2667,9 @@
                 return (NULL);
             }
             tl0 = pmap_load(l0);
-            l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(tl0));
-            l1 = &l1[l1index & Ln_ADDR_MASK];
+            l1 = &PTE_TO_DMAP(tl0)[l1index & Ln_ADDR_MASK];
         } else {
-            l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(tl0));
-            l1 = &l1[l1index & Ln_ADDR_MASK];
+            l1 = &PTE_TO_DMAP(tl0)[l1index & Ln_ADDR_MASK];
             tl1 = pmap_load(l1);
             if (tl1 == 0) {
                 /* recurse for allocating page dir */
@@ -2692,8 +2685,7 @@
             }
         }
 
-        l2 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l1)));
-        l2 = &l2[ptepindex & Ln_ADDR_MASK];
+        l2 = &PTE_TO_DMAP(pmap_load(l1))[ptepindex & Ln_ADDR_MASK];
         KASSERT((pmap_load(l2) & ATTR_DESCR_VALID) == 0,
             ("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
         pmap_store(l2, VM_PAGE_TO_PTE(m) | L2_TABLE);
@@ -2735,7 +2727,7 @@
             else
                 return (NULL);
         }
-        l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
+        ASSIGN_VM_PAGE_TO_DMAP(l2, l2pg);
         l2 = &l2[pmap_l2_index(va)];
     } else
         panic("pmap_alloc_l2: missing page table page for va %#lx",
@@ -3147,7 +3139,7 @@
         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
         /* Entire chunk is free; return it. */
-        m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+        m_pc = DMAP_TO_VM_PAGE(pc);
         dump_drop_page(m_pc->phys_addr);
         mtx_lock(&pvc->pvc_lock);
         TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
@@ -3249,7 +3241,7 @@
         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
         /* entire chunk is free, return it */
-        m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+        m = DMAP_TO_VM_PAGE(pc);
         dump_drop_page(m->phys_addr);
         vm_page_unwire_noq(m);
         vm_page_free(m);
@@ -4635,7 +4627,7 @@
      * Examine the first L3E in the specified PTP.  Abort if this L3E is
      * ineligible for promotion...
      */
-    firstl3 = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l2)));
+    firstl3 = PTE_TO_DMAP(pmap_load(l2));
     newl2 = pmap_load(firstl3);
     if ((newl2 & ATTR_SW_NO_PROMOTE) != 0)
         return (false);
@@ -4944,7 +4936,7 @@
                 PMAP_LOCK(pmap);
                 goto restart;
             }
-            l2p = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
+            ASSIGN_VM_PAGE_TO_DMAP(l2p, mp);
             l2p = &l2p[pmap_l2_index(va)];
             origpte = pmap_load(l2p);
         } else {
@@ -5526,8 +5518,7 @@
     if ((new_l2 & ATTR_S1_XN) == 0 && (PTE_TO_PHYS(new_l2) !=
         PTE_TO_PHYS(old_l2) || (old_l2 & ATTR_S1_XN) != 0) &&
         pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) {
-        cpu_icache_sync_range((void *)PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)),
-            L2_SIZE);
+        cpu_icache_sync_range((void *)PTE_TO_DMAP(new_l2), L2_SIZE);
     }
 
     /*
@@ -5661,7 +5652,7 @@
                 (*ml3p)->ref_count += L3C_ENTRIES - 1;
             }
         }
-        l3p = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ml3p));
+        ASSIGN_VM_PAGE_TO_DMAP(l3p, *ml3p);
 have_l3p:
 
     /*
@@ -5693,8 +5684,7 @@
         } else {
             KASSERT(lvl == 2,
                 ("pmap_enter_l3c: Invalid level %d", lvl));
-            l3p = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(
-                pmap_load(pde)));
+            l3p = PTE_TO_DMAP(pmap_load(pde));
         }
     }
     l3p = &l3p[pmap_l3_index(va)];
@@ -5908,7 +5898,7 @@
                 return (mpte);
             }
         }
-        l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
+        ASSIGN_VM_PAGE_TO_DMAP(l3, mpte);
         l3 = &l3[pmap_l3_index(va)];
     } else {
         mpte = NULL;
@@ -6302,8 +6292,7 @@
             ("pmap_copy: source page table page is unused"));
         if (va_next > end_addr)
             va_next = end_addr;
-        src_pte = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(srcptepaddr));
-        src_pte = &src_pte[pmap_l3_index(addr)];
+        src_pte = &PTE_TO_DMAP(srcptepaddr)[pmap_l3_index(addr)];
         dstmpte = NULL;
         for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
             ptetemp = pmap_load(src_pte);
@@ -6321,8 +6310,7 @@
             } else if ((dstmpte = pmap_alloc_l3(dst_pmap,
                 addr, NULL)) == NULL)
                 goto out;
-            dst_pte = (pt_entry_t *)
-                PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
+            ASSIGN_VM_PAGE_TO_DMAP(dst_pte, dstmpte);
             dst_pte = &dst_pte[pmap_l3_index(addr)];
             if ((ptetemp & ATTR_CONTIGUOUS) != 0 &&
                 (addr & L3C_OFFSET) == 0 && addr + L3C_OFFSET <=
@@ -6409,9 +6397,10 @@
 void
 pmap_zero_page(vm_page_t m)
 {
-    vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+    void *addr;
 
-    pagezero((void *)va);
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
+    pagezero(addr);
 }
 
 /*
@@ -6423,12 +6412,13 @@
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
-    vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+    void *addr;
 
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
     if (off == 0 && size == PAGE_SIZE)
-        pagezero((void *)va);
+        pagezero(addr);
     else
-        bzero((char *)va + off, size);
+        bzero((char *)addr + off, size);
 }
 
 /*
@@ -6440,10 +6430,11 @@
 void
 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
-    vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
-    vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
+    void *src, *dst;
 
-    pagecopy((void *)src, (void *)dst);
+    ASSIGN_VM_PAGE_TO_DMAP(src, msrc);
+    ASSIGN_VM_PAGE_TO_DMAP(dst, mdst);
+    pagecopy(src, dst);
 }
 
 int unmapped_buf_allowed = 1;
@@ -6487,8 +6478,10 @@
 vm_offset_t
 pmap_quick_enter_page(vm_page_t m)
 {
+    vm_offset_t va;
 
-    return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
+    return (va);
 }
 
 void
@@ -7696,6 +7689,7 @@
 void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
+    vm_offset_t va;
 
     m->md.pv_memattr = ma;
 
@@ -7705,8 +7699,8 @@
      * required for data coherence.
      */
     if ((m->flags & PG_FICTITIOUS) == 0 &&
-        pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
-        m->md.pv_memattr) != 0)
+        (ASSIGN_VM_PAGE_TO_DMAP(va, m),
+        pmap_change_attr(va, PAGE_SIZE, m->md.pv_memattr) != 0))
         panic("memory attribute change on the direct map failed");
 }
Index: sys/arm64/include/sf_buf.h
===================================================================
--- sys/arm64/include/sf_buf.h
+++ sys/arm64/include/sf_buf.h
@@ -40,8 +40,10 @@
 static inline vm_offset_t
 sf_buf_kva(struct sf_buf *sf)
 {
+    vm_offset_t va;
 
-    return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS((vm_page_t)sf)));
+    ASSIGN_VM_PAGE_TO_DMAP(va, (vm_page_t)sf);
+    return (va);
 }
 
 static inline vm_page_t
Index: sys/arm64/vmm/vmm.c
===================================================================
--- sys/arm64/vmm/vmm.c
+++ sys/arm64/vmm/vmm.c
@@ -1530,8 +1530,10 @@
     }
 
     if (count == 1) {
+        vm_offset_t va;
         *cookie = m;
-        return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
+        ASSIGN_VM_PAGE_TO_DMAP(va, m);
+        return ((void *)(va + pageoff));
     } else {
         *cookie = NULL;
         return (NULL);
Index: sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_page.c
+++ sys/compat/linuxkpi/common/src/linux_page.c
@@ -201,8 +201,8 @@
         _linux_free_kmem(addr, order);
     } else {
         vm_page_t page;
 
-        page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
+        page = DMAP_TO_VM_PAGE(addr);
         linux_free_pages(page, order);
     }
 }
Index: sys/dev/iscsi/icl_soft.c
===================================================================
--- sys/dev/iscsi/icl_soft.c
+++ sys/dev/iscsi/icl_soft.c
@@ -1196,7 +1196,7 @@
 
     while (len > 0) {
         todo = MIN(len, PAGE_SIZE - page_offset);
-        vaddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(bp->bio_ma[i]));
+        ASSIGN_VM_PAGE_TO_DMAP(vaddr, bp->bio_ma[i]);
 
         do {
             mtodo = min(todo, M_SIZE(m) - m->m_len);
@@ -1287,7 +1287,7 @@
 
     while (len > 0) {
         todo = MIN(len, PAGE_SIZE - page_offset);
-        vaddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(bp->bio_ma[i]));
+        ASSIGN_VM_PAGE_TO_DMAP(vaddr, bp->bio_ma[i]);
 
         m_copydata(ip->ip_data_mbuf, pdu_off, todo,
             (char *)vaddr + page_offset);
Index: sys/kern/uipc_ktls.c
===================================================================
--- sys/kern/uipc_ktls.c
+++ sys/kern/uipc_ktls.c
@@ -449,7 +449,7 @@
             VM_MEMATTR_DEFAULT);
         if (m == NULL)
             break;
-        store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+        ASSIGN_VM_PAGE_TO_DMAP(store[i], m);
     }
     return (i);
 }
@@ -461,7 +461,7 @@
     int i, j;
 
     for (i = 0; i < count; i++) {
-        m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
+        m = DMAP_TO_VM_PAGE(store[i]);
         for (j = 0; j < atop(ktls_maxlen); j++) {
             (void)vm_page_unwire_noq(m + j);
             vm_page_free(m + j);
Index: sys/opencrypto/criov.c
===================================================================
--- sys/opencrypto/criov.c
+++ sys/opencrypto/criov.c
@@ -162,8 +162,9 @@
     processed = 0;
     CVM_PAGE_SKIP();
     while (len > 0) {
-        char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
+        char *kaddr;
         count = min(PAGE_SIZE - off, len);
+        ASSIGN_VM_PAGE_TO_DMAP(kaddr, *pages);
         rval = (*f)(arg, kaddr + off, count);
         if (rval)
             return (rval);
@@ -178,12 +179,15 @@
 static inline void *
 cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
 {
+    char *p;
+
     if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
         return (NULL);
 
     pages += (skip / PAGE_SIZE);
     skip -= rounddown(skip, PAGE_SIZE);
-    return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
+    ASSIGN_VM_PAGE_TO_DMAP(p, *pages);
+    return (p + skip);
 }
 
 /*
@@ -199,9 +203,11 @@
     processed = 0;
     CVM_PAGE_SKIP();
     while (len > 0) {
+        char *p;
+
         count = min(PAGE_SIZE - off, len);
-        bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
-            count);
+        ASSIGN_VM_PAGE_TO_DMAP(p, *pages);
+        bcopy(cp, p + off, count);
         len -= count;
         cp += count;
         processed += count;
@@ -224,9 +230,11 @@
     processed = 0;
     CVM_PAGE_SKIP();
     while (len > 0) {
+        char *p;
+
         count = min(PAGE_SIZE - off, len);
-        bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
-            count);
+        ASSIGN_VM_PAGE_TO_DMAP(p, *pages);
+        bcopy(p + off, cp, count);
         len -= count;
         cp += count;
         processed += count;
@@ -428,6 +436,7 @@
     }
 
     switch (cc->cc_type) {
+    char *p;
     case CRYPTO_BUF_CONTIG:
         *len = cc->cc_buf_len;
         return (cc->cc_buf);
@@ -439,8 +448,8 @@
         return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
     case CRYPTO_BUF_VMPAGE:
         *len = PAGE_SIZE - cc->cc_offset;
-        return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
-            *cc->cc_vmpage)) + cc->cc_offset);
+        ASSIGN_VM_PAGE_TO_DMAP(p, *cc->cc_vmpage);
+        return (p + cc->cc_offset);
     case CRYPTO_BUF_UIO:
         *len = cc->cc_iov->iov_len - cc->cc_offset;
         return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
@@ -494,8 +503,8 @@
         break;
     case CRYPTO_BUF_VMPAGE:
         for (;;) {
-            dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
-                *cc->cc_vmpage)) + cc->cc_offset;
+            ASSIGN_VM_PAGE_TO_DMAP(dst, *cc->cc_vmpage);
+            dst += cc->cc_offset;
             remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
             todo = MIN(remain, size);
             memcpy(dst, src, todo);
@@ -583,8 +592,8 @@
         break;
     case CRYPTO_BUF_VMPAGE:
         for (;;) {
-            src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
-                *cc->cc_vmpage)) + cc->cc_offset;
+            ASSIGN_VM_PAGE_TO_DMAP(src, *cc->cc_vmpage);
+            src += cc->cc_offset;
             remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
             todo = MIN(remain, size);
             memcpy(dst, src, todo);
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -1472,36 +1472,30 @@
 void
 moea64_copy_page_dmap(vm_page_t msrc, vm_page_t mdst)
 {
-    vm_offset_t dst;
-    vm_offset_t src;
+    void *src, *dst;
 
-    dst = VM_PAGE_TO_PHYS(mdst);
-    src = VM_PAGE_TO_PHYS(msrc);
-
-    bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst),
-        PAGE_SIZE);
+    ASSIGN_VM_PAGE_TO_DMAP(src, msrc);
+    ASSIGN_VM_PAGE_TO_DMAP(dst, mdst);
+    bcopy(src, dst, PAGE_SIZE);
 }
 
 inline void
 moea64_copy_pages_dmap(vm_page_t *ma, vm_offset_t a_offset, vm_page_t *mb,
     vm_offset_t b_offset, int xfersize)
 {
-    void *a_cp, *b_cp;
+    uintptr_t a_cp, b_cp;
     vm_offset_t a_pg_offset, b_pg_offset;
     int cnt;
 
     while (xfersize > 0) {
         a_pg_offset = a_offset & PAGE_MASK;
         cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
-        a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
-            VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
-            a_pg_offset;
+        ASSIGN_VM_PAGE_TO_DMAP(a_cp, ma[a_offset >> PAGE_SHIFT]);
         b_pg_offset = b_offset & PAGE_MASK;
         cnt = min(cnt, PAGE_SIZE - b_pg_offset);
-        b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
-            VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
-            b_pg_offset;
-        bcopy(a_cp, b_cp, cnt);
+        ASSIGN_VM_PAGE_TO_DMAP(b_cp, mb[b_offset >> PAGE_SHIFT]);
+        bcopy((char *)a_cp + a_pg_offset,
+            (char *)b_cp + b_pg_offset, cnt);
         a_offset += cnt;
         b_offset += cnt;
         xfersize -= cnt;
@@ -1577,10 +1571,9 @@
 void
 moea64_zero_page_dmap(vm_page_t m)
 {
-    vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
-    vm_offset_t va, off;
+    vm_offset_t off, va;
 
-    va = PHYS_TO_DMAP(pa);
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
     for (off = 0; off < PAGE_SIZE; off += cacheline_size)
         __asm __volatile("dcbz 0,%0" :: "r"(va + off));
 }
@@ -1614,8 +1607,10 @@
 vm_offset_t
 moea64_quick_enter_page_dmap(vm_page_t m)
 {
+    vm_offset_t va;
 
-    return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
+    return (va);
 }
 
 void
Index: sys/powerpc/aim/mmu_radix.c
===================================================================
--- sys/powerpc/aim/mmu_radix.c
+++ sys/powerpc/aim/mmu_radix.c
@@ -1498,7 +1498,7 @@
         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
         /* Entire chunk is free; return it. */
-        m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+        m_pc = DMAP_TO_VM_PAGE(pc);
         dump_drop_page(m_pc->phys_addr);
         mtx_lock(&pv_chunks_mutex);
         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
@@ -1588,7 +1588,7 @@
         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
         /* entire chunk is free, return it */
-        m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+        m = DMAP_TO_VM_PAGE(pc);
         dump_drop_page(m->phys_addr);
         vm_page_unwire_noq(m);
         vm_page_free(m);
@@ -2578,8 +2578,7 @@
             dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL);
             if (dst_pdpg == NULL)
                 break;
-            l3e = (pml3_entry_t *)
-                PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
+            ASSIGN_VM_PAGE_TO_DMAP(l3e, dst_pdpg);
             l3e = &l3e[pmap_pml3e_index(addr)];
             if (be64toh(*l3e) == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
                 pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr,
@@ -2617,8 +2616,7 @@
             else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
                 NULL)) == NULL)
                 goto out;
-            dst_pte = (pt_entry_t *)
-                PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
+            ASSIGN_VM_PAGE_TO_DMAP(dst_pte, dstmpte);
             dst_pte = &dst_pte[pmap_pte_index(addr)];
             if (be64toh(*dst_pte) == 0 &&
                 pmap_try_insert_pv_entry(dst_pmap, addr,
@@ -2672,21 +2670,23 @@
 static void
 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
-    vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
-    vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
+    void *src, *dst;
+
+    ASSIGN_VM_PAGE_TO_DMAP(src, msrc);
+    ASSIGN_VM_PAGE_TO_DMAP(dst, mdst);
 
     CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
     /*
      * XXX slow
      */
-    bcopy((void *)src, (void *)dst, PAGE_SIZE);
+    bcopy(src, dst, PAGE_SIZE);
 }
 
 static void
 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
     vm_offset_t b_offset, int xfersize)
 {
-    void *a_cp, *b_cp;
+    uintptr_t a_cp, b_cp;
     vm_offset_t a_pg_offset, b_pg_offset;
     int cnt;
 
@@ -2696,15 +2696,12 @@
     while (xfersize > 0) {
         a_pg_offset = a_offset & PAGE_MASK;
         cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
-        a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
-            VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
-            a_pg_offset;
+        ASSIGN_VM_PAGE_TO_DMAP(a_cp, ma[a_offset >> PAGE_SHIFT]);
         b_pg_offset = b_offset & PAGE_MASK;
         cnt = min(cnt, PAGE_SIZE - b_pg_offset);
-        b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
-            VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
-            b_pg_offset;
-        bcopy(a_cp, b_cp, cnt);
+        ASSIGN_VM_PAGE_TO_DMAP(b_cp, mb[b_offset >> PAGE_SHIFT]);
+        bcopy((char *)a_cp + a_pg_offset,
+            (char *)b_cp + b_pg_offset, cnt);
         a_offset += cnt;
         b_offset += cnt;
         xfersize -= cnt;
@@ -3196,7 +3193,7 @@
             " in pmap %p", va, pmap);
             return (KERN_RESOURCE_SHORTAGE);
         }
-        l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
+        ASSIGN_VM_PAGE_TO_DMAP(l3e, pdpg);
         l3e = &l3e[pmap_pml3e_index(va)];
     oldl3e = be64toh(*l3e);
     if ((oldl3e & PG_V) != 0) {
@@ -3245,6 +3242,7 @@
      */
     uwptpg = NULL;
     if ((newpde & PG_W) != 0 && pmap != kernel_pmap) {
+        pt_entry_t *pte;
         uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
         if (uwptpg == NULL)
             return (KERN_RESOURCE_SHORTAGE);
@@ -3256,8 +3254,8 @@
         }
         pmap_resident_count_inc(pmap, 1);
         uwptpg->ref_count = NPTEPG;
-        pmap_fill_ptp((pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(uwptpg)),
-            newpde);
+        ASSIGN_VM_PAGE_TO_DMAP(pte, uwptpg);
+        pmap_fill_ptp(pte, newpde);
     }
     if ((newpde & PG_MANAGED) != 0) {
         /*
@@ -3409,7 +3407,7 @@
                 return (mpte);
             }
         }
-        pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
+        ASSIGN_VM_PAGE_TO_DMAP(pte, mpte);
         pte = &pte[pmap_pte_index(va)];
     } else {
         mpte = NULL;
@@ -3615,7 +3613,7 @@
             RADIX_PGD_SIZE / PAGE_SIZE,
             0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE,
             VM_MEMATTR_DEFAULT);
-        store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+        ASSIGN_VM_PAGE_TO_DMAP(store[i], m);
     }
     return (count);
 }
@@ -3635,7 +3633,7 @@
          * XXX selectively remove dmap and KVA entries so we don't
          * need to bzero
          */
-        m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
+        m = DMAP_TO_VM_PAGE(store[i]);
         for (int j = page_count-1; j >= 0; j--) {
             vm_page_unwire_noq(&m[j]);
             SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss);
@@ -4088,7 +4086,7 @@
             addr += L3_PAGE_SIZE;
             continue;
         }
-        l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
+        ASSIGN_VM_PAGE_TO_DMAP(l3e, pdpg);
         l3e = &l3e[pmap_pml3e_index(addr)];
         if ((be64toh(*l3e) & PG_V) == 0) {
             pa |= PG_M | PG_A | PG_RW;
@@ -5769,7 +5767,7 @@
     vm_offset_t addr;
 
     CTR2(KTR_PMAP, "%s(%p)", __func__, m);
-    addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
     pagezero(addr);
 }
 
@@ -5780,7 +5778,7 @@
 
     CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
     MPASS(off + size <= PAGE_SIZE);
-    addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
     memset(addr + off, 0, size);
 }
 
@@ -5916,6 +5914,7 @@
 void
 mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
+    vm_offset_t va;
 
     CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
     m->md.mdpg_cache_attrs = ma;
@@ -5926,8 +5925,8 @@
      * required for data coherence.
      */
     if ((m->flags & PG_FICTITIOUS) == 0 &&
-        mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
-        PAGE_SIZE, m->md.mdpg_cache_attrs))
+        (ASSIGN_VM_PAGE_TO_DMAP(va, m),
+        mmu_radix_change_attr(va, PAGE_SIZE, m->md.mdpg_cache_attrs)))
         panic("memory attribute change on the direct map failed");
 }
 
@@ -6159,11 +6158,10 @@
 vm_offset_t
 mmu_radix_quick_enter_page(vm_page_t m)
 {
-    vm_paddr_t paddr;
-
+    vm_offset_t va;
     CTR2(KTR_PMAP, "%s(%p)", __func__, m);
-    paddr = VM_PAGE_TO_PHYS(m);
-    return (PHYS_TO_DMAP(paddr));
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
+    return (va);
 }
 
 void
Index: sys/powerpc/aim/slb.c
===================================================================
--- sys/powerpc/aim/slb.c
+++ sys/powerpc/aim/slb.c
@@ -505,7 +505,7 @@
         return (NULL);
 
     if (hw_direct_map)
-        va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+        ASSIGN_VM_PAGE_TO_DMAP(va, m);
     else {
         va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
         pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
Index: sys/powerpc/booke/pmap_64.c
===================================================================
--- sys/powerpc/booke/pmap_64.c
+++ sys/powerpc/booke/pmap_64.c
@@ -153,6 +153,7 @@
 mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
 {
     vm_page_t m;
+    vm_offset_t va;
     int req;
 
     req = VM_ALLOC_WIRED | VM_ALLOC_ZERO;
@@ -167,8 +168,8 @@
         PMAP_LOCK(pmap);
     }
     m->pindex = idx;
-
-    return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
+    return (va);
 }
 
 /* Initialize pool of kva ptbl buffers. */
@@ -276,7 +277,7 @@
         }
         return (page);
     }
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
+    m = DMAP_TO_VM_PAGE(page);
     page = ptr_tbl[index];
     vm_page_unwire_noq(m);
     vm_page_free_zero(m);
@@ -321,7 +322,6 @@
 ptbl_unhold(pmap_t pmap, vm_offset_t va)
 {
     pte_t *ptbl;
-    vm_page_t m;
     u_int pg_root_idx;
     pte_t ***pdir_l1;
     u_int pdir_l1_idx;
@@ -340,21 +340,17 @@
     ptbl = pdir[pdir_idx];
 
     /* decrement hold count */
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
-
-    if (!unhold_free_page(pmap, m))
+    if (!unhold_free_page(pmap, DMAP_TO_VM_PAGE(ptbl)))
         return (0);
 
     pdir[pdir_idx] = NULL;
 
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));
-    if (!unhold_free_page(pmap, m))
+    if (!unhold_free_page(pmap, DMAP_TO_VM_PAGE(pdir)))
         return (1);
 
     pdir_l1[pdir_l1_idx] = NULL;
 
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));
-    if (!unhold_free_page(pmap, m))
+    if (!unhold_free_page(pmap, DMAP_TO_VM_PAGE(pdir_l1)))
         return (1);
 
     pmap->pm_root[pg_root_idx] = NULL;
@@ -368,13 +364,10 @@
 static void
 ptbl_hold(pmap_t pmap, pte_t *ptbl)
 {
-    vm_page_t m;
-
     KASSERT((pmap != kernel_pmap),
         ("ptbl_hold: holding kernel ptbl!"));
 
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
-    m->ref_count++;
+    DMAP_TO_VM_PAGE(ptbl)->ref_count++;
 }
 
@@ -666,12 +659,10 @@
 
 static void
 mmu_booke_zero_page_area(vm_page_t m, int off, int size)
 {
-    vm_offset_t va;
-
+    caddr_t addr;
     /* XXX KASSERT off and size are within a single page? */
-
-    va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-    bzero((caddr_t)va + off, size);
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
+    bzero(addr + off, size);
 }
 
@@ -682,7 +673,7 @@
 {
     vm_offset_t off, va;
 
-    va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
 
     for (off = 0; off < PAGE_SIZE; off += cacheline_size)
         __asm __volatile("dcbz 0,%0" :: "r"(va + off));
@@ -696,18 +687,18 @@
 static void
 mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
 {
-    vm_offset_t sva, dva;
+    caddr_t sva, dva;
 
-    sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
-    dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
-    memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
+    ASSIGN_VM_PAGE_TO_DMAP(sva, sm);
+    ASSIGN_VM_PAGE_TO_DMAP(dva, dm);
+    memcpy(dva, sva, PAGE_SIZE);
 }
 
 static inline void
 mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset, vm_page_t *mb,
     vm_offset_t b_offset, int xfersize)
 {
-    void *a_cp, *b_cp;
+    caddr_t a_cp, b_cp;
     vm_offset_t a_pg_offset, b_pg_offset;
     int cnt;
 
@@ -720,11 +711,10 @@
         pb = mb[b_offset >> PAGE_SHIFT];
         cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
         cnt = min(cnt, PAGE_SIZE - b_pg_offset);
-        a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
-            a_pg_offset);
-        b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
-            b_pg_offset);
-        bcopy(a_cp, b_cp, cnt);
+        ASSIGN_VM_PAGE_TO_DMAP(a_cp, pa);
+        ASSIGN_VM_PAGE_TO_DMAP(b_cp, pb);
+        bcopy((void *)(a_cp + a_pg_offset),
+            (void *)(b_cp + b_pg_offset), cnt);
         a_offset += cnt;
         b_offset += cnt;
         xfersize -= cnt;
@@ -734,7 +724,10 @@
 static vm_offset_t
 mmu_booke_quick_enter_page(vm_page_t m)
 {
-    return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+    vm_offset_t va;
+
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
+    return (va);
 }
 
 static void
Index: sys/powerpc/powerpc/uma_machdep.c
===================================================================
--- sys/powerpc/powerpc/uma_machdep.c
+++ sys/powerpc/powerpc/uma_machdep.c
@@ -80,7 +80,7 @@
     vm_page_t m;
 
     if (hw_direct_map)
-        m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)mem));
+        m = DMAP_TO_VM_PAGE(mem);
     else {
         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)mem));
         pmap_kremove((vm_offset_t)mem);
Index: sys/riscv/include/sf_buf.h
===================================================================
--- sys/riscv/include/sf_buf.h
+++ sys/riscv/include/sf_buf.h
@@ -36,8 +36,10 @@
 static inline vm_offset_t
 sf_buf_kva(struct sf_buf *sf)
 {
+    vm_offset_t va;
 
-    return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS((vm_page_t)sf)));
+    ASSIGN_VM_PAGE_TO_DMAP(va, (vm_page_t)sf);
+    return (va);
 }
 
 static inline vm_page_t
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -363,6 +363,8 @@
 #define L2PTE_TO_PHYS(l2) \
     ((((l2) & ~PTE_HI_MASK) >> PTE_PPN1_S) << L2_SHIFT)
 #define PTE_TO_VM_PAGE(pte) PHYS_TO_VM_PAGE(PTE_TO_PHYS(pte))
+/* Return the DMAP address of the page table page that "pte" references. */
+#define PTE_TO_DMAP(pte)    ((pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pte)))
 
 static __inline pd_entry_t *
 pmap_l0(pmap_t pmap, vm_offset_t va)
@@ -376,14 +377,8 @@
 static __inline pd_entry_t *
 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
 {
-    vm_paddr_t phys;
-    pd_entry_t *l1;
-
     KASSERT(pmap_mode != PMAP_MODE_SV39, ("%s: in SV39 mode", __func__));
-    phys = PTE_TO_PHYS(pmap_load(l0));
-    l1 = (pd_entry_t *)PHYS_TO_DMAP(phys);
-
-    return (&l1[pmap_l1_index(va)]);
+    return (&PTE_TO_DMAP(pmap_load(l0))[pmap_l1_index(va)]);
 }
 
 static __inline pd_entry_t *
@@ -408,13 +403,7 @@
 static __inline pd_entry_t *
 pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
 {
-    vm_paddr_t phys;
-    pd_entry_t *l2;
-
-    phys = PTE_TO_PHYS(pmap_load(l1));
-    l2 = (pd_entry_t *)PHYS_TO_DMAP(phys);
-
-    return (&l2[pmap_l2_index(va)]);
+    return (&PTE_TO_DMAP(pmap_load(l1))[pmap_l2_index(va)]);
 }
 
 static __inline pd_entry_t *
@@ -436,13 +425,7 @@
 static __inline pt_entry_t *
 pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
 {
-    vm_paddr_t phys;
-    pt_entry_t *l3;
-
-    phys = PTE_TO_PHYS(pmap_load(l2));
-    l3 = (pd_entry_t *)PHYS_TO_DMAP(phys);
-
-    return (&l3[pmap_l3_index(va)]);
+    return (&PTE_TO_DMAP(pmap_load(l2))[pmap_l3_index(va)]);
 }
 
 static __inline pt_entry_t *
@@ -1513,9 +1496,7 @@
         }
     }
 
-    phys = PTE_TO_PHYS(pmap_load(l1));
-    l2 = (pd_entry_t *)PHYS_TO_DMAP(phys);
-    l2 = &l2[ptepindex & Ln_ADDR_MASK];
+    l2 = &PTE_TO_DMAP(pmap_load(l1))[ptepindex & Ln_ADDR_MASK];
     KASSERT((pmap_load(l2) & PTE_V) == 0,
         ("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
 
@@ -1621,7 +1602,7 @@
         mtx_unlock(&allpmaps_lock);
     }
 
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_top));
+    m = DMAP_TO_VM_PAGE(pmap->pm_top);
     vm_page_unwire_noq(m);
     vm_page_free(m);
 }
@@ -1814,7 +1795,7 @@
     PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
     PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
     /* entire chunk is free, return it */
-    m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+    m = DMAP_TO_VM_PAGE(pc);
     dump_drop_page(m->phys_addr);
     vm_page_unwire_noq(m);
     vm_page_free(m);
@@ -2765,7 +2746,7 @@
      * ineligible for promotion or does not map the first 4KB physical page
     * within a 2MB page.
      */
-    firstl3 = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l2)));
+    firstl3 = (pt_entry_t *)PTE_TO_DMAP(pmap_load(l2));
     firstl3e = pmap_load(firstl3);
     pa = PTE_TO_PHYS(firstl3e);
     if ((pa & L2_OFFSET) != 0) {
@@ -3215,7 +3196,7 @@
         return (KERN_RESOURCE_SHORTAGE);
     }
 
-    l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
+    ASSIGN_VM_PAGE_TO_DMAP(l2, l2pg);
     l2 = &l2[pmap_l2_index(va)];
     if ((oldl2 = pmap_load(l2)) != 0) {
         KASSERT(l2pg->ref_count > 1,
@@ -3463,7 +3444,7 @@
                 return (mpte);
             }
         }
-        l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
+        ASSIGN_VM_PAGE_TO_DMAP(l3, mpte);
         l3 = &l3[pmap_l3_index(va)];
     } else {
         mpte = NULL;
@@ -3667,9 +3648,10 @@
 void
 pmap_zero_page(vm_page_t m)
 {
-    vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+    void *addr;
 
-    pagezero((void *)va);
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
+    pagezero(addr);
 }
 
 /*
@@ -3681,12 +3663,13 @@
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
-    vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+    void *addr;
 
+    ASSIGN_VM_PAGE_TO_DMAP(addr, m);
     if (off == 0 && size == PAGE_SIZE)
-        pagezero((void *)va);
+        pagezero(addr);
     else
-        bzero((char *)va + off, size);
+        bzero((char *)addr + off, size);
 }
 
 /*
@@ -3698,10 +3681,12 @@
 void
 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
-    vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
-    vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
+    void *src, *dst;
+
+    ASSIGN_VM_PAGE_TO_DMAP(src, msrc);
+    ASSIGN_VM_PAGE_TO_DMAP(dst, mdst);
 
-    pagecopy((void *)src, (void *)dst);
+    pagecopy(src, dst);
 }
 
 int unmapped_buf_allowed = 1;
@@ -3745,8 +3730,10 @@
 vm_offset_t
 pmap_quick_enter_page(vm_page_t m)
 {
+    vm_offset_t va;
 
-    return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+    ASSIGN_VM_PAGE_TO_DMAP(va, m);
+    return (va);
 }
 
 void
@@ -4559,6 +4546,7 @@
 void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
+    vm_offset_t va;
 
     m->md.pv_memattr = ma;
 
@@ -4568,8 +4556,8 @@
      * required for data coherence.
      */
     if ((m->flags & PG_FICTITIOUS) == 0 &&
-        pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
-        m->md.pv_memattr) != 0)
+        (ASSIGN_VM_PAGE_TO_DMAP(va, m),
+        pmap_change_attr(va, PAGE_SIZE, m->md.pv_memattr) != 0))
         panic("memory attribute change on the direct map failed");
 }
Index: sys/sys/sf_buf.h
===================================================================
--- sys/sys/sf_buf.h
+++ sys/sys/sf_buf.h
@@ -113,10 +113,13 @@
 static inline vm_offset_t
 sf_buf_kva(struct sf_buf *sf)
 {
-    if (PMAP_HAS_DMAP)
-        return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS((vm_page_t)sf)));
+    vm_offset_t va;
 
-    return (sf->kva);
+    if (PMAP_HAS_DMAP)
+        ASSIGN_VM_PAGE_TO_DMAP(va, (vm_page_t)sf);
+    else
+        va = sf->kva;
+    return (va);
 }
 
 static inline vm_page_t
Index: sys/vm/vm.h
===================================================================
--- sys/vm/vm.h
+++ sys/vm/vm.h
@@ -58,6 +58,16 @@
 #define VM_H
 
 #include <machine/vm.h>
+
+/*
+ * Convert between a vm_page and its direct map (DMAP) address:
+ * ASSIGN_VM_PAGE_TO_DMAP() stores the DMAP address of page "m" in "lval",
+ * cast to lval's type; DMAP_TO_VM_PAGE() returns the vm_page backing the
+ * DMAP address "dmap".
+ */
+#define ASSIGN_VM_PAGE_TO_DMAP(lval, m) \
+    (lval = (__typeof(lval))PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)))
+#define DMAP_TO_VM_PAGE(dmap)  PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)dmap))
 
 typedef char vm_inherit_t;  /* inheritance codes */
 
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -3902,7 +3902,7 @@
     if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
         uint64_t *p;
         int i;
-        p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+        ASSIGN_VM_PAGE_TO_DMAP(p, m);
         for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
             KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
                 m, i, (uintmax_t)*p));
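
A minimal usage sketch of the two new vm.h helpers (not part of the patch;
demo_page_scrub() and its header list are illustrative, assuming a kernel
configuration with a direct map, i.e. PMAP_HAS_DMAP != 0):

    #include <sys/param.h>
    #include <sys/systm.h>

    #include <vm/vm.h>
    #include <vm/vm_param.h>
    #include <vm/vm_page.h>

    static void
    demo_page_scrub(vm_page_t m)
    {
        char *p;

        /* DMAP address of "m"; the macro supplies the char * cast. */
        ASSIGN_VM_PAGE_TO_DMAP(p, m);
        memset(p, 0, PAGE_SIZE);

        /* Round-trip from the DMAP address back to the vm_page. */
        KASSERT(DMAP_TO_VM_PAGE(p) == m,
            ("demo_page_scrub: DMAP round trip failed"));
    }

This is the pattern the mechanical conversions above follow: the assignment
macro replaces the open-coded (type *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) casts,
and DMAP_TO_VM_PAGE() replaces PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)p)).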