Index: head/sys/riscv/riscv/pmap.c
===================================================================
--- head/sys/riscv/riscv/pmap.c
+++ head/sys/riscv/riscv/pmap.c
@@ -356,36 +356,6 @@
 	return (pmap_l2_to_l3(l2, va));
 }
 
-
-static __inline int
-pmap_is_write(pt_entry_t entry)
-{
-
-	return (entry & PTE_W);
-}
-
-static __inline int
-pmap_l3_valid(pt_entry_t l3)
-{
-
-	return (l3 & PTE_V);
-}
-
-static inline int
-pmap_page_accessed(pt_entry_t pte)
-{
-
-	return (pte & PTE_A);
-}
-
-/* Checks if the page is dirty. */
-static inline int
-pmap_page_dirty(pt_entry_t pte)
-{
-
-	return (pte & PTE_D);
-}
-
 static __inline void
 pmap_resident_count_inc(pmap_t pmap, int count)
 {
@@ -898,7 +868,7 @@
 retry:
 	l3p = pmap_l3(pmap, va);
 	if (l3p != NULL && (l3 = pmap_load(l3p)) != 0) {
-		if ((pmap_is_write(l3)) || ((prot & VM_PROT_WRITE) == 0)) {
+		if ((l3 & PTE_W) != 0 || (prot & VM_PROT_WRITE) == 0) {
 			phys = PTE_TO_PHYS(l3);
 			if (vm_page_pa_tryrelock(pmap, phys, &pa))
 				goto retry;
@@ -1777,7 +1747,7 @@
 	if (old_l3 & PTE_SW_MANAGED) {
 		phys = PTE_TO_PHYS(old_l3);
 		m = PHYS_TO_VM_PAGE(phys);
-		if (pmap_page_dirty(old_l3))
+		if ((old_l3 & PTE_D) != 0)
 			vm_page_dirty(m);
 		if (old_l3 & PTE_A)
 			vm_page_aflag_set(m, PGA_REFERENCED);
@@ -1935,7 +1905,7 @@
 		/*
 		 * Update the vm_page_t clean and reference bits.
 		 */
-		if (pmap_page_dirty(tl3))
+		if ((tl3 & PTE_D) != 0)
 			vm_page_dirty(m);
 		pmap_unuse_l3(pmap, pv->pv_va, pmap_load(l2), &free);
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
@@ -1997,9 +1967,9 @@
 		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
 		    sva += L3_SIZE) {
 			l3 = pmap_load(l3p);
-			if (pmap_l3_valid(l3)) {
+			if ((l3 & PTE_V) != 0) {
 				entry = pmap_load(l3p);
-				entry &= ~(PTE_W);
+				entry &= ~PTE_W;
 				pmap_load_store(l3p, entry);
 				/* XXX: Use pmap_invalidate_range */
 				pmap_invalidate_page(pmap, sva);
@@ -2186,7 +2156,7 @@
 	/*
 	 * Is the specified virtual address already mapped?
 	 */
-	if (pmap_l3_valid(orig_l3)) {
+	if ((orig_l3 & PTE_V) != 0) {
 		/*
 		 * Wiring change, just update stats. We don't worry about
 		 * wiring PT pages as they remain resident as long as there
@@ -2217,10 +2187,9 @@
 		/*
 		 * No, might be a protection or wiring change.
 		 */
-		if ((orig_l3 & PTE_SW_MANAGED) != 0) {
-			if (pmap_is_write(new_l3))
-				vm_page_aflag_set(m, PGA_WRITEABLE);
-		}
+		if ((orig_l3 & PTE_SW_MANAGED) != 0 &&
+		    (new_l3 & PTE_W) != 0)
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 		goto validate;
 	}
 
@@ -2245,7 +2214,7 @@
 			 * concurrent calls to pmap_page_test_mappings() and
 			 * pmap_ts_referenced().
 			 */
-			if (pmap_page_dirty(orig_l3))
+			if ((orig_l3 & PTE_D) != 0)
 				vm_page_dirty(om);
 			if ((orig_l3 & PTE_A) != 0)
 				vm_page_aflag_set(om, PGA_REFERENCED);
@@ -2278,7 +2247,7 @@
 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 		m->md.pv_gen++;
-		if (pmap_is_write(new_l3))
+		if ((new_l3 & PTE_W) != 0)
 			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
 
@@ -2298,8 +2267,8 @@
 		pmap_invalidate_page(pmap, va);
 		KASSERT(PTE_TO_PHYS(orig_l3) == pa,
 		    ("pmap_enter: invalid update"));
-		if (pmap_page_dirty(orig_l3) &&
-		    (orig_l3 & PTE_SW_MANAGED) != 0)
+		if ((orig_l3 & (PTE_D | PTE_SW_MANAGED)) ==
+		    (PTE_D | PTE_SW_MANAGED))
 			vm_page_dirty(m);
 	} else {
 		pmap_load_store(l3, new_l3);
@@ -2840,7 +2809,7 @@
 				/*
 				 * Update the vm_page_t clean/reference bits.
 				 */
-				if (pmap_page_dirty(tl3))
+				if ((tl3 & PTE_D) != 0)
 					vm_page_dirty(m);
 
 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
@@ -3044,11 +3013,11 @@
 
 retry:
 		oldl3 = pmap_load(l3);
-		if (pmap_is_write(oldl3)) {
-			newl3 = oldl3 & ~(PTE_W);
+		if ((oldl3 & PTE_W) != 0) {
+			newl3 = oldl3 & ~PTE_W;
 			if (!atomic_cmpset_long(l3, oldl3, newl3))
 				goto retry;
-			/* TODO: use pmap_page_dirty(oldl3) ? */
+			/* TODO: check for PTE_D? */
 			if ((oldl3 & PTE_A) != 0)
 				vm_page_dirty(m);
 			pmap_invalidate_page(pmap, pv->pv_va);
@@ -3129,7 +3098,7 @@
 
 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
 		old_l3 = pmap_load(l3);
-		if (pmap_page_dirty(old_l3))
+		if ((old_l3 & PTE_D) != 0)
 			vm_page_dirty(m);
 		if ((old_l3 & PTE_A) != 0) {
 			if (safe_to_clear_referenced(pmap, old_l3)) {
@@ -3271,9 +3240,9 @@
 			val = MINCORE_INCORE;
 		}
 
-		if (pmap_page_dirty(tpte))
+		if ((tpte & PTE_D) != 0)
 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
-		if (pmap_page_accessed(tpte))
+		if ((tpte & PTE_A) != 0)
 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 		managed = (tpte & PTE_SW_MANAGED) == PTE_SW_MANAGED;
 	}
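
The change is mechanical: each removed one-line wrapper (pmap_is_write(), pmap_l3_valid(), pmap_page_accessed(), pmap_page_dirty()) is replaced at its call sites with a direct test of the corresponding PTE flag bit, which also allows combined tests such as the single mask comparison against (PTE_D | PTE_SW_MANAGED) in pmap_enter(). For illustration only, a minimal stand-alone sketch of the resulting pattern; the PTE_* values below follow the RISC-V Sv39 PTE bit layout (V is bit 0, W bit 2, A bit 6, D bit 7) and are stand-ins for the kernel's own definitions in sys/riscv/include/pte.h, not taken from this patch.

/*
 * Sketch of the direct-bit-test idiom used throughout the patch.
 * Builds and runs as an ordinary userland program.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_entry_t;

#define	PTE_V	(1UL << 0)	/* valid */
#define	PTE_W	(1UL << 2)	/* writable */
#define	PTE_A	(1UL << 6)	/* accessed */
#define	PTE_D	(1UL << 7)	/* dirty */
#define	PTE_SW_MANAGED	(1UL << 9)	/* software-managed (illustrative) */

int
main(void)
{
	pt_entry_t l3 = PTE_V | PTE_W | PTE_D | PTE_SW_MANAGED;

	/* Was pmap_page_dirty(l3); now a direct bit test. */
	if ((l3 & PTE_D) != 0)
		printf("dirty\n");

	/* Was pmap_l3_valid(l3). */
	if ((l3 & PTE_V) != 0)
		printf("valid\n");

	/* Combined test, as in the pmap_enter() hunk. */
	if ((l3 & (PTE_D | PTE_SW_MANAGED)) == (PTE_D | PTE_SW_MANAGED))
		printf("dirty and managed\n");

	/* Clearing write permission, as in pmap_protect()/pmap_remove_write(). */
	l3 &= ~PTE_W;
	printf("writable: %d\n", (l3 & PTE_W) != 0);
	return (0);
}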