Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -2103,11 +2103,11 @@
 void
 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
+	struct spglist free;
 	struct rwlock *lock;
 	vm_offset_t va, va_next;
-	pd_entry_t *l1, *l2;
-	pt_entry_t l3_pte, *l3;
-	struct spglist free;
+	pd_entry_t *l1, *l2, l2e;
+	pt_entry_t *l3;
 
 	/*
 	 * Perform an unsynchronized read. This is, however, safe.
@@ -2143,16 +2143,22 @@
 		l2 = pmap_l1_to_l2(l1, sva);
 		if (l2 == NULL)
 			continue;
-
-		l3_pte = pmap_load(l2);
-
-		/*
-		 * Weed out invalid mappings.
-		 */
-		if (l3_pte == 0)
-			continue;
-		if ((pmap_load(l2) & PTE_RX) != 0)
+		if ((l2e = pmap_load(l2)) == 0)
 			continue;
+		if ((l2e & PTE_RWX) != 0) {
+			if (sva + L2_SIZE == va_next && eva >= va_next) {
+				(void)pmap_remove_l2(pmap, l2, sva,
+				    pmap_load(l1), &free, &lock);
+				continue;
+			} else if (!pmap_demote_l2_locked(pmap, l2, sva,
+			    &lock)) {
+				/*
+				 * The large page mapping was destroyed.
+				 */
+				continue;
+			}
+			l2e = pmap_load(l2);
+		}
 
 		/*
 		 * Limit our scan to either the end of the va represented
@@ -2165,8 +2171,6 @@
 		va = va_next;
 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
 		    sva += L3_SIZE) {
-			if (l3 == NULL)
-				panic("l3 == NULL");
 			if (pmap_load(l3) == 0) {
 				if (va != va_next) {
 					pmap_invalidate_range(pmap, va, sva);
@@ -2176,8 +2180,7 @@
 			}
 			if (va == va_next)
 				va = sva;
-			if (pmap_remove_l3(pmap, l3, sva, l3_pte, &free,
-			    &lock)) {
+			if (pmap_remove_l3(pmap, l3, sva, l2e, &free, &lock)) {
 				sva += L3_SIZE;
 				break;
 			}
@@ -2187,7 +2190,7 @@
 	}
 	if (lock != NULL)
 		rw_wunlock(lock);
-	rw_runlock(&pvh_global_lock);
+	rw_runlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 	vm_page_free_pages_toq(&free, false);
 }
@@ -2208,40 +2211,52 @@
 void
 pmap_remove_all(vm_page_t m)
 {
-	pv_entry_t pv;
-	pmap_t pmap;
-	pt_entry_t *l3, tl3;
-	pd_entry_t *l2, tl2;
 	struct spglist free;
+	struct md_page *pvh;
+	pmap_t pmap;
+	pt_entry_t *l3, l3e;
+	pd_entry_t *l2, l2e;
+	pv_entry_t pv;
+	vm_offset_t va;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	SLIST_INIT(&free);
+	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
+	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
+
 	rw_wlock(&pvh_global_lock);
+	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
+		pmap = PV_PMAP(pv);
+		PMAP_LOCK(pmap);
+		va = pv->pv_va;
+		l2 = pmap_l2(pmap, va);
+		(void)pmap_demote_l2(pmap, l2, va);
+		PMAP_UNLOCK(pmap);
+	}
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pmap_resident_count_dec(pmap, 1);
 		l2 = pmap_l2(pmap, pv->pv_va);
 		KASSERT(l2 != NULL, ("pmap_remove_all: no l2 table found"));
-		tl2 = pmap_load(l2);
+		l2e = pmap_load(l2);
 
-		KASSERT((tl2 & PTE_RX) == 0,
-		    ("pmap_remove_all: found a table when expecting "
-		    "a block in %p's pv list", m));
+		KASSERT((l2e & PTE_RX) == 0,
+		    ("pmap_remove_all: found a superpage in %p's pv list", m));
 
 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
-		tl3 = pmap_load_clear(l3);
+		l3e = pmap_load_clear(l3);
 		pmap_invalidate_page(pmap, pv->pv_va);
-		if (tl3 & PTE_SW_WIRED)
+		if (l3e & PTE_SW_WIRED)
 			pmap->pm_stats.wired_count--;
-		if ((tl3 & PTE_A) != 0)
+		if ((l3e & PTE_A) != 0)
 			vm_page_aflag_set(m, PGA_REFERENCED);
 
 		/*
 		 * Update the vm_page_t clean and reference bits.
 		 */
-		if ((tl3 & PTE_D) != 0)
+		if ((l3e & PTE_D) != 0)
 			vm_page_dirty(m);
 		pmap_unuse_pt(pmap, pv->pv_va, pmap_load(l2), &free);
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
@@ -3576,17 +3591,19 @@
 void
 pmap_remove_pages(pmap_t pmap)
 {
-	pd_entry_t ptepde, *l2;
-	pt_entry_t *l3, tl3;
 	struct spglist free;
-	vm_page_t m;
+	pd_entry_t ptepde;
+	pt_entry_t *pte, tpte;
+	vm_page_t m, mpte, mt;
 	pv_entry_t pv;
+	struct md_page *pvh;
 	struct pv_chunk *pc, *npc;
 	struct rwlock *lock;
 	int64_t bit;
 	uint64_t inuse, bitmask;
-	int allfree, field, freed, idx;
 	vm_paddr_t pa;
+	int allfree, field, freed, idx;
+	bool superpage;
 
 	lock = NULL;
@@ -3605,53 +3622,94 @@
 				pv = &pc->pc_pventry[idx];
 				inuse &= ~bitmask;
 
-				l2 = pmap_l2(pmap, pv->pv_va);
-				ptepde = pmap_load(l2);
-				l3 = pmap_l2_to_l3(l2, pv->pv_va);
-				tl3 = pmap_load(l3);
+				pte = pmap_l1(pmap, pv->pv_va);
+				ptepde = pmap_load(pte);
+				pte = pmap_l1_to_l2(pte, pv->pv_va);
+				tpte = pmap_load(pte);
+				if ((tpte & PTE_RWX) != 0) {
+					superpage = true;
+				} else {
+					ptepde = tpte;
+					pte = pmap_l2_to_l3(pte, pv->pv_va);
+					tpte = pmap_load(pte);
+					superpage = false;
+				}
 
 				/*
 				 * We cannot remove wired pages from a
 				 * process' mapping at this time.
 				 */
-				if (tl3 & PTE_SW_WIRED) {
+				if (tpte & PTE_SW_WIRED) {
 					allfree = 0;
 					continue;
 				}
 
-				pa = PTE_TO_PHYS(tl3);
+				pa = PTE_TO_PHYS(tpte);
 				m = PHYS_TO_VM_PAGE(pa);
 				KASSERT(m->phys_addr == pa,
 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 				    m, (uintmax_t)m->phys_addr,
-				    (uintmax_t)tl3));
+				    (uintmax_t)tpte));
 
 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
 				    m < &vm_page_array[vm_page_array_size],
-				    ("pmap_remove_pages: bad l3 %#jx",
-				    (uintmax_t)tl3));
+				    ("pmap_remove_pages: bad pte %#jx",
+				    (uintmax_t)tpte));
 
-				pmap_clear(l3);
+				pmap_clear(pte);
 
 				/*
 				 * Update the vm_page_t clean/reference bits.
 				 */
-				if ((tl3 & PTE_D) != 0)
-					vm_page_dirty(m);
+				if ((tpte & (PTE_D | PTE_W)) == (PTE_D | PTE_W)) {
+					if (superpage)
+						for (mt = m;
+						    mt < &m[Ln_ENTRIES]; mt++)
+							vm_page_dirty(mt);
+					else
+						vm_page_dirty(m);
+				}
 
 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 
 				/* Mark free */
 				pc->pc_map[field] |= bitmask;
-				pmap_resident_count_dec(pmap, 1);
-				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
-				m->md.pv_gen++;
-				if (TAILQ_EMPTY(&m->md.pv_list) &&
-				    (m->aflags & PGA_WRITEABLE) != 0)
-					vm_page_aflag_clear(m, PGA_WRITEABLE);
-
-				pmap_unuse_l3(pmap, pv->pv_va, ptepde, &free);
+				if (superpage) {
+					pmap_resident_count_dec(pmap, Ln_ENTRIES);
+					pvh = pa_to_pvh(pa);
+					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+					pvh->pv_gen++;
+					if (TAILQ_EMPTY(&pvh->pv_list)) {
+						for (mt = m;
+						    mt < &m[Ln_ENTRIES];
+						    mt++)
+							if ((mt->aflags & PGA_WRITEABLE) != 0 &&
+							    TAILQ_EMPTY(&mt->md.pv_list))
+								vm_page_aflag_clear(mt,
								    PGA_WRITEABLE);
+					}
+					mpte = pmap_remove_pt_page(pmap, pv->pv_va);
+					if (mpte != NULL) {
+						pmap_resident_count_dec(pmap, 1);
+						KASSERT(mpte->wire_count == Ln_ENTRIES,
+						    ("pmap_remove_pages: pte page wire count error"));
+						mpte->wire_count = 0;
+						pmap_add_delayed_free_list(mpte, &free, FALSE);
+					}
+				} else {
+					pmap_resident_count_dec(pmap, 1);
+					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+					m->md.pv_gen++;
+					if (TAILQ_EMPTY(&m->md.pv_list) &&
+					    (m->aflags & PGA_WRITEABLE) != 0) {
+						pvh = pa_to_pvh(pa);
+						if (TAILQ_EMPTY(&pvh->pv_list))
+							vm_page_aflag_clear(m,
+							    PGA_WRITEABLE);
+					}
+				}
+				pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
 				freed++;
 			}
 		}