D18867.diff
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -3988,13 +3988,6 @@
rw_runlock(&pvh_global_lock);
}
-static __inline boolean_t
-safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
-{
-
- return (FALSE);
-}
-
/*
* pmap_ts_referenced:
*
@@ -4014,38 +4007,104 @@
int
pmap_ts_referenced(vm_page_t m)
{
+ struct spglist free;
+ struct md_page *pvh;
+ struct rwlock *lock;
pv_entry_t pv, pvf;
pmap_t pmap;
- struct rwlock *lock;
- pd_entry_t *l2;
- pt_entry_t *l3, old_l3;
+ pd_entry_t *l2, l2e;
+ pt_entry_t *l3, l3e;
vm_paddr_t pa;
- int cleared, md_gen, not_cleared;
- struct spglist free;
+ vm_offset_t va;
+ int md_gen, pvh_gen, ret;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_ts_referenced: page %p is not managed", m));
SLIST_INIT(&free);
- cleared = 0;
+ ret = 0;
pa = VM_PAGE_TO_PHYS(m);
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
+
lock = PHYS_TO_PV_LIST_LOCK(pa);
rw_rlock(&pvh_global_lock);
rw_wlock(lock);
retry:
- not_cleared = 0;
+ if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
+ goto small_mappings;
+ pv = pvf;
+ do {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ va = pv->pv_va;
+ l2 = pmap_l2(pmap, va);
+ l2e = pmap_load(l2);
+ if ((l2e & (PTE_W | PTE_D)) == (PTE_W | PTE_D)) {
+ /*
+ * Although l2e is mapping a 2MB page, because
+ * this function is called at a 4KB page granularity,
+ * we only update the 4KB page under test.
+ */
+ vm_page_dirty(m);
+ }
+ if ((l2e & PTE_A) != 0) {
+ /*
+ * Since this reference bit is shared by 512 4KB
+ * pages, it should not be cleared every time it is
+ * tested. Apply a simple "hash" function on the
+ * physical page number, the virtual superpage number,
+ * and the pmap address to select one 4KB page out of
+ * the 512 on which testing the reference bit will
+ * result in clearing that reference bit. This
+ * function is designed to avoid the selection of the
+ * same 4KB page for every 2MB page mapping.
+ *
+ * On demotion, a mapping that hasn't been referenced
+ * is simply destroyed. To avoid the possibility of a
+ * subsequent page fault on a demoted wired mapping,
+ * always leave its reference bit set. Moreover,
+ * since the superpage is wired, the current state of
+ * its reference bit won't affect page replacement.
+ */
+ if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
+ (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
+ (l2e & PTE_SW_WIRED) == 0) {
+ pmap_clear_bits(l2, PTE_A);
+ pmap_invalidate_page(pmap, va);
+ }
+ ret++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ }
+ if (ret >= PMAP_TS_REFERENCED_MAX)
+ goto out;
+ } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
+small_mappings:
if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
goto out;
pv = pvf;
do {
- if (pvf == NULL)
- pvf = pv;
pmap = PV_PMAP(pv);
if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
md_gen = m->md.pv_gen;
rw_wunlock(lock);
PMAP_LOCK(pmap);
rw_wlock(lock);
- if (md_gen != m->md.pv_gen) {
+ if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
PMAP_UNLOCK(pmap);
goto retry;
}
@@ -4056,36 +4115,21 @@
("pmap_ts_referenced: found an invalid l2 table"));
l3 = pmap_l2_to_l3(l2, pv->pv_va);
- old_l3 = pmap_load(l3);
- if ((old_l3 & PTE_D) != 0)
+ l3e = pmap_load(l3);
+ if ((l3e & PTE_D) != 0)
vm_page_dirty(m);
- if ((old_l3 & PTE_A) != 0) {
- if (safe_to_clear_referenced(pmap, old_l3)) {
- /*
- * TODO: We don't handle the access flag
- * at all. We need to be able to set it in
- * the exception handler.
- */
- panic("RISCVTODO: safe_to_clear_referenced\n");
- } else if ((old_l3 & PTE_SW_WIRED) == 0) {
+ if ((l3e & PTE_A) != 0) {
+ if ((l3e & PTE_SW_WIRED) == 0) {
/*
* Wired pages cannot be paged out so
* doing accessed bit emulation for
* them is wasted effort. We do the
* hard work for unwired pages only.
*/
- pmap_remove_l3(pmap, l3, pv->pv_va,
- pmap_load(l2), &free, &lock);
+ pmap_clear_bits(l3, PTE_A);
pmap_invalidate_page(pmap, pv->pv_va);
- cleared++;
- if (pvf == pv)
- pvf = NULL;
- pv = NULL;
- KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
- ("inconsistent pv lock %p %p for page %p",
- lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
- } else
- not_cleared++;
+ }
+ ret++;
}
PMAP_UNLOCK(pmap);
/* Rotate the PV list if it has more than one entry. */
@@ -4094,13 +4138,13 @@
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
}
- } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
- not_cleared < PMAP_TS_REFERENCED_MAX);
+ } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && ret <
+ PMAP_TS_REFERENCED_MAX);
out:
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
vm_page_free_pages_toq(&free, false);
- return (cleared + not_cleared);
+ return (ret);
}
/*
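
The selection "hash" described in the superpage hunk above can be checked in isolation. Below is a minimal userspace sketch, not kernel code: would_clear_referenced() is a hypothetical helper, and the Sv39 constants (PAGE_SHIFT, L2_SHIFT, Ln_ENTRIES) are assumed to match riscv's pte.h. Scanning the 512 4KB pages of one 2MB-aligned superpage shows that exactly one of them satisfies the predicate, so the shared PTE_A bit is cleared on only one test out of 512.

/* hash_demo.c: userspace sketch of the PTE_A sampling predicate. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	PAGE_SHIFT	12	/* 4KB base pages (assumed Sv39 value) */
#define	L2_SHIFT	21	/* 2MB superpages (assumed Sv39 value) */
#define	Ln_ENTRIES	512	/* PTEs per page-table page */

static bool
would_clear_referenced(uint64_t pa, uint64_t va, uint64_t pmap)
{

	/* Mirrors the predicate in the hunk above. */
	return ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ pmap) &
	    (Ln_ENTRIES - 1)) == 0);
}

int
main(void)
{
	uint64_t pmap = 0xffffffc000123000;	/* arbitrary example address */
	uint64_t va = 0x40000000;		/* a 2MB-aligned virtual address */
	uint64_t base = 0x80000000;		/* 2MB-aligned physical superpage */
	int hits = 0;

	/* Walk the 512 4KB pages of the superpage and count selections. */
	for (uint64_t pa = base; pa < base + (1ULL << L2_SHIFT);
	    pa += 1ULL << PAGE_SHIFT) {
		if (would_clear_referenced(pa, va, pmap))
			hits++;
	}
	printf("%d of %d 4KB pages would clear the shared PTE_A bit\n",
	    hits, Ln_ENTRIES);
	return (0);
}

Because pa >> PAGE_SHIFT cycles its low nine bits through all 512 values across a 2MB-aligned superpage, XORing in the fixed virtual superpage number and pmap address merely permutes those values, which is why exactly one page is selected and why the selected page differs between mappings.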

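Both hunks also use the same lock-retry idiom: PMAP_TRYLOCK() while the PV-list lock is held, and on failure a drop-reacquire-revalidate dance keyed on the list's generation count. Here is a hedged pthreads sketch of that pattern, with assumed names (struct pv_list, scan_with_retry) and plain mutexes standing in for the kernel's pmap mutex and PV-list rwlock; it is an illustration of the idiom, not the kernel API.

/* pv_retry.c: compile with cc pv_retry.c -lpthread */
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the kernel's PV-list lock and pmap lock. */
struct pv_list {
	pthread_mutex_t	lock;
	int		pv_gen;	/* bumped on every list modification */
};

struct pmap {
	pthread_mutex_t	lock;
};

static void
scan_with_retry(struct pv_list *pvh, struct pmap *pmap)
{
	int pvh_gen;

retry:
	pthread_mutex_lock(&pvh->lock);
	if (pthread_mutex_trylock(&pmap->lock) != 0) {
		/*
		 * The safe order takes the pmap lock first, so rather
		 * than block while holding the list lock, remember the
		 * generation, drop the list lock, and reacquire both.
		 */
		pvh_gen = pvh->pv_gen;
		pthread_mutex_unlock(&pvh->lock);
		pthread_mutex_lock(&pmap->lock);
		pthread_mutex_lock(&pvh->lock);
		if (pvh_gen != pvh->pv_gen) {
			/* The list changed while unlocked; start over. */
			pthread_mutex_unlock(&pmap->lock);
			pthread_mutex_unlock(&pvh->lock);
			goto retry;
		}
	}
	/* ... walk PV entries and test/clear PTE bits here ... */
	pthread_mutex_unlock(&pmap->lock);
	pthread_mutex_unlock(&pvh->lock);
}

int
main(void)
{
	struct pv_list pvh = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct pmap pmap = { PTHREAD_MUTEX_INITIALIZER };

	scan_with_retry(&pvh, &pmap);
	printf("scan done, pv_gen=%d\n", pvh.pv_gen);
	return (0);
}

The generation counter is the key: any path that mutates the PV list while the scanner has it unlocked bumps pv_gen, so a stale iteration is detected on reacquisition and restarted rather than walking freed entries.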