head/sys/riscv/riscv/pmap.c
@@ … @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     if (prot & VM_PROT_WRITE)
         new_l3 |= PTE_W;
     if ((va >> 63) == 0)
         new_l3 |= PTE_U;
     new_l3 |= (pn << PTE_PPN0_S);
     if ((flags & PMAP_ENTER_WIRED) != 0)
         new_l3 |= PTE_SW_WIRED;
+    if ((m->oflags & VPO_UNMANAGED) == 0)
+        new_l3 |= PTE_SW_MANAGED;
     CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
     mpte = NULL;
     lock = NULL;
     rw_rlock(&pvh_global_lock);
     PMAP_LOCK(pmap);
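Before any locks are taken, the function composes the new leaf PTE: permission bits, then the physical page number, then the software-defined bits. This revision moves the PTE_SW_MANAGED decision up here so the rest of the function can test new_l3 instead of re-deriving it from m->oflags. A minimal standalone sketch of that composition, using the architectural Sv39 bit positions (V/R/W/X/U in bits 0-4, PPN beginning at bit 10) from the RISC-V privileged spec; the sk_* names are hypothetical stand-ins for the kernel's PTE_* macros, not the kernel API:

#include <stdint.h>

#define SK_PTE_V        (1UL << 0)   /* valid */
#define SK_PTE_R        (1UL << 1)   /* readable */
#define SK_PTE_W        (1UL << 2)   /* writable */
#define SK_PTE_X        (1UL << 3)   /* executable */
#define SK_PTE_U        (1UL << 4)   /* accessible in user mode */
#define SK_PTE_PPN0_S   10           /* PPN field starts at bit 10 */
#define SK_PAGE_SHIFT   12           /* 4 KiB base pages */

static uint64_t
sk_make_leaf_pte(uint64_t pa, uint64_t va, int writable)
{
    uint64_t pte;

    /* A leaf PTE must set at least one of R/W/X. */
    pte = SK_PTE_V | SK_PTE_R;
    if (writable)
        pte |= SK_PTE_W;
    /*
     * In Sv39 the user half of the address space has bit 63 clear,
     * which is exactly the (va >> 63) == 0 test above.
     */
    if ((va >> 63) == 0)
        pte |= SK_PTE_U;
    /* "pn" in the kernel code is the physical page number, pa >> PAGE_SHIFT. */
    pte |= (pa >> SK_PAGE_SHIFT) << SK_PTE_PPN0_S;
    return (pte);
}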
@@ … @@ if (l3 == NULL) {
             entry |= (l3_pn << PTE_PPN0_S);
             pmap_load_store(l2, entry);
             PTE_SYNC(l2);
             l3 = pmap_l2_to_l3(l2, va);
         }
         pmap_invalidate_page(pmap, va);
     }
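For context: this hunk runs only when no L3 table yet exists for va. A fresh page-table page is allocated and a non-leaf L2 entry is installed pointing at it (valid bit set, R/W/X all clear, which is what marks an Sv39 PTE as a pointer to the next level rather than a leaf). The index arithmetic pmap_l2_to_l3() then performs is plain Sv39: nine index bits per level above the 12-bit page offset. A hedged sketch with hypothetical sk_* names:

#include <stdint.h>

#define SK_L3_SHIFT 12    /* bits 12-20 of va index the L3 table */
#define SK_L2_SHIFT 21    /* bits 21-29 of va index the L2 table */
#define SK_NPTEPG   512   /* 512 eight-byte PTEs per 4 KiB table page */

/* Return a pointer to the L3 entry for va within an L3 table page,
 * mirroring what pmap_l2_to_l3() computes after following the L2 entry. */
static uint64_t *
sk_l3_entry(uint64_t *l3_table, uint64_t va)
{
    return (&l3_table[(va >> SK_L3_SHIFT) & (SK_NPTEPG - 1)]);
}

/* The same pattern one level up, for the L2 table. */
static uint64_t *
sk_l2_entry(uint64_t *l2_table, uint64_t va)
{
    return (&l2_table[(va >> SK_L2_SHIFT) & (SK_NPTEPG - 1)]);
}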
+    om = NULL;
     orig_l3 = pmap_load(l3);
     opa = PTE_TO_PHYS(orig_l3);
+    pv = NULL;
     /*
      * Is the specified virtual address already mapped?
      */
     if (pmap_l3_valid(orig_l3)) {
         /*
          * Wiring change, just update stats. We don't worry about
          * wiring PT pages as they remain resident as long as there
@@ … @@ if (pmap_l3_valid(orig_l3)) {
         /*
          * Has the physical page changed?
          */
         if (opa == pa) {
             /*
              * No, might be a protection or wiring change.
              */
             if ((orig_l3 & PTE_SW_MANAGED) != 0) {
-                new_l3 |= PTE_SW_MANAGED;
                 if (pmap_is_write(new_l3))
                     vm_page_aflag_set(m, PGA_WRITEABLE);
             }
             goto validate;
         }
         /* Flush the cache, there might be uncommitted data in it */
         if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
             cpu_dcache_wb_range(va, L3_SIZE);
+        /*
+         * The physical page has changed.  Temporarily invalidate
+         * the mapping.  This ensures that all threads sharing the
+         * pmap keep a consistent view of the mapping, which is
+         * necessary for the correct handling of COW faults.  It
+         * also permits reuse of the old mapping's PV entry,
+         * avoiding an allocation.
+         *
+         * For consistency, handle unmanaged mappings the same way.
+         */
+        orig_l3 = pmap_load_clear(l3);
+        KASSERT(PTE_TO_PHYS(orig_l3) == opa,
+            ("pmap_enter: unexpected pa update for %#lx", va));
+        if ((orig_l3 & PTE_SW_MANAGED) != 0) {
+            om = PHYS_TO_VM_PAGE(opa);
+            /*
+             * The pmap lock is sufficient to synchronize with
+             * concurrent calls to pmap_page_test_mappings() and
+             * pmap_ts_referenced().
+             */
+            if (pmap_page_dirty(orig_l3))
+                vm_page_dirty(om);
+            if ((orig_l3 & PTE_A) != 0)
+                vm_page_aflag_set(om, PGA_REFERENCED);
+            CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+            pv = pmap_pvh_remove(&om->md, pmap, va);
+            if ((new_l3 & PTE_SW_MANAGED) == 0)
+                free_pv_entry(pmap, pv);
+            if ((om->aflags & PGA_WRITEABLE) != 0 &&
+                TAILQ_EMPTY(&om->md.pv_list))
+                vm_page_aflag_clear(om, PGA_WRITEABLE);
+        }
+        pmap_invalidate_page(pmap, va);
+        orig_l3 = 0;
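This added block is the heart of the revision. When the physical address under an existing mapping changes, the old PTE is atomically swapped to zero and the TLB entry shot down before the replacement is written, so threads sharing the pmap can never mix the old and new translations (the property COW fault handling depends on), and the old mapping's A/D bits and PV entry are harvested race-free inside that window. A minimal sketch of the ordering, assuming pmap_load_clear() amounts to an atomic 64-bit exchange; the sk_* names are hypothetical:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical stand-in for a one-page TLB shootdown (sfence.vma on
 * the local hart plus IPIs to the others). */
static void sk_invalidate_page(uint64_t va) { (void)va; }

static void
sk_replace_mapping(_Atomic uint64_t *l3, uint64_t va, uint64_t new_l3)
{
    uint64_t orig_l3;

    /* 1. Atomically hide the old mapping and capture its final state;
     *    with the PTE zeroed, no page-table walker can set A/D bits
     *    behind our back. */
    orig_l3 = atomic_exchange(l3, 0);
    /* 2. Harvest referenced/dirty state and the PV entry from orig_l3
     *    here, as the kernel block above does. */
    /* 3. Discard any cached translations of the old mapping. */
    sk_invalidate_page(va);
    /* 4. Only now publish the mapping of the new physical page. */
    atomic_store(l3, new_l3);
    (void)orig_l3;
}

The orig_l3 = 0 assignment at the end of the kernel block then steers the later "Update the L3 entry" code into its plain-store path rather than its swap-and-check path, since there is no longer a previous mapping to reconcile.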
     } else {
         /*
          * Increment the counters.
          */
         if ((new_l3 & PTE_SW_WIRED) != 0)
             pmap->pm_stats.wired_count++;
         pmap_resident_count_inc(pmap, 1);
     }
     /*
      * Enter on the PV list if part of our managed memory.
      */
-    if ((m->oflags & VPO_UNMANAGED) == 0) {
-        new_l3 |= PTE_SW_MANAGED;
-        pv = get_pv_entry(pmap, &lock);
-        pv->pv_va = va;
+    if ((new_l3 & PTE_SW_MANAGED) != 0) {
+        if (pv == NULL) {
+            pv = get_pv_entry(pmap, &lock);
+            pv->pv_va = va;
+        }
         CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
         m->md.pv_gen++;
         if (pmap_is_write(new_l3))
             vm_page_aflag_set(m, PGA_WRITEABLE);
     }
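The PV-list hunk now keys off the PTE_SW_MANAGED bit computed at the top, and it only allocates when pv is still NULL, that is, when no PV entry was recycled from a replaced managed mapping by the invalidation block above (which instead frees the old entry when the new mapping is unmanaged). The decision in miniature, with hypothetical sk_* helpers standing in for get_pv_entry()/free_pv_entry():

#include <stdbool.h>
#include <stdint.h>

struct sk_pv_entry {
    uint64_t pv_va;    /* virtual address this entry tracks */
};

/*
 * Pick the PV entry for the new mapping: recycle the entry removed
 * from the old page when one exists, otherwise allocate; unmanaged
 * mappings get none.
 */
static struct sk_pv_entry *
sk_pick_pv(struct sk_pv_entry *old_pv, bool new_managed,
    struct sk_pv_entry *(*alloc)(void),
    void (*release)(struct sk_pv_entry *))
{
    if (!new_managed) {
        if (old_pv != NULL)
            release(old_pv);    /* no managed page to attach it to */
        return (NULL);
    }
    return (old_pv != NULL ? old_pv : alloc());    /* reuse avoids an allocation */
}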
     /*
      * Update the L3 entry.
      */
     if (orig_l3 != 0) {
 validate:
         orig_l3 = pmap_load_store(l3, new_l3);
         PTE_SYNC(l3);
-        opa = PTE_TO_PHYS(orig_l3);
-        if (opa != pa) {
-            if ((orig_l3 & PTE_SW_MANAGED) != 0) {
-                om = PHYS_TO_VM_PAGE(opa);
-                if (pmap_page_dirty(orig_l3))
-                    vm_page_dirty(om);
-                if ((orig_l3 & PTE_A) != 0)
-                    vm_page_aflag_set(om, PGA_REFERENCED);
-                CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
-                pmap_pvh_free(&om->md, pmap, va);
-            }
-        } else if (pmap_page_dirty(orig_l3)) {
-            if ((orig_l3 & PTE_SW_MANAGED) != 0)
+        KASSERT(PTE_TO_PHYS(orig_l3) == pa,
+            ("pmap_enter: invalid update"));
+        if (pmap_page_dirty(orig_l3) &&
+            (orig_l3 & PTE_SW_MANAGED) != 0)
             vm_page_dirty(m);
-        }
     } else {
         pmap_load_store(l3, new_l3);
         PTE_SYNC(l3);
     }
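Because the changed-pa case is fully resolved before validate: can be reached, PTE_TO_PHYS(orig_l3) == pa is now an invariant at that label, and the old opa != pa cleanup collapses into a KASSERT plus the dirty-bit propagation. What makes reading orig_l3's dirty bit here race-free is that pmap_load_store() hands back the previous PTE from the swap itself. A sketch under the assumption that it reduces to an atomic exchange (sk_* names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SK_PTE_D (1UL << 7)    /* Sv39 dirty bit */

/*
 * Swap in the updated PTE and report whether the page had been
 * written through the old one, mirroring the pmap_load_store() +
 * pmap_page_dirty() pairing above.
 */
static bool
sk_update_pte(_Atomic uint64_t *l3, uint64_t new_l3)
{
    return ((atomic_exchange(l3, new_l3) & SK_PTE_D) != 0);
}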
     pmap_invalidate_page(pmap, va);
     if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
         cpu_icache_sync_range(va, PAGE_SIZE);
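The closing icache sync is there because RISC-V makes no guarantee that instruction fetch observes prior stores; when the current process may execute from the page just mapped, the instruction stream must be synchronized explicitly. On the local hart that boils down to a fence.i (a full cpu_icache_sync_range() also has to reach the other harts). A minimal sketch:

/* Order this hart's instruction fetches after all prior stores. */
static inline void
sk_icache_sync_local(void)
{
    __asm__ __volatile__("fence.i" ::: "memory");
}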