head/sys/arm64/arm64/pmap.c
[… first 144 lines of the file not shown …]
#include <vm/uma.h> | #include <vm/uma.h> | ||||
#include <machine/machdep.h> | #include <machine/machdep.h> | ||||
#include <machine/md_var.h> | #include <machine/md_var.h> | ||||
#include <machine/pcb.h> | #include <machine/pcb.h> | ||||
#include <arm/include/physmem.h> | #include <arm/include/physmem.h> | ||||
#define PMAP_ASSERT_STAGE1(pmap) MPASS((pmap)->pm_stage == PM_STAGE1) | |||||
#define NL0PG (PAGE_SIZE/(sizeof (pd_entry_t))) | #define NL0PG (PAGE_SIZE/(sizeof (pd_entry_t))) | ||||
#define NL1PG (PAGE_SIZE/(sizeof (pd_entry_t))) | #define NL1PG (PAGE_SIZE/(sizeof (pd_entry_t))) | ||||
#define NL2PG (PAGE_SIZE/(sizeof (pd_entry_t))) | #define NL2PG (PAGE_SIZE/(sizeof (pd_entry_t))) | ||||
#define NL3PG (PAGE_SIZE/(sizeof (pt_entry_t))) | #define NL3PG (PAGE_SIZE/(sizeof (pt_entry_t))) | ||||
#define NUL0E L0_ENTRIES | #define NUL0E L0_ENTRIES | ||||
#define NUL1E (NUL0E * NL1PG) | #define NUL1E (NUL0E * NL1PG) | ||||
#define NUL2E (NUL1E * NL2PG) | #define NUL2E (NUL1E * NL2PG) | ||||
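As a quick sanity check of the fan-out these macros encode, assuming the usual 4 KB granule with 8-byte table entries:

/* PAGE_SIZE / sizeof(pd_entry_t) == 4096 / 8 == 512, so           */
/* NL0PG == NL1PG == NL2PG == NL3PG == 512                          */
/* NUL1E == 512 * 512       == 262144     total L1 slots            */
/* NUL2E == 512 * 512 * 512 == 134217728  total L2 slots            */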
[… 420 lines not shown …]
CTASSERT(L1_BLOCK == L2_BLOCK); | CTASSERT(L1_BLOCK == L2_BLOCK); | ||||
/* | /* | ||||
* Checks if the PTE is dirty. | * Checks if the PTE is dirty. | ||||
*/ | */ | ||||
static inline int | static inline int | ||||
pmap_pte_dirty(pt_entry_t pte) | pmap_pte_dirty(pmap_t pmap, pt_entry_t pte) | ||||
{ | { | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte)); | KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte)); | ||||
KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0, | KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0, | ||||
("pte %#lx is writeable and missing ATTR_SW_DBM", pte)); | ("pte %#lx is writeable and missing ATTR_SW_DBM", pte)); | ||||
return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == | return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == | ||||
(ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM)); | (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM)); | ||||
} | } | ||||
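A note on the dirty test above: with the ATTR_SW_DBM scheme, a managed mapping counts as dirty only while its stage-1 AP bits actually permit writes; a clean-but-writeable page is kept read-only in hardware with ATTR_SW_DBM set, and the first store (or the hardware DBM feature, where present) flips it to read/write. The new pmap argument and the PMAP_ASSERT_STAGE1() guard presumably make room for stage-2 pmaps, whose permission encoding differs, so callers now pass the pmap even though only stage 1 is handled so far. Roughly:

/*
 * Managed, VM-writeable 4 KB mapping (sketch):
 *   clean: ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM  -> first store faults
 *   dirty: ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM  -> pmap_pte_dirty() != 0
 */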
[… 240 lines not shown …] (below: pmap_bootstrap)
printf("%lx\n", l1pt); | printf("%lx\n", l1pt); | ||||
printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK); | printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK); | ||||
/* Set this early so we can use the pagetable walking functions */ | /* Set this early so we can use the pagetable walking functions */ | ||||
kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt; | kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt; | ||||
PMAP_LOCK_INIT(kernel_pmap); | PMAP_LOCK_INIT(kernel_pmap); | ||||
kernel_pmap->pm_l0_paddr = l0pt - kern_delta; | kernel_pmap->pm_l0_paddr = l0pt - kern_delta; | ||||
kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN); | kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN); | ||||
kernel_pmap->pm_stage = PM_STAGE1; | |||||
/* Assume the address we were loaded to is a valid physical address */ | /* Assume the address we were loaded to is a valid physical address */ | ||||
min_pa = KERNBASE - kern_delta; | min_pa = KERNBASE - kern_delta; | ||||
physmap_idx = arm_physmem_avail(physmap, nitems(physmap)); | physmap_idx = arm_physmem_avail(physmap, nitems(physmap)); | ||||
physmap_idx /= 2; | physmap_idx /= 2; | ||||
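(arm_physmem_avail() appears to fill physmap[] with start/end address pairs and to return the number of array entries used, so the division by two turns that count into the number of physical memory ranges walked below.)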
/* | /* | ||||
[… 179 lines not shown …]
/* | /* | ||||
* Invalidate a single TLB entry. | * Invalidate a single TLB entry. | ||||
*/ | */ | ||||
static __inline void | static __inline void | ||||
pmap_invalidate_page(pmap_t pmap, vm_offset_t va) | pmap_invalidate_page(pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
uint64_t r; | uint64_t r; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
dsb(ishst); | dsb(ishst); | ||||
if (pmap == kernel_pmap) { | if (pmap == kernel_pmap) { | ||||
r = atop(va); | r = atop(va); | ||||
__asm __volatile("tlbi vaae1is, %0" : : "r" (r)); | __asm __volatile("tlbi vaae1is, %0" : : "r" (r)); | ||||
} else { | } else { | ||||
r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va); | r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va); | ||||
__asm __volatile("tlbi vae1is, %0" : : "r" (r)); | __asm __volatile("tlbi vae1is, %0" : : "r" (r)); | ||||
} | } | ||||
dsb(ish); | dsb(ish); | ||||
isb(); | isb(); | ||||
} | } | ||||
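For reference, the two TLBI forms differ only in scope: "vaae1is" invalidates the page for all ASIDs (used for global kernel mappings), while "vae1is" is qualified by the ASID carried in the operand's upper bits. The operand layout is the architectural one; the helper is assumed to place the ASID accordingly:

/*
 * TLBI VAE1IS / VAAE1IS operand (sketch):
 *   bits [63:48]  ASID (ignored by VAAE1IS)
 *   bits [43:0]   VA[55:12], i.e. the page number from atop(va)
 * ASID_TO_OPERAND() presumably shifts COOKIE_TO_ASID(pm_cookie) into 63:48.
 */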
static __inline void | static __inline void | ||||
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
uint64_t end, r, start; | uint64_t end, r, start; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
dsb(ishst); | dsb(ishst); | ||||
if (pmap == kernel_pmap) { | if (pmap == kernel_pmap) { | ||||
start = atop(sva); | start = atop(sva); | ||||
end = atop(eva); | end = atop(eva); | ||||
for (r = start; r < end; r++) | for (r = start; r < end; r++) | ||||
__asm __volatile("tlbi vaae1is, %0" : : "r" (r)); | __asm __volatile("tlbi vaae1is, %0" : : "r" (r)); | ||||
} else { | } else { | ||||
start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); | start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); | ||||
start |= atop(sva); | start |= atop(sva); | ||||
end |= atop(eva); | end |= atop(eva); | ||||
for (r = start; r < end; r++) | for (r = start; r < end; r++) | ||||
__asm __volatile("tlbi vae1is, %0" : : "r" (r)); | __asm __volatile("tlbi vae1is, %0" : : "r" (r)); | ||||
} | } | ||||
dsb(ish); | dsb(ish); | ||||
isb(); | isb(); | ||||
} | } | ||||
static __inline void | static __inline void | ||||
pmap_invalidate_all(pmap_t pmap) | pmap_invalidate_all(pmap_t pmap) | ||||
{ | { | ||||
uint64_t r; | uint64_t r; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
dsb(ishst); | dsb(ishst); | ||||
if (pmap == kernel_pmap) { | if (pmap == kernel_pmap) { | ||||
__asm __volatile("tlbi vmalle1is"); | __asm __volatile("tlbi vmalle1is"); | ||||
} else { | } else { | ||||
r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); | r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); | ||||
__asm __volatile("tlbi aside1is, %0" : : "r" (r)); | __asm __volatile("tlbi aside1is, %0" : : "r" (r)); | ||||
} | } | ||||
dsb(ish); | dsb(ish); | ||||
[… 58 lines not shown …]
vm_page_t | vm_page_t | ||||
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) | pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) | ||||
{ | { | ||||
pt_entry_t *pte, tpte; | pt_entry_t *pte, tpte; | ||||
vm_offset_t off; | vm_offset_t off; | ||||
vm_page_t m; | vm_page_t m; | ||||
int lvl; | int lvl; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
m = NULL; | m = NULL; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
pte = pmap_pte(pmap, va, &lvl); | pte = pmap_pte(pmap, va, &lvl); | ||||
if (pte != NULL) { | if (pte != NULL) { | ||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | ||||
KASSERT(lvl > 0 && lvl <= 3, | KASSERT(lvl > 0 && lvl <= 3, | ||||
("pmap_extract_and_hold: Invalid level %d", lvl)); | ("pmap_extract_and_hold: Invalid level %d", lvl)); | ||||
[… 370 lines not shown …]
{ | { | ||||
PMAP_LOCK_INIT(pmap); | PMAP_LOCK_INIT(pmap); | ||||
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); | bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); | ||||
pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1); | pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1); | ||||
pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr); | pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr); | ||||
pmap->pm_root.rt_root = 0; | pmap->pm_root.rt_root = 0; | ||||
pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN); | pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN); | ||||
pmap->pm_stage = PM_STAGE1; | |||||
PCPU_SET(curpmap, pmap); | PCPU_SET(curpmap, pmap); | ||||
} | } | ||||
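The cookie presumably packs the ASID and an allocation epoch into a single word, e.g. ASID in the low half and epoch in the high half. A negative epoch such as INT_MIN can never match the live asid_epoch, and the activation path appears to skip reallocation for negative epochs, so the reserved ASID installed here is kept permanently; by contrast, pmap_pinit() below stamps its cookie with INT_MAX, a stale but non-negative epoch that forces a fresh ASID no later than the first activation (the "deferred" allocation its XXX comment refers to). A hypothetical sketch of the packing:

/* Hypothetical layout; the real COOKIE_* macros live elsewhere in this file. */
#define	EXAMPLE_COOKIE_FROM(asid, epoch) ((long)((u_int)(asid) | ((u_long)(epoch) << 32)))
#define	EXAMPLE_COOKIE_TO_ASID(c)	((int)(c))
#define	EXAMPLE_COOKIE_TO_EPOCH(c)	((int)((u_long)(c) >> 32))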
int | int | ||||
pmap_pinit(pmap_t pmap) | pmap_pinit(pmap_t pmap) | ||||
{ | { | ||||
vm_page_t l0pt; | vm_page_t l0pt; | ||||
[… 9 lines not shown …] (below: pmap_pinit)
pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr); | pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr); | ||||
if ((l0pt->flags & PG_ZERO) == 0) | if ((l0pt->flags & PG_ZERO) == 0) | ||||
pagezero(pmap->pm_l0); | pagezero(pmap->pm_l0); | ||||
pmap->pm_root.rt_root = 0; | pmap->pm_root.rt_root = 0; | ||||
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); | bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); | ||||
pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX); | pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX); | ||||
pmap->pm_stage = PM_STAGE1; | |||||
/* XXX Temporarily disable deferred ASID allocation. */ | /* XXX Temporarily disable deferred ASID allocation. */ | ||||
pmap_alloc_asid(pmap); | pmap_alloc_asid(pmap); | ||||
return (1); | return (1); | ||||
} | } | ||||
/* | /* | ||||
* This routine is called if the desired page table page does not exist. | * This routine is called if the desired page table page does not exist. | ||||
[… 248 lines not shown …] (below: pmap_release)
vm_page_t m; | vm_page_t m; | ||||
int asid; | int asid; | ||||
KASSERT(pmap->pm_stats.resident_count == 0, | KASSERT(pmap->pm_stats.resident_count == 0, | ||||
("pmap_release: pmap resident count %ld != 0", | ("pmap_release: pmap resident count %ld != 0", | ||||
pmap->pm_stats.resident_count)); | pmap->pm_stats.resident_count)); | ||||
KASSERT(vm_radix_is_empty(&pmap->pm_root), | KASSERT(vm_radix_is_empty(&pmap->pm_root), | ||||
("pmap_release: pmap has reserved page table page(s)")); | ("pmap_release: pmap has reserved page table page(s)")); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
mtx_lock_spin(&asid_set_mutex); | mtx_lock_spin(&asid_set_mutex); | ||||
if (COOKIE_TO_EPOCH(pmap->pm_cookie) == asid_epoch) { | if (COOKIE_TO_EPOCH(pmap->pm_cookie) == asid_epoch) { | ||||
asid = COOKIE_TO_ASID(pmap->pm_cookie); | asid = COOKIE_TO_ASID(pmap->pm_cookie); | ||||
KASSERT(asid >= ASID_FIRST_AVAILABLE && asid < asid_set_size, | KASSERT(asid >= ASID_FIRST_AVAILABLE && asid < asid_set_size, | ||||
("pmap_release: pmap cookie has out-of-range asid")); | ("pmap_release: pmap cookie has out-of-range asid")); | ||||
bit_clear(asid_set, asid); | bit_clear(asid_set, asid); | ||||
} | } | ||||
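The epoch comparison above is what keeps this path safe across ASID-space rollovers: if pmap_reset_asid_set() has run since this pmap last obtained its ASID, the numeric ASID may already belong to another pmap, so the bit in asid_set must be left alone; only a cookie stamped with the current epoch still owns its bit.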
[… 245 lines not shown …]
if (lvl != 2) | if (lvl != 2) | ||||
continue; | continue; | ||||
pte = pmap_l2_to_l3(pde, va); | pte = pmap_l2_to_l3(pde, va); | ||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | ||||
if ((tpte & ATTR_SW_WIRED) != 0) | if ((tpte & ATTR_SW_WIRED) != 0) | ||||
continue; | continue; | ||||
tpte = pmap_load_clear(pte); | tpte = pmap_load_clear(pte); | ||||
m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK); | m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK); | ||||
if (pmap_pte_dirty(tpte)) | if (pmap_pte_dirty(pmap, tpte)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if ((tpte & ATTR_AF) != 0) { | if ((tpte & ATTR_AF) != 0) { | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
} | } | ||||
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | ||||
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | ||||
m->md.pv_gen++; | m->md.pv_gen++; | ||||
[… 482 lines not shown …] (below: pmap_remove_l2)
pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE); | pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE); | ||||
if (old_l2 & ATTR_SW_MANAGED) { | if (old_l2 & ATTR_SW_MANAGED) { | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK); | CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK); | ||||
pvh = pa_to_pvh(old_l2 & ~ATTR_MASK); | pvh = pa_to_pvh(old_l2 & ~ATTR_MASK); | ||||
pmap_pvh_free(pvh, pmap, sva); | pmap_pvh_free(pvh, pmap, sva); | ||||
eva = sva + L2_SIZE; | eva = sva + L2_SIZE; | ||||
for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK); | for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK); | ||||
va < eva; va += PAGE_SIZE, m++) { | va < eva; va += PAGE_SIZE, m++) { | ||||
if (pmap_pte_dirty(old_l2)) | if (pmap_pte_dirty(pmap, old_l2)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if (old_l2 & ATTR_AF) | if (old_l2 & ATTR_AF) | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
if (TAILQ_EMPTY(&m->md.pv_list) && | if (TAILQ_EMPTY(&m->md.pv_list) && | ||||
TAILQ_EMPTY(&pvh->pv_list)) | TAILQ_EMPTY(&pvh->pv_list)) | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
} | } | ||||
} | } | ||||
[… 28 lines not shown …] (below: pmap_remove_l3)
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
old_l3 = pmap_load_clear(l3); | old_l3 = pmap_load_clear(l3); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
if (old_l3 & ATTR_SW_WIRED) | if (old_l3 & ATTR_SW_WIRED) | ||||
pmap->pm_stats.wired_count -= 1; | pmap->pm_stats.wired_count -= 1; | ||||
pmap_resident_count_dec(pmap, 1); | pmap_resident_count_dec(pmap, 1); | ||||
if (old_l3 & ATTR_SW_MANAGED) { | if (old_l3 & ATTR_SW_MANAGED) { | ||||
m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK); | m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK); | ||||
if (pmap_pte_dirty(old_l3)) | if (pmap_pte_dirty(pmap, old_l3)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if (old_l3 & ATTR_AF) | if (old_l3 & ATTR_AF) | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | ||||
pmap_pvh_free(&m->md, pmap, va); | pmap_pvh_free(&m->md, pmap, va); | ||||
if (TAILQ_EMPTY(&m->md.pv_list) && | if (TAILQ_EMPTY(&m->md.pv_list) && | ||||
(m->flags & PG_FICTITIOUS) == 0) { | (m->flags & PG_FICTITIOUS) == 0) { | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
[… 33 lines not shown …]
continue; | continue; | ||||
} | } | ||||
old_l3 = pmap_load_clear(l3); | old_l3 = pmap_load_clear(l3); | ||||
if ((old_l3 & ATTR_SW_WIRED) != 0) | if ((old_l3 & ATTR_SW_WIRED) != 0) | ||||
pmap->pm_stats.wired_count--; | pmap->pm_stats.wired_count--; | ||||
pmap_resident_count_dec(pmap, 1); | pmap_resident_count_dec(pmap, 1); | ||||
if ((old_l3 & ATTR_SW_MANAGED) != 0) { | if ((old_l3 & ATTR_SW_MANAGED) != 0) { | ||||
m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK); | m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK); | ||||
if (pmap_pte_dirty(old_l3)) | if (pmap_pte_dirty(pmap, old_l3)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if ((old_l3 & ATTR_AF) != 0) | if ((old_l3 & ATTR_AF) != 0) | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m)); | new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m)); | ||||
if (new_lock != *lockp) { | if (new_lock != *lockp) { | ||||
if (*lockp != NULL) { | if (*lockp != NULL) { | ||||
/* | /* | ||||
* Pending TLB invalidations must be | * Pending TLB invalidations must be | ||||
[… 180 lines not shown …]
KASSERT(lvl == 2, | KASSERT(lvl == 2, | ||||
("pmap_remove_all: invalid pte level %d", lvl)); | ("pmap_remove_all: invalid pte level %d", lvl)); | ||||
pmap_demote_l2_locked(pmap, pte, va, &lock); | pmap_demote_l2_locked(pmap, pte, va, &lock); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { | while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
pvh_gen = pvh->pv_gen; | pvh_gen = pvh->pv_gen; | ||||
md_gen = m->md.pv_gen; | md_gen = m->md.pv_gen; | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { | if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
[… 17 lines not shown …]
if ((tpte & ATTR_AF) != 0) { | if ((tpte & ATTR_AF) != 0) { | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va); | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
} | } | ||||
/* | /* | ||||
* Update the vm_page_t clean and reference bits. | * Update the vm_page_t clean and reference bits. | ||||
*/ | */ | ||||
if (pmap_pte_dirty(tpte)) | if (pmap_pte_dirty(pmap, tpte)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
pmap_unuse_pt(pmap, pv->pv_va, tpde, &free); | pmap_unuse_pt(pmap, pv->pv_va, tpde, &free); | ||||
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | ||||
m->md.pv_gen++; | m->md.pv_gen++; | ||||
free_pv_entry(pmap, pv); | free_pv_entry(pmap, pv); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
} | } | ||||
/* | /* | ||||
* pmap_protect_l2: do the things to protect a 2MB page in a pmap | * pmap_protect_l2: do the things to protect a 2MB page in a pmap | ||||
*/ | */ | ||||
static void | static void | ||||
pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask, | pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask, | ||||
pt_entry_t nbits) | pt_entry_t nbits) | ||||
{ | { | ||||
pd_entry_t old_l2; | pd_entry_t old_l2; | ||||
vm_page_t m, mt; | vm_page_t m, mt; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
KASSERT((sva & L2_OFFSET) == 0, | KASSERT((sva & L2_OFFSET) == 0, | ||||
("pmap_protect_l2: sva is not 2mpage aligned")); | ("pmap_protect_l2: sva is not 2mpage aligned")); | ||||
old_l2 = pmap_load(l2); | old_l2 = pmap_load(l2); | ||||
KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK, | KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK, | ||||
("pmap_protect_l2: L2e %lx is not a block mapping", old_l2)); | ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2)); | ||||
/* | /* | ||||
* Return if the L2 entry already has the desired access restrictions | * Return if the L2 entry already has the desired access restrictions | ||||
* in place. | * in place. | ||||
*/ | */ | ||||
retry: | retry: | ||||
if ((old_l2 & mask) == nbits) | if ((old_l2 & mask) == nbits) | ||||
return; | return; | ||||
/* | /* | ||||
* When a dirty read/write superpage mapping is write protected, | * When a dirty read/write superpage mapping is write protected, | ||||
* update the dirty field of each of the superpage's constituent 4KB | * update the dirty field of each of the superpage's constituent 4KB | ||||
* pages. | * pages. | ||||
*/ | */ | ||||
if ((old_l2 & ATTR_SW_MANAGED) != 0 && | if ((old_l2 & ATTR_SW_MANAGED) != 0 && | ||||
(nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 && | (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 && | ||||
pmap_pte_dirty(old_l2)) { | pmap_pte_dirty(pmap, old_l2)) { | ||||
m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK); | m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK); | ||||
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) | for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) | ||||
vm_page_dirty(mt); | vm_page_dirty(mt); | ||||
} | } | ||||
if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits)) | if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits)) | ||||
goto retry; | goto retry; | ||||
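The retry loop works because atomic_fcmpset_64() rewrites old_l2 with the entry's current value when the compare fails, so both the early-return test and the dirty-bit transfer above are re-evaluated against fresh bits before the masked update is attempted again. A minimal sketch of the same idiom (the helper name is made up):

/* Compare-and-swap retry pattern used by pmap_protect_l2() and pmap_protect(). */
static void
example_update_pte(pt_entry_t *ptep, pt_entry_t mask, pt_entry_t nbits)
{
	pt_entry_t old;

	old = pmap_load(ptep);
	while (!atomic_fcmpset_64(ptep, &old, (old & ~mask) | nbits)) {
		/* "old" now holds the value the failed CAS observed; retry. */
	}
}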
[… 10 lines not shown …]
*/ | */ | ||||
void | void | ||||
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) | pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) | ||||
{ | { | ||||
vm_offset_t va, va_next; | vm_offset_t va, va_next; | ||||
pd_entry_t *l0, *l1, *l2; | pd_entry_t *l0, *l1, *l2; | ||||
pt_entry_t *l3p, l3, mask, nbits; | pt_entry_t *l3p, l3, mask, nbits; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); | KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); | ||||
if (prot == VM_PROT_NONE) { | if (prot == VM_PROT_NONE) { | ||||
pmap_remove(pmap, sva, eva); | pmap_remove(pmap, sva, eva); | ||||
return; | return; | ||||
} | } | ||||
mask = nbits = 0; | mask = nbits = 0; | ||||
if ((prot & VM_PROT_WRITE) == 0) { | if ((prot & VM_PROT_WRITE) == 0) { | ||||
[… 69 lines not shown …]
} | } | ||||
/* | /* | ||||
* When a dirty read/write mapping is write protected, | * When a dirty read/write mapping is write protected, | ||||
* update the page's dirty field. | * update the page's dirty field. | ||||
*/ | */ | ||||
if ((l3 & ATTR_SW_MANAGED) != 0 && | if ((l3 & ATTR_SW_MANAGED) != 0 && | ||||
(nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 && | (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 && | ||||
pmap_pte_dirty(l3)) | pmap_pte_dirty(pmap, l3)) | ||||
vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK)); | vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK)); | ||||
if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits)) | if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits)) | ||||
goto retry; | goto retry; | ||||
if (va == va_next) | if (va == va_next) | ||||
va = sva; | va = sva; | ||||
} | } | ||||
if (va != va_next) | if (va != va_next) | ||||
[… 121 lines not shown …]
pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, | pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, | ||||
struct rwlock **lockp) | struct rwlock **lockp) | ||||
{ | { | ||||
pt_entry_t *firstl3, *l3, newl2, oldl3, pa; | pt_entry_t *firstl3, *l3, newl2, oldl3, pa; | ||||
vm_page_t mpte; | vm_page_t mpte; | ||||
vm_offset_t sva; | vm_offset_t sva; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
sva = va & ~L2_OFFSET; | sva = va & ~L2_OFFSET; | ||||
firstl3 = pmap_l2_to_l3(l2, sva); | firstl3 = pmap_l2_to_l3(l2, sva); | ||||
newl2 = pmap_load(firstl3); | newl2 = pmap_load(firstl3); | ||||
setl2: | setl2: | ||||
if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) { | if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) { | ||||
atomic_add_long(&pmap_l2_p_failures, 1); | atomic_add_long(&pmap_l2_p_failures, 1); | ||||
[… 83 lines not shown …] (below: pmap_enter)
pt_entry_t new_l3, orig_l3; | pt_entry_t new_l3, orig_l3; | ||||
pt_entry_t *l2, *l3; | pt_entry_t *l2, *l3; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
vm_paddr_t opa, pa; | vm_paddr_t opa, pa; | ||||
vm_page_t mpte, om; | vm_page_t mpte, om; | ||||
boolean_t nosleep; | boolean_t nosleep; | ||||
int lvl, rv; | int lvl, rv; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
va = trunc_page(va); | va = trunc_page(va); | ||||
if ((m->oflags & VPO_UNMANAGED) == 0) | if ((m->oflags & VPO_UNMANAGED) == 0) | ||||
VM_PAGE_OBJECT_BUSY_ASSERT(m); | VM_PAGE_OBJECT_BUSY_ASSERT(m); | ||||
pa = VM_PAGE_TO_PHYS(m); | pa = VM_PAGE_TO_PHYS(m); | ||||
new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) | | new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) | | ||||
L3_PAGE); | L3_PAGE); | ||||
if ((prot & VM_PROT_WRITE) == 0) | if ((prot & VM_PROT_WRITE) == 0) | ||||
new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO); | new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO); | ||||
[… 132 lines not shown …]
if ((orig_l3 & ATTR_SW_MANAGED) != 0) { | if ((orig_l3 & ATTR_SW_MANAGED) != 0) { | ||||
om = PHYS_TO_VM_PAGE(opa); | om = PHYS_TO_VM_PAGE(opa); | ||||
/* | /* | ||||
* The pmap lock is sufficient to synchronize with | * The pmap lock is sufficient to synchronize with | ||||
* concurrent calls to pmap_page_test_mappings() and | * concurrent calls to pmap_page_test_mappings() and | ||||
* pmap_ts_referenced(). | * pmap_ts_referenced(). | ||||
*/ | */ | ||||
if (pmap_pte_dirty(orig_l3)) | if (pmap_pte_dirty(pmap, orig_l3)) | ||||
vm_page_dirty(om); | vm_page_dirty(om); | ||||
if ((orig_l3 & ATTR_AF) != 0) { | if ((orig_l3 & ATTR_AF) != 0) { | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
vm_page_aflag_set(om, PGA_REFERENCED); | vm_page_aflag_set(om, PGA_REFERENCED); | ||||
} | } | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); | CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); | ||||
pv = pmap_pvh_remove(&om->md, pmap, va); | pv = pmap_pvh_remove(&om->md, pmap, va); | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) | if ((m->oflags & VPO_UNMANAGED) != 0) | ||||
[… 54 lines not shown …]
*/ | */ | ||||
if (pmap_l3_valid(orig_l3)) { | if (pmap_l3_valid(orig_l3)) { | ||||
KASSERT(opa == pa, ("pmap_enter: invalid update")); | KASSERT(opa == pa, ("pmap_enter: invalid update")); | ||||
if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) { | if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) { | ||||
/* same PA, different attributes */ | /* same PA, different attributes */ | ||||
orig_l3 = pmap_load_store(l3, new_l3); | orig_l3 = pmap_load_store(l3, new_l3); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
if ((orig_l3 & ATTR_SW_MANAGED) != 0 && | if ((orig_l3 & ATTR_SW_MANAGED) != 0 && | ||||
pmap_pte_dirty(orig_l3)) | pmap_pte_dirty(pmap, orig_l3)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
} else { | } else { | ||||
/* | /* | ||||
* orig_l3 == new_l3 | * orig_l3 == new_l3 | ||||
* This can happen if multiple threads simultaneously | * This can happen if multiple threads simultaneously | ||||
* access a not-yet-mapped page. This is bad for | * access a not-yet-mapped page. This is bad for | ||||
* performance since it can cause a full | * performance since it can cause a full | ||||
* demotion-NOP-promotion cycle. | * demotion-NOP-promotion cycle. | ||||
[… 38 lines not shown …]
*/ | */ | ||||
static bool | static bool | ||||
pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | ||||
struct rwlock **lockp) | struct rwlock **lockp) | ||||
{ | { | ||||
pd_entry_t new_l2; | pd_entry_t new_l2; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | | new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | | ||||
ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | | ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | | ||||
L2_BLOCK); | L2_BLOCK); | ||||
if ((m->oflags & VPO_UNMANAGED) == 0) { | if ((m->oflags & VPO_UNMANAGED) == 0) { | ||||
new_l2 |= ATTR_SW_MANAGED; | new_l2 |= ATTR_SW_MANAGED; | ||||
new_l2 &= ~ATTR_AF; | new_l2 &= ~ATTR_AF; | ||||
} | } | ||||
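Clearing ATTR_AF here installs the superpage without the Access flag, so the first reference either takes an access-flag fault or (with hardware AF management) has the flag set by the MMU; either way the referenced state for managed mappings appears to be tracked lazily, and pmap_ts_referenced() can later clear ATTR_AF again to re-arm the tracking.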
[… 212 lines not shown …] (below: pmap_enter_quick_locked)
pt_entry_t *l2, *l3, l3_val; | pt_entry_t *l2, *l3, l3_val; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
int lvl; | int lvl; | ||||
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || | KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || | ||||
(m->oflags & VPO_UNMANAGED) != 0, | (m->oflags & VPO_UNMANAGED) != 0, | ||||
("pmap_enter_quick_locked: managed mapping within the clean submap")); | ("pmap_enter_quick_locked: managed mapping within the clean submap")); | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va); | CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va); | ||||
/* | /* | ||||
* In the case that a page table page is not | * In the case that a page table page is not | ||||
* resident, we are creating it here. | * resident, we are creating it here. | ||||
*/ | */ | ||||
if (va < VM_MAXUSER_ADDRESS) { | if (va < VM_MAXUSER_ADDRESS) { | ||||
vm_pindex_t l2pindex; | vm_pindex_t l2pindex; | ||||
[… 219 lines not shown …] (below: pmap_copy)
vm_offset_t src_addr) | vm_offset_t src_addr) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pd_entry_t *l0, *l1, *l2, srcptepaddr; | pd_entry_t *l0, *l1, *l2, srcptepaddr; | ||||
pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte; | pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte; | ||||
vm_offset_t addr, end_addr, va_next; | vm_offset_t addr, end_addr, va_next; | ||||
vm_page_t dst_l2pg, dstmpte, srcmpte; | vm_page_t dst_l2pg, dstmpte, srcmpte; | ||||
PMAP_ASSERT_STAGE1(dst_pmap); | |||||
PMAP_ASSERT_STAGE1(src_pmap); | |||||
if (dst_addr != src_addr) | if (dst_addr != src_addr) | ||||
return; | return; | ||||
end_addr = src_addr + len; | end_addr = src_addr + len; | ||||
lock = NULL; | lock = NULL; | ||||
if (dst_pmap < src_pmap) { | if (dst_pmap < src_pmap) { | ||||
PMAP_LOCK(dst_pmap); | PMAP_LOCK(dst_pmap); | ||||
PMAP_LOCK(src_pmap); | PMAP_LOCK(src_pmap); | ||||
} else { | } else { | ||||
[… 440 lines not shown …]
* processors, the dirty bit cannot have | * processors, the dirty bit cannot have | ||||
* changed state since we last loaded pte. | * changed state since we last loaded pte. | ||||
*/ | */ | ||||
pmap_clear(pte); | pmap_clear(pte); | ||||
/* | /* | ||||
* Update the vm_page_t clean/reference bits. | * Update the vm_page_t clean/reference bits. | ||||
*/ | */ | ||||
if (pmap_pte_dirty(tpte)) { | if (pmap_pte_dirty(pmap, tpte)) { | ||||
switch (lvl) { | switch (lvl) { | ||||
case 1: | case 1: | ||||
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) | for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) | ||||
vm_page_dirty(mt); | vm_page_dirty(mt); | ||||
break; | break; | ||||
case 2: | case 2: | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
break; | break; | ||||
[… 81 lines not shown …] (below: pmap_page_test_mappings)
boolean_t rv; | boolean_t rv; | ||||
rv = FALSE; | rv = FALSE; | ||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
rw_rlock(lock); | rw_rlock(lock); | ||||
restart: | restart: | ||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
md_gen = m->md.pv_gen; | md_gen = m->md.pv_gen; | ||||
rw_runlock(lock); | rw_runlock(lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_rlock(lock); | rw_rlock(lock); | ||||
if (md_gen != m->md.pv_gen) { | if (md_gen != m->md.pv_gen) { | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
goto restart; | goto restart; | ||||
[… 16 lines not shown …]
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
if (rv) | if (rv) | ||||
goto out; | goto out; | ||||
} | } | ||||
if ((m->flags & PG_FICTITIOUS) == 0) { | if ((m->flags & PG_FICTITIOUS) == 0) { | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { | TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
md_gen = m->md.pv_gen; | md_gen = m->md.pv_gen; | ||||
pvh_gen = pvh->pv_gen; | pvh_gen = pvh->pv_gen; | ||||
rw_runlock(lock); | rw_runlock(lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_rlock(lock); | rw_rlock(lock); | ||||
if (md_gen != m->md.pv_gen || | if (md_gen != m->md.pv_gen || | ||||
pvh_gen != pvh->pv_gen) { | pvh_gen != pvh->pv_gen) { | ||||
[… 106 lines not shown …]
return; | return; | ||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : | pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : | ||||
pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
retry_pv_loop: | retry_pv_loop: | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { | TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
pvh_gen = pvh->pv_gen; | pvh_gen = pvh->pv_gen; | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
if (pvh_gen != pvh->pv_gen) { | if (pvh_gen != pvh->pv_gen) { | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
goto retry_pv_loop; | goto retry_pv_loop; | ||||
} | } | ||||
} | } | ||||
va = pv->pv_va; | va = pv->pv_va; | ||||
pte = pmap_pte(pmap, pv->pv_va, &lvl); | pte = pmap_pte(pmap, pv->pv_va, &lvl); | ||||
if ((pmap_load(pte) & ATTR_SW_DBM) != 0) | if ((pmap_load(pte) & ATTR_SW_DBM) != 0) | ||||
(void)pmap_demote_l2_locked(pmap, pte, va, &lock); | (void)pmap_demote_l2_locked(pmap, pte, va, &lock); | ||||
KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), | KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), | ||||
("inconsistent pv lock %p %p for page %p", | ("inconsistent pv lock %p %p for page %p", | ||||
lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); | lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
pvh_gen = pvh->pv_gen; | pvh_gen = pvh->pv_gen; | ||||
md_gen = m->md.pv_gen; | md_gen = m->md.pv_gen; | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
if (pvh_gen != pvh->pv_gen || | if (pvh_gen != pvh->pv_gen || | ||||
md_gen != m->md.pv_gen) { | md_gen != m->md.pv_gen) { | ||||
[… 82 lines not shown …]
KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found")); | KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found")); | ||||
KASSERT(lvl == 1, | KASSERT(lvl == 1, | ||||
("pmap_ts_referenced: invalid pde level %d", lvl)); | ("pmap_ts_referenced: invalid pde level %d", lvl)); | ||||
tpde = pmap_load(pde); | tpde = pmap_load(pde); | ||||
KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE, | KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE, | ||||
("pmap_ts_referenced: found an invalid l1 table")); | ("pmap_ts_referenced: found an invalid l1 table")); | ||||
pte = pmap_l1_to_l2(pde, pv->pv_va); | pte = pmap_l1_to_l2(pde, pv->pv_va); | ||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | ||||
if (pmap_pte_dirty(tpte)) { | if (pmap_pte_dirty(pmap, tpte)) { | ||||
/* | /* | ||||
* Although "tpte" is mapping a 2MB page, because | * Although "tpte" is mapping a 2MB page, because | ||||
* this function is called at a 4KB page granularity, | * this function is called at a 4KB page granularity, | ||||
* we only update the 4KB page under test. | * we only update the 4KB page under test. | ||||
*/ | */ | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
} | } | ||||
[… 58 lines not shown …]
KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found")); | KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found")); | ||||
KASSERT(lvl == 2, | KASSERT(lvl == 2, | ||||
("pmap_ts_referenced: invalid pde level %d", lvl)); | ("pmap_ts_referenced: invalid pde level %d", lvl)); | ||||
tpde = pmap_load(pde); | tpde = pmap_load(pde); | ||||
KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE, | KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE, | ||||
("pmap_ts_referenced: found an invalid l2 table")); | ("pmap_ts_referenced: found an invalid l2 table")); | ||||
pte = pmap_l2_to_l3(pde, pv->pv_va); | pte = pmap_l2_to_l3(pde, pv->pv_va); | ||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | ||||
if (pmap_pte_dirty(tpte)) | if (pmap_pte_dirty(pmap, tpte)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if ((tpte & ATTR_AF) != 0) { | if ((tpte & ATTR_AF) != 0) { | ||||
if ((tpte & ATTR_SW_WIRED) == 0) { | if ((tpte & ATTR_SW_WIRED) == 0) { | ||||
pmap_clear_bits(pte, ATTR_AF); | pmap_clear_bits(pte, ATTR_AF); | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va); | ||||
cleared++; | cleared++; | ||||
} else | } else | ||||
not_cleared++; | not_cleared++; | ||||
[… 22 lines not shown …]
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) | pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
vm_offset_t va, va_next; | vm_offset_t va, va_next; | ||||
vm_page_t m; | vm_page_t m; | ||||
pd_entry_t *l0, *l1, *l2, oldl2; | pd_entry_t *l0, *l1, *l2, oldl2; | ||||
pt_entry_t *l3, oldl3; | pt_entry_t *l3, oldl3; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (advice != MADV_DONTNEED && advice != MADV_FREE) | if (advice != MADV_DONTNEED && advice != MADV_FREE) | ||||
return; | return; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
for (; sva < eva; sva = va_next) { | for (; sva < eva; sva = va_next) { | ||||
l0 = pmap_l0(pmap, sva); | l0 = pmap_l0(pmap, sva); | ||||
if (pmap_load(l0) == 0) { | if (pmap_load(l0) == 0) { | ||||
va_next = (sva + L0_SIZE) & ~L0_OFFSET; | va_next = (sva + L0_SIZE) & ~L0_OFFSET; | ||||
[… 60 lines not shown …]
va_next = eva; | va_next = eva; | ||||
va = va_next; | va = va_next; | ||||
for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, | for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, | ||||
sva += L3_SIZE) { | sva += L3_SIZE) { | ||||
oldl3 = pmap_load(l3); | oldl3 = pmap_load(l3); | ||||
if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) != | if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) != | ||||
(ATTR_SW_MANAGED | L3_PAGE)) | (ATTR_SW_MANAGED | L3_PAGE)) | ||||
goto maybe_invlrng; | goto maybe_invlrng; | ||||
else if (pmap_pte_dirty(oldl3)) { | else if (pmap_pte_dirty(pmap, oldl3)) { | ||||
if (advice == MADV_DONTNEED) { | if (advice == MADV_DONTNEED) { | ||||
/* | /* | ||||
* Future calls to pmap_is_modified() | * Future calls to pmap_is_modified() | ||||
* can be avoided by making the page | * can be avoided by making the page | ||||
* dirty now. | * dirty now. | ||||
*/ | */ | ||||
m = PHYS_TO_VM_PAGE(oldl3 & ~ATTR_MASK); | m = PHYS_TO_VM_PAGE(oldl3 & ~ATTR_MASK); | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
[… 44 lines not shown …]
return; | return; | ||||
pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : | pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : | ||||
pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
restart: | restart: | ||||
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { | TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
pvh_gen = pvh->pv_gen; | pvh_gen = pvh->pv_gen; | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
if (pvh_gen != pvh->pv_gen) { | if (pvh_gen != pvh->pv_gen) { | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
goto restart; | goto restart; | ||||
[… 18 lines not shown …]
cpu_spinwait(); | cpu_spinwait(); | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
md_gen = m->md.pv_gen; | md_gen = m->md.pv_gen; | ||||
pvh_gen = pvh->pv_gen; | pvh_gen = pvh->pv_gen; | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { | if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
[… 422 lines not shown …] (below: pmap_demote_l2_locked)
struct rwlock **lockp) | struct rwlock **lockp) | ||||
{ | { | ||||
pt_entry_t *l3, newl3, oldl2; | pt_entry_t *l3, newl3, oldl2; | ||||
vm_offset_t tmpl2; | vm_offset_t tmpl2; | ||||
vm_paddr_t l3phys; | vm_paddr_t l3phys; | ||||
vm_page_t ml3; | vm_page_t ml3; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
l3 = NULL; | l3 = NULL; | ||||
oldl2 = pmap_load(l2); | oldl2 = pmap_load(l2); | ||||
KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK, | KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK, | ||||
("pmap_demote_l2: Demoting a non-block entry")); | ("pmap_demote_l2: Demoting a non-block entry")); | ||||
va &= ~L2_OFFSET; | va &= ~L2_OFFSET; | ||||
tmpl2 = 0; | tmpl2 = 0; | ||||
if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) { | if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) { | ||||
[… 144 lines not shown …]
int | int | ||||
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) | pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) | ||||
{ | { | ||||
pt_entry_t *pte, tpte; | pt_entry_t *pte, tpte; | ||||
vm_paddr_t mask, pa; | vm_paddr_t mask, pa; | ||||
int lvl, val; | int lvl, val; | ||||
bool managed; | bool managed; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
pte = pmap_pte(pmap, addr, &lvl); | pte = pmap_pte(pmap, addr, &lvl); | ||||
if (pte != NULL) { | if (pte != NULL) { | ||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | ||||
switch (lvl) { | switch (lvl) { | ||||
case 3: | case 3: | ||||
mask = L3_OFFSET; | mask = L3_OFFSET; | ||||
break; | break; | ||||
case 2: | case 2: | ||||
mask = L2_OFFSET; | mask = L2_OFFSET; | ||||
break; | break; | ||||
case 1: | case 1: | ||||
mask = L1_OFFSET; | mask = L1_OFFSET; | ||||
break; | break; | ||||
default: | default: | ||||
panic("pmap_mincore: invalid level %d", lvl); | panic("pmap_mincore: invalid level %d", lvl); | ||||
} | } | ||||
managed = (tpte & ATTR_SW_MANAGED) != 0; | managed = (tpte & ATTR_SW_MANAGED) != 0; | ||||
val = MINCORE_INCORE; | val = MINCORE_INCORE; | ||||
if (lvl != 3) | if (lvl != 3) | ||||
val |= MINCORE_SUPER; | val |= MINCORE_SUPER; | ||||
if ((managed && pmap_pte_dirty(tpte)) || (!managed && | if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed && | ||||
(tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW))) | (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW))) | ||||
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; | val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; | ||||
if ((tpte & ATTR_AF) == ATTR_AF) | if ((tpte & ATTR_AF) == ATTR_AF) | ||||
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; | val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; | ||||
pa = (tpte & ~ATTR_MASK) | (addr & mask); | pa = (tpte & ~ATTR_MASK) | (addr & mask); | ||||
} else { | } else { | ||||
managed = false; | managed = false; | ||||
[… 31 lines not shown …] (below: pmap_reset_asid_set)
dsb(ishst); | dsb(ishst); | ||||
__asm __volatile("tlbi vmalle1is"); | __asm __volatile("tlbi vmalle1is"); | ||||
dsb(ish); | dsb(ish); | ||||
bit_nclear(asid_set, ASID_FIRST_AVAILABLE, asid_set_size - 1); | bit_nclear(asid_set, ASID_FIRST_AVAILABLE, asid_set_size - 1); | ||||
CPU_FOREACH(cpuid) { | CPU_FOREACH(cpuid) { | ||||
if (cpuid == curcpu) | if (cpuid == curcpu) | ||||
continue; | continue; | ||||
pmap = pcpu_find(cpuid)->pc_curpmap; | pmap = pcpu_find(cpuid)->pc_curpmap; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
asid = COOKIE_TO_ASID(pmap->pm_cookie); | asid = COOKIE_TO_ASID(pmap->pm_cookie); | ||||
if (asid == -1) | if (asid == -1) | ||||
continue; | continue; | ||||
bit_set(asid_set, asid); | bit_set(asid_set, asid); | ||||
pmap->pm_cookie = COOKIE_FROM(asid, epoch); | pmap->pm_cookie = COOKIE_FROM(asid, epoch); | ||||
} | } | ||||
} | } | ||||
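In short, the reset is generational: flush every stage-1 TLB entry on all CPUs, empty the allocation bitmap, then re-reserve the ASIDs of the pmaps currently active on the other CPUs and restamp their cookies with the new epoch so their owners keep their ASIDs; every other pmap is left with a stale epoch and will pick up a fresh ASID the next time it is activated.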
/* | /* | ||||
* Allocate a new ASID for the specified pmap. | * Allocate a new ASID for the specified pmap. | ||||
*/ | */ | ||||
static void | static void | ||||
pmap_alloc_asid(pmap_t pmap) | pmap_alloc_asid(pmap_t pmap) | ||||
{ | { | ||||
int new_asid; | int new_asid; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
mtx_lock_spin(&asid_set_mutex); | mtx_lock_spin(&asid_set_mutex); | ||||
/* | /* | ||||
* While this processor was waiting to acquire the asid set mutex, | * While this processor was waiting to acquire the asid set mutex, | ||||
* pmap_reset_asid_set() running on another processor might have | * pmap_reset_asid_set() running on another processor might have | ||||
* updated this pmap's cookie to the current epoch. In which case, we | * updated this pmap's cookie to the current epoch. In which case, we | ||||
* don't need to allocate a new ASID. | * don't need to allocate a new ASID. | ||||
*/ | */ | ||||
[… 21 lines not shown …]
/* | /* | ||||
* Compute the value that should be stored in ttbr0 to activate the specified | * Compute the value that should be stored in ttbr0 to activate the specified | ||||
* pmap. This value may change from time to time. | * pmap. This value may change from time to time. | ||||
*/ | */ | ||||
uint64_t | uint64_t | ||||
pmap_to_ttbr0(pmap_t pmap) | pmap_to_ttbr0(pmap_t pmap) | ||||
{ | { | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | | return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | | ||||
pmap->pm_l0_paddr); | pmap->pm_l0_paddr); | ||||
} | } | ||||
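TTBR0_EL1 carries the ASID in its upper bits next to the translation-table base address, so this is simply the two fields OR'd together; roughly, assuming ASID_TO_OPERAND() shifts the ASID into place:

/*
 * TTBR0_EL1 (sketch):  ASID [63:48] | BADDR = L0 table PA [47:1] | CnP [0]
 * i.e. ttbr0 ~= ((uint64_t)asid << 48) | pmap->pm_l0_paddr
 */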
static bool | static bool | ||||
pmap_activate_int(pmap_t pmap) | pmap_activate_int(pmap_t pmap) | ||||
{ | { | ||||
int epoch; | int epoch; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap")); | KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap")); | ||||
KASSERT(pmap != kernel_pmap, ("kernel pmap activation")); | KASSERT(pmap != kernel_pmap, ("kernel pmap activation")); | ||||
if (pmap == PCPU_GET(curpmap)) { | if (pmap == PCPU_GET(curpmap)) { | ||||
/* | /* | ||||
* Handle the possibility that the old thread was preempted | * Handle the possibility that the old thread was preempted | ||||
* after an "ic" or "tlbi" instruction but before it performed | * after an "ic" or "tlbi" instruction but before it performed | ||||
* a "dsb" instruction. If the old thread migrates to a new | * a "dsb" instruction. If the old thread migrates to a new | ||||
* processor, its completion of a "dsb" instruction on that | * processor, its completion of a "dsb" instruction on that | ||||
[… 21 lines not shown …]
} | } | ||||
void | void | ||||
pmap_activate(struct thread *td) | pmap_activate(struct thread *td) | ||||
{ | { | ||||
pmap_t pmap; | pmap_t pmap; | ||||
pmap = vmspace_pmap(td->td_proc->p_vmspace); | pmap = vmspace_pmap(td->td_proc->p_vmspace); | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
critical_enter(); | critical_enter(); | ||||
(void)pmap_activate_int(pmap); | (void)pmap_activate_int(pmap); | ||||
critical_exit(); | critical_exit(); | ||||
} | } | ||||
/* | /* | ||||
* To eliminate the unused parameter "old", we would have to add an instruction | * To eliminate the unused parameter "old", we would have to add an instruction | ||||
* to cpu_switch(). | * to cpu_switch(). | ||||
[… 29 lines not shown …] (below: pmap_switch)
return (pcb); | return (pcb); | ||||
} | } | ||||
void | void | ||||
pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz) | pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz) | ||||
{ | { | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
if (va >= VM_MIN_KERNEL_ADDRESS) { | if (va >= VM_MIN_KERNEL_ADDRESS) { | ||||
cpu_icache_sync_range(va, sz); | cpu_icache_sync_range(va, sz); | ||||
} else { | } else { | ||||
u_int len, offset; | u_int len, offset; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
/* Find the length of data in this page to flush */ | /* Find the length of data in this page to flush */ | ||||
offset = va & PAGE_MASK; | offset = va & PAGE_MASK; | ||||
[… 17 lines not shown …]
int | int | ||||
pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far) | pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far) | ||||
{ | { | ||||
pt_entry_t pte, *ptep; | pt_entry_t pte, *ptep; | ||||
register_t intr; | register_t intr; | ||||
uint64_t ec, par; | uint64_t ec, par; | ||||
int lvl, rv; | int lvl, rv; | ||||
PMAP_ASSERT_STAGE1(pmap); | |||||
rv = KERN_FAILURE; | rv = KERN_FAILURE; | ||||
ec = ESR_ELx_EXCEPTION(esr); | ec = ESR_ELx_EXCEPTION(esr); | ||||
switch (ec) { | switch (ec) { | ||||
case EXCP_INSN_ABORT_L: | case EXCP_INSN_ABORT_L: | ||||
case EXCP_INSN_ABORT: | case EXCP_INSN_ABORT: | ||||
case EXCP_DATA_ABORT_L: | case EXCP_DATA_ABORT_L: | ||||
case EXCP_DATA_ABORT: | case EXCP_DATA_ABORT: | ||||
[… remaining 393 lines of the file not shown …]