sys/arm64/arm64/pmap.c
Show First 20 Lines • Show All 1,217 Lines • ▼ Show 20 Lines | |||||
SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD, | SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD, | ||||
&pmap_l2_p_failures, 0, "2MB page promotion failures"); | &pmap_l2_p_failures, 0, "2MB page promotion failures"); | ||||
static u_long pmap_l2_promotions; | static u_long pmap_l2_promotions; | ||||
SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD, | SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD, | ||||
&pmap_l2_promotions, 0, "2MB page promotions"); | &pmap_l2_promotions, 0, "2MB page promotions"); | ||||
/* | /* | ||||
* Invalidate a single TLB entry. | * If the given value for "final_only" is false, then any cached intermediate- | ||||
* level entries, i.e., L{0,1,2}_TABLE entries, are invalidated in addition to | |||||
* any cached final-level entry, i.e., either an L{1,2}_BLOCK or L3_PAGE entry. | |||||
* Otherwise, just the cached final-level entry is invalidated. | |||||
*/ | */ | ||||
static __inline void | static __inline void | ||||
pmap_invalidate_page(pmap_t pmap, vm_offset_t va) | pmap_invalidate_kernel(uint64_t r, bool final_only) | ||||
{ | { | ||||
if (final_only) | |||||
__asm __volatile("tlbi vaale1is, %0" : : "r" (r)); | |||||
else | |||||
__asm __volatile("tlbi vaae1is, %0" : : "r" (r)); | |||||
} | |||||
static __inline void | |||||
pmap_invalidate_user(uint64_t r, bool final_only) | |||||
{ | |||||
if (final_only) | |||||
__asm __volatile("tlbi vale1is, %0" : : "r" (r)); | |||||
else | |||||
__asm __volatile("tlbi vae1is, %0" : : "r" (r)); | |||||
} | |||||
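For readers not steeped in the TLBI mnemonics, the four instructions used by the two helpers above decode as follows. This expansion follows the Arm naming convention and is annotation added here, not part of the diff:

/*
 * tlbi vaale1is -- by VA, all ASIDs,   last level only, EL1, Inner Shareable
 * tlbi vaae1is  -- by VA, all ASIDs,   all levels,      EL1, Inner Shareable
 * tlbi vale1is  -- by VA and ASID,     last level only, EL1, Inner Shareable
 * tlbi vae1is   -- by VA and ASID,     all levels,      EL1, Inner Shareable
 */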
/* | |||||
* Invalidates any cached final- and optionally intermediate-level TLB entries | |||||
* for the specified virtual address in the given virtual address space. | |||||
*/ | |||||
static __inline void | |||||
kib: Would it make sense to call this function e.g. pmap_invalidate_page_flags (for instance), and then define pmap_invalidate_page() as pmap_invalidate_page_flags(.., true)? Same for other pmap_invalidate_ functions. My reasoning is two-fold:
Done Inline Actions
alc: The reason that I didn't was out of fear that someone might someday adapt code from, e.g., amd64, and not think about the fact that pmap_invalidate_{page,range}() are semantically different on arm64, because they don't invalidate page walk cache entries. This is similar to how we have allowed pmap_kenter() to be semantically different across different architectures. Thoughts?
Not Done Inline Actions
kib: IMO a comment about not invalidating page mapping structures in the function herald would be useful. A symmetric comment on amd64, noting that invalidations there do guarantee a flush of the paging-structure cache, might be useful as well. Other than that, I consider it unlikely that a direct copy of some amd64 code would occur, or that it would be nuanced enough to be affected by such a semantic difference.
pmap_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only) | |||||
{ | |||||
uint64_t r; | uint64_t r; | ||||
PMAP_ASSERT_STAGE1(pmap); | PMAP_ASSERT_STAGE1(pmap); | ||||
dsb(ishst); | dsb(ishst); | ||||
if (pmap == kernel_pmap) { | if (pmap == kernel_pmap) { | ||||
r = atop(va); | r = atop(va); | ||||
__asm __volatile("tlbi vaae1is, %0" : : "r" (r)); | pmap_invalidate_kernel(r, final_only); | ||||
} else { | } else { | ||||
r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va); | r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va); | ||||
__asm __volatile("tlbi vae1is, %0" : : "r" (r)); | pmap_invalidate_user(r, final_only); | ||||
} | } | ||||
dsb(ish); | dsb(ish); | ||||
isb(); | isb(); | ||||
} | } | ||||
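For concreteness, kib's suggestion above would look roughly like the sketch below. The pmap_invalidate_page_flags name is his example, the body simply restates the function above, and none of this is code from the diff:

static __inline void
pmap_invalidate_page_flags(pmap_t pmap, vm_offset_t va, bool final_only)
{
	uint64_t r;

	PMAP_ASSERT_STAGE1(pmap);

	dsb(ishst);
	if (pmap == kernel_pmap) {
		r = atop(va);
		pmap_invalidate_kernel(r, final_only);
	} else {
		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va);
		pmap_invalidate_user(r, final_only);
	}
	dsb(ish);
	isb();
}

/* The old name and signature survive as a thin wrapper, as kib wrote. */
static __inline void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	pmap_invalidate_page_flags(pmap, va, true);
}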
/* | |||||
* Invalidates any cached final- and optionally intermediate-level TLB entries | |||||
* for the specified virtual address range in the given virtual address space. | |||||
*/ | |||||
static __inline void | static __inline void | ||||
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, | ||||
bool final_only) | |||||
{ | { | ||||
uint64_t end, r, start; | uint64_t end, r, start; | ||||
PMAP_ASSERT_STAGE1(pmap); | PMAP_ASSERT_STAGE1(pmap); | ||||
dsb(ishst); | dsb(ishst); | ||||
Not Done Inline Actions
markj: Does Graviton (or any other popular platform) implement FEAT_TLBIRANGE? Looks like that's a fairly new extension.
Done Inline Actions
alc: Graviton 1 (Cortex-A72, just like RPi4) definitely doesn't, and I'm pretty sure that Graviton 2 (Cortex-A76/Neoverse-N1) doesn't either. I don't know about the just-announced Graviton 3.
Not Done Inline Actions
kib: FYI, https://software.intel.com/content/dam/develop/external/us/en/documents/341431-remote…
Not Done Inline Actions
andrew: I have a patch to support FEAT_TLBIRANGE; however, I haven't seen any improvement on current HW that supports it (on Apple M1 under the hypervisor).
Done Inline Actions
alc: I suspect that FEAT_TLBIRANGE matters most on the "big iron" machines where it reduces traffic over the "back office" interconnect used to implement TLB shootdown.
if (pmap == kernel_pmap) { | if (pmap == kernel_pmap) { | ||||
start = atop(sva); | start = atop(sva); | ||||
end = atop(eva); | end = atop(eva); | ||||
for (r = start; r < end; r++) | for (r = start; r < end; r++) | ||||
__asm __volatile("tlbi vaae1is, %0" : : "r" (r)); | pmap_invalidate_kernel(r, final_only); | ||||
} else { | } else { | ||||
start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); | start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); | ||||
start |= atop(sva); | start |= atop(sva); | ||||
end |= atop(eva); | end |= atop(eva); | ||||
for (r = start; r < end; r++) | for (r = start; r < end; r++) | ||||
__asm __volatile("tlbi vae1is, %0" : : "r" (r)); | pmap_invalidate_user(r, final_only); | ||||
} | } | ||||
dsb(ish); | dsb(ish); | ||||
isb(); | isb(); | ||||
} | } | ||||
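To make the FEAT_TLBIRANGE discussion above concrete, here is a rough, untested sketch of what a range-based final-level invalidation could look like for the 4KB granule. The operand field layout (ASID, TG, SCALE, NUM, TTL, BaseADDR) follows the Arm ARM; the helper name, the SCALE = 0 restriction, and the KASSERT are assumptions made for illustration and are not part of this change or of andrew's patch:

/*
 * Hypothetical: invalidate final-level entries for [sva, eva) with one
 * broadcast instruction instead of a per-page loop. With SCALE = 0 the
 * operand covers (NUM + 1) * 2 pages, so this sketch handles only even
 * page counts up to 64; other ranges would fall back to the loop above.
 * The kernel pmap would use "tlbi rvaale1is" (all ASIDs) instead.
 */
static __inline void
pmap_invalidate_range_tlbirange(uint64_t asid_op, vm_offset_t sva,
    vm_offset_t eva)
{
	uint64_t num, r;

	num = (eva - sva) / (2 * PAGE_SIZE) - 1;
	KASSERT((eva - sva) % (2 * PAGE_SIZE) == 0 && num < 32,
	    ("range not expressible with SCALE = 0"));
	r = asid_op |		/* ASID in bits [63:48] */
	    (1ul << 46) |	/* TG = 0b01: 4KB translation granule */
	    (num << 39) |	/* NUM, bits [43:39]; SCALE and TTL are 0 */
	    atop(sva);		/* BaseADDR, bits [36:0] */
	__asm __volatile("tlbi rvale1is, %0" : : "r" (r));
}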
/* | |||||
* Invalidates all cached intermediate- and final-level TLB entries for the | |||||
* given virtual address space. | |||||
*/ | |||||
static __inline void | static __inline void | ||||
pmap_invalidate_all(pmap_t pmap) | pmap_invalidate_all(pmap_t pmap) | ||||
{ | { | ||||
uint64_t r; | uint64_t r; | ||||
PMAP_ASSERT_STAGE1(pmap); | PMAP_ASSERT_STAGE1(pmap); | ||||
dsb(ishst); | dsb(ishst); | ||||
▲ Show 20 Lines • Show All 229 Lines • ▼ Show 20 Lines | while (size != 0) { | ||||
pte = pmap_l2_to_l3(pde, va); | pte = pmap_l2_to_l3(pde, va); | ||||
pmap_load_store(pte, (pa & ~L3_OFFSET) | attr); | pmap_load_store(pte, (pa & ~L3_OFFSET) | attr); | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
size -= PAGE_SIZE; | size -= PAGE_SIZE; | ||||
} | } | ||||
pmap_invalidate_range(kernel_pmap, sva, va); | pmap_invalidate_range(kernel_pmap, sva, va, true); | ||||
} | } | ||||
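A caller-side summary of the new parameter, inferred from the call sites in the rest of this diff rather than stated anywhere in it: pmap_kenter() above passes true because it only creates or removes L3_PAGE entries, while callers that free page table pages pass false. Schematically:

/* Only a final-level (L3_PAGE or L{1,2}_BLOCK) entry changed; any
 * cached intermediate-level entries remain valid. */
pmap_invalidate_page(pmap, va, true);

/* A page table page was freed (_pmap_unwire_l3()) or an L{1,2}_TABLE
 * entry replaced (pmap_update_entry(), pmap_enter_l2()); cached
 * intermediate-level entries must be invalidated too. */
pmap_invalidate_page(pmap, va, false);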
void | void | ||||
pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa) | pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa) | ||||
{ | { | ||||
pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE); | pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE); | ||||
} | } | ||||
/* | /* | ||||
* Remove a page from the kernel pagetables. | * Remove a page from the kernel pagetables. | ||||
*/ | */ | ||||
PMAP_INLINE void | PMAP_INLINE void | ||||
pmap_kremove(vm_offset_t va) | pmap_kremove(vm_offset_t va) | ||||
{ | { | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
pte = pmap_pte_exists(kernel_pmap, va, 3, __func__); | pte = pmap_pte_exists(kernel_pmap, va, 3, __func__); | ||||
pmap_clear(pte); | pmap_clear(pte); | ||||
pmap_invalidate_page(kernel_pmap, va); | pmap_invalidate_page(kernel_pmap, va, true); | ||||
} | } | ||||
void | void | ||||
pmap_kremove_device(vm_offset_t sva, vm_size_t size) | pmap_kremove_device(vm_offset_t sva, vm_size_t size) | ||||
{ | { | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
KASSERT((sva & L3_OFFSET) == 0, | KASSERT((sva & L3_OFFSET) == 0, | ||||
("pmap_kremove_device: Invalid virtual address")); | ("pmap_kremove_device: Invalid virtual address")); | ||||
KASSERT((size & PAGE_MASK) == 0, | KASSERT((size & PAGE_MASK) == 0, | ||||
("pmap_kremove_device: Mapping is not page-sized")); | ("pmap_kremove_device: Mapping is not page-sized")); | ||||
va = sva; | va = sva; | ||||
while (size != 0) { | while (size != 0) { | ||||
pte = pmap_pte_exists(kernel_pmap, va, 3, __func__); | pte = pmap_pte_exists(kernel_pmap, va, 3, __func__); | ||||
pmap_clear(pte); | pmap_clear(pte); | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
size -= PAGE_SIZE; | size -= PAGE_SIZE; | ||||
} | } | ||||
pmap_invalidate_range(kernel_pmap, sva, va); | pmap_invalidate_range(kernel_pmap, sva, va, true); | ||||
} | } | ||||
/* | /* | ||||
* Used to map a range of physical addresses into kernel | * Used to map a range of physical addresses into kernel | ||||
* virtual address space. | * virtual address space. | ||||
* | * | ||||
* The value passed in '*virt' is a suggested virtual address for | * The value passed in '*virt' is a suggested virtual address for | ||||
* the mapping. Architectures which can support a direct-mapped | * the mapping. Architectures which can support a direct-mapped | ||||
Show All 39 Lines | for (i = 0; i < count; i++) { | ||||
pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | | pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | | ||||
ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN | | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN | | ||||
ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE; | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE; | ||||
pte = pmap_l2_to_l3(pde, va); | pte = pmap_l2_to_l3(pde, va); | ||||
pmap_load_store(pte, pa); | pmap_load_store(pte, pa); | ||||
va += L3_SIZE; | va += L3_SIZE; | ||||
} | } | ||||
pmap_invalidate_range(kernel_pmap, sva, va); | pmap_invalidate_range(kernel_pmap, sva, va, true); | ||||
} | } | ||||
/* | /* | ||||
* This routine tears out page mappings from the | * This routine tears out page mappings from the | ||||
* kernel -- it is meant only for temporary mappings. | * kernel -- it is meant only for temporary mappings. | ||||
*/ | */ | ||||
void | void | ||||
pmap_qremove(vm_offset_t sva, int count) | pmap_qremove(vm_offset_t sva, int count) | ||||
Show All 9 Lines | pmap_qremove(vm_offset_t sva, int count) | ||||
while (count-- > 0) { | while (count-- > 0) { | ||||
pte = pmap_pte_exists(kernel_pmap, va, 3, NULL); | pte = pmap_pte_exists(kernel_pmap, va, 3, NULL); | ||||
if (pte != NULL) { | if (pte != NULL) { | ||||
pmap_clear(pte); | pmap_clear(pte); | ||||
} | } | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
pmap_invalidate_range(kernel_pmap, sva, va); | pmap_invalidate_range(kernel_pmap, sva, va, true); | ||||
} | } | ||||
/*************************************************** | /*************************************************** | ||||
* Page table page management routines..... | * Page table page management routines..... | ||||
***************************************************/ | ***************************************************/ | ||||
/* | /* | ||||
* Schedule the specified unused page table page to be freed. Specifically, | * Schedule the specified unused page table page to be freed. Specifically, | ||||
* add the page to the specified list of pages that will be released to the | * add the page to the specified list of pages that will be released to the | ||||
▲ Show 20 Lines • Show All 71 Lines • ▼ Show 20 Lines | if (m->pindex < NUL2E) { | ||||
pd_entry_t *l0, tl0; | pd_entry_t *l0, tl0; | ||||
vm_page_t l1pg; | vm_page_t l1pg; | ||||
l0 = pmap_l0(pmap, va); | l0 = pmap_l0(pmap, va); | ||||
tl0 = pmap_load(l0); | tl0 = pmap_load(l0); | ||||
l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK); | l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK); | ||||
pmap_unwire_l3(pmap, va, l1pg, free); | pmap_unwire_l3(pmap, va, l1pg, free); | ||||
} | } | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, false); | ||||
/* | /* | ||||
* Put page on a list so that it is released after | * Put page on a list so that it is released after | ||||
* *ALL* TLB shootdown is done | * *ALL* TLB shootdown is done | ||||
*/ | */ | ||||
pmap_add_delayed_free_list(m, free, TRUE); | pmap_add_delayed_free_list(m, free, TRUE); | ||||
} | } | ||||
Show All 21 Lines | |||||
* mapping. | * mapping. | ||||
*/ | */ | ||||
static void | static void | ||||
pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte) | pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte) | ||||
{ | { | ||||
struct spglist free; | struct spglist free; | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
if (pmap_unwire_l3(pmap, va, mpte, &free)) { | if (pmap_unwire_l3(pmap, va, mpte, &free)) | ||||
/* | |||||
* Although "va" was never mapped, the TLB could nonetheless | |||||
* have intermediate entries that refer to the freed page | |||||
* table pages. Invalidate those entries. | |||||
* | |||||
* XXX redundant invalidation (See _pmap_unwire_l3().) | |||||
*/ | |||||
pmap_invalidate_page(pmap, va); | |||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
} | } | ||||
} | |||||
void | void | ||||
pmap_pinit0(pmap_t pmap) | pmap_pinit0(pmap_t pmap) | ||||
{ | { | ||||
PMAP_LOCK_INIT(pmap); | PMAP_LOCK_INIT(pmap); | ||||
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); | bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); | ||||
pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1); | pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1); | ||||
▲ Show 20 Lines • Show All 626 Lines • ▼ Show 20 Lines | for (field = 0; field < _NPCM; field++) { | ||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | ||||
if ((tpte & ATTR_SW_WIRED) != 0) | if ((tpte & ATTR_SW_WIRED) != 0) | ||||
continue; | continue; | ||||
tpte = pmap_load_clear(pte); | tpte = pmap_load_clear(pte); | ||||
m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK); | m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK); | ||||
if (pmap_pte_dirty(pmap, tpte)) | if (pmap_pte_dirty(pmap, tpte)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if ((tpte & ATTR_AF) != 0) { | if ((tpte & ATTR_AF) != 0) { | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, true); | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
} | } | ||||
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | ||||
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | ||||
m->md.pv_gen++; | m->md.pv_gen++; | ||||
if (TAILQ_EMPTY(&m->md.pv_list) && | if (TAILQ_EMPTY(&m->md.pv_list) && | ||||
(m->flags & PG_FICTITIOUS) == 0) { | (m->flags & PG_FICTITIOUS) == 0) { | ||||
pvh = page_to_pvh(m); | pvh = page_to_pvh(m); | ||||
▲ Show 20 Lines • Show All 464 Lines • ▼ Show 20 Lines | pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, | ||||
old_l2 = pmap_load_clear(l2); | old_l2 = pmap_load_clear(l2); | ||||
KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK, | KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK, | ||||
("pmap_remove_l2: L2e %lx is not a block mapping", old_l2)); | ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2)); | ||||
/* | /* | ||||
* Since a promotion must break the 4KB page mappings before making | * Since a promotion must break the 4KB page mappings before making | ||||
* the 2MB page mapping, a pmap_invalidate_page() suffices. | * the 2MB page mapping, a pmap_invalidate_page() suffices. | ||||
*/ | */ | ||||
pmap_invalidate_page(pmap, sva); | pmap_invalidate_page(pmap, sva, true); | ||||
if (old_l2 & ATTR_SW_WIRED) | if (old_l2 & ATTR_SW_WIRED) | ||||
pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE; | pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE; | ||||
pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE); | pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE); | ||||
if (old_l2 & ATTR_SW_MANAGED) { | if (old_l2 & ATTR_SW_MANAGED) { | ||||
m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK); | m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK); | ||||
pvh = page_to_pvh(m); | pvh = page_to_pvh(m); | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK); | CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK); | ||||
Show All 33 Lines | pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va, | ||||
pd_entry_t l2e, struct spglist *free, struct rwlock **lockp) | pd_entry_t l2e, struct spglist *free, struct rwlock **lockp) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pt_entry_t old_l3; | pt_entry_t old_l3; | ||||
vm_page_t m; | vm_page_t m; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
old_l3 = pmap_load_clear(l3); | old_l3 = pmap_load_clear(l3); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, true); | ||||
if (old_l3 & ATTR_SW_WIRED) | if (old_l3 & ATTR_SW_WIRED) | ||||
pmap->pm_stats.wired_count -= 1; | pmap->pm_stats.wired_count -= 1; | ||||
pmap_resident_count_dec(pmap, 1); | pmap_resident_count_dec(pmap, 1); | ||||
if (old_l3 & ATTR_SW_MANAGED) { | if (old_l3 & ATTR_SW_MANAGED) { | ||||
m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK); | m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK); | ||||
if (pmap_pte_dirty(pmap, old_l3)) | if (pmap_pte_dirty(pmap, old_l3)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if (old_l3 & ATTR_AF) | if (old_l3 & ATTR_AF) | ||||
Show All 32 Lines | pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva, | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE), | KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE), | ||||
("pmap_remove_l3_range: range crosses an L3 page table boundary")); | ("pmap_remove_l3_range: range crosses an L3 page table boundary")); | ||||
l3pg = !ADDR_IS_KERNEL(sva) ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) : NULL; | l3pg = !ADDR_IS_KERNEL(sva) ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) : NULL; | ||||
va = eva; | va = eva; | ||||
for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) { | for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) { | ||||
if (!pmap_l3_valid(pmap_load(l3))) { | if (!pmap_l3_valid(pmap_load(l3))) { | ||||
if (va != eva) { | if (va != eva) { | ||||
pmap_invalidate_range(pmap, va, sva); | pmap_invalidate_range(pmap, va, sva, true); | ||||
va = eva; | va = eva; | ||||
} | } | ||||
continue; | continue; | ||||
} | } | ||||
old_l3 = pmap_load_clear(l3); | old_l3 = pmap_load_clear(l3); | ||||
if ((old_l3 & ATTR_SW_WIRED) != 0) | if ((old_l3 & ATTR_SW_WIRED) != 0) | ||||
pmap->pm_stats.wired_count--; | pmap->pm_stats.wired_count--; | ||||
pmap_resident_count_dec(pmap, 1); | pmap_resident_count_dec(pmap, 1); | ||||
Show All 11 Lines | if ((old_l3 & ATTR_SW_MANAGED) != 0) { | ||||
* performed before the PV list lock is | * performed before the PV list lock is | ||||
* released. Otherwise, a concurrent | * released. Otherwise, a concurrent | ||||
* pmap_remove_all() on a physical page | * pmap_remove_all() on a physical page | ||||
* could return while a stale TLB entry | * could return while a stale TLB entry | ||||
* still provides access to that page. | * still provides access to that page. | ||||
*/ | */ | ||||
if (va != eva) { | if (va != eva) { | ||||
pmap_invalidate_range(pmap, va, | pmap_invalidate_range(pmap, va, | ||||
sva); | sva, true); | ||||
va = eva; | va = eva; | ||||
} | } | ||||
rw_wunlock(*lockp); | rw_wunlock(*lockp); | ||||
} | } | ||||
*lockp = new_lock; | *lockp = new_lock; | ||||
rw_wlock(*lockp); | rw_wlock(*lockp); | ||||
} | } | ||||
pmap_pvh_free(&m->md, pmap, sva); | pmap_pvh_free(&m->md, pmap, sva); | ||||
if (TAILQ_EMPTY(&m->md.pv_list) && | if (TAILQ_EMPTY(&m->md.pv_list) && | ||||
(m->flags & PG_FICTITIOUS) == 0) { | (m->flags & PG_FICTITIOUS) == 0) { | ||||
Not Done Inline Actions
markj: Do we actually need to adjust sva here? pmap_unwire_l3() will have invalidated that page before returning true.
Done Inline Actions
alc: As a matter of correctness, no. As an optimization, yes. Right now, we are redundantly, i.e., twice, invalidating the last valid virtual address when we unhook a page table page. I could see replacing "sva += L3_SIZE;" with a comment explaining why we don't.
pvh = page_to_pvh(m); | pvh = page_to_pvh(m); | ||||
if (TAILQ_EMPTY(&pvh->pv_list)) | if (TAILQ_EMPTY(&pvh->pv_list)) | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
} | } | ||||
} | } | ||||
if (va == eva) | |||||
va = sva; | |||||
if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) { | if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) { | ||||
sva += L3_SIZE; | /* | ||||
* _pmap_unwire_l3() has already invalidated the TLB | |||||
* entries at all levels for "sva". So, we need not | |||||
* perform "sva += L3_SIZE;" here. Moreover, we need | |||||
* not perform "va = sva;" if "sva" is at the start | |||||
* of a new valid range consisting of a single page. | |||||
*/ | |||||
break; | break; | ||||
} | } | ||||
if (va == eva) | |||||
va = sva; | |||||
} | } | ||||
if (va != eva) | if (va != eva) | ||||
pmap_invalidate_range(pmap, va, sva); | pmap_invalidate_range(pmap, va, sva, true); | ||||
} | } | ||||
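The va/sva bookkeeping in pmap_remove_l3_range() above implements a small batching scheme for invalidations. Stripped of the PV-list and lock handling, the skeleton is roughly as follows; pte_valid() and pte_clear() are hypothetical stand-ins for the real pmap_load()/pmap_load_clear() sequences:

	va = eva;				/* no run pending */
	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
		if (!pte_valid(l3)) {
			if (va != eva) {
				/* A run just ended; flush it. */
				pmap_invalidate_range(pmap, va, sva, true);
				va = eva;
			}
			continue;
		}
		pte_clear(l3);
		if (va == eva)			/* start a new run at sva */
			va = sva;
	}
	if (va != eva)				/* flush the final run */
		pmap_invalidate_range(pmap, va, sva, true);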
/* | /* | ||||
* Remove the given range of addresses from the specified map. | * Remove the given range of addresses from the specified map. | ||||
* | * | ||||
* It is assumed that the start and end are properly | * It is assumed that the start and end are properly | ||||
* rounded to the page size. | * rounded to the page size. | ||||
*/ | */ | ||||
Show All 38 Lines | for (; sva < eva; sva = va_next) { | ||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | ||||
KASSERT(va_next <= eva, | KASSERT(va_next <= eva, | ||||
("partial update of non-transparent 1G page " | ("partial update of non-transparent 1G page " | ||||
"l1 %#lx sva %#lx eva %#lx va_next %#lx", | "l1 %#lx sva %#lx eva %#lx va_next %#lx", | ||||
pmap_load(l1), sva, eva, va_next)); | pmap_load(l1), sva, eva, va_next)); | ||||
MPASS(pmap != kernel_pmap); | MPASS(pmap != kernel_pmap); | ||||
MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); | MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); | ||||
pmap_clear(l1); | pmap_clear(l1); | ||||
pmap_invalidate_page(pmap, sva); | pmap_invalidate_page(pmap, sva, true); | ||||
pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE); | pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE); | ||||
pmap_unuse_pt(pmap, sva, pmap_load(l0), &free); | pmap_unuse_pt(pmap, sva, pmap_load(l0), &free); | ||||
continue; | continue; | ||||
} | } | ||||
/* | /* | ||||
* Calculate index for next page table. | * Calculate index for next page table. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 114 Lines • ▼ Show 20 Lines | KASSERT(lvl == 2, | ||||
("pmap_remove_all: invalid pde level %d", lvl)); | ("pmap_remove_all: invalid pde level %d", lvl)); | ||||
tpde = pmap_load(pde); | tpde = pmap_load(pde); | ||||
pte = pmap_l2_to_l3(pde, pv->pv_va); | pte = pmap_l2_to_l3(pde, pv->pv_va); | ||||
tpte = pmap_load_clear(pte); | tpte = pmap_load_clear(pte); | ||||
if (tpte & ATTR_SW_WIRED) | if (tpte & ATTR_SW_WIRED) | ||||
pmap->pm_stats.wired_count--; | pmap->pm_stats.wired_count--; | ||||
if ((tpte & ATTR_AF) != 0) { | if ((tpte & ATTR_AF) != 0) { | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va, true); | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
} | } | ||||
/* | /* | ||||
* Update the vm_page_t clean and reference bits. | * Update the vm_page_t clean and reference bits. | ||||
*/ | */ | ||||
if (pmap_pte_dirty(pmap, tpte)) | if (pmap_pte_dirty(pmap, tpte)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
▲ Show 20 Lines • Show All 48 Lines • ▼ Show 20 Lines | if ((old_l2 & ATTR_SW_MANAGED) != 0 && | ||||
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) | for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) | ||||
vm_page_dirty(mt); | vm_page_dirty(mt); | ||||
} | } | ||||
/* | /* | ||||
* Since a promotion must break the 4KB page mappings before making | * Since a promotion must break the 4KB page mappings before making | ||||
* the 2MB page mapping, a pmap_invalidate_page() suffices. | * the 2MB page mapping, a pmap_invalidate_page() suffices. | ||||
*/ | */ | ||||
pmap_invalidate_page(pmap, sva); | pmap_invalidate_page(pmap, sva, true); | ||||
} | } | ||||
/* | /* | ||||
* Set the physical protection on the | * Set the physical protection on the | ||||
* specified range of this map as requested. | * specified range of this map as requested. | ||||
*/ | */ | ||||
void | void | ||||
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) | pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) | ||||
Show All 40 Lines | for (; sva < eva; sva = va_next) { | ||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) { | ||||
KASSERT(va_next <= eva, | KASSERT(va_next <= eva, | ||||
("partial update of non-transparent 1G page " | ("partial update of non-transparent 1G page " | ||||
"l1 %#lx sva %#lx eva %#lx va_next %#lx", | "l1 %#lx sva %#lx eva %#lx va_next %#lx", | ||||
pmap_load(l1), sva, eva, va_next)); | pmap_load(l1), sva, eva, va_next)); | ||||
MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); | MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0); | ||||
if ((pmap_load(l1) & mask) != nbits) { | if ((pmap_load(l1) & mask) != nbits) { | ||||
pmap_store(l1, (pmap_load(l1) & ~mask) | nbits); | pmap_store(l1, (pmap_load(l1) & ~mask) | nbits); | ||||
pmap_invalidate_page(pmap, sva); | pmap_invalidate_page(pmap, sva, true); | ||||
} | } | ||||
continue; | continue; | ||||
} | } | ||||
va_next = (sva + L2_SIZE) & ~L2_OFFSET; | va_next = (sva + L2_SIZE) & ~L2_OFFSET; | ||||
if (va_next < sva) | if (va_next < sva) | ||||
va_next = eva; | va_next = eva; | ||||
Show All 24 Lines | for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++, | ||||
* invalid or already has the desired access | * invalid or already has the desired access | ||||
* restrictions in place. (The latter case occurs | * restrictions in place. (The latter case occurs | ||||
* frequently. For example, in a "buildworld" | * frequently. For example, in a "buildworld" | ||||
* workload, almost 1 out of 4 L3 entries already | * workload, almost 1 out of 4 L3 entries already | ||||
* have the desired restrictions.) | * have the desired restrictions.) | ||||
*/ | */ | ||||
if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) { | if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) { | ||||
if (va != va_next) { | if (va != va_next) { | ||||
pmap_invalidate_range(pmap, va, sva); | pmap_invalidate_range(pmap, va, sva, | ||||
true); | |||||
va = va_next; | va = va_next; | ||||
} | } | ||||
continue; | continue; | ||||
} | } | ||||
while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | | while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | | ||||
nbits)) | nbits)) | ||||
cpu_spinwait(); | cpu_spinwait(); | ||||
/* | /* | ||||
* When a dirty read/write mapping is write protected, | * When a dirty read/write mapping is write protected, | ||||
* update the page's dirty field. | * update the page's dirty field. | ||||
*/ | */ | ||||
if ((l3 & ATTR_SW_MANAGED) != 0 && | if ((l3 & ATTR_SW_MANAGED) != 0 && | ||||
(nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 && | (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 && | ||||
pmap_pte_dirty(pmap, l3)) | pmap_pte_dirty(pmap, l3)) | ||||
vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK)); | vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK)); | ||||
if (va == va_next) | if (va == va_next) | ||||
va = sva; | va = sva; | ||||
} | } | ||||
if (va != va_next) | if (va != va_next) | ||||
pmap_invalidate_range(pmap, va, sva); | pmap_invalidate_range(pmap, va, sva, true); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
/* | /* | ||||
* Inserts the specified page table page into the specified pmap's collection | * Inserts the specified page table page into the specified pmap's collection | ||||
* of idle page table pages. Each of a pmap's page table pages is responsible | * of idle page table pages. Each of a pmap's page table pages is responsible | ||||
* for mapping a distinct range of virtual addresses. The pmap's collection is | * for mapping a distinct range of virtual addresses. The pmap's collection is | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte, | ||||
intr = intr_disable(); | intr = intr_disable(); | ||||
/* | /* | ||||
* Clear the old mapping's valid bit, but leave the rest of the entry | * Clear the old mapping's valid bit, but leave the rest of the entry | ||||
* unchanged, so that a lockless, concurrent pmap_kextract() can still | * unchanged, so that a lockless, concurrent pmap_kextract() can still | ||||
* lookup the physical address. | * lookup the physical address. | ||||
*/ | */ | ||||
pmap_clear_bits(pte, ATTR_DESCR_VALID); | pmap_clear_bits(pte, ATTR_DESCR_VALID); | ||||
pmap_invalidate_range(pmap, va, va + size); | |||||
/* | |||||
* When promoting, the L{1,2}_TABLE entry that is being replaced might | |||||
* be cached, so we invalidate intermediate entries as well as final | |||||
* entries. | |||||
*/ | |||||
pmap_invalidate_range(pmap, va, va + size, false); | |||||
/* Create the new mapping */ | /* Create the new mapping */ | ||||
pmap_store(pte, newpte); | pmap_store(pte, newpte); | ||||
dsb(ishst); | dsb(ishst); | ||||
intr_restore(intr); | intr_restore(intr); | ||||
} | } | ||||
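For review convenience, the sequence above is the architecture's break-before-make requirement in miniature; the step numbering is added here:

/*
 * 1. pmap_clear_bits(pte, ATTR_DESCR_VALID)	break: stop new TLB fills
 * 2. pmap_invalidate_range(..., false)		flush final- and
 *						intermediate-level entries
 * 3. pmap_store(pte, newpte)			make: install the new entry
 * 4. dsb(ishst)				order the store before later
 *						page table walks
 */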
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
▲ Show 20 Lines • Show All 436 Lines • ▼ Show 20 Lines | if ((orig_l3 & ATTR_SW_MANAGED) != 0) { | ||||
/* | /* | ||||
* The pmap lock is sufficient to synchronize with | * The pmap lock is sufficient to synchronize with | ||||
* concurrent calls to pmap_page_test_mappings() and | * concurrent calls to pmap_page_test_mappings() and | ||||
* pmap_ts_referenced(). | * pmap_ts_referenced(). | ||||
*/ | */ | ||||
if (pmap_pte_dirty(pmap, orig_l3)) | if (pmap_pte_dirty(pmap, orig_l3)) | ||||
vm_page_dirty(om); | vm_page_dirty(om); | ||||
if ((orig_l3 & ATTR_AF) != 0) { | if ((orig_l3 & ATTR_AF) != 0) { | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, true); | ||||
vm_page_aflag_set(om, PGA_REFERENCED); | vm_page_aflag_set(om, PGA_REFERENCED); | ||||
} | } | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); | CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); | ||||
pv = pmap_pvh_remove(&om->md, pmap, va); | pv = pmap_pvh_remove(&om->md, pmap, va); | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) | if ((m->oflags & VPO_UNMANAGED) != 0) | ||||
free_pv_entry(pmap, pv); | free_pv_entry(pmap, pv); | ||||
if ((om->a.flags & PGA_WRITEABLE) != 0 && | if ((om->a.flags & PGA_WRITEABLE) != 0 && | ||||
TAILQ_EMPTY(&om->md.pv_list) && | TAILQ_EMPTY(&om->md.pv_list) && | ||||
((om->flags & PG_FICTITIOUS) != 0 || | ((om->flags & PG_FICTITIOUS) != 0 || | ||||
TAILQ_EMPTY(&page_to_pvh(om)->pv_list))) | TAILQ_EMPTY(&page_to_pvh(om)->pv_list))) | ||||
vm_page_aflag_clear(om, PGA_WRITEABLE); | vm_page_aflag_clear(om, PGA_WRITEABLE); | ||||
} else { | } else { | ||||
KASSERT((orig_l3 & ATTR_AF) != 0, | KASSERT((orig_l3 & ATTR_AF) != 0, | ||||
("pmap_enter: unmanaged mapping lacks ATTR_AF")); | ("pmap_enter: unmanaged mapping lacks ATTR_AF")); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, true); | ||||
} | } | ||||
orig_l3 = 0; | orig_l3 = 0; | ||||
} else { | } else { | ||||
/* | /* | ||||
* Increment the counters. | * Increment the counters. | ||||
*/ | */ | ||||
if ((new_l3 & ATTR_SW_WIRED) != 0) | if ((new_l3 & ATTR_SW_WIRED) != 0) | ||||
pmap->pm_stats.wired_count++; | pmap->pm_stats.wired_count++; | ||||
▲ Show 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | validate: | ||||
* Update the L3 entry | * Update the L3 entry | ||||
*/ | */ | ||||
if (pmap_l3_valid(orig_l3)) { | if (pmap_l3_valid(orig_l3)) { | ||||
PMAP_ASSERT_STAGE1(pmap); | PMAP_ASSERT_STAGE1(pmap); | ||||
KASSERT(opa == pa, ("pmap_enter: invalid update")); | KASSERT(opa == pa, ("pmap_enter: invalid update")); | ||||
if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) { | if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) { | ||||
/* same PA, different attributes */ | /* same PA, different attributes */ | ||||
orig_l3 = pmap_load_store(l3, new_l3); | orig_l3 = pmap_load_store(l3, new_l3); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, true); | ||||
if ((orig_l3 & ATTR_SW_MANAGED) != 0 && | if ((orig_l3 & ATTR_SW_MANAGED) != 0 && | ||||
pmap_pte_dirty(pmap, orig_l3)) | pmap_pte_dirty(pmap, orig_l3)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
} else { | } else { | ||||
/* | /* | ||||
* orig_l3 == new_l3 | * orig_l3 == new_l3 | ||||
* This can happen if multiple threads simultaneously | * This can happen if multiple threads simultaneously | |||||
* access a not-yet-mapped page. This is bad for performance | * access a not-yet-mapped page. This is bad for performance | |||||
▲ Show 20 Lines • Show All 152 Lines • ▼ Show 20 Lines | if ((old_l2 = pmap_load(l2)) != 0) { | ||||
} else { | } else { | ||||
KASSERT(SLIST_EMPTY(&free), | KASSERT(SLIST_EMPTY(&free), | ||||
("pmap_enter_l2: freed kernel page table page")); | ("pmap_enter_l2: freed kernel page table page")); | ||||
/* | /* | ||||
* Both pmap_remove_l2() and pmap_remove_l3_range() | * Both pmap_remove_l2() and pmap_remove_l3_range() | ||||
* will leave the kernel page table page zero filled. | * will leave the kernel page table page zero filled. | ||||
* Nonetheless, the TLB could have an intermediate | * Nonetheless, the TLB could have an intermediate | ||||
* entry for the kernel page table page. | * entry for the kernel page table page, so request | ||||
* an invalidation at all levels after clearing | |||||
* the L2_TABLE entry. | |||||
*/ | */ | ||||
mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK); | mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK); | ||||
if (pmap_insert_pt_page(pmap, mt, false)) | if (pmap_insert_pt_page(pmap, mt, false)) | ||||
panic("pmap_enter_l2: trie insert failed"); | panic("pmap_enter_l2: trie insert failed"); | ||||
pmap_clear(l2); | pmap_clear(l2); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, false); | ||||
} | } | ||||
} | } | ||||
if ((new_l2 & ATTR_SW_MANAGED) != 0) { | if ((new_l2 & ATTR_SW_MANAGED) != 0) { | ||||
/* | /* | ||||
* Abort this mapping if its PV entry could not be created. | * Abort this mapping if its PV entry could not be created. | ||||
*/ | */ | ||||
if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) { | if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) { | ||||
▲ Show 20 Lines • Show All 1,156 Lines • ▼ Show 20 Lines | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
oldpte = pmap_load(pte); | oldpte = pmap_load(pte); | ||||
if ((oldpte & ATTR_SW_DBM) != 0) { | if ((oldpte & ATTR_SW_DBM) != 0) { | ||||
while (!atomic_fcmpset_64(pte, &oldpte, | while (!atomic_fcmpset_64(pte, &oldpte, | ||||
(oldpte | ATTR_S1_AP_RW_BIT) & ~ATTR_SW_DBM)) | (oldpte | ATTR_S1_AP_RW_BIT) & ~ATTR_SW_DBM)) | ||||
cpu_spinwait(); | cpu_spinwait(); | ||||
if ((oldpte & ATTR_S1_AP_RW_BIT) == | if ((oldpte & ATTR_S1_AP_RW_BIT) == | ||||
ATTR_S1_AP(ATTR_S1_AP_RW)) | ATTR_S1_AP(ATTR_S1_AP_RW)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va, true); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
} | } | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 81 Lines • ▼ Show 20 Lines | if ((tpte & ATTR_AF) != 0) { | ||||
* always leave its reference bit set. Moreover, | * always leave its reference bit set. Moreover, | ||||
* since the superpage is wired, the current state of | * since the superpage is wired, the current state of | ||||
* its reference bit won't affect page replacement. | * its reference bit won't affect page replacement. | ||||
*/ | */ | ||||
if ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ | if ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ | ||||
(uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 && | (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 && | ||||
(tpte & ATTR_SW_WIRED) == 0) { | (tpte & ATTR_SW_WIRED) == 0) { | ||||
pmap_clear_bits(pte, ATTR_AF); | pmap_clear_bits(pte, ATTR_AF); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, true); | ||||
cleared++; | cleared++; | ||||
} else | } else | ||||
not_cleared++; | not_cleared++; | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
/* Rotate the PV list if it has more than one entry. */ | /* Rotate the PV list if it has more than one entry. */ | ||||
if (TAILQ_NEXT(pv, pv_next) != NULL) { | if (TAILQ_NEXT(pv, pv_next) != NULL) { | ||||
TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); | TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); | ||||
Show All 24 Lines | do { | ||||
} | } | ||||
pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__); | pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__); | ||||
tpte = pmap_load(pte); | tpte = pmap_load(pte); | ||||
if (pmap_pte_dirty(pmap, tpte)) | if (pmap_pte_dirty(pmap, tpte)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if ((tpte & ATTR_AF) != 0) { | if ((tpte & ATTR_AF) != 0) { | ||||
if ((tpte & ATTR_SW_WIRED) == 0) { | if ((tpte & ATTR_SW_WIRED) == 0) { | ||||
pmap_clear_bits(pte, ATTR_AF); | pmap_clear_bits(pte, ATTR_AF); | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va, true); | ||||
cleared++; | cleared++; | ||||
} else | } else | ||||
not_cleared++; | not_cleared++; | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
/* Rotate the PV list if it has more than one entry. */ | /* Rotate the PV list if it has more than one entry. */ | ||||
if (TAILQ_NEXT(pv, pv_next) != NULL) { | if (TAILQ_NEXT(pv, pv_next) != NULL) { | ||||
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | ||||
▲ Show 20 Lines • Show All 126 Lines • ▼ Show 20 Lines | for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, | ||||
pmap_clear_bits(l3, ATTR_AF); | pmap_clear_bits(l3, ATTR_AF); | ||||
else | else | ||||
goto maybe_invlrng; | goto maybe_invlrng; | ||||
if (va == va_next) | if (va == va_next) | ||||
va = sva; | va = sva; | ||||
continue; | continue; | ||||
maybe_invlrng: | maybe_invlrng: | ||||
if (va != va_next) { | if (va != va_next) { | ||||
pmap_invalidate_range(pmap, va, sva); | pmap_invalidate_range(pmap, va, sva, true); | ||||
va = va_next; | va = va_next; | ||||
} | } | ||||
} | } | ||||
if (va != va_next) | if (va != va_next) | ||||
pmap_invalidate_range(pmap, va, sva); | pmap_invalidate_range(pmap, va, sva, true); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
/* | /* | ||||
* Clear the modify bits on the specified physical page. | * Clear the modify bits on the specified physical page. | ||||
*/ | */ | ||||
void | void | ||||
▲ Show 20 Lines • Show All 44 Lines • ▼ Show 20 Lines | if ((oldl2 & ATTR_SW_DBM) != 0 && | ||||
*/ | */ | ||||
va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK); | va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK); | ||||
l3 = pmap_l2_to_l3(l2, va); | l3 = pmap_l2_to_l3(l2, va); | ||||
oldl3 = pmap_load(l3); | oldl3 = pmap_load(l3); | ||||
while (!atomic_fcmpset_long(l3, &oldl3, | while (!atomic_fcmpset_long(l3, &oldl3, | ||||
(oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO))) | (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO))) | ||||
cpu_spinwait(); | cpu_spinwait(); | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va, true); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
PMAP_ASSERT_STAGE1(pmap); | PMAP_ASSERT_STAGE1(pmap); | ||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
md_gen = m->md.pv_gen; | md_gen = m->md.pv_gen; | ||||
pvh_gen = pvh->pv_gen; | pvh_gen = pvh->pv_gen; | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { | if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
goto restart; | goto restart; | ||||
} | } | ||||
} | } | ||||
l2 = pmap_l2(pmap, pv->pv_va); | l2 = pmap_l2(pmap, pv->pv_va); | ||||
l3 = pmap_l2_to_l3(l2, pv->pv_va); | l3 = pmap_l2_to_l3(l2, pv->pv_va); | ||||
oldl3 = pmap_load(l3); | oldl3 = pmap_load(l3); | ||||
if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){ | if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){ | ||||
pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO)); | pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO)); | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va, true); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
} | } | ||||
void * | void * | ||||
pmap_mapbios(vm_paddr_t pa, vm_size_t size) | pmap_mapbios(vm_paddr_t pa, vm_size_t size) | ||||
▲ Show 20 Lines • Show All 1,079 Lines • ▼ Show 20 Lines | if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) || | ||||
return (rv); | return (rv); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
ptep = pmap_pte(pmap, far, &lvl); | ptep = pmap_pte(pmap, far, &lvl); | ||||
if (ptep != NULL && | if (ptep != NULL && | ||||
((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) { | ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) { | ||||
if ((pte & ATTR_S1_AP_RW_BIT) == | if ((pte & ATTR_S1_AP_RW_BIT) == | ||||
ATTR_S1_AP(ATTR_S1_AP_RO)) { | ATTR_S1_AP(ATTR_S1_AP_RO)) { | ||||
pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT); | pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT); | ||||
pmap_invalidate_page(pmap, far); | pmap_invalidate_page(pmap, far, true); | ||||
} | } | ||||
rv = KERN_SUCCESS; | rv = KERN_SUCCESS; | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
break; | break; | ||||
case ISS_DATA_DFSC_TF_L0: | case ISS_DATA_DFSC_TF_L0: | ||||
case ISS_DATA_DFSC_TF_L1: | case ISS_DATA_DFSC_TF_L1: | ||||
case ISS_DATA_DFSC_TF_L2: | case ISS_DATA_DFSC_TF_L2: | ||||
▲ Show 20 Lines • Show All 381 Lines • Show Last 20 Lines |