Changeset View
Changeset View
Standalone View
Standalone View
head/sys/arm64/arm64/pmap.c
Show First 20 Lines • Show All 2,504 Lines • ▼ Show 20 Lines | if (TAILQ_EMPTY(&m->md.pv_list) && | ||||
if (TAILQ_EMPTY(&pvh->pv_list)) | if (TAILQ_EMPTY(&pvh->pv_list)) | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
} | } | ||||
} | } | ||||
return (pmap_unuse_pt(pmap, va, l2e, free)); | return (pmap_unuse_pt(pmap, va, l2e, free)); | ||||
} | } | ||||
/*
 * Remove the specified range of addresses from the L3 page table that is
 * identified by the given L2 entry.
 *
 * The range must lie entirely within a single L3 page table, i.e., it may
 * not cross an L2 block boundary (asserted below).  The caller holds the
 * pmap lock; *lockp, if non-NULL, is the currently held PV list lock and
 * may be exchanged for a different one as managed pages are encountered.
 * Freed page table pages are queued onto "free" for the caller to release.
 */
static void
pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
    vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
{
	struct md_page *pvh;
	struct rwlock *new_lock;
	pt_entry_t *l3, old_l3;
	vm_offset_t va;
	vm_page_t m;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
	    ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
	/*
	 * "va" tracks the start of the current run of removed mappings whose
	 * TLB invalidation is still pending; va == eva means no run is open.
	 * Batching contiguous removals into a single pmap_invalidate_range()
	 * call avoids issuing one TLB shootdown per page.
	 */
	va = eva;
	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
		if (!pmap_l3_valid(pmap_load(l3))) {
			/* A hole ends the current run; flush it now. */
			if (va != eva) {
				pmap_invalidate_range(pmap, va, sva);
				va = eva;
			}
			continue;
		}
		old_l3 = pmap_load_clear(l3);
		if ((old_l3 & ATTR_SW_WIRED) != 0)
			pmap->pm_stats.wired_count--;
		pmap_resident_count_dec(pmap, 1);
		if ((old_l3 & ATTR_SW_MANAGED) != 0) {
			/* Propagate hardware dirty/accessed state to the page. */
			m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
			if (pmap_page_dirty(old_l3))
				vm_page_dirty(m);
			if ((old_l3 & ATTR_AF) != 0)
				vm_page_aflag_set(m, PGA_REFERENCED);
			new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
			if (new_lock != *lockp) {
				if (*lockp != NULL) {
					/*
					 * Pending TLB invalidations must be
					 * performed before the PV list lock is
					 * released.  Otherwise, a concurrent
					 * pmap_remove_all() on a physical page
					 * could return while a stale TLB entry
					 * still provides access to that page.
					 */
					if (va != eva) {
						pmap_invalidate_range(pmap, va,
						    sva);
						va = eva;
					}
					rw_wunlock(*lockp);
				}
				*lockp = new_lock;
				rw_wlock(*lockp);
			}
			pmap_pvh_free(&m->md, pmap, sva);
			/*
			 * If this was the page's last mapping (including any
			 * superpage mapping tracked in the pv header), it can
			 * no longer be written through the pmap.
			 */
			if (TAILQ_EMPTY(&m->md.pv_list) &&
			    (m->flags & PG_FICTITIOUS) == 0) {
				pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
				if (TAILQ_EMPTY(&pvh->pv_list))
					vm_page_aflag_clear(m, PGA_WRITEABLE);
			}
		}
		/* Open a new invalidation run at this page if none is open. */
		if (va == eva)
			va = sva;
		if (pmap_unuse_pt(pmap, sva, l2e, free)) {
			/*
			 * The L3 page table page's last entry was removed and
			 * the page was freed; advance past this page so the
			 * final invalidation below covers it, and stop.
			 */
			sva += L3_SIZE;
			break;
		}
	}
	/* Flush any invalidation run still pending at loop exit. */
	if (va != eva)
		pmap_invalidate_range(pmap, va, sva);
}
/* | |||||
* Remove the given range of addresses from the specified map. | * Remove the given range of addresses from the specified map. | ||||
* | * | ||||
* It is assumed that the start and end are properly | * It is assumed that the start and end are properly | ||||
* rounded to the page size. | * rounded to the page size. | ||||
*/ | */ | ||||
void | void | ||||
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
vm_offset_t va, va_next; | vm_offset_t va_next; | ||||
pd_entry_t *l0, *l1, *l2; | pd_entry_t *l0, *l1, *l2; | ||||
pt_entry_t l3_paddr, *l3; | pt_entry_t l3_paddr; | ||||
struct spglist free; | struct spglist free; | ||||
/* | /* | ||||
* Perform an unsynchronized read. This is, however, safe. | * Perform an unsynchronized read. This is, however, safe. | ||||
*/ | */ | ||||
if (pmap->pm_stats.resident_count == 0) | if (pmap->pm_stats.resident_count == 0) | ||||
return; | return; | ||||
▲ Show 20 Lines • Show All 56 Lines • ▼ Show 20 Lines | for (; sva < eva; sva = va_next) { | ||||
/* | /* | ||||
* Limit our scan to either the end of the va represented | * Limit our scan to either the end of the va represented | ||||
* by the current page table page, or to the end of the | * by the current page table page, or to the end of the | ||||
* range being removed. | * range being removed. | ||||
*/ | */ | ||||
if (va_next > eva) | if (va_next > eva) | ||||
va_next = eva; | va_next = eva; | ||||
va = va_next; | pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free, | ||||
for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, | &lock); | ||||
sva += L3_SIZE) { | |||||
if (l3 == NULL) | |||||
panic("l3 == NULL"); | |||||
if (pmap_load(l3) == 0) { | |||||
if (va != va_next) { | |||||
pmap_invalidate_range(pmap, va, sva); | |||||
va = va_next; | |||||
} | } | ||||
continue; | |||||
} | |||||
if (va == va_next) | |||||
va = sva; | |||||
if (pmap_remove_l3(pmap, l3, sva, l3_paddr, &free, | |||||
&lock)) { | |||||
sva += L3_SIZE; | |||||
break; | |||||
} | |||||
} | |||||
if (va != va_next) | |||||
pmap_invalidate_range(pmap, va, sva); | |||||
} | |||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
} | } | ||||
/* | /* | ||||
* Routine: pmap_remove_all | * Routine: pmap_remove_all | ||||
▲ Show 20 Lines • Show All 786 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* The parameter "m" is only used when creating a managed, writeable mapping. | * The parameter "m" is only used when creating a managed, writeable mapping. | ||||
*/ | */ | ||||
static int | static int | ||||
pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags, | pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags, | ||||
vm_page_t m, struct rwlock **lockp) | vm_page_t m, struct rwlock **lockp) | ||||
{ | { | ||||
struct spglist free; | struct spglist free; | ||||
pd_entry_t *l2, *l3, old_l2; | pd_entry_t *l2, old_l2; | ||||
vm_offset_t sva; | |||||
vm_page_t l2pg, mt; | vm_page_t l2pg, mt; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ? | if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ? | ||||
NULL : lockp)) == NULL) { | NULL : lockp)) == NULL) { | ||||
CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p", | CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p", | ||||
va, pmap); | va, pmap); | ||||
Show All 12 Lines | if ((flags & PMAP_ENTER_NOREPLACE) != 0) { | ||||
va, pmap); | va, pmap); | ||||
return (KERN_FAILURE); | return (KERN_FAILURE); | ||||
} | } | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) | if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) | ||||
(void)pmap_remove_l2(pmap, l2, va, | (void)pmap_remove_l2(pmap, l2, va, | ||||
pmap_load(pmap_l1(pmap, va)), &free, lockp); | pmap_load(pmap_l1(pmap, va)), &free, lockp); | ||||
else | else | ||||
for (sva = va; sva < va + L2_SIZE; sva += PAGE_SIZE) { | pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE, | ||||
l3 = pmap_l2_to_l3(l2, sva); | &free, lockp); | ||||
if (pmap_l3_valid(pmap_load(l3)) && | |||||
pmap_remove_l3(pmap, l3, sva, old_l2, &free, | |||||
lockp) != 0) | |||||
break; | |||||
} | |||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
if (va >= VM_MAXUSER_ADDRESS) { | if (va >= VM_MAXUSER_ADDRESS) { | ||||
/* | /* | ||||
* Both pmap_remove_l2() and pmap_remove_l3() will | * Both pmap_remove_l2() and pmap_remove_l3() will | ||||
* leave the kernel page table page zero filled. | * leave the kernel page table page zero filled. | ||||
*/ | */ | ||||
mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK); | mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK); | ||||
if (pmap_insert_pt_page(pmap, mt, false)) | if (pmap_insert_pt_page(pmap, mt, false)) | ||||
▲ Show 20 Lines • Show All 2,092 Lines • Show Last 20 Lines |