sys/arm64/arm64/pmap.c
[... 374 lines not shown ...]

SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
    "Are large page mappings enabled?");

/*
 * Internal flags for pmap_enter()'s helper functions.
 */
#define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
#define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */

+TAILQ_HEAD(pv_chunklist, pv_chunk);
static void free_pv_chunk(struct pv_chunk *pc);
+static void free_pv_chunk_batch(struct pv_chunklist *batch);
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
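The new TAILQ_HEAD(pv_chunklist, pv_chunk) declaration is what allows free_pv_chunk_batch() to take a whole list of chunks as a single argument: it declares a named tail-queue head type, struct pv_chunklist, whose entries are linked through a TAILQ_ENTRY field (pc_list in struct pv_chunk). As a refresher on the queue(3) macros, here is a minimal userspace sketch of that pattern; it is illustrative only, and the struct item / itemlist names are invented for the example, not part of the patch.

```c
/* Illustrative sketch of the queue(3) TAILQ pattern; not part of the patch. */
#include <sys/queue.h>
#include <stdio.h>

struct item {
        int id;
        TAILQ_ENTRY(item) link;         /* linkage field, like pc_list */
};

TAILQ_HEAD(itemlist, item);             /* declares struct itemlist, like struct pv_chunklist */

/* A named head type can be passed by pointer, as free_pv_chunk_batch() does. */
static void
print_items(struct itemlist *list)
{
        struct item *it;

        TAILQ_FOREACH(it, list, link)
                printf("item %d\n", it->id);
}

int
main(void)
{
        struct itemlist list = TAILQ_HEAD_INITIALIZER(list);
        struct item items[3];

        for (int i = 0; i < 3; i++) {
                items[i].id = i;
                TAILQ_INSERT_TAIL(&list, &items[i], link);
        }
        print_items(&list);
        return (0);
}
```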
[... 2,359 lines not shown; enclosing context: "if (!pc_is_free(pc)) {" ...]

                }
                return;
        }
        TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
        free_pv_chunk(pc);
}

static void
-free_pv_chunk(struct pv_chunk *pc)
+free_pv_chunk_dequeued(struct pv_chunk *pc)
{
        vm_page_t m;

-       mtx_lock(&pv_chunks_mutex);
-       TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
-       mtx_unlock(&pv_chunks_mutex);
        PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
        PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
        PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
        /* entire chunk is free, return it */
        m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
        dump_drop_page(m->phys_addr);
        vm_page_unwire_noq(m);
        vm_page_free(m);
}
markj: Missing newline.
+static void
+free_pv_chunk(struct pv_chunk *pc)
+{
+       mtx_lock(&pv_chunks_mutex);
+       TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+       mtx_unlock(&pv_chunks_mutex);
+       free_pv_chunk_dequeued(pc);
+}
+
+static void
+free_pv_chunk_batch(struct pv_chunklist *batch)
+{
+       struct pv_chunk *pc, *npc;
+
+       if (TAILQ_EMPTY(batch))
+               return;
+
+       mtx_lock(&pv_chunks_mutex);
+       TAILQ_FOREACH(pc, batch, pc_list) {
+               TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+       }
+       mtx_unlock(&pv_chunks_mutex);
+
+       TAILQ_FOREACH_SAFE(pc, batch, pc_list, npc) {
+               free_pv_chunk_dequeued(pc);
+       }
+}
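Splitting the page-freeing work into free_pv_chunk_dequeued() lets free_pv_chunk_batch() take pv_chunks_mutex once for all of the removals from the global pv_chunks list and then perform the per-chunk frees with that mutex dropped, instead of locking and unlocking once per chunk as repeated free_pv_chunk() calls would. A rough userspace sketch of the same shape is below; it is illustrative only, with a pthread mutex standing in for the kernel mtx and all of the names (struct chunk, free_chunk_batch, and so on) invented for the example.

```c
/* Illustrative sketch of the batch-free shape; not the kernel code. */
#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>

struct chunk {
        TAILQ_ENTRY(chunk) global_link;         /* stands in for pc_lru */
        TAILQ_ENTRY(chunk) batch_link;          /* stands in for pc_list */
};

TAILQ_HEAD(chunklist, chunk);

static struct chunklist global_list = TAILQ_HEAD_INITIALIZER(global_list);
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Per-chunk teardown, analogous to free_pv_chunk_dequeued(). */
static void
chunk_destroy(struct chunk *c)
{
        free(c);
}

/* Analogous to free_pv_chunk_batch(): one lock acquisition covers the batch. */
static void
free_chunk_batch(struct chunklist *batch)
{
        struct chunk *c, *tmp;

        if (TAILQ_EMPTY(batch))
                return;

        pthread_mutex_lock(&global_lock);
        TAILQ_FOREACH(c, batch, batch_link)
                TAILQ_REMOVE(&global_list, c, global_link);
        pthread_mutex_unlock(&global_lock);

        /* The destructive walk uses _SAFE since it frees the current node. */
        TAILQ_FOREACH_SAFE(c, batch, batch_link, tmp)
                chunk_destroy(c);
}

int
main(void)
{
        struct chunklist batch = TAILQ_HEAD_INITIALIZER(batch);
        struct chunk *c;

        for (int i = 0; i < 4; i++) {
                if ((c = calloc(1, sizeof(*c))) == NULL)
                        abort();
                TAILQ_INSERT_TAIL(&global_list, c, global_link);
                TAILQ_INSERT_TAIL(&batch, c, batch_link);
        }
        free_chunk_batch(&batch);
        return (0);
}
```

In the kernel version the second loop needs the _SAFE iterator for the same reason: free_pv_chunk_dequeued() frees the page that holds the chunk, including its pc_list linkage.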
/*
 * Returns a new PV entry, allocating a new PV chunk from the system when
 * needed. If this PV chunk allocation fails and a PV list lock pointer was
 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
 * returned.
 *
 * The given PV list lock may be released.
 */
[... 2,459 lines not shown ...]

 * this function starts.
 */
void
pmap_remove_pages(pmap_t pmap)
{
        pd_entry_t *pde;
        pt_entry_t *pte, tpte;
        struct spglist free;
+       struct pv_chunklist free_chunks;
        vm_page_t m, ml3, mt;
        pv_entry_t pv;
        struct md_page *pvh;
        struct pv_chunk *pc, *npc;
        struct rwlock *lock;
        int64_t bit;
        uint64_t inuse, bitmask;
        int allfree, field, freed, idx, lvl;
        vm_paddr_t pa;

        lock = NULL;
+       TAILQ_INIT(&free_chunks);
        SLIST_INIT(&free);
        PMAP_LOCK(pmap);
        TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
                allfree = 1;
                freed = 0;
                for (field = 0; field < _NPCM; field++) {
                        inuse = ~pc->pc_map[field] & pc_freemask[field];
                        while (inuse != 0) {
[... 124 lines not shown ...]

                                freed++;
                        }
                }
                PV_STAT(atomic_add_long(&pv_entry_frees, freed));
                PV_STAT(atomic_add_int(&pv_entry_spare, freed));
                PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
                if (allfree) {
                        TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
-                       free_pv_chunk(pc);
+                       TAILQ_INSERT_TAIL(&free_chunks, pc, pc_list);
                }
        }
        if (lock != NULL)
                rw_wunlock(lock);
        pmap_invalidate_all(pmap);
+       free_pv_chunk_batch(&free_chunks);
        PMAP_UNLOCK(pmap);
markj: Do pv chunks need to be freed under the pmap lock?
andrew: It's the same as amd64. If the comments in D21832 don't apply here I can move it.
alc: No, don't move it. The same reason applies here.
        vm_page_free_pages_toq(&free, true);
}
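Most of the per-entry work inside the loop above is elided, but the visible setup ("inuse = ~pc->pc_map[field] & pc_freemask[field];" followed by "while (inuse != 0)"), together with the bit/bitmask/idx locals, suggests the usual find-first-set walk over a free-list bitmap: each iteration isolates the lowest set bit, handles that PV entry, and clears the bit. A tiny standalone illustration of that idiom follows; it is illustrative only, uses a compiler builtin rather than the kernel's ffsl(), and the mask value is made up.

```c
/* Illustrative only: walking the set bits of a 64-bit in-use mask. */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint64_t inuse = 0x000000000000802dULL;         /* bits 0, 2, 3, 5, 15 set */
        uint64_t bitmask;
        int bit;

        while (inuse != 0) {
                bit = __builtin_ffsll(inuse) - 1;       /* index of lowest set bit */
                bitmask = UINT64_C(1) << bit;
                printf("index %d is in use\n", bit);    /* "handle" the entry */
                inuse &= ~bitmask;                      /* clear it and continue */
        }
        return (0);
}
```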
/*
 * This is used to check if a page has been accessed or modified.
 */
static boolean_t
pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)

[... 2,143 lines not shown ...]