head/sys/amd64/amd64/pmap.c
...
#include <sys/ktr.h> | #include <sys/ktr.h> | ||||
#include <sys/lock.h> | #include <sys/lock.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/mman.h> | #include <sys/mman.h> | ||||
#include <sys/mutex.h> | #include <sys/mutex.h> | ||||
#include <sys/proc.h> | #include <sys/proc.h> | ||||
#include <sys/rwlock.h> | #include <sys/rwlock.h> | ||||
#include <sys/sx.h> | #include <sys/sx.h> | ||||
#include <sys/turnstile.h> | |||||
#include <sys/vmem.h> | #include <sys/vmem.h> | ||||
#include <sys/vmmeter.h> | #include <sys/vmmeter.h> | ||||
#include <sys/sched.h> | #include <sys/sched.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <vm/vm.h> | #include <vm/vm.h> | ||||
#include <vm/vm_param.h> | #include <vm/vm_param.h> | ||||
...
static struct pmap_preinit_mapping { | static struct pmap_preinit_mapping { | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
vm_size_t sz; | vm_size_t sz; | ||||
int mode; | int mode; | ||||
} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; | } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; | ||||
static int pmap_initialized; | static int pmap_initialized; | ||||
static struct rwlock_padalign pvh_global_lock; | |||||
/* | /* | ||||
* Data for the pv entry allocation mechanism | * Data for the pv entry allocation mechanism. | ||||
* Updates to pv_invl_gen are protected by the pv_list_locks[] | |||||
* elements, but reads are not. | |||||
*/ | */ | ||||
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); | static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); | ||||
static struct mtx pv_chunks_mutex; | static struct mtx pv_chunks_mutex; | ||||
static struct rwlock pv_list_locks[NPV_LIST_LOCKS]; | static struct rwlock pv_list_locks[NPV_LIST_LOCKS]; | ||||
static u_long pv_invl_gen[NPV_LIST_LOCKS]; | |||||
static struct md_page *pv_table; | static struct md_page *pv_table; | ||||
/* | /* | ||||
* All those kernel PT submaps that BSD is so fond of | * All those kernel PT submaps that BSD is so fond of | ||||
*/ | */ | ||||
pt_entry_t *CMAP1 = 0; | pt_entry_t *CMAP1 = 0; | ||||
caddr_t CADDR1 = 0; | caddr_t CADDR1 = 0; | ||||
static vm_offset_t qframe = 0; | static vm_offset_t qframe = 0; | ||||
... | CPU_FOREACH(i) {
res += cpuid_to_pcpu[i]->pc_pm_save_cnt; | res += cpuid_to_pcpu[i]->pc_pm_save_cnt; | ||||
} | } | ||||
return (sysctl_handle_64(oidp, &res, 0, req)); | return (sysctl_handle_64(oidp, &res, 0, req)); | ||||
} | } | ||||
SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RW | | SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RW | | ||||
CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU", | CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU", | ||||
"Count of saved TLB context on switch"); | "Count of saved TLB context on switch"); | ||||
static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker = | |||||
LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker); | |||||
static struct mtx invl_gen_mtx; | |||||
static u_long pmap_invl_gen = 0; | |||||
/* Fake lock object to satisfy turnstiles interface. */ | |||||
static struct lock_object invl_gen_ts = { | |||||
.lo_name = "invlts", | |||||
}; | |||||
#define PMAP_ASSERT_NOT_IN_DI() \ | |||||
KASSERT(curthread->td_md.md_invl_gen.gen == 0, ("DI already started")) | |||||
/* | /* | ||||
* Start a new Delayed Invalidation (DI) block of code, executed by | |||||
* the current thread. Within a DI block, the current thread may | |||||
* destroy both the page table and PV list entries for a mapping and | |||||
* then release the corresponding PV list lock before ensuring that | |||||
* the mapping is flushed from the TLBs of any processors with the | |||||
* pmap active. | |||||
*/ | |||||
static void | |||||
pmap_delayed_invl_started(void) | |||||
{ | |||||
struct pmap_invl_gen *invl_gen; | |||||
u_long currgen; | |||||
invl_gen = &curthread->td_md.md_invl_gen; | |||||
PMAP_ASSERT_NOT_IN_DI(); | |||||
mtx_lock(&invl_gen_mtx); | |||||
if (LIST_EMPTY(&pmap_invl_gen_tracker)) | |||||
currgen = pmap_invl_gen; | |||||
else | |||||
currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen; | |||||
invl_gen->gen = currgen + 1; | |||||
LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link); | |||||
mtx_unlock(&invl_gen_mtx); | |||||
} | |||||
/* | |||||
* Finish the DI block, previously started by the current thread. All | |||||
* required TLB flushes for the pages marked by | |||||
* pmap_delayed_invl_page() must be finished before this function is | |||||
* called. | |||||
* | |||||
* This function works by bumping the global DI generation number to | |||||
* the generation number of the current thread's DI, unless there is a | |||||
* pending DI that started earlier. In the latter case, bumping the | |||||
* global DI generation number would incorrectly signal that the | |||||
* earlier DI had finished. Instead, this function bumps the earlier | |||||
* DI's generation number to match the generation number of the | |||||
* current thread's DI. | |||||
*/ | |||||
static void | |||||
pmap_delayed_invl_finished(void) | |||||
{ | |||||
struct pmap_invl_gen *invl_gen, *next; | |||||
struct turnstile *ts; | |||||
invl_gen = &curthread->td_md.md_invl_gen; | |||||
KASSERT(invl_gen->gen != 0, ("missed invl_started")); | |||||
mtx_lock(&invl_gen_mtx); | |||||
next = LIST_NEXT(invl_gen, link); | |||||
if (next == NULL) { | |||||
turnstile_chain_lock(&invl_gen_ts); | |||||
ts = turnstile_lookup(&invl_gen_ts); | |||||
pmap_invl_gen = invl_gen->gen; | |||||
if (ts != NULL) { | |||||
turnstile_broadcast(ts, TS_SHARED_QUEUE); | |||||
turnstile_unpend(ts, TS_SHARED_LOCK); | |||||
} | |||||
turnstile_chain_unlock(&invl_gen_ts); | |||||
} else { | |||||
next->gen = invl_gen->gen; | |||||
} | |||||
LIST_REMOVE(invl_gen, link); | |||||
mtx_unlock(&invl_gen_mtx); | |||||
invl_gen->gen = 0; | |||||
} | |||||
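The bump-or-propagate rule above is the heart of the mechanism. The following standalone sketch is a reviewer illustration only: the di_* names and the userland framing are made up, and all locking is omitted. It models the two rules and exercises the interesting case, a younger DI block finishing before an older one:

#include <assert.h>
#include <stdio.h>

struct di {
	unsigned long gen;
	struct di *older;	/* entry that started before this one */
};

static unsigned long global_gen;	/* models pmap_invl_gen */
static struct di *newest;	/* models LIST_FIRST(&pmap_invl_gen_tracker) */

static void
di_start(struct di *d)
{
	/* One past the newest pending generation, as in
	 * pmap_delayed_invl_started(). */
	d->gen = (newest != NULL ? newest->gen : global_gen) + 1;
	d->older = newest;
	newest = d;
}

static void
di_finish(struct di *d)
{
	struct di **pp;

	if (d->older == NULL)
		global_gen = d->gen;	/* oldest pending DI: publish */
	else
		d->older->gen = d->gen;	/* let the earlier DI publish for us */
	for (pp = &newest; *pp != d; pp = &(*pp)->older)
		continue;
	*pp = d->older;			/* unlink, as LIST_REMOVE() does */
	d->gen = 0;
}

int
main(void)
{
	struct di a, b;

	di_start(&a);			/* a.gen == 1 */
	di_start(&b);			/* b.gen == 2 */
	di_finish(&b);			/* a is older and pending: a.gen = 2 */
	assert(global_gen == 0);	/* waiters must keep blocking */
	di_finish(&a);			/* a was oldest: publish generation 2 */
	assert(global_gen == 2);	/* both DI blocks now visible */
	printf("global_gen == %lu\n", global_gen);
	return (0);
}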
#ifdef PV_STATS | |||||
static long invl_wait; | |||||
SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait, CTLFLAG_RD, &invl_wait, 0, | |||||
"Number of times DI invalidation blocked pmap_remove_all/write"); | |||||
#endif | |||||
static u_long * | |||||
pmap_delayed_invl_genp(vm_page_t m) | |||||
{ | |||||
return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]); | |||||
} | |||||
/* | |||||
* Ensure that all currently executing DI blocks that need to flush | |||||
* the TLB for the given page m have actually flushed the TLB by the | |||||
* time this function returns. If the page m has an empty PV list and | |||||
* we call pmap_delayed_invl_wait(), upon its return we know that no | |||||
* CPU has a valid mapping for the page m in either its page table or | |||||
* TLB. | |||||
* | |||||
* This function works by blocking until the global DI generation | |||||
* number catches up with the generation number associated with the | |||||
* given page m and its PV list. Since this function's callers | |||||
* typically own an object lock and sometimes own a page lock, it | |||||
* cannot sleep. Instead, it blocks on a turnstile to relinquish the | |||||
* processor. | |||||
*/ | |||||
static void | |||||
pmap_delayed_invl_wait(vm_page_t m) | |||||
{ | |||||
struct thread *td; | |||||
struct turnstile *ts; | |||||
u_long *m_gen; | |||||
#ifdef PV_STATS | |||||
bool accounted = false; | |||||
#endif | |||||
td = curthread; | |||||
m_gen = pmap_delayed_invl_genp(m); | |||||
while (*m_gen > pmap_invl_gen) { | |||||
#ifdef PV_STATS | |||||
if (!accounted) { | |||||
atomic_add_long(&invl_wait, 1); | |||||
accounted = true; | |||||
} | |||||
#endif | |||||
ts = turnstile_trywait(&invl_gen_ts); | |||||
if (*m_gen > pmap_invl_gen) | |||||
turnstile_wait(ts, NULL, TS_SHARED_QUEUE); | |||||
else | |||||
turnstile_cancel(ts); | |||||
} | |||||
} | |||||
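/*
 * Reviewer annotation (not part of the patch): note that *m_gen is
 * re-checked after turnstile_trywait() above. If pmap_invl_gen caught
 * up between the first test and queueing on the turnstile, the wait is
 * cancelled, so a wakeup issued by pmap_delayed_invl_finished() in
 * that window cannot be missed.
 */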
/* | |||||
* Mark the page m's PV list as participating in the current thread's | |||||
* DI block. Any threads concurrently using m's PV list to remove or | |||||
* restrict all mappings to m will wait for the current thread's DI | |||||
* block to complete before proceeding. | |||||
* | |||||
* The function works by setting the DI generation number for m's PV | |||||
* list to at least the number for the current thread. This forces | |||||
* a caller to pmap_delayed_invl_wait() to spin until the current thread | |||||
* calls pmap_delayed_invl_finished(). | |||||
*/ | |||||
static void | |||||
pmap_delayed_invl_page(vm_page_t m) | |||||
{ | |||||
u_long gen, *m_gen; | |||||
rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED); | |||||
gen = curthread->td_md.md_invl_gen.gen; | |||||
if (gen == 0) | |||||
return; | |||||
m_gen = pmap_delayed_invl_genp(m); | |||||
if (*m_gen < gen) | |||||
*m_gen = gen; | |||||
} | |||||
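/*
 * Reviewer sketch (annotation only, not part of the patch) of how the
 * pmap_delayed_invl_*() primitives pair up, following their use in the
 * pmap_remove() and pmap_remove_all() changes below:
 *
 *	remover thread				waiting thread
 *	--------------				--------------
 *	pmap_delayed_invl_started();
 *	... clear PTEs, free PV entries ...
 *	pmap_delayed_invl_page(m);
 *	... drop the PV list lock ...		pmap_delayed_invl_wait(m);
 *	pmap_invalidate_all(pmap);		(blocks on the turnstile)
 *	pmap_delayed_invl_finished();		(returns: no stale TLB entries)
 */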
/* | |||||
* Crashdump maps. | * Crashdump maps. | ||||
*/ | */ | ||||
static caddr_t crashdumpmap; | static caddr_t crashdumpmap; | ||||
static void free_pv_chunk(struct pv_chunk *pc); | static void free_pv_chunk(struct pv_chunk *pc); | ||||
static void free_pv_entry(pmap_t pmap, pv_entry_t pv); | static void free_pv_entry(pmap_t pmap, pv_entry_t pv); | ||||
static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp); | static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp); | ||||
static int popcnt_pc_map_elem_pq(uint64_t elem); | static int popcnt_pc_map_elem_pq(uint64_t elem); | ||||
... | pmap_bootstrap(vm_paddr_t *firstaddr)
PMAP_LOCK_INIT(kernel_pmap); | PMAP_LOCK_INIT(kernel_pmap); | ||||
kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys); | kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys); | ||||
kernel_pmap->pm_cr3 = KPML4phys; | kernel_pmap->pm_cr3 = KPML4phys; | ||||
CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ | CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ | ||||
TAILQ_INIT(&kernel_pmap->pm_pvchunk); | TAILQ_INIT(&kernel_pmap->pm_pvchunk); | ||||
kernel_pmap->pm_flags = pmap_flags; | kernel_pmap->pm_flags = pmap_flags; | ||||
/* | /* | ||||
* Initialize the global pv list lock. | * Initialize the TLB invalidation generation number lock. | ||||
*/ | */ | ||||
rw_init(&pvh_global_lock, "pmap pv global"); | mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF); | ||||
/* | /* | ||||
* Reserve some special page table entries/VA space for temporary | * Reserve some special page table entries/VA space for temporary | ||||
* mapping of pages. | * mapping of pages. | ||||
*/ | */ | ||||
#define SYSMAP(c, p, v, n) \ | #define SYSMAP(c, p, v, n) \ | ||||
v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); | v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); | ||||
... | _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
/* | /* | ||||
* Allocate a page table page. | * Allocate a page table page. | ||||
*/ | */ | ||||
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | | if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | | ||||
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { | ||||
if (lockp != NULL) { | if (lockp != NULL) { | ||||
RELEASE_PV_LIST_LOCK(lockp); | RELEASE_PV_LIST_LOCK(lockp); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
rw_runlock(&pvh_global_lock); | PMAP_ASSERT_NOT_IN_DI(); | ||||
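/*
 * Reviewer annotation (not part of the patch): the assertion above
 * matters because sleeping in VM_WAIT with a DI block open would hold
 * back the global generation and stall every pmap_delayed_invl_wait()
 * caller behind this thread.
 */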
VM_WAIT; | VM_WAIT; | ||||
rw_rlock(&pvh_global_lock); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
} | } | ||||
/* | /* | ||||
* Indicate the need to retry. While waiting, the page table | * Indicate the need to retry. While waiting, the page table | ||||
* page may have been allocated. | * page may have been allocated. | ||||
*/ | */ | ||||
return (NULL); | return (NULL); | ||||
... | reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
pt_entry_t PG_G, PG_A, PG_M, PG_RW; | pt_entry_t PG_G, PG_A, PG_M, PG_RW; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
vm_page_t m, m_pc; | vm_page_t m, m_pc; | ||||
struct spglist free; | struct spglist free; | ||||
uint64_t inuse; | uint64_t inuse; | ||||
int bit, field, freed; | int bit, field, freed; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); | PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); | ||||
KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); | KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); | ||||
pmap = NULL; | pmap = NULL; | ||||
m_pc = NULL; | m_pc = NULL; | ||||
PG_G = PG_A = PG_M = PG_RW = 0; | PG_G = PG_A = PG_M = PG_RW = 0; | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
TAILQ_INIT(&new_tail); | TAILQ_INIT(&new_tail); | ||||
pmap_delayed_invl_started(); | |||||
mtx_lock(&pv_chunks_mutex); | mtx_lock(&pv_chunks_mutex); | ||||
while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) { | while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) { | ||||
TAILQ_REMOVE(&pv_chunks, pc, pc_lru); | TAILQ_REMOVE(&pv_chunks, pc, pc_lru); | ||||
mtx_unlock(&pv_chunks_mutex); | mtx_unlock(&pv_chunks_mutex); | ||||
if (pmap != pc->pc_pmap) { | if (pmap != pc->pc_pmap) { | ||||
if (pmap != NULL) { | if (pmap != NULL) { | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
if (pmap != locked_pmap) | if (pmap != locked_pmap) | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
pmap_delayed_invl_finished(); | |||||
pmap_delayed_invl_started(); | |||||
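/*
 * Reviewer annotation (not part of the patch): the DI block is closed
 * and re-opened for each pmap visited; pmap_invalidate_all() above has
 * already flushed the previous pmap (when there was one), so finishing
 * its DI is safe, and restarting bounds how long waiters can be
 * blocked by the scan.
 */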
pmap = pc->pc_pmap; | pmap = pc->pc_pmap; | ||||
/* Avoid deadlock and lock recursion. */ | /* Avoid deadlock and lock recursion. */ | ||||
if (pmap > locked_pmap) { | if (pmap > locked_pmap) { | ||||
RELEASE_PV_LIST_LOCK(lockp); | RELEASE_PV_LIST_LOCK(lockp); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
} else if (pmap != locked_pmap && | } else if (pmap != locked_pmap && | ||||
!PMAP_TRYLOCK(pmap)) { | !PMAP_TRYLOCK(pmap)) { | ||||
pmap = NULL; | pmap = NULL; | ||||
... | for (field = 0; field < _NPCM; field++) {
if (TAILQ_EMPTY(&m->md.pv_list) && | if (TAILQ_EMPTY(&m->md.pv_list) && | ||||
(m->flags & PG_FICTITIOUS) == 0) { | (m->flags & PG_FICTITIOUS) == 0) { | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
if (TAILQ_EMPTY(&pvh->pv_list)) { | if (TAILQ_EMPTY(&pvh->pv_list)) { | ||||
vm_page_aflag_clear(m, | vm_page_aflag_clear(m, | ||||
PGA_WRITEABLE); | PGA_WRITEABLE); | ||||
} | } | ||||
} | } | ||||
pmap_delayed_invl_page(m); | |||||
pc->pc_map[field] |= 1UL << bit; | pc->pc_map[field] |= 1UL << bit; | ||||
pmap_unuse_pt(pmap, va, *pde, &free); | pmap_unuse_pt(pmap, va, *pde, &free); | ||||
freed++; | freed++; | ||||
} | } | ||||
} | } | ||||
if (freed == 0) { | if (freed == 0) { | ||||
TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); | TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); | ||||
mtx_lock(&pv_chunks_mutex); | mtx_lock(&pv_chunks_mutex); | ||||
... | reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
} | } | ||||
TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); | TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); | ||||
mtx_unlock(&pv_chunks_mutex); | mtx_unlock(&pv_chunks_mutex); | ||||
if (pmap != NULL) { | if (pmap != NULL) { | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
if (pmap != locked_pmap) | if (pmap != locked_pmap) | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
pmap_delayed_invl_finished(); | |||||
if (m_pc == NULL && !SLIST_EMPTY(&free)) { | if (m_pc == NULL && !SLIST_EMPTY(&free)) { | ||||
m_pc = SLIST_FIRST(&free); | m_pc = SLIST_FIRST(&free); | ||||
SLIST_REMOVE_HEAD(&free, plinks.s.ss); | SLIST_REMOVE_HEAD(&free, plinks.s.ss); | ||||
/* Recycle a freed page table page. */ | /* Recycle a freed page table page. */ | ||||
m_pc->wire_count = 1; | m_pc->wire_count = 1; | ||||
atomic_add_int(&vm_cnt.v_wire_count, 1); | atomic_add_int(&vm_cnt.v_wire_count, 1); | ||||
} | } | ||||
pmap_free_zero_pages(&free); | pmap_free_zero_pages(&free); | ||||
return (m_pc); | return (m_pc); | ||||
} | } | ||||
/* | /* | ||||
* free the pv_entry back to the free list | * free the pv_entry back to the free list | ||||
*/ | */ | ||||
static void | static void | ||||
free_pv_entry(pmap_t pmap, pv_entry_t pv) | free_pv_entry(pmap_t pmap, pv_entry_t pv) | ||||
{ | { | ||||
struct pv_chunk *pc; | struct pv_chunk *pc; | ||||
int idx, field, bit; | int idx, field, bit; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PV_STAT(atomic_add_long(&pv_entry_frees, 1)); | PV_STAT(atomic_add_long(&pv_entry_frees, 1)); | ||||
PV_STAT(atomic_add_int(&pv_entry_spare, 1)); | PV_STAT(atomic_add_int(&pv_entry_spare, 1)); | ||||
PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); | PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); | ||||
pc = pv_to_chunk(pv); | pc = pv_to_chunk(pv); | ||||
idx = pv - &pc->pc_pventry[0]; | idx = pv - &pc->pc_pventry[0]; | ||||
field = idx / 64; | field = idx / 64; | ||||
bit = idx % 64; | bit = idx % 64; | ||||
...
static pv_entry_t | static pv_entry_t | ||||
get_pv_entry(pmap_t pmap, struct rwlock **lockp) | get_pv_entry(pmap_t pmap, struct rwlock **lockp) | ||||
{ | { | ||||
int bit, field; | int bit, field; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
struct pv_chunk *pc; | struct pv_chunk *pc; | ||||
vm_page_t m; | vm_page_t m; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); | PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); | ||||
retry: | retry: | ||||
pc = TAILQ_FIRST(&pmap->pm_pvchunk); | pc = TAILQ_FIRST(&pmap->pm_pvchunk); | ||||
if (pc != NULL) { | if (pc != NULL) { | ||||
for (field = 0; field < _NPCM; field++) { | for (field = 0; field < _NPCM; field++) { | ||||
if (pc->pc_map[field]) { | if (pc->pc_map[field]) { | ||||
bit = bsfq(pc->pc_map[field]); | bit = bsfq(pc->pc_map[field]); | ||||
...
static void | static void | ||||
reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp) | reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp) | ||||
{ | { | ||||
struct pch new_tail; | struct pch new_tail; | ||||
struct pv_chunk *pc; | struct pv_chunk *pc; | ||||
int avail, free; | int avail, free; | ||||
vm_page_t m; | vm_page_t m; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL")); | KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL")); | ||||
/* | /* | ||||
* Newly allocated PV chunks must be stored in a private list until | * Newly allocated PV chunks must be stored in a private list until | ||||
* the required number of PV chunks have been allocated. Otherwise, | * the required number of PV chunks have been allocated. Otherwise, | ||||
* reclaim_pv_chunk() could recycle one of these chunks. In | * reclaim_pv_chunk() could recycle one of these chunks. In | ||||
* contrast, these chunks must be added to the pmap upon allocation. | * contrast, these chunks must be added to the pmap upon allocation. | ||||
...
* otherwise. This operation can be performed on pv lists for either 4KB or | * otherwise. This operation can be performed on pv lists for either 4KB or | ||||
* 2MB page mappings. | * 2MB page mappings. | ||||
*/ | */ | ||||
static __inline pv_entry_t | static __inline pv_entry_t | ||||
pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) | pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { | TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { | ||||
if (pmap == PV_PMAP(pv) && va == pv->pv_va) { | if (pmap == PV_PMAP(pv) && va == pv->pv_va) { | ||||
TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); | TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); | ||||
pvh->pv_gen++; | pvh->pv_gen++; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
return (pv); | return (pv); | ||||
...
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
struct pv_chunk *pc; | struct pv_chunk *pc; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
vm_offset_t va_last; | vm_offset_t va_last; | ||||
vm_page_t m; | vm_page_t m; | ||||
int bit, field; | int bit, field; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
KASSERT((pa & PDRMASK) == 0, | KASSERT((pa & PDRMASK) == 0, | ||||
("pmap_pv_demote_pde: pa is not 2mpage aligned")); | ("pmap_pv_demote_pde: pa is not 2mpage aligned")); | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); | CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); | ||||
/* | /* | ||||
* Transfer the 2mpage's pv entry for this mapping to the first | * Transfer the 2mpage's pv entry for this mapping to the first | ||||
* page's pv list. Once this transfer begins, the pv list lock | * page's pv list. Once this transfer begins, the pv list lock | ||||
...
pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | ||||
struct rwlock **lockp) | struct rwlock **lockp) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
vm_offset_t va_last; | vm_offset_t va_last; | ||||
vm_page_t m; | vm_page_t m; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
KASSERT((pa & PDRMASK) == 0, | KASSERT((pa & PDRMASK) == 0, | ||||
("pmap_pv_promote_pde: pa is not 2mpage aligned")); | ("pmap_pv_promote_pde: pa is not 2mpage aligned")); | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); | CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); | ||||
/* | /* | ||||
* Transfer the first page's pv entry for this mapping to the 2mpage's | * Transfer the first page's pv entry for this mapping to the 2mpage's | ||||
* pv list. Aside from avoiding the cost of a call to get_pv_entry(), | * pv list. Aside from avoiding the cost of a call to get_pv_entry(), | ||||
* a transfer avoids the possibility that get_pv_entry() calls | * a transfer avoids the possibility that get_pv_entry() calls | ||||
...
* memory can be allocated without resorting to reclamation. | * memory can be allocated without resorting to reclamation. | ||||
*/ | */ | ||||
static boolean_t | static boolean_t | ||||
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, | pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, | ||||
struct rwlock **lockp) | struct rwlock **lockp) | ||||
{ | { | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
/* Pass NULL instead of the lock pointer to disable reclamation. */ | /* Pass NULL instead of the lock pointer to disable reclamation. */ | ||||
if ((pv = get_pv_entry(pmap, NULL)) != NULL) { | if ((pv = get_pv_entry(pmap, NULL)) != NULL) { | ||||
pv->pv_va = va; | pv->pv_va = va; | ||||
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | ||||
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); | TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); | ||||
m->md.pv_gen++; | m->md.pv_gen++; | ||||
return (TRUE); | return (TRUE); | ||||
} else | } else | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
/* | /* | ||||
* Conditionally create the PV entry for a 2MB page mapping if the required | * Conditionally create the PV entry for a 2MB page mapping if the required | ||||
* memory can be allocated without resorting to reclamation. | * memory can be allocated without resorting to reclamation. | ||||
*/ | */ | ||||
static boolean_t | static boolean_t | ||||
pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | ||||
struct rwlock **lockp) | struct rwlock **lockp) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
/* Pass NULL instead of the lock pointer to disable reclamation. */ | /* Pass NULL instead of the lock pointer to disable reclamation. */ | ||||
if ((pv = get_pv_entry(pmap, NULL)) != NULL) { | if ((pv = get_pv_entry(pmap, NULL)) != NULL) { | ||||
pv->pv_va = va; | pv->pv_va = va; | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); | CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); | ||||
pvh = pa_to_pvh(pa); | pvh = pa_to_pvh(pa); | ||||
TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); | TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); | ||||
pvh->pv_gen++; | pvh->pv_gen++; | ||||
... | for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
va < eva; va += PAGE_SIZE, m++) { | va < eva; va += PAGE_SIZE, m++) { | ||||
if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) | if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
if (oldpde & PG_A) | if (oldpde & PG_A) | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
if (TAILQ_EMPTY(&m->md.pv_list) && | if (TAILQ_EMPTY(&m->md.pv_list) && | ||||
TAILQ_EMPTY(&pvh->pv_list)) | TAILQ_EMPTY(&pvh->pv_list)) | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
pmap_delayed_invl_page(m); | |||||
} | } | ||||
} | } | ||||
if (pmap == kernel_pmap) { | if (pmap == kernel_pmap) { | ||||
pmap_remove_kernel_pde(pmap, pdq, sva); | pmap_remove_kernel_pde(pmap, pdq, sva); | ||||
} else { | } else { | ||||
mpte = pmap_lookup_pt_page(pmap, sva); | mpte = pmap_lookup_pt_page(pmap, sva); | ||||
if (mpte != NULL) { | if (mpte != NULL) { | ||||
pmap_remove_pt_page(pmap, mpte); | pmap_remove_pt_page(pmap, mpte); | ||||
... | if (oldpte & PG_MANAGED) {
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); | ||||
pmap_pvh_free(&m->md, pmap, va); | pmap_pvh_free(&m->md, pmap, va); | ||||
if (TAILQ_EMPTY(&m->md.pv_list) && | if (TAILQ_EMPTY(&m->md.pv_list) && | ||||
(m->flags & PG_FICTITIOUS) == 0) { | (m->flags & PG_FICTITIOUS) == 0) { | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
if (TAILQ_EMPTY(&pvh->pv_list)) | if (TAILQ_EMPTY(&pvh->pv_list)) | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
} | } | ||||
pmap_delayed_invl_page(m); | |||||
} | } | ||||
return (pmap_unuse_pt(pmap, va, ptepde, free)); | return (pmap_unuse_pt(pmap, va, ptepde, free)); | ||||
} | } | ||||
/* | /* | ||||
* Remove a single page from a process address space | * Remove a single page from a process address space | ||||
*/ | */ | ||||
static void | static void | ||||
... | pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
* Perform an unsynchronized read. This is, however, safe. | * Perform an unsynchronized read. This is, however, safe. | ||||
*/ | */ | ||||
if (pmap->pm_stats.resident_count == 0) | if (pmap->pm_stats.resident_count == 0) | ||||
return; | return; | ||||
anyvalid = 0; | anyvalid = 0; | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
rw_rlock(&pvh_global_lock); | pmap_delayed_invl_started(); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
/* | /* | ||||
* special handling of removing one page. a very | * special handling of removing one page. a very | ||||
* common operation and easy to short circuit some | * common operation and easy to short circuit some | ||||
* code. | * code. | ||||
*/ | */ | ||||
if (sva + PAGE_SIZE == eva) { | if (sva + PAGE_SIZE == eva) { | ||||
... | for (; sva < eva; sva = va_next) {
if (va != va_next) | if (va != va_next) | ||||
pmap_invalidate_range(pmap, va, sva); | pmap_invalidate_range(pmap, va, sva); | ||||
} | } | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
out: | out: | ||||
if (anyvalid) | if (anyvalid) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
pmap_delayed_invl_finished(); | |||||
pmap_free_zero_pages(&free); | pmap_free_zero_pages(&free); | ||||
} | } | ||||
/* | /* | ||||
* Routine: pmap_remove_all | * Routine: pmap_remove_all | ||||
* Function: | * Function: | ||||
* Removes this physical page from | * Removes this physical page from | ||||
* all physical maps in which it resides. | * all physical maps in which it resides. | ||||
* Reflects back modify bits to the pager. | * Reflects back modify bits to the pager. | ||||
* | * | ||||
* Notes: | * Notes: | ||||
* Original versions of this routine were very | * Original versions of this routine were very | ||||
* inefficient because they iteratively called | * inefficient because they iteratively called | ||||
* pmap_remove (slow...) | * pmap_remove (slow...) | ||||
*/ | */ | ||||
void | void | ||||
pmap_remove_all(vm_page_t m) | pmap_remove_all(vm_page_t m) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
struct rwlock *lock; | |||||
pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW; | pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW; | ||||
pd_entry_t *pde; | pd_entry_t *pde; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
struct spglist free; | struct spglist free; | ||||
int pvh_gen, md_gen; | |||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("pmap_remove_all: page %p is not managed", m)); | ("pmap_remove_all: page %p is not managed", m)); | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
rw_wlock(&pvh_global_lock); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | |||||
retry: | |||||
rw_wlock(lock); | |||||
if ((m->flags & PG_FICTITIOUS) != 0) | if ((m->flags & PG_FICTITIOUS) != 0) | ||||
goto small_mappings; | goto small_mappings; | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | |||||
while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { | while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
if (!PMAP_TRYLOCK(pmap)) { | |||||
pvh_gen = pvh->pv_gen; | |||||
rw_wunlock(lock); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_wlock(lock); | |||||
if (pvh_gen != pvh->pv_gen) { | |||||
rw_wunlock(lock); | |||||
PMAP_UNLOCK(pmap); | |||||
goto retry; | |||||
} | |||||
} | |||||
va = pv->pv_va; | va = pv->pv_va; | ||||
pde = pmap_pde(pmap, va); | pde = pmap_pde(pmap, va); | ||||
(void)pmap_demote_pde(pmap, pde, va); | (void)pmap_demote_pde_locked(pmap, pde, va, &lock); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
small_mappings: | small_mappings: | ||||
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { | while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
if (!PMAP_TRYLOCK(pmap)) { | |||||
pvh_gen = pvh->pv_gen; | |||||
md_gen = m->md.pv_gen; | |||||
rw_wunlock(lock); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
rw_wlock(lock); | |||||
if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { | |||||
rw_wunlock(lock); | |||||
PMAP_UNLOCK(pmap); | |||||
goto retry; | |||||
} | |||||
} | |||||
PG_A = pmap_accessed_bit(pmap); | PG_A = pmap_accessed_bit(pmap); | ||||
PG_M = pmap_modified_bit(pmap); | PG_M = pmap_modified_bit(pmap); | ||||
PG_RW = pmap_rw_bit(pmap); | PG_RW = pmap_rw_bit(pmap); | ||||
pmap_resident_count_dec(pmap, 1); | pmap_resident_count_dec(pmap, 1); | ||||
pde = pmap_pde(pmap, pv->pv_va); | pde = pmap_pde(pmap, pv->pv_va); | ||||
KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" | KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" | ||||
" a 2mpage in page %p's pv list", m)); | " a 2mpage in page %p's pv list", m)); | ||||
pte = pmap_pde_to_pte(pde, pv->pv_va); | pte = pmap_pde_to_pte(pde, pv->pv_va); | ||||
... | while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pmap_unuse_pt(pmap, pv->pv_va, *pde, &free); | pmap_unuse_pt(pmap, pv->pv_va, *pde, &free); | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va); | ||||
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | ||||
m->md.pv_gen++; | m->md.pv_gen++; | ||||
free_pv_entry(pmap, pv); | free_pv_entry(pmap, pv); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(lock); | ||||
pmap_delayed_invl_wait(m); | |||||
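/*
 * Reviewer annotation (not part of the patch): the wait runs after the
 * PV list lock is dropped; once it returns, every DI block that
 * removed a mapping of m has completed its TLB flush, so the caller
 * may reuse the page knowing no CPU retains a stale translation.
 */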
pmap_free_zero_pages(&free); | pmap_free_zero_pages(&free); | ||||
} | } | ||||
/* | /* | ||||
* pmap_protect_pde: do the things to protect a 2mpage in a process | * pmap_protect_pde: do the things to protect a 2mpage in a process | ||||
*/ | */ | ||||
static boolean_t | static boolean_t | ||||
pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) | pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) | ||||
...
void | void | ||||
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) | pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) | ||||
{ | { | ||||
vm_offset_t va_next; | vm_offset_t va_next; | ||||
pml4_entry_t *pml4e; | pml4_entry_t *pml4e; | ||||
pdp_entry_t *pdpe; | pdp_entry_t *pdpe; | ||||
pd_entry_t ptpaddr, *pde; | pd_entry_t ptpaddr, *pde; | ||||
pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V; | pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V; | ||||
boolean_t anychanged, pv_lists_locked; | boolean_t anychanged; | ||||
KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); | KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); | ||||
if (prot == VM_PROT_NONE) { | if (prot == VM_PROT_NONE) { | ||||
pmap_remove(pmap, sva, eva); | pmap_remove(pmap, sva, eva); | ||||
return; | return; | ||||
} | } | ||||
if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == | if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == | ||||
(VM_PROT_WRITE|VM_PROT_EXECUTE)) | (VM_PROT_WRITE|VM_PROT_EXECUTE)) | ||||
return; | return; | ||||
PG_G = pmap_global_bit(pmap); | PG_G = pmap_global_bit(pmap); | ||||
PG_M = pmap_modified_bit(pmap); | PG_M = pmap_modified_bit(pmap); | ||||
PG_V = pmap_valid_bit(pmap); | PG_V = pmap_valid_bit(pmap); | ||||
PG_RW = pmap_rw_bit(pmap); | PG_RW = pmap_rw_bit(pmap); | ||||
pv_lists_locked = FALSE; | |||||
resume: | |||||
anychanged = FALSE; | anychanged = FALSE; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
for (; sva < eva; sva = va_next) { | for (; sva < eva; sva = va_next) { | ||||
pml4e = pmap_pml4e(pmap, sva); | pml4e = pmap_pml4e(pmap, sva); | ||||
if ((*pml4e & PG_V) == 0) { | if ((*pml4e & PG_V) == 0) { | ||||
va_next = (sva + NBPML4) & ~PML4MASK; | va_next = (sva + NBPML4) & ~PML4MASK; | ||||
... | if ((ptpaddr & PG_PS) != 0) {
if (sva + NBPDR == va_next && eva >= va_next) { | if (sva + NBPDR == va_next && eva >= va_next) { | ||||
/* | /* | ||||
* The TLB entry for a PG_G mapping is | * The TLB entry for a PG_G mapping is | ||||
* invalidated by pmap_protect_pde(). | * invalidated by pmap_protect_pde(). | ||||
*/ | */ | ||||
if (pmap_protect_pde(pmap, pde, sva, prot)) | if (pmap_protect_pde(pmap, pde, sva, prot)) | ||||
anychanged = TRUE; | anychanged = TRUE; | ||||
continue; | continue; | ||||
} else { | } else if (!pmap_demote_pde(pmap, pde, sva)) { | ||||
if (!pv_lists_locked) { | |||||
pv_lists_locked = TRUE; | |||||
if (!rw_try_rlock(&pvh_global_lock)) { | |||||
if (anychanged) | |||||
pmap_invalidate_all( | |||||
pmap); | |||||
PMAP_UNLOCK(pmap); | |||||
rw_rlock(&pvh_global_lock); | |||||
goto resume; | |||||
} | |||||
} | |||||
if (!pmap_demote_pde(pmap, pde, sva)) { | |||||
/* | /* | ||||
* The large page mapping was | * The large page mapping was destroyed. | ||||
* destroyed. | |||||
*/ | */ | ||||
continue; | continue; | ||||
} | } | ||||
} | } | ||||
} | |||||
if (va_next > eva) | if (va_next > eva) | ||||
va_next = eva; | va_next = eva; | ||||
for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, | for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, | ||||
sva += PAGE_SIZE) { | sva += PAGE_SIZE) { | ||||
pt_entry_t obits, pbits; | pt_entry_t obits, pbits; | ||||
vm_page_t m; | vm_page_t m; | ||||
... | retry:
pmap_invalidate_page(pmap, sva); | pmap_invalidate_page(pmap, sva); | ||||
else | else | ||||
anychanged = TRUE; | anychanged = TRUE; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
if (anychanged) | if (anychanged) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
if (pv_lists_locked) | |||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
/* | /* | ||||
* Tries to promote the 512, contiguous 4KB page mappings that are within a | * Tries to promote the 512, contiguous 4KB page mappings that are within a | ||||
* single page table page (PTP) to a single 2MB page mapping. For promotion | * single page table page (PTP) to a single 2MB page mapping. For promotion | ||||
* to occur, two conditions must be met: (1) the 4KB page mappings must map | * to occur, two conditions must be met: (1) the 4KB page mappings must map | ||||
* aligned, contiguous physical memory and (2) the 4KB page mappings must have | * aligned, contiguous physical memory and (2) the 4KB page mappings must have | ||||
...
* target physical map with the protection requested. | * target physical map with the protection requested. | ||||
* | * | ||||
* If specified, the page will be wired down, meaning | * If specified, the page will be wired down, meaning | ||||
* that the related pte can not be reclaimed. | * that the related pte can not be reclaimed. | ||||
* | * | ||||
* NB: This is the only routine which MAY NOT lazy-evaluate | * NB: This is the only routine which MAY NOT lazy-evaluate | ||||
* or lose information. That is, this routine must actually | * or lose information. That is, this routine must actually | ||||
* insert this page into the given map NOW. | * insert this page into the given map NOW. | ||||
* | |||||
* When destroying both a page table and PV entry, this function | |||||
* performs the TLB invalidation before releasing the PV list | |||||
* lock, so we do not need pmap_delayed_invl_page() calls here. | |||||
*/ | */ | ||||
int | int | ||||
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | ||||
u_int flags, int8_t psind __unused) | u_int flags, int8_t psind __unused) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pd_entry_t *pde; | pd_entry_t *pde; | ||||
pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V; | pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V; | ||||
... | pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if ((m->oflags & VPO_UNMANAGED) != 0) { | if ((m->oflags & VPO_UNMANAGED) != 0) { | ||||
if ((newpte & PG_RW) != 0) | if ((newpte & PG_RW) != 0) | ||||
newpte |= PG_M; | newpte |= PG_M; | ||||
} | } | ||||
mpte = NULL; | mpte = NULL; | ||||
lock = NULL; | lock = NULL; | ||||
rw_rlock(&pvh_global_lock); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
/* | /* | ||||
* In the case that a page table page is not | * In the case that a page table page is not | ||||
* resident, we are creating it here. | * resident, we are creating it here. | ||||
*/ | */ | ||||
retry: | retry: | ||||
pde = pmap_pde(pmap, va); | pde = pmap_pde(pmap, va); | ||||
... | if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
* deallocated. | * deallocated. | ||||
*/ | */ | ||||
nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; | nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; | ||||
mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), | mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), | ||||
nosleep ? NULL : &lock); | nosleep ? NULL : &lock); | ||||
if (mpte == NULL && nosleep) { | if (mpte == NULL && nosleep) { | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (KERN_RESOURCE_SHORTAGE); | return (KERN_RESOURCE_SHORTAGE); | ||||
} | } | ||||
goto retry; | goto retry; | ||||
} else | } else | ||||
panic("pmap_enter: invalid page directory va=%#lx", va); | panic("pmap_enter: invalid page directory va=%#lx", va); | ||||
origpte = *pte; | origpte = *pte; | ||||
... | unchanged:
if ((mpte == NULL || mpte->wire_count == NPTEPG) && | if ((mpte == NULL || mpte->wire_count == NPTEPG) && | ||||
pmap_ps_enabled(pmap) && | pmap_ps_enabled(pmap) && | ||||
(m->flags & PG_FICTITIOUS) == 0 && | (m->flags & PG_FICTITIOUS) == 0 && | ||||
vm_reserv_level_iffullpop(m) == 0) | vm_reserv_level_iffullpop(m) == 0) | ||||
pmap_promote_pde(pmap, pde, va, &lock); | pmap_promote_pde(pmap, pde, va, &lock); | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
/* | /* | ||||
* Tries to create a 2MB page mapping. Returns TRUE if successful and FALSE | * Tries to create a 2MB page mapping. Returns TRUE if successful and FALSE | ||||
* otherwise. Fails if (1) a page table page cannot be allocated without | * otherwise. Fails if (1) a page table page cannot be allocated without | ||||
* blocking, (2) a mapping already exists at the specified virtual address, or | * blocking, (2) a mapping already exists at the specified virtual address, or | ||||
* (3) a pv entry cannot be allocated without reclaiming another pv entry. | * (3) a pv entry cannot be allocated without reclaiming another pv entry. | ||||
*/ | */ | ||||
static boolean_t | static boolean_t | ||||
pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | ||||
struct rwlock **lockp) | struct rwlock **lockp) | ||||
{ | { | ||||
pd_entry_t *pde, newpde; | pd_entry_t *pde, newpde; | ||||
pt_entry_t PG_V; | pt_entry_t PG_V; | ||||
vm_page_t mpde; | vm_page_t mpde; | ||||
struct spglist free; | struct spglist free; | ||||
PG_V = pmap_valid_bit(pmap); | PG_V = pmap_valid_bit(pmap); | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
if ((mpde = pmap_allocpde(pmap, va, NULL)) == NULL) { | if ((mpde = pmap_allocpde(pmap, va, NULL)) == NULL) { | ||||
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" | CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" | ||||
" in pmap %p", va, pmap); | " in pmap %p", va, pmap); | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde)); | pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde)); | ||||
... | pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_pindex_t diff, psize; | vm_pindex_t diff, psize; | ||||
VM_OBJECT_ASSERT_LOCKED(m_start->object); | VM_OBJECT_ASSERT_LOCKED(m_start->object); | ||||
psize = atop(end - start); | psize = atop(end - start); | ||||
mpte = NULL; | mpte = NULL; | ||||
m = m_start; | m = m_start; | ||||
lock = NULL; | lock = NULL; | ||||
rw_rlock(&pvh_global_lock); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | ||||
va = start + ptoa(diff); | va = start + ptoa(diff); | ||||
if ((va & PDRMASK) == 0 && va + NBPDR <= end && | if ((va & PDRMASK) == 0 && va + NBPDR <= end && | ||||
m->psind == 1 && pmap_ps_enabled(pmap) && | m->psind == 1 && pmap_ps_enabled(pmap) && | ||||
pmap_enter_pde(pmap, va, m, prot, &lock)) | pmap_enter_pde(pmap, va, m, prot, &lock)) | ||||
m = &m[NBPDR / PAGE_SIZE - 1]; | m = &m[NBPDR / PAGE_SIZE - 1]; | ||||
else | else | ||||
mpte = pmap_enter_quick_locked(pmap, va, m, prot, | mpte = pmap_enter_quick_locked(pmap, va, m, prot, | ||||
mpte, &lock); | mpte, &lock); | ||||
m = TAILQ_NEXT(m, listq); | m = TAILQ_NEXT(m, listq); | ||||
} | } | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
/* | /* | ||||
* this code makes some *MAJOR* assumptions: | * this code makes some *MAJOR* assumptions: | ||||
* 1. Current pmap & pmap exists. | * 1. Current pmap & pmap exists. | ||||
* 2. Not wired. | * 2. Not wired. | ||||
* 3. Read access. | * 3. Read access. | ||||
* 4. No page table pages. | * 4. No page table pages. | ||||
* but is *MUCH* faster than pmap_enter... | * but is *MUCH* faster than pmap_enter... | ||||
*/ | */ | ||||
void | void | ||||
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) | pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
lock = NULL; | lock = NULL; | ||||
rw_rlock(&pvh_global_lock); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); | (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
static vm_page_t | static vm_page_t | ||||
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, | pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, | ||||
vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) | vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) | ||||
{ | { | ||||
struct spglist free; | struct spglist free; | ||||
pt_entry_t *pte, PG_V; | pt_entry_t *pte, PG_V; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || | KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || | ||||
(m->oflags & VPO_UNMANAGED) != 0, | (m->oflags & VPO_UNMANAGED) != 0, | ||||
("pmap_enter_quick_locked: managed mapping within the clean submap")); | ("pmap_enter_quick_locked: managed mapping within the clean submap")); | ||||
PG_V = pmap_valid_bit(pmap); | PG_V = pmap_valid_bit(pmap); | ||||
rw_assert(&pvh_global_lock, RA_LOCKED); | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
/* | /* | ||||
* In the case that a page table page is not | * In the case that a page table page is not | ||||
* resident, we are creating it here. | * resident, we are creating it here. | ||||
*/ | */ | ||||
if (va < VM_MAXUSER_ADDRESS) { | if (va < VM_MAXUSER_ADDRESS) { | ||||
vm_pindex_t ptepindex; | vm_pindex_t ptepindex; | ||||
...
} | } | ||||
/* | /* | ||||
* Clear the wired attribute from the mappings for the specified range of | * Clear the wired attribute from the mappings for the specified range of | ||||
* addresses in the given pmap. Every valid mapping within that range | * addresses in the given pmap. Every valid mapping within that range | ||||
* must have the wired attribute set. In contrast, invalid mappings | * must have the wired attribute set. In contrast, invalid mappings | ||||
* cannot have the wired attribute set, so they are ignored. | * cannot have the wired attribute set, so they are ignored. | ||||
* | * | ||||
* The wired attribute of the page table entry is not a hardware feature, | * The wired attribute of the page table entry is not a hardware | ||||
* so there is no need to invalidate any TLB entries. | * feature, so there is no need to invalidate any TLB entries. | ||||
* Since pmap_demote_pde() for the wired entry must never fail, | |||||
* pmap_delayed_invl_started()/finished() calls around the | |||||
* function are not needed. | |||||
*/ | */ | ||||
void | void | ||||
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
vm_offset_t va_next; | vm_offset_t va_next; | ||||
pml4_entry_t *pml4e; | pml4_entry_t *pml4e; | ||||
pdp_entry_t *pdpe; | pdp_entry_t *pdpe; | ||||
pd_entry_t *pde; | pd_entry_t *pde; | ||||
pt_entry_t *pte, PG_V; | pt_entry_t *pte, PG_V; | ||||
boolean_t pv_lists_locked; | |||||
PG_V = pmap_valid_bit(pmap); | PG_V = pmap_valid_bit(pmap); | ||||
pv_lists_locked = FALSE; | |||||
resume: | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
for (; sva < eva; sva = va_next) { | for (; sva < eva; sva = va_next) { | ||||
pml4e = pmap_pml4e(pmap, sva); | pml4e = pmap_pml4e(pmap, sva); | ||||
if ((*pml4e & PG_V) == 0) { | if ((*pml4e & PG_V) == 0) { | ||||
va_next = (sva + NBPML4) & ~PML4MASK; | va_next = (sva + NBPML4) & ~PML4MASK; | ||||
if (va_next < sva) | if (va_next < sva) | ||||
va_next = eva; | va_next = eva; | ||||
continue; | continue; | ||||
... | if ((*pde & PG_PS) != 0) {
* Are we unwiring the entire large page? If not, | * Are we unwiring the entire large page? If not, | ||||
* demote the mapping and fall through. | * demote the mapping and fall through. | ||||
*/ | */ | ||||
if (sva + NBPDR == va_next && eva >= va_next) { | if (sva + NBPDR == va_next && eva >= va_next) { | ||||
atomic_clear_long(pde, PG_W); | atomic_clear_long(pde, PG_W); | ||||
pmap->pm_stats.wired_count -= NBPDR / | pmap->pm_stats.wired_count -= NBPDR / | ||||
PAGE_SIZE; | PAGE_SIZE; | ||||
continue; | continue; | ||||
} else { | } else if (!pmap_demote_pde(pmap, pde, sva)) | ||||
if (!pv_lists_locked) { | |||||
pv_lists_locked = TRUE; | |||||
if (!rw_try_rlock(&pvh_global_lock)) { | |||||
PMAP_UNLOCK(pmap); | |||||
rw_rlock(&pvh_global_lock); | |||||
/* Repeat sva. */ | |||||
goto resume; | |||||
} | |||||
} | |||||
if (!pmap_demote_pde(pmap, pde, sva)) | |||||
panic("pmap_unwire: demotion failed"); | panic("pmap_unwire: demotion failed"); | ||||
} | } | ||||
} | |||||
if (va_next > eva) | if (va_next > eva) | ||||
va_next = eva; | va_next = eva; | ||||
for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, | for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, | ||||
sva += PAGE_SIZE) { | sva += PAGE_SIZE) { | ||||
if ((*pte & PG_V) == 0) | if ((*pte & PG_V) == 0) | ||||
continue; | continue; | ||||
if ((*pte & PG_W) == 0) | if ((*pte & PG_W) == 0) | ||||
panic("pmap_unwire: pte %#jx is missing PG_W", | panic("pmap_unwire: pte %#jx is missing PG_W", | ||||
(uintmax_t)*pte); | (uintmax_t)*pte); | ||||
/* | /* | ||||
* PG_W must be cleared atomically. Although the pmap | * PG_W must be cleared atomically. Although the pmap | ||||
* lock synchronizes access to PG_W, another processor | * lock synchronizes access to PG_W, another processor | ||||
* could be setting PG_M and/or PG_A concurrently. | * could be setting PG_M and/or PG_A concurrently. | ||||
*/ | */ | ||||
atomic_clear_long(pte, PG_W); | atomic_clear_long(pte, PG_W); | ||||
pmap->pm_stats.wired_count--; | pmap->pm_stats.wired_count--; | ||||
} | } | ||||
} | } | ||||
if (pv_lists_locked) | |||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
/* | /* | ||||
* Copy the range specified by src_addr/len | * Copy the range specified by src_addr/len | ||||
* from the source map to the range dst_addr/len | * from the source map to the range dst_addr/len | ||||
* in the destination map. | * in the destination map. | ||||
* | * | ||||
... | pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
* (aka EPT_PG_EXECUTE) could still be set. Since some EPT | * (aka EPT_PG_EXECUTE) could still be set. Since some EPT | ||||
* implementations flag an EPT misconfiguration for exec-only | * implementations flag an EPT misconfiguration for exec-only | ||||
* mappings we skip this function entirely for emulated pmaps. | * mappings we skip this function entirely for emulated pmaps. | ||||
*/ | */ | ||||
if (pmap_emulate_ad_bits(dst_pmap)) | if (pmap_emulate_ad_bits(dst_pmap)) | ||||
return; | return; | ||||
lock = NULL; | lock = NULL; | ||||
rw_rlock(&pvh_global_lock); | |||||
if (dst_pmap < src_pmap) { | if (dst_pmap < src_pmap) { | ||||
PMAP_LOCK(dst_pmap); | PMAP_LOCK(dst_pmap); | ||||
PMAP_LOCK(src_pmap); | PMAP_LOCK(src_pmap); | ||||
} else { | } else { | ||||
PMAP_LOCK(src_pmap); | PMAP_LOCK(src_pmap); | ||||
PMAP_LOCK(dst_pmap); | PMAP_LOCK(dst_pmap); | ||||
} | } | ||||
... | while (addr < va_next) {
} | } | ||||
addr += PAGE_SIZE; | addr += PAGE_SIZE; | ||||
src_pte++; | src_pte++; | ||||
} | } | ||||
} | } | ||||
out: | out: | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(src_pmap); | PMAP_UNLOCK(src_pmap); | ||||
PMAP_UNLOCK(dst_pmap); | PMAP_UNLOCK(dst_pmap); | ||||
} | } | ||||
/* | /* | ||||
* pmap_zero_page zeros the specified hardware page by mapping | * pmap_zero_page zeros the specified hardware page by mapping | ||||
* the page into KVM and using bzero to clear its contents. | * the page into KVM and using bzero to clear its contents. | ||||
*/ | */ | ||||
... | pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
struct rwlock *lock; | struct rwlock *lock; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
int loops = 0; | int loops = 0; | ||||
boolean_t rv; | boolean_t rv; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("pmap_page_exists_quick: page %p is not managed", m)); | ("pmap_page_exists_quick: page %p is not managed", m)); | ||||
rv = FALSE; | rv = FALSE; | ||||
rw_rlock(&pvh_global_lock); | |||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
rw_rlock(lock); | rw_rlock(lock); | ||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
if (PV_PMAP(pv) == pmap) { | if (PV_PMAP(pv) == pmap) { | ||||
rv = TRUE; | rv = TRUE; | ||||
break; | break; | ||||
} | } | ||||
loops++; | loops++; | ||||
if (loops >= 16) | if (loops >= 16) | ||||
break; | break; | ||||
} | } | ||||
if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { | if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { | TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { | ||||
if (PV_PMAP(pv) == pmap) { | if (PV_PMAP(pv) == pmap) { | ||||
rv = TRUE; | rv = TRUE; | ||||
break; | break; | ||||
} | } | ||||
loops++; | loops++; | ||||
if (loops >= 16) | if (loops >= 16) | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
rw_runlock(lock); | rw_runlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
return (rv); | return (rv); | ||||
} | } | ||||
/* | /* | ||||
* pmap_page_wired_mappings: | * pmap_page_wired_mappings: | ||||
* | * | ||||
* Return the number of managed mappings to the given physical page | * Return the number of managed mappings to the given physical page | ||||
* that are wired. | * that are wired. | ||||
*/ | */ | ||||
int | int | ||||
pmap_page_wired_mappings(vm_page_t m) | pmap_page_wired_mappings(vm_page_t m) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
int count, md_gen, pvh_gen; | int count, md_gen, pvh_gen; | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) | if ((m->oflags & VPO_UNMANAGED) != 0) | ||||
return (0); | return (0); | ||||
rw_rlock(&pvh_global_lock); | |||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
rw_rlock(lock); | rw_rlock(lock); | ||||
restart: | restart: | ||||
count = 0; | count = 0; | ||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
md_gen = m->md.pv_gen; | md_gen = m->md.pv_gen; | ||||
Show All 28 Lines | TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { | ||||
} | } | ||||
pte = pmap_pde(pmap, pv->pv_va); | pte = pmap_pde(pmap, pv->pv_va); | ||||
if ((*pte & PG_W) != 0) | if ((*pte & PG_W) != 0) | ||||
count++; | count++; | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
} | } | ||||
rw_runlock(lock); | rw_runlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
return (count); | return (count); | ||||
} | } | ||||
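
The PMAP_TRYLOCK/pv_gen dance above is worth spelling out: when the pmap lock cannot be taken without sleeping, the function records the list's generation number, drops the list lock, blocks on the pmap lock, retakes the list lock, and restarts the scan if the generation moved in the meantime. A userspace sketch of that retry pattern, assuming a hypothetical guarded_list with a single inner lock rather than one lock per entry's pmap:

#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical list protected by a rwlock, with a mutation counter. */
struct guarded_list {
	pthread_rwlock_t list_lock;
	atomic_ulong gen;		/* bumped on every list mutation */
	/* ... list head elided ... */
};

/* Stand-in for the per-pmap lock taken while scanning. */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

static void
scan_with_gen_retry(struct guarded_list *gl)
{
	unsigned long observed;

restart:
	pthread_rwlock_rdlock(&gl->list_lock);
	if (pthread_mutex_trylock(&inner) != 0) {
		/* Record the generation before sleeping on 'inner'. */
		observed = atomic_load(&gl->gen);
		pthread_rwlock_unlock(&gl->list_lock);
		pthread_mutex_lock(&inner);
		pthread_rwlock_rdlock(&gl->list_lock);
		if (atomic_load(&gl->gen) != observed) {
			/* List changed while unlocked: rescan. */
			pthread_mutex_unlock(&inner);
			pthread_rwlock_unlock(&gl->list_lock);
			goto restart;
		}
	}
	/* ... walk the list and count wired mappings ... */
	pthread_mutex_unlock(&inner);
	pthread_rwlock_unlock(&gl->list_lock);
}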
/* | /* | ||||
* Returns TRUE if the given page is mapped individually or as part of | * Returns TRUE if the given page is mapped individually or as part of | ||||
* a 2mpage. Otherwise, returns FALSE. | * a 2mpage. Otherwise, returns FALSE. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
pmap_page_is_mapped(vm_page_t m) | pmap_page_is_mapped(vm_page_t m) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
boolean_t rv; | boolean_t rv; | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) | if ((m->oflags & VPO_UNMANAGED) != 0) | ||||
return (FALSE); | return (FALSE); | ||||
rw_rlock(&pvh_global_lock); | |||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
rw_rlock(lock); | rw_rlock(lock); | ||||
rv = !TAILQ_EMPTY(&m->md.pv_list) || | rv = !TAILQ_EMPTY(&m->md.pv_list) || | ||||
((m->flags & PG_FICTITIOUS) == 0 && | ((m->flags & PG_FICTITIOUS) == 0 && | ||||
!TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); | !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); | ||||
rw_runlock(lock); | rw_runlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
return (rv); | return (rv); | ||||
} | } | ||||
/* | /* | ||||
* Destroy all managed, non-wired mappings in the given user-space | * Destroy all managed, non-wired mappings in the given user-space | ||||
* pmap. This pmap cannot be active on any processor besides the | * pmap. This pmap cannot be active on any processor besides the | ||||
* caller. | * caller. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | |||||
#endif | #endif | ||||
lock = NULL; | lock = NULL; | ||||
PG_M = pmap_modified_bit(pmap); | PG_M = pmap_modified_bit(pmap); | ||||
PG_V = pmap_valid_bit(pmap); | PG_V = pmap_valid_bit(pmap); | ||||
PG_RW = pmap_rw_bit(pmap); | PG_RW = pmap_rw_bit(pmap); | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
rw_rlock(&pvh_global_lock); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { | TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { | ||||
allfree = 1; | allfree = 1; | ||||
freed = 0; | freed = 0; | ||||
for (field = 0; field < _NPCM; field++) { | for (field = 0; field < _NPCM; field++) { | ||||
inuse = ~pc->pc_map[field] & pc_freemask[field]; | inuse = ~pc->pc_map[field] & pc_freemask[field]; | ||||
while (inuse != 0) { | while (inuse != 0) { | ||||
bit = bsfq(inuse); | bit = bsfq(inuse); | ||||
▲ Show 20 Lines • Show All 116 Lines • ▼ Show 20 Lines | */ | ||||
if (allfree) { | if (allfree) { | ||||
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); | TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); | ||||
free_pv_chunk(pc); | free_pv_chunk(pc); | ||||
} | } | ||||
} | } | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
pmap_free_zero_pages(&free); | pmap_free_zero_pages(&free); | ||||
} | } | ||||
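
The inner loop above extracts the allocated pv entries of each chunk word with inuse = ~pc->pc_map[field] & pc_freemask[field] and bsfq. The same find-first-set walk in portable C, assuming a single 64-slot word in which set bits mark free entries; visit_allocated is an illustrative name:

#include <stdint.h>

/*
 * A set bit in 'freemask' marks a free pv slot, so the allocated
 * slots are the clear bits.  Visit each one with a find-first-set
 * loop, the portable analogue of the amd64 bsfq instruction.
 */
static void
visit_allocated(uint64_t freemask, void (*visit)(int))
{
	uint64_t inuse;
	int bit;

	inuse = ~freemask;		/* allocated slots */
	while (inuse != 0) {
		bit = __builtin_ctzll(inuse);	/* like bsfq */
		visit(bit);
		inuse &= inuse - 1;	/* strip the bit just visited */
	}
}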
static boolean_t | static boolean_t | ||||
pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) | pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pt_entry_t *pte, mask; | pt_entry_t *pte, mask; | ||||
pt_entry_t PG_A, PG_M, PG_RW, PG_V; | pt_entry_t PG_A, PG_M, PG_RW, PG_V; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
int md_gen, pvh_gen; | int md_gen, pvh_gen; | ||||
boolean_t rv; | boolean_t rv; | ||||
rv = FALSE; | rv = FALSE; | ||||
rw_rlock(&pvh_global_lock); | |||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
rw_rlock(lock); | rw_rlock(lock); | ||||
restart: | restart: | ||||
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
md_gen = m->md.pv_gen; | md_gen = m->md.pv_gen; | ||||
rw_runlock(lock); | rw_runlock(lock); | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { | ||||
rv = (*pte & mask) == mask; | rv = (*pte & mask) == mask; | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
if (rv) | if (rv) | ||||
goto out; | goto out; | ||||
} | } | ||||
} | } | ||||
out: | out: | ||||
rw_runlock(lock); | rw_runlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
return (rv); | return (rv); | ||||
} | } | ||||
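
pmap_page_test_mappings() folds the accessed/modified question into one mask so each PTE costs a single AND-and-compare: "modified" requires the dirty bit on a writeable entry, "accessed" requires the accessed bit on a valid entry. A sketch of the mask composition, with invented bit values:

#include <stdbool.h>
#include <stdint.h>

#define B_V	0x001	/* valid */
#define B_RW	0x002	/* writeable */
#define B_A	0x020	/* accessed */
#define B_M	0x040	/* modified */

/* One AND-and-compare per PTE; bit values are invented for the sketch. */
static bool
pte_test(uint64_t pte, bool modified, bool accessed)
{
	uint64_t mask;

	mask = 0;
	if (modified)
		mask |= B_RW | B_M;
	if (accessed)
		mask |= B_V | B_A;
	return ((pte & mask) == mask);
}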
/* | /* | ||||
* pmap_is_modified: | * pmap_is_modified: | ||||
* | * | ||||
* Return whether or not the specified physical page was modified | * Return whether or not the specified physical page was modified | ||||
* in any physical maps. | * in any physical maps. | ||||
▲ Show 20 Lines • Show All 77 Lines • ▼ Show 20 Lines | pmap_remove_write(vm_page_t m) | ||||
/* | /* | ||||
* If the page is not exclusive busied, then PGA_WRITEABLE cannot be | * If the page is not exclusive busied, then PGA_WRITEABLE cannot be | ||||
* set by another thread while the object is locked. Thus, | * set by another thread while the object is locked. Thus, | ||||
* if PGA_WRITEABLE is clear, no page table entries need updating. | * if PGA_WRITEABLE is clear, no page table entries need updating. | ||||
*/ | */ | ||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | VM_OBJECT_ASSERT_WLOCKED(m->object); | ||||
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) | if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) | ||||
return; | return; | ||||
rw_rlock(&pvh_global_lock); | |||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
retry_pv_loop: | retry_pv_loop: | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
if ((m->flags & PG_FICTITIOUS) != 0) | if ((m->flags & PG_FICTITIOUS) != 0) | ||||
goto small_mappings; | goto small_mappings; | ||||
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { | TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | if (oldpte & PG_RW) { | ||||
if ((oldpte & PG_M) != 0) | if ((oldpte & PG_M) != 0) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
vm_page_aflag_clear(m, PGA_WRITEABLE); | vm_page_aflag_clear(m, PGA_WRITEABLE); | ||||
rw_runlock(&pvh_global_lock); | pmap_delayed_invl_wait(m); | ||||
} | } | ||||
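
pmap_delayed_invl_wait() is the reader side of the delayed-invalidation (DI) scheme that replaces pvh_global_lock here: a thread removing a page's PTEs may drop the PV list lock before its TLB shootdown completes, so a consumer of the page must wait until every DI block that was open when it looked has closed. A deliberately simplified model of that wait, using two counters; it assumes blocks finish in start order, whereas the kernel keeps each thread's generation on an ordered tracker list (with turnstiles) precisely so blocks may finish out of order:

#include <pthread.h>

/*
 * Toy DI model.  Writers bracket an unmap with di_start()/di_finish();
 * di_finish() runs after the TLB shootdown.  di_wait() blocks until
 * every block already open at its snapshot has closed.
 */
static pthread_mutex_t di_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t di_cv = PTHREAD_COND_INITIALIZER;
static unsigned long di_started, di_finished;

static void
di_start(void)
{
	pthread_mutex_lock(&di_mtx);
	di_started++;
	pthread_mutex_unlock(&di_mtx);
}

static void
di_finish(void)
{
	pthread_mutex_lock(&di_mtx);
	di_finished++;
	pthread_cond_broadcast(&di_cv);
	pthread_mutex_unlock(&di_mtx);
}

static void
di_wait(void)
{
	unsigned long snap;

	pthread_mutex_lock(&di_mtx);
	snap = di_started;
	while (di_finished < snap)
		pthread_cond_wait(&di_cv, &di_mtx);
	pthread_mutex_unlock(&di_mtx);
}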
static __inline boolean_t | static __inline boolean_t | ||||
safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte) | safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte) | ||||
{ | { | ||||
if (!pmap_emulate_ad_bits(pmap)) | if (!pmap_emulate_ad_bits(pmap)) | ||||
return (TRUE); | return (TRUE); | ||||
Show All 26 Lines | |||||
* Return a count of reference bits for a page, clearing those bits. | * Return a count of reference bits for a page, clearing those bits. | ||||
* It is not necessary for every reference bit to be cleared, but it | * It is not necessary for every reference bit to be cleared, but it | ||||
* is necessary that 0 only be returned when there are truly no | * is necessary that 0 only be returned when there are truly no | ||||
* reference bits set. | * reference bits set. | ||||
* | * | ||||
* XXX: The exact number of bits to check and clear is a matter that | * XXX: The exact number of bits to check and clear is a matter that | ||||
* should be tested and standardized at some point in the future for | * should be tested and standardized at some point in the future for | ||||
* optimal aging of shared pages. | * optimal aging of shared pages. | ||||
* | |||||
* A DI block is not needed within this function, because | |||||
* invalidations are performed before the PV list lock is | |||||
* released. | |||||
*/ | */ | ||||
int | int | ||||
pmap_ts_referenced(vm_page_t m) | pmap_ts_referenced(vm_page_t m) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pv_entry_t pv, pvf; | pv_entry_t pv, pvf; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pd_entry_t oldpde, *pde; | pd_entry_t oldpde, *pde; | ||||
pt_entry_t *pte, PG_A; | pt_entry_t *pte, PG_A; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
int cleared, md_gen, not_cleared, pvh_gen; | int cleared, md_gen, not_cleared, pvh_gen; | ||||
struct spglist free; | struct spglist free; | ||||
boolean_t demoted; | boolean_t demoted; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("pmap_ts_referenced: page %p is not managed", m)); | ("pmap_ts_referenced: page %p is not managed", m)); | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
cleared = 0; | cleared = 0; | ||||
pa = VM_PAGE_TO_PHYS(m); | pa = VM_PAGE_TO_PHYS(m); | ||||
lock = PHYS_TO_PV_LIST_LOCK(pa); | lock = PHYS_TO_PV_LIST_LOCK(pa); | ||||
pvh = pa_to_pvh(pa); | pvh = pa_to_pvh(pa); | ||||
rw_rlock(&pvh_global_lock); | |||||
rw_wlock(lock); | rw_wlock(lock); | ||||
retry: | retry: | ||||
not_cleared = 0; | not_cleared = 0; | ||||
if ((m->flags & PG_FICTITIOUS) != 0 || | if ((m->flags & PG_FICTITIOUS) != 0 || | ||||
(pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) | (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) | ||||
goto small_mappings; | goto small_mappings; | ||||
pv = pvf; | pv = pvf; | ||||
do { | do { | ||||
▲ Show 20 Lines • Show All 143 Lines • ▼ Show 20 Lines | if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { | ||||
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); | ||||
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); | TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); | ||||
m->md.pv_gen++; | m->md.pv_gen++; | ||||
} | } | ||||
} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + | } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + | ||||
not_cleared < PMAP_TS_REFERENCED_MAX); | not_cleared < PMAP_TS_REFERENCED_MAX); | ||||
out: | out: | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
pmap_free_zero_pages(&free); | pmap_free_zero_pages(&free); | ||||
return (cleared + not_cleared); | return (cleared + not_cleared); | ||||
} | } | ||||
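
The do/while above implements a bounded round robin: each serviced pv entry is rotated to the tail of its list, and the scan stops once cleared + not_cleared reaches PMAP_TS_REFERENCED_MAX, so successive calls age different mappings of a widely shared page. The rotation logic in isolation, as a simplified self-contained sketch using <sys/queue.h>; SCAN_MAX and struct ent are illustrative, and the single counter stands in for cleared + not_cleared:

#include <sys/queue.h>
#include <stdbool.h>

#define SCAN_MAX 8	/* stand-in for PMAP_TS_REFERENCED_MAX */

struct ent {
	bool referenced;
	TAILQ_ENTRY(ent) link;
};
TAILQ_HEAD(entq, ent);

/*
 * Clear up to SCAN_MAX referenced marks, rotating every visited
 * entry to the tail so the next call resumes where this one stopped.
 */
static int
scan_referenced(struct entq *q)
{
	struct ent *e, *first;
	int cleared;

	cleared = 0;
	if ((first = TAILQ_FIRST(q)) == NULL)
		return (0);
	e = first;
	do {
		if (e->referenced) {
			e->referenced = false;
			cleared++;
		}
		TAILQ_REMOVE(q, e, link);
		TAILQ_INSERT_TAIL(q, e, link);
	} while ((e = TAILQ_FIRST(q)) != first && cleared < SCAN_MAX);
	return (cleared);
}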
/* | /* | ||||
* Apply the given advice to the specified range of addresses within the | * Apply the given advice to the specified range of addresses within the | ||||
* given pmap. Depending on the advice, clear the referenced and/or | * given pmap. Depending on the advice, clear the referenced and/or | ||||
* modified flags in each mapping and set the mapped page's dirty field. | * modified flags in each mapping and set the mapped page's dirty field. | ||||
*/ | */ | ||||
void | void | ||||
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) | pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pml4_entry_t *pml4e; | pml4_entry_t *pml4e; | ||||
pdp_entry_t *pdpe; | pdp_entry_t *pdpe; | ||||
pd_entry_t oldpde, *pde; | pd_entry_t oldpde, *pde; | ||||
pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V; | pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V; | ||||
vm_offset_t va_next; | vm_offset_t va_next; | ||||
vm_page_t m; | vm_page_t m; | ||||
boolean_t anychanged, pv_lists_locked; | boolean_t anychanged; | ||||
if (advice != MADV_DONTNEED && advice != MADV_FREE) | if (advice != MADV_DONTNEED && advice != MADV_FREE) | ||||
return; | return; | ||||
/* | /* | ||||
* A/D bit emulation requires an alternate code path when clearing | * A/D bit emulation requires an alternate code path when clearing | ||||
* the modified and accessed bits below. Since this function is | * the modified and accessed bits below. Since this function is | ||||
* advisory in nature we skip it entirely for pmaps that require | * advisory in nature we skip it entirely for pmaps that require | ||||
* A/D bit emulation. | * A/D bit emulation. | ||||
*/ | */ | ||||
if (pmap_emulate_ad_bits(pmap)) | if (pmap_emulate_ad_bits(pmap)) | ||||
return; | return; | ||||
PG_A = pmap_accessed_bit(pmap); | PG_A = pmap_accessed_bit(pmap); | ||||
PG_G = pmap_global_bit(pmap); | PG_G = pmap_global_bit(pmap); | ||||
PG_M = pmap_modified_bit(pmap); | PG_M = pmap_modified_bit(pmap); | ||||
PG_V = pmap_valid_bit(pmap); | PG_V = pmap_valid_bit(pmap); | ||||
PG_RW = pmap_rw_bit(pmap); | PG_RW = pmap_rw_bit(pmap); | ||||
pv_lists_locked = FALSE; | |||||
resume: | |||||
anychanged = FALSE; | anychanged = FALSE; | ||||
pmap_delayed_invl_started(); | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
for (; sva < eva; sva = va_next) { | for (; sva < eva; sva = va_next) { | ||||
pml4e = pmap_pml4e(pmap, sva); | pml4e = pmap_pml4e(pmap, sva); | ||||
if ((*pml4e & PG_V) == 0) { | if ((*pml4e & PG_V) == 0) { | ||||
va_next = (sva + NBPML4) & ~PML4MASK; | va_next = (sva + NBPML4) & ~PML4MASK; | ||||
if (va_next < sva) | if (va_next < sva) | ||||
va_next = eva; | va_next = eva; | ||||
Show All 11 Lines | if (va_next < sva) | ||||
va_next = eva; | va_next = eva; | ||||
pde = pmap_pdpe_to_pde(pdpe, sva); | pde = pmap_pdpe_to_pde(pdpe, sva); | ||||
oldpde = *pde; | oldpde = *pde; | ||||
if ((oldpde & PG_V) == 0) | if ((oldpde & PG_V) == 0) | ||||
continue; | continue; | ||||
else if ((oldpde & PG_PS) != 0) { | else if ((oldpde & PG_PS) != 0) { | ||||
if ((oldpde & PG_MANAGED) == 0) | if ((oldpde & PG_MANAGED) == 0) | ||||
continue; | continue; | ||||
if (!pv_lists_locked) { | |||||
pv_lists_locked = TRUE; | |||||
if (!rw_try_rlock(&pvh_global_lock)) { | |||||
if (anychanged) | |||||
pmap_invalidate_all(pmap); | |||||
PMAP_UNLOCK(pmap); | |||||
rw_rlock(&pvh_global_lock); | |||||
goto resume; | |||||
} | |||||
} | |||||
lock = NULL; | lock = NULL; | ||||
if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) { | if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) { | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
/* | /* | ||||
* The large page mapping was destroyed. | * The large page mapping was destroyed. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 43 Lines • ▼ Show 20 Lines | for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, | ||||
if ((*pte & PG_G) != 0) | if ((*pte & PG_G) != 0) | ||||
pmap_invalidate_page(pmap, sva); | pmap_invalidate_page(pmap, sva); | ||||
else | else | ||||
anychanged = TRUE; | anychanged = TRUE; | ||||
} | } | ||||
} | } | ||||
if (anychanged) | if (anychanged) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
if (pv_lists_locked) | |||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
pmap_delayed_invl_finished(); | |||||
} | } | ||||
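
Note that pmap_delayed_invl_started() is called only after both early returns (the advice check and the A/D-bit-emulation check), so pmap_delayed_invl_finished() is reached on every path that opened the block; a DI block that is opened but never closed would stall pmap_delayed_invl_wait() indefinitely. A tiny sketch of the bracket discipline, with stub di_start()/di_finish() and hypothetical names throughout:

#include <stdbool.h>

static void di_start(void)  { /* open a delayed-invalidation block */ }
static void di_finish(void) { /* close it and wake DI waiters */ }

static bool
advice_ok(int advice)
{
	return (advice == 0 || advice == 1);	/* placeholder check */
}

/*
 * Do every early-return check before di_start(), so the single
 * remaining exit path always reaches di_finish().
 */
static void
advise_range(int advice)
{
	if (!advice_ok(advice))
		return;
	di_start();
	/* ... clear PG_A/PG_M and queue invalidations ... */
	di_finish();
}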
/* | /* | ||||
* Clear the modify bits on the specified physical page. | * Clear the modify bits on the specified physical page. | ||||
*/ | */ | ||||
void | void | ||||
pmap_clear_modify(vm_page_t m) | pmap_clear_modify(vm_page_t m) | ||||
{ | { | ||||
Show All 15 Lines | pmap_clear_modify(vm_page_t m) | ||||
/* | /* | ||||
* If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. | * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. | ||||
* If the object containing the page is locked and the page is not | * If the object containing the page is locked and the page is not | ||||
* exclusive busied, then PGA_WRITEABLE cannot be concurrently set. | * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. | ||||
*/ | */ | ||||
if ((m->aflags & PGA_WRITEABLE) == 0) | if ((m->aflags & PGA_WRITEABLE) == 0) | ||||
return; | return; | ||||
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); | ||||
rw_rlock(&pvh_global_lock); | |||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
rw_wlock(lock); | rw_wlock(lock); | ||||
restart: | restart: | ||||
if ((m->flags & PG_FICTITIOUS) != 0) | if ((m->flags & PG_FICTITIOUS) != 0) | ||||
goto small_mappings; | goto small_mappings; | ||||
TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { | TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { | ||||
pmap = PV_PMAP(pv); | pmap = PV_PMAP(pv); | ||||
if (!PMAP_TRYLOCK(pmap)) { | if (!PMAP_TRYLOCK(pmap)) { | ||||
▲ Show 20 Lines • Show All 59 Lines • ▼ Show 20 Lines | TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { | ||||
pte = pmap_pde_to_pte(pde, pv->pv_va); | pte = pmap_pde_to_pte(pde, pv->pv_va); | ||||
if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { | if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { | ||||
atomic_clear_long(pte, PG_M); | atomic_clear_long(pte, PG_M); | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
rw_runlock(&pvh_global_lock); | |||||
} | } | ||||
/* | /* | ||||
* Miscellaneous support routines follow | * Miscellaneous support routines follow | ||||
*/ | */ | ||||
/* Adjust the cache mode for a 4KB page mapped via a PTE. */ | /* Adjust the cache mode for a 4KB page mapped via a PTE. */ | ||||
static __inline void | static __inline void | ||||
▲ Show 20 Lines • Show All 685 Lines • ▼ Show 20 Lines | |||||
int | int | ||||
pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype) | pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype) | ||||
{ | { | ||||
int rv; | int rv; | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
vm_page_t m, mpte; | vm_page_t m, mpte; | ||||
pd_entry_t *pde; | pd_entry_t *pde; | ||||
pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V; | pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V; | ||||
boolean_t pv_lists_locked; | |||||
KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE, | KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE, | ||||
("pmap_emulate_accessed_dirty: invalid fault type %d", ftype)); | ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype)); | ||||
if (!pmap_emulate_ad_bits(pmap)) | if (!pmap_emulate_ad_bits(pmap)) | ||||
return (-1); | return (-1); | ||||
PG_A = pmap_accessed_bit(pmap); | PG_A = pmap_accessed_bit(pmap); | ||||
PG_M = pmap_modified_bit(pmap); | PG_M = pmap_modified_bit(pmap); | ||||
PG_V = pmap_valid_bit(pmap); | PG_V = pmap_valid_bit(pmap); | ||||
PG_RW = pmap_rw_bit(pmap); | PG_RW = pmap_rw_bit(pmap); | ||||
rv = -1; | rv = -1; | ||||
lock = NULL; | lock = NULL; | ||||
pv_lists_locked = FALSE; | |||||
retry: | |||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
pde = pmap_pde(pmap, va); | pde = pmap_pde(pmap, va); | ||||
if (pde == NULL || (*pde & PG_V) == 0) | if (pde == NULL || (*pde & PG_V) == 0) | ||||
goto done; | goto done; | ||||
if ((*pde & PG_PS) != 0) { | if ((*pde & PG_PS) != 0) { | ||||
if (ftype == VM_PROT_READ) { | if (ftype == VM_PROT_READ) { | ||||
Show All 34 Lines | else | ||||
mpte = NULL; | mpte = NULL; | ||||
m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); | m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); | ||||
if ((mpte == NULL || mpte->wire_count == NPTEPG) && | if ((mpte == NULL || mpte->wire_count == NPTEPG) && | ||||
pmap_ps_enabled(pmap) && | pmap_ps_enabled(pmap) && | ||||
(m->flags & PG_FICTITIOUS) == 0 && | (m->flags & PG_FICTITIOUS) == 0 && | ||||
vm_reserv_level_iffullpop(m) == 0) { | vm_reserv_level_iffullpop(m) == 0) { | ||||
if (!pv_lists_locked) { | |||||
pv_lists_locked = TRUE; | |||||
if (!rw_try_rlock(&pvh_global_lock)) { | |||||
PMAP_UNLOCK(pmap); | |||||
rw_rlock(&pvh_global_lock); | |||||
goto retry; | |||||
} | |||||
} | |||||
pmap_promote_pde(pmap, pde, va, &lock); | pmap_promote_pde(pmap, pde, va, &lock); | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
atomic_add_long(&ad_emulation_superpage_promotions, 1); | atomic_add_long(&ad_emulation_superpage_promotions, 1); | ||||
#endif | #endif | ||||
} | } | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
if (ftype == VM_PROT_WRITE) | if (ftype == VM_PROT_WRITE) | ||||
atomic_add_long(&num_dirty_emulations, 1); | atomic_add_long(&num_dirty_emulations, 1); | ||||
else | else | ||||
atomic_add_long(&num_accessed_emulations, 1); | atomic_add_long(&num_accessed_emulations, 1); | ||||
#endif | #endif | ||||
rv = 0; /* success */ | rv = 0; /* success */ | ||||
done: | done: | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
if (pv_lists_locked) | |||||
rw_runlock(&pvh_global_lock); | |||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
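
On pmaps that emulate A/D bits (for example, EPT configurations without hardware A/D support), the function turns a page fault into a software update of the accessed or dirty bit when, and only when, the underlying mapping already permits the access; anything else goes to the full fault path. A stripped-down model of that decision, with invented bit values and no locking (the kernel updates the PTE atomically under the pmap lock):

#include <stdint.h>

#define T_V	0x01	/* valid */
#define T_RW	0x02	/* writeable */
#define T_A	0x04	/* accessed (software-managed) */
#define T_M	0x08	/* modified (software-managed) */

#define FT_READ		0
#define FT_WRITE	1

/*
 * Returns 0 when the fault was only a missing software A/D bit and
 * has been resolved here; -1 sends the fault to the full handler.
 */
static int
emulate_ad_fault(uint64_t *pte, int ftype)
{
	uint64_t v = *pte;

	if ((v & T_V) == 0)
		return (-1);		/* no valid mapping */
	if (ftype == FT_WRITE) {
		if ((v & T_RW) == 0)
			return (-1);	/* genuine protection fault */
		*pte = v | T_A | T_M;
	} else {
		if ((v & T_A) != 0)
			return (-1);	/* A set already: not an A-bit miss */
		*pte = v | T_A;
	}
	return (0);
}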
void | void | ||||
pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num) | pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num) | ||||
{ | { | ||||
pml4_entry_t *pml4; | pml4_entry_t *pml4; | ||||
▲ Show 20 Lines • Show All 214 Lines • Show Last 20 Lines |