Changeset View
Changeset View
Standalone View
Standalone View
sys/amd64/amd64/pmap.c
- This file is larger than 256 KB, so syntax highlighting is disabled by default.
Show First 20 Lines • Show All 352 Lines • ▼ Show 20 Lines | if (_new_lock != *_lockp) { \ | ||||
*_lockp = _new_lock; \ | *_lockp = _new_lock; \ | ||||
rw_wlock(*_lockp); \ | rw_wlock(*_lockp); \ | ||||
} \ | } \ | ||||
} while (0) | } while (0) | ||||
#define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \ | #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \ | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m)) | CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m)) | ||||
/*
 * Returns true if the PV list lock in *lockp is already the lock that
 * covers physical address "pa" (i.e. no lock switch would be needed).
 *
 * A plain parenthesized expression suffices here; the GNU "({ })"
 * statement-expression extension is unnecessary for a single expression
 * and was flagged in review.
 */
#define	PV_LIST_LOCK_MATCHES(lockp, pa)					\
	(PHYS_TO_PV_LIST_LOCK(pa) == *(lockp))
#define	PV_LIST_LOCK_MATCHES_VM_PAGE(lockp, m)				\
	PV_LIST_LOCK_MATCHES(lockp, VM_PAGE_TO_PHYS(m))
/*
 * Drop the currently held PV list lock, if any, and clear *lockp so a
 * later CHANGE_PV_LIST_LOCK_TO_PHYS() will acquire a lock from scratch.
 * Safe to invoke when no PV list lock is held (*lockp == NULL).
 */
#define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
	struct rwlock **_lockp = (lockp);		\
							\
	if (*_lockp != NULL) {				\
		rw_wunlock(*_lockp);			\
		*_lockp = NULL;				\
	}						\
} while (0)
▲ Show 20 Lines • Show All 775 Lines • ▼ Show 20 Lines | |||||
#define MAPDEV_SETATTR 0x00000002 /* Modify existing attrs. */ | #define MAPDEV_SETATTR 0x00000002 /* Modify existing attrs. */ | ||||
#define MAPDEV_ASSERTVALID 0x00000004 /* Assert mapping validity. */ | #define MAPDEV_ASSERTVALID 0x00000004 /* Assert mapping validity. */ | ||||
TAILQ_HEAD(pv_chunklist, pv_chunk); | TAILQ_HEAD(pv_chunklist, pv_chunk); | ||||
static void free_pv_chunk(struct pv_chunk *pc); | static void free_pv_chunk(struct pv_chunk *pc); | ||||
static void free_pv_chunk_batch(struct pv_chunklist *batch); | static void free_pv_chunk_batch(struct pv_chunklist *batch); | ||||
static void free_pv_entry(pmap_t pmap, pv_entry_t pv); | static void free_pv_entry(pmap_t pmap, pv_entry_t pv); | ||||
static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp); | static pv_entry_t get_pv_entry(pmap_t pmap, bool reclaim, struct rwlock **lockp); | ||||
static int popcnt_pc_map_pq(uint64_t *map); | static int popcnt_pc_map_pq(uint64_t *map); | ||||
static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); | static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); | ||||
static void reserve_pv_entries(pmap_t pmap, int needed, | static void reserve_pv_entries(pmap_t pmap, int needed, | ||||
struct rwlock **lockp); | struct rwlock **lockp); | ||||
static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | ||||
struct rwlock **lockp); | struct rwlock **lockp); | ||||
static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, | static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, | ||||
u_int flags, struct rwlock **lockp); | u_int flags, struct rwlock **lockp); | ||||
▲ Show 20 Lines • Show All 2,994 Lines • ▼ Show 20 Lines | reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) | ||||
vm_page_t m, m_pc; | vm_page_t m, m_pc; | ||||
struct spglist free; | struct spglist free; | ||||
uint64_t inuse; | uint64_t inuse; | ||||
int bit, field, freed; | int bit, field, freed; | ||||
bool start_di; | bool start_di; | ||||
static int active_reclaims = 0; | static int active_reclaims = 0; | ||||
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); | PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); | ||||
KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); | |||||
pmap = NULL; | pmap = NULL; | ||||
m_pc = NULL; | m_pc = NULL; | ||||
PG_G = PG_A = PG_M = PG_RW = 0; | PG_G = PG_A = PG_M = PG_RW = 0; | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
bzero(&pc_marker_b, sizeof(pc_marker_b)); | bzero(&pc_marker_b, sizeof(pc_marker_b)); | ||||
bzero(&pc_marker_end_b, sizeof(pc_marker_end_b)); | bzero(&pc_marker_end_b, sizeof(pc_marker_end_b)); | ||||
pc_marker = (struct pv_chunk *)&pc_marker_b; | pc_marker = (struct pv_chunk *)&pc_marker_b; | ||||
pc_marker_end = (struct pv_chunk *)&pc_marker_end_b; | pc_marker_end = (struct pv_chunk *)&pc_marker_end_b; | ||||
▲ Show 20 Lines • Show All 243 Lines • ▼ Show 20 Lines | |||||
* Returns a new PV entry, allocating a new PV chunk from the system when | * Returns a new PV entry, allocating a new PV chunk from the system when | ||||
* needed. If this PV chunk allocation fails and a PV list lock pointer was | * needed. If this PV chunk allocation fails and a PV list lock pointer was | ||||
* given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is | * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is | ||||
* returned. | * returned. | ||||
* | * | ||||
* The given PV list lock may be released. | * The given PV list lock may be released. | ||||
*/ | */ | ||||
static pv_entry_t | static pv_entry_t | ||||
get_pv_entry(pmap_t pmap, struct rwlock **lockp) | get_pv_entry(pmap_t pmap, bool reclaim, struct rwlock **lockp) | ||||
{ | { | ||||
int bit, field; | int bit, field; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
struct pv_chunk *pc; | struct pv_chunk *pc; | ||||
vm_page_t m; | vm_page_t m; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); | PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); | ||||
Show All 20 Lines | if (field < _NPCM) { | ||||
PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); | PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); | ||||
return (pv); | return (pv); | ||||
} | } | ||||
} | } | ||||
/* No free items, allocate another chunk */ | /* No free items, allocate another chunk */ | ||||
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | | m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | | ||||
VM_ALLOC_WIRED); | VM_ALLOC_WIRED); | ||||
if (m == NULL) { | if (m == NULL) { | ||||
if (lockp == NULL) { | if (!reclaim) { | ||||
PV_STAT(pc_chunk_tryfail++); | PV_STAT(pc_chunk_tryfail++); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
m = reclaim_pv_chunk(pmap, lockp); | m = reclaim_pv_chunk(pmap, lockp); | ||||
if (m == NULL) | if (m == NULL) | ||||
goto retry; | goto retry; | ||||
} | } | ||||
PV_STAT(atomic_add_int(&pc_chunk_count, 1)); | PV_STAT(atomic_add_int(&pc_chunk_count, 1)); | ||||
▲ Show 20 Lines • Show All 273 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
/*
 * Conditionally creates the PV entry for a 4KB page mapping of "m" at
 * virtual address "va" if the required memory can be allocated without
 * resorting to reclamation.  Returns TRUE on success, FALSE if no PV
 * chunk could be allocated.
 *
 * The PV list lock in *lockp may be dropped and changed: a lock that
 * does not already cover "m" is released before the allocation so it is
 * not held across it, and on success the lock covering "m" is acquired.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct rwlock **lockp)
{
	pv_entry_t pv;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/* Drop a PV list lock that does not match "m" before allocating. */
	if (!PV_LIST_LOCK_MATCHES_VM_PAGE(lockp, m))
		RELEASE_PV_LIST_LOCK(lockp);
	/* reclaim == false: fail rather than reclaim another pmap's chunk. */
	if ((pv = get_pv_entry(pmap, false, NULL)) != NULL) {
		pv->pv_va = va;
		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
		m->md.pv_gen++;
		return (TRUE);
	} else
		return (FALSE);
}
/* | /* | ||||
* Create the PV entry for a 2MB page mapping. Always returns true unless the | * Create the PV entry for a 2MB page mapping. Always returns true unless the | ||||
* flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns | * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns | ||||
* false if the PV entry cannot be allocated without resorting to reclamation. | * false if the PV entry cannot be allocated without resorting to reclamation. | ||||
*/ | */ | ||||
static bool | static bool | ||||
pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags, | pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags, | ||||
struct rwlock **lockp) | struct rwlock **lockp) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
/* Pass NULL instead of the lock pointer to disable reclamation. */ | pa = pde & PG_PS_FRAME; | ||||
if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ? | if (!PV_LIST_LOCK_MATCHES(lockp, pa)) | ||||
NULL : lockp)) == NULL) | RELEASE_PV_LIST_LOCK(lockp); | ||||
pv = get_pv_entry(pmap, !(flags & PMAP_ENTER_NORECLAIM), lockp); | |||||
kibUnsubmitted Not Done Inline Actions_NORECLAIM) != 0 kib: _NORECLAIM) != 0 | |||||
if (pv == NULL) | |||||
return (false); | return (false); | ||||
pv->pv_va = va; | pv->pv_va = va; | ||||
pa = pde & PG_PS_FRAME; | |||||
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); | CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); | ||||
pvh = pa_to_pvh(pa); | pvh = pa_to_pvh(pa); | ||||
TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); | TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); | ||||
pvh->pv_gen++; | pvh->pv_gen++; | ||||
return (true); | return (true); | ||||
} | } | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 1,184 Lines • ▼ Show 20 Lines | if ((origpte & PG_V) != 0) { | ||||
pmap_resident_count_inc(pmap, 1); | pmap_resident_count_inc(pmap, 1); | ||||
} | } | ||||
/* | /* | ||||
* Enter on the PV list if part of our managed memory. | * Enter on the PV list if part of our managed memory. | ||||
*/ | */ | ||||
if ((newpte & PG_MANAGED) != 0) { | if ((newpte & PG_MANAGED) != 0) { | ||||
if (pv == NULL) { | if (pv == NULL) { | ||||
pv = get_pv_entry(pmap, &lock); | if (!PV_LIST_LOCK_MATCHES_VM_PAGE(&lock, m)) | ||||
RELEASE_PV_LIST_LOCK(&lock); | |||||
pv = get_pv_entry(pmap, true, &lock); | |||||
pv->pv_va = va; | pv->pv_va = va; | ||||
} | } | ||||
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); | CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); | ||||
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); | TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); | ||||
m->md.pv_gen++; | m->md.pv_gen++; | ||||
if ((newpte & PG_RW) != 0) | if ((newpte & PG_RW) != 0) | ||||
vm_page_aflag_set(m, PGA_WRITEABLE); | vm_page_aflag_set(m, PGA_WRITEABLE); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 176 Lines • ▼ Show 20 Lines | if (va >= VM_MAXUSER_ADDRESS) { | ||||
KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p", | KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p", | ||||
pde)); | pde)); | ||||
} | } | ||||
if ((newpde & PG_MANAGED) != 0) { | if ((newpde & PG_MANAGED) != 0) { | ||||
/* | /* | ||||
* Abort this mapping if its PV entry could not be created. | * Abort this mapping if its PV entry could not be created. | ||||
*/ | */ | ||||
if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) { | if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) { | ||||
RELEASE_PV_LIST_LOCK(lockp); | |||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
if (pmap_unwire_ptp(pmap, va, pdpg, &free)) { | if (pmap_unwire_ptp(pmap, va, pdpg, &free)) { | ||||
/* | /* | ||||
* Although "va" is not mapped, paging- | * Although "va" is not mapped, paging- | ||||
* structure caches could nonetheless have | * structure caches could nonetheless have | ||||
* entries that refer to the freed page table | * entries that refer to the freed page table | ||||
* pages. Invalidate those entries. | * pages. Invalidate those entries. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 163 Lines • ▼ Show 20 Lines | if (*pte) { | ||||
return (mpte); | return (mpte); | ||||
} | } | ||||
/* | /* | ||||
* Enter on the PV list if part of our managed memory. | * Enter on the PV list if part of our managed memory. | ||||
*/ | */ | ||||
if ((m->oflags & VPO_UNMANAGED) == 0 && | if ((m->oflags & VPO_UNMANAGED) == 0 && | ||||
!pmap_try_insert_pv_entry(pmap, va, m, lockp)) { | !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { | ||||
RELEASE_PV_LIST_LOCK(lockp); | |||||
if (mpte != NULL) { | if (mpte != NULL) { | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
if (pmap_unwire_ptp(pmap, va, mpte, &free)) { | if (pmap_unwire_ptp(pmap, va, mpte, &free)) { | ||||
/* | /* | ||||
* Although "va" is not mapped, paging- | * Although "va" is not mapped, paging- | ||||
* structure caches could nonetheless have | * structure caches could nonetheless have | ||||
* entries that refer to the freed page table | * entries that refer to the freed page table | ||||
* pages. Invalidate those entries. | * pages. Invalidate those entries. | ||||
▲ Show 20 Lines • Show All 4,127 Lines • Show Last 20 Lines |
Why do you need the ({}) extension there ?