sys/amd64/amd64/pmap.c
[… 1,252 lines elided …]
 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
     vm_prot_t prot, int mode, int flags);
 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
     vm_offset_t va, struct rwlock **lockp);
 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
     vm_offset_t va);
-static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
+static int pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, struct rwlock **lockp);
 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
     u_int flags, vm_page_t m, struct rwlock **lockp);
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
[… 5,996 lines elided …]
 out:
     if (lock != NULL)
         rw_wunlock(lock);
     PMAP_UNLOCK(pmap);
     return (rv);
 }

 /*
- * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
- * if successful. Returns false if (1) a page table page cannot be allocated
- * without sleeping, (2) a mapping already exists at the specified virtual
- * address, or (3) a PV entry cannot be allocated without reclaiming another
- * PV entry.
+ * Tries to create a read- and/or execute-only 2MB page mapping. Returns
+ * KERN_SUCCESS if the mapping was created. Otherwise, returns an error
+ * value. See pmap_enter_pde() for the possible error values when "no sleep",
+ * "no replace", and "no reclaim" are specified.
  */
-static bool
+static int
 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     struct rwlock **lockp)
 {
     pd_entry_t newpde;
     pt_entry_t PG_V;

     PMAP_LOCK_ASSERT(pmap, MA_OWNED);
     PG_V = pmap_valid_bit(pmap);
     newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
         PG_PS | PG_V;
     if ((m->oflags & VPO_UNMANAGED) == 0)
         newpde |= PG_MANAGED;
     if ((prot & VM_PROT_EXECUTE) == 0)
         newpde |= pg_nx;
     if (va < VM_MAXUSER_ADDRESS)
         newpde |= PG_U;
-    return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
-        PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
-        KERN_SUCCESS);
+    return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
+        PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp));
 }
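Since pmap_enter_2mpage() now returns a Mach error code rather than a bool, a caller can tell "a 2MB mapping is already there" (KERN_NO_SPACE, harmless when prefaulting an object) apart from the real failure modes. A minimal caller-side sketch of that pattern, mirroring the pmap_enter_object() hunk further down (illustrative only, not part of this change; "rv", "va", "m", "prot", and "lock" stand in for the caller's locals):

    int rv;

    rv = pmap_enter_2mpage(pmap, va, m, prot, &lock);
    if (rv != KERN_SUCCESS && rv != KERN_NO_SPACE) {
        /*
         * NOSLEEP/NORECLAIM allocation shortage, an existing 4KB mapping,
         * a hardware erratum, or a PKRU mismatch: the 2MB range was not
         * covered, so fall back to creating 4KB mappings instead.
         */
    }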
 /*
  * Returns true if every page table entry in the specified page table page is
  * zero.
  */
 static bool
 pmap_every_pte_zero(vm_paddr_t pa)
 {
     pt_entry_t *pt_end, *pte;

     KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
     pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
     for (pt_end = pte + NPTEPG; pte < pt_end; pte++) {
         if (*pte != 0)
             return (false);
     }
     return (true);
 }
 /*
  * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
- * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
- * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
- * a mapping already exists at the specified virtual address. Returns
- * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
- * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
- * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
+ * the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE,
+ * KERN_PROTECTION_FAILURE, or KERN_RESOURCE_FAILURE otherwise. Returns
+ * KERN_FAILURE if either (1) PMAP_ENTER_NOREPLACE was specified and a 4KB
+ * page mapping already exists within the 2MB virtual address range starting
+ * at the specified virtual address or (2) the requested 2MB page mapping is
+ * not supported due to hardware errata. Returns KERN_NO_SPACE if
+ * PMAP_ENTER_NOREPLACE was specified and a 2MB page mapping already exists at
+ * the specified virtual address. Returns KERN_PROTECTION_FAILURE if the PKRU
+ * settings are not the same across the 2MB virtual address range starting at
+ * the specified virtual address. Returns KERN_RESOURCE_SHORTAGE if either
+ * (1) PMAP_ENTER_NOSLEEP was specified and a page table page allocation
+ * failed or (2) PMAP_ENTER_NORECLAIM was specified and a PV entry allocation
+ * failed.
  *
  * The parameter "m" is only used when creating a managed, writeable mapping.
  */

    mhorne: I believe this is meant to say KERN_RESOURCE_SHORTAGE instead. I can
    make the adjustment when I commit the related riscv pmap changes, if you like.

    alc (author): Yes, you are correct. Yes, please make the fix when you commit
    the riscv changes.

 static int
 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
     vm_page_t m, struct rwlock **lockp)
 {
     struct spglist free;
[… 39 lines elided within pmap_enter_pde() …]
     /*
      * If there are existing mappings, either abort or remove them.
      */
     oldpde = *pde;
     if ((oldpde & PG_V) != 0) {
         KASSERT(pdpg == NULL || pdpg->ref_count > 1,
             ("pmap_enter_pde: pdpg's reference count is too low"));
-        if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
-            VM_MAXUSER_ADDRESS || (oldpde & PG_PS) != 0 ||
-            !pmap_every_pte_zero(oldpde & PG_FRAME))) {
-            if (pdpg != NULL)
-                pdpg->ref_count--;
-            CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
-                " in pmap %p", va, pmap);
-            return (KERN_FAILURE);
-        }
+        if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
+            if ((oldpde & PG_PS) != 0) {
+                if (pdpg != NULL)
+                    pdpg->ref_count--;
+                CTR2(KTR_PMAP,
+                    "pmap_enter_pde: no space for va %#lx"
+                    " in pmap %p", va, pmap);
+                return (KERN_NO_SPACE);
+            } else if (va < VM_MAXUSER_ADDRESS ||
+                !pmap_every_pte_zero(oldpde & PG_FRAME)) {
+                if (pdpg != NULL)
+                    pdpg->ref_count--;
+                CTR2(KTR_PMAP,
+                    "pmap_enter_pde: failure for va %#lx"
+                    " in pmap %p", va, pmap);
+                return (KERN_FAILURE);
+            }
+        }

         /* Break the existing mapping(s). */
         SLIST_INIT(&free);
         if ((oldpde & PG_PS) != 0) {
             /*
              * The reference to the PD page that was acquired by
              * pmap_alloc_pde() ensures that it won't be freed.
              * However, if the PDE resulted from a promotion, then
              * a reserved PT page could be freed.
              */
[… 77 lines elided …]
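As a summary of the comment on pmap_enter_pde() above, the following sketch (illustrative only, not part of this change) pairs each value that a PMAP_ENTER_NOSLEEP | PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM caller such as pmap_enter_2mpage() can now see with its documented cause; KERN_RESOURCE_SHORTAGE is used here, per the inline review comments above:

    switch (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
        PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp)) {
    case KERN_SUCCESS:
        /* The 2MB mapping was created. */
        break;
    case KERN_NO_SPACE:
        /* NOREPLACE: a 2MB mapping already exists at "va". */
        break;
    case KERN_FAILURE:
        /* NOREPLACE: a 4KB mapping exists in the range, or hardware errata. */
        break;
    case KERN_PROTECTION_FAILURE:
        /* The PKRU settings differ across the 2MB range. */
        break;
    case KERN_RESOURCE_SHORTAGE:
        /* NOSLEEP: a PT page, or NORECLAIM: a PV entry, could not be allocated. */
        break;
    }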
 void
 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
     struct rwlock *lock;
     vm_offset_t va;
     vm_page_t m, mpte;
     vm_pindex_t diff, psize;
+    int rv;

     VM_OBJECT_ASSERT_LOCKED(m_start->object);

     psize = atop(end - start);
     mpte = NULL;
     m = m_start;
     lock = NULL;
     PMAP_LOCK(pmap);
     while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
         va = start + ptoa(diff);
         if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
             m->psind == 1 && pmap_ps_enabled(pmap) &&
-            pmap_enter_2mpage(pmap, va, m, prot, &lock))
+            ((rv = pmap_enter_2mpage(pmap, va, m, prot, &lock)) ==
+            KERN_SUCCESS || rv == KERN_NO_SPACE))
             m = &m[NBPDR / PAGE_SIZE - 1];
         else
             mpte = pmap_enter_quick_locked(pmap, va, m, prot,
                 mpte, &lock);
         m = TAILQ_NEXT(m, listq);
     }
     if (lock != NULL)
         rw_wunlock(lock);
[… 4,582 lines elided …]