Changeset View
Standalone View
sys/arm64/arm64/pmap.c
Show First 20 Lines • Show All 4,549 Lines • ▼ Show 20 Lines | pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
static vm_page_t | static vm_page_t | ||||
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, | pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, | ||||
vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) | vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) | ||||
{ | { | ||||
pd_entry_t *pde; | pd_entry_t *pde; | ||||
pt_entry_t *l2, *l3, l3_val; | pt_entry_t *l1, *l2, *l3, l3_val; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
int lvl; | int lvl; | ||||
KASSERT(!VA_IS_CLEANMAP(va) || | KASSERT(!VA_IS_CLEANMAP(va) || | ||||
(m->oflags & VPO_UNMANAGED) != 0, | (m->oflags & VPO_UNMANAGED) != 0, | ||||
("pmap_enter_quick_locked: managed mapping within the clean submap")); | ("pmap_enter_quick_locked: managed mapping within the clean submap")); | ||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pmap, MA_OWNED); | ||||
PMAP_ASSERT_STAGE1(pmap); | PMAP_ASSERT_STAGE1(pmap); | ||||
Show All 11 Lines | if (!ADDR_IS_KERNEL(va)) { | ||||
/* | /* | ||||
* Calculate pagetable page index | * Calculate pagetable page index | ||||
*/ | */ | ||||
l2pindex = pmap_l2_pindex(va); | l2pindex = pmap_l2_pindex(va); | ||||
if (mpte && (mpte->pindex == l2pindex)) { | if (mpte && (mpte->pindex == l2pindex)) { | ||||
mpte->ref_count++; | mpte->ref_count++; | ||||
} else { | } else { | ||||
/* | /* | ||||
* Get the l2 entry | |||||
*/ | |||||
pde = pmap_pde(pmap, va, &lvl); | |||||
/* | |||||
* If the page table page is mapped, we just increment | * If the page table page is mapped, we just increment | ||||
* the hold count, and activate it. Otherwise, we | * the hold count, and activate it. Otherwise, we | ||||
* attempt to allocate a page table page. If this | * attempt to allocate a page table page, passing NULL | ||||
* attempt fails, we don't retry. Instead, we give up. | * instead of the PV list lock pointer because we don't | ||||
* intend to sleep. If this attempt fails, we don't | |||||
* retry. Instead, we give up. | |||||
*/ | */ | ||||
if (lvl == 1) { | l1 = pmap_l1(pmap, va); | ||||
l2 = pmap_l1_to_l2(pde, va); | if (l1 != NULL && pmap_load(l1) != 0) { | ||||
if ((pmap_load(l1) & ATTR_DESCR_MASK) == | |||||
L1_BLOCK) | |||||
return (NULL); | |||||
l2 = pmap_l1_to_l2(l1, va); | |||||
alc (Inline comment): I wish we had pmap_X_to_X+1() variants that returned the PTE value rather than its address so that we didn't repeatedly pmap_load() afterwards.
markj (Author, Inline reply — Done): Maybe something following this example?

```c
pd_entry_t
pmap_l1e_to_l2e(pdp_entry_t l1e, vm_offset_t va)
{
	pd_entry_t *l2;

	l2 = (pd_entry_t *)PHYS_TO_DMAP(l1e & ~ATTR_MASK);
	return (l2[pmap_l2_index(va)]);
}
```

On the other hand, I'm not really sure why the arm64 pmap uses pmap_load() to begin with.
alc (Inline reply — Not Done): Yes, something like that.
if (pmap_load(l2) != 0) { | |||||
if ((pmap_load(l2) & ATTR_DESCR_MASK) == | if ((pmap_load(l2) & ATTR_DESCR_MASK) == | ||||
L2_BLOCK) | L2_BLOCK) | ||||
return (NULL); | return (NULL); | ||||
} | mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & | ||||
if (lvl == 2 && pmap_load(pde) != 0) { | ~ATTR_MASK); | ||||
mpte = | |||||
PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK); | |||||
mpte->ref_count++; | mpte->ref_count++; | ||||
} else { | } else { | ||||
/* | mpte = _pmap_alloc_l3(pmap, l2pindex, | ||||
* Pass NULL instead of the PV list lock | NULL); | ||||
* pointer, because we don't intend to sleep. | if (mpte == NULL) | ||||
*/ | return (mpte); | ||||
} | |||||
} else { | |||||
mpte = _pmap_alloc_l3(pmap, l2pindex, NULL); | mpte = _pmap_alloc_l3(pmap, l2pindex, NULL); | ||||
if (mpte == NULL) | if (mpte == NULL) | ||||
return (mpte); | return (mpte); | ||||
} | } | ||||
} | } | ||||
l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); | l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); | ||||
l3 = &l3[pmap_l3_index(va)]; | l3 = &l3[pmap_l3_index(va)]; | ||||
} else { | } else { | ||||
▲ Show 20 Lines • Show All 2,919 Lines • Show Last 20 Lines |
I wish we had pmap_X_to_X+1() variants that returned the PTE value rather than its address so that we didn't repeatedly pmap_load() afterwards.