Changeset View
Changeset View
Standalone View
Standalone View
sys/arm64/arm64/pmap.c
Show First 20 Lines • Show All 3,397 Lines • ▼ Show 20 Lines | setl3: | ||||
atomic_add_long(&pmap_l2_promotions, 1); | atomic_add_long(&pmap_l2_promotions, 1); | ||||
CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va, | CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va, | ||||
pmap); | pmap); | ||||
} | } | ||||
#endif /* VM_NRESERVLEVEL > 0 */ | #endif /* VM_NRESERVLEVEL > 0 */ | ||||
/*
 * Preallocate L1 and L2 page directories for a specific VA range.
 * This is optional and not currently in use.
 */
int | |||||
pmap_bootstrap_smmu(pmap_t pmap, vm_offset_t sva, int count) | |||||
{ | |||||
struct rwlock *lock; | |||||
pd_entry_t *pde; | |||||
vm_page_t mpte; | |||||
vm_offset_t va; | |||||
int lvl; | |||||
int i; | |||||
lock = NULL; | |||||
PMAP_LOCK(pmap); | |||||
va = sva; | |||||
for (i = 0; i < count; i++) { | |||||
pde = pmap_pde(pmap, va, &lvl); | |||||
if (pde != NULL && lvl == 2) | |||||
return (EEXIST); | |||||
mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), &lock); | |||||
if (mpte == NULL) | |||||
return (ENOMEM); | |||||
va += L2_SIZE; | |||||
} | |||||
if (lock != NULL) | |||||
rw_wunlock(lock); | |||||
PMAP_UNLOCK(pmap); | |||||
return (0); | |||||
} | |||||
/* | |||||
* Add a single SMMU entry. This function does not sleep. | |||||
*/ | |||||
int | |||||
pmap_senter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | |||||
vm_prot_t prot, u_int flags) | |||||
{ | |||||
pd_entry_t *pde; | |||||
pt_entry_t new_l3, orig_l3; | |||||
pt_entry_t *l3; | |||||
vm_page_t mpte; | |||||
int lvl; | |||||
int rv; | |||||
PMAP_ASSERT_STAGE1(pmap); | |||||
KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space")); | |||||
va = trunc_page(va); | |||||
new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | | |||||
ATTR_S1_IDX(VM_MEMATTR_DEVICE) | L3_PAGE); | |||||
if ((prot & VM_PROT_WRITE) == 0) | |||||
new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO); | |||||
new_l3 |= ATTR_S1_XN; /* Execute never. */ | |||||
new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER); | |||||
new_l3 |= ATTR_S1_nG; /* Non global. */ | |||||
CTR2(KTR_PMAP, "pmap_senter: %.16lx -> %.16lx", va, pa); | |||||
PMAP_LOCK(pmap); | |||||
/* | |||||
* In the case that a page table page is not | |||||
* resident, we are creating it here. | |||||
*/ | |||||
retry: | |||||
pde = pmap_pde(pmap, va, &lvl); | |||||
if (pde != NULL && lvl == 2) { | |||||
l3 = pmap_l2_to_l3(pde, va); | |||||
} else { | |||||
mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), NULL); | |||||
if (mpte == NULL) { | |||||
CTR0(KTR_PMAP, "pmap_enter: mpte == NULL"); | |||||
rv = KERN_RESOURCE_SHORTAGE; | |||||
goto out; | |||||
} | |||||
goto retry; | |||||
} | |||||
orig_l3 = pmap_load(l3); | |||||
KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid")); | |||||
/* New mapping */ | |||||
pmap_store(l3, new_l3); | |||||
dsb(ishst); | |||||
mmel: Why? Do you expect that SMMU pmap can be used on PE? | |||||
Done Inline ActionsNo, but we are doing SMMU TLB invalidation shortly after pmap_senter(). br: No, but we are doing SMMU TLB invalidation shortly after pmap_senter(). | |||||
rv = KERN_SUCCESS; | |||||
out: | |||||
PMAP_UNLOCK(pmap); | |||||
return (rv); | |||||
} | |||||
/* | |||||
* Remove a single SMMU entry. | |||||
*/ | |||||
int | |||||
pmap_sremove(pmap_t pmap, vm_offset_t va) | |||||
{ | |||||
pt_entry_t *pte; | |||||
int lvl; | |||||
int rc; | |||||
PMAP_LOCK(pmap); | |||||
pte = pmap_pte(pmap, va, &lvl); | |||||
KASSERT(lvl == 3, | |||||
("Invalid SMMU pagetable level: %d != 3", lvl)); | |||||
if (pte != NULL) { | |||||
pmap_clear(pte); | |||||
rc = KERN_FAILURE; | |||||
} else | |||||
rc = KERN_SUCCESS; | |||||
PMAP_UNLOCK(pmap); | |||||
return (rc); | |||||
} | |||||
/* | |||||
* Remove all the allocated L1, L2 pages from SMMU pmap. | |||||
* All the L3 entires must be cleared in advance, otherwise | |||||
* this function returns error. | |||||
*/ | |||||
/*
 * Walk the whole user VA range of a stage-1 SMMU pmap, verify that no
 * valid L3 entries remain, and free every allocated L1/L2/L3 page
 * table page.  Returns KERN_SUCCESS, or KERN_FAILURE if a valid L3
 * entry is still present (the walk stops at the first one found).
 */
int
pmap_sremove_all(pmap_t pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_offset_t sva;	/* current VA, advanced as the walk skips/visits entries */
	vm_paddr_t pa;		/* physical address of an L3 page table page */
	vm_paddr_t pa0;		/* physical address of an L1 page table page */
	vm_paddr_t pa1;		/* physical address of an L2 page table page */
	int i, j, k, l;		/* L0..L3 table indices */
	vm_page_t m;		/* vm_page of the L3 page table page */
	vm_page_t m0;		/* vm_page of the L1 page table page */
	vm_page_t m1;		/* vm_page of the L2 page table page */
	int rc;

	PMAP_LOCK(pmap);

	for (sva = VM_MINUSER_ADDRESS, i = pmap_l0_index(sva); i < Ln_ENTRIES;
	    i++) {
		l0e = pmap->pm_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			/* No L1 table here; skip the whole L0 region. */
			sva += L0_SIZE;
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		/* Access the L1 table through the direct map. */
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);
		for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				sva += L1_SIZE;
				continue;
			}
			if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
				/* 1GB block mapping: no L2 table to free. */
				sva += L1_SIZE;
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);
			for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					sva += L2_SIZE;
					continue;
				}
				if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
					/* 2MB block mapping: no L3 table. */
					sva += L2_SIZE;
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
				/* The caller must have cleared all L3 PTEs first. */
				for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
				    l++, sva += L3_SIZE) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					/* A live mapping remains: abort the teardown. */
					printf("%s: l3e found for va %jx\n",
					    __func__, sva);
					rc = KERN_FAILURE;
					goto out;
				}
				/*
				 * Free the empty L3 page table page.
				 * NOTE(review): m1 is unwired once per freed
				 * L3 page — presumably each child page holds
				 * one wiring on its parent table page (as in
				 * _pmap_alloc_l3); confirm the wire counts.
				 */
				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				pmap_clear(&l2[k]);
			}
			/* Free the L2 page table page; drop its wiring on m0. */
			vm_page_unwire_noq(m0);
			pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			pmap_clear(&l1[j]);
		}
		/* Free the L1 page table page and invalidate the L0 entry. */
		pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		pmap_clear(&pmap->pm_l0[i]);
	}

	/* Every page table page must now be accounted for and freed. */
	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("Invalid resident count %jd", pmap->pm_stats.resident_count));

	rc = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rc);
}
/* | |||||
* Insert the given physical page (p) at | * Insert the given physical page (p) at | ||||
* the specified virtual address (v) in the | * the specified virtual address (v) in the | ||||
* target physical map with the protection requested. | * target physical map with the protection requested. | ||||
* | * | ||||
* If specified, the page will be wired down, meaning | * If specified, the page will be wired down, meaning | ||||
* that the related pte can not be reclaimed. | * that the related pte can not be reclaimed. | ||||
* | * | ||||
* NB: This is the only routine which MAY NOT lazy-evaluate | * NB: This is the only routine which MAY NOT lazy-evaluate | ||||
▲ Show 20 Lines • Show All 3,287 Lines • Show Last 20 Lines |
Why? Do you expect that SMMU pmap can be used on PE?