Changeset View
Changeset View
Standalone View
Standalone View
sys/riscv/riscv/pmap.c
Show First 20 Lines • Show All 3,141 Lines • ▼ Show 20 Lines | if ((prot & VM_PROT_EXECUTE) != 0) | ||||
new_l2 |= PTE_X; | new_l2 |= PTE_X; | ||||
if (va < VM_MAXUSER_ADDRESS) | if (va < VM_MAXUSER_ADDRESS) | ||||
new_l2 |= PTE_U; | new_l2 |= PTE_U; | ||||
return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP | | return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP | | ||||
PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp)); | PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp)); | ||||
} | } | ||||
/* | /* | ||||
* Returns true if every page table entry in the specified page table is | |||||
* zero. | |||||
*/ | |||||
static bool | |||||
pmap_every_pte_zero(vm_paddr_t pa) | |||||
{ | |||||
pt_entry_t *pt_end, *pte; | |||||
KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned")); | |||||
pte = (pt_entry_t *)PHYS_TO_DMAP(pa); | |||||
for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) { | |||||
if (*pte != 0) | |||||
return (false); | |||||
} | |||||
return (true); | |||||
} | |||||
/* | |||||
* Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if | * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if | ||||
* the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE, or | * the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE, or | ||||
* KERN_RESOURCE_SHORTAGE otherwise. Returns KERN_FAILURE if | * KERN_RESOURCE_SHORTAGE otherwise. Returns KERN_FAILURE if | ||||
* PMAP_ENTER_NOREPLACE was specified and a 4KB page mapping already exists | * PMAP_ENTER_NOREPLACE was specified and a 4KB page mapping already exists | ||||
* within the 2MB virtual address range starting at the specified virtual | * within the 2MB virtual address range starting at the specified virtual | ||||
* address. Returns KERN_NO_SPACE if PMAP_ENTER_NOREPLACE was specified and a | * address. Returns KERN_NO_SPACE if PMAP_ENTER_NOREPLACE was specified and a | ||||
* 2MB page mapping already exists at the specified virtual address. Returns | * 2MB page mapping already exists at the specified virtual address. Returns | ||||
* KERN_RESOURCE_SHORTAGE if either (1) PMAP_ENTER_NOSLEEP was specified and a | * KERN_RESOURCE_SHORTAGE if either (1) PMAP_ENTER_NOSLEEP was specified and a | ||||
Show All 20 Lines | if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ? | ||||
return (KERN_RESOURCE_SHORTAGE); | return (KERN_RESOURCE_SHORTAGE); | ||||
} | } | ||||
l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg)); | l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg)); | ||||
l2 = &l2[pmap_l2_index(va)]; | l2 = &l2[pmap_l2_index(va)]; | ||||
if ((oldl2 = pmap_load(l2)) != 0) { | if ((oldl2 = pmap_load(l2)) != 0) { | ||||
KASSERT(l2pg->ref_count > 1, | KASSERT(l2pg->ref_count > 1, | ||||
("pmap_enter_l2: l2pg's ref count is too low")); | ("pmap_enter_l2: l2pg's ref count is too low")); | ||||
if ((flags & PMAP_ENTER_NOREPLACE) != 0) { | if ((flags & PMAP_ENTER_NOREPLACE) != 0) { | ||||
if ((oldl2 & PTE_RWX) != 0) { | if ((oldl2 & PTE_RWX) != 0) { | ||||
alc: On amd64 and arm64, we don't maintain an exact count of the valid mappings within a kernel page table page. If that is true on riscv, then this section of code should look more like the amd64 and arm64 versions. | |||||
Done Inline Actions — mhorne: Yes, good catch. I will make the adjustment as part of D36563. | |||||
l2pg->ref_count--; | l2pg->ref_count--; | ||||
CTR2(KTR_PMAP, | CTR2(KTR_PMAP, | ||||
"pmap_enter_l2: no space for va %#lx" | "pmap_enter_l2: no space for va %#lx" | ||||
" in pmap %p", va, pmap); | " in pmap %p", va, pmap); | ||||
return (KERN_NO_SPACE); | return (KERN_NO_SPACE); | ||||
} else { | } else if (va < VM_MAXUSER_ADDRESS || | ||||
!pmap_every_pte_zero(L2PTE_TO_PHYS(oldl2))) { | |||||
l2pg->ref_count--; | l2pg->ref_count--; | ||||
CTR2(KTR_PMAP, "pmap_enter_l2:" | CTR2(KTR_PMAP, "pmap_enter_l2:" | ||||
" failed to replace existing mapping" | " failed to replace existing mapping" | ||||
" for va %#lx in pmap %p", va, pmap); | " for va %#lx in pmap %p", va, pmap); | ||||
return (KERN_FAILURE); | return (KERN_FAILURE); | ||||
} | } | ||||
} | } | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
▲ Show 20 Lines • Show All 1,811 Lines • Show Last 20 Lines |
On amd64 and arm64, we don't maintain an exact count of the valid mappings within a kernel page table page. If that is true on riscv, then this section of code should look more like the amd64 and arm64 versions.