Changeset View
Changeset View
Standalone View
Standalone View
head/sys/arm64/arm64/pmap.c
Show First 20 Lines • Show All 322 Lines • ▼ Show 20 Lines | |||||
* the same time as the CPU. | * the same time as the CPU. | ||||
*/ | */ | ||||
/*
 * Page table entry accessors.  Table entries may be changed at the same
 * time as the CPU (e.g. by a hardware table walker updating access/dirty
 * state), so every write and read-modify-write goes through a 64-bit
 * atomic.  pmap_load_store() returns the previous entry (swap) and is
 * reserved for callers that need the old value; plain initializing or
 * overwriting stores use pmap_store().
 */
#define	pmap_clear(table)		atomic_store_64(table, 0)
#define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
#define	pmap_load(table)		(*table)
#define	pmap_load_clear(table)		atomic_swap_64(table, 0)
#define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
#define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
#define	pmap_store(table, entry)	atomic_store_64(table, entry)
/********************/ | /********************/ | ||||
/* Inline functions */ | /* Inline functions */ | ||||
/********************/ | /********************/ | ||||
static __inline void | static __inline void | ||||
pagecopy(void *s, void *d) | pagecopy(void *s, void *d) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 293 Lines • ▼ Show 20 Lines | if ((pa & L1_OFFSET) != 0) { | ||||
l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT); | l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT); | ||||
if (l1_slot != prev_l1_slot) { | if (l1_slot != prev_l1_slot) { | ||||
prev_l1_slot = l1_slot; | prev_l1_slot = l1_slot; | ||||
l2 = (pt_entry_t *)freemempos; | l2 = (pt_entry_t *)freemempos; | ||||
l2_pa = pmap_early_vtophys(kern_l1, | l2_pa = pmap_early_vtophys(kern_l1, | ||||
(vm_offset_t)l2); | (vm_offset_t)l2); | ||||
freemempos += PAGE_SIZE; | freemempos += PAGE_SIZE; | ||||
pmap_load_store(&pagetable_dmap[l1_slot], | pmap_store(&pagetable_dmap[l1_slot], | ||||
(l2_pa & ~Ln_TABLE_MASK) | L1_TABLE); | (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE); | ||||
memset(l2, 0, PAGE_SIZE); | memset(l2, 0, PAGE_SIZE); | ||||
} | } | ||||
KASSERT(l2 != NULL, | KASSERT(l2 != NULL, | ||||
("pmap_bootstrap_dmap: NULL l2 map")); | ("pmap_bootstrap_dmap: NULL l2 map")); | ||||
for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1]; | for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1]; | ||||
pa += L2_SIZE, va += L2_SIZE) { | pa += L2_SIZE, va += L2_SIZE) { | ||||
/* | /* | ||||
* We are on a boundary, stop to | * We are on a boundary, stop to | ||||
* create a level 1 block | * create a level 1 block | ||||
*/ | */ | ||||
if ((pa & L1_OFFSET) == 0) | if ((pa & L1_OFFSET) == 0) | ||||
break; | break; | ||||
l2_slot = pmap_l2_index(va); | l2_slot = pmap_l2_index(va); | ||||
KASSERT(l2_slot != 0, ("...")); | KASSERT(l2_slot != 0, ("...")); | ||||
pmap_load_store(&l2[l2_slot], | pmap_store(&l2[l2_slot], | ||||
(pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN | | (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN | | ||||
ATTR_IDX(CACHED_MEMORY) | L2_BLOCK); | ATTR_IDX(CACHED_MEMORY) | L2_BLOCK); | ||||
} | } | ||||
KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS), | KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS), | ||||
("...")); | ("...")); | ||||
} | } | ||||
for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] && | for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] && | ||||
(physmap[i + 1] - pa) >= L1_SIZE; | (physmap[i + 1] - pa) >= L1_SIZE; | ||||
pa += L1_SIZE, va += L1_SIZE) { | pa += L1_SIZE, va += L1_SIZE) { | ||||
l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT); | l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT); | ||||
pmap_load_store(&pagetable_dmap[l1_slot], | pmap_store(&pagetable_dmap[l1_slot], | ||||
(pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN | | (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN | | ||||
ATTR_IDX(CACHED_MEMORY) | L1_BLOCK); | ATTR_IDX(CACHED_MEMORY) | L1_BLOCK); | ||||
} | } | ||||
/* Create L2 mappings at the end of the region */ | /* Create L2 mappings at the end of the region */ | ||||
if (pa < physmap[i + 1]) { | if (pa < physmap[i + 1]) { | ||||
l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT); | l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT); | ||||
if (l1_slot != prev_l1_slot) { | if (l1_slot != prev_l1_slot) { | ||||
prev_l1_slot = l1_slot; | prev_l1_slot = l1_slot; | ||||
l2 = (pt_entry_t *)freemempos; | l2 = (pt_entry_t *)freemempos; | ||||
l2_pa = pmap_early_vtophys(kern_l1, | l2_pa = pmap_early_vtophys(kern_l1, | ||||
(vm_offset_t)l2); | (vm_offset_t)l2); | ||||
freemempos += PAGE_SIZE; | freemempos += PAGE_SIZE; | ||||
pmap_load_store(&pagetable_dmap[l1_slot], | pmap_store(&pagetable_dmap[l1_slot], | ||||
(l2_pa & ~Ln_TABLE_MASK) | L1_TABLE); | (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE); | ||||
memset(l2, 0, PAGE_SIZE); | memset(l2, 0, PAGE_SIZE); | ||||
} | } | ||||
KASSERT(l2 != NULL, | KASSERT(l2 != NULL, | ||||
("pmap_bootstrap_dmap: NULL l2 map")); | ("pmap_bootstrap_dmap: NULL l2 map")); | ||||
for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1]; | for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1]; | ||||
pa += L2_SIZE, va += L2_SIZE) { | pa += L2_SIZE, va += L2_SIZE) { | ||||
l2_slot = pmap_l2_index(va); | l2_slot = pmap_l2_index(va); | ||||
pmap_load_store(&l2[l2_slot], | pmap_store(&l2[l2_slot], | ||||
(pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN | | (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN | | ||||
ATTR_IDX(CACHED_MEMORY) | L2_BLOCK); | ATTR_IDX(CACHED_MEMORY) | L2_BLOCK); | ||||
} | } | ||||
} | } | ||||
if (pa > dmap_phys_max) { | if (pa > dmap_phys_max) { | ||||
dmap_phys_max = pa; | dmap_phys_max = pa; | ||||
dmap_max_addr = va; | dmap_max_addr = va; | ||||
Show All 18 Lines | pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start) | ||||
l1 = (pd_entry_t *)l1pt; | l1 = (pd_entry_t *)l1pt; | ||||
l1_slot = pmap_l1_index(va); | l1_slot = pmap_l1_index(va); | ||||
l2pt = l2_start; | l2pt = l2_start; | ||||
for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) { | for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) { | ||||
KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index")); | KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index")); | ||||
pa = pmap_early_vtophys(l1pt, l2pt); | pa = pmap_early_vtophys(l1pt, l2pt); | ||||
pmap_load_store(&l1[l1_slot], | pmap_store(&l1[l1_slot], | ||||
(pa & ~Ln_TABLE_MASK) | L1_TABLE); | (pa & ~Ln_TABLE_MASK) | L1_TABLE); | ||||
l2pt += PAGE_SIZE; | l2pt += PAGE_SIZE; | ||||
} | } | ||||
/* Clean the L2 page table */ | /* Clean the L2 page table */ | ||||
memset((void *)l2_start, 0, l2pt - l2_start); | memset((void *)l2_start, 0, l2pt - l2_start); | ||||
return l2pt; | return l2pt; | ||||
Show All 13 Lines | pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start) | ||||
l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE); | l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE); | ||||
l2_slot = pmap_l2_index(va); | l2_slot = pmap_l2_index(va); | ||||
l3pt = l3_start; | l3pt = l3_start; | ||||
for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) { | for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) { | ||||
KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index")); | KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index")); | ||||
pa = pmap_early_vtophys(l1pt, l3pt); | pa = pmap_early_vtophys(l1pt, l3pt); | ||||
pmap_load_store(&l2[l2_slot], | pmap_store(&l2[l2_slot], | ||||
(pa & ~Ln_TABLE_MASK) | L2_TABLE); | (pa & ~Ln_TABLE_MASK) | L2_TABLE); | ||||
l3pt += PAGE_SIZE; | l3pt += PAGE_SIZE; | ||||
} | } | ||||
/* Clean the L2 page table */ | /* Clean the L2 page table */ | ||||
memset((void *)l3_start, 0, l3pt - l3_start); | memset((void *)l3_start, 0, l3pt - l3_start); | ||||
return l3pt; | return l3pt; | ||||
▲ Show 20 Lines • Show All 766 Lines • ▼ Show 20 Lines | _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) | ||||
*/ | */ | ||||
if (ptepindex >= (NUL2E + NUL1E)) { | if (ptepindex >= (NUL2E + NUL1E)) { | ||||
pd_entry_t *l0; | pd_entry_t *l0; | ||||
vm_pindex_t l0index; | vm_pindex_t l0index; | ||||
l0index = ptepindex - (NUL2E + NUL1E); | l0index = ptepindex - (NUL2E + NUL1E); | ||||
l0 = &pmap->pm_l0[l0index]; | l0 = &pmap->pm_l0[l0index]; | ||||
pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE); | pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE); | ||||
} else if (ptepindex >= NUL2E) { | } else if (ptepindex >= NUL2E) { | ||||
vm_pindex_t l0index, l1index; | vm_pindex_t l0index, l1index; | ||||
pd_entry_t *l0, *l1; | pd_entry_t *l0, *l1; | ||||
pd_entry_t tl0; | pd_entry_t tl0; | ||||
l1index = ptepindex - NUL2E; | l1index = ptepindex - NUL2E; | ||||
l0index = l1index >> L0_ENTRIES_SHIFT; | l0index = l1index >> L0_ENTRIES_SHIFT; | ||||
Show All 9 Lines | if (tl0 == 0) { | ||||
} | } | ||||
} else { | } else { | ||||
l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK); | l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK); | ||||
l1pg->wire_count++; | l1pg->wire_count++; | ||||
} | } | ||||
l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK); | l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK); | ||||
l1 = &l1[ptepindex & Ln_ADDR_MASK]; | l1 = &l1[ptepindex & Ln_ADDR_MASK]; | ||||
pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE); | pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE); | ||||
} else { | } else { | ||||
vm_pindex_t l0index, l1index; | vm_pindex_t l0index, l1index; | ||||
pd_entry_t *l0, *l1, *l2; | pd_entry_t *l0, *l1, *l2; | ||||
pd_entry_t tl0, tl1; | pd_entry_t tl0, tl1; | ||||
l1index = ptepindex >> Ln_ENTRIES_SHIFT; | l1index = ptepindex >> Ln_ENTRIES_SHIFT; | ||||
l0index = l1index >> L0_ENTRIES_SHIFT; | l0index = l1index >> L0_ENTRIES_SHIFT; | ||||
Show All 25 Lines | if (tl0 == 0) { | ||||
} else { | } else { | ||||
l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK); | l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK); | ||||
l2pg->wire_count++; | l2pg->wire_count++; | ||||
} | } | ||||
} | } | ||||
l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK); | l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK); | ||||
l2 = &l2[ptepindex & Ln_ADDR_MASK]; | l2 = &l2[ptepindex & Ln_ADDR_MASK]; | ||||
pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE); | pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE); | ||||
} | } | ||||
pmap_resident_count_inc(pmap, 1); | pmap_resident_count_inc(pmap, 1); | ||||
return (m); | return (m); | ||||
} | } | ||||
static vm_page_t | static vm_page_t | ||||
▲ Show 20 Lines • Show All 156 Lines • ▼ Show 20 Lines | if (pmap_load(l1) == 0) { | ||||
nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT, | nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT, | ||||
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | | VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | | ||||
VM_ALLOC_WIRED | VM_ALLOC_ZERO); | VM_ALLOC_WIRED | VM_ALLOC_ZERO); | ||||
if (nkpg == NULL) | if (nkpg == NULL) | ||||
panic("pmap_growkernel: no memory to grow kernel"); | panic("pmap_growkernel: no memory to grow kernel"); | ||||
if ((nkpg->flags & PG_ZERO) == 0) | if ((nkpg->flags & PG_ZERO) == 0) | ||||
pmap_zero_page(nkpg); | pmap_zero_page(nkpg); | ||||
paddr = VM_PAGE_TO_PHYS(nkpg); | paddr = VM_PAGE_TO_PHYS(nkpg); | ||||
pmap_load_store(l1, paddr | L1_TABLE); | pmap_store(l1, paddr | L1_TABLE); | ||||
continue; /* try again */ | continue; /* try again */ | ||||
} | } | ||||
l2 = pmap_l1_to_l2(l1, kernel_vm_end); | l2 = pmap_l1_to_l2(l1, kernel_vm_end); | ||||
if ((pmap_load(l2) & ATTR_AF) != 0) { | if ((pmap_load(l2) & ATTR_AF) != 0) { | ||||
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; | kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; | ||||
if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | ||||
kernel_vm_end = vm_map_max(kernel_map); | kernel_vm_end = vm_map_max(kernel_map); | ||||
break; | break; | ||||
▲ Show 20 Lines • Show All 1,220 Lines • ▼ Show 20 Lines | pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte, | ||||
intr = intr_disable(); | intr = intr_disable(); | ||||
critical_enter(); | critical_enter(); | ||||
/* Clear the old mapping */ | /* Clear the old mapping */ | ||||
pmap_clear(pte); | pmap_clear(pte); | ||||
pmap_invalidate_range_nopin(pmap, va, va + size); | pmap_invalidate_range_nopin(pmap, va, va + size); | ||||
/* Create the new mapping */ | /* Create the new mapping */ | ||||
pmap_load_store(pte, newpte); | pmap_store(pte, newpte); | ||||
dsb(ishst); | dsb(ishst); | ||||
critical_exit(); | critical_exit(); | ||||
intr_restore(intr); | intr_restore(intr); | ||||
} | } | ||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 269 Lines • ▼ Show 20 Lines | if (pmap_l3_valid(orig_l3)) { | ||||
/* | /* | ||||
* Has the physical page changed? | * Has the physical page changed? | ||||
*/ | */ | ||||
if (opa == pa) { | if (opa == pa) { | ||||
/* | /* | ||||
* No, might be a protection or wiring change. | * No, might be a protection or wiring change. | ||||
*/ | */ | ||||
if ((orig_l3 & ATTR_SW_MANAGED) != 0) { | if ((orig_l3 & ATTR_SW_MANAGED) != 0 && | ||||
if ((new_l3 & ATTR_AP(ATTR_AP_RW)) == | (new_l3 & ATTR_SW_DBM) != 0) | ||||
ATTR_AP(ATTR_AP_RW)) { | |||||
vm_page_aflag_set(m, PGA_WRITEABLE); | vm_page_aflag_set(m, PGA_WRITEABLE); | ||||
} | |||||
} | |||||
goto validate; | goto validate; | ||||
} | } | ||||
/* | /* | ||||
* The physical page has changed. Temporarily invalidate | * The physical page has changed. Temporarily invalidate | ||||
* the mapping. | * the mapping. | ||||
*/ | */ | ||||
orig_l3 = pmap_load_clear(l3); | orig_l3 = pmap_load_clear(l3); | ||||
▲ Show 20 Lines • Show All 89 Lines • ▼ Show 20 Lines | if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) { | ||||
* actual mapping. | * actual mapping. | ||||
*/ | */ | ||||
CTR4(KTR_PMAP, "%s: already mapped page - " | CTR4(KTR_PMAP, "%s: already mapped page - " | ||||
"pmap %p va 0x%#lx pte 0x%lx", | "pmap %p va 0x%#lx pte 0x%lx", | ||||
__func__, pmap, va, new_l3); | __func__, pmap, va, new_l3); | ||||
} | } | ||||
} else { | } else { | ||||
/* New mapping */ | /* New mapping */ | ||||
pmap_load_store(l3, new_l3); | pmap_store(l3, new_l3); | ||||
dsb(ishst); | dsb(ishst); | ||||
} | } | ||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
if (pmap != pmap_kernel() && | if (pmap != pmap_kernel() && | ||||
(mpte == NULL || mpte->wire_count == NL3PG) && | (mpte == NULL || mpte->wire_count == NL3PG) && | ||||
pmap_ps_enabled(pmap) && | pmap_ps_enabled(pmap) && | ||||
(m->flags & PG_FICTITIOUS) == 0 && | (m->flags & PG_FICTITIOUS) == 0 && | ||||
▲ Show 20 Lines • Show All 85 Lines • ▼ Show 20 Lines | if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) | ||||
(void)pmap_remove_l2(pmap, l2, va, | (void)pmap_remove_l2(pmap, l2, va, | ||||
pmap_load(pmap_l1(pmap, va)), &free, lockp); | pmap_load(pmap_l1(pmap, va)), &free, lockp); | ||||
else | else | ||||
pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE, | pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE, | ||||
&free, lockp); | &free, lockp); | ||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
if (va >= VM_MAXUSER_ADDRESS) { | if (va >= VM_MAXUSER_ADDRESS) { | ||||
/* | /* | ||||
* Both pmap_remove_l2() and pmap_remove_l3() will | * Both pmap_remove_l2() and pmap_remove_l3_range() | ||||
* leave the kernel page table page zero filled. | * will leave the kernel page table page zero filled. | ||||
* Nonetheless, the TLB could have an intermediate | |||||
* entry for the kernel page table page. | |||||
*/ | */ | ||||
mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK); | mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK); | ||||
if (pmap_insert_pt_page(pmap, mt, false)) | if (pmap_insert_pt_page(pmap, mt, false)) | ||||
panic("pmap_enter_l2: trie insert failed"); | panic("pmap_enter_l2: trie insert failed"); | ||||
pmap_clear(l2); | |||||
pmap_invalidate_page(pmap, va); | |||||
} else | } else | ||||
KASSERT(pmap_load(l2) == 0, | KASSERT(pmap_load(l2) == 0, | ||||
("pmap_enter_l2: non-zero L2 entry %p", l2)); | ("pmap_enter_l2: non-zero L2 entry %p", l2)); | ||||
} | } | ||||
if ((new_l2 & ATTR_SW_MANAGED) != 0) { | if ((new_l2 & ATTR_SW_MANAGED) != 0) { | ||||
/* | /* | ||||
* Abort this mapping if its PV entry could not be created. | * Abort this mapping if its PV entry could not be created. | ||||
*/ | */ | ||||
if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) { | if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) { | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
if (pmap_unwire_l3(pmap, va, l2pg, &free)) { | if (pmap_unwire_l3(pmap, va, l2pg, &free)) { | ||||
/* | /* | ||||
* Although "va" is not mapped, paging-structure | * Although "va" is not mapped, the TLB could | ||||
* caches could nonetheless have entries that | * nonetheless have intermediate entries that | ||||
* refer to the freed page table pages. | * refer to the freed page table pages. | ||||
* Invalidate those entries. | * Invalidate those entries. | ||||
* | |||||
* XXX redundant invalidation (See | |||||
* _pmap_unwire_l3().) | |||||
*/ | */ | ||||
pmap_invalidate_page(pmap, va); | pmap_invalidate_page(pmap, va); | ||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
} | } | ||||
CTR2(KTR_PMAP, | CTR2(KTR_PMAP, | ||||
"pmap_enter_l2: failure for va %#lx in pmap %p", | "pmap_enter_l2: failure for va %#lx in pmap %p", | ||||
va, pmap); | va, pmap); | ||||
return (KERN_RESOURCE_SHORTAGE); | return (KERN_RESOURCE_SHORTAGE); | ||||
} | } | ||||
if ((new_l2 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) | if ((new_l2 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) | ||||
for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) | for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) | ||||
vm_page_aflag_set(mt, PGA_WRITEABLE); | vm_page_aflag_set(mt, PGA_WRITEABLE); | ||||
} | } | ||||
/* | /* | ||||
* Increment counters. | * Increment counters. | ||||
*/ | */ | ||||
if ((new_l2 & ATTR_SW_WIRED) != 0) | if ((new_l2 & ATTR_SW_WIRED) != 0) | ||||
pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE; | pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE; | ||||
pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE; | pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE; | ||||
/* | /* | ||||
* Map the superpage. | * Map the superpage. | ||||
*/ | */ | ||||
(void)pmap_load_store(l2, new_l2); | pmap_store(l2, new_l2); | ||||
dsb(ishst); | dsb(ishst); | ||||
atomic_add_long(&pmap_l2_mappings, 1); | atomic_add_long(&pmap_l2_mappings, 1); | ||||
CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p", | CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p", | ||||
va, pmap); | va, pmap); | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 186 Lines • ▼ Show 20 Lines | if ((m->oflags & VPO_UNMANAGED) == 0) { | ||||
l3_val &= ~ATTR_AF; | l3_val &= ~ATTR_AF; | ||||
} | } | ||||
/* Sync icache before the mapping is stored to PTE */ | /* Sync icache before the mapping is stored to PTE */ | ||||
if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap && | if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap && | ||||
m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) | m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) | ||||
cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE); | cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE); | ||||
pmap_load_store(l3, l3_val); | pmap_store(l3, l3_val); | ||||
dsb(ishst); | dsb(ishst); | ||||
return (mpte); | return (mpte); | ||||
} | } | ||||
/* | /* | ||||
* This code maps large physical mmap regions into the | * This code maps large physical mmap regions into the | ||||
* processor address space. Note that some shortcuts | * processor address space. Note that some shortcuts | ||||
▲ Show 20 Lines • Show All 56 Lines • ▼ Show 20 Lines | if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) { | ||||
panic("pmap_unwire: l2 %#jx is missing " | panic("pmap_unwire: l2 %#jx is missing " | ||||
"ATTR_SW_WIRED", (uintmax_t)pmap_load(l2)); | "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2)); | ||||
/* | /* | ||||
* Are we unwiring the entire large page? If not, | * Are we unwiring the entire large page? If not, | ||||
* demote the mapping and fall through. | * demote the mapping and fall through. | ||||
*/ | */ | ||||
if (sva + L2_SIZE == va_next && eva >= va_next) { | if (sva + L2_SIZE == va_next && eva >= va_next) { | ||||
atomic_clear_64(l2, ATTR_SW_WIRED); | pmap_clear_bits(l2, ATTR_SW_WIRED); | ||||
pmap->pm_stats.wired_count -= L2_SIZE / | pmap->pm_stats.wired_count -= L2_SIZE / | ||||
PAGE_SIZE; | PAGE_SIZE; | ||||
continue; | continue; | ||||
} else if (pmap_demote_l2(pmap, l2, sva) == NULL) | } else if (pmap_demote_l2(pmap, l2, sva) == NULL) | ||||
panic("pmap_unwire: demotion failed"); | panic("pmap_unwire: demotion failed"); | ||||
} | } | ||||
KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE, | KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE, | ||||
("pmap_unwire: Invalid l2 entry after demotion")); | ("pmap_unwire: Invalid l2 entry after demotion")); | ||||
if (va_next > eva) | if (va_next > eva) | ||||
va_next = eva; | va_next = eva; | ||||
for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, | for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, | ||||
sva += L3_SIZE) { | sva += L3_SIZE) { | ||||
if (pmap_load(l3) == 0) | if (pmap_load(l3) == 0) | ||||
continue; | continue; | ||||
if ((pmap_load(l3) & ATTR_SW_WIRED) == 0) | if ((pmap_load(l3) & ATTR_SW_WIRED) == 0) | ||||
panic("pmap_unwire: l3 %#jx is missing " | panic("pmap_unwire: l3 %#jx is missing " | ||||
"ATTR_SW_WIRED", (uintmax_t)pmap_load(l3)); | "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3)); | ||||
/* | /* | ||||
* ATTR_SW_WIRED must be cleared atomically. Although | * ATTR_SW_WIRED must be cleared atomically. Although | ||||
* the pmap lock synchronizes access to ATTR_SW_WIRED, | * the pmap lock synchronizes access to ATTR_SW_WIRED, | ||||
* the System MMU may write to the entry concurrently. | * the System MMU may write to the entry concurrently. | ||||
*/ | */ | ||||
atomic_clear_64(l3, ATTR_SW_WIRED); | pmap_clear_bits(l3, ATTR_SW_WIRED); | ||||
pmap->pm_stats.wired_count--; | pmap->pm_stats.wired_count--; | ||||
} | } | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
/* | /* | ||||
* Copy the range specified by src_addr/len | * Copy the range specified by src_addr/len | ||||
▲ Show 20 Lines • Show All 62 Lines • ▼ Show 20 Lines | if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) { | ||||
if (pmap_load(l2) == 0 && | if (pmap_load(l2) == 0 && | ||||
((srcptepaddr & ATTR_SW_MANAGED) == 0 || | ((srcptepaddr & ATTR_SW_MANAGED) == 0 || | ||||
pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr, | pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr, | ||||
PMAP_ENTER_NORECLAIM, &lock))) { | PMAP_ENTER_NORECLAIM, &lock))) { | ||||
mask = ATTR_AF | ATTR_SW_WIRED; | mask = ATTR_AF | ATTR_SW_WIRED; | ||||
nbits = 0; | nbits = 0; | ||||
if ((srcptepaddr & ATTR_SW_DBM) != 0) | if ((srcptepaddr & ATTR_SW_DBM) != 0) | ||||
nbits |= ATTR_AP_RW_BIT; | nbits |= ATTR_AP_RW_BIT; | ||||
(void)pmap_load_store(l2, | pmap_store(l2, (srcptepaddr & ~mask) | nbits); | ||||
(srcptepaddr & ~mask) | nbits); | |||||
pmap_resident_count_inc(dst_pmap, L2_SIZE / | pmap_resident_count_inc(dst_pmap, L2_SIZE / | ||||
PAGE_SIZE); | PAGE_SIZE); | ||||
atomic_add_long(&pmap_l2_mappings, 1); | atomic_add_long(&pmap_l2_mappings, 1); | ||||
} else | } else | ||||
dst_l2pg->wire_count--; | dst_l2pg->wire_count--; | ||||
continue; | continue; | ||||
} | } | ||||
KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE, | KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE, | ||||
Show All 32 Lines | for (; addr < va_next; addr += PAGE_SIZE, src_pte++) { | ||||
/* | /* | ||||
* Clear the wired, modified, and accessed | * Clear the wired, modified, and accessed | ||||
* (referenced) bits during the copy. | * (referenced) bits during the copy. | ||||
*/ | */ | ||||
mask = ATTR_AF | ATTR_SW_WIRED; | mask = ATTR_AF | ATTR_SW_WIRED; | ||||
nbits = 0; | nbits = 0; | ||||
if ((ptetemp & ATTR_SW_DBM) != 0) | if ((ptetemp & ATTR_SW_DBM) != 0) | ||||
nbits |= ATTR_AP_RW_BIT; | nbits |= ATTR_AP_RW_BIT; | ||||
(void)pmap_load_store(dst_pte, | pmap_store(dst_pte, (ptetemp & ~mask) | nbits); | ||||
(ptetemp & ~mask) | nbits); | |||||
pmap_resident_count_inc(dst_pmap, 1); | pmap_resident_count_inc(dst_pmap, 1); | ||||
} else { | } else { | ||||
SLIST_INIT(&free); | SLIST_INIT(&free); | ||||
if (pmap_unwire_l3(dst_pmap, addr, dstmpte, | if (pmap_unwire_l3(dst_pmap, addr, dstmpte, | ||||
&free)) { | &free)) { | ||||
/* | /* | ||||
* Although "addr" is not mapped, | * Although "addr" is not mapped, | ||||
* paging-structure caches could | * the TLB could nonetheless have | ||||
* nonetheless have entries that refer | * intermediate entries that refer | ||||
* to the freed page table pages. | * to the freed page table pages. | ||||
* Invalidate those entries. | * Invalidate those entries. | ||||
* | * | ||||
* XXX redundant invalidation | * XXX redundant invalidation | ||||
*/ | */ | ||||
pmap_invalidate_page(dst_pmap, addr); | pmap_invalidate_page(dst_pmap, addr); | ||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 1,814 Lines • Show Last 20 Lines |