sys/arm64/arm64/pmap.c
@@ ... @@ #define RELEASE_PV_LIST_LOCK(lockp) do { \
    }                                           \
} while (0)
#define VM_PAGE_TO_PV_LIST_LOCK(m) \
    PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
/*
 * The presence of this flag indicates that the mapping is writeable.
- * If the ATTR_AP_RO bit is also set, then the mapping is clean, otherwise it is
- * dirty. This flag may only be set on managed mappings.
+ * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
+ * it is dirty. This flag may only be set on managed mappings.
 *
 * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
 * as a software managed bit.
 */
#define ATTR_SW_DBM ATTR_DBM
struct pmap kernel_pmap_store;
@@ ... @@
/*
 * Checks if the PTE is dirty.
 */
static inline int
pmap_pte_dirty(pt_entry_t pte)
{
    KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
-    KASSERT((pte & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) != 0,
+    KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
        ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
-    return ((pte & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
-        (ATTR_AP(ATTR_AP_RW) | ATTR_SW_DBM));
+    return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+        (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
}
static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{
    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    pmap->pm_stats.resident_count += count;
@@ ... @@ if ((pa & L1_OFFSET) != 0) {
                 * create a level 1 block
                 */
                if ((pa & L1_OFFSET) == 0)
                    break;
                l2_slot = pmap_l2_index(va);
                KASSERT(l2_slot != 0, ("..."));
                pmap_store(&l2[l2_slot],
-                    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-                    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+                    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
+                    ATTR_S1_XN |
+                    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
+                    L2_BLOCK);
            }
            KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
                ("..."));
        }
        for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] &&
            (physmap[i + 1] - pa) >= L1_SIZE;
            pa += L1_SIZE, va += L1_SIZE) {
            l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
            pmap_store(&pagetable_dmap[l1_slot],
-                (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-                ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
+                (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_S1_XN |
+                ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
        }
        /* Create L2 mappings at the end of the region */
        if (pa < physmap[i + 1]) {
            l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
            if (l1_slot != prev_l1_slot) {
                prev_l1_slot = l1_slot;
                l2 = (pt_entry_t *)freemempos;
                l2_pa = pmap_early_vtophys(kern_l1,
                    (vm_offset_t)l2);
                freemempos += PAGE_SIZE;
                pmap_store(&pagetable_dmap[l1_slot],
                    (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
                memset(l2, 0, PAGE_SIZE);
            }
            KASSERT(l2 != NULL,
                ("pmap_bootstrap_dmap: NULL l2 map"));
            for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
                pa += L2_SIZE, va += L2_SIZE) {
                l2_slot = pmap_l2_index(va);
                pmap_store(&l2[l2_slot],
-                    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-                    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+                    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
+                    ATTR_S1_XN |
+                    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
+                    L2_BLOCK);
            }
        }
        if (pa > dmap_phys_max) {
            dmap_phys_max = pa;
            dmap_max_addr = va;
        }
    }
@@ ... @@ pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
    l2_slot = pmap_l2_index(va);
    l3pt = l3_start;
    for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
        KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
        pa = pmap_early_vtophys(l1pt, l3pt);
        pmap_store(&l2[l2_slot],
-            (pa & ~Ln_TABLE_MASK) | ATTR_UXN | L2_TABLE);
+            (pa & ~Ln_TABLE_MASK) | ATTR_S1_UXN | L2_TABLE);
        l3pt += PAGE_SIZE;
    }
    /* Clean the L2 page table */
    memset((void *)l3_start, 0, l3pt - l3_start);
    return l3pt;
}
@@ ... @@ if (pte != NULL) {
        KASSERT(lvl > 0 && lvl <= 3,
            ("pmap_extract_and_hold: Invalid level %d", lvl));
        CTASSERT(L1_BLOCK == L2_BLOCK);
        KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
            (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
            ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
            tpte & ATTR_DESCR_MASK));
-        if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
+        if (((tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)) ||
            ((prot & VM_PROT_WRITE) == 0)) {
            switch(lvl) {
            case 1:
                off = va & L1_OFFSET;
                break;
            case 2:
                off = va & L2_OFFSET;
                break;
@@ ... @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
    KASSERT((pa & L3_OFFSET) == 0,
        ("pmap_kenter: Invalid physical address"));
    KASSERT((sva & L3_OFFSET) == 0,
        ("pmap_kenter: Invalid virtual address"));
    KASSERT((size & PAGE_MASK) == 0,
        ("pmap_kenter: Mapping is not page-sized"));
-    attr = ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) | ATTR_XN | ATTR_IDX(mode) |
-        L3_PAGE;
+    attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+        ATTR_S1_IDX(mode) | L3_PAGE;
    va = sva;
    while (size != 0) {
        pde = pmap_pde(kernel_pmap, va, &lvl);
        KASSERT(pde != NULL,
            ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
        KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
        pte = pmap_l2_to_l3(pde, va);
@@ ... @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
    for (i = 0; i < count; i++) {
        pde = pmap_pde(kernel_pmap, va, &lvl);
        KASSERT(pde != NULL,
            ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
        KASSERT(lvl == 2,
            ("pmap_qenter: Invalid level %d", lvl));
        m = ma[i];
-        pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
-            ATTR_XN | ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
+        pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
+            ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+            ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
        pte = pmap_l2_to_l3(pde, va);
        pmap_load_store(pte, pa);
        va += L3_SIZE;
    }
    pmap_invalidate_range(kernel_pmap, sva, va);
}
@@ ... @@ if ((old_l2 & mask) == nbits)
        return;
    /*
     * When a dirty read/write superpage mapping is write protected,
     * update the dirty field of each of the superpage's constituent 4KB
     * pages.
     */
    if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
-        (nbits & ATTR_AP(ATTR_AP_RO)) != 0 && pmap_pte_dirty(old_l2)) {
+        (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
+        pmap_pte_dirty(old_l2)) {
        m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
        for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
            vm_page_dirty(mt);
    }
    if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
        goto retry;
@@ ... @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
    KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
    if (prot == VM_PROT_NONE) {
        pmap_remove(pmap, sva, eva);
        return;
    }
    mask = nbits = 0;
    if ((prot & VM_PROT_WRITE) == 0) {
-        mask |= ATTR_AP_RW_BIT | ATTR_SW_DBM;
-        nbits |= ATTR_AP(ATTR_AP_RO);
+        mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM;
+        nbits |= ATTR_S1_AP(ATTR_S1_AP_RO);
    }
    if ((prot & VM_PROT_EXECUTE) == 0) {
-        mask |= ATTR_XN;
-        nbits |= ATTR_XN;
+        mask |= ATTR_S1_XN;
+        nbits |= ATTR_S1_XN;
    }
    if (mask == 0)
        return;
    PMAP_LOCK(pmap);
    for (; sva < eva; sva = va_next) {
        l0 = pmap_l0(pmap, sva);
@@ ... @@ retry:
            continue;
        }
        /*
         * When a dirty read/write mapping is write protected,
         * update the page's dirty field.
         */
        if ((l3 & ATTR_SW_MANAGED) != 0 &&
-            (nbits & ATTR_AP(ATTR_AP_RO)) != 0 &&
+            (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
            pmap_pte_dirty(l3))
            vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
        if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
            goto retry;
        if (va == va_next)
            va = sva;
    }
@@ ... @@
setl2:
    if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) {
        atomic_add_long(&pmap_l2_p_failures, 1);
        CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
            " in pmap %p", va, pmap);
        return;
    }
-    if ((newl2 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
-        (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
+    if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+        (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
        if (!atomic_fcmpset_64(l2, &newl2, newl2 & ~ATTR_SW_DBM))
            goto setl2;
        newl2 &= ~ATTR_SW_DBM;
    }
    pa = newl2 + L2_SIZE - PAGE_SIZE;
    for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
        oldl3 = pmap_load(l3);
setl3:
-        if ((oldl3 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
-            (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
+        if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+            (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
            if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
                ~ATTR_SW_DBM))
                goto setl3;
            oldl3 &= ~ATTR_SW_DBM;
        }
        if (oldl3 != pa) {
            atomic_add_long(&pmap_l2_p_failures, 1);
            CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
@@ ... @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    vm_page_t mpte, om;
    boolean_t nosleep;
    int lvl, rv;
    va = trunc_page(va);
    if ((m->oflags & VPO_UNMANAGED) == 0)
        VM_PAGE_OBJECT_BUSY_ASSERT(m);
    pa = VM_PAGE_TO_PHYS(m);
-    new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
+    new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
        L3_PAGE);
    if ((prot & VM_PROT_WRITE) == 0)
-        new_l3 |= ATTR_AP(ATTR_AP_RO);
+        new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
    if ((prot & VM_PROT_EXECUTE) == 0 ||
        m->md.pv_memattr == VM_MEMATTR_DEVICE)
-        new_l3 |= ATTR_XN;
+        new_l3 |= ATTR_S1_XN;
    if ((flags & PMAP_ENTER_WIRED) != 0)
        new_l3 |= ATTR_SW_WIRED;
    if (va < VM_MAXUSER_ADDRESS)
-        new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+        new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
    else
-        new_l3 |= ATTR_UXN;
+        new_l3 |= ATTR_S1_UXN;
    if (pmap != kernel_pmap)
-        new_l3 |= ATTR_nG;
+        new_l3 |= ATTR_S1_nG;
    if ((m->oflags & VPO_UNMANAGED) == 0) {
        new_l3 |= ATTR_SW_MANAGED;
        if ((prot & VM_PROT_WRITE) != 0) {
            new_l3 |= ATTR_SW_DBM;
            if ((flags & VM_PROT_WRITE) == 0)
-                new_l3 |= ATTR_AP(ATTR_AP_RO);
+                new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
        }
    }
    CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
    lock = NULL;
    PMAP_LOCK(pmap);
    if (psind == 1) {
@@ ... @@ validate:
     * Don't do it for kernel memory which is mapped with exec
     * permission even if the memory isn't going to hold executable
     * code. The only time when icache sync is needed is after
     * kernel module is loaded and the relocation info is processed.
     * And it's done in elf_cpu_load_file().
     */
    if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
        m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
-        (opa != pa || (orig_l3 & ATTR_XN)))
+        (opa != pa || (orig_l3 & ATTR_S1_XN)))
        cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
    /*
     * Update the L3 entry
     */
    if (pmap_l3_valid(orig_l3)) {
        KASSERT(opa == pa, ("pmap_enter: invalid update"));
        if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
@@ ... @@
pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    struct rwlock **lockp)
{
    pd_entry_t new_l2;
    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
-        ATTR_IDX(m->md.pv_memattr) | ATTR_AP(ATTR_AP_RO) | L2_BLOCK);
+        ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
+        L2_BLOCK);
    if ((m->oflags & VPO_UNMANAGED) == 0) {
        new_l2 |= ATTR_SW_MANAGED;
        new_l2 &= ~ATTR_AF;
    }
    if ((prot & VM_PROT_EXECUTE) == 0 ||
        m->md.pv_memattr == VM_MEMATTR_DEVICE)
-        new_l2 |= ATTR_XN;
+        new_l2 |= ATTR_S1_XN;
    if (va < VM_MAXUSER_ADDRESS)
-        new_l2 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+        new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
    else
-        new_l2 |= ATTR_UXN;
+        new_l2 |= ATTR_S1_UXN;
    if (pmap != kernel_pmap)
-        new_l2 |= ATTR_nG;
+        new_l2 |= ATTR_S1_nG;
    return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
        PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
        KERN_SUCCESS);
}
/*
 * Returns true if every page table entry in the specified page table is
 * zero.
@@ ... @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    }
    /*
     * Increment counters
     */
    pmap_resident_count_inc(pmap, 1);
    pa = VM_PAGE_TO_PHYS(m);
-    l3_val = pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
-        ATTR_AP(ATTR_AP_RO) | L3_PAGE;
+    l3_val = pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
+        ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
    if ((prot & VM_PROT_EXECUTE) == 0 ||
        m->md.pv_memattr == VM_MEMATTR_DEVICE)
-        l3_val |= ATTR_XN;
+        l3_val |= ATTR_S1_XN;
    if (va < VM_MAXUSER_ADDRESS)
-        l3_val |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+        l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
    else
-        l3_val |= ATTR_UXN;
+        l3_val |= ATTR_S1_UXN;
    if (pmap != kernel_pmap)
-        l3_val |= ATTR_nG;
+        l3_val |= ATTR_S1_nG;
    /*
     * Now validate mapping with RO protection
     */
    if ((m->oflags & VPO_UNMANAGED) == 0) {
        l3_val |= ATTR_SW_MANAGED;
        l3_val &= ~ATTR_AF;
    }
@@ ... @@ if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
                break;
            if (pmap_load(l2) == 0 &&
                ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
                pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
                PMAP_ENTER_NORECLAIM, &lock))) {
                mask = ATTR_AF | ATTR_SW_WIRED;
                nbits = 0;
                if ((srcptepaddr & ATTR_SW_DBM) != 0)
-                    nbits |= ATTR_AP_RW_BIT;
+                    nbits |= ATTR_S1_AP_RW_BIT;
                pmap_store(l2, (srcptepaddr & ~mask) | nbits);
                pmap_resident_count_inc(dst_pmap, L2_SIZE /
                    PAGE_SIZE);
                atomic_add_long(&pmap_l2_mappings, 1);
            } else
                pmap_abort_ptp(dst_pmap, addr, dst_l2pg);
            continue;
        }
@@ ... @@ for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
                PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
                /*
                 * Clear the wired, modified, and accessed
                 * (referenced) bits during the copy.
                 */
                mask = ATTR_AF | ATTR_SW_WIRED;
                nbits = 0;
                if ((ptetemp & ATTR_SW_DBM) != 0)
-                    nbits |= ATTR_AP_RW_BIT;
+                    nbits |= ATTR_S1_AP_RW_BIT;
                pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
                pmap_resident_count_inc(dst_pmap, 1);
            } else {
                pmap_abort_ptp(dst_pmap, addr, dstmpte);
                goto out;
            }
            /* Have we copied all of the valid mappings? */
            if (dstmpte->ref_count >= srcmpte->ref_count)
@@ ... @@ if (!PMAP_TRYLOCK(pmap)) {
            }
        }
        pte = pmap_pte(pmap, pv->pv_va, &lvl);
        KASSERT(lvl == 3,
            ("pmap_page_test_mappings: Invalid level %d", lvl));
        mask = 0;
        value = 0;
        if (modified) {
-            mask |= ATTR_AP_RW_BIT;
-            value |= ATTR_AP(ATTR_AP_RW);
+            mask |= ATTR_S1_AP_RW_BIT;
+            value |= ATTR_S1_AP(ATTR_S1_AP_RW);
        }
        if (accessed) {
            mask |= ATTR_AF | ATTR_DESCR_MASK;
            value |= ATTR_AF | L3_PAGE;
        }
        rv = (pmap_load(pte) & mask) == value;
        PMAP_UNLOCK(pmap);
        if (rv)
@@ ... @@ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
            }
        }
        pte = pmap_pte(pmap, pv->pv_va, &lvl);
        KASSERT(lvl == 2,
            ("pmap_page_test_mappings: Invalid level %d", lvl));
        mask = 0;
        value = 0;
        if (modified) {
-            mask |= ATTR_AP_RW_BIT;
-            value |= ATTR_AP(ATTR_AP_RW);
+            mask |= ATTR_S1_AP_RW_BIT;
+            value |= ATTR_S1_AP(ATTR_S1_AP_RW);
        }
        if (accessed) {
            mask |= ATTR_AF | ATTR_DESCR_MASK;
            value |= ATTR_AF | L2_BLOCK;
        }
        rv = (pmap_load(pte) & mask) == value;
        PMAP_UNLOCK(pmap);
        if (rv)
@@ ... @@ if (!PMAP_TRYLOCK(pmap)) {
                goto retry_pv_loop;
            }
        }
        pte = pmap_pte(pmap, pv->pv_va, &lvl);
        oldpte = pmap_load(pte);
retry:
        if ((oldpte & ATTR_SW_DBM) != 0) {
            if (!atomic_fcmpset_long(pte, &oldpte,
-                (oldpte | ATTR_AP_RW_BIT) & ~ATTR_SW_DBM))
+                (oldpte | ATTR_S1_AP_RW_BIT) & ~ATTR_SW_DBM))
                goto retry;
-            if ((oldpte & ATTR_AP_RW_BIT) ==
-                ATTR_AP(ATTR_AP_RW))
+            if ((oldpte & ATTR_S1_AP_RW_BIT) ==
+                ATTR_S1_AP(ATTR_S1_AP_RW))
                vm_page_dirty(m);
            pmap_invalidate_page(pmap, pv->pv_va);
        }
        PMAP_UNLOCK(pmap);
    }
    rw_wunlock(lock);
    vm_page_aflag_clear(m, PGA_WRITEABLE);
}
@@ ... @@ for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
                     * Future calls to pmap_is_modified()
                     * can be avoided by making the page
                     * dirty now.
                     */
                    m = PHYS_TO_VM_PAGE(oldl3 & ~ATTR_MASK);
                    vm_page_dirty(m);
                }
                while (!atomic_fcmpset_long(l3, &oldl3,
-                    (oldl3 & ~ATTR_AF) | ATTR_AP(ATTR_AP_RO)))
+                    (oldl3 & ~ATTR_AF) |
+                    ATTR_S1_AP(ATTR_S1_AP_RO)))
                    cpu_spinwait();
            } else if ((oldl3 & ATTR_AF) != 0)
                pmap_clear_bits(l3, ATTR_AF);
            else
                goto maybe_invlrng;
            if (va == va_next)
                va = sva;
            continue;
@@ ... @@ if ((oldl2 & ATTR_SW_DBM) != 0 &&
            /*
             * Write protect the mapping to a single page so that
             * a subsequent write access may repromote.
             */
            va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK);
            l3 = pmap_l2_to_l3(l2, va);
            oldl3 = pmap_load(l3);
            while (!atomic_fcmpset_long(l3, &oldl3,
-                (oldl3 & ~ATTR_SW_DBM) | ATTR_AP(ATTR_AP_RO)))
+                (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
                cpu_spinwait();
            vm_page_dirty(m);
            pmap_invalidate_page(pmap, va);
        }
        PMAP_UNLOCK(pmap);
    }
    TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
        pmap = PV_PMAP(pv);
        if (!PMAP_TRYLOCK(pmap)) {
            md_gen = m->md.pv_gen;
            pvh_gen = pvh->pv_gen;
            rw_wunlock(lock);
            PMAP_LOCK(pmap);
            rw_wlock(lock);
            if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
                PMAP_UNLOCK(pmap);
                goto restart;
            }
        }
        l2 = pmap_l2(pmap, pv->pv_va);
        l3 = pmap_l2_to_l3(l2, pv->pv_va);
        oldl3 = pmap_load(l3);
        if (pmap_l3_valid(oldl3) &&
-            (oldl3 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM) {
-            pmap_set_bits(l3, ATTR_AP(ATTR_AP_RO));
+            (oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM) {
+            pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
            pmap_invalidate_page(pmap, pv->pv_va);
        }
        PMAP_UNLOCK(pmap);
    }
    rw_wunlock(lock);
}
void *
@@ ... @@ for (i = 0; i < l2_blocks; i++) {
                ("pmap_mapbios: Invalid page entry, va: 0x%lx",
                va));
            KASSERT(lvl == 1,
                ("pmap_mapbios: Invalid level %d", lvl));
            /* Insert L2_BLOCK */
            l2 = pmap_l1_to_l2(pde, va);
            pmap_load_store(l2,
-                pa | ATTR_DEFAULT | ATTR_XN |
-                ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+                pa | ATTR_DEFAULT | ATTR_S1_XN |
+                ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
            va += L2_SIZE;
            pa += L2_SIZE;
        }
        pmap_invalidate_all(kernel_pmap);
        va = preinit_map_va + (start_idx * L2_SIZE);
@@ ... @@ if (!VIRT_IN_DMAP(base) &&
        !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
        return (EINVAL);
    for (tmpva = base; tmpva < base + size; ) {
        pte = pmap_pte(kernel_pmap, tmpva, &lvl);
        if (pte == NULL)
            return (EINVAL);
-        if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
+        if ((pmap_load(pte) & ATTR_S1_IDX_MASK) == ATTR_S1_IDX(mode)) {
            /*
             * We already have the correct attribute,
             * ignore this entry.
             */
            switch (lvl) {
            default:
                panic("Invalid DMAP table level: %d\n", lvl);
            case 1:
@@ ... @@ if ((pmap_load(pte) & ATTR_S1_IDX_MASK) == ATTR_S1_IDX(mode)) {
                newpte = pmap_demote_l2(kernel_pmap, pte,
                    tmpva);
                if (newpte == NULL)
                    return (EINVAL);
                pte = pmap_l2_to_l3(pte, tmpva);
            case 3:
                /* Update the entry */
                l3 = pmap_load(pte);
-                l3 &= ~ATTR_IDX_MASK;
-                l3 |= ATTR_IDX(mode);
+                l3 &= ~ATTR_S1_IDX_MASK;
+                l3 |= ATTR_S1_IDX(mode);
                if (mode == VM_MEMATTR_DEVICE)
-                    l3 |= ATTR_XN;
+                    l3 |= ATTR_S1_XN;
                pmap_update_entry(kernel_pmap, pte, l3, tmpva,
                    PAGE_SIZE);
                /*
                 * If moving to a non-cacheable entry flush
                 * the cache.
                 */
@@ ... @@ if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
        if (va < VM_MAXUSER_ADDRESS) {
            ml3->ref_count = NL3PG;
            pmap_resident_count_inc(pmap, 1);
        }
    }
    l3phys = VM_PAGE_TO_PHYS(ml3);
    l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
    newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
-    KASSERT((oldl2 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) !=
-        (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM),
+    KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) !=
+        (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM),
        ("pmap_demote_l2: L2 entry is writeable but not dirty"));
    /*
     * If the page table page is not leftover from an earlier promotion,
     * or the mapping attributes have changed, (re)initialize the L3 table.
     *
     * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
     * performs a dsb(). That dsb() ensures that the stores for filling
@@ ... @@ default:
            panic("pmap_mincore: invalid level %d", lvl);
        }
        managed = (tpte & ATTR_SW_MANAGED) != 0;
        val = MINCORE_INCORE;
        if (lvl != 3)
            val |= MINCORE_SUPER;
        if ((managed && pmap_pte_dirty(tpte)) || (!managed &&
-            (tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)))
+            (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
            val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
        if ((tpte & ATTR_AF) == ATTR_AF)
            val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
        pa = (tpte & ~ATTR_MASK) | (addr & mask);
    } else {
        managed = false;
        val = 0;
@@ ... @@ pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
    case ISS_DATA_DFSC_PF_L3:
        if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
            (esr & ISS_DATA_WnR) == 0)
            return (rv);
        PMAP_LOCK(pmap);
        ptep = pmap_pte(pmap, far, &lvl);
        if (ptep != NULL &&
            ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
-            if ((pte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RO)) {
-                pmap_clear_bits(ptep, ATTR_AP_RW_BIT);
+            if ((pte & ATTR_S1_AP_RW_BIT) ==
+                ATTR_S1_AP(ATTR_S1_AP_RO)) {
+                pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
                pmap_invalidate_page(pmap, far);
            }
            rv = KERN_SUCCESS;
        }
        PMAP_UNLOCK(pmap);
        break;
    case ISS_DATA_DFSC_TF_L0:
    case ISS_DATA_DFSC_TF_L1:
@@ ... @@ sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
    vm_offset_t eva)
{
    const char *mode;
    int index;
    if (eva <= range->sva)
        return;
-    index = range->attrs & ATTR_IDX_MASK;
+    index = range->attrs & ATTR_S1_IDX_MASK;
    switch (index) {
-    case ATTR_IDX(VM_MEMATTR_DEVICE):
+    case ATTR_S1_IDX(VM_MEMATTR_DEVICE):
        mode = "DEV";
        break;
-    case ATTR_IDX(VM_MEMATTR_UNCACHEABLE):
+    case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE):
        mode = "UC";
        break;
-    case ATTR_IDX(VM_MEMATTR_WRITE_BACK):
+    case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK):
        mode = "WB";
        break;
-    case ATTR_IDX(VM_MEMATTR_WRITE_THROUGH):
+    case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH):
        mode = "WT";
        break;
    default:
        printf(
            "%s: unknown memory type %x for range 0x%016lx-0x%016lx\n",
            __func__, index, range->sva, eva);
        mode = "??";
        break;
    }
    sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c %3s %d %d %d %d\n",
        range->sva, eva,
-        (range->attrs & ATTR_AP_RW_BIT) == ATTR_AP_RW ? 'w' : '-',
-        (range->attrs & ATTR_PXN) != 0 ? '-' : 'x',
-        (range->attrs & ATTR_AP_USER) != 0 ? 'u' : 's',
+        (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
+        (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
+        (range->attrs & ATTR_S1_AP_USER) != 0 ? 'u' : 's',
        mode, range->l1blocks, range->l2blocks, range->l3contig,
        range->l3pages);
    /* Reset to sentinel value. */
    range->sva = 0xfffffffffffffffful;
}
/*
@@ ... @@
 */
static void
sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
    vm_offset_t va, pd_entry_t l0e, pd_entry_t l1e, pd_entry_t l2e,
    pt_entry_t l3e)
{
    pt_entry_t attrs;
-    attrs = l0e & (ATTR_AP_MASK | ATTR_XN);
-    attrs |= l1e & (ATTR_AP_MASK | ATTR_XN);
+    attrs = l0e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
+    attrs |= l1e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
    if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK)
-        attrs |= l1e & ATTR_IDX_MASK;
-    attrs |= l2e & (ATTR_AP_MASK | ATTR_XN);
+        attrs |= l1e & ATTR_S1_IDX_MASK;
+    attrs |= l2e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
    if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK)
-        attrs |= l2e & ATTR_IDX_MASK;
-    attrs |= l3e & (ATTR_AP_MASK | ATTR_XN | ATTR_IDX_MASK);
+        attrs |= l2e & ATTR_S1_IDX_MASK;
+    attrs |= l3e & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK);
    if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
        sysctl_kmaps_dump(sb, range, va);
        sysctl_kmaps_reinit(range, va, attrs);
    }
}
static int