Changeset View
Changeset View
Standalone View
Standalone View
head/sys/powerpc/aim/mmu_oea64.c
Show First 20 Lines • Show All 367 Lines • ▼ Show 20 Lines | |||||
vm_page_to_pvoh(vm_page_t m)
{

	/*
	 * Return the head of the PV (physical-to-virtual) entry list for
	 * this page.  The list is protected by the PV lock hashed from the
	 * page's physical address, so the caller must already hold it --
	 * asserted here rather than acquired, since callers iterate the
	 * list after we return.
	 */
	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
	return (&m->md.mdpg_pvoh);
}
static struct pvo_entry * | static struct pvo_entry * | ||||
alloc_pvo_entry(int bootstrap) | alloc_pvo_entry(int bootstrap, int flags) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
KASSERT(bootstrap || (flags & M_WAITOK) || (flags & M_NOWAIT), | |||||
("Either M_WAITOK or M_NOWAIT flag must be specified " | |||||
"when bootstrap is 0")); | |||||
KASSERT(!bootstrap || !(flags & M_WAITOK), | |||||
("M_WAITOK can't be used with bootstrap")); | |||||
if (!moea64_initialized || bootstrap) { | if (!moea64_initialized || bootstrap) { | ||||
if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) { | if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) { | ||||
panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", | panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", | ||||
moea64_bpvo_pool_index, moea64_bpvo_pool_size, | moea64_bpvo_pool_index, moea64_bpvo_pool_size, | ||||
moea64_bpvo_pool_size * sizeof(struct pvo_entry)); | moea64_bpvo_pool_size * sizeof(struct pvo_entry)); | ||||
} | } | ||||
pvo = &moea64_bpvo_pool[ | pvo = &moea64_bpvo_pool[ | ||||
atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)]; | atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)]; | ||||
bzero(pvo, sizeof(*pvo)); | bzero(pvo, sizeof(*pvo)); | ||||
pvo->pvo_vaddr = PVO_BOOTSTRAP; | pvo->pvo_vaddr = PVO_BOOTSTRAP; | ||||
} else { | } else | ||||
pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT); | pvo = uma_zalloc(moea64_pvo_zone, flags | M_ZERO); | ||||
bzero(pvo, sizeof(*pvo)); | |||||
} | |||||
return (pvo); | return (pvo); | ||||
} | } | ||||
static void | static void | ||||
init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va) | init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 251 Lines • ▼ Show 20 Lines | moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, | ||||
DISABLE_TRANS(msr); | DISABLE_TRANS(msr); | ||||
if (hw_direct_map) { | if (hw_direct_map) { | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
for (i = 0; i < pregions_sz; i++) { | for (i = 0; i < pregions_sz; i++) { | ||||
for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + | for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + | ||||
pregions[i].mr_size; pa += moea64_large_page_size) { | pregions[i].mr_size; pa += moea64_large_page_size) { | ||||
pte_lo = LPTE_M; | pte_lo = LPTE_M; | ||||
pvo = alloc_pvo_entry(1 /* bootstrap */); | pvo = alloc_pvo_entry(1 /* bootstrap */, 0); | ||||
pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE; | pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE; | ||||
init_pvo_entry(pvo, kernel_pmap, PHYS_TO_DMAP(pa)); | init_pvo_entry(pvo, kernel_pmap, PHYS_TO_DMAP(pa)); | ||||
/* | /* | ||||
* Set memory access as guarded if prefetch within | * Set memory access as guarded if prefetch within | ||||
* the page could exit the available physmem area. | * the page could exit the available physmem area. | ||||
*/ | */ | ||||
if (pa & moea64_large_page_mask) { | if (pa & moea64_large_page_mask) { | ||||
▲ Show 20 Lines • Show All 724 Lines • ▼ Show 20 Lines | moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, | ||||
struct pvo_entry *pvo, *oldpvo; | struct pvo_entry *pvo, *oldpvo; | ||||
struct pvo_head *pvo_head; | struct pvo_head *pvo_head; | ||||
uint64_t pte_lo; | uint64_t pte_lo; | ||||
int error; | int error; | ||||
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) | if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) | ||||
VM_OBJECT_ASSERT_LOCKED(m->object); | VM_OBJECT_ASSERT_LOCKED(m->object); | ||||
pvo = alloc_pvo_entry(0); | pvo = alloc_pvo_entry(0, M_NOWAIT); | ||||
if (pvo == NULL) | |||||
return (KERN_RESOURCE_SHORTAGE); | |||||
pvo->pvo_pmap = NULL; /* to be filled in later */ | pvo->pvo_pmap = NULL; /* to be filled in later */ | ||||
pvo->pvo_pte.prot = prot; | pvo->pvo_pte.prot = prot; | ||||
pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); | pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); | ||||
pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo; | pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo; | ||||
if ((flags & PMAP_ENTER_WIRED) != 0) | if ((flags & PMAP_ENTER_WIRED) != 0) | ||||
pvo->pvo_vaddr |= PVO_WIRED; | pvo->pvo_vaddr |= PVO_WIRED; | ||||
if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) { | if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) { | ||||
pvo_head = NULL; | pvo_head = NULL; | ||||
} else { | } else { | ||||
pvo_head = &m->md.mdpg_pvoh; | pvo_head = &m->md.mdpg_pvoh; | ||||
pvo->pvo_vaddr |= PVO_MANAGED; | pvo->pvo_vaddr |= PVO_MANAGED; | ||||
} | } | ||||
for (;;) { | |||||
PV_PAGE_LOCK(m); | PV_PAGE_LOCK(m); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
if (pvo->pvo_pmap == NULL) | if (pvo->pvo_pmap == NULL) | ||||
init_pvo_entry(pvo, pmap, va); | init_pvo_entry(pvo, pmap, va); | ||||
if (prot & VM_PROT_WRITE) | if (prot & VM_PROT_WRITE) | ||||
if (pmap_bootstrapped && | if (pmap_bootstrapped && | ||||
(m->oflags & VPO_UNMANAGED) == 0) | (m->oflags & VPO_UNMANAGED) == 0) | ||||
vm_page_aflag_set(m, PGA_WRITEABLE); | vm_page_aflag_set(m, PGA_WRITEABLE); | ||||
error = moea64_pvo_enter(mmu, pvo, pvo_head, &oldpvo); | error = moea64_pvo_enter(mmu, pvo, pvo_head, &oldpvo); | ||||
if (error == EEXIST) { | if (error == EEXIST) { | ||||
if (oldpvo->pvo_vaddr == pvo->pvo_vaddr && | if (oldpvo->pvo_vaddr == pvo->pvo_vaddr && | ||||
oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && | oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && | ||||
oldpvo->pvo_pte.prot == prot) { | oldpvo->pvo_pte.prot == prot) { | ||||
/* Identical mapping already exists */ | /* Identical mapping already exists */ | ||||
error = 0; | error = 0; | ||||
/* If not in page table, reinsert it */ | /* If not in page table, reinsert it */ | ||||
if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) { | if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) { | ||||
STAT_MOEA64(moea64_pte_overflow--); | STAT_MOEA64(moea64_pte_overflow--); | ||||
MOEA64_PTE_INSERT(mmu, oldpvo); | MOEA64_PTE_INSERT(mmu, oldpvo); | ||||
} | } | ||||
/* Then just clean up and go home */ | /* Then just clean up and go home */ | ||||
PV_PAGE_UNLOCK(m); | PV_PAGE_UNLOCK(m); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
free_pvo_entry(pvo); | free_pvo_entry(pvo); | ||||
break; | goto out; | ||||
} | } else { | ||||
/* Otherwise, need to kill it first */ | /* Otherwise, need to kill it first */ | ||||
KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old " | KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old " | ||||
"mapping does not match new mapping")); | "mapping does not match new mapping")); | ||||
moea64_pvo_remove_from_pmap(mmu, oldpvo); | moea64_pvo_remove_from_pmap(mmu, oldpvo); | ||||
moea64_pvo_enter(mmu, pvo, pvo_head, NULL); | moea64_pvo_enter(mmu, pvo, pvo_head, NULL); | ||||
} | } | ||||
} | |||||
PV_PAGE_UNLOCK(m); | PV_PAGE_UNLOCK(m); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
/* Free any dead pages */ | /* Free any dead pages */ | ||||
if (error == EEXIST) { | if (error == EEXIST) { | ||||
moea64_pvo_remove_from_page(mmu, oldpvo); | moea64_pvo_remove_from_page(mmu, oldpvo); | ||||
free_pvo_entry(oldpvo); | free_pvo_entry(oldpvo); | ||||
} | } | ||||
if (error != ENOMEM) | out: | ||||
break; | |||||
if ((flags & PMAP_ENTER_NOSLEEP) != 0) | |||||
return (KERN_RESOURCE_SHORTAGE); | |||||
VM_OBJECT_ASSERT_UNLOCKED(m->object); | |||||
vm_wait(NULL); | |||||
} | |||||
/* | /* | ||||
* Flush the page from the instruction cache if this page is | * Flush the page from the instruction cache if this page is | ||||
* mapped executable and cacheable. | * mapped executable and cacheable. | ||||
*/ | */ | ||||
if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) && | if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) && | ||||
(pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { | (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { | ||||
vm_page_aflag_set(m, PGA_EXECUTABLE); | vm_page_aflag_set(m, PGA_EXECUTABLE); | ||||
moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); | moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); | ||||
▲ Show 20 Lines • Show All 146 Lines • ▼ Show 20 Lines | moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, | ||||
m = vm_page_alloc_domain(NULL, 0, domain, | m = vm_page_alloc_domain(NULL, 0, domain, | ||||
malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ); | malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ); | ||||
if (m == NULL) | if (m == NULL) | ||||
return (NULL); | return (NULL); | ||||
va = VM_PAGE_TO_PHYS(m); | va = VM_PAGE_TO_PHYS(m); | ||||
pvo = alloc_pvo_entry(1 /* bootstrap */); | pvo = alloc_pvo_entry(1 /* bootstrap */, 0); | ||||
pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE; | pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE; | ||||
pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; | pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; | ||||
if (needed_lock) | if (needed_lock) | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
init_pvo_entry(pvo, kernel_pmap, va); | init_pvo_entry(pvo, kernel_pmap, va); | ||||
▲ Show 20 Lines • Show All 215 Lines • ▼ Show 20 Lines | |||||
* Map a wired page into kernel virtual address space. | * Map a wired page into kernel virtual address space. | ||||
*/ | */ | ||||
void | void | ||||
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) | moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) | ||||
{ | { | ||||
int error; | int error; | ||||
struct pvo_entry *pvo, *oldpvo; | struct pvo_entry *pvo, *oldpvo; | ||||
pvo = alloc_pvo_entry(0); | pvo = alloc_pvo_entry(0, M_WAITOK); | ||||
pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; | pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; | ||||
pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); | pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); | ||||
pvo->pvo_vaddr |= PVO_WIRED; | pvo->pvo_vaddr |= PVO_WIRED; | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
oldpvo = moea64_pvo_find_va(kernel_pmap, va); | oldpvo = moea64_pvo_find_va(kernel_pmap, va); | ||||
if (oldpvo != NULL) | if (oldpvo != NULL) | ||||
moea64_pvo_remove_from_pmap(mmu, oldpvo); | moea64_pvo_remove_from_pmap(mmu, oldpvo); | ||||
▲ Show 20 Lines • Show All 1,066 Lines • Show Last 20 Lines |