sys/powerpc/aim/mmu_oea.c
#include <machine/md_var.h> | #include <machine/md_var.h> | ||||
#include <machine/psl.h> | #include <machine/psl.h> | ||||
#include <machine/pte.h> | #include <machine/pte.h> | ||||
#include <machine/smp.h> | #include <machine/smp.h> | ||||
#include <machine/sr.h> | #include <machine/sr.h> | ||||
#include <machine/mmuvar.h> | #include <machine/mmuvar.h> | ||||
#include <machine/trap.h> | #include <machine/trap.h> | ||||
#include "mmu_if.h" | |||||
#define MOEA_DEBUG | #define MOEA_DEBUG | ||||
#define TODO panic("%s: not implemented", __func__); | #define TODO panic("%s: not implemented", __func__); | ||||
#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) | #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) | ||||
#define VSID_TO_SR(vsid) ((vsid) & 0xf) | #define VSID_TO_SR(vsid) ((vsid) & 0xf) | ||||
#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) | #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) | ||||
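These three macros pack a 4-bit segment-register index and a 20-bit hash into a single VSID. A minimal standalone round-trip check (macro bodies copied from above; the test values are arbitrary):

#include <assert.h>
#include <stdint.h>

#define VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define VSID_TO_SR(vsid)	((vsid) & 0xf)
#define VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

int
main(void)
{
	uint32_t vsid = VSID_MAKE(0x7, 0xabcde);	/* 0x00abcde7 */

	/* Both fields come back out unchanged. */
	assert(VSID_TO_SR(vsid) == 0x7);
	assert(VSID_TO_HASH(vsid) == 0xabcde);
	return (0);
}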
/* | /* | ||||
* Utility routines. | * Utility routines. | ||||
*/ | */ | ||||
static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t, | static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t, | ||||
vm_prot_t, u_int, int8_t); | vm_prot_t, u_int, int8_t); | ||||
static void moea_syncicache(vm_paddr_t, vm_size_t); | static void moea_syncicache(vm_paddr_t, vm_size_t); | ||||
static boolean_t moea_query_bit(vm_page_t, int); | static boolean_t moea_query_bit(vm_page_t, int); | ||||
static u_int moea_clear_bit(vm_page_t, int); | static u_int moea_clear_bit(vm_page_t, int); | ||||
static void moea_kremove(mmu_t, vm_offset_t); | static void moea_kremove(vm_offset_t); | ||||
int moea_pte_spill(vm_offset_t); | int moea_pte_spill(vm_offset_t); | ||||
/* | /* | ||||
* Kernel MMU interface | * Kernel MMU interface | ||||
*/ | */ | ||||
void moea_clear_modify(mmu_t, vm_page_t); | void moea_clear_modify(vm_page_t); | ||||
void moea_copy_page(mmu_t, vm_page_t, vm_page_t); | void moea_copy_page(vm_page_t, vm_page_t); | ||||
void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, | void moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset, | ||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize); | vm_page_t *mb, vm_offset_t b_offset, int xfersize); | ||||
int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, | int moea_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, | ||||
int8_t); | int8_t); | ||||
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, | void moea_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t, | ||||
vm_prot_t); | vm_prot_t); | ||||
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); | void moea_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t); | ||||
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t); | vm_paddr_t moea_extract(pmap_t, vm_offset_t); | ||||
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); | vm_page_t moea_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t); | ||||
void moea_init(mmu_t); | void moea_init(void); | ||||
boolean_t moea_is_modified(mmu_t, vm_page_t); | boolean_t moea_is_modified(vm_page_t); | ||||
boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t); | boolean_t moea_is_prefaultable(pmap_t, vm_offset_t); | ||||
boolean_t moea_is_referenced(mmu_t, vm_page_t); | boolean_t moea_is_referenced(vm_page_t); | ||||
int moea_ts_referenced(mmu_t, vm_page_t); | int moea_ts_referenced(vm_page_t); | ||||
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); | vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); | ||||
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t); | boolean_t moea_page_exists_quick(pmap_t, vm_page_t); | ||||
void moea_page_init(mmu_t, vm_page_t); | void moea_page_init(vm_page_t); | ||||
int moea_page_wired_mappings(mmu_t, vm_page_t); | int moea_page_wired_mappings(vm_page_t); | ||||
void moea_pinit(mmu_t, pmap_t); | int moea_pinit(pmap_t); | ||||
void moea_pinit0(mmu_t, pmap_t); | void moea_pinit0(pmap_t); | ||||
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); | void moea_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); | ||||
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int); | void moea_qenter(vm_offset_t, vm_page_t *, int); | ||||
void moea_qremove(mmu_t, vm_offset_t, int); | void moea_qremove(vm_offset_t, int); | ||||
void moea_release(mmu_t, pmap_t); | void moea_release(pmap_t); | ||||
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); | void moea_remove(pmap_t, vm_offset_t, vm_offset_t); | ||||
void moea_remove_all(mmu_t, vm_page_t); | void moea_remove_all(vm_page_t); | ||||
void moea_remove_write(mmu_t, vm_page_t); | void moea_remove_write(vm_page_t); | ||||
void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); | void moea_unwire(pmap_t, vm_offset_t, vm_offset_t); | ||||
void moea_zero_page(mmu_t, vm_page_t); | void moea_zero_page(vm_page_t); | ||||
void moea_zero_page_area(mmu_t, vm_page_t, int, int); | void moea_zero_page_area(vm_page_t, int, int); | ||||
void moea_activate(mmu_t, struct thread *); | void moea_activate(struct thread *); | ||||
void moea_deactivate(mmu_t, struct thread *); | void moea_deactivate(struct thread *); | ||||
void moea_cpu_bootstrap(mmu_t, int); | void moea_cpu_bootstrap(int); | ||||
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t); | void moea_bootstrap(vm_offset_t, vm_offset_t); | ||||
void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t); | void *moea_mapdev(vm_paddr_t, vm_size_t); | ||||
void *moea_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t); | void *moea_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t); | ||||
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t); | void moea_unmapdev(vm_offset_t, vm_size_t); | ||||
vm_paddr_t moea_kextract(mmu_t, vm_offset_t); | vm_paddr_t moea_kextract(vm_offset_t); | ||||
void moea_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t); | void moea_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t); | ||||
void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t); | void moea_kenter(vm_offset_t, vm_paddr_t); | ||||
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma); | void moea_page_set_memattr(vm_page_t m, vm_memattr_t ma); | ||||
boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); | boolean_t moea_dev_direct_mapped(vm_paddr_t, vm_size_t); | ||||
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); | static void moea_sync_icache(pmap_t, vm_offset_t, vm_size_t); | ||||
void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va); | void moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va); | ||||
void moea_scan_init(mmu_t mmu); | void moea_scan_init(void); | ||||
vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m); | vm_offset_t moea_quick_enter_page(vm_page_t m); | ||||
void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr); | void moea_quick_remove_page(vm_offset_t addr); | ||||
boolean_t moea_page_is_mapped(mmu_t mmu, vm_page_t m); | boolean_t moea_page_is_mapped(vm_page_t m); | ||||
static int moea_map_user_ptr(mmu_t mmu, pmap_t pm, | static int moea_map_user_ptr(pmap_t pm, | ||||
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); | volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); | ||||
static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, | static int moea_decode_kernel_ptr(vm_offset_t addr, | ||||
int *is_user, vm_offset_t *decoded_addr); | int *is_user, vm_offset_t *decoded_addr); | ||||
static mmu_method_t moea_methods[] = { | static struct pmap_funcs moea_methods = { | ||||
MMUMETHOD(mmu_clear_modify, moea_clear_modify), | .clear_modify = moea_clear_modify, | ||||
MMUMETHOD(mmu_copy_page, moea_copy_page), | .copy_page = moea_copy_page, | ||||
MMUMETHOD(mmu_copy_pages, moea_copy_pages), | .copy_pages = moea_copy_pages, | ||||
MMUMETHOD(mmu_enter, moea_enter), | .enter = moea_enter, | ||||
MMUMETHOD(mmu_enter_object, moea_enter_object), | .enter_object = moea_enter_object, | ||||
MMUMETHOD(mmu_enter_quick, moea_enter_quick), | .enter_quick = moea_enter_quick, | ||||
MMUMETHOD(mmu_extract, moea_extract), | .extract = moea_extract, | ||||
MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold), | .extract_and_hold = moea_extract_and_hold, | ||||
MMUMETHOD(mmu_init, moea_init), | .init = moea_init, | ||||
MMUMETHOD(mmu_is_modified, moea_is_modified), | .is_modified = moea_is_modified, | ||||
MMUMETHOD(mmu_is_prefaultable, moea_is_prefaultable), | .is_prefaultable = moea_is_prefaultable, | ||||
MMUMETHOD(mmu_is_referenced, moea_is_referenced), | .is_referenced = moea_is_referenced, | ||||
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced), | .ts_referenced = moea_ts_referenced, | ||||
MMUMETHOD(mmu_map, moea_map), | .map = moea_map, | ||||
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick), | .page_exists_quick = moea_page_exists_quick, | ||||
MMUMETHOD(mmu_page_init, moea_page_init), | .page_init = moea_page_init, | ||||
MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings), | .page_wired_mappings = moea_page_wired_mappings, | ||||
MMUMETHOD(mmu_pinit, moea_pinit), | .pinit = moea_pinit, | ||||
MMUMETHOD(mmu_pinit0, moea_pinit0), | .pinit0 = moea_pinit0, | ||||
MMUMETHOD(mmu_protect, moea_protect), | .protect = moea_protect, | ||||
MMUMETHOD(mmu_qenter, moea_qenter), | .qenter = moea_qenter, | ||||
MMUMETHOD(mmu_qremove, moea_qremove), | .qremove = moea_qremove, | ||||
MMUMETHOD(mmu_release, moea_release), | .release = moea_release, | ||||
MMUMETHOD(mmu_remove, moea_remove), | .remove = moea_remove, | ||||
MMUMETHOD(mmu_remove_all, moea_remove_all), | .remove_all = moea_remove_all, | ||||
MMUMETHOD(mmu_remove_write, moea_remove_write), | .remove_write = moea_remove_write, | ||||
MMUMETHOD(mmu_sync_icache, moea_sync_icache), | .sync_icache = moea_sync_icache, | ||||
MMUMETHOD(mmu_unwire, moea_unwire), | .unwire = moea_unwire, | ||||
MMUMETHOD(mmu_zero_page, moea_zero_page), | .zero_page = moea_zero_page, | ||||
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area), | .zero_page_area = moea_zero_page_area, | ||||
MMUMETHOD(mmu_activate, moea_activate), | .activate = moea_activate, | ||||
MMUMETHOD(mmu_deactivate, moea_deactivate), | .deactivate = moea_deactivate, | ||||
MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr), | .page_set_memattr = moea_page_set_memattr, | ||||
MMUMETHOD(mmu_quick_enter_page, moea_quick_enter_page), | .quick_enter_page = moea_quick_enter_page, | ||||
MMUMETHOD(mmu_quick_remove_page, moea_quick_remove_page), | .quick_remove_page = moea_quick_remove_page, | ||||
MMUMETHOD(mmu_page_is_mapped, moea_page_is_mapped), | .page_is_mapped = moea_page_is_mapped, | ||||
/* Internal interfaces */ | /* Internal interfaces */ | ||||
MMUMETHOD(mmu_bootstrap, moea_bootstrap), | .bootstrap = moea_bootstrap, | ||||
MMUMETHOD(mmu_cpu_bootstrap, moea_cpu_bootstrap), | .cpu_bootstrap = moea_cpu_bootstrap, | ||||
MMUMETHOD(mmu_mapdev_attr, moea_mapdev_attr), | .mapdev_attr = moea_mapdev_attr, | ||||
MMUMETHOD(mmu_mapdev, moea_mapdev), | .mapdev = moea_mapdev, | ||||
MMUMETHOD(mmu_unmapdev, moea_unmapdev), | .unmapdev = moea_unmapdev, | ||||
MMUMETHOD(mmu_kextract, moea_kextract), | .kextract = moea_kextract, | ||||
MMUMETHOD(mmu_kenter, moea_kenter), | .kenter = moea_kenter, | ||||
MMUMETHOD(mmu_kenter_attr, moea_kenter_attr), | .kenter_attr = moea_kenter_attr, | ||||
MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped), | .dev_direct_mapped = moea_dev_direct_mapped, | ||||
MMUMETHOD(mmu_scan_init, moea_scan_init), | .dumpsys_pa_init = moea_scan_init, | ||||
MMUMETHOD(mmu_dumpsys_map, moea_dumpsys_map), | .dumpsys_map_chunk = moea_dumpsys_map, | ||||
MMUMETHOD(mmu_map_user_ptr, moea_map_user_ptr), | .map_user_ptr = moea_map_user_ptr, | ||||
MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr), | .decode_kernel_ptr = moea_decode_kernel_ptr, | ||||
{ 0, 0 } | |||||
}; | }; | ||||
MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0); | MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods); | ||||
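This table is the heart of the change: the kobj-style MMUMETHOD list, which dispatched every pmap operation through a method lookup keyed by an mmu_t cookie, becomes a plain struct of function pointers consumed directly. A minimal sketch of how such a table can be called once installed (the "installed" pointer and the wrapper name are hypothetical illustrations, not the real dispatch code):

/* Hypothetical consumer of the table above. */
static struct pmap_funcs *installed = &moea_methods;

int
pmap_enter_sketch(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int flags, int8_t psind)
{
	/* One direct indirect call; no mmu_t cookie, no kobj lookup. */
	return (installed->enter(pmap, va, m, prot, flags, psind));
}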
static __inline uint32_t | static __inline uint32_t | ||||
moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma) | moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma) | ||||
{ | { | ||||
uint32_t pte_lo; | uint32_t pte_lo; | ||||
int i; | int i; | ||||
if (ma != VM_MEMATTR_DEFAULT) { | if (ma != VM_MEMATTR_DEFAULT) { | ||||
if (mapa->om_pa < mapb->om_pa) | if (mapa->om_pa < mapb->om_pa) | ||||
return (-1); | return (-1); | ||||
else if (mapa->om_pa > mapb->om_pa) | else if (mapa->om_pa > mapb->om_pa) | ||||
return (1); | return (1); | ||||
else | else | ||||
return (0); | return (0); | ||||
} | } | ||||
void | void | ||||
moea_cpu_bootstrap(mmu_t mmup, int ap) | moea_cpu_bootstrap(int ap) | ||||
{ | { | ||||
u_int sdr; | u_int sdr; | ||||
int i; | int i; | ||||
if (ap) { | if (ap) { | ||||
powerpc_sync(); | powerpc_sync(); | ||||
__asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu)); | __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu)); | ||||
__asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl)); | __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl)); | ||||
sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10); | sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10); | ||||
__asm __volatile("mtsdr1 %0" :: "r"(sdr)); | __asm __volatile("mtsdr1 %0" :: "r"(sdr)); | ||||
isync(); | isync(); | ||||
tlbia(); | tlbia(); | ||||
} | } | ||||
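moea_cpu_bootstrap() programs SDR1 by OR-ing the naturally aligned PTEG table address with the table-size mask shifted into the HTABMASK field. A standalone worked example with assumed values (the minimum 64 KB table, i.e. 1024 PTEGs of 64 bytes, placed at 0x00100000):

#include <stdio.h>

int
main(void)
{
	unsigned pteg_table = 0x00100000;	/* assumed, 64 KB aligned */
	unsigned pteg_mask = 1024 - 1;		/* 0x3ff: 1024 PTEGs */

	/* Same expression as in moea_cpu_bootstrap() above. */
	unsigned sdr = pteg_table | (pteg_mask >> 10);

	printf("SDR1 = 0x%08x\n", sdr);		/* 0x00100000: HTABMASK 0 */
	return (0);
}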
void | void | ||||
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) | moea_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) | ||||
{ | { | ||||
ihandle_t mmui; | ihandle_t mmui; | ||||
phandle_t chosen, mmu; | phandle_t chosen, mmu; | ||||
int sz; | int sz; | ||||
int i, j; | int i, j; | ||||
vm_size_t size, physsz, hwphyssz; | vm_size_t size, physsz, hwphyssz; | ||||
vm_offset_t pa, va, off; | vm_offset_t pa, va, off; | ||||
void *dpcpu; | void *dpcpu; | ||||
for (i = 0; i < sz; i++) { | for (i = 0; i < sz; i++) { | ||||
* on-demand BAT tables take care of the translation. | * on-demand BAT tables take care of the translation. | ||||
*/ | */ | ||||
if (translations[i].om_va == translations[i].om_pa) | if (translations[i].om_va == translations[i].om_pa) | ||||
continue; | continue; | ||||
/* Enter the pages */ | /* Enter the pages */ | ||||
for (off = 0; off < translations[i].om_len; | for (off = 0; off < translations[i].om_len; | ||||
off += PAGE_SIZE) | off += PAGE_SIZE) | ||||
moea_kenter(mmup, translations[i].om_va + off, | moea_kenter(translations[i].om_va + off, | ||||
translations[i].om_pa + off); | translations[i].om_pa + off); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Calculate the last available physical address. | * Calculate the last available physical address. | ||||
*/ | */ | ||||
for (i = 0; phys_avail[i + 2] != 0; i += 2) | for (i = 0; phys_avail[i + 2] != 0; i += 2) | ||||
; | ; | ||||
Maxmem = powerpc_btop(phys_avail[i + 1]); | Maxmem = powerpc_btop(phys_avail[i + 1]); | ||||
moea_cpu_bootstrap(mmup,0); | moea_cpu_bootstrap(0); | ||||
mtmsr(mfmsr() | PSL_DR | PSL_IR); | mtmsr(mfmsr() | PSL_DR | PSL_IR); | ||||
pmap_bootstrapped++; | pmap_bootstrapped++; | ||||
/* | /* | ||||
* Set the start and end of kva. | * Set the start and end of kva. | ||||
*/ | */ | ||||
virtual_avail = VM_MIN_KERNEL_ADDRESS; | virtual_avail = VM_MIN_KERNEL_ADDRESS; | ||||
virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; | virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; | ||||
/* | /* | ||||
* Allocate a kernel stack with a guard page for thread0 and map it | * Allocate a kernel stack with a guard page for thread0 and map it | ||||
* into the kernel page map. | * into the kernel page map. | ||||
*/ | */ | ||||
pa = moea_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE); | pa = moea_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE); | ||||
va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; | va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; | ||||
virtual_avail = va + kstack_pages * PAGE_SIZE; | virtual_avail = va + kstack_pages * PAGE_SIZE; | ||||
CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); | CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); | ||||
thread0.td_kstack = va; | thread0.td_kstack = va; | ||||
thread0.td_kstack_pages = kstack_pages; | thread0.td_kstack_pages = kstack_pages; | ||||
for (i = 0; i < kstack_pages; i++) { | for (i = 0; i < kstack_pages; i++) { | ||||
moea_kenter(mmup, va, pa); | moea_kenter(va, pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
/* | /* | ||||
* Allocate virtual address space for the message buffer. | * Allocate virtual address space for the message buffer. | ||||
*/ | */ | ||||
pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE); | pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE); | ||||
msgbufp = (struct msgbuf *)virtual_avail; | msgbufp = (struct msgbuf *)virtual_avail; | ||||
va = virtual_avail; | va = virtual_avail; | ||||
virtual_avail += round_page(msgbufsize); | virtual_avail += round_page(msgbufsize); | ||||
while (va < virtual_avail) { | while (va < virtual_avail) { | ||||
moea_kenter(mmup, va, pa); | moea_kenter(va, pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
/* | /* | ||||
* Allocate virtual address space for the dynamic percpu area. | * Allocate virtual address space for the dynamic percpu area. | ||||
*/ | */ | ||||
pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); | pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); | ||||
dpcpu = (void *)virtual_avail; | dpcpu = (void *)virtual_avail; | ||||
va = virtual_avail; | va = virtual_avail; | ||||
virtual_avail += DPCPU_SIZE; | virtual_avail += DPCPU_SIZE; | ||||
while (va < virtual_avail) { | while (va < virtual_avail) { | ||||
moea_kenter(mmup, va, pa); | moea_kenter(va, pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
dpcpu_init(dpcpu, 0); | dpcpu_init(dpcpu, 0); | ||||
} | } | ||||
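The Maxmem computation near the top of this function walks phys_avail, which by FreeBSD convention is a zero-terminated array of (start, end) physical-address pairs. A standalone illustration of that walk with made-up regions:

#include <stdio.h>

/* Made-up memory map in the usual phys_avail layout. */
static unsigned long phys_avail_demo[] = {
	0x00003000, 0x0fe00000,		/* first usable range */
	0x10000000, 0x1ffff000,		/* second usable range */
	0, 0				/* terminator */
};

int
main(void)
{
	int i;

	/* Same walk as in moea_bootstrap(): land on the last pair. */
	for (i = 0; phys_avail_demo[i + 2] != 0; i += 2)
		;
	printf("Maxmem covers up to 0x%lx\n", phys_avail_demo[i + 1]);
	return (0);
}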
/* | /* | ||||
* Activate a user pmap. The pmap must be activated before its address | * Activate a user pmap. The pmap must be activated before its address | ||||
* space can be accessed in any way. | * space can be accessed in any way. | ||||
*/ | */ | ||||
void | void | ||||
moea_activate(mmu_t mmu, struct thread *td) | moea_activate(struct thread *td) | ||||
{ | { | ||||
pmap_t pm, pmr; | pmap_t pm, pmr; | ||||
/* | /* | ||||
* Load all the data we need up front to encourage the compiler to | * Load all the data we need up front to encourage the compiler to | ||||
* not issue any loads while we have interrupts disabled below. | * not issue any loads while we have interrupts disabled below. | ||||
*/ | */ | ||||
pm = &td->td_proc->p_vmspace->vm_pmap; | pm = &td->td_proc->p_vmspace->vm_pmap; | ||||
pmr = pm->pmap_phys; | pmr = pm->pmap_phys; | ||||
CPU_SET(PCPU_GET(cpuid), &pm->pm_active); | CPU_SET(PCPU_GET(cpuid), &pm->pm_active); | ||||
PCPU_SET(curpmap, pmr); | PCPU_SET(curpmap, pmr); | ||||
mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); | mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); | ||||
} | } | ||||
void | void | ||||
moea_deactivate(mmu_t mmu, struct thread *td) | moea_deactivate(struct thread *td) | ||||
{ | { | ||||
pmap_t pm; | pmap_t pm; | ||||
pm = &td->td_proc->p_vmspace->vm_pmap; | pm = &td->td_proc->p_vmspace->vm_pmap; | ||||
CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); | CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); | ||||
PCPU_SET(curpmap, NULL); | PCPU_SET(curpmap, NULL); | ||||
} | } | ||||
void | void | ||||
moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) | moea_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
struct pvo_entry key, *pvo; | struct pvo_entry key, *pvo; | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
key.pvo_vaddr = sva; | key.pvo_vaddr = sva; | ||||
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | ||||
pvo != NULL && PVO_VADDR(pvo) < eva; | pvo != NULL && PVO_VADDR(pvo) < eva; | ||||
pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { | pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { | ||||
if ((pvo->pvo_vaddr & PVO_WIRED) == 0) | if ((pvo->pvo_vaddr & PVO_WIRED) == 0) | ||||
panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo); | panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo); | ||||
pvo->pvo_vaddr &= ~PVO_WIRED; | pvo->pvo_vaddr &= ~PVO_WIRED; | ||||
pm->pm_stats.wired_count--; | pm->pm_stats.wired_count--; | ||||
} | } | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
} | } | ||||
void | void | ||||
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) | moea_copy_page(vm_page_t msrc, vm_page_t mdst) | ||||
{ | { | ||||
vm_offset_t dst; | vm_offset_t dst; | ||||
vm_offset_t src; | vm_offset_t src; | ||||
dst = VM_PAGE_TO_PHYS(mdst); | dst = VM_PAGE_TO_PHYS(mdst); | ||||
src = VM_PAGE_TO_PHYS(msrc); | src = VM_PAGE_TO_PHYS(msrc); | ||||
bcopy((void *)src, (void *)dst, PAGE_SIZE); | bcopy((void *)src, (void *)dst, PAGE_SIZE); | ||||
} | } | ||||
void | void | ||||
moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, | moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset, | ||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize) | vm_page_t *mb, vm_offset_t b_offset, int xfersize) | ||||
{ | { | ||||
void *a_cp, *b_cp; | void *a_cp, *b_cp; | ||||
vm_offset_t a_pg_offset, b_pg_offset; | vm_offset_t a_pg_offset, b_pg_offset; | ||||
int cnt; | int cnt; | ||||
while (xfersize > 0) { | while (xfersize > 0) { | ||||
a_pg_offset = a_offset & PAGE_MASK; | a_pg_offset = a_offset & PAGE_MASK; | ||||
xfersize -= cnt; | xfersize -= cnt; | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Zero a page of physical memory by temporarily mapping it into the tlb. | * Zero a page of physical memory by temporarily mapping it into the tlb. | ||||
*/ | */ | ||||
void | void | ||||
moea_zero_page(mmu_t mmu, vm_page_t m) | moea_zero_page(vm_page_t m) | ||||
{ | { | ||||
vm_offset_t off, pa = VM_PAGE_TO_PHYS(m); | vm_offset_t off, pa = VM_PAGE_TO_PHYS(m); | ||||
for (off = 0; off < PAGE_SIZE; off += cacheline_size) | for (off = 0; off < PAGE_SIZE; off += cacheline_size) | ||||
__asm __volatile("dcbz 0,%0" :: "r"(pa + off)); | __asm __volatile("dcbz 0,%0" :: "r"(pa + off)); | ||||
} | } | ||||
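moea_zero_page() relies on the 1:1 physical mapping on 32-bit OEA and clears one full cache line per dcbz. A standalone count of the iterations, assuming 4 KB pages and 32-byte cache lines:

#include <stdio.h>

int
main(void)
{
	unsigned page_size = 4096, cacheline_size = 32;	/* assumed */
	unsigned off, count = 0;

	/* Same loop shape as moea_zero_page(); each pass stands in
	 * for one dcbz clearing a whole cache line. */
	for (off = 0; off < page_size; off += cacheline_size)
		count++;
	printf("%u dcbz instructions per page\n", count);	/* 128 */
	return (0);
}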
void | void | ||||
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) | moea_zero_page_area(vm_page_t m, int off, int size) | ||||
{ | { | ||||
vm_offset_t pa = VM_PAGE_TO_PHYS(m); | vm_offset_t pa = VM_PAGE_TO_PHYS(m); | ||||
void *va = (void *)(pa + off); | void *va = (void *)(pa + off); | ||||
bzero(va, size); | bzero(va, size); | ||||
} | } | ||||
vm_offset_t | vm_offset_t | ||||
moea_quick_enter_page(mmu_t mmu, vm_page_t m) | moea_quick_enter_page(vm_page_t m) | ||||
{ | { | ||||
return (VM_PAGE_TO_PHYS(m)); | return (VM_PAGE_TO_PHYS(m)); | ||||
} | } | ||||
void | void | ||||
moea_quick_remove_page(mmu_t mmu, vm_offset_t addr) | moea_quick_remove_page(vm_offset_t addr) | ||||
{ | { | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea_page_is_mapped(mmu_t mmu, vm_page_t m) | moea_page_is_mapped(vm_page_t m) | ||||
{ | { | ||||
return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); | return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); | ||||
} | } | ||||
/* | /* | ||||
* Map the given physical page at the specified virtual address in the | * Map the given physical page at the specified virtual address in the | ||||
* target pmap with the protection requested. If specified the page | * target pmap with the protection requested. If specified the page | ||||
* will be wired down. | * will be wired down. | ||||
*/ | */ | ||||
int | int | ||||
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | moea_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, | ||||
u_int flags, int8_t psind) | u_int flags, int8_t psind) | ||||
{ | { | ||||
int error; | int error; | ||||
for (;;) { | for (;;) { | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
error = moea_enter_locked(pmap, va, m, prot, flags, psind); | error = moea_enter_locked(pmap, va, m, prot, flags, psind); | ||||
* amount as the page is offset from m_start within the object. The | * amount as the page is offset from m_start within the object. The | ||||
* last page in the sequence is the page with the largest offset from | * last page in the sequence is the page with the largest offset from | ||||
* m_start that can be mapped at a virtual address less than the given | * m_start that can be mapped at a virtual address less than the given | ||||
* virtual address end. Not every virtual page between start and end | * virtual address end. Not every virtual page between start and end | ||||
* is mapped; only those for which a resident page exists with the | * is mapped; only those for which a resident page exists with the | ||||
* corresponding offset from m_start are mapped. | * corresponding offset from m_start are mapped. | ||||
*/ | */ | ||||
void | void | ||||
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, | moea_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end, | ||||
vm_page_t m_start, vm_prot_t prot) | vm_page_t m_start, vm_prot_t prot) | ||||
{ | { | ||||
vm_page_t m; | vm_page_t m; | ||||
vm_pindex_t diff, psize; | vm_pindex_t diff, psize; | ||||
VM_OBJECT_ASSERT_LOCKED(m_start->object); | VM_OBJECT_ASSERT_LOCKED(m_start->object); | ||||
psize = atop(end - start); | psize = atop(end - start); | ||||
m = m_start; | m = m_start; | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | ||||
moea_enter_locked(pm, start + ptoa(diff), m, prot & | moea_enter_locked(pm, start + ptoa(diff), m, prot & | ||||
(VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_QUICK_LOCKED, | (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_QUICK_LOCKED, | ||||
0); | 0); | ||||
m = TAILQ_NEXT(m, listq); | m = TAILQ_NEXT(m, listq); | ||||
} | } | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
} | } | ||||
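Each resident page lands at an address offset from start in proportion to its distance from m_start within the object. A standalone illustration of that arithmetic (4 KB pages assumed; the pindex values are made up):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KB pages */
#define ptoa(x)		((unsigned long)(x) << PAGE_SHIFT)

int
main(void)
{
	unsigned long start = 0x20000000UL;	/* made-up start VA */
	unsigned long diff = 19 - 16;		/* page pindex - m_start pindex */

	/* Same computation as in moea_enter_object() above. */
	printf("va = 0x%lx\n", start + ptoa(diff));	/* 0x20003000 */
	return (0);
}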
void | void | ||||
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, | moea_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, | ||||
vm_prot_t prot) | vm_prot_t prot) | ||||
{ | { | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), | moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), | ||||
PMAP_ENTER_QUICK_LOCKED, 0); | PMAP_ENTER_QUICK_LOCKED, 0); | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
} | } | ||||
vm_paddr_t | vm_paddr_t | ||||
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) | moea_extract(pmap_t pm, vm_offset_t va) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); | pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); | ||||
if (pvo == NULL) | if (pvo == NULL) | ||||
pa = 0; | pa = 0; | ||||
else | else | ||||
pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); | pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
return (pa); | return (pa); | ||||
} | } | ||||
/* | /* | ||||
* Atomically extract and hold the physical page with the given | * Atomically extract and hold the physical page with the given | ||||
* pmap and virtual address pair if that mapping permits the given | * pmap and virtual address pair if that mapping permits the given | ||||
* protection. | * protection. | ||||
*/ | */ | ||||
vm_page_t | vm_page_t | ||||
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) | moea_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_page_t m; | vm_page_t m; | ||||
m = NULL; | m = NULL; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); | pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); | ||||
if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) && | if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) && | ||||
((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW || | ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW || | ||||
(prot & VM_PROT_WRITE) == 0)) { | (prot & VM_PROT_WRITE) == 0)) { | ||||
m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); | m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); | ||||
if (!vm_page_wire_mapped(m)) | if (!vm_page_wire_mapped(m)) | ||||
m = NULL; | m = NULL; | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (m); | return (m); | ||||
} | } | ||||
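A kernel-context sketch of the hold contract (the caller is hypothetical): the returned page stays wired, so it cannot be freed until the caller drops the wiring, even after the pmap lock is released.

static int
read_byte_sketch(pmap_t pmap, vm_offset_t va, uint8_t *out)
{
	vm_page_t m;

	m = moea_extract_and_hold(pmap, va, VM_PROT_READ);
	if (m == NULL)
		return (EFAULT);
	/* 32-bit OEA runs with physical memory mapped 1:1. */
	*out = *(uint8_t *)(uintptr_t)(VM_PAGE_TO_PHYS(m) + (va & PAGE_MASK));
	vm_page_unwire(m, PQ_ACTIVE);
	return (0);
}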
void | void | ||||
moea_init(mmu_t mmu) | moea_init() | ||||
{ | { | ||||
moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), | moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), | ||||
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, | NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, | ||||
UMA_ZONE_VM | UMA_ZONE_NOFREE); | UMA_ZONE_VM | UMA_ZONE_NOFREE); | ||||
moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), | moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), | ||||
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, | NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, | ||||
UMA_ZONE_VM | UMA_ZONE_NOFREE); | UMA_ZONE_VM | UMA_ZONE_NOFREE); | ||||
moea_initialized = TRUE; | moea_initialized = TRUE; | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea_is_referenced(mmu_t mmu, vm_page_t m) | moea_is_referenced(vm_page_t m) | ||||
{ | { | ||||
boolean_t rv; | boolean_t rv; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea_is_referenced: page %p is not managed", m)); | ("moea_is_referenced: page %p is not managed", m)); | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
rv = moea_query_bit(m, PTE_REF); | rv = moea_query_bit(m, PTE_REF); | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea_is_modified(mmu_t mmu, vm_page_t m) | moea_is_modified(vm_page_t m) | ||||
{ | { | ||||
boolean_t rv; | boolean_t rv; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea_is_modified: page %p is not managed", m)); | ("moea_is_modified: page %p is not managed", m)); | ||||
/* | /* | ||||
* If the page is not busied then this check is racy. | * If the page is not busied then this check is racy. | ||||
*/ | */ | ||||
if (!pmap_page_is_write_mapped(m)) | if (!pmap_page_is_write_mapped(m)) | ||||
return (FALSE); | return (FALSE); | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
rv = moea_query_bit(m, PTE_CHG); | rv = moea_query_bit(m, PTE_CHG); | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) | moea_is_prefaultable(pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
boolean_t rv; | boolean_t rv; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); | pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); | ||||
rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0; | rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0; | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
void | void | ||||
moea_clear_modify(mmu_t mmu, vm_page_t m) | moea_clear_modify(vm_page_t m) | ||||
{ | { | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea_clear_modify: page %p is not managed", m)); | ("moea_clear_modify: page %p is not managed", m)); | ||||
vm_page_assert_busied(m); | vm_page_assert_busied(m); | ||||
if (!pmap_page_is_write_mapped(m)) | if (!pmap_page_is_write_mapped(m)) | ||||
return; | return; | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
moea_clear_bit(m, PTE_CHG); | moea_clear_bit(m, PTE_CHG); | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
} | } | ||||
/* | /* | ||||
* Clear the write and modified bits in each of the given page's mappings. | * Clear the write and modified bits in each of the given page's mappings. | ||||
*/ | */ | ||||
void | void | ||||
moea_remove_write(mmu_t mmu, vm_page_t m) | moea_remove_write(vm_page_t m) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
struct pte *pt; | struct pte *pt; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
u_int lo; | u_int lo; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea_remove_write: page %p is not managed", m)); | ("moea_remove_write: page %p is not managed", m)); | ||||
* is necessary that 0 only be returned when there are truly no | * is necessary that 0 only be returned when there are truly no | ||||
* reference bits set. | * reference bits set. | ||||
* | * | ||||
* XXX: The exact number of bits to check and clear is a matter that | * XXX: The exact number of bits to check and clear is a matter that | ||||
* should be tested and standardized at some point in the future for | * should be tested and standardized at some point in the future for | ||||
* optimal aging of shared pages. | * optimal aging of shared pages. | ||||
*/ | */ | ||||
int | int | ||||
moea_ts_referenced(mmu_t mmu, vm_page_t m) | moea_ts_referenced(vm_page_t m) | ||||
{ | { | ||||
int count; | int count; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea_ts_referenced: page %p is not managed", m)); | ("moea_ts_referenced: page %p is not managed", m)); | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
count = moea_clear_bit(m, PTE_REF); | count = moea_clear_bit(m, PTE_REF); | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
return (count); | return (count); | ||||
} | } | ||||
/* | /* | ||||
* Modify the WIMG settings of all mappings for a page. | * Modify the WIMG settings of all mappings for a page. | ||||
*/ | */ | ||||
void | void | ||||
moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) | moea_page_set_memattr(vm_page_t m, vm_memattr_t ma) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
struct pvo_head *pvo_head; | struct pvo_head *pvo_head; | ||||
struct pte *pt; | struct pte *pt; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
u_int lo; | u_int lo; | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) { | if ((m->oflags & VPO_UNMANAGED) != 0) { | ||||
m->md.mdpg_cache_attrs = ma; | m->md.mdpg_cache_attrs = ma; | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
} | } | ||||
/* | /* | ||||
* Map a wired page into kernel virtual address space. | * Map a wired page into kernel virtual address space. | ||||
*/ | */ | ||||
void | void | ||||
moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) | moea_kenter(vm_offset_t va, vm_paddr_t pa) | ||||
{ | { | ||||
moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); | moea_kenter_attr(va, pa, VM_MEMATTR_DEFAULT); | ||||
} | } | ||||
void | void | ||||
moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) | moea_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) | ||||
{ | { | ||||
u_int pte_lo; | u_int pte_lo; | ||||
int error; | int error; | ||||
#if 0 | #if 0 | ||||
if (va < VM_MIN_KERNEL_ADDRESS) | if (va < VM_MIN_KERNEL_ADDRESS) | ||||
panic("moea_kenter: attempt to enter non-kernel address %#x", | panic("moea_kenter: attempt to enter non-kernel address %#x", | ||||
va); | va); | ||||
#endif | #endif | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
} | } | ||||
/* | /* | ||||
* Extract the physical page address associated with the given kernel virtual | * Extract the physical page address associated with the given kernel virtual | ||||
* address. | * address. | ||||
*/ | */ | ||||
vm_paddr_t | vm_paddr_t | ||||
moea_kextract(mmu_t mmu, vm_offset_t va) | moea_kextract(vm_offset_t va) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
/* | /* | ||||
* Allow direct mappings on 32-bit OEA | * Allow direct mappings on 32-bit OEA | ||||
*/ | */ | ||||
if (va < VM_MIN_KERNEL_ADDRESS) { | if (va < VM_MIN_KERNEL_ADDRESS) { | ||||
return (va); | return (va); | ||||
} | } | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); | pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); | ||||
KASSERT(pvo != NULL, ("moea_kextract: no addr found")); | KASSERT(pvo != NULL, ("moea_kextract: no addr found")); | ||||
pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); | pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
return (pa); | return (pa); | ||||
} | } | ||||
/* | /* | ||||
* Remove a wired page from kernel virtual address space. | * Remove a wired page from kernel virtual address space. | ||||
*/ | */ | ||||
void | void | ||||
moea_kremove(mmu_t mmu, vm_offset_t va) | moea_kremove(vm_offset_t va) | ||||
{ | { | ||||
moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); | moea_remove(kernel_pmap, va, va + PAGE_SIZE); | ||||
} | } | ||||
/* | /* | ||||
* Provide a kernel pointer corresponding to a given userland pointer. | * Provide a kernel pointer corresponding to a given userland pointer. | ||||
* The returned pointer is valid until the next time this function is | * The returned pointer is valid until the next time this function is | ||||
* called in this thread. This is used internally in copyin/copyout. | * called in this thread. This is used internally in copyin/copyout. | ||||
*/ | */ | ||||
int | int | ||||
moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr, | moea_map_user_ptr(pmap_t pm, volatile const void *uaddr, | ||||
void **kaddr, size_t ulen, size_t *klen) | void **kaddr, size_t ulen, size_t *klen) | ||||
{ | { | ||||
size_t l; | size_t l; | ||||
register_t vsid; | register_t vsid; | ||||
*kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK); | *kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK); | ||||
l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr); | l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr); | ||||
if (l > ulen) | if (l > ulen) | ||||
} | } | ||||
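The kernel window at USER_ADDR preserves the pointer's offset within its own 256 MB segment, and the returned length is clipped so the access never crosses the segment end. A standalone version of the arithmetic (the constants are assumed values for 32-bit OEA, not taken from this file):

#include <stdint.h>
#include <stdio.h>

#define SEGMENT_LENGTH	0x10000000UL	/* assumed: 256 MB segments */
#define SEGMENT_MASK	0xf0000000UL	/* assumed: segment-number bits */
#define USER_ADDR	0xd0000000UL	/* assumed kernel window */

int
main(void)
{
	uintptr_t uaddr = 0x1234abcdUL;	/* made-up user pointer */
	uintptr_t kaddr = USER_ADDR + (uaddr & ~SEGMENT_MASK);
	size_t l = (USER_ADDR + SEGMENT_LENGTH) - kaddr;

	/* kaddr = 0xd234abcd; 0x0dcb5433 bytes left in the segment. */
	printf("kaddr=0x%lx left=0x%zx\n", (unsigned long)kaddr, l);
	return (0);
}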
/* | /* | ||||
* Figure out where a given kernel pointer (usually in a fault) points | * Figure out where a given kernel pointer (usually in a fault) points | ||||
* to from the VM's perspective, potentially remapping into userland's | * to from the VM's perspective, potentially remapping into userland's | ||||
* address space. | * address space. | ||||
*/ | */ | ||||
static int | static int | ||||
moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user, | moea_decode_kernel_ptr(vm_offset_t addr, int *is_user, | ||||
vm_offset_t *decoded_addr) | vm_offset_t *decoded_addr) | ||||
{ | { | ||||
vm_offset_t user_sr; | vm_offset_t user_sr; | ||||
if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { | if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { | ||||
user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; | user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; | ||||
addr &= ADDR_PIDX | ADDR_POFF; | addr &= ADDR_PIDX | ADDR_POFF; | ||||
addr |= user_sr << ADDR_SR_SHFT; | addr |= user_sr << ADDR_SR_SHFT; | ||||
* | * | ||||
* The value passed in *virt is a suggested virtual address for the mapping. | * The value passed in *virt is a suggested virtual address for the mapping. | ||||
* Architectures which can support a direct-mapped physical to virtual region | * Architectures which can support a direct-mapped physical to virtual region | ||||
* can return the appropriate address within that region, leaving '*virt' | * can return the appropriate address within that region, leaving '*virt' | ||||
* unchanged. We cannot and therefore do not; *virt is updated with the | * unchanged. We cannot and therefore do not; *virt is updated with the | ||||
* first usable address after the mapped region. | * first usable address after the mapped region. | ||||
*/ | */ | ||||
vm_offset_t | vm_offset_t | ||||
moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, | moea_map(vm_offset_t *virt, vm_paddr_t pa_start, | ||||
vm_paddr_t pa_end, int prot) | vm_paddr_t pa_end, int prot) | ||||
{ | { | ||||
vm_offset_t sva, va; | vm_offset_t sva, va; | ||||
sva = *virt; | sva = *virt; | ||||
va = sva; | va = sva; | ||||
for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) | for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) | ||||
moea_kenter(mmu, va, pa_start); | moea_kenter(va, pa_start); | ||||
*virt = va; | *virt = va; | ||||
return (sva); | return (sva); | ||||
} | } | ||||
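A kernel-context sketch of the in/out contract on *virt (the caller and addresses are hypothetical): the function returns the start of the new mapping and advances *virt past it.

static void
map_range_sketch(void)
{
	vm_offset_t virt = virtual_avail;	/* suggested placement */
	vm_offset_t va;

	/* Map three physical pages. */
	va = moea_map(&virt, 0x01000000, 0x01003000,
	    VM_PROT_READ | VM_PROT_WRITE);
	/* va == old virt; virt == va + 3 * PAGE_SIZE afterwards. */
	virtual_avail = virt;
}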
/* | /* | ||||
* Returns true if the pmap's pv is one of the first | * Returns true if the pmap's pv is one of the first | ||||
* 16 pvs linked to from this page. This count may | * 16 pvs linked to from this page. This count may | ||||
* be changed upwards or downwards in the future; it | * be changed upwards or downwards in the future; it | ||||
* is only necessary that true be returned for a small | * is only necessary that true be returned for a small | ||||
* subset of pmaps for proper page aging. | * subset of pmaps for proper page aging. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) | moea_page_exists_quick(pmap_t pmap, vm_page_t m) | ||||
{ | { | ||||
int loops; | int loops; | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
boolean_t rv; | boolean_t rv; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea_page_exists_quick: page %p is not managed", m)); | ("moea_page_exists_quick: page %p is not managed", m)); | ||||
loops = 0; | loops = 0; | ||||
rv = FALSE; | rv = FALSE; | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | ||||
if (pvo->pvo_pmap == pmap) { | if (pvo->pvo_pmap == pmap) { | ||||
rv = TRUE; | rv = TRUE; | ||||
break; | break; | ||||
} | } | ||||
if (++loops >= 16) | if (++loops >= 16) | ||||
break; | break; | ||||
} | } | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
void | void | ||||
moea_page_init(mmu_t mmu __unused, vm_page_t m) | moea_page_init(vm_page_t m) | ||||
{ | { | ||||
m->md.mdpg_attrs = 0; | m->md.mdpg_attrs = 0; | ||||
m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; | m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; | ||||
LIST_INIT(&m->md.mdpg_pvoh); | LIST_INIT(&m->md.mdpg_pvoh); | ||||
} | } | ||||
/* | /* | ||||
* Return the number of managed mappings to the given physical page | * Return the number of managed mappings to the given physical page | ||||
* that are wired. | * that are wired. | ||||
*/ | */ | ||||
int | int | ||||
moea_page_wired_mappings(mmu_t mmu, vm_page_t m) | moea_page_wired_mappings(vm_page_t m) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
int count; | int count; | ||||
count = 0; | count = 0; | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) | if ((m->oflags & VPO_UNMANAGED) != 0) | ||||
return (count); | return (count); | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) | LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) | ||||
if ((pvo->pvo_vaddr & PVO_WIRED) != 0) | if ((pvo->pvo_vaddr & PVO_WIRED) != 0) | ||||
count++; | count++; | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
return (count); | return (count); | ||||
} | } | ||||
static u_int moea_vsidcontext; | static u_int moea_vsidcontext; | ||||
void | int | ||||
moea_pinit(mmu_t mmu, pmap_t pmap) | moea_pinit(pmap_t pmap) | ||||
{ | { | ||||
int i, mask; | int i, mask; | ||||
u_int entropy; | u_int entropy; | ||||
RB_INIT(&pmap->pmap_pvo); | RB_INIT(&pmap->pmap_pvo); | ||||
entropy = 0; | entropy = 0; | ||||
__asm __volatile("mftb %0" : "=r"(entropy)); | __asm __volatile("mftb %0" : "=r"(entropy)); | ||||
if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap)) | if ((pmap->pmap_phys = (pmap_t)moea_kextract((vm_offset_t)pmap)) | ||||
== NULL) { | == NULL) { | ||||
pmap->pmap_phys = pmap; | pmap->pmap_phys = pmap; | ||||
} | } | ||||
mtx_lock(&moea_vsid_mutex); | mtx_lock(&moea_vsid_mutex); | ||||
/* | /* | ||||
* Allocate some segment registers for this pmap. | * Allocate some segment registers for this pmap. | ||||
if (moea_vsid_bitmap[n] & mask) { /* collision? */ | if (moea_vsid_bitmap[n] & mask) { /* collision? */ | ||||
hash |= i; | hash |= i; | ||||
} | } | ||||
KASSERT(!(moea_vsid_bitmap[n] & mask), | KASSERT(!(moea_vsid_bitmap[n] & mask), | ||||
("Allocating in-use VSID group %#x\n", hash)); | ("Allocating in-use VSID group %#x\n", hash)); | ||||
moea_vsid_bitmap[n] |= mask; | moea_vsid_bitmap[n] |= mask; | ||||
for (i = 0; i < 16; i++) | for (i = 0; i < 16; i++) | ||||
pmap->pm_sr[i] = VSID_MAKE(i, hash); | pmap->pm_sr[i] = VSID_MAKE(i, hash); | ||||
mtx_unlock(&moea_vsid_mutex); | mtx_unlock(&moea_vsid_mutex); | ||||
return; | return (1); | ||||
} | } | ||||
mtx_unlock(&moea_vsid_mutex); | mtx_unlock(&moea_vsid_mutex); | ||||
panic("moea_pinit: out of segments"); | panic("moea_pinit: out of segments"); | ||||
} | } | ||||
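moea_pinit() claims a free VSID group by setting a bit in moea_vsid_bitmap; moea_release() below clears it with the same idx/mask arithmetic. A standalone illustration of that bookkeeping (the NPMAPS value is assumed):

#include <assert.h>

#define NPMAPS		32768			/* assumed bitmap capacity */
#define VSID_NBPW	(sizeof(unsigned) * 8)	/* bits per bitmap word */

static unsigned vsid_bitmap[NPMAPS / VSID_NBPW];

int
main(void)
{
	unsigned hash = 0x2a5;			/* made-up VSID group */
	unsigned idx = hash & (NPMAPS - 1);
	unsigned mask = 1u << (idx % VSID_NBPW);

	idx /= VSID_NBPW;
	vsid_bitmap[idx] |= mask;		/* allocate, as in moea_pinit() */
	assert(vsid_bitmap[idx] & mask);
	vsid_bitmap[idx] &= ~mask;		/* free, as in moea_release() */
	assert((vsid_bitmap[idx] & mask) == 0);
	return (0);
}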
/* | /* | ||||
* Initialize the pmap associated with process 0. | * Initialize the pmap associated with process 0. | ||||
*/ | */ | ||||
void | void | ||||
moea_pinit0(mmu_t mmu, pmap_t pm) | moea_pinit0(pmap_t pm) | ||||
{ | { | ||||
PMAP_LOCK_INIT(pm); | PMAP_LOCK_INIT(pm); | ||||
moea_pinit(mmu, pm); | moea_pinit(pm); | ||||
bzero(&pm->pm_stats, sizeof(pm->pm_stats)); | bzero(&pm->pm_stats, sizeof(pm->pm_stats)); | ||||
} | } | ||||
/* | /* | ||||
* Set the physical protection on the specified range of this map as requested. | * Set the physical protection on the specified range of this map as requested. | ||||
*/ | */ | ||||
void | void | ||||
moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, | moea_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, | ||||
vm_prot_t prot) | vm_prot_t prot) | ||||
{ | { | ||||
struct pvo_entry *pvo, *tpvo, key; | struct pvo_entry *pvo, *tpvo, key; | ||||
struct pte *pt; | struct pte *pt; | ||||
KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, | KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, | ||||
("moea_protect: non current pmap")); | ("moea_protect: non current pmap")); | ||||
if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | ||||
moea_remove(mmu, pm, sva, eva); | moea_remove(pm, sva, eva); | ||||
return; | return; | ||||
} | } | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
key.pvo_vaddr = sva; | key.pvo_vaddr = sva; | ||||
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | ||||
pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | ||||
Show All 23 Lines | |||||
} | } | ||||
/* | /* | ||||
* Map a list of wired pages into kernel virtual address space. This is | * Map a list of wired pages into kernel virtual address space. This is | ||||
* intended for temporary mappings which do not need page modification or | * intended for temporary mappings which do not need page modification or | ||||
* references recorded. Existing mappings in the region are overwritten. | * references recorded. Existing mappings in the region are overwritten. | ||||
*/ | */ | ||||
void | void | ||||
moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) | moea_qenter(vm_offset_t sva, vm_page_t *m, int count) | ||||
{ | { | ||||
vm_offset_t va; | vm_offset_t va; | ||||
va = sva; | va = sva; | ||||
while (count-- > 0) { | while (count-- > 0) { | ||||
moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); | moea_kenter(va, VM_PAGE_TO_PHYS(*m)); | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
m++; | m++; | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Remove page mappings from kernel virtual address space. Intended for | * Remove page mappings from kernel virtual address space. Intended for | ||||
* temporary mappings entered by moea_qenter. | * temporary mappings entered by moea_qenter. | ||||
*/ | */ | ||||
void | void | ||||
moea_qremove(mmu_t mmu, vm_offset_t sva, int count) | moea_qremove(vm_offset_t sva, int count) | ||||
{ | { | ||||
vm_offset_t va; | vm_offset_t va; | ||||
va = sva; | va = sva; | ||||
while (count-- > 0) { | while (count-- > 0) { | ||||
moea_kremove(mmu, va); | moea_kremove(va); | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
} | } | ||||
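A kernel-context sketch of the intended pairing (the caller is hypothetical): map an array of pages contiguously, use them through sva, then tear the mappings down.

static void
with_pages_sketch(vm_offset_t sva, vm_page_t *pages, int npages)
{
	moea_qenter(sva, pages, npages);	/* overwrites old mappings */
	/* ... temporary access through sva ... */
	moea_qremove(sva, npages);		/* drop them again */
}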
void | void | ||||
moea_release(mmu_t mmu, pmap_t pmap) | moea_release(pmap_t pmap) | ||||
{ | { | ||||
int idx, mask; | int idx, mask; | ||||
/* | /* | ||||
* Free the segment registers' VSID | * Free the segment registers' VSID | ||||
*/ | */ | ||||
if (pmap->pm_sr[0] == 0) | if (pmap->pm_sr[0] == 0) | ||||
panic("moea_release"); | panic("moea_release"); | ||||
mtx_lock(&moea_vsid_mutex); | mtx_lock(&moea_vsid_mutex); | ||||
idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); | idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); | ||||
mask = 1 << (idx % VSID_NBPW); | mask = 1 << (idx % VSID_NBPW); | ||||
idx /= VSID_NBPW; | idx /= VSID_NBPW; | ||||
moea_vsid_bitmap[idx] &= ~mask; | moea_vsid_bitmap[idx] &= ~mask; | ||||
mtx_unlock(&moea_vsid_mutex); | mtx_unlock(&moea_vsid_mutex); | ||||
} | } | ||||
/* | /* | ||||
* Remove the given range of addresses from the specified map. | * Remove the given range of addresses from the specified map. | ||||
*/ | */ | ||||
void | void | ||||
moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) | moea_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
struct pvo_entry *pvo, *tpvo, key; | struct pvo_entry *pvo, *tpvo, key; | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
key.pvo_vaddr = sva; | key.pvo_vaddr = sva; | ||||
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | ||||
pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | ||||
tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); | tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); | ||||
moea_pvo_remove(pvo, -1); | moea_pvo_remove(pvo, -1); | ||||
} | } | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
rw_wunlock(&pvh_global_lock); | rw_wunlock(&pvh_global_lock); | ||||
} | } | ||||
/* | /* | ||||
* Remove physical page from all pmaps in which it resides. moea_pvo_remove() | * Remove physical page from all pmaps in which it resides. moea_pvo_remove() | ||||
* will reflect changes in pte's back to the vm_page. | * will reflect changes in pte's back to the vm_page. | ||||
*/ | */ | ||||
void | void | ||||
moea_remove_all(mmu_t mmu, vm_page_t m) | moea_remove_all(vm_page_t m) | ||||
{ | { | ||||
struct pvo_head *pvo_head; | struct pvo_head *pvo_head; | ||||
struct pvo_entry *pvo, *next_pvo; | struct pvo_entry *pvo, *next_pvo; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
rw_wlock(&pvh_global_lock); | rw_wlock(&pvh_global_lock); | ||||
pvo_head = vm_page_to_pvoh(m); | pvo_head = vm_page_to_pvoh(m); | ||||
for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { | for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { | ||||
moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size) | moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size) | ||||
if ((pa < start) || ((pa + size) > end)) | if ((pa < start) || ((pa + size) > end)) | ||||
return (ERANGE); | return (ERANGE); | ||||
return (0); | return (0); | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) | moea_dev_direct_mapped(vm_paddr_t pa, vm_size_t size) | ||||
{ | { | ||||
int i; | int i; | ||||
/* | /* | ||||
* This currently does not work for entries that | * This currently does not work for entries that | ||||
* overlap 256M BAT segments. | * overlap 256M BAT segments. | ||||
*/ | */ | ||||
for(i = 0; i < 16; i++) | for(i = 0; i < 16; i++) | ||||
if (moea_bat_mapped(i, pa, size) == 0) | if (moea_bat_mapped(i, pa, size) == 0) | ||||
return (0); | return (0); | ||||
return (EFAULT); | return (EFAULT); | ||||
} | } | ||||
/* | /* | ||||
* Map a set of physical memory pages into the kernel virtual | * Map a set of physical memory pages into the kernel virtual | ||||
* address space. Return a pointer to where it is mapped. This | * address space. Return a pointer to where it is mapped. This | ||||
* routine is intended to be used for mapping device memory, | * routine is intended to be used for mapping device memory, | ||||
* NOT real memory. | * NOT real memory. | ||||
*/ | */ | ||||
void * | void * | ||||
moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) | moea_mapdev(vm_paddr_t pa, vm_size_t size) | ||||
{ | { | ||||
return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); | return (moea_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT)); | ||||
} | } | ||||
void * | void * | ||||
moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) | moea_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) | ||||
{ | { | ||||
vm_offset_t va, tmpva, ppa, offset; | vm_offset_t va, tmpva, ppa, offset; | ||||
int i; | int i; | ||||
ppa = trunc_page(pa); | ppa = trunc_page(pa); | ||||
offset = pa & PAGE_MASK; | offset = pa & PAGE_MASK; | ||||
size = roundup(offset + size, PAGE_SIZE); | size = roundup(offset + size, PAGE_SIZE); | ||||
/* | /* | ||||
* If the physical address lies within a valid BAT table entry, | * If the physical address lies within a valid BAT table entry, | ||||
* return the 1:1 mapping. This currently doesn't work | * return the 1:1 mapping. This currently doesn't work | ||||
* for regions that overlap 256M BAT segments. | * for regions that overlap 256M BAT segments. | ||||
*/ | */ | ||||
for (i = 0; i < 16; i++) { | for (i = 0; i < 16; i++) { | ||||
if (moea_bat_mapped(i, pa, size) == 0) | if (moea_bat_mapped(i, pa, size) == 0) | ||||
return ((void *) pa); | return ((void *) pa); | ||||
} | } | ||||
va = kva_alloc(size); | va = kva_alloc(size); | ||||
if (!va) | if (!va) | ||||
panic("moea_mapdev: Couldn't alloc kernel virtual memory"); | panic("moea_mapdev: Couldn't alloc kernel virtual memory"); | ||||
for (tmpva = va; size > 0;) { | for (tmpva = va; size > 0;) { | ||||
moea_kenter_attr(mmu, tmpva, ppa, ma); | moea_kenter_attr(tmpva, ppa, ma); | ||||
tlbie(tmpva); | tlbie(tmpva); | ||||
size -= PAGE_SIZE; | size -= PAGE_SIZE; | ||||
tmpva += PAGE_SIZE; | tmpva += PAGE_SIZE; | ||||
ppa += PAGE_SIZE; | ppa += PAGE_SIZE; | ||||
} | } | ||||
return ((void *)(va + offset)); | return ((void *)(va + offset)); | ||||
} | } | ||||
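A kernel-context sketch of the mapdev/unmapdev pairing (the device address and register layout are hypothetical). Note that if the range falls inside a valid BAT entry, moea_mapdev_attr() returns the 1:1 address and moea_unmapdev() below treats it as a no-op.

static void
poke_device_sketch(void)
{
	volatile uint32_t *regs;

	/* Map one page of device registers (VM_MEMATTR_DEFAULT). */
	regs = moea_mapdev(0xf8001000, PAGE_SIZE);
	regs[0] = 1;				/* hypothetical register write */
	moea_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}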
void | void | ||||
moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) | moea_unmapdev(vm_offset_t va, vm_size_t size) | ||||
{ | { | ||||
vm_offset_t base, offset; | vm_offset_t base, offset; | ||||
/* | /* | ||||
* If this is outside kernel virtual space, then it's a | * If this is outside kernel virtual space, then it's a | ||||
* battable entry and doesn't require unmapping | * battable entry and doesn't require unmapping | ||||
*/ | */ | ||||
if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) { | if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) { | ||||
base = trunc_page(va); | base = trunc_page(va); | ||||
offset = va & PAGE_MASK; | offset = va & PAGE_MASK; | ||||
size = roundup(offset + size, PAGE_SIZE); | size = roundup(offset + size, PAGE_SIZE); | ||||
kva_free(base, size); | kva_free(base, size); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) | moea_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_offset_t lim; | vm_offset_t lim; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
vm_size_t len; | vm_size_t len; | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
while (sz > 0) { | while (sz > 0) { | ||||
lim = round_page(va + 1); | lim = round_page(va + 1); | ||||
len = MIN(lim - va, sz); | len = MIN(lim - va, sz); | ||||
pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); | pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); | ||||
if (pvo != NULL) { | if (pvo != NULL) { | ||||
pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | | pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | | ||||
(va & ADDR_POFF); | (va & ADDR_POFF); | ||||
moea_syncicache(pa, len); | moea_syncicache(pa, len); | ||||
} | } | ||||
va += len; | va += len; | ||||
sz -= len; | sz -= len; | ||||
} | } | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
} | } | ||||
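The loop never lets a single moea_syncicache() call cross a page boundary: lim is va + 1 rounded up to the next page, so a straddling range is split. A standalone trace of the chunking (4 KB pages assumed):

#include <stdio.h>

#define PAGE_SIZE	4096UL				/* assumed */
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	unsigned long va = 0x1ff0, sz = 0x40, lim, len;

	/* Same chunking as moea_sync_icache(): 0x10 bytes up to the
	 * boundary at 0x2000, then the remaining 0x30 bytes. */
	while (sz > 0) {
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		printf("sync va=0x%lx len=0x%lx\n", va, len);
		va += len;
		sz -= len;
	}
	return (0);
}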
void | void | ||||
moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va) | moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va) | ||||
{ | { | ||||
*va = (void *)pa; | *va = (void *)pa; | ||||
} | } | ||||
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; | extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; | ||||
void | void | ||||
moea_scan_init(mmu_t mmu) | moea_scan_init() | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
int i; | int i; | ||||
if (!do_minidump) { | if (!do_minidump) { | ||||
/* Initialize phys. segments for dumpsys(). */ | /* Initialize phys. segments for dumpsys(). */ | ||||
memset(&dump_map, 0, sizeof(dump_map)); | memset(&dump_map, 0, sizeof(dump_map)); | ||||