Changeset View
Changeset View
Standalone View
Standalone View
sys/powerpc/aim/mmu_radix.c
Show First 20 Lines • Show All 419 Lines • ▼ Show 20 Lines | |||||
static u_int64_t KPTphys; /* phys addr of kernel level 1 */ | static u_int64_t KPTphys; /* phys addr of kernel level 1 */ | ||||
//static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */ | //static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */ | ||||
static vm_offset_t qframe = 0; | static vm_offset_t qframe = 0; | ||||
static struct mtx qframe_mtx; | static struct mtx qframe_mtx; | ||||
void mmu_radix_activate(mmu_t mmu, struct thread *); | void mmu_radix_activate(struct thread *); | ||||
void mmu_radix_advise(mmu_t mmu, pmap_t, vm_offset_t, vm_offset_t, int); | void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int); | ||||
void mmu_radix_align_superpage(mmu_t mmu, vm_object_t, vm_ooffset_t, vm_offset_t *, | void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *, | ||||
vm_size_t); | vm_size_t); | ||||
void mmu_radix_clear_modify(mmu_t, vm_page_t); | void mmu_radix_clear_modify(vm_page_t); | ||||
void mmu_radix_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); | void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); | ||||
int mmu_radix_map_user_ptr(mmu_t mmu, pmap_t pm, | int mmu_radix_map_user_ptr(pmap_t pm, | ||||
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); | volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); | ||||
int mmu_radix_decode_kernel_ptr(mmu_t, vm_offset_t, int *, vm_offset_t *); | int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *); | ||||
int mmu_radix_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t); | int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t); | ||||
void mmu_radix_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, | void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t, | ||||
vm_prot_t); | vm_prot_t); | ||||
void mmu_radix_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); | void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t); | ||||
vm_paddr_t mmu_radix_extract(mmu_t, pmap_t pmap, vm_offset_t va); | vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va); | ||||
vm_page_t mmu_radix_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); | vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t); | ||||
void mmu_radix_kenter(mmu_t, vm_offset_t, vm_paddr_t); | void mmu_radix_kenter(vm_offset_t, vm_paddr_t); | ||||
vm_paddr_t mmu_radix_kextract(mmu_t, vm_offset_t); | vm_paddr_t mmu_radix_kextract(vm_offset_t); | ||||
void mmu_radix_kremove(mmu_t, vm_offset_t); | void mmu_radix_kremove(vm_offset_t); | ||||
boolean_t mmu_radix_is_modified(mmu_t, vm_page_t); | boolean_t mmu_radix_is_modified(vm_page_t); | ||||
boolean_t mmu_radix_is_prefaultable(mmu_t, pmap_t, vm_offset_t); | boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t); | ||||
boolean_t mmu_radix_is_referenced(mmu_t, vm_page_t); | boolean_t mmu_radix_is_referenced(vm_page_t); | ||||
void mmu_radix_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t, | void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t, | ||||
vm_pindex_t, vm_size_t); | vm_pindex_t, vm_size_t); | ||||
boolean_t mmu_radix_page_exists_quick(mmu_t, pmap_t, vm_page_t); | boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t); | ||||
void mmu_radix_page_init(mmu_t, vm_page_t); | void mmu_radix_page_init(vm_page_t); | ||||
boolean_t mmu_radix_page_is_mapped(mmu_t, vm_page_t m); | boolean_t mmu_radix_page_is_mapped(vm_page_t m); | ||||
void mmu_radix_page_set_memattr(mmu_t, vm_page_t, vm_memattr_t); | void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t); | ||||
int mmu_radix_page_wired_mappings(mmu_t, vm_page_t); | int mmu_radix_page_wired_mappings(vm_page_t); | ||||
void mmu_radix_pinit(mmu_t, pmap_t); | int mmu_radix_pinit(pmap_t); | ||||
void mmu_radix_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); | void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); | ||||
boolean_t mmu_radix_ps_enabled(mmu_t, pmap_t); | bool mmu_radix_ps_enabled(pmap_t); | ||||
void mmu_radix_qenter(mmu_t, vm_offset_t, vm_page_t *, int); | void mmu_radix_qenter(vm_offset_t, vm_page_t *, int); | ||||
void mmu_radix_qremove(mmu_t, vm_offset_t, int); | void mmu_radix_qremove(vm_offset_t, int); | ||||
vm_offset_t mmu_radix_quick_enter_page(mmu_t, vm_page_t); | vm_offset_t mmu_radix_quick_enter_page(vm_page_t); | ||||
void mmu_radix_quick_remove_page(mmu_t, vm_offset_t); | void mmu_radix_quick_remove_page(vm_offset_t); | ||||
boolean_t mmu_radix_ts_referenced(mmu_t, vm_page_t); | boolean_t mmu_radix_ts_referenced(vm_page_t); | ||||
void mmu_radix_release(mmu_t, pmap_t); | void mmu_radix_release(pmap_t); | ||||
void mmu_radix_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); | void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t); | ||||
void mmu_radix_remove_all(mmu_t, vm_page_t); | void mmu_radix_remove_all(vm_page_t); | ||||
void mmu_radix_remove_pages(mmu_t, pmap_t); | void mmu_radix_remove_pages(pmap_t); | ||||
void mmu_radix_remove_write(mmu_t, vm_page_t); | void mmu_radix_remove_write(vm_page_t); | ||||
void mmu_radix_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); | void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t); | ||||
void mmu_radix_zero_page(mmu_t, vm_page_t); | void mmu_radix_zero_page(vm_page_t); | ||||
void mmu_radix_zero_page_area(mmu_t, vm_page_t, int, int); | void mmu_radix_zero_page_area(vm_page_t, int, int); | ||||
int mmu_radix_change_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); | int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t); | ||||
void mmu_radix_page_array_startup(mmu_t mmu, long pages); | void mmu_radix_page_array_startup(long pages); | ||||
#include "mmu_oea64.h" | #include "mmu_oea64.h" | ||||
#include "mmu_if.h" | |||||
#include "moea64_if.h" | |||||
/* | /* | ||||
* Kernel MMU interface | * Kernel MMU interface | ||||
*/ | */ | ||||
static void mmu_radix_bootstrap(mmu_t mmup, | static void mmu_radix_bootstrap(vm_offset_t, vm_offset_t); | ||||
vm_offset_t kernelstart, vm_offset_t kernelend); | |||||
static void mmu_radix_copy_page(mmu_t, vm_page_t, vm_page_t); | static void mmu_radix_copy_page(vm_page_t, vm_page_t); | ||||
static void mmu_radix_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, | static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset, | ||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize); | vm_page_t *mb, vm_offset_t b_offset, int xfersize); | ||||
static void mmu_radix_growkernel(mmu_t, vm_offset_t); | static void mmu_radix_growkernel(vm_offset_t); | ||||
static void mmu_radix_init(mmu_t); | static void mmu_radix_init(void); | ||||
static int mmu_radix_mincore(mmu_t, pmap_t, vm_offset_t, vm_paddr_t *); | static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *); | ||||
static vm_offset_t mmu_radix_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); | static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); | ||||
static void mmu_radix_pinit0(mmu_t, pmap_t); | static void mmu_radix_pinit0(pmap_t); | ||||
static void *mmu_radix_mapdev(mmu_t, vm_paddr_t, vm_size_t); | static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t); | ||||
static void *mmu_radix_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t); | static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t); | ||||
static void mmu_radix_unmapdev(mmu_t, vm_offset_t, vm_size_t); | static void mmu_radix_unmapdev(vm_offset_t, vm_size_t); | ||||
static void mmu_radix_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma); | static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma); | ||||
static boolean_t mmu_radix_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); | static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t); | ||||
static void mmu_radix_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, | static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va); | ||||
void **va); | static void mmu_radix_scan_init(void); | ||||
static void mmu_radix_scan_init(mmu_t mmu); | static void mmu_radix_cpu_bootstrap(int ap); | ||||
static void mmu_radix_cpu_bootstrap(mmu_t, int ap); | static void mmu_radix_tlbie_all(void); | ||||
static void mmu_radix_tlbie_all(mmu_t); | |||||
/*
 * Radix MMU method table: wires every pmap entry point that this
 * implementation provides into the machine-independent pmap dispatch
 * structure.  Slots left unset fall back to the installer's defaults.
 */
static struct pmap_funcs mmu_radix_methods = {
	.bootstrap = mmu_radix_bootstrap,
	.copy_page = mmu_radix_copy_page,
	.copy_pages = mmu_radix_copy_pages,
	.cpu_bootstrap = mmu_radix_cpu_bootstrap,
	.growkernel = mmu_radix_growkernel,
	.init = mmu_radix_init,
	.map = mmu_radix_map,
	.mincore = mmu_radix_mincore,
	.pinit = mmu_radix_pinit,
	.pinit0 = mmu_radix_pinit0,
	.mapdev = mmu_radix_mapdev,
	.mapdev_attr = mmu_radix_mapdev_attr,
	.unmapdev = mmu_radix_unmapdev,
	.kenter_attr = mmu_radix_kenter_attr,
	.dev_direct_mapped = mmu_radix_dev_direct_mapped,
	/* Kernel-dump support: PA enumeration and chunk mapping. */
	.dumpsys_pa_init = mmu_radix_scan_init,
	.dumpsys_map_chunk = mmu_radix_dumpsys_map,
	.page_is_mapped = mmu_radix_page_is_mapped,
	.ps_enabled = mmu_radix_ps_enabled,
	.object_init_pt = mmu_radix_object_init_pt,
	.protect = mmu_radix_protect,
	/* pmap dispatcher interface */
	.clear_modify = mmu_radix_clear_modify,
	.copy = mmu_radix_copy,
	.enter = mmu_radix_enter,
	.enter_object = mmu_radix_enter_object,
	.enter_quick = mmu_radix_enter_quick,
	.extract = mmu_radix_extract,
	.extract_and_hold = mmu_radix_extract_and_hold,
	.is_modified = mmu_radix_is_modified,
	.is_prefaultable = mmu_radix_is_prefaultable,
	.is_referenced = mmu_radix_is_referenced,
	.ts_referenced = mmu_radix_ts_referenced,
	.page_exists_quick = mmu_radix_page_exists_quick,
	.page_init = mmu_radix_page_init,
	.page_wired_mappings = mmu_radix_page_wired_mappings,
	.qenter = mmu_radix_qenter,
	.qremove = mmu_radix_qremove,
	.release = mmu_radix_release,
	.remove = mmu_radix_remove,
	.remove_all = mmu_radix_remove_all,
	.remove_write = mmu_radix_remove_write,
	.unwire = mmu_radix_unwire,
	.zero_page = mmu_radix_zero_page,
	.zero_page_area = mmu_radix_zero_page_area,
	.activate = mmu_radix_activate,
	.quick_enter_page = mmu_radix_quick_enter_page,
	.quick_remove_page = mmu_radix_quick_remove_page,
	.page_set_memattr = mmu_radix_page_set_memattr,
	.page_array_startup = mmu_radix_page_array_startup,
	/* Internal interfaces */
	.kenter = mmu_radix_kenter,
	.kextract = mmu_radix_kextract,
	.kremove = mmu_radix_kremove,
	.change_attr = mmu_radix_change_attr,
	.map_user_ptr = mmu_radix_map_user_ptr,
	.decode_kernel_ptr = mmu_radix_decode_kernel_ptr,
	.tlbie_all = mmu_radix_tlbie_all,
};

/* Register this MMU implementation under the radix type identifier. */
MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
#define METHODVOID(m) mmu_radix_ ## m(mmu_t mmup) | |||||
static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, | static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, | ||||
struct rwlock **lockp); | struct rwlock **lockp); | ||||
static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va); | static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va); | ||||
static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *); | static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *); | ||||
static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva, | static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva, | ||||
struct spglist *free, struct rwlock **lockp); | struct spglist *free, struct rwlock **lockp); | ||||
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, | static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, | ||||
pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp); | pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp); | ||||
▲ Show 20 Lines • Show All 209 Lines • ▼ Show 20 Lines | MPASS(scope == TLB_INVAL_SCOPE_LPID || | ||||
scope == TLB_INVAL_SCOPE_GLOBAL); | scope == TLB_INVAL_SCOPE_GLOBAL); | ||||
is = scope + 2; | is = scope + 2; | ||||
tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, is); | tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, is); | ||||
__asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); | __asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_tlbie_all(mmu_t __unused mmu) | mmu_radix_tlbie_all() | ||||
{ | { | ||||
/* TODO: LPID invalidate */ | /* TODO: LPID invalidate */ | ||||
mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL); | mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_init_amor(void) | mmu_radix_init_amor(void) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 112 Lines • ▼ Show 20 Lines | kvtopte(vm_offset_t va) | ||||
l3e = pmap_pml3e(kernel_pmap, va); | l3e = pmap_pml3e(kernel_pmap, va); | ||||
if ((*l3e & RPTE_VALID) == 0) | if ((*l3e & RPTE_VALID) == 0) | ||||
return (NULL); | return (NULL); | ||||
return (pmap_l3e_to_pte(l3e, va)); | return (pmap_l3e_to_pte(l3e, va)); | ||||
} | } | ||||
void | void | ||||
mmu_radix_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) | mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa) | ||||
{ | { | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
pte = kvtopte(va); | pte = kvtopte(va); | ||||
MPASS(pte != NULL); | MPASS(pte != NULL); | ||||
*pte = pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | RPTE_EAA_W | \ | *pte = pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | RPTE_EAA_W | \ | ||||
RPTE_EAA_P | PG_M | PG_A; | RPTE_EAA_P | PG_M | PG_A; | ||||
} | } | ||||
boolean_t | bool | ||||
mmu_radix_ps_enabled(mmu_t mmu, pmap_t pmap) | mmu_radix_ps_enabled(pmap_t pmap) | ||||
{ | { | ||||
return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0); | return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0); | ||||
} | } | ||||
static pt_entry_t * | static pt_entry_t * | ||||
pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e) | pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e) | ||||
{ | { | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
▲ Show 20 Lines • Show All 90 Lines • ▼ Show 20 Lines | protfail: | ||||
return (KERN_PROTECTION_FAILURE); | return (KERN_PROTECTION_FAILURE); | ||||
} | } | ||||
/* | /* | ||||
* Returns TRUE if the given page is mapped individually or as part of | * Returns TRUE if the given page is mapped individually or as part of | ||||
* a 2mpage. Otherwise, returns FALSE. | * a 2mpage. Otherwise, returns FALSE. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
mmu_radix_page_is_mapped(mmu_t mmu, vm_page_t m) | mmu_radix_page_is_mapped(vm_page_t m) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
boolean_t rv; | boolean_t rv; | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) | if ((m->oflags & VPO_UNMANAGED) != 0) | ||||
return (FALSE); | return (FALSE); | ||||
lock = VM_PAGE_TO_PV_LIST_LOCK(m); | lock = VM_PAGE_TO_PV_LIST_LOCK(m); | ||||
rw_rlock(lock); | rw_rlock(lock); | ||||
▲ Show 20 Lines • Show All 994 Lines • ▼ Show 20 Lines | mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end) | ||||
validate_addr(proctab0pa, proctab_size); | validate_addr(proctab0pa, proctab_size); | ||||
for (int i = 0; i < proctab_size/PAGE_SIZE; i++) | for (int i = 0; i < proctab_size/PAGE_SIZE; i++) | ||||
pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE)); | pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE)); | ||||
mmu_radix_setup_pagetables(hwphyssz); | mmu_radix_setup_pagetables(hwphyssz); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_late_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end) | mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end) | ||||
{ | { | ||||
int i; | int i; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
void *dpcpu; | void *dpcpu; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
/* | /* | ||||
* Set up the Open Firmware pmap and add its mappings if not in real | * Set up the Open Firmware pmap and add its mappings if not in real | ||||
Show All 26 Lines | mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end) | ||||
* into the kernel page map. | * into the kernel page map. | ||||
*/ | */ | ||||
pa = allocpages(kstack_pages); | pa = allocpages(kstack_pages); | ||||
va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; | va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; | ||||
virtual_avail = va + kstack_pages * PAGE_SIZE; | virtual_avail = va + kstack_pages * PAGE_SIZE; | ||||
CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); | CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); | ||||
thread0.td_kstack = va; | thread0.td_kstack = va; | ||||
for (i = 0; i < kstack_pages; i++) { | for (i = 0; i < kstack_pages; i++) { | ||||
mmu_radix_kenter(mmu, va, pa); | mmu_radix_kenter(va, pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
thread0.td_kstack_pages = kstack_pages; | thread0.td_kstack_pages = kstack_pages; | ||||
/* | /* | ||||
* Allocate virtual address space for the message buffer. | * Allocate virtual address space for the message buffer. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 94 Lines • ▼ Show 20 Lines | printf("process table %p and kernel radix PDE: %p\n", | ||||
isa3_proctab, kernel_pmap->pm_pml1); | isa3_proctab, kernel_pmap->pm_pml1); | ||||
mtmsr(mfmsr() | PSL_DR ); | mtmsr(mfmsr() | PSL_DR ); | ||||
mtmsr(mfmsr() & ~PSL_DR); | mtmsr(mfmsr() & ~PSL_DR); | ||||
kernel_pmap->pm_pid = isa3_base_pid; | kernel_pmap->pm_pid = isa3_base_pid; | ||||
isa3_base_pid++; | isa3_base_pid++; | ||||
} | } | ||||
void | void | ||||
mmu_radix_advise(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, | mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, | ||||
int advice) | int advice) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pml1_entry_t *l1e; | pml1_entry_t *l1e; | ||||
pml2_entry_t *l2e; | pml2_entry_t *l2e; | ||||
pml3_entry_t oldl3e, *l3e; | pml3_entry_t oldl3e, *l3e; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
vm_offset_t va, va_next; | vm_offset_t va, va_next; | ||||
▲ Show 20 Lines • Show All 97 Lines • ▼ Show 20 Lines | if (anychanged) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
/* | /* | ||||
* Routines used in machine-dependent code | * Routines used in machine-dependent code | ||||
*/ | */ | ||||
/*
 * Main bootstrap entry point for the radix MMU: builds the initial page
 * tables, switches the CPU into radix translation mode, and finishes
 * VM-layer setup.  The SPR/partition-table/process-table ordering below
 * is hardware-mandated; do not reorder.
 */
static void
mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
{
	uint64_t lpcr;

	if (bootverbose)
		printf("%s\n", __func__);
	hw_direct_map = 1;
	/* Carve out page tables and the DMAP from physical memory. */
	mmu_radix_early_bootstrap(start, end);
	if (bootverbose)
		printf("early bootstrap complete\n");

	if (powernv_enabled) {
		/* Hypervisor mode: enable radix in the LPCR ourselves. */
		lpcr = mfspr(SPR_LPCR);
		mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		mmu_radix_parttab_init();
		mmu_radix_init_amor();
		if (bootverbose)
			printf("powernv init complete\n");
	}
	mmu_radix_init_iamr();
	mmu_radix_proctab_init();
	/* Attach the kernel pmap's PID so translation can be turned on. */
	mmu_radix_pid_set(kernel_pmap);
	/* XXX assume CPU_FTR_HVMODE */
	mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);

	mmu_radix_late_bootstrap(start, end);
	numa_mem_regions(&numa_pregions, &numa_pregions_sz);
	if (bootverbose)
		printf("%s done\n", __func__);
	pmap_bootstrapped = 1;
	/* DMAP covers all of RAM, rounded up to a 1GB superpage boundary. */
	dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE);
	PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS);
}
static void | static void | ||||
mmu_radix_cpu_bootstrap(mmu_t mmu, int ap) | mmu_radix_cpu_bootstrap(int ap) | ||||
{ | { | ||||
uint64_t lpcr; | uint64_t lpcr; | ||||
uint64_t ptcr; | uint64_t ptcr; | ||||
if (powernv_enabled) { | if (powernv_enabled) { | ||||
lpcr = mfspr(SPR_LPCR); | lpcr = mfspr(SPR_LPCR); | ||||
mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR); | mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR); | ||||
Show All 28 Lines | |||||
static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0, | static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0, | ||||
"1GB page mapping counters"); | "1GB page mapping counters"); | ||||
static u_long pmap_l2e_demotions; | static u_long pmap_l2e_demotions; | ||||
SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD, | SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD, | ||||
&pmap_l2e_demotions, 0, "1GB page demotions"); | &pmap_l2e_demotions, 0, "1GB page demotions"); | ||||
void | void | ||||
mmu_radix_clear_modify(mmu_t mmu, vm_page_t m) | mmu_radix_clear_modify(vm_page_t m) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
pv_entry_t next_pv, pv; | pv_entry_t next_pv, pv; | ||||
pml3_entry_t oldl3e, *l3e; | pml3_entry_t oldl3e, *l3e; | ||||
pt_entry_t oldpte, *pte; | pt_entry_t oldpte, *pte; | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
▲ Show 20 Lines • Show All 78 Lines • ▼ Show 20 Lines | if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { | ||||
pmap_invalidate_page(pmap, pv->pv_va); | pmap_invalidate_page(pmap, pv->pv_va); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
} | } | ||||
void | void | ||||
mmu_radix_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, | mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, | ||||
vm_size_t len, vm_offset_t src_addr) | vm_size_t len, vm_offset_t src_addr) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
struct spglist free; | struct spglist free; | ||||
vm_offset_t addr; | vm_offset_t addr; | ||||
vm_offset_t end_addr = src_addr + len; | vm_offset_t end_addr = src_addr + len; | ||||
vm_offset_t va_next; | vm_offset_t va_next; | ||||
vm_page_t dst_pdpg, dstmpte, srcmpte; | vm_page_t dst_pdpg, dstmpte, srcmpte; | ||||
▲ Show 20 Lines • Show All 140 Lines • ▼ Show 20 Lines | if (invalidate_all) | ||||
pmap_invalidate_all(dst_pmap); | pmap_invalidate_all(dst_pmap); | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
PMAP_UNLOCK(src_pmap); | PMAP_UNLOCK(src_pmap); | ||||
PMAP_UNLOCK(dst_pmap); | PMAP_UNLOCK(dst_pmap); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) | mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst) | ||||
{ | { | ||||
vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); | vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); | ||||
vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); | vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); | ||||
CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst); | CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst); | ||||
/* | /* | ||||
* XXX slow | * XXX slow | ||||
*/ | */ | ||||
bcopy((void *)src, (void *)dst, PAGE_SIZE); | bcopy((void *)src, (void *)dst, PAGE_SIZE); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_copy_pages(mmu_t mmu, vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], | mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], | ||||
vm_offset_t b_offset, int xfersize) | vm_offset_t b_offset, int xfersize) | ||||
{ | { | ||||
CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma, | CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma, | ||||
a_offset, mb, b_offset, xfersize); | a_offset, mb, b_offset, xfersize); | ||||
UNIMPLEMENTED(); | UNIMPLEMENTED(); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 104 Lines • ▼ Show 20 Lines | setpte: | ||||
return (0); | return (0); | ||||
fail: | fail: | ||||
atomic_add_long(&pmap_l3e_p_failures, 1); | atomic_add_long(&pmap_l3e_p_failures, 1); | ||||
return (KERN_FAILURE); | return (KERN_FAILURE); | ||||
} | } | ||||
#endif /* VM_NRESERVLEVEL > 0 */ | #endif /* VM_NRESERVLEVEL > 0 */ | ||||
int | int | ||||
mmu_radix_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, | mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, | ||||
vm_prot_t prot, u_int flags, int8_t psind) | vm_prot_t prot, u_int flags, int8_t psind) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
pt_entry_t newpte, origpte; | pt_entry_t newpte, origpte; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
vm_paddr_t opa, pa; | vm_paddr_t opa, pa; | ||||
▲ Show 20 Lines • Show All 274 Lines • ▼ Show 20 Lines | |||||
unchanged: | unchanged: | ||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
/* | /* | ||||
* If both the page table page and the reservation are fully | * If both the page table page and the reservation are fully | ||||
* populated, then attempt promotion. | * populated, then attempt promotion. | ||||
*/ | */ | ||||
if ((mpte == NULL || mpte->ref_count == NPTEPG) && | if ((mpte == NULL || mpte->ref_count == NPTEPG) && | ||||
mmu_radix_ps_enabled(mmu, pmap) && | mmu_radix_ps_enabled(pmap) && | ||||
(m->flags & PG_FICTITIOUS) == 0 && | (m->flags & PG_FICTITIOUS) == 0 && | ||||
vm_reserv_level_iffullpop(m) == 0 && | vm_reserv_level_iffullpop(m) == 0 && | ||||
pmap_promote_l3e(pmap, l3e, va, &lock) == 0) | pmap_promote_l3e(pmap, l3e, va, &lock) == 0) | ||||
invalidate_all = true; | invalidate_all = true; | ||||
#endif | #endif | ||||
if (invalidate_all) | if (invalidate_all) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
else if (invalidate_page) | else if (invalidate_page) | ||||
▲ Show 20 Lines • Show All 150 Lines • ▼ Show 20 Lines | pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags, | ||||
atomic_add_long(&pmap_l3e_mappings, 1); | atomic_add_long(&pmap_l3e_mappings, 1); | ||||
CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx" | CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx" | ||||
" in pmap %p", va, pmap); | " in pmap %p", va, pmap); | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
void | void | ||||
mmu_radix_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, | mmu_radix_enter_object(pmap_t pmap, vm_offset_t start, | ||||
vm_offset_t end, vm_page_t m_start, vm_prot_t prot) | vm_offset_t end, vm_page_t m_start, vm_prot_t prot) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
vm_page_t m, mpte; | vm_page_t m, mpte; | ||||
vm_pindex_t diff, psize; | vm_pindex_t diff, psize; | ||||
bool invalidate; | bool invalidate; | ||||
VM_OBJECT_ASSERT_LOCKED(m_start->object); | VM_OBJECT_ASSERT_LOCKED(m_start->object); | ||||
CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start, | CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start, | ||||
end, m_start, prot); | end, m_start, prot); | ||||
invalidate = false; | invalidate = false; | ||||
psize = atop(end - start); | psize = atop(end - start); | ||||
mpte = NULL; | mpte = NULL; | ||||
m = m_start; | m = m_start; | ||||
lock = NULL; | lock = NULL; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | ||||
va = start + ptoa(diff); | va = start + ptoa(diff); | ||||
if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end && | if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end && | ||||
m->psind == 1 && mmu_radix_ps_enabled(mmu, pmap) && | m->psind == 1 && mmu_radix_ps_enabled(pmap) && | ||||
pmap_enter_2mpage(pmap, va, m, prot, &lock)) | pmap_enter_2mpage(pmap, va, m, prot, &lock)) | ||||
m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1]; | m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1]; | ||||
else | else | ||||
mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot, | mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot, | ||||
mpte, &lock, &invalidate); | mpte, &lock, &invalidate); | ||||
m = TAILQ_NEXT(m, listq); | m = TAILQ_NEXT(m, listq); | ||||
} | } | ||||
ptesync(); | ptesync(); | ||||
▲ Show 20 Lines • Show All 107 Lines • ▼ Show 20 Lines | mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, | ||||
if ((m->oflags & VPO_UNMANAGED) == 0) | if ((m->oflags & VPO_UNMANAGED) == 0) | ||||
pa |= PG_MANAGED; | pa |= PG_MANAGED; | ||||
pte_store(pte, pa); | pte_store(pte, pa); | ||||
return (mpte); | return (mpte); | ||||
} | } | ||||
void | void | ||||
mmu_radix_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, | mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, | ||||
vm_prot_t prot) | vm_prot_t prot) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
bool invalidate; | bool invalidate; | ||||
lock = NULL; | lock = NULL; | ||||
invalidate = false; | invalidate = false; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock, | mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock, | ||||
&invalidate); | &invalidate); | ||||
ptesync(); | ptesync(); | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
if (invalidate) | if (invalidate) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
vm_paddr_t | vm_paddr_t | ||||
mmu_radix_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) | mmu_radix_extract(pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
l3e = pmap_pml3e(pmap, va); | l3e = pmap_pml3e(pmap, va); | ||||
if (__predict_false(l3e == NULL)) | if (__predict_false(l3e == NULL)) | ||||
return (0); | return (0); | ||||
Show All 15 Lines | if (*l3e & RPTE_LEAF) { | ||||
pa = *pte; | pa = *pte; | ||||
pa = (pa & PG_FRAME) | (va & PAGE_MASK); | pa = (pa & PG_FRAME) | (va & PAGE_MASK); | ||||
pa |= (va & PAGE_MASK); | pa |= (va & PAGE_MASK); | ||||
} | } | ||||
return (pa); | return (pa); | ||||
} | } | ||||
vm_page_t | vm_page_t | ||||
mmu_radix_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) | mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) | ||||
{ | { | ||||
pml3_entry_t l3e, *l3ep; | pml3_entry_t l3e, *l3ep; | ||||
pt_entry_t pte; | pt_entry_t pte; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
vm_page_t m; | vm_page_t m; | ||||
pa = 0; | pa = 0; | ||||
m = NULL; | m = NULL; | ||||
Show All 14 Lines | if (l3ep != NULL && (l3e = *l3ep)) { | ||||
if (m != NULL && !vm_page_wire_mapped(m)) | if (m != NULL && !vm_page_wire_mapped(m)) | ||||
m = NULL; | m = NULL; | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (m); | return (m); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_growkernel(mmu_t mmu, vm_offset_t addr) | mmu_radix_growkernel(vm_offset_t addr) | ||||
{ | { | ||||
vm_paddr_t paddr; | vm_paddr_t paddr; | ||||
vm_page_t nkpg; | vm_page_t nkpg; | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
pml2_entry_t *l2e; | pml2_entry_t *l2e; | ||||
CTR2(KTR_PMAP, "%s(%#x)", __func__, addr); | CTR2(KTR_PMAP, "%s(%#x)", __func__, addr); | ||||
if (VM_MIN_KERNEL_ADDRESS < addr && | if (VM_MIN_KERNEL_ADDRESS < addr && | ||||
addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE)) | addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE)) | ||||
return; | return; | ||||
addr = roundup2(addr, L3_PAGE_SIZE); | addr = roundup2(addr, L3_PAGE_SIZE); | ||||
if (addr - 1 >= vm_map_max(kernel_map)) | if (addr - 1 >= vm_map_max(kernel_map)) | ||||
addr = vm_map_max(kernel_map); | addr = vm_map_max(kernel_map); | ||||
while (kernel_vm_end < addr) { | while (kernel_vm_end < addr) { | ||||
l2e = pmap_pml2e(kernel_pmap, kernel_vm_end); | l2e = pmap_pml2e(kernel_pmap, kernel_vm_end); | ||||
if ((*l2e & PG_V) == 0) { | if ((*l2e & PG_V) == 0) { | ||||
/* We need a new PDP entry */ | /* We need a new PDP entry */ | ||||
nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT, | nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT, | ||||
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | | VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | | ||||
VM_ALLOC_WIRED | VM_ALLOC_ZERO); | VM_ALLOC_WIRED | VM_ALLOC_ZERO); | ||||
if (nkpg == NULL) | if (nkpg == NULL) | ||||
panic("pmap_growkernel: no memory to grow kernel"); | panic("pmap_growkernel: no memory to grow kernel"); | ||||
if ((nkpg->flags & PG_ZERO) == 0) | if ((nkpg->flags & PG_ZERO) == 0) | ||||
mmu_radix_zero_page(mmu, nkpg); | mmu_radix_zero_page(nkpg); | ||||
paddr = VM_PAGE_TO_PHYS(nkpg); | paddr = VM_PAGE_TO_PHYS(nkpg); | ||||
pde_store(l2e, paddr); | pde_store(l2e, paddr); | ||||
continue; /* try again */ | continue; /* try again */ | ||||
} | } | ||||
l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end); | l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end); | ||||
if ((*l3e & PG_V) != 0) { | if ((*l3e & PG_V) != 0) { | ||||
kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK; | kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK; | ||||
if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | ||||
kernel_vm_end = vm_map_max(kernel_map); | kernel_vm_end = vm_map_max(kernel_map); | ||||
break; | break; | ||||
} | } | ||||
continue; | continue; | ||||
} | } | ||||
nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end), | nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end), | ||||
VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | | VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | | ||||
VM_ALLOC_ZERO); | VM_ALLOC_ZERO); | ||||
if (nkpg == NULL) | if (nkpg == NULL) | ||||
panic("pmap_growkernel: no memory to grow kernel"); | panic("pmap_growkernel: no memory to grow kernel"); | ||||
if ((nkpg->flags & PG_ZERO) == 0) | if ((nkpg->flags & PG_ZERO) == 0) | ||||
mmu_radix_zero_page(mmu, nkpg); | mmu_radix_zero_page(nkpg); | ||||
paddr = VM_PAGE_TO_PHYS(nkpg); | paddr = VM_PAGE_TO_PHYS(nkpg); | ||||
pde_store(l3e, paddr); | pde_store(l3e, paddr); | ||||
kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK; | kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK; | ||||
if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { | ||||
kernel_vm_end = vm_map_max(kernel_map); | kernel_vm_end = vm_map_max(kernel_map); | ||||
break; | break; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | for (int j = page_count-1; j >= 0; j--) { | ||||
vm_page_unwire_noq(&m[j]); | vm_page_unwire_noq(&m[j]); | ||||
SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss); | SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss); | ||||
} | } | ||||
vm_page_free_pages_toq(&free, false); | vm_page_free_pages_toq(&free, false); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_init(mmu_t mmu) | mmu_radix_init() | ||||
{ | { | ||||
vm_page_t mpte; | vm_page_t mpte; | ||||
vm_size_t s; | vm_size_t s; | ||||
int error, i, pv_npg; | int error, i, pv_npg; | ||||
/* L1TF, reserve page @0 unconditionally */ | /* L1TF, reserve page @0 unconditionally */ | ||||
vm_page_blacklist_add(0, bootverbose); | vm_page_blacklist_add(0, bootverbose); | ||||
▲ Show 20 Lines • Show All 150 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* pmap_is_modified: | * pmap_is_modified: | ||||
* | * | ||||
* Return whether or not the specified physical page was modified | * Return whether or not the specified physical page was modified | ||||
* in any physical maps. | * in any physical maps. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
mmu_radix_is_modified(mmu_t mmu, vm_page_t m) | mmu_radix_is_modified(vm_page_t m) | ||||
{ | { | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("pmap_is_modified: page %p is not managed", m)); | ("pmap_is_modified: page %p is not managed", m)); | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, m); | CTR2(KTR_PMAP, "%s(%p)", __func__, m); | ||||
/* | /* | ||||
* If the page is not busied then this check is racy. | * If the page is not busied then this check is racy. | ||||
*/ | */ | ||||
if (!pmap_page_is_write_mapped(m)) | if (!pmap_page_is_write_mapped(m)) | ||||
return (FALSE); | return (FALSE); | ||||
return (pmap_page_test_mappings(m, FALSE, TRUE)); | return (pmap_page_test_mappings(m, FALSE, TRUE)); | ||||
} | } | ||||
boolean_t | boolean_t | ||||
mmu_radix_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) | mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr) | ||||
{ | { | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
boolean_t rv; | boolean_t rv; | ||||
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); | CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); | ||||
rv = FALSE; | rv = FALSE; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
l3e = pmap_pml3e(pmap, addr); | l3e = pmap_pml3e(pmap, addr); | ||||
if (l3e != NULL && (*l3e & (RPTE_LEAF | PG_V)) == PG_V) { | if (l3e != NULL && (*l3e & (RPTE_LEAF | PG_V)) == PG_V) { | ||||
pte = pmap_l3e_to_pte(l3e, addr); | pte = pmap_l3e_to_pte(l3e, addr); | ||||
rv = (*pte & PG_V) == 0; | rv = (*pte & PG_V) == 0; | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
mmu_radix_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	/*
	 * NOTE(review): the (TRUE, FALSE) argument pair mirrors the
	 * (FALSE, TRUE) pair used by mmu_radix_is_modified(), so it
	 * presumably selects the referenced/accessed test — confirm
	 * against pmap_page_test_mappings().
	 */
	return (pmap_page_test_mappings(m, TRUE, FALSE));
}
/* | /* | ||||
Show All 12 Lines | |||||
* dirty pages. Those dirty pages will only be detected by a future call | * dirty pages. Those dirty pages will only be detected by a future call | ||||
* to pmap_is_modified(). | * to pmap_is_modified(). | ||||
* | * | ||||
* A DI block is not needed within this function, because | * A DI block is not needed within this function, because | ||||
* invalidations are performed before the PV list lock is | * invalidations are performed before the PV list lock is | ||||
* released. | * released. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
mmu_radix_ts_referenced(mmu_t mmu, vm_page_t m) | mmu_radix_ts_referenced(vm_page_t m) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pv_entry_t pv, pvf; | pv_entry_t pv, pvf; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pml3_entry_t oldl3e, *l3e; | pml3_entry_t oldl3e, *l3e; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
▲ Show 20 Lines • Show All 121 Lines • ▼ Show 20 Lines | if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) { | ||||
not_cleared < PMAP_TS_REFERENCED_MAX); | not_cleared < PMAP_TS_REFERENCED_MAX); | ||||
out: | out: | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
return (cleared + not_cleared); | return (cleared + not_cleared); | ||||
} | } | ||||
/*
 * Map a range of physical addresses [start, end) into kernel virtual
 * address space.  No page-table work is needed here: the physical range
 * is reachable through the direct map, so the DMAP alias of 'start' is
 * returned and '*virt' is intentionally left untouched.
 */
static vm_offset_t
mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
    vm_paddr_t end, int prot __unused)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
	    prot);
	return (PHYS_TO_DMAP(start));
}
void | void | ||||
mmu_radix_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, | mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr, | ||||
vm_object_t object, vm_pindex_t pindex, vm_size_t size) | vm_object_t object, vm_pindex_t pindex, vm_size_t size) | ||||
{ | { | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
vm_paddr_t pa, ptepa; | vm_paddr_t pa, ptepa; | ||||
vm_page_t p, pdpg; | vm_page_t p, pdpg; | ||||
vm_memattr_t ma; | vm_memattr_t ma; | ||||
CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr, | CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr, | ||||
object, pindex, size); | object, pindex, size); | ||||
VM_OBJECT_ASSERT_WLOCKED(object); | VM_OBJECT_ASSERT_WLOCKED(object); | ||||
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, | KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, | ||||
("pmap_object_init_pt: non-device object")); | ("pmap_object_init_pt: non-device object")); | ||||
/* NB: size can be logically ored with addr here */ | /* NB: size can be logically ored with addr here */ | ||||
if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) { | if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) { | ||||
if (!mmu_radix_ps_enabled(mmu, pmap)) | if (!mmu_radix_ps_enabled(pmap)) | ||||
return; | return; | ||||
if (!vm_object_populate(object, pindex, pindex + atop(size))) | if (!vm_object_populate(object, pindex, pindex + atop(size))) | ||||
return; | return; | ||||
p = vm_page_lookup(object, pindex); | p = vm_page_lookup(object, pindex); | ||||
KASSERT(p->valid == VM_PAGE_BITS_ALL, | KASSERT(p->valid == VM_PAGE_BITS_ALL, | ||||
("pmap_object_init_pt: invalid page %p", p)); | ("pmap_object_init_pt: invalid page %p", p)); | ||||
ma = p->md.mdpg_cache_attrs; | ma = p->md.mdpg_cache_attrs; | ||||
▲ Show 20 Lines • Show All 53 Lines • ▼ Show 20 Lines | for (pa = ptepa | pmap_cache_bits(ma); | ||||
addr += L3_PAGE_SIZE; | addr += L3_PAGE_SIZE; | ||||
} | } | ||||
ptesync(); | ptesync(); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
} | } | ||||
boolean_t | boolean_t | ||||
mmu_radix_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) | mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
int loops = 0; | int loops = 0; | ||||
boolean_t rv; | boolean_t rv; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
Show All 23 Lines | TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
rw_runlock(lock); | rw_runlock(lock); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
void | void | ||||
mmu_radix_page_init(mmu_t mmu, vm_page_t m) | mmu_radix_page_init(vm_page_t m) | ||||
{ | { | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, m); | CTR2(KTR_PMAP, "%s(%p)", __func__, m); | ||||
TAILQ_INIT(&m->md.pv_list); | TAILQ_INIT(&m->md.pv_list); | ||||
m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; | m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; | ||||
} | } | ||||
int | int | ||||
mmu_radix_page_wired_mappings(mmu_t mmu, vm_page_t m) | mmu_radix_page_wired_mappings(vm_page_t m) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
int count, md_gen, pvh_gen; | int count, md_gen, pvh_gen; | ||||
▲ Show 20 Lines • Show All 48 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
static void | static void | ||||
mmu_radix_update_proctab(int pid, pml1_entry_t l1pa) | mmu_radix_update_proctab(int pid, pml1_entry_t l1pa) | ||||
{ | { | ||||
isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT); | isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT); | ||||
} | } | ||||
void | int | ||||
mmu_radix_pinit(mmu_t mmu, pmap_t pmap) | mmu_radix_pinit(pmap_t pmap) | ||||
{ | { | ||||
vmem_addr_t pid; | vmem_addr_t pid; | ||||
vm_paddr_t l1pa; | vm_paddr_t l1pa; | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); | CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); | ||||
/* | /* | ||||
* allocate the page directory page | * allocate the page directory page | ||||
*/ | */ | ||||
pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK); | pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK); | ||||
for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++) | for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++) | ||||
pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE); | pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE); | ||||
pmap->pm_radix.rt_root = 0; | pmap->pm_radix.rt_root = 0; | ||||
TAILQ_INIT(&pmap->pm_pvchunk); | TAILQ_INIT(&pmap->pm_pvchunk); | ||||
bzero(&pmap->pm_stats, sizeof pmap->pm_stats); | bzero(&pmap->pm_stats, sizeof pmap->pm_stats); | ||||
pmap->pm_flags = PMAP_PDE_SUPERPAGE; | pmap->pm_flags = PMAP_PDE_SUPERPAGE; | ||||
vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid); | vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid); | ||||
pmap->pm_pid = pid; | pmap->pm_pid = pid; | ||||
l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1); | l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1); | ||||
mmu_radix_update_proctab(pid, l1pa); | mmu_radix_update_proctab(pid, l1pa); | ||||
__asm __volatile("ptesync;isync" : : : "memory"); | __asm __volatile("ptesync;isync" : : : "memory"); | ||||
return (1); | |||||
} | } | ||||
/* | /* | ||||
* This routine is called if the desired page table page does not exist. | * This routine is called if the desired page table page does not exist. | ||||
* | * | ||||
* If page table page allocation fails, this routine may sleep before | * If page table page allocation fails, this routine may sleep before | ||||
* returning NULL. It sleeps only if a lock pointer was given. | * returning NULL. It sleeps only if a lock pointer was given. | ||||
* | * | ||||
Show All 22 Lines | if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | | ||||
} | } | ||||
/* | /* | ||||
* Indicate the need to retry. While waiting, the page table | * Indicate the need to retry. While waiting, the page table | ||||
* page may have been allocated. | * page may have been allocated. | ||||
*/ | */ | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
if ((m->flags & PG_ZERO) == 0) | if ((m->flags & PG_ZERO) == 0) | ||||
mmu_radix_zero_page(NULL, m); | mmu_radix_zero_page(m); | ||||
/* | /* | ||||
* Map the pagetable page into the process address space, if | * Map the pagetable page into the process address space, if | ||||
* it isn't already there. | * it isn't already there. | ||||
*/ | */ | ||||
if (ptepindex >= (NUPDE + NUPDPE)) { | if (ptepindex >= (NUPDE + NUPDPE)) { | ||||
pml1_entry_t *l1e; | pml1_entry_t *l1e; | ||||
▲ Show 20 Lines • Show All 154 Lines • ▼ Show 20 Lines | if (pd != NULL && (*pd & PG_V) != 0) { | ||||
m = _pmap_allocpte(pmap, ptepindex, lockp); | m = _pmap_allocpte(pmap, ptepindex, lockp); | ||||
if (m == NULL && lockp != NULL) | if (m == NULL && lockp != NULL) | ||||
goto retry; | goto retry; | ||||
} | } | ||||
return (m); | return (m); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_pinit0(mmu_t mmu, pmap_t pmap) | mmu_radix_pinit0(pmap_t pmap) | ||||
{ | { | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); | CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); | ||||
PMAP_LOCK_INIT(pmap); | PMAP_LOCK_INIT(pmap); | ||||
pmap->pm_pml1 = kernel_pmap->pm_pml1; | pmap->pm_pml1 = kernel_pmap->pm_pml1; | ||||
pmap->pm_pid = kernel_pmap->pm_pid; | pmap->pm_pid = kernel_pmap->pm_pid; | ||||
pmap->pm_radix.rt_root = 0; | pmap->pm_radix.rt_root = 0; | ||||
▲ Show 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | if (newpde != oldpde) { | ||||
if (!atomic_cmpset_long(l3e, oldpde, newpde & ~PG_PROMOTED)) | if (!atomic_cmpset_long(l3e, oldpde, newpde & ~PG_PROMOTED)) | ||||
goto retry; | goto retry; | ||||
anychanged = TRUE; | anychanged = TRUE; | ||||
} | } | ||||
return (anychanged); | return (anychanged); | ||||
} | } | ||||
void | void | ||||
mmu_radix_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, | mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, | ||||
vm_prot_t prot) | vm_prot_t prot) | ||||
{ | { | ||||
vm_offset_t va_next; | vm_offset_t va_next; | ||||
pml1_entry_t *l1e; | pml1_entry_t *l1e; | ||||
pml2_entry_t *l2e; | pml2_entry_t *l2e; | ||||
pml3_entry_t ptpaddr, *l3e; | pml3_entry_t ptpaddr, *l3e; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
boolean_t anychanged; | boolean_t anychanged; | ||||
CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva, | CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva, | ||||
prot); | prot); | ||||
KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); | KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); | ||||
if (prot == VM_PROT_NONE) { | if (prot == VM_PROT_NONE) { | ||||
mmu_radix_remove(mmu, pmap, sva, eva); | mmu_radix_remove(pmap, sva, eva); | ||||
return; | return; | ||||
} | } | ||||
if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == | if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == | ||||
(VM_PROT_WRITE|VM_PROT_EXECUTE)) | (VM_PROT_WRITE|VM_PROT_EXECUTE)) | ||||
return; | return; | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
▲ Show 20 Lines • Show All 95 Lines • ▼ Show 20 Lines | #endif | ||||
} | } | ||||
} | } | ||||
if (anychanged) | if (anychanged) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
void | void | ||||
mmu_radix_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *ma, int count) | mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count) | ||||
{ | { | ||||
CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count); | CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count); | ||||
pt_entry_t oldpte, pa, *pte; | pt_entry_t oldpte, pa, *pte; | ||||
vm_page_t m; | vm_page_t m; | ||||
uint64_t cache_bits, attr_bits; | uint64_t cache_bits, attr_bits; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
Show All 25 Lines | mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count) | ||||
if (__predict_false((oldpte & RPTE_VALID) != 0)) | if (__predict_false((oldpte & RPTE_VALID) != 0)) | ||||
pmap_invalidate_range(kernel_pmap, sva, sva + count * | pmap_invalidate_range(kernel_pmap, sva, sva + count * | ||||
PAGE_SIZE); | PAGE_SIZE); | ||||
else | else | ||||
ptesync(); | ptesync(); | ||||
} | } | ||||
void | void | ||||
mmu_radix_qremove(mmu_t mmu, vm_offset_t sva, int count) | mmu_radix_qremove(vm_offset_t sva, int count) | ||||
{ | { | ||||
vm_offset_t va; | vm_offset_t va; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count); | CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count); | ||||
KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva)); | KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva)); | ||||
va = sva; | va = sva; | ||||
▲ Show 20 Lines • Show All 134 Lines • ▼ Show 20 Lines | pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde, | ||||
if (va >= VM_MAXUSER_ADDRESS) | if (va >= VM_MAXUSER_ADDRESS) | ||||
return (0); | return (0); | ||||
KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); | KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); | ||||
mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); | mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); | ||||
return (pmap_unwire_ptp(pmap, va, mpte, free)); | return (pmap_unwire_ptp(pmap, va, mpte, free)); | ||||
} | } | ||||
void | void | ||||
mmu_radix_release(mmu_t mmu, pmap_t pmap) | mmu_radix_release(pmap_t pmap) | ||||
{ | { | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); | CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); | ||||
KASSERT(pmap->pm_stats.resident_count == 0, | KASSERT(pmap->pm_stats.resident_count == 0, | ||||
("pmap_release: pmap resident count %ld != 0", | ("pmap_release: pmap resident count %ld != 0", | ||||
pmap->pm_stats.resident_count)); | pmap->pm_stats.resident_count)); | ||||
KASSERT(vm_radix_is_empty(&pmap->pm_radix), | KASSERT(vm_radix_is_empty(&pmap->pm_radix), | ||||
("pmap_release: pmap has reserved page table page(s)")); | ("pmap_release: pmap has reserved page table page(s)")); | ||||
▲ Show 20 Lines • Show All 346 Lines • ▼ Show 20 Lines | if (anyvalid) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
else if (va != eva) | else if (va != eva) | ||||
pmap_invalidate_range(pmap, va, sva); | pmap_invalidate_range(pmap, va, sva); | ||||
return (anyvalid); | return (anyvalid); | ||||
} | } | ||||
void | void | ||||
mmu_radix_remove(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
vm_offset_t va_next; | vm_offset_t va_next; | ||||
pml1_entry_t *l1e; | pml1_entry_t *l1e; | ||||
pml2_entry_t *l2e; | pml2_entry_t *l2e; | ||||
pml3_entry_t ptpaddr, *l3e; | pml3_entry_t ptpaddr, *l3e; | ||||
struct spglist free; | struct spglist free; | ||||
bool anyvalid; | bool anyvalid; | ||||
▲ Show 20 Lines • Show All 100 Lines • ▼ Show 20 Lines | |||||
out: | out: | ||||
if (anyvalid) | if (anyvalid) | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
} | } | ||||
void | void | ||||
mmu_radix_remove_all(mmu_t mmu, vm_page_t m) | mmu_radix_remove_all(vm_page_t m) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pt_entry_t *pte, tpte; | pt_entry_t *pte, tpte; | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
▲ Show 20 Lines • Show All 91 Lines • ▼ Show 20 Lines | |||||
* pmap_delayed_invl_finished(). Because the pmap is not active on | * pmap_delayed_invl_finished(). Because the pmap is not active on | ||||
* any other processor, none of these TLB entries will ever be used | * any other processor, none of these TLB entries will ever be used | ||||
* before their eventual invalidation. Consequently, there is no need | * before their eventual invalidation. Consequently, there is no need | ||||
* for either pmap_remove_all() or pmap_remove_write() to wait for | * for either pmap_remove_all() or pmap_remove_write() to wait for | ||||
* that eventual TLB invalidation. | * that eventual TLB invalidation. | ||||
*/ | */ | ||||
void | void | ||||
mmu_radix_remove_pages(mmu_t mmu, pmap_t pmap) | mmu_radix_remove_pages(pmap_t pmap) | ||||
{ | { | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); | CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); | ||||
pml3_entry_t ptel3e; | pml3_entry_t ptel3e; | ||||
pt_entry_t *pte, tpte; | pt_entry_t *pte, tpte; | ||||
struct spglist free; | struct spglist free; | ||||
vm_page_t m, mpte, mt; | vm_page_t m, mpte, mt; | ||||
pv_entry_t pv; | pv_entry_t pv; | ||||
▲ Show 20 Lines • Show All 152 Lines • ▼ Show 20 Lines | #endif | ||||
if (lock != NULL) | if (lock != NULL) | ||||
rw_wunlock(lock); | rw_wunlock(lock); | ||||
pmap_invalidate_all(pmap); | pmap_invalidate_all(pmap); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
vm_page_free_pages_toq(&free, true); | vm_page_free_pages_toq(&free, true); | ||||
} | } | ||||
void | void | ||||
mmu_radix_remove_write(mmu_t mmu, vm_page_t m) | mmu_radix_remove_write(vm_page_t m) | ||||
{ | { | ||||
struct md_page *pvh; | struct md_page *pvh; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
struct rwlock *lock; | struct rwlock *lock; | ||||
pv_entry_t next_pv, pv; | pv_entry_t next_pv, pv; | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
pt_entry_t oldpte, *pte; | pt_entry_t oldpte, *pte; | ||||
int pvh_gen, md_gen; | int pvh_gen, md_gen; | ||||
▲ Show 20 Lines • Show All 75 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* The wired attribute of the page table entry is not a hardware | * The wired attribute of the page table entry is not a hardware | ||||
* feature, so there is no need to invalidate any TLB entries. | * feature, so there is no need to invalidate any TLB entries. | ||||
* Since pmap_demote_l3e() for the wired entry must never fail, | * Since pmap_demote_l3e() for the wired entry must never fail, | ||||
* pmap_delayed_invl_started()/finished() calls around the | * pmap_delayed_invl_started()/finished() calls around the | ||||
* function are not needed. | * function are not needed. | ||||
*/ | */ | ||||
void | void | ||||
mmu_radix_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
vm_offset_t va_next; | vm_offset_t va_next; | ||||
pml1_entry_t *l1e; | pml1_entry_t *l1e; | ||||
pml2_entry_t *l2e; | pml2_entry_t *l2e; | ||||
pml3_entry_t *l3e; | pml3_entry_t *l3e; | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva); | CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva); | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++, | ||||
atomic_clear_long(pte, PG_W); | atomic_clear_long(pte, PG_W); | ||||
pmap->pm_stats.wired_count--; | pmap->pm_stats.wired_count--; | ||||
} | } | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
void | void | ||||
mmu_radix_zero_page(mmu_t mmu, vm_page_t m) | mmu_radix_zero_page(vm_page_t m) | ||||
{ | { | ||||
vm_offset_t addr; | vm_offset_t addr; | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, m); | CTR2(KTR_PMAP, "%s(%p)", __func__, m); | ||||
addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); | addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); | ||||
pagezero(addr); | pagezero(addr); | ||||
} | } | ||||
void | void | ||||
mmu_radix_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) | mmu_radix_zero_page_area(vm_page_t m, int off, int size) | ||||
{ | { | ||||
caddr_t addr; | caddr_t addr; | ||||
CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size); | CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size); | ||||
MPASS(off + size <= PAGE_SIZE); | MPASS(off + size <= PAGE_SIZE); | ||||
addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); | addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); | ||||
memset(addr + off, 0, size); | memset(addr + off, 0, size); | ||||
} | } | ||||
static int | static int | ||||
mmu_radix_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, | mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) | ||||
vm_paddr_t *locked_pa) | |||||
{ | { | ||||
pml3_entry_t *l3ep; | pml3_entry_t *l3ep; | ||||
pt_entry_t pte; | pt_entry_t pte; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
int val; | int val; | ||||
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); | CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
Show All 28 Lines | if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != | ||||
(pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { | (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { | ||||
*locked_pa = pa; | *locked_pa = pa; | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (val); | return (val); | ||||
} | } | ||||
void | void | ||||
mmu_radix_activate(mmu_t mmu, struct thread *td) | mmu_radix_activate(struct thread *td) | ||||
{ | { | ||||
pmap_t pmap; | pmap_t pmap; | ||||
uint32_t curpid; | uint32_t curpid; | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, td); | CTR2(KTR_PMAP, "%s(%p)", __func__, td); | ||||
critical_enter(); | critical_enter(); | ||||
pmap = vmspace_pmap(td->td_proc->p_vmspace); | pmap = vmspace_pmap(td->td_proc->p_vmspace); | ||||
curpid = mfspr(SPR_PID); | curpid = mfspr(SPR_PID); | ||||
if (pmap->pm_pid > isa3_base_pid && | if (pmap->pm_pid > isa3_base_pid && | ||||
curpid != pmap->pm_pid) { | curpid != pmap->pm_pid) { | ||||
mmu_radix_pid_set(pmap); | mmu_radix_pid_set(pmap); | ||||
} | } | ||||
critical_exit(); | critical_exit(); | ||||
} | } | ||||
/* | /* | ||||
* Increase the starting virtual address of the given mapping if a | * Increase the starting virtual address of the given mapping if a | ||||
* different alignment might result in more superpage mappings. | * different alignment might result in more superpage mappings. | ||||
*/ | */ | ||||
void | void | ||||
mmu_radix_align_superpage(mmu_t mmu, vm_object_t object, vm_ooffset_t offset, | mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset, | ||||
vm_offset_t *addr, vm_size_t size) | vm_offset_t *addr, vm_size_t size) | ||||
{ | { | ||||
CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr, | CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr, | ||||
size); | size); | ||||
vm_offset_t superpage_offset; | vm_offset_t superpage_offset; | ||||
if (size < L3_PAGE_SIZE) | if (size < L3_PAGE_SIZE) | ||||
return; | return; | ||||
if (object != NULL && (object->flags & OBJ_COLORED) != 0) | if (object != NULL && (object->flags & OBJ_COLORED) != 0) | ||||
offset += ptoa(object->pg_color); | offset += ptoa(object->pg_color); | ||||
superpage_offset = offset & L3_PAGE_MASK; | superpage_offset = offset & L3_PAGE_MASK; | ||||
if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE || | if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE || | ||||
(*addr & L3_PAGE_MASK) == superpage_offset) | (*addr & L3_PAGE_MASK) == superpage_offset) | ||||
return; | return; | ||||
if ((*addr & L3_PAGE_MASK) < superpage_offset) | if ((*addr & L3_PAGE_MASK) < superpage_offset) | ||||
*addr = (*addr & ~L3_PAGE_MASK) + superpage_offset; | *addr = (*addr & ~L3_PAGE_MASK) + superpage_offset; | ||||
else | else | ||||
*addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset; | *addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset; | ||||
} | } | ||||
static void * | static void * | ||||
mmu_radix_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t attr) | mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr) | ||||
{ | { | ||||
vm_offset_t va, tmpva, ppa, offset; | vm_offset_t va, tmpva, ppa, offset; | ||||
ppa = trunc_page(pa); | ppa = trunc_page(pa); | ||||
offset = pa & PAGE_MASK; | offset = pa & PAGE_MASK; | ||||
size = roundup2(offset + size, PAGE_SIZE); | size = roundup2(offset + size, PAGE_SIZE); | ||||
if (pa < powerpc_ptob(Maxmem)) | if (pa < powerpc_ptob(Maxmem)) | ||||
panic("bad pa: %#lx less than Maxmem %#lx\n", | panic("bad pa: %#lx less than Maxmem %#lx\n", | ||||
pa, powerpc_ptob(Maxmem)); | pa, powerpc_ptob(Maxmem)); | ||||
va = kva_alloc(size); | va = kva_alloc(size); | ||||
if (bootverbose) | if (bootverbose) | ||||
printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr); | printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr); | ||||
KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr)); | KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr)); | ||||
if (!va) | if (!va) | ||||
panic("%s: Couldn't alloc kernel virtual memory", __func__); | panic("%s: Couldn't alloc kernel virtual memory", __func__); | ||||
for (tmpva = va; size > 0;) { | for (tmpva = va; size > 0;) { | ||||
mmu_radix_kenter_attr(mmu, tmpva, ppa, attr); | mmu_radix_kenter_attr(tmpva, ppa, attr); | ||||
size -= PAGE_SIZE; | size -= PAGE_SIZE; | ||||
tmpva += PAGE_SIZE; | tmpva += PAGE_SIZE; | ||||
ppa += PAGE_SIZE; | ppa += PAGE_SIZE; | ||||
} | } | ||||
ptesync(); | ptesync(); | ||||
return ((void *)(va + offset)); | return ((void *)(va + offset)); | ||||
} | } | ||||
/*
 * Map a range of device physical addresses with the default memory
 * attribute; thin wrapper around mmu_radix_mapdev_attr().
 */
static void *
mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
{
	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);

	return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
}
void | void | ||||
mmu_radix_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) | mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma) | ||||
{ | { | ||||
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma); | CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma); | ||||
m->md.mdpg_cache_attrs = ma; | m->md.mdpg_cache_attrs = ma; | ||||
/* | /* | ||||
* If "m" is a normal page, update its direct mapping. This update | * If "m" is a normal page, update its direct mapping. This update | ||||
* can be relied upon to perform any cache operations that are | * can be relied upon to perform any cache operations that are | ||||
* required for data coherence. | * required for data coherence. | ||||
*/ | */ | ||||
if ((m->flags & PG_FICTITIOUS) == 0 && | if ((m->flags & PG_FICTITIOUS) == 0 && | ||||
mmu_radix_change_attr(mmu, PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), | mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), | ||||
PAGE_SIZE, m->md.mdpg_cache_attrs)) | PAGE_SIZE, m->md.mdpg_cache_attrs)) | ||||
panic("memory attribute change on the direct map failed"); | panic("memory attribute change on the direct map failed"); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) | mmu_radix_unmapdev(vm_offset_t va, vm_size_t size) | ||||
{ | { | ||||
vm_offset_t offset; | vm_offset_t offset; | ||||
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size); | CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size); | ||||
/* If we gave a direct map region in pmap_mapdev, do nothing */ | /* If we gave a direct map region in pmap_mapdev, do nothing */ | ||||
if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) | if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) | ||||
return; | return; | ||||
▲ Show 20 Lines • Show All 71 Lines • ▼ Show 20 Lines | pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va) | ||||
pmap_l2e_demotions++; | pmap_l2e_demotions++; | ||||
CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx" | CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx" | ||||
" in pmap %p", va, pmap); | " in pmap %p", va, pmap); | ||||
return (TRUE); | return (TRUE); | ||||
} | } | ||||
vm_paddr_t | vm_paddr_t | ||||
mmu_radix_kextract(mmu_t mmu, vm_offset_t va) | mmu_radix_kextract(vm_offset_t va) | ||||
{ | { | ||||
pml3_entry_t l3e; | pml3_entry_t l3e; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
CTR2(KTR_PMAP, "%s(%#x)", __func__, va); | CTR2(KTR_PMAP, "%s(%#x)", __func__, va); | ||||
if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { | if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { | ||||
pa = DMAP_TO_PHYS(va); | pa = DMAP_TO_PHYS(va); | ||||
} else { | } else { | ||||
Show All 34 Lines | for (int i = 0; i < pregions_sz; i++) { | ||||
if ((pa >= pregions[i].mr_start) && | if ((pa >= pregions[i].mr_start) && | ||||
(pa < (pregions[i].mr_start + pregions[i].mr_size))) | (pa < (pregions[i].mr_start + pregions[i].mr_size))) | ||||
return (RPTE_ATTR_MEM); | return (RPTE_ATTR_MEM); | ||||
} | } | ||||
return (RPTE_ATTR_GUARDEDIO); | return (RPTE_ATTR_GUARDEDIO); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) | mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) | ||||
{ | { | ||||
pt_entry_t *pte, pteval; | pt_entry_t *pte, pteval; | ||||
uint64_t cache_bits; | uint64_t cache_bits; | ||||
pte = kvtopte(va); | pte = kvtopte(va); | ||||
MPASS(pte != NULL); | MPASS(pte != NULL); | ||||
pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A; | pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A; | ||||
cache_bits = mmu_radix_calc_wimg(pa, ma); | cache_bits = mmu_radix_calc_wimg(pa, ma); | ||||
pte_store(pte, pteval | cache_bits); | pte_store(pte, pteval | cache_bits); | ||||
} | } | ||||
void | void | ||||
mmu_radix_kremove(mmu_t mmu, vm_offset_t va) | mmu_radix_kremove(vm_offset_t va) | ||||
{ | { | ||||
pt_entry_t *pte; | pt_entry_t *pte; | ||||
CTR2(KTR_PMAP, "%s(%#x)", __func__, va); | CTR2(KTR_PMAP, "%s(%#x)", __func__, va); | ||||
pte = kvtopte(va); | pte = kvtopte(va); | ||||
pte_clear(pte); | pte_clear(pte); | ||||
} | } | ||||
/*
 * Validate that the user range [uaddr, uaddr + ulen) lies within the
 * user portion of the address space and hand back a kernel-usable
 * alias.  With radix translation user memory is directly addressable,
 * so the "mapped" address is simply uaddr itself.
 *
 * Returns 0 on success, with *kaddr set (and *klen, when non-NULL, set
 * to ulen), or EFAULT if the range is not valid user memory.
 *
 * Fix: the previous check computed "uaddr + ulen", which can wrap on
 * overflow and appear to fall below VM_MAXUSER_ADDRESS; the check below
 * is overflow-safe and accepts exactly the same non-wrapping ranges.
 */
int mmu_radix_map_user_ptr(pmap_t pm,
    volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen)
{
	uintptr_t ua = (uintptr_t)uaddr;

	if (ua >= VM_MAXUSER_ADDRESS || ulen >= VM_MAXUSER_ADDRESS - ua)
		return (EFAULT);
	*kaddr = (void *)(uintptr_t)uaddr;
	if (klen)
		*klen = ulen;
	return (0);
}
int | int | ||||
mmu_radix_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, | mmu_radix_decode_kernel_ptr(vm_offset_t addr, | ||||
int *is_user, vm_offset_t *decoded) | int *is_user, vm_offset_t *decoded) | ||||
{ | { | ||||
CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr); | CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr); | ||||
*decoded = addr; | *decoded = addr; | ||||
*is_user = (addr < VM_MAXUSER_ADDRESS); | *is_user = (addr < VM_MAXUSER_ADDRESS); | ||||
return (0); | return (0); | ||||
} | } | ||||
static boolean_t | static boolean_t | ||||
mmu_radix_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) | mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size) | ||||
{ | { | ||||
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size); | CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size); | ||||
return (mem_valid(pa, size)); | return (mem_valid(pa, size)); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_scan_init(mmu_t mmup) | mmu_radix_scan_init() | ||||
{ | { | ||||
CTR1(KTR_PMAP, "%s()", __func__); | CTR1(KTR_PMAP, "%s()", __func__); | ||||
UNIMPLEMENTED(); | UNIMPLEMENTED(); | ||||
} | } | ||||
static void | static void | ||||
mmu_radix_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, | mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, | ||||
void **va) | void **va) | ||||
{ | { | ||||
CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va); | CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va); | ||||
UNIMPLEMENTED(); | UNIMPLEMENTED(); | ||||
} | } | ||||
vm_offset_t | vm_offset_t | ||||
mmu_radix_quick_enter_page(mmu_t mmu, vm_page_t m) | mmu_radix_quick_enter_page(vm_page_t m) | ||||
{ | { | ||||
vm_paddr_t paddr; | vm_paddr_t paddr; | ||||
CTR2(KTR_PMAP, "%s(%p)", __func__, m); | CTR2(KTR_PMAP, "%s(%p)", __func__, m); | ||||
paddr = VM_PAGE_TO_PHYS(m); | paddr = VM_PAGE_TO_PHYS(m); | ||||
return (PHYS_TO_DMAP(paddr)); | return (PHYS_TO_DMAP(paddr)); | ||||
} | } | ||||
/*
 * Release a mapping obtained from mmu_radix_quick_enter_page().  The
 * address is a permanent direct-map translation, so there is nothing
 * to undo.
 */
void
mmu_radix_quick_remove_page(vm_offset_t addr __unused)
{
	/* no work to do here */
	CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
}
/*
 * Flush the data cache for the kernel virtual range [sva, eva).
 */
static void
pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{
	cpu_flush_dcache((void *)sva, eva - sva);
}
int | int | ||||
mmu_radix_change_attr(mmu_t mmu, vm_offset_t va, vm_size_t size, | mmu_radix_change_attr(vm_offset_t va, vm_size_t size, | ||||
vm_memattr_t mode) | vm_memattr_t mode) | ||||
{ | { | ||||
int error; | int error; | ||||
CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode); | CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode); | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
error = pmap_change_attr_locked(va, size, mode, true); | error = pmap_change_attr_locked(va, size, mode, true); | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
▲ Show 20 Lines • Show All 209 Lines • ▼ Show 20 Lines | pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush) | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | /* | ||||
* Allocate physical memory for the vm_page array and map it into KVA, | * Allocate physical memory for the vm_page array and map it into KVA, | ||||
* attempting to back the vm_pages with domain-local memory. | * attempting to back the vm_pages with domain-local memory. | ||||
*/ | */ | ||||
void | void | ||||
mmu_radix_page_array_startup(mmu_t mmu, long pages) | mmu_radix_page_array_startup(long pages) | ||||
{ | { | ||||
#ifdef notyet | #ifdef notyet | ||||
pml2_entry_t *l2e; | pml2_entry_t *l2e; | ||||
pml3_entry_t *pde; | pml3_entry_t *pde; | ||||
pml3_entry_t newl3; | pml3_entry_t newl3; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
long pfn; | long pfn; | ||||
int domain, i; | int domain, i; | ||||
#endif | #endif | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
vm_offset_t start, end; | vm_offset_t start, end; | ||||
vm_page_array_size = pages; | vm_page_array_size = pages; | ||||
start = VM_MIN_KERNEL_ADDRESS; | start = VM_MIN_KERNEL_ADDRESS; | ||||
end = start + pages * sizeof(struct vm_page); | end = start + pages * sizeof(struct vm_page); | ||||
pa = vm_phys_early_alloc(0, end - start); | pa = vm_phys_early_alloc(0, end - start); | ||||
start = mmu_radix_map(mmu, &start, pa, end - start, VM_MEMATTR_DEFAULT); | start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT); | ||||
#ifdef notyet | #ifdef notyet | ||||
/* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */ | /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */ | ||||
for (va = start; va < end; va += L3_PAGE_SIZE) { | for (va = start; va < end; va += L3_PAGE_SIZE) { | ||||
pfn = first_page + (va - start) / sizeof(struct vm_page); | pfn = first_page + (va - start) / sizeof(struct vm_page); | ||||
domain = _vm_phys_domain(ptoa(pfn)); | domain = _vm_phys_domain(ptoa(pfn)); | ||||
l2e = pmap_pml2e(kernel_pmap, va); | l2e = pmap_pml2e(kernel_pmap, va); | ||||
if ((*l2e & PG_V) == 0) { | if ((*l2e & PG_V) == 0) { | ||||
pa = vm_phys_early_alloc(domain, PAGE_SIZE); | pa = vm_phys_early_alloc(domain, PAGE_SIZE); | ||||
▲ Show 20 Lines • Show All 94 Lines • Show Last 20 Lines |