sys/powerpc/aim/mmu_oea64.c
(78 lines elided)
#include <vm/vm_map.h> | #include <vm/vm_map.h> | ||||
#include <vm/vm_object.h> | #include <vm/vm_object.h> | ||||
#include <vm/vm_extern.h> | #include <vm/vm_extern.h> | ||||
#include <vm/vm_pageout.h> | #include <vm/vm_pageout.h> | ||||
#include <vm/uma.h> | #include <vm/uma.h> | ||||
#include <machine/_inttypes.h> | #include <machine/_inttypes.h> | ||||
#include <machine/cpu.h> | #include <machine/cpu.h> | ||||
#include <machine/ifunc.h> | |||||
#include <machine/platform.h> | #include <machine/platform.h> | ||||
#include <machine/frame.h> | #include <machine/frame.h> | ||||
#include <machine/md_var.h> | #include <machine/md_var.h> | ||||
#include <machine/psl.h> | #include <machine/psl.h> | ||||
#include <machine/bat.h> | #include <machine/bat.h> | ||||
#include <machine/hid.h> | #include <machine/hid.h> | ||||
#include <machine/pte.h> | #include <machine/pte.h> | ||||
#include <machine/sr.h> | #include <machine/sr.h> | ||||
#include <machine/trap.h> | #include <machine/trap.h> | ||||
#include <machine/mmuvar.h> | #include <machine/mmuvar.h> | ||||
#include "mmu_oea64.h" | #include "mmu_oea64.h" | ||||
#include "mmu_if.h" | |||||
#include "moea64_if.h" | |||||
void moea64_release_vsid(uint64_t vsid); | void moea64_release_vsid(uint64_t vsid); | ||||
uintptr_t moea64_get_unique_vsid(void); | uintptr_t moea64_get_unique_vsid(void); | ||||
#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) | #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) | ||||
#define ENABLE_TRANS(msr) mtmsr(msr) | #define ENABLE_TRANS(msr) mtmsr(msr) | ||||
#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) | #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) | ||||
#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) | #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) | ||||
#define VSID_HASH_MASK 0x0000007fffffffffULL | #define VSID_HASH_MASK 0x0000007fffffffffULL | ||||
/* | /* | ||||
* Locking semantics: | * Locking semantics: | ||||
* | * | ||||
* There are two locks of interest: the page locks and the pmap locks, which | * There are two locks of interest: the page locks and the pmap locks, which | ||||
* protect their individual PVO lists and are locked in that order. The contents | * protect their individual PVO lists and are locked in that order. The contents | ||||
* of all PVO entries are protected by the locks of their respective pmaps. | * of all PVO entries are protected by the locks of their respective pmaps. | ||||
* The pmap of any PVO is guaranteed not to change so long as the PVO is linked | * The pmap of any PVO is guaranteed not to change so long as the PVO is linked | ||||
* into any list. | * into any list. | ||||
* | * | ||||
*/ | */ | ||||
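As a concrete illustration of the ordering stated above (a sketch only, with m and pmap standing in for the page and pmap of interest, and using the PV_PAGE_LOCK()/PMAP_LOCK() pairs whose unlock calls appear further down in this file):

	/*
	 * Illustrative only: acquire in the documented order (page lock
	 * first, then the pmap lock), release in reverse.
	 */
	PV_PAGE_LOCK(m);	/* guards the page's PVO list */
	PMAP_LOCK(pmap);	/* guards the pmap's PVO tree and PVO contents */
	/* ... link, unlink, or modify PVO entries for (pmap, m) ... */
	PMAP_UNLOCK(pmap);
	PV_PAGE_UNLOCK(m);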
#define PV_LOCK_COUNT PA_LOCK_COUNT | #define PV_LOCK_COUNT PA_LOCK_COUNT | ||||
static struct mtx_padalign pv_lock[PV_LOCK_COUNT]; | static struct mtx_padalign pv_lock[PV_LOCK_COUNT]; | ||||
/* | /* | ||||
* Cheap NUMA-izing of the pv locks, to reduce contention across domains. | * Cheap NUMA-izing of the pv locks, to reduce contention across domains. | ||||
* NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the | * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the | ||||
* index at (N << 45). | * index at (N << 45). | ||||
*/ | */ | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
#define PV_LOCK_IDX(pa) ((pa_index(pa) * (((pa) >> 45) + 1)) % PV_LOCK_COUNT) | #define PV_LOCK_IDX(pa) ((pa_index(pa) * (((pa) >> 45) + 1)) % PV_LOCK_COUNT) | ||||
#else | #else | ||||
(45 lines elided)
* PVO data. | * PVO data. | ||||
*/ | */ | ||||
uma_zone_t moea64_pvo_zone; /* zone for pvo entries */ | uma_zone_t moea64_pvo_zone; /* zone for pvo entries */ | ||||
static struct pvo_entry *moea64_bpvo_pool; | static struct pvo_entry *moea64_bpvo_pool; | ||||
static int moea64_bpvo_pool_index = 0; | static int moea64_bpvo_pool_index = 0; | ||||
static int moea64_bpvo_pool_size = 0; | static int moea64_bpvo_pool_size = 0; | ||||
SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD, | SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD, | ||||
&moea64_bpvo_pool_index, 0, ""); | &moea64_bpvo_pool_index, 0, ""); | ||||
#define BPVO_POOL_SIZE 327680 /* Sensible historical default value */ | #define BPVO_POOL_SIZE 327680 /* Sensible historical default value */ | ||||
#define BPVO_POOL_EXPANSION_FACTOR 3 | #define BPVO_POOL_EXPANSION_FACTOR 3 | ||||
#define VSID_NBPW (sizeof(u_int32_t) * 8) | #define VSID_NBPW (sizeof(u_int32_t) * 8) | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
#define NVSIDS (NPMAPS * 16) | #define NVSIDS (NPMAPS * 16) | ||||
#define VSID_HASHMASK 0xffffffffUL | #define VSID_HASHMASK 0xffffffffUL | ||||
(9 lines elided)
/* | /* | ||||
* Statistics. | * Statistics. | ||||
*/ | */ | ||||
u_int moea64_pte_valid = 0; | u_int moea64_pte_valid = 0; | ||||
u_int moea64_pte_overflow = 0; | u_int moea64_pte_overflow = 0; | ||||
u_int moea64_pvo_entries = 0; | u_int moea64_pvo_entries = 0; | ||||
u_int moea64_pvo_enter_calls = 0; | u_int moea64_pvo_enter_calls = 0; | ||||
u_int moea64_pvo_remove_calls = 0; | u_int moea64_pvo_remove_calls = 0; | ||||
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, | SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, | ||||
&moea64_pte_valid, 0, ""); | &moea64_pte_valid, 0, ""); | ||||
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, | SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, | ||||
&moea64_pte_overflow, 0, ""); | &moea64_pte_overflow, 0, ""); | ||||
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, | SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, | ||||
&moea64_pvo_entries, 0, ""); | &moea64_pvo_entries, 0, ""); | ||||
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, | SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, | ||||
&moea64_pvo_enter_calls, 0, ""); | &moea64_pvo_enter_calls, 0, ""); | ||||
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, | SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, | ||||
&moea64_pvo_remove_calls, 0, ""); | &moea64_pvo_remove_calls, 0, ""); | ||||
#endif | #endif | ||||
vm_offset_t moea64_scratchpage_va[2]; | vm_offset_t moea64_scratchpage_va[2]; | ||||
struct pvo_entry *moea64_scratchpage_pvo[2]; | struct pvo_entry *moea64_scratchpage_pvo[2]; | ||||
struct mtx moea64_scratchpage_mtx; | struct mtx moea64_scratchpage_mtx; | ||||
uint64_t moea64_large_page_mask = 0; | uint64_t moea64_large_page_mask = 0; | ||||
uint64_t moea64_large_page_size = 0; | uint64_t moea64_large_page_size = 0; | ||||
int moea64_large_page_shift = 0; | int moea64_large_page_shift = 0; | ||||
/* | /* | ||||
* PVO calls. | * PVO calls. | ||||
*/ | */ | ||||
static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, | static int moea64_pvo_enter(struct pvo_entry *pvo, | ||||
struct pvo_head *pvo_head, struct pvo_entry **oldpvo); | struct pvo_head *pvo_head, struct pvo_entry **oldpvo); | ||||
static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo); | static void moea64_pvo_remove_from_pmap(struct pvo_entry *pvo); | ||||
static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo); | static void moea64_pvo_remove_from_page(struct pvo_entry *pvo); | ||||
static void moea64_pvo_remove_from_page_locked(mmu_t mmu, | static void moea64_pvo_remove_from_page_locked( | ||||
struct pvo_entry *pvo, vm_page_t m); | struct pvo_entry *pvo, vm_page_t m); | ||||
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); | static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); | ||||
/* | /* | ||||
* Utility routines. | * Utility routines. | ||||
*/ | */ | ||||
static boolean_t moea64_query_bit(mmu_t, vm_page_t, uint64_t); | static boolean_t moea64_query_bit(vm_page_t, uint64_t); | ||||
static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t); | static u_int moea64_clear_bit(vm_page_t, uint64_t); | ||||
static void moea64_kremove(mmu_t, vm_offset_t); | static void moea64_kremove(vm_offset_t); | ||||
static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, | static void moea64_syncicache(pmap_t pmap, vm_offset_t va, | ||||
vm_paddr_t pa, vm_size_t sz); | vm_paddr_t pa, vm_size_t sz); | ||||
static void moea64_pmap_init_qpages(void); | static void moea64_pmap_init_qpages(void); | ||||
/* | /* | ||||
* Kernel MMU interface | * Kernel MMU interface | ||||
*/ | */ | ||||
void moea64_clear_modify(mmu_t, vm_page_t); | void moea64_clear_modify(vm_page_t); | ||||
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); | void moea64_copy_page(vm_page_t, vm_page_t); | ||||
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, | void moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset, | ||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize); | vm_page_t *mb, vm_offset_t b_offset, int xfersize); | ||||
int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, | int moea64_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, | ||||
u_int flags, int8_t psind); | u_int flags, int8_t psind); | ||||
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, | void moea64_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t, | ||||
vm_prot_t); | vm_prot_t); | ||||
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); | void moea64_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t); | ||||
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); | vm_paddr_t moea64_extract(pmap_t, vm_offset_t); | ||||
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); | vm_page_t moea64_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t); | ||||
void moea64_init(mmu_t); | void moea64_init(void); | ||||
boolean_t moea64_is_modified(mmu_t, vm_page_t); | boolean_t moea64_is_modified(vm_page_t); | ||||
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t); | boolean_t moea64_is_prefaultable(pmap_t, vm_offset_t); | ||||
boolean_t moea64_is_referenced(mmu_t, vm_page_t); | boolean_t moea64_is_referenced(vm_page_t); | ||||
int moea64_ts_referenced(mmu_t, vm_page_t); | int moea64_ts_referenced(vm_page_t); | ||||
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); | vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); | ||||
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); | boolean_t moea64_page_exists_quick(pmap_t, vm_page_t); | ||||
void moea64_page_init(mmu_t, vm_page_t); | void moea64_page_init(vm_page_t); | ||||
int moea64_page_wired_mappings(mmu_t, vm_page_t); | int moea64_page_wired_mappings(vm_page_t); | ||||
void moea64_pinit(mmu_t, pmap_t); | int moea64_pinit(pmap_t); | ||||
void moea64_pinit0(mmu_t, pmap_t); | void moea64_pinit0(pmap_t); | ||||
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); | void moea64_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); | ||||
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); | void moea64_qenter(vm_offset_t, vm_page_t *, int); | ||||
void moea64_qremove(mmu_t, vm_offset_t, int); | void moea64_qremove(vm_offset_t, int); | ||||
void moea64_release(mmu_t, pmap_t); | void moea64_release(pmap_t); | ||||
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); | void moea64_remove(pmap_t, vm_offset_t, vm_offset_t); | ||||
void moea64_remove_pages(mmu_t, pmap_t); | void moea64_remove_pages(pmap_t); | ||||
void moea64_remove_all(mmu_t, vm_page_t); | void moea64_remove_all(vm_page_t); | ||||
void moea64_remove_write(mmu_t, vm_page_t); | void moea64_remove_write(vm_page_t); | ||||
void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); | void moea64_unwire(pmap_t, vm_offset_t, vm_offset_t); | ||||
void moea64_zero_page(mmu_t, vm_page_t); | void moea64_zero_page(vm_page_t); | ||||
void moea64_zero_page_area(mmu_t, vm_page_t, int, int); | void moea64_zero_page_area(vm_page_t, int, int); | ||||
void moea64_activate(mmu_t, struct thread *); | void moea64_activate(struct thread *); | ||||
void moea64_deactivate(mmu_t, struct thread *); | void moea64_deactivate(struct thread *); | ||||
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t); | void *moea64_mapdev(vm_paddr_t, vm_size_t); | ||||
void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t); | void *moea64_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t); | ||||
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); | void moea64_unmapdev(vm_offset_t, vm_size_t); | ||||
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t); | vm_paddr_t moea64_kextract(vm_offset_t); | ||||
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma); | void moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma); | ||||
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma); | void moea64_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma); | ||||
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t); | void moea64_kenter(vm_offset_t, vm_paddr_t); | ||||
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); | boolean_t moea64_dev_direct_mapped(vm_paddr_t, vm_size_t); | ||||
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); | static void moea64_sync_icache(pmap_t, vm_offset_t, vm_size_t); | ||||
void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, | void moea64_dumpsys_map(vm_paddr_t pa, size_t sz, | ||||
void **va); | void **va); | ||||
void moea64_scan_init(mmu_t mmu); | void moea64_scan_init(void); | ||||
vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m); | vm_offset_t moea64_quick_enter_page(vm_page_t m); | ||||
void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr); | void moea64_quick_remove_page(vm_offset_t addr); | ||||
boolean_t moea64_page_is_mapped(mmu_t mmu, vm_page_t m); | boolean_t moea64_page_is_mapped(vm_page_t m); | ||||
static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm, | static int moea64_map_user_ptr(pmap_t pm, | ||||
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); | volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); | ||||
static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, | static int moea64_decode_kernel_ptr(vm_offset_t addr, | ||||
int *is_user, vm_offset_t *decoded_addr); | int *is_user, vm_offset_t *decoded_addr); | ||||
static size_t moea64_scan_pmap(mmu_t mmu); | static size_t moea64_scan_pmap(void); | ||||
static void *moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs); | static void *moea64_dump_pmap_init(unsigned blkpgs); | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
static void moea64_page_array_startup(mmu_t, long); | static void moea64_page_array_startup(long); | ||||
#endif | #endif | ||||
static mmu_method_t moea64_methods[] = { | static struct pmap_funcs moea64_methods = { | ||||
MMUMETHOD(mmu_clear_modify, moea64_clear_modify), | .clear_modify = moea64_clear_modify, | ||||
MMUMETHOD(mmu_copy_page, moea64_copy_page), | .copy_page = moea64_copy_page, | ||||
MMUMETHOD(mmu_copy_pages, moea64_copy_pages), | .copy_pages = moea64_copy_pages, | ||||
MMUMETHOD(mmu_enter, moea64_enter), | .enter = moea64_enter, | ||||
MMUMETHOD(mmu_enter_object, moea64_enter_object), | .enter_object = moea64_enter_object, | ||||
MMUMETHOD(mmu_enter_quick, moea64_enter_quick), | .enter_quick = moea64_enter_quick, | ||||
MMUMETHOD(mmu_extract, moea64_extract), | .extract = moea64_extract, | ||||
MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), | .extract_and_hold = moea64_extract_and_hold, | ||||
MMUMETHOD(mmu_init, moea64_init), | .init = moea64_init, | ||||
MMUMETHOD(mmu_is_modified, moea64_is_modified), | .is_modified = moea64_is_modified, | ||||
MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable), | .is_prefaultable = moea64_is_prefaultable, | ||||
MMUMETHOD(mmu_is_referenced, moea64_is_referenced), | .is_referenced = moea64_is_referenced, | ||||
MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), | .ts_referenced = moea64_ts_referenced, | ||||
MMUMETHOD(mmu_map, moea64_map), | .map = moea64_map, | ||||
MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), | .page_exists_quick = moea64_page_exists_quick, | ||||
MMUMETHOD(mmu_page_init, moea64_page_init), | .page_init = moea64_page_init, | ||||
MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), | .page_wired_mappings = moea64_page_wired_mappings, | ||||
MMUMETHOD(mmu_pinit, moea64_pinit), | .pinit = moea64_pinit, | ||||
MMUMETHOD(mmu_pinit0, moea64_pinit0), | .pinit0 = moea64_pinit0, | ||||
MMUMETHOD(mmu_protect, moea64_protect), | .protect = moea64_protect, | ||||
MMUMETHOD(mmu_qenter, moea64_qenter), | .qenter = moea64_qenter, | ||||
MMUMETHOD(mmu_qremove, moea64_qremove), | .qremove = moea64_qremove, | ||||
MMUMETHOD(mmu_release, moea64_release), | .release = moea64_release, | ||||
MMUMETHOD(mmu_remove, moea64_remove), | .remove = moea64_remove, | ||||
MMUMETHOD(mmu_remove_pages, moea64_remove_pages), | .remove_pages = moea64_remove_pages, | ||||
MMUMETHOD(mmu_remove_all, moea64_remove_all), | .remove_all = moea64_remove_all, | ||||
MMUMETHOD(mmu_remove_write, moea64_remove_write), | .remove_write = moea64_remove_write, | ||||
MMUMETHOD(mmu_sync_icache, moea64_sync_icache), | .sync_icache = moea64_sync_icache, | ||||
MMUMETHOD(mmu_unwire, moea64_unwire), | .unwire = moea64_unwire, | ||||
MMUMETHOD(mmu_zero_page, moea64_zero_page), | .zero_page = moea64_zero_page, | ||||
MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), | .zero_page_area = moea64_zero_page_area, | ||||
MMUMETHOD(mmu_activate, moea64_activate), | .activate = moea64_activate, | ||||
MMUMETHOD(mmu_deactivate, moea64_deactivate), | .deactivate = moea64_deactivate, | ||||
MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr), | .page_set_memattr = moea64_page_set_memattr, | ||||
MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page), | .quick_enter_page = moea64_quick_enter_page, | ||||
MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page), | .quick_remove_page = moea64_quick_remove_page, | ||||
MMUMETHOD(mmu_page_is_mapped, moea64_page_is_mapped), | .page_is_mapped = moea64_page_is_mapped, | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
MMUMETHOD(mmu_page_array_startup, moea64_page_array_startup), | .page_array_startup = moea64_page_array_startup, | ||||
#endif | #endif | ||||
/* Internal interfaces */ | /* Internal interfaces */ | ||||
MMUMETHOD(mmu_mapdev, moea64_mapdev), | .mapdev = moea64_mapdev, | ||||
MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr), | .mapdev_attr = moea64_mapdev_attr, | ||||
MMUMETHOD(mmu_unmapdev, moea64_unmapdev), | .unmapdev = moea64_unmapdev, | ||||
MMUMETHOD(mmu_kextract, moea64_kextract), | .kextract = moea64_kextract, | ||||
MMUMETHOD(mmu_kenter, moea64_kenter), | .kenter = moea64_kenter, | ||||
MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr), | .kenter_attr = moea64_kenter_attr, | ||||
MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), | .dev_direct_mapped = moea64_dev_direct_mapped, | ||||
MMUMETHOD(mmu_scan_init, moea64_scan_init), | .dumpsys_pa_init = moea64_scan_init, | ||||
MMUMETHOD(mmu_scan_pmap, moea64_scan_pmap), | .dumpsys_scan_pmap = moea64_scan_pmap, | ||||
MMUMETHOD(mmu_dump_pmap_init, moea64_dump_pmap_init), | .dumpsys_dump_pmap_init = moea64_dump_pmap_init, | ||||
MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map), | .dumpsys_map_chunk = moea64_dumpsys_map, | ||||
MMUMETHOD(mmu_map_user_ptr, moea64_map_user_ptr), | .map_user_ptr = moea64_map_user_ptr, | ||||
MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr), | .decode_kernel_ptr = moea64_decode_kernel_ptr, | ||||
{ 0, 0 } | |||||
}; | }; | ||||
MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0); | MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods); | ||||
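The mechanical change visible in the table above is the removal of the kobj-style mmu_t first argument and the replacement of the MMUMETHOD() entries with a struct pmap_funcs filled in by designated initializers and registered through MMU_DEF(). A minimal sketch of the new pattern follows; the field names and macros are taken from the table above, while the foo_* identifiers are hypothetical, invented purely for illustration:

/*
 * Illustrative only: a hypothetical pmap implementation wiring up two
 * methods.  The .zero_page/.kextract fields and the MMU_DEF() usage
 * mirror the moea64 table above; the foo_* names are invented.
 */
static void
foo_zero_page(vm_page_t m)
{
	/* Zero the page through the direct map, as the moea64 code below does. */
	bzero((void *)(uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
}

static vm_paddr_t
foo_kextract(vm_offset_t va)
{
	/* Strip the direct-map base to recover the physical address. */
	return (va & ~DMAP_BASE_ADDRESS);
}

static struct pmap_funcs foo_methods = {
	.zero_page = foo_zero_page,
	.kextract = foo_kextract,
};

MMU_DEF(foo_mmu, "mmu_foo", foo_methods);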
static struct pvo_head * | static struct pvo_head * | ||||
vm_page_to_pvoh(vm_page_t m) | vm_page_to_pvoh(vm_page_t m) | ||||
{ | { | ||||
mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED); | mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED); | ||||
return (&m->md.mdpg_pvoh); | return (&m->md.mdpg_pvoh); | ||||
} | } | ||||
(131 lines elided) | if (mapa->om_pa < mapb->om_pa)
return (-1); | return (-1); | ||||
else if (mapa->om_pa > mapb->om_pa) | else if (mapa->om_pa > mapb->om_pa) | ||||
return (1); | return (1); | ||||
else | else | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) | moea64_add_ofw_mappings(phandle_t mmu, size_t sz) | ||||
{ | { | ||||
struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */ | struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */ | ||||
pcell_t acells, trans_cells[sz/sizeof(cell_t)]; | pcell_t acells, trans_cells[sz/sizeof(cell_t)]; | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
register_t msr; | register_t msr; | ||||
vm_offset_t off; | vm_offset_t off; | ||||
vm_paddr_t pa_base; | vm_paddr_t pa_base; | ||||
int i, j; | int i, j; | ||||
(50 lines elided) | for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
pvo = moea64_pvo_find_va(kernel_pmap, | pvo = moea64_pvo_find_va(kernel_pmap, | ||||
translations[i].om_va + off); | translations[i].om_va + off); | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
if (pvo != NULL) | if (pvo != NULL) | ||||
continue; | continue; | ||||
moea64_kenter(mmup, translations[i].om_va + off, | moea64_kenter(translations[i].om_va + off, | ||||
pa_base + off); | pa_base + off); | ||||
} | } | ||||
ENABLE_TRANS(msr); | ENABLE_TRANS(msr); | ||||
} | } | ||||
} | } | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
static void | static void | ||||
(42 lines elided) | moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
if (large) | if (large) | ||||
entry.slbv |= SLBV_L; | entry.slbv |= SLBV_L; | ||||
slb_insert_kernel(entry.slbe, entry.slbv); | slb_insert_kernel(entry.slbe, entry.slbv); | ||||
} | } | ||||
#endif | #endif | ||||
static int | static int | ||||
moea64_kenter_large(mmu_t mmup, vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap) | moea64_kenter_large(vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
uint64_t pte_lo; | uint64_t pte_lo; | ||||
int error; | int error; | ||||
pte_lo = LPTE_M; | pte_lo = LPTE_M; | ||||
pte_lo |= attr; | pte_lo |= attr; | ||||
pvo = alloc_pvo_entry(bootstrap); | pvo = alloc_pvo_entry(bootstrap); | ||||
pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE; | pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE; | ||||
init_pvo_entry(pvo, kernel_pmap, va); | init_pvo_entry(pvo, kernel_pmap, va); | ||||
pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | | pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | | ||||
VM_PROT_EXECUTE; | VM_PROT_EXECUTE; | ||||
pvo->pvo_pte.pa = pa | pte_lo; | pvo->pvo_pte.pa = pa | pte_lo; | ||||
error = moea64_pvo_enter(mmup, pvo, NULL, NULL); | error = moea64_pvo_enter(pvo, NULL, NULL); | ||||
if (error != 0) | if (error != 0) | ||||
panic("Error %d inserting large page\n", error); | panic("Error %d inserting large page\n", error); | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, | moea64_setup_direct_map(vm_offset_t kernelstart, | ||||
vm_offset_t kernelend) | vm_offset_t kernelend) | ||||
{ | { | ||||
register_t msr; | register_t msr; | ||||
vm_paddr_t pa, pkernelstart, pkernelend; | vm_paddr_t pa, pkernelstart, pkernelend; | ||||
vm_offset_t size, off; | vm_offset_t size, off; | ||||
uint64_t pte_lo; | uint64_t pte_lo; | ||||
int i; | int i; | ||||
if (moea64_large_page_size == 0) | if (moea64_large_page_size == 0) | ||||
hw_direct_map = 0; | hw_direct_map = 0; | ||||
DISABLE_TRANS(msr); | DISABLE_TRANS(msr); | ||||
if (hw_direct_map) { | if (hw_direct_map) { | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
for (i = 0; i < pregions_sz; i++) { | for (i = 0; i < pregions_sz; i++) { | ||||
for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + | for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + | ||||
pregions[i].mr_size; pa += moea64_large_page_size) { | pregions[i].mr_size; pa += moea64_large_page_size) { | ||||
pte_lo = LPTE_M; | pte_lo = LPTE_M; | ||||
if (pa & moea64_large_page_mask) { | if (pa & moea64_large_page_mask) { | ||||
pa &= moea64_large_page_mask; | pa &= moea64_large_page_mask; | ||||
pte_lo |= LPTE_G; | pte_lo |= LPTE_G; | ||||
} | } | ||||
if (pa + moea64_large_page_size > | if (pa + moea64_large_page_size > | ||||
pregions[i].mr_start + pregions[i].mr_size) | pregions[i].mr_start + pregions[i].mr_size) | ||||
pte_lo |= LPTE_G; | pte_lo |= LPTE_G; | ||||
moea64_kenter_large(mmup, PHYS_TO_DMAP(pa), pa, pte_lo, 1); | moea64_kenter_large(PHYS_TO_DMAP(pa), pa, pte_lo, 1); | ||||
} | } | ||||
} | } | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
} | } | ||||
/* | /* | ||||
* Make sure the kernel and BPVO pool stay mapped on systems either | * Make sure the kernel and BPVO pool stay mapped on systems either | ||||
* without a direct map or on which the kernel is not already executing | * without a direct map or on which the kernel is not already executing | ||||
* out of the direct-mapped region. | * out of the direct-mapped region. | ||||
*/ | */ | ||||
if (kernelstart < DMAP_BASE_ADDRESS) { | if (kernelstart < DMAP_BASE_ADDRESS) { | ||||
/* | /* | ||||
* For pre-dmap execution, we need to use identity mapping | * For pre-dmap execution, we need to use identity mapping | ||||
* because we will be operating with the mmu on but in the | * because we will be operating with the mmu on but in the | ||||
* wrong address configuration until we __restartkernel(). | * wrong address configuration until we __restartkernel(). | ||||
*/ | */ | ||||
for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; | for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; | ||||
pa += PAGE_SIZE) | pa += PAGE_SIZE) | ||||
moea64_kenter(mmup, pa, pa); | moea64_kenter(pa, pa); | ||||
} else if (!hw_direct_map) { | } else if (!hw_direct_map) { | ||||
pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS; | pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS; | ||||
pkernelend = kernelend & ~DMAP_BASE_ADDRESS; | pkernelend = kernelend & ~DMAP_BASE_ADDRESS; | ||||
for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend; | for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend; | ||||
pa += PAGE_SIZE) | pa += PAGE_SIZE) | ||||
moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa); | moea64_kenter(pa | DMAP_BASE_ADDRESS, pa); | ||||
} | } | ||||
if (!hw_direct_map) { | if (!hw_direct_map) { | ||||
size = moea64_bpvo_pool_size*sizeof(struct pvo_entry); | size = moea64_bpvo_pool_size*sizeof(struct pvo_entry); | ||||
off = (vm_offset_t)(moea64_bpvo_pool); | off = (vm_offset_t)(moea64_bpvo_pool); | ||||
for (pa = off; pa < off + size; pa += PAGE_SIZE) | for (pa = off; pa < off + size; pa += PAGE_SIZE) | ||||
moea64_kenter(mmup, pa, pa); | moea64_kenter(pa, pa); | ||||
/* Map exception vectors */ | /* Map exception vectors */ | ||||
for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE) | for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE) | ||||
moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa); | moea64_kenter(pa | DMAP_BASE_ADDRESS, pa); | ||||
} | } | ||||
ENABLE_TRANS(msr); | ENABLE_TRANS(msr); | ||||
/* | /* | ||||
* Allow user to override unmapped_buf_allowed for testing. | * Allow user to override unmapped_buf_allowed for testing. | ||||
* XXXKIB Only direct map implementation was tested. | * XXXKIB Only direct map implementation was tested. | ||||
*/ | */ | ||||
if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed", | if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed", | ||||
(11 lines elided) | if (*pa < *pb)
return (-1); | return (-1); | ||||
else if (*pa > *pb) | else if (*pa > *pb) | ||||
return (1); | return (1); | ||||
else | else | ||||
return (0); | return (0); | ||||
} | } | ||||
void | void | ||||
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) | moea64_early_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) | ||||
{ | { | ||||
int i, j; | int i, j; | ||||
vm_size_t physsz, hwphyssz; | vm_size_t physsz, hwphyssz; | ||||
vm_paddr_t kernelphysstart, kernelphysend; | vm_paddr_t kernelphysstart, kernelphysend; | ||||
int rm_pavail; | int rm_pavail; | ||||
#ifndef __powerpc64__ | #ifndef __powerpc64__ | ||||
/* We don't have a direct map since there is no BAT */ | /* We don't have a direct map since there is no BAT */ | ||||
(119 lines elided) | #else
while (moea64_pteg_count < physmem) | while (moea64_pteg_count < physmem) | ||||
moea64_pteg_count <<= 1; | moea64_pteg_count <<= 1; | ||||
moea64_pteg_count >>= 1; | moea64_pteg_count >>= 1; | ||||
#endif /* PTEGCOUNT */ | #endif /* PTEGCOUNT */ | ||||
} | } | ||||
void | void | ||||
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) | moea64_mid_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) | ||||
{ | { | ||||
int i; | int i; | ||||
/* | /* | ||||
* Set PTEG mask | * Set PTEG mask | ||||
*/ | */ | ||||
moea64_pteg_mask = moea64_pteg_count - 1; | moea64_pteg_mask = moea64_pteg_count - 1; | ||||
(44 lines elided) | moea64_mid_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
* Initialize the kernel pmap (which is statically allocated). | * Initialize the kernel pmap (which is statically allocated). | ||||
*/ | */ | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
for (i = 0; i < 64; i++) { | for (i = 0; i < 64; i++) { | ||||
pcpup->pc_aim.slb[i].slbv = 0; | pcpup->pc_aim.slb[i].slbv = 0; | ||||
pcpup->pc_aim.slb[i].slbe = 0; | pcpup->pc_aim.slb[i].slbe = 0; | ||||
} | } | ||||
#else | #else | ||||
for (i = 0; i < 16; i++) | for (i = 0; i < 16; i++) | ||||
kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; | kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; | ||||
#endif | #endif | ||||
kernel_pmap->pmap_phys = kernel_pmap; | kernel_pmap->pmap_phys = kernel_pmap; | ||||
CPU_FILL(&kernel_pmap->pm_active); | CPU_FILL(&kernel_pmap->pm_active); | ||||
RB_INIT(&kernel_pmap->pmap_pvo); | RB_INIT(&kernel_pmap->pmap_pvo); | ||||
PMAP_LOCK_INIT(kernel_pmap); | PMAP_LOCK_INIT(kernel_pmap); | ||||
/* | /* | ||||
* Now map in all the other buffers we allocated earlier | * Now map in all the other buffers we allocated earlier | ||||
*/ | */ | ||||
moea64_setup_direct_map(mmup, kernelstart, kernelend); | moea64_setup_direct_map(kernelstart, kernelend); | ||||
} | } | ||||
void | void | ||||
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) | moea64_late_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) | ||||
{ | { | ||||
ihandle_t mmui; | ihandle_t mmui; | ||||
phandle_t chosen; | phandle_t chosen; | ||||
phandle_t mmu; | phandle_t mmu; | ||||
ssize_t sz; | ssize_t sz; | ||||
int i; | int i; | ||||
vm_offset_t pa, va; | vm_offset_t pa, va; | ||||
void *dpcpu; | void *dpcpu; | ||||
/* | /* | ||||
* Set up the Open Firmware pmap and add its mappings if not in real | * Set up the Open Firmware pmap and add its mappings if not in real | ||||
* mode. | * mode. | ||||
*/ | */ | ||||
chosen = OF_finddevice("/chosen"); | chosen = OF_finddevice("/chosen"); | ||||
if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) { | if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) { | ||||
mmu = OF_instance_to_package(mmui); | mmu = OF_instance_to_package(mmui); | ||||
if (mmu == -1 || | if (mmu == -1 || | ||||
(sz = OF_getproplen(mmu, "translations")) == -1) | (sz = OF_getproplen(mmu, "translations")) == -1) | ||||
sz = 0; | sz = 0; | ||||
if (sz > 6144 /* tmpstksz - 2 KB headroom */) | if (sz > 6144 /* tmpstksz - 2 KB headroom */) | ||||
panic("moea64_bootstrap: too many ofw translations"); | panic("moea64_bootstrap: too many ofw translations"); | ||||
if (sz > 0) | if (sz > 0) | ||||
moea64_add_ofw_mappings(mmup, mmu, sz); | moea64_add_ofw_mappings(mmu, sz); | ||||
} | } | ||||
/* | /* | ||||
* Calculate the last available physical address. | * Calculate the last available physical address. | ||||
*/ | */ | ||||
Maxmem = 0; | Maxmem = 0; | ||||
for (i = 0; phys_avail[i + 2] != 0; i += 2) | for (i = 0; phys_avail[i + 2] != 0; i += 2) | ||||
Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1])); | Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1])); | ||||
/* | /* | ||||
* Initialize MMU. | * Initialize MMU. | ||||
*/ | */ | ||||
MMU_CPU_BOOTSTRAP(mmup,0); | pmap_cpu_bootstrap(0); | ||||
mtmsr(mfmsr() | PSL_DR | PSL_IR); | mtmsr(mfmsr() | PSL_DR | PSL_IR); | ||||
pmap_bootstrapped++; | pmap_bootstrapped++; | ||||
/* | /* | ||||
* Set the start and end of kva. | * Set the start and end of kva. | ||||
*/ | */ | ||||
virtual_avail = VM_MIN_KERNEL_ADDRESS; | virtual_avail = VM_MIN_KERNEL_ADDRESS; | ||||
virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; | virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; | ||||
(33 lines elided) | moea64_late_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
*/ | */ | ||||
pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE); | pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE); | ||||
va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; | va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; | ||||
virtual_avail = va + kstack_pages * PAGE_SIZE; | virtual_avail = va + kstack_pages * PAGE_SIZE; | ||||
CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); | CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); | ||||
thread0.td_kstack = va; | thread0.td_kstack = va; | ||||
thread0.td_kstack_pages = kstack_pages; | thread0.td_kstack_pages = kstack_pages; | ||||
for (i = 0; i < kstack_pages; i++) { | for (i = 0; i < kstack_pages; i++) { | ||||
moea64_kenter(mmup, va, pa); | moea64_kenter(va, pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
/* | /* | ||||
* Allocate virtual address space for the message buffer. | * Allocate virtual address space for the message buffer. | ||||
*/ | */ | ||||
pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE); | pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE); | ||||
msgbufp = (struct msgbuf *)virtual_avail; | msgbufp = (struct msgbuf *)virtual_avail; | ||||
va = virtual_avail; | va = virtual_avail; | ||||
virtual_avail += round_page(msgbufsize); | virtual_avail += round_page(msgbufsize); | ||||
while (va < virtual_avail) { | while (va < virtual_avail) { | ||||
moea64_kenter(mmup, va, pa); | moea64_kenter(va, pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
/* | /* | ||||
* Allocate virtual address space for the dynamic percpu area. | * Allocate virtual address space for the dynamic percpu area. | ||||
*/ | */ | ||||
pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); | pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); | ||||
dpcpu = (void *)virtual_avail; | dpcpu = (void *)virtual_avail; | ||||
va = virtual_avail; | va = virtual_avail; | ||||
virtual_avail += DPCPU_SIZE; | virtual_avail += DPCPU_SIZE; | ||||
while (va < virtual_avail) { | while (va < virtual_avail) { | ||||
moea64_kenter(mmup, va, pa); | moea64_kenter(va, pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
dpcpu_init(dpcpu, curcpu); | dpcpu_init(dpcpu, curcpu); | ||||
crashdumpmap = (caddr_t)virtual_avail; | crashdumpmap = (caddr_t)virtual_avail; | ||||
virtual_avail += MAXDUMPPGS * PAGE_SIZE; | virtual_avail += MAXDUMPPGS * PAGE_SIZE; | ||||
/* | /* | ||||
* Allocate some things for page zeroing. We put this directly | * Allocate some things for page zeroing. We put this directly | ||||
* in the page table and use MOEA64_PTE_REPLACE to avoid any | * in the page table and use MOEA64_PTE_REPLACE to avoid any | ||||
* of the PVO book-keeping or other parts of the VM system | * of the PVO book-keeping or other parts of the VM system | ||||
* from even knowing that this hack exists. | * from even knowing that this hack exists. | ||||
*/ | */ | ||||
if (!hw_direct_map) { | if (!hw_direct_map) { | ||||
mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, | mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, | ||||
MTX_DEF); | MTX_DEF); | ||||
for (i = 0; i < 2; i++) { | for (i = 0; i < 2; i++) { | ||||
moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; | moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; | ||||
virtual_end -= PAGE_SIZE; | virtual_end -= PAGE_SIZE; | ||||
moea64_kenter(mmup, moea64_scratchpage_va[i], 0); | moea64_kenter(moea64_scratchpage_va[i], 0); | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
moea64_scratchpage_pvo[i] = moea64_pvo_find_va( | moea64_scratchpage_pvo[i] = moea64_pvo_find_va( | ||||
kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]); | kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]); | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
} | } | ||||
} | } | ||||
(24 lines elided)
SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL); | SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL); | ||||
/* | /* | ||||
* Activate a user pmap. This mostly involves setting some non-CPU | * Activate a user pmap. This mostly involves setting some non-CPU | ||||
* state. | * state. | ||||
*/ | */ | ||||
void | void | ||||
moea64_activate(mmu_t mmu, struct thread *td) | moea64_activate(struct thread *td) | ||||
{ | { | ||||
pmap_t pm; | pmap_t pm; | ||||
pm = &td->td_proc->p_vmspace->vm_pmap; | pm = &td->td_proc->p_vmspace->vm_pmap; | ||||
CPU_SET(PCPU_GET(cpuid), &pm->pm_active); | CPU_SET(PCPU_GET(cpuid), &pm->pm_active); | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
PCPU_SET(aim.userslb, pm->pm_slb); | PCPU_SET(aim.userslb, pm->pm_slb); | ||||
__asm __volatile("slbmte %0, %1; isync" :: | __asm __volatile("slbmte %0, %1; isync" :: | ||||
"r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); | "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); | ||||
#else | #else | ||||
PCPU_SET(curpmap, pm->pmap_phys); | PCPU_SET(curpmap, pm->pmap_phys); | ||||
mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); | mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); | ||||
#endif | #endif | ||||
} | } | ||||
void | void | ||||
moea64_deactivate(mmu_t mmu, struct thread *td) | moea64_deactivate(struct thread *td) | ||||
{ | { | ||||
pmap_t pm; | pmap_t pm; | ||||
__asm __volatile("isync; slbie %0" :: "r"(USER_ADDR)); | __asm __volatile("isync; slbie %0" :: "r"(USER_ADDR)); | ||||
pm = &td->td_proc->p_vmspace->vm_pmap; | pm = &td->td_proc->p_vmspace->vm_pmap; | ||||
CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); | CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
PCPU_SET(aim.userslb, NULL); | PCPU_SET(aim.userslb, NULL); | ||||
#else | #else | ||||
PCPU_SET(curpmap, NULL); | PCPU_SET(curpmap, NULL); | ||||
#endif | #endif | ||||
} | } | ||||
void | void | ||||
moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) | moea64_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
struct pvo_entry key, *pvo; | struct pvo_entry key, *pvo; | ||||
vm_page_t m; | vm_page_t m; | ||||
int64_t refchg; | int64_t refchg; | ||||
key.pvo_vaddr = sva; | key.pvo_vaddr = sva; | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | ||||
pvo != NULL && PVO_VADDR(pvo) < eva; | pvo != NULL && PVO_VADDR(pvo) < eva; | ||||
pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { | pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { | ||||
if ((pvo->pvo_vaddr & PVO_WIRED) == 0) | if ((pvo->pvo_vaddr & PVO_WIRED) == 0) | ||||
panic("moea64_unwire: pvo %p is missing PVO_WIRED", | panic("moea64_unwire: pvo %p is missing PVO_WIRED", | ||||
pvo); | pvo); | ||||
pvo->pvo_vaddr &= ~PVO_WIRED; | pvo->pvo_vaddr &= ~PVO_WIRED; | ||||
refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */); | refchg = moea64_pte_replace(pvo, 0 /* No invalidation */); | ||||
if ((pvo->pvo_vaddr & PVO_MANAGED) && | if ((pvo->pvo_vaddr & PVO_MANAGED) && | ||||
(pvo->pvo_pte.prot & VM_PROT_WRITE)) { | (pvo->pvo_pte.prot & VM_PROT_WRITE)) { | ||||
if (refchg < 0) | if (refchg < 0) | ||||
refchg = LPTE_CHG; | refchg = LPTE_CHG; | ||||
m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); | m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); | ||||
refchg |= atomic_readandclear_32(&m->md.mdpg_attrs); | refchg |= atomic_readandclear_32(&m->md.mdpg_attrs); | ||||
if (refchg & LPTE_CHG) | if (refchg & LPTE_CHG) | ||||
(9 lines elided)
/* | /* | ||||
* This goes through and sets the physical address of our | * This goes through and sets the physical address of our | ||||
* special scratch PTE to the PA we want to zero or copy. Because | * special scratch PTE to the PA we want to zero or copy. Because | ||||
* of locking issues (this can get called in pvo_enter() by | * of locking issues (this can get called in pvo_enter() by | ||||
* the UMA allocator), we can't use most other utility functions here | * the UMA allocator), we can't use most other utility functions here | ||||
*/ | */ | ||||
static __inline | static __inline | ||||
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) | void moea64_set_scratchpage_pa(int which, vm_paddr_t pa) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); | KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); | ||||
mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); | mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); | ||||
pvo = moea64_scratchpage_pvo[which]; | pvo = moea64_scratchpage_pvo[which]; | ||||
PMAP_LOCK(pvo->pvo_pmap); | PMAP_LOCK(pvo->pvo_pmap); | ||||
pvo->pvo_pte.pa = | pvo->pvo_pte.pa = | ||||
moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; | moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; | ||||
MOEA64_PTE_REPLACE(mmup, pvo, MOEA64_PTE_INVALIDATE); | moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE); | ||||
PMAP_UNLOCK(pvo->pvo_pmap); | PMAP_UNLOCK(pvo->pvo_pmap); | ||||
isync(); | isync(); | ||||
} | } | ||||
void | void | ||||
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) | moea64_copy_page(vm_page_t msrc, vm_page_t mdst) | ||||
{ | { | ||||
vm_offset_t dst; | vm_offset_t dst; | ||||
vm_offset_t src; | vm_offset_t src; | ||||
dst = VM_PAGE_TO_PHYS(mdst); | dst = VM_PAGE_TO_PHYS(mdst); | ||||
src = VM_PAGE_TO_PHYS(msrc); | src = VM_PAGE_TO_PHYS(msrc); | ||||
if (hw_direct_map) { | if (hw_direct_map) { | ||||
bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst), | bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst), | ||||
PAGE_SIZE); | PAGE_SIZE); | ||||
} else { | } else { | ||||
mtx_lock(&moea64_scratchpage_mtx); | mtx_lock(&moea64_scratchpage_mtx); | ||||
moea64_set_scratchpage_pa(mmu, 0, src); | moea64_set_scratchpage_pa(0, src); | ||||
moea64_set_scratchpage_pa(mmu, 1, dst); | moea64_set_scratchpage_pa(1, dst); | ||||
bcopy((void *)moea64_scratchpage_va[0], | bcopy((void *)moea64_scratchpage_va[0], | ||||
(void *)moea64_scratchpage_va[1], PAGE_SIZE); | (void *)moea64_scratchpage_va[1], PAGE_SIZE); | ||||
mtx_unlock(&moea64_scratchpage_mtx); | mtx_unlock(&moea64_scratchpage_mtx); | ||||
} | } | ||||
} | } | ||||
static inline void | static inline void | ||||
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, | moea64_copy_pages_dmap(vm_page_t *ma, vm_offset_t a_offset, | ||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize) | vm_page_t *mb, vm_offset_t b_offset, int xfersize) | ||||
{ | { | ||||
void *a_cp, *b_cp; | void *a_cp, *b_cp; | ||||
vm_offset_t a_pg_offset, b_pg_offset; | vm_offset_t a_pg_offset, b_pg_offset; | ||||
int cnt; | int cnt; | ||||
while (xfersize > 0) { | while (xfersize > 0) { | ||||
a_pg_offset = a_offset & PAGE_MASK; | a_pg_offset = a_offset & PAGE_MASK; | ||||
(9 lines elided) | while (xfersize > 0) {
bcopy(a_cp, b_cp, cnt); | bcopy(a_cp, b_cp, cnt); | ||||
a_offset += cnt; | a_offset += cnt; | ||||
b_offset += cnt; | b_offset += cnt; | ||||
xfersize -= cnt; | xfersize -= cnt; | ||||
} | } | ||||
} | } | ||||
static inline void | static inline void | ||||
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, | moea64_copy_pages_nodmap(vm_page_t *ma, vm_offset_t a_offset, | ||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize) | vm_page_t *mb, vm_offset_t b_offset, int xfersize) | ||||
{ | { | ||||
void *a_cp, *b_cp; | void *a_cp, *b_cp; | ||||
vm_offset_t a_pg_offset, b_pg_offset; | vm_offset_t a_pg_offset, b_pg_offset; | ||||
int cnt; | int cnt; | ||||
mtx_lock(&moea64_scratchpage_mtx); | mtx_lock(&moea64_scratchpage_mtx); | ||||
while (xfersize > 0) { | while (xfersize > 0) { | ||||
a_pg_offset = a_offset & PAGE_MASK; | a_pg_offset = a_offset & PAGE_MASK; | ||||
cnt = min(xfersize, PAGE_SIZE - a_pg_offset); | cnt = min(xfersize, PAGE_SIZE - a_pg_offset); | ||||
moea64_set_scratchpage_pa(mmu, 0, | moea64_set_scratchpage_pa(0, | ||||
VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); | VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); | ||||
a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset; | a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset; | ||||
b_pg_offset = b_offset & PAGE_MASK; | b_pg_offset = b_offset & PAGE_MASK; | ||||
cnt = min(cnt, PAGE_SIZE - b_pg_offset); | cnt = min(cnt, PAGE_SIZE - b_pg_offset); | ||||
moea64_set_scratchpage_pa(mmu, 1, | moea64_set_scratchpage_pa(1, | ||||
VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); | VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); | ||||
b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset; | b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset; | ||||
bcopy(a_cp, b_cp, cnt); | bcopy(a_cp, b_cp, cnt); | ||||
a_offset += cnt; | a_offset += cnt; | ||||
b_offset += cnt; | b_offset += cnt; | ||||
xfersize -= cnt; | xfersize -= cnt; | ||||
} | } | ||||
mtx_unlock(&moea64_scratchpage_mtx); | mtx_unlock(&moea64_scratchpage_mtx); | ||||
} | } | ||||
void | void | ||||
moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, | moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset, | ||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize) | vm_page_t *mb, vm_offset_t b_offset, int xfersize) | ||||
{ | { | ||||
if (hw_direct_map) { | if (hw_direct_map) { | ||||
moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset, | moea64_copy_pages_dmap(ma, a_offset, mb, b_offset, | ||||
xfersize); | xfersize); | ||||
} else { | } else { | ||||
moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset, | moea64_copy_pages_nodmap(ma, a_offset, mb, b_offset, | ||||
xfersize); | xfersize); | ||||
} | } | ||||
} | } | ||||
void | void | ||||
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) | moea64_zero_page_area(vm_page_t m, int off, int size) | ||||
{ | { | ||||
vm_paddr_t pa = VM_PAGE_TO_PHYS(m); | vm_paddr_t pa = VM_PAGE_TO_PHYS(m); | ||||
if (size + off > PAGE_SIZE) | if (size + off > PAGE_SIZE) | ||||
panic("moea64_zero_page: size + off > PAGE_SIZE"); | panic("moea64_zero_page: size + off > PAGE_SIZE"); | ||||
if (hw_direct_map) { | if (hw_direct_map) { | ||||
bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size); | bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size); | ||||
} else { | } else { | ||||
mtx_lock(&moea64_scratchpage_mtx); | mtx_lock(&moea64_scratchpage_mtx); | ||||
moea64_set_scratchpage_pa(mmu, 0, pa); | moea64_set_scratchpage_pa(0, pa); | ||||
bzero((caddr_t)moea64_scratchpage_va[0] + off, size); | bzero((caddr_t)moea64_scratchpage_va[0] + off, size); | ||||
mtx_unlock(&moea64_scratchpage_mtx); | mtx_unlock(&moea64_scratchpage_mtx); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Zero a page of physical memory by temporarily mapping it | * Zero a page of physical memory by temporarily mapping it | ||||
*/ | */ | ||||
void | void | ||||
moea64_zero_page(mmu_t mmu, vm_page_t m) | moea64_zero_page(vm_page_t m) | ||||
{ | { | ||||
vm_paddr_t pa = VM_PAGE_TO_PHYS(m); | vm_paddr_t pa = VM_PAGE_TO_PHYS(m); | ||||
vm_offset_t va, off; | vm_offset_t va, off; | ||||
if (!hw_direct_map) { | if (!hw_direct_map) { | ||||
mtx_lock(&moea64_scratchpage_mtx); | mtx_lock(&moea64_scratchpage_mtx); | ||||
moea64_set_scratchpage_pa(mmu, 0, pa); | moea64_set_scratchpage_pa(0, pa); | ||||
va = moea64_scratchpage_va[0]; | va = moea64_scratchpage_va[0]; | ||||
} else { | } else { | ||||
va = PHYS_TO_DMAP(pa); | va = PHYS_TO_DMAP(pa); | ||||
} | } | ||||
for (off = 0; off < PAGE_SIZE; off += cacheline_size) | for (off = 0; off < PAGE_SIZE; off += cacheline_size) | ||||
__asm __volatile("dcbz 0,%0" :: "r"(va + off)); | __asm __volatile("dcbz 0,%0" :: "r"(va + off)); | ||||
if (!hw_direct_map) | if (!hw_direct_map) | ||||
mtx_unlock(&moea64_scratchpage_mtx); | mtx_unlock(&moea64_scratchpage_mtx); | ||||
} | } | ||||
vm_offset_t | vm_offset_t | ||||
moea64_quick_enter_page(mmu_t mmu, vm_page_t m) | moea64_quick_enter_page(vm_page_t m) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_paddr_t pa = VM_PAGE_TO_PHYS(m); | vm_paddr_t pa = VM_PAGE_TO_PHYS(m); | ||||
if (hw_direct_map) | if (hw_direct_map) | ||||
return (PHYS_TO_DMAP(pa)); | return (PHYS_TO_DMAP(pa)); | ||||
/* | /* | ||||
* MOEA64_PTE_REPLACE does some locking, so we can't just grab | * MOEA64_PTE_REPLACE does some locking, so we can't just grab | ||||
* a critical section and access the PCPU data like on i386. | * a critical section and access the PCPU data like on i386. | ||||
* Instead, pin the thread and grab the PCPU lock to prevent | * Instead, pin the thread and grab the PCPU lock to prevent | ||||
* a preempting thread from using the same PCPU data. | * a preempting thread from using the same PCPU data. | ||||
*/ | */ | ||||
sched_pin(); | sched_pin(); | ||||
mtx_assert(PCPU_PTR(aim.qmap_lock), MA_NOTOWNED); | mtx_assert(PCPU_PTR(aim.qmap_lock), MA_NOTOWNED); | ||||
pvo = PCPU_GET(aim.qmap_pvo); | pvo = PCPU_GET(aim.qmap_pvo); | ||||
mtx_lock(PCPU_PTR(aim.qmap_lock)); | mtx_lock(PCPU_PTR(aim.qmap_lock)); | ||||
pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) | | pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) | | ||||
(uint64_t)pa; | (uint64_t)pa; | ||||
MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE); | moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE); | ||||
isync(); | isync(); | ||||
return (PCPU_GET(qmap_addr)); | return (PCPU_GET(qmap_addr)); | ||||
} | } | ||||
void | void | ||||
moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr) | moea64_quick_remove_page(vm_offset_t addr) | ||||
{ | { | ||||
if (hw_direct_map) | if (hw_direct_map) | ||||
return; | return; | ||||
mtx_assert(PCPU_PTR(aim.qmap_lock), MA_OWNED); | mtx_assert(PCPU_PTR(aim.qmap_lock), MA_OWNED); | ||||
KASSERT(PCPU_GET(qmap_addr) == addr, | KASSERT(PCPU_GET(qmap_addr) == addr, | ||||
("moea64_quick_remove_page: invalid address")); | ("moea64_quick_remove_page: invalid address")); | ||||
mtx_unlock(PCPU_PTR(aim.qmap_lock)); | mtx_unlock(PCPU_PTR(aim.qmap_lock)); | ||||
sched_unpin(); | sched_unpin(); | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea64_page_is_mapped(mmu_t mmu, vm_page_t m) | moea64_page_is_mapped(vm_page_t m) | ||||
{ | { | ||||
return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); | return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); | ||||
} | } | ||||
/* | /* | ||||
* Map the given physical page at the specified virtual address in the | * Map the given physical page at the specified virtual address in the | ||||
* target pmap with the protection requested. If specified the page | * target pmap with the protection requested. If specified the page | ||||
* will be wired down. | * will be wired down. | ||||
*/ | */ | ||||
int | int | ||||
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, | moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, | ||||
vm_prot_t prot, u_int flags, int8_t psind) | vm_prot_t prot, u_int flags, int8_t psind) | ||||
{ | { | ||||
struct pvo_entry *pvo, *oldpvo; | struct pvo_entry *pvo, *oldpvo; | ||||
struct pvo_head *pvo_head; | struct pvo_head *pvo_head; | ||||
uint64_t pte_lo; | uint64_t pte_lo; | ||||
int error; | int error; | ||||
if ((m->oflags & VPO_UNMANAGED) == 0) { | if ((m->oflags & VPO_UNMANAGED) == 0) { | ||||
(26 lines elided) | moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
if (pvo->pvo_pmap == NULL) | if (pvo->pvo_pmap == NULL) | ||||
init_pvo_entry(pvo, pmap, va); | init_pvo_entry(pvo, pmap, va); | ||||
if (prot & VM_PROT_WRITE) | if (prot & VM_PROT_WRITE) | ||||
if (pmap_bootstrapped && | if (pmap_bootstrapped && | ||||
(m->oflags & VPO_UNMANAGED) == 0) | (m->oflags & VPO_UNMANAGED) == 0) | ||||
vm_page_aflag_set(m, PGA_WRITEABLE); | vm_page_aflag_set(m, PGA_WRITEABLE); | ||||
error = moea64_pvo_enter(mmu, pvo, pvo_head, &oldpvo); | error = moea64_pvo_enter(pvo, pvo_head, &oldpvo); | ||||
if (error == EEXIST) { | if (error == EEXIST) { | ||||
if (oldpvo->pvo_vaddr == pvo->pvo_vaddr && | if (oldpvo->pvo_vaddr == pvo->pvo_vaddr && | ||||
oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && | oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && | ||||
oldpvo->pvo_pte.prot == prot) { | oldpvo->pvo_pte.prot == prot) { | ||||
/* Identical mapping already exists */ | /* Identical mapping already exists */ | ||||
error = 0; | error = 0; | ||||
/* If not in page table, reinsert it */ | /* If not in page table, reinsert it */ | ||||
if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) { | if (moea64_pte_synch(oldpvo) < 0) { | ||||
STAT_MOEA64(moea64_pte_overflow--); | STAT_MOEA64(moea64_pte_overflow--); | ||||
MOEA64_PTE_INSERT(mmu, oldpvo); | moea64_pte_insert(oldpvo); | ||||
} | } | ||||
/* Then just clean up and go home */ | /* Then just clean up and go home */ | ||||
PV_PAGE_UNLOCK(m); | PV_PAGE_UNLOCK(m); | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
free_pvo_entry(pvo); | free_pvo_entry(pvo); | ||||
goto out; | goto out; | ||||
} else { | } else { | ||||
/* Otherwise, need to kill it first */ | /* Otherwise, need to kill it first */ | ||||
KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old " | KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old " | ||||
"mapping does not match new mapping")); | "mapping does not match new mapping")); | ||||
moea64_pvo_remove_from_pmap(mmu, oldpvo); | moea64_pvo_remove_from_pmap(oldpvo); | ||||
moea64_pvo_enter(mmu, pvo, pvo_head, NULL); | moea64_pvo_enter(pvo, pvo_head, NULL); | ||||
} | } | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
PV_PAGE_UNLOCK(m); | PV_PAGE_UNLOCK(m); | ||||
/* Free any dead pages */ | /* Free any dead pages */ | ||||
if (error == EEXIST) { | if (error == EEXIST) { | ||||
moea64_pvo_remove_from_page(mmu, oldpvo); | moea64_pvo_remove_from_page(oldpvo); | ||||
free_pvo_entry(oldpvo); | free_pvo_entry(oldpvo); | ||||
} | } | ||||
out: | out: | ||||
/* | /* | ||||
* Flush the page from the instruction cache if this page is | * Flush the page from the instruction cache if this page is | ||||
* mapped executable and cacheable. | * mapped executable and cacheable. | ||||
*/ | */ | ||||
if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 && | if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 && | ||||
(pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { | (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { | ||||
vm_page_aflag_set(m, PGA_EXECUTABLE); | vm_page_aflag_set(m, PGA_EXECUTABLE); | ||||
moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); | moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); | ||||
} | } | ||||
return (KERN_SUCCESS); | return (KERN_SUCCESS); | ||||
} | } | ||||
static void | static void | ||||
moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, | ||||
vm_size_t sz) | vm_size_t sz) | ||||
{ | { | ||||
/* | /* | ||||
* This is much trickier than on older systems because | * This is much trickier than on older systems because | ||||
* we can't sync the icache on physical addresses directly | * we can't sync the icache on physical addresses directly | ||||
* without a direct map. Instead we check a couple of cases | * without a direct map. Instead we check a couple of cases | ||||
* where the memory is already mapped in and, failing that, | * where the memory is already mapped in and, failing that, | ||||
Show All 11 Lines | if (!pmap_bootstrapped) { | ||||
__syncicache((void *)va, sz); | __syncicache((void *)va, sz); | ||||
} else if (hw_direct_map) { | } else if (hw_direct_map) { | ||||
__syncicache((void *)(uintptr_t)PHYS_TO_DMAP(pa), sz); | __syncicache((void *)(uintptr_t)PHYS_TO_DMAP(pa), sz); | ||||
} else { | } else { | ||||
/* Use the scratch page to set up a temp mapping */ | /* Use the scratch page to set up a temp mapping */ | ||||
mtx_lock(&moea64_scratchpage_mtx); | mtx_lock(&moea64_scratchpage_mtx); | ||||
moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF); | moea64_set_scratchpage_pa(1, pa & ~ADDR_POFF); | ||||
__syncicache((void *)(moea64_scratchpage_va[1] + | __syncicache((void *)(moea64_scratchpage_va[1] + | ||||
(va & ADDR_POFF)), sz); | (va & ADDR_POFF)), sz); | ||||
mtx_unlock(&moea64_scratchpage_mtx); | mtx_unlock(&moea64_scratchpage_mtx); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Maps a sequence of resident pages belonging to the same object. | * Maps a sequence of resident pages belonging to the same object. | ||||
* The sequence begins with the given page m_start. This page is | * The sequence begins with the given page m_start. This page is | ||||
* mapped at the given virtual address start. Each subsequent page is | * mapped at the given virtual address start. Each subsequent page is | ||||
* mapped at a virtual address that is offset from start by the same | * mapped at a virtual address that is offset from start by the same | ||||
* amount as the page is offset from m_start within the object. The | * amount as the page is offset from m_start within the object. The | ||||
* last page in the sequence is the page with the largest offset from | * last page in the sequence is the page with the largest offset from | ||||
* m_start that can be mapped at a virtual address less than the given | * m_start that can be mapped at a virtual address less than the given | ||||
* virtual address end. Not every virtual page between start and end | * virtual address end. Not every virtual page between start and end | ||||
* is mapped; only those for which a resident page exists with the | * is mapped; only those for which a resident page exists with the | ||||
* corresponding offset from m_start are mapped. | * corresponding offset from m_start are mapped. | ||||
*/ | */ | ||||
void | void | ||||
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, | moea64_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end, | ||||
vm_page_t m_start, vm_prot_t prot) | vm_page_t m_start, vm_prot_t prot) | ||||
{ | { | ||||
vm_page_t m; | vm_page_t m; | ||||
vm_pindex_t diff, psize; | vm_pindex_t diff, psize; | ||||
VM_OBJECT_ASSERT_LOCKED(m_start->object); | VM_OBJECT_ASSERT_LOCKED(m_start->object); | ||||
psize = atop(end - start); | psize = atop(end - start); | ||||
m = m_start; | m = m_start; | ||||
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | ||||
moea64_enter(mmu, pm, start + ptoa(diff), m, prot & | moea64_enter(pm, start + ptoa(diff), m, prot & | ||||
(VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP | | (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP | | ||||
PMAP_ENTER_QUICK_LOCKED, 0); | PMAP_ENTER_QUICK_LOCKED, 0); | ||||
m = TAILQ_NEXT(m, listq); | m = TAILQ_NEXT(m, listq); | ||||
} | } | ||||
} | } | ||||
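To make the sequence-mapping contract described above concrete, here is a minimal hypothetical caller (not part of this change; example_enter_window and its parameters are invented, and the file's existing includes are assumed to provide the declarations). It maps whatever pages of obj are resident below npages into a KVA window at kva, read-only, with the object lock held as the assertion requires.

static void
example_enter_window(pmap_t pm, vm_object_t obj, vm_offset_t kva,
    vm_pindex_t npages)
{
	vm_page_t m_start;

	VM_OBJECT_ASSERT_LOCKED(obj);
	/* First resident page at or after pindex 0. */
	m_start = vm_page_find_least(obj, 0);
	if (m_start != NULL && m_start->pindex < npages)
		moea64_enter_object(pm, kva + ptoa(m_start->pindex),
		    kva + ptoa(npages), m_start, VM_PROT_READ);
}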
void | void | ||||
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, | moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, | ||||
vm_prot_t prot) | vm_prot_t prot) | ||||
{ | { | ||||
moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), | moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), | ||||
PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0); | PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0); | ||||
} | } | ||||
vm_paddr_t | vm_paddr_t | ||||
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) | moea64_extract(pmap_t pm, vm_offset_t va) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
pvo = moea64_pvo_find_va(pm, va); | pvo = moea64_pvo_find_va(pm, va); | ||||
if (pvo == NULL) | if (pvo == NULL) | ||||
pa = 0; | pa = 0; | ||||
else | else | ||||
pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo)); | pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo)); | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
return (pa); | return (pa); | ||||
} | } | ||||
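Usage note (illustrative, not from the change): moea64_extract() composes the frame bits from the PVO with the in-page offset and returns 0 when no PVO covers the address, so a caller can treat 0 as "not mapped" (ignoring the corner case of physical page zero). The helper below is a made-up name:

static int
example_va_to_pa(pmap_t pm, vm_offset_t va, vm_paddr_t *pap)
{
	vm_paddr_t pa;

	pa = moea64_extract(pm, va);
	if (pa == 0)
		return (ENOENT);	/* no translation for va */
	*pap = pa;
	return (0);
}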
/* | /* | ||||
* Atomically extract and hold the physical page with the given | * Atomically extract and hold the physical page with the given | ||||
* pmap and virtual address pair if that mapping permits the given | * pmap and virtual address pair if that mapping permits the given | ||||
* protection. | * protection. | ||||
*/ | */ | ||||
vm_page_t | vm_page_t | ||||
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) | moea64_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_page_t m; | vm_page_t m; | ||||
m = NULL; | m = NULL; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); | pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); | ||||
if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) { | if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) { | ||||
m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); | m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); | ||||
if (!vm_page_wire_mapped(m)) | if (!vm_page_wire_mapped(m)) | ||||
m = NULL; | m = NULL; | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (m); | return (m); | ||||
} | } | ||||
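A sketch of the intended call pattern: the page comes back held (wired by vm_page_wire_mapped() above), so the caller is expected to drop that reference when finished. The function name and queue choice below are illustrative assumptions, not part of this change.

static bool
example_probe_writable(pmap_t pmap, vm_offset_t va)
{
	vm_page_t m;

	m = moea64_extract_and_hold(pmap, va, VM_PROT_WRITE);
	if (m == NULL)
		return (false);
	/* ... safely inspect or copy the page contents here ... */
	vm_page_unwire(m, PQ_ACTIVE);
	return (true);
}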
static mmu_t installed_mmu; | |||||
static void * | static void * | ||||
moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, | moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, | ||||
uint8_t *flags, int wait) | uint8_t *flags, int wait) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
vm_page_t m; | vm_page_t m; | ||||
int needed_lock; | int needed_lock; | ||||
Show All 21 Lines | moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, | ||||
pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; | pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; | ||||
if (needed_lock) | if (needed_lock) | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
init_pvo_entry(pvo, kernel_pmap, va); | init_pvo_entry(pvo, kernel_pmap, va); | ||||
pvo->pvo_vaddr |= PVO_WIRED; | pvo->pvo_vaddr |= PVO_WIRED; | ||||
moea64_pvo_enter(installed_mmu, pvo, NULL, NULL); | moea64_pvo_enter(pvo, NULL, NULL); | ||||
if (needed_lock) | if (needed_lock) | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) | if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) | ||||
bzero((void *)va, PAGE_SIZE); | bzero((void *)va, PAGE_SIZE); | ||||
return (void *)va; | return (void *)va; | ||||
} | } | ||||
extern int elf32_nxstack; | extern int elf32_nxstack; | ||||
void | void | ||||
moea64_init(mmu_t mmu) | moea64_init() | ||||
{ | { | ||||
CTR0(KTR_PMAP, "moea64_init"); | CTR0(KTR_PMAP, "moea64_init"); | ||||
moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), | moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), | ||||
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, | NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, | ||||
UMA_ZONE_VM | UMA_ZONE_NOFREE); | UMA_ZONE_VM | UMA_ZONE_NOFREE); | ||||
if (!hw_direct_map) { | if (!hw_direct_map) { | ||||
installed_mmu = mmu; | |||||
uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc); | uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc); | ||||
} | } | ||||
#ifdef COMPAT_FREEBSD32 | #ifdef COMPAT_FREEBSD32 | ||||
elf32_nxstack = 1; | elf32_nxstack = 1; | ||||
#endif | #endif | ||||
moea64_initialized = TRUE; | moea64_initialized = TRUE; | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea64_is_referenced(mmu_t mmu, vm_page_t m) | moea64_is_referenced(vm_page_t m) | ||||
{ | { | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea64_is_referenced: page %p is not managed", m)); | ("moea64_is_referenced: page %p is not managed", m)); | ||||
return (moea64_query_bit(mmu, m, LPTE_REF)); | return (moea64_query_bit(m, LPTE_REF)); | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea64_is_modified(mmu_t mmu, vm_page_t m) | moea64_is_modified(vm_page_t m) | ||||
{ | { | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea64_is_modified: page %p is not managed", m)); | ("moea64_is_modified: page %p is not managed", m)); | ||||
/* | /* | ||||
* If the page is not busied then this check is racy. | * If the page is not busied then this check is racy. | ||||
*/ | */ | ||||
if (!pmap_page_is_write_mapped(m)) | if (!pmap_page_is_write_mapped(m)) | ||||
return (FALSE); | return (FALSE); | ||||
return (moea64_query_bit(mmu, m, LPTE_CHG)); | return (moea64_query_bit(m, LPTE_CHG)); | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) | moea64_is_prefaultable(pmap_t pmap, vm_offset_t va) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
boolean_t rv = TRUE; | boolean_t rv = TRUE; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); | pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); | ||||
if (pvo != NULL) | if (pvo != NULL) | ||||
rv = FALSE; | rv = FALSE; | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
void | void | ||||
moea64_clear_modify(mmu_t mmu, vm_page_t m) | moea64_clear_modify(vm_page_t m) | ||||
{ | { | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea64_clear_modify: page %p is not managed", m)); | ("moea64_clear_modify: page %p is not managed", m)); | ||||
vm_page_assert_busied(m); | vm_page_assert_busied(m); | ||||
if (!pmap_page_is_write_mapped(m)) | if (!pmap_page_is_write_mapped(m)) | ||||
return; | return; | ||||
moea64_clear_bit(mmu, m, LPTE_CHG); | moea64_clear_bit(m, LPTE_CHG); | ||||
} | } | ||||
/* | /* | ||||
* Clear the write and modified bits in each of the given page's mappings. | * Clear the write and modified bits in each of the given page's mappings. | ||||
*/ | */ | ||||
void | void | ||||
moea64_remove_write(mmu_t mmu, vm_page_t m) | moea64_remove_write(vm_page_t m) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
int64_t refchg, ret; | int64_t refchg, ret; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea64_remove_write: page %p is not managed", m)); | ("moea64_remove_write: page %p is not managed", m)); | ||||
vm_page_assert_busied(m); | vm_page_assert_busied(m); | ||||
if (!pmap_page_is_write_mapped(m)) | if (!pmap_page_is_write_mapped(m)) | ||||
return; | return; | ||||
powerpc_sync(); | powerpc_sync(); | ||||
PV_PAGE_LOCK(m); | PV_PAGE_LOCK(m); | ||||
refchg = 0; | refchg = 0; | ||||
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | ||||
pmap = pvo->pvo_pmap; | pmap = pvo->pvo_pmap; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
if (!(pvo->pvo_vaddr & PVO_DEAD) && | if (!(pvo->pvo_vaddr & PVO_DEAD) && | ||||
(pvo->pvo_pte.prot & VM_PROT_WRITE)) { | (pvo->pvo_pte.prot & VM_PROT_WRITE)) { | ||||
pvo->pvo_pte.prot &= ~VM_PROT_WRITE; | pvo->pvo_pte.prot &= ~VM_PROT_WRITE; | ||||
ret = MOEA64_PTE_REPLACE(mmu, pvo, | ret = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE); | ||||
MOEA64_PTE_PROT_UPDATE); | |||||
if (ret < 0) | if (ret < 0) | ||||
ret = LPTE_CHG; | ret = LPTE_CHG; | ||||
refchg |= ret; | refchg |= ret; | ||||
if (pvo->pvo_pmap == kernel_pmap) | if (pvo->pvo_pmap == kernel_pmap) | ||||
isync(); | isync(); | ||||
} | } | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
Show All 11 Lines | |||||
* is necessary that 0 only be returned when there are truly no | * is necessary that 0 only be returned when there are truly no | ||||
* reference bits set. | * reference bits set. | ||||
* | * | ||||
* XXX: The exact number of bits to check and clear is a matter that | * XXX: The exact number of bits to check and clear is a matter that | ||||
* should be tested and standardized at some point in the future for | * should be tested and standardized at some point in the future for | ||||
* optimal aging of shared pages. | * optimal aging of shared pages. | ||||
*/ | */ | ||||
int | int | ||||
moea64_ts_referenced(mmu_t mmu, vm_page_t m) | moea64_ts_referenced(vm_page_t m) | ||||
{ | { | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea64_ts_referenced: page %p is not managed", m)); | ("moea64_ts_referenced: page %p is not managed", m)); | ||||
return (moea64_clear_bit(mmu, m, LPTE_REF)); | return (moea64_clear_bit(m, LPTE_REF)); | ||||
} | } | ||||
/* | /* | ||||
* Modify the WIMG settings of all mappings for a page. | * Modify the WIMG settings of all mappings for a page. | ||||
*/ | */ | ||||
void | void | ||||
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) | moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
int64_t refchg; | int64_t refchg; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
uint64_t lo; | uint64_t lo; | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) { | if ((m->oflags & VPO_UNMANAGED) != 0) { | ||||
m->md.mdpg_cache_attrs = ma; | m->md.mdpg_cache_attrs = ma; | ||||
return; | return; | ||||
} | } | ||||
lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); | lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); | ||||
PV_PAGE_LOCK(m); | PV_PAGE_LOCK(m); | ||||
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | ||||
pmap = pvo->pvo_pmap; | pmap = pvo->pvo_pmap; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
if (!(pvo->pvo_vaddr & PVO_DEAD)) { | if (!(pvo->pvo_vaddr & PVO_DEAD)) { | ||||
pvo->pvo_pte.pa &= ~LPTE_WIMG; | pvo->pvo_pte.pa &= ~LPTE_WIMG; | ||||
pvo->pvo_pte.pa |= lo; | pvo->pvo_pte.pa |= lo; | ||||
refchg = MOEA64_PTE_REPLACE(mmu, pvo, | refchg = moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE); | ||||
MOEA64_PTE_INVALIDATE); | |||||
if (refchg < 0) | if (refchg < 0) | ||||
refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ? | refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ? | ||||
LPTE_CHG : 0; | LPTE_CHG : 0; | ||||
if ((pvo->pvo_vaddr & PVO_MANAGED) && | if ((pvo->pvo_vaddr & PVO_MANAGED) && | ||||
(pvo->pvo_pte.prot & VM_PROT_WRITE)) { | (pvo->pvo_pte.prot & VM_PROT_WRITE)) { | ||||
refchg |= | refchg |= | ||||
atomic_readandclear_32(&m->md.mdpg_attrs); | atomic_readandclear_32(&m->md.mdpg_attrs); | ||||
if (refchg & LPTE_CHG) | if (refchg & LPTE_CHG) | ||||
Show All 9 Lines | moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma) | ||||
m->md.mdpg_cache_attrs = ma; | m->md.mdpg_cache_attrs = ma; | ||||
PV_PAGE_UNLOCK(m); | PV_PAGE_UNLOCK(m); | ||||
} | } | ||||
/* | /* | ||||
* Map a wired page into kernel virtual address space. | * Map a wired page into kernel virtual address space. | ||||
*/ | */ | ||||
void | void | ||||
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) | moea64_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) | ||||
{ | { | ||||
int error; | int error; | ||||
struct pvo_entry *pvo, *oldpvo; | struct pvo_entry *pvo, *oldpvo; | ||||
do { | do { | ||||
pvo = alloc_pvo_entry(0); | pvo = alloc_pvo_entry(0); | ||||
if (pvo == NULL) | if (pvo == NULL) | ||||
vm_wait(NULL); | vm_wait(NULL); | ||||
} while (pvo == NULL); | } while (pvo == NULL); | ||||
pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; | pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; | ||||
pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); | pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); | ||||
pvo->pvo_vaddr |= PVO_WIRED; | pvo->pvo_vaddr |= PVO_WIRED; | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
oldpvo = moea64_pvo_find_va(kernel_pmap, va); | oldpvo = moea64_pvo_find_va(kernel_pmap, va); | ||||
if (oldpvo != NULL) | if (oldpvo != NULL) | ||||
moea64_pvo_remove_from_pmap(mmu, oldpvo); | moea64_pvo_remove_from_pmap(oldpvo); | ||||
init_pvo_entry(pvo, kernel_pmap, va); | init_pvo_entry(pvo, kernel_pmap, va); | ||||
error = moea64_pvo_enter(mmu, pvo, NULL, NULL); | error = moea64_pvo_enter(pvo, NULL, NULL); | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
/* Free any dead pages */ | /* Free any dead pages */ | ||||
if (oldpvo != NULL) { | if (oldpvo != NULL) { | ||||
moea64_pvo_remove_from_page(mmu, oldpvo); | moea64_pvo_remove_from_page(oldpvo); | ||||
free_pvo_entry(oldpvo); | free_pvo_entry(oldpvo); | ||||
} | } | ||||
if (error != 0) | if (error != 0) | ||||
panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va, | panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va, | ||||
(uintmax_t)pa, error); | (uintmax_t)pa, error); | ||||
} | } | ||||
void | void | ||||
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) | moea64_kenter(vm_offset_t va, vm_paddr_t pa) | ||||
{ | { | ||||
moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); | moea64_kenter_attr(va, pa, VM_MEMATTR_DEFAULT); | ||||
} | } | ||||
/* | /* | ||||
* Extract the physical page address associated with the given kernel virtual | * Extract the physical page address associated with the given kernel virtual | ||||
* address. | * address. | ||||
*/ | */ | ||||
vm_paddr_t | vm_paddr_t | ||||
moea64_kextract(mmu_t mmu, vm_offset_t va) | moea64_kextract(vm_offset_t va) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
/* | /* | ||||
* Shortcut the direct-mapped case when applicable. We never put | * Shortcut the direct-mapped case when applicable. We never put | ||||
* anything but 1:1 (or 62-bit aliased) mappings below | * anything but 1:1 (or 62-bit aliased) mappings below | ||||
* VM_MIN_KERNEL_ADDRESS. | * VM_MIN_KERNEL_ADDRESS. | ||||
Show All 9 Lines | moea64_kextract(vm_offset_t va) | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
return (pa); | return (pa); | ||||
} | } | ||||
/* | /* | ||||
* Remove a wired page from kernel virtual address space. | * Remove a wired page from kernel virtual address space. | ||||
*/ | */ | ||||
void | void | ||||
moea64_kremove(mmu_t mmu, vm_offset_t va) | moea64_kremove(vm_offset_t va) | ||||
{ | { | ||||
moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); | moea64_remove(kernel_pmap, va, va + PAGE_SIZE); | ||||
} | } | ||||
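Taken together, moea64_kenter()/moea64_kremove() give a wired, cacheable window onto a single frame. A minimal hypothetical round trip, assuming m is a vm_page_t the caller owns and that kva_alloc()/kva_free() manage the virtual range:

static void
example_scratch_map(vm_page_t m)
{
	vm_offset_t va;

	va = kva_alloc(PAGE_SIZE);
	if (va == 0)
		return;
	moea64_kenter(va, VM_PAGE_TO_PHYS(m));	/* wired, RWX, cacheable */
	/* ... access the frame through va ... */
	moea64_kremove(va);
	kva_free(va, PAGE_SIZE);
}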
/* | /* | ||||
* Provide a kernel pointer corresponding to a given userland pointer. | * Provide a kernel pointer corresponding to a given userland pointer. | ||||
* The returned pointer is valid until the next time this function is | * The returned pointer is valid until the next time this function is | ||||
* called in this thread. This is used internally in copyin/copyout. | * called in this thread. This is used internally in copyin/copyout. | ||||
*/ | */ | ||||
static int | static int | ||||
moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr, | moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr, | ||||
void **kaddr, size_t ulen, size_t *klen) | void **kaddr, size_t ulen, size_t *klen) | ||||
{ | { | ||||
size_t l; | size_t l; | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
struct slb *slb; | struct slb *slb; | ||||
#endif | #endif | ||||
register_t slbv; | register_t slbv; | ||||
Show All 26 Lines | #else | ||||
/* Mark segment no-execute */ | /* Mark segment no-execute */ | ||||
slbv |= SR_N; | slbv |= SR_N; | ||||
#endif | #endif | ||||
/* If we have already set this VSID, we can just return */ | /* If we have already set this VSID, we can just return */ | ||||
if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv) | if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv) | ||||
return (0); | return (0); | ||||
__asm __volatile("isync"); | __asm __volatile("isync"); | ||||
curthread->td_pcb->pcb_cpu.aim.usr_segm = | curthread->td_pcb->pcb_cpu.aim.usr_segm = | ||||
(uintptr_t)uaddr >> ADDR_SR_SHFT; | (uintptr_t)uaddr >> ADDR_SR_SHFT; | ||||
curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv; | curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv; | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
__asm __volatile ("slbie %0; slbmte %1, %2; isync" :: | __asm __volatile ("slbie %0; slbmte %1, %2; isync" :: | ||||
"r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE)); | "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE)); | ||||
#else | #else | ||||
__asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv)); | __asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv)); | ||||
#endif | #endif | ||||
return (0); | return (0); | ||||
} | } | ||||
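Schematically, a copyin-style consumer uses this hook as in the sketch below: each call yields a kernel alias of at most *klen bytes, valid only until the next call from the same thread. This is a hedged illustration that assumes direct access to the static helper; the fault setup and error handling of the real copy routines are omitted.

static int
example_copyin_sketch(pmap_t pm, const void *uaddr, void *kdst, size_t len)
{
	void *kp;
	size_t l;

	while (len > 0) {
		if (moea64_map_user_ptr(pm, uaddr, &kp, len, &l) != 0)
			return (EFAULT);
		bcopy(kp, kdst, l);
		uaddr = (const char *)uaddr + l;
		kdst = (char *)kdst + l;
		len -= l;
	}
	return (0);
}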
/* | /* | ||||
* Figure out where a given kernel pointer (usually in a fault) points | * Figure out where a given kernel pointer (usually in a fault) points | ||||
* to from the VM's perspective, potentially remapping into userland's | * to from the VM's perspective, potentially remapping into userland's | ||||
* address space. | * address space. | ||||
*/ | */ | ||||
static int | static int | ||||
moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user, | moea64_decode_kernel_ptr(vm_offset_t addr, int *is_user, | ||||
vm_offset_t *decoded_addr) | vm_offset_t *decoded_addr) | ||||
{ | { | ||||
vm_offset_t user_sr; | vm_offset_t user_sr; | ||||
if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { | if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { | ||||
user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; | user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; | ||||
addr &= ADDR_PIDX | ADDR_POFF; | addr &= ADDR_PIDX | ADDR_POFF; | ||||
addr |= user_sr << ADDR_SR_SHFT; | addr |= user_sr << ADDR_SR_SHFT; | ||||
Show All 12 Lines | |||||
* | * | ||||
* The value passed in *virt is a suggested virtual address for the mapping. | * The value passed in *virt is a suggested virtual address for the mapping. | ||||
* Architectures which can support a direct-mapped physical to virtual region | * Architectures which can support a direct-mapped physical to virtual region | ||||
* can return the appropriate address within that region, leaving '*virt' | * can return the appropriate address within that region, leaving '*virt' | ||||
* unchanged. Other architectures should map the pages starting at '*virt' and | * unchanged. Other architectures should map the pages starting at '*virt' and | ||||
* update '*virt' with the first usable address after the mapped region. | * update '*virt' with the first usable address after the mapped region. | ||||
*/ | */ | ||||
vm_offset_t | vm_offset_t | ||||
moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, | moea64_map(vm_offset_t *virt, vm_paddr_t pa_start, | ||||
vm_paddr_t pa_end, int prot) | vm_paddr_t pa_end, int prot) | ||||
{ | { | ||||
vm_offset_t sva, va; | vm_offset_t sva, va; | ||||
if (hw_direct_map) { | if (hw_direct_map) { | ||||
/* | /* | ||||
* Check if every page in the region is covered by the direct | * Check if every page in the region is covered by the direct | ||||
* map. The direct map covers all of physical memory. Use | * map. The direct map covers all of physical memory. Use | ||||
* moea64_calc_wimg() as a shortcut to see if the page is in | * moea64_calc_wimg() as a shortcut to see if the page is in | ||||
* physical memory as a way to see if the direct map covers it. | * physical memory as a way to see if the direct map covers it. | ||||
*/ | */ | ||||
for (va = pa_start; va < pa_end; va += PAGE_SIZE) | for (va = pa_start; va < pa_end; va += PAGE_SIZE) | ||||
if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M) | if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M) | ||||
break; | break; | ||||
if (va == pa_end) | if (va == pa_end) | ||||
return (PHYS_TO_DMAP(pa_start)); | return (PHYS_TO_DMAP(pa_start)); | ||||
} | } | ||||
sva = *virt; | sva = *virt; | ||||
va = sva; | va = sva; | ||||
/* XXX respect prot argument */ | /* XXX respect prot argument */ | ||||
for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) | for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) | ||||
moea64_kenter(mmu, va, pa_start); | moea64_kenter(va, pa_start); | ||||
*virt = va; | *virt = va; | ||||
return (sva); | return (sva); | ||||
} | } | ||||
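A hypothetical illustration of the '*virt' in/out convention described above: on direct-map hardware both calls return DMAP addresses and leave the cursor untouched, otherwise each call maps at the cursor and advances it past the region it consumed. The cursor choice and variable names are invented for the example.

static void
example_map_two_ranges(vm_paddr_t a_start, vm_paddr_t a_end,
    vm_paddr_t b_start, vm_paddr_t b_end)
{
	vm_offset_t cursor, va_a, va_b;

	cursor = virtual_avail;		/* boot-time style cursor (assumption) */
	va_a = moea64_map(&cursor, a_start, a_end, VM_PROT_READ | VM_PROT_WRITE);
	va_b = moea64_map(&cursor, b_start, b_end, VM_PROT_READ | VM_PROT_WRITE);
	/* va_a/va_b are where the ranges landed; cursor is the next free VA. */
	(void)va_a; (void)va_b;
}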
/* | /* | ||||
* Returns true if the pmap's pv is one of the first | * Returns true if the pmap's pv is one of the first | ||||
* 16 pvs linked to from this page. This count may | * 16 pvs linked to from this page. This count may | ||||
* be changed upwards or downwards in the future; it | * be changed upwards or downwards in the future; it | ||||
* is only necessary that true be returned for a small | * is only necessary that true be returned for a small | ||||
* subset of pmaps for proper page aging. | * subset of pmaps for proper page aging. | ||||
*/ | */ | ||||
boolean_t | boolean_t | ||||
moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) | moea64_page_exists_quick(pmap_t pmap, vm_page_t m) | ||||
{ | { | ||||
int loops; | int loops; | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
boolean_t rv; | boolean_t rv; | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("moea64_page_exists_quick: page %p is not managed", m)); | ("moea64_page_exists_quick: page %p is not managed", m)); | ||||
loops = 0; | loops = 0; | ||||
rv = FALSE; | rv = FALSE; | ||||
PV_PAGE_LOCK(m); | PV_PAGE_LOCK(m); | ||||
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | ||||
if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) { | if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) { | ||||
rv = TRUE; | rv = TRUE; | ||||
break; | break; | ||||
} | } | ||||
if (++loops >= 16) | if (++loops >= 16) | ||||
break; | break; | ||||
} | } | ||||
PV_PAGE_UNLOCK(m); | PV_PAGE_UNLOCK(m); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
void | void | ||||
moea64_page_init(mmu_t mmu __unused, vm_page_t m) | moea64_page_init(vm_page_t m) | ||||
{ | { | ||||
m->md.mdpg_attrs = 0; | m->md.mdpg_attrs = 0; | ||||
m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; | m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; | ||||
LIST_INIT(&m->md.mdpg_pvoh); | LIST_INIT(&m->md.mdpg_pvoh); | ||||
} | } | ||||
/* | /* | ||||
* Return the number of managed mappings to the given physical page | * Return the number of managed mappings to the given physical page | ||||
* that are wired. | * that are wired. | ||||
*/ | */ | ||||
int | int | ||||
moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) | moea64_page_wired_mappings(vm_page_t m) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
int count; | int count; | ||||
count = 0; | count = 0; | ||||
if ((m->oflags & VPO_UNMANAGED) != 0) | if ((m->oflags & VPO_UNMANAGED) != 0) | ||||
return (count); | return (count); | ||||
PV_PAGE_LOCK(m); | PV_PAGE_LOCK(m); | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | for (i = 0; i < NVSIDS; i += VSID_NBPW) { | ||||
return (hash); | return (hash); | ||||
} | } | ||||
mtx_unlock(&moea64_slb_mutex); | mtx_unlock(&moea64_slb_mutex); | ||||
panic("%s: out of segments",__func__); | panic("%s: out of segments",__func__); | ||||
} | } | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
void | int | ||||
moea64_pinit(mmu_t mmu, pmap_t pmap) | moea64_pinit(pmap_t pmap) | ||||
{ | { | ||||
RB_INIT(&pmap->pmap_pvo); | RB_INIT(&pmap->pmap_pvo); | ||||
pmap->pm_slb_tree_root = slb_alloc_tree(); | pmap->pm_slb_tree_root = slb_alloc_tree(); | ||||
pmap->pm_slb = slb_alloc_user_cache(); | pmap->pm_slb = slb_alloc_user_cache(); | ||||
pmap->pm_slb_len = 0; | pmap->pm_slb_len = 0; | ||||
return (1); | |||||
} | } | ||||
#else | #else | ||||
void | int | ||||
moea64_pinit(mmu_t mmu, pmap_t pmap) | moea64_pinit(pmap_t pmap) | ||||
{ | { | ||||
int i; | int i; | ||||
uint32_t hash; | uint32_t hash; | ||||
RB_INIT(&pmap->pmap_pvo); | RB_INIT(&pmap->pmap_pvo); | ||||
if (pmap_bootstrapped) | if (pmap_bootstrapped) | ||||
pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, | pmap->pmap_phys = (pmap_t)moea64_kextract((vm_offset_t)pmap); | ||||
(vm_offset_t)pmap); | |||||
else | else | ||||
pmap->pmap_phys = pmap; | pmap->pmap_phys = pmap; | ||||
/* | /* | ||||
* Allocate some segment registers for this pmap. | * Allocate some segment registers for this pmap. | ||||
*/ | */ | ||||
hash = moea64_get_unique_vsid(); | hash = moea64_get_unique_vsid(); | ||||
for (i = 0; i < 16; i++) | for (i = 0; i < 16; i++) | ||||
pmap->pm_sr[i] = VSID_MAKE(i, hash); | pmap->pm_sr[i] = VSID_MAKE(i, hash); | ||||
KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); | KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); | ||||
return (1); | |||||
} | } | ||||
#endif | #endif | ||||
/* | /* | ||||
* Initialize the pmap associated with process 0. | * Initialize the pmap associated with process 0. | ||||
*/ | */ | ||||
void | void | ||||
moea64_pinit0(mmu_t mmu, pmap_t pm) | moea64_pinit0(pmap_t pm) | ||||
{ | { | ||||
PMAP_LOCK_INIT(pm); | PMAP_LOCK_INIT(pm); | ||||
moea64_pinit(mmu, pm); | moea64_pinit(pm); | ||||
bzero(&pm->pm_stats, sizeof(pm->pm_stats)); | bzero(&pm->pm_stats, sizeof(pm->pm_stats)); | ||||
} | } | ||||
/* | /* | ||||
* Set the physical protection on the specified range of this map as requested. | * Set the physical protection on the specified range of this map as requested. | ||||
*/ | */ | ||||
static void | static void | ||||
moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot) | moea64_pvo_protect(pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot) | ||||
{ | { | ||||
struct vm_page *pg; | struct vm_page *pg; | ||||
vm_prot_t oldprot; | vm_prot_t oldprot; | ||||
int32_t refchg; | int32_t refchg; | ||||
PMAP_LOCK_ASSERT(pm, MA_OWNED); | PMAP_LOCK_ASSERT(pm, MA_OWNED); | ||||
/* | /* | ||||
* Change the protection of the page. | * Change the protection of the page. | ||||
*/ | */ | ||||
oldprot = pvo->pvo_pte.prot; | oldprot = pvo->pvo_pte.prot; | ||||
pvo->pvo_pte.prot = prot; | pvo->pvo_pte.prot = prot; | ||||
pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); | pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); | ||||
/* | /* | ||||
* If the PVO is in the page table, update mapping | * If the PVO is in the page table, update mapping | ||||
*/ | */ | ||||
refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE); | refchg = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE); | ||||
if (refchg < 0) | if (refchg < 0) | ||||
refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0; | refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0; | ||||
if (pm != kernel_pmap && pg != NULL && | if (pm != kernel_pmap && pg != NULL && | ||||
(pg->a.flags & PGA_EXECUTABLE) == 0 && | (pg->a.flags & PGA_EXECUTABLE) == 0 && | ||||
(pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { | (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { | ||||
if ((pg->oflags & VPO_UNMANAGED) == 0) | if ((pg->oflags & VPO_UNMANAGED) == 0) | ||||
vm_page_aflag_set(pg, PGA_EXECUTABLE); | vm_page_aflag_set(pg, PGA_EXECUTABLE); | ||||
moea64_syncicache(mmu, pm, PVO_VADDR(pvo), | moea64_syncicache(pm, PVO_VADDR(pvo), | ||||
pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE); | pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE); | ||||
} | } | ||||
/* | /* | ||||
* Update vm about the REF/CHG bits if the page is managed and we have | * Update vm about the REF/CHG bits if the page is managed and we have | ||||
* removed write access. | * removed write access. | ||||
*/ | */ | ||||
if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) && | if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) && | ||||
(oldprot & VM_PROT_WRITE)) { | (oldprot & VM_PROT_WRITE)) { | ||||
refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); | refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); | ||||
if (refchg & LPTE_CHG) | if (refchg & LPTE_CHG) | ||||
vm_page_dirty(pg); | vm_page_dirty(pg); | ||||
if (refchg & LPTE_REF) | if (refchg & LPTE_REF) | ||||
vm_page_aflag_set(pg, PGA_REFERENCED); | vm_page_aflag_set(pg, PGA_REFERENCED); | ||||
} | } | ||||
} | } | ||||
void | void | ||||
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, | moea64_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, | ||||
vm_prot_t prot) | vm_prot_t prot) | ||||
{ | { | ||||
struct pvo_entry *pvo, *tpvo, key; | struct pvo_entry *pvo, *tpvo, key; | ||||
CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, | CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, | ||||
sva, eva, prot); | sva, eva, prot); | ||||
KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, | KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, | ||||
("moea64_protect: non current pmap")); | ("moea64_protect: non current pmap")); | ||||
if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | ||||
moea64_remove(mmu, pm, sva, eva); | moea64_remove(pm, sva, eva); | ||||
return; | return; | ||||
} | } | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
key.pvo_vaddr = sva; | key.pvo_vaddr = sva; | ||||
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | ||||
pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | ||||
tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); | tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); | ||||
moea64_pvo_protect(mmu, pm, pvo, prot); | moea64_pvo_protect(pm, pvo, prot); | ||||
} | } | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
} | } | ||||
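For instance (hypothetical fragment; sva and eva stand for the caller's range): revoking write access while keeping read and execute looks like the call below, whereas passing a protection without VM_PROT_READ is handled above by falling through to moea64_remove().

	/* Downgrade [sva, eva) of the current process's pmap to read+execute. */
	moea64_protect(&curproc->p_vmspace->vm_pmap, sva, eva,
	    VM_PROT_READ | VM_PROT_EXECUTE);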
/* | /* | ||||
* Map a list of wired pages into kernel virtual address space. This is | * Map a list of wired pages into kernel virtual address space. This is | ||||
* intended for temporary mappings which do not need page modification or | * intended for temporary mappings which do not need page modification or | ||||
* references recorded. Existing mappings in the region are overwritten. | * references recorded. Existing mappings in the region are overwritten. | ||||
*/ | */ | ||||
void | void | ||||
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) | moea64_qenter(vm_offset_t va, vm_page_t *m, int count) | ||||
{ | { | ||||
while (count-- > 0) { | while (count-- > 0) { | ||||
moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); | moea64_kenter(va, VM_PAGE_TO_PHYS(*m)); | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
m++; | m++; | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Remove page mappings from kernel virtual address space. Intended for | * Remove page mappings from kernel virtual address space. Intended for | ||||
* temporary mappings entered by moea64_qenter. | * temporary mappings entered by moea64_qenter. | ||||
*/ | */ | ||||
void | void | ||||
moea64_qremove(mmu_t mmu, vm_offset_t va, int count) | moea64_qremove(vm_offset_t va, int count) | ||||
{ | { | ||||
while (count-- > 0) { | while (count-- > 0) { | ||||
moea64_kremove(mmu, va); | moea64_kremove(va); | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
} | } | ||||
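The qenter/qremove pair is meant for exactly this kind of transient windowing. A minimal hypothetical sequence, assuming pages holds count wired vm_page_t pointers and kva is a preallocated kernel VA run of the same length:

	moea64_qenter(kva, pages, count);	/* overwrites any prior mappings */
	/* ... bcopy() or checksum through kva ... */
	moea64_qremove(kva, count);		/* tear the window down again */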
void | void | ||||
moea64_release_vsid(uint64_t vsid) | moea64_release_vsid(uint64_t vsid) | ||||
{ | { | ||||
int idx, mask; | int idx, mask; | ||||
mtx_lock(&moea64_slb_mutex); | mtx_lock(&moea64_slb_mutex); | ||||
idx = vsid & (NVSIDS-1); | idx = vsid & (NVSIDS-1); | ||||
mask = 1 << (idx % VSID_NBPW); | mask = 1 << (idx % VSID_NBPW); | ||||
idx /= VSID_NBPW; | idx /= VSID_NBPW; | ||||
KASSERT(moea64_vsid_bitmap[idx] & mask, | KASSERT(moea64_vsid_bitmap[idx] & mask, | ||||
("Freeing unallocated VSID %#jx", vsid)); | ("Freeing unallocated VSID %#jx", vsid)); | ||||
moea64_vsid_bitmap[idx] &= ~mask; | moea64_vsid_bitmap[idx] &= ~mask; | ||||
mtx_unlock(&moea64_slb_mutex); | mtx_unlock(&moea64_slb_mutex); | ||||
} | } | ||||
void | void | ||||
moea64_release(mmu_t mmu, pmap_t pmap) | moea64_release(pmap_t pmap) | ||||
{ | { | ||||
/* | /* | ||||
* Free segment registers' VSIDs | * Free segment registers' VSIDs | ||||
*/ | */ | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
slb_free_tree(pmap); | slb_free_tree(pmap); | ||||
slb_free_user_cache(pmap->pm_slb); | slb_free_user_cache(pmap->pm_slb); | ||||
#else | #else | ||||
KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); | KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); | ||||
moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); | moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); | ||||
#endif | #endif | ||||
} | } | ||||
/* | /* | ||||
* Remove all pages mapped by the specified pmap | * Remove all pages mapped by the specified pmap | ||||
*/ | */ | ||||
void | void | ||||
moea64_remove_pages(mmu_t mmu, pmap_t pm) | moea64_remove_pages(pmap_t pm) | ||||
{ | { | ||||
struct pvo_entry *pvo, *tpvo; | struct pvo_entry *pvo, *tpvo; | ||||
struct pvo_dlist tofree; | struct pvo_dlist tofree; | ||||
SLIST_INIT(&tofree); | SLIST_INIT(&tofree); | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) { | RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) { | ||||
if (pvo->pvo_vaddr & PVO_WIRED) | if (pvo->pvo_vaddr & PVO_WIRED) | ||||
continue; | continue; | ||||
/* | /* | ||||
* For locking reasons, remove this from the page table and | * For locking reasons, remove this from the page table and | ||||
* pmap, but save delinking from the vm_page for a second | * pmap, but save delinking from the vm_page for a second | ||||
* pass | * pass | ||||
*/ | */ | ||||
moea64_pvo_remove_from_pmap(mmu, pvo); | moea64_pvo_remove_from_pmap(pvo); | ||||
SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink); | SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink); | ||||
} | } | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
while (!SLIST_EMPTY(&tofree)) { | while (!SLIST_EMPTY(&tofree)) { | ||||
pvo = SLIST_FIRST(&tofree); | pvo = SLIST_FIRST(&tofree); | ||||
SLIST_REMOVE_HEAD(&tofree, pvo_dlink); | SLIST_REMOVE_HEAD(&tofree, pvo_dlink); | ||||
moea64_pvo_remove_from_page(mmu, pvo); | moea64_pvo_remove_from_page(pvo); | ||||
free_pvo_entry(pvo); | free_pvo_entry(pvo); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Remove the given range of addresses from the specified map. | * Remove the given range of addresses from the specified map. | ||||
*/ | */ | ||||
void | void | ||||
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) | moea64_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) | ||||
{ | { | ||||
struct pvo_entry *pvo, *tpvo, key; | struct pvo_entry *pvo, *tpvo, key; | ||||
struct pvo_dlist tofree; | struct pvo_dlist tofree; | ||||
/* | /* | ||||
* Perform an unsynchronized read. This is, however, safe. | * Perform an unsynchronized read. This is, however, safe. | ||||
*/ | */ | ||||
if (pm->pm_stats.resident_count == 0) | if (pm->pm_stats.resident_count == 0) | ||||
return; | return; | ||||
key.pvo_vaddr = sva; | key.pvo_vaddr = sva; | ||||
SLIST_INIT(&tofree); | SLIST_INIT(&tofree); | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); | ||||
pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | ||||
tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); | tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); | ||||
/* | /* | ||||
* For locking reasons, remove this from the page table and | * For locking reasons, remove this from the page table and | ||||
* pmap, but save delinking from the vm_page for a second | * pmap, but save delinking from the vm_page for a second | ||||
* pass | * pass | ||||
*/ | */ | ||||
moea64_pvo_remove_from_pmap(mmu, pvo); | moea64_pvo_remove_from_pmap(pvo); | ||||
SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink); | SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink); | ||||
} | } | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
while (!SLIST_EMPTY(&tofree)) { | while (!SLIST_EMPTY(&tofree)) { | ||||
pvo = SLIST_FIRST(&tofree); | pvo = SLIST_FIRST(&tofree); | ||||
SLIST_REMOVE_HEAD(&tofree, pvo_dlink); | SLIST_REMOVE_HEAD(&tofree, pvo_dlink); | ||||
moea64_pvo_remove_from_page(mmu, pvo); | moea64_pvo_remove_from_page(pvo); | ||||
free_pvo_entry(pvo); | free_pvo_entry(pvo); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Remove physical page from all pmaps in which it resides. moea64_pvo_remove() | * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() | ||||
* will reflect changes in pte's back to the vm_page. | * will reflect changes in pte's back to the vm_page. | ||||
*/ | */ | ||||
void | void | ||||
moea64_remove_all(mmu_t mmu, vm_page_t m) | moea64_remove_all(vm_page_t m) | ||||
{ | { | ||||
struct pvo_entry *pvo, *next_pvo; | struct pvo_entry *pvo, *next_pvo; | ||||
struct pvo_head freequeue; | struct pvo_head freequeue; | ||||
int wasdead; | int wasdead; | ||||
pmap_t pmap; | pmap_t pmap; | ||||
LIST_INIT(&freequeue); | LIST_INIT(&freequeue); | ||||
PV_PAGE_LOCK(m); | PV_PAGE_LOCK(m); | ||||
LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) { | LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) { | ||||
pmap = pvo->pvo_pmap; | pmap = pvo->pvo_pmap; | ||||
PMAP_LOCK(pmap); | PMAP_LOCK(pmap); | ||||
wasdead = (pvo->pvo_vaddr & PVO_DEAD); | wasdead = (pvo->pvo_vaddr & PVO_DEAD); | ||||
if (!wasdead) | if (!wasdead) | ||||
moea64_pvo_remove_from_pmap(mmu, pvo); | moea64_pvo_remove_from_pmap(pvo); | ||||
else | else | ||||
LIST_REMOVE(pvo, pvo_vlink); | LIST_REMOVE(pvo, pvo_vlink); | ||||
/* Don't let moea64_pvo_remove_from_page_locked() touch it. */ | /* Don't let moea64_pvo_remove_from_page_locked() touch it. */ | ||||
pvo->pvo_pmap = NULL; | pvo->pvo_pmap = NULL; | ||||
PMAP_UNLOCK(pmap); | PMAP_UNLOCK(pmap); | ||||
} | } | ||||
LIST_SWAP(&freequeue, vm_page_to_pvoh(m), pvo_entry, pvo_vlink); | LIST_SWAP(&freequeue, vm_page_to_pvoh(m), pvo_entry, pvo_vlink); | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | for (i = 0; phys_avail[i + 1] != 0; i += 2) { | ||||
} | } | ||||
return (s); | return (s); | ||||
} | } | ||||
panic("moea64_bootstrap_alloc: could not allocate memory"); | panic("moea64_bootstrap_alloc: could not allocate memory"); | ||||
} | } | ||||
static int | static int | ||||
moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head, | moea64_pvo_enter(struct pvo_entry *pvo, struct pvo_head *pvo_head, | ||||
struct pvo_entry **oldpvop) | struct pvo_entry **oldpvop) | ||||
{ | { | ||||
struct pvo_entry *old_pvo; | struct pvo_entry *old_pvo; | ||||
int err; | int err; | ||||
PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); | ||||
STAT_MOEA64(moea64_pvo_enter_calls++); | STAT_MOEA64(moea64_pvo_enter_calls++); | ||||
Show All 15 Lines | moea64_pvo_enter(struct pvo_entry *pvo, struct pvo_head *pvo_head, | ||||
if (pvo->pvo_vaddr & PVO_WIRED) | if (pvo->pvo_vaddr & PVO_WIRED) | ||||
pvo->pvo_pmap->pm_stats.wired_count++; | pvo->pvo_pmap->pm_stats.wired_count++; | ||||
pvo->pvo_pmap->pm_stats.resident_count++; | pvo->pvo_pmap->pm_stats.resident_count++; | ||||
/* | /* | ||||
* Insert it into the hardware page table | * Insert it into the hardware page table | ||||
*/ | */ | ||||
err = MOEA64_PTE_INSERT(mmu, pvo); | err = moea64_pte_insert(pvo); | ||||
if (err != 0) { | if (err != 0) { | ||||
panic("moea64_pvo_enter: overflow"); | panic("moea64_pvo_enter: overflow"); | ||||
} | } | ||||
STAT_MOEA64(moea64_pvo_entries++); | STAT_MOEA64(moea64_pvo_entries++); | ||||
if (pvo->pvo_pmap == kernel_pmap) | if (pvo->pvo_pmap == kernel_pmap) | ||||
isync(); | isync(); | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
/* | /* | ||||
* Make sure all our bootstrap mappings are in the SLB as soon | * Make sure all our bootstrap mappings are in the SLB as soon | ||||
* as virtual memory is switched on. | * as virtual memory is switched on. | ||||
*/ | */ | ||||
if (!pmap_bootstrapped) | if (!pmap_bootstrapped) | ||||
moea64_bootstrap_slb_prefault(PVO_VADDR(pvo), | moea64_bootstrap_slb_prefault(PVO_VADDR(pvo), | ||||
pvo->pvo_vaddr & PVO_LARGE); | pvo->pvo_vaddr & PVO_LARGE); | ||||
#endif | #endif | ||||
return (0); | return (0); | ||||
} | } | ||||
static void | static void | ||||
moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo) | moea64_pvo_remove_from_pmap(struct pvo_entry *pvo) | ||||
{ | { | ||||
struct vm_page *pg; | struct vm_page *pg; | ||||
int32_t refchg; | int32_t refchg; | ||||
KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap")); | KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap")); | ||||
PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); | PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); | ||||
KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO")); | KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO")); | ||||
/* | /* | ||||
* If there is an active pte entry, we need to deactivate it | * If there is an active pte entry, we need to deactivate it | ||||
*/ | */ | ||||
refchg = MOEA64_PTE_UNSET(mmu, pvo); | refchg = moea64_pte_unset(pvo); | ||||
if (refchg < 0) { | if (refchg < 0) { | ||||
/* | /* | ||||
* If it was evicted from the page table, be pessimistic and | * If it was evicted from the page table, be pessimistic and | ||||
* dirty the page. | * dirty the page. | ||||
*/ | */ | ||||
if (pvo->pvo_pte.prot & VM_PROT_WRITE) | if (pvo->pvo_pte.prot & VM_PROT_WRITE) | ||||
refchg = LPTE_CHG; | refchg = LPTE_CHG; | ||||
else | else | ||||
Show All 27 Lines | if (pg != NULL) { | ||||
vm_page_dirty(pg); | vm_page_dirty(pg); | ||||
if (refchg & LPTE_REF) | if (refchg & LPTE_REF) | ||||
vm_page_aflag_set(pg, PGA_REFERENCED); | vm_page_aflag_set(pg, PGA_REFERENCED); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
static inline void | static inline void | ||||
moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo, | moea64_pvo_remove_from_page_locked(struct pvo_entry *pvo, | ||||
vm_page_t m) | vm_page_t m) | ||||
{ | { | ||||
KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page")); | KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page")); | ||||
/* Use NULL pmaps as a sentinel for races in page deletion */ | /* Use NULL pmaps as a sentinel for races in page deletion */ | ||||
if (pvo->pvo_pmap == NULL) | if (pvo->pvo_pmap == NULL) | ||||
return; | return; | ||||
Show All 12 Lines | if (pvo->pvo_vaddr & PVO_MANAGED) { | ||||
} | } | ||||
} | } | ||||
STAT_MOEA64(moea64_pvo_entries--); | STAT_MOEA64(moea64_pvo_entries--); | ||||
STAT_MOEA64(moea64_pvo_remove_calls++); | STAT_MOEA64(moea64_pvo_remove_calls++); | ||||
} | } | ||||
static void | static void | ||||
moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo) | moea64_pvo_remove_from_page(struct pvo_entry *pvo) | ||||
{ | { | ||||
vm_page_t pg = NULL; | vm_page_t pg = NULL; | ||||
if (pvo->pvo_vaddr & PVO_MANAGED) | if (pvo->pvo_vaddr & PVO_MANAGED) | ||||
pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); | pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); | ||||
PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN); | PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN); | ||||
moea64_pvo_remove_from_page_locked(mmu, pvo, pg); | moea64_pvo_remove_from_page_locked(pvo, pg); | ||||
PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN); | PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN); | ||||
} | } | ||||
static struct pvo_entry * | static struct pvo_entry * | ||||
moea64_pvo_find_va(pmap_t pm, vm_offset_t va) | moea64_pvo_find_va(pmap_t pm, vm_offset_t va) | ||||
{ | { | ||||
struct pvo_entry key; | struct pvo_entry key; | ||||
PMAP_LOCK_ASSERT(pm, MA_OWNED); | PMAP_LOCK_ASSERT(pm, MA_OWNED); | ||||
key.pvo_vaddr = va & ~ADDR_POFF; | key.pvo_vaddr = va & ~ADDR_POFF; | ||||
return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key)); | return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key)); | ||||
} | } | ||||
static boolean_t | static boolean_t | ||||
moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit) | moea64_query_bit(vm_page_t m, uint64_t ptebit) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
int64_t ret; | int64_t ret; | ||||
boolean_t rv; | boolean_t rv; | ||||
/* | /* | ||||
* See if this bit is stored in the page already. | * See if this bit is stored in the page already. | ||||
*/ | */ | ||||
Show All 12 Lines | LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | ||||
/* | /* | ||||
* See if this pvo has a valid PTE. If so, fetch the | * See if this pvo has a valid PTE. If so, fetch the | ||||
* REF/CHG bits from the valid PTE. If the appropriate | * REF/CHG bits from the valid PTE. If the appropriate | ||||
* ptebit is set, return success. | * ptebit is set, return success. | ||||
*/ | */ | ||||
PMAP_LOCK(pvo->pvo_pmap); | PMAP_LOCK(pvo->pvo_pmap); | ||||
if (!(pvo->pvo_vaddr & PVO_DEAD)) | if (!(pvo->pvo_vaddr & PVO_DEAD)) | ||||
ret = MOEA64_PTE_SYNCH(mmu, pvo); | ret = moea64_pte_synch(pvo); | ||||
PMAP_UNLOCK(pvo->pvo_pmap); | PMAP_UNLOCK(pvo->pvo_pmap); | ||||
if (ret > 0) { | if (ret > 0) { | ||||
atomic_set_32(&m->md.mdpg_attrs, | atomic_set_32(&m->md.mdpg_attrs, | ||||
ret & (LPTE_CHG | LPTE_REF)); | ret & (LPTE_CHG | LPTE_REF)); | ||||
if (ret & ptebit) { | if (ret & ptebit) { | ||||
rv = TRUE; | rv = TRUE; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
PV_PAGE_UNLOCK(m); | PV_PAGE_UNLOCK(m); | ||||
return (rv); | return (rv); | ||||
} | } | ||||
static u_int | static u_int | ||||
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) | moea64_clear_bit(vm_page_t m, u_int64_t ptebit) | ||||
{ | { | ||||
u_int count; | u_int count; | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
int64_t ret; | int64_t ret; | ||||
/* | /* | ||||
* Sync so that any pending REF/CHG bits are flushed to the PTEs (so | * Sync so that any pending REF/CHG bits are flushed to the PTEs (so | ||||
* we can reset the right ones). | * we can reset the right ones). | ||||
*/ | */ | ||||
powerpc_sync(); | powerpc_sync(); | ||||
/* | /* | ||||
* For each pvo entry, clear the pte's ptebit. | * For each pvo entry, clear the pte's ptebit. | ||||
*/ | */ | ||||
count = 0; | count = 0; | ||||
PV_PAGE_LOCK(m); | PV_PAGE_LOCK(m); | ||||
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { | ||||
ret = 0; | ret = 0; | ||||
PMAP_LOCK(pvo->pvo_pmap); | PMAP_LOCK(pvo->pvo_pmap); | ||||
if (!(pvo->pvo_vaddr & PVO_DEAD)) | if (!(pvo->pvo_vaddr & PVO_DEAD)) | ||||
ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit); | ret = moea64_pte_clear(pvo, ptebit); | ||||
PMAP_UNLOCK(pvo->pvo_pmap); | PMAP_UNLOCK(pvo->pvo_pmap); | ||||
if (ret > 0 && (ret & ptebit)) | if (ret > 0 && (ret & ptebit)) | ||||
count++; | count++; | ||||
} | } | ||||
atomic_clear_32(&m->md.mdpg_attrs, ptebit); | atomic_clear_32(&m->md.mdpg_attrs, ptebit); | ||||
PV_PAGE_UNLOCK(m); | PV_PAGE_UNLOCK(m); | ||||
return (count); | return (count); | ||||
} | } | ||||
boolean_t | boolean_t | ||||
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) | moea64_dev_direct_mapped(vm_paddr_t pa, vm_size_t size) | ||||
{ | { | ||||
struct pvo_entry *pvo, key; | struct pvo_entry *pvo, key; | ||||
vm_offset_t ppa; | vm_offset_t ppa; | ||||
int error = 0; | int error = 0; | ||||
if (hw_direct_map && mem_valid(pa, size) == 0) | if (hw_direct_map && mem_valid(pa, size) == 0) | ||||
return (0); | return (0); | ||||
Show All 15 Lines | |||||
/* | /* | ||||
* Map a set of physical memory pages into the kernel virtual | * Map a set of physical memory pages into the kernel virtual | ||||
* address space. Return a pointer to where it is mapped. This | * address space. Return a pointer to where it is mapped. This | ||||
* routine is intended to be used for mapping device memory, | * routine is intended to be used for mapping device memory, | ||||
* NOT real memory. | * NOT real memory. | ||||
*/ | */ | ||||
void * | void * | ||||
moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) | moea64_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) | ||||
{ | { | ||||
vm_offset_t va, tmpva, ppa, offset; | vm_offset_t va, tmpva, ppa, offset; | ||||
ppa = trunc_page(pa); | ppa = trunc_page(pa); | ||||
offset = pa & PAGE_MASK; | offset = pa & PAGE_MASK; | ||||
size = roundup2(offset + size, PAGE_SIZE); | size = roundup2(offset + size, PAGE_SIZE); | ||||
va = kva_alloc(size); | va = kva_alloc(size); | ||||
if (!va) | if (!va) | ||||
panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); | panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); | ||||
for (tmpva = va; size > 0;) { | for (tmpva = va; size > 0;) { | ||||
moea64_kenter_attr(mmu, tmpva, ppa, ma); | moea64_kenter_attr(tmpva, ppa, ma); | ||||
size -= PAGE_SIZE; | size -= PAGE_SIZE; | ||||
tmpva += PAGE_SIZE; | tmpva += PAGE_SIZE; | ||||
ppa += PAGE_SIZE; | ppa += PAGE_SIZE; | ||||
} | } | ||||
return ((void *)(va + offset)); | return ((void *)(va + offset)); | ||||
} | } | ||||
void * | void * | ||||
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) | moea64_mapdev(vm_paddr_t pa, vm_size_t size) | ||||
{ | { | ||||
return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT); | return moea64_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT); | ||||
} | } | ||||
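A typical consumer pattern, shown as a hedged sketch: map a device register window with an uncacheable attribute, touch it, and release the KVA. The physical address, window size, and doorbell write are placeholders, and VM_MEMATTR_UNCACHEABLE is assumed to be the attribute a driver would pick for MMIO.

static void
example_poke_device(vm_paddr_t bar_pa)
{
	volatile uint32_t *regs;

	regs = moea64_mapdev_attr(bar_pa, PAGE_SIZE, VM_MEMATTR_UNCACHEABLE);
	regs[0] = 1;				/* hypothetical doorbell write */
	moea64_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}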
void | void | ||||
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) | moea64_unmapdev(vm_offset_t va, vm_size_t size) | ||||
{ | { | ||||
vm_offset_t base, offset; | vm_offset_t base, offset; | ||||
base = trunc_page(va); | base = trunc_page(va); | ||||
offset = va & PAGE_MASK; | offset = va & PAGE_MASK; | ||||
size = roundup2(offset + size, PAGE_SIZE); | size = roundup2(offset + size, PAGE_SIZE); | ||||
kva_free(base, size); | kva_free(base, size); | ||||
} | } | ||||
void | void | ||||
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) | moea64_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_offset_t lim; | vm_offset_t lim; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
vm_size_t len; | vm_size_t len; | ||||
if (__predict_false(pm == NULL)) | if (__predict_false(pm == NULL)) | ||||
pm = &curthread->td_proc->p_vmspace->vm_pmap; | pm = &curthread->td_proc->p_vmspace->vm_pmap; | ||||
PMAP_LOCK(pm); | PMAP_LOCK(pm); | ||||
while (sz > 0) { | while (sz > 0) { | ||||
lim = round_page(va+1); | lim = round_page(va+1); | ||||
len = MIN(lim - va, sz); | len = MIN(lim - va, sz); | ||||
pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); | pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); | ||||
if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) { | if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) { | ||||
pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF); | pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF); | ||||
moea64_syncicache(mmu, pm, va, pa, len); | moea64_syncicache(pm, va, pa, len); | ||||
} | } | ||||
va += len; | va += len; | ||||
sz -= len; | sz -= len; | ||||
} | } | ||||
PMAP_UNLOCK(pm); | PMAP_UNLOCK(pm); | ||||
} | } | ||||
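A hedged example of when this is needed: after the kernel writes instructions into another process's text (e.g., a debugger planting a breakpoint), the modified range must be made coherent for instruction fetch. Everything here except moea64_sync_icache() itself is an illustrative assumption.

static void
example_after_patch(struct thread *td, vm_offset_t va, vm_size_t len)
{
	/* Flush the D-cache and invalidate the I-cache over the patched range. */
	moea64_sync_icache(&td->td_proc->p_vmspace->vm_pmap, va, len);
}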
void | void | ||||
moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va) | moea64_dumpsys_map(vm_paddr_t pa, size_t sz, void **va) | ||||
{ | { | ||||
*va = (void *)(uintptr_t)pa; | *va = (void *)(uintptr_t)pa; | ||||
} | } | ||||
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; | extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; | ||||
void | void | ||||
moea64_scan_init(mmu_t mmu) | moea64_scan_init() | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_offset_t va; | vm_offset_t va; | ||||
int i; | int i; | ||||
if (!do_minidump) { | if (!do_minidump) { | ||||
/* Initialize phys. segments for dumpsys(). */ | /* Initialize phys. segments for dumpsys(). */ | ||||
memset(&dump_map, 0, sizeof(dump_map)); | memset(&dump_map, 0, sizeof(dump_map)); | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | if (va < virtual_end) { | ||||
} | } | ||||
dump_map[2].pa_size = va - dump_map[2].pa_start; | dump_map[2].pa_size = va - dump_map[2].pa_start; | ||||
} | } | ||||
} | } | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
static size_t | static size_t | ||||
moea64_scan_pmap(mmu_t mmu) | moea64_scan_pmap() | ||||
{ | { | ||||
struct pvo_entry *pvo; | struct pvo_entry *pvo; | ||||
vm_paddr_t pa, pa_end; | vm_paddr_t pa, pa_end; | ||||
vm_offset_t va, pgva, kstart, kend, kstart_lp, kend_lp; | vm_offset_t va, pgva, kstart, kend, kstart_lp, kend_lp; | ||||
uint64_t lpsize; | uint64_t lpsize; | ||||
lpsize = moea64_large_page_size; | lpsize = moea64_large_page_size; | ||||
kstart = trunc_page((vm_offset_t)_etext); | kstart = trunc_page((vm_offset_t)_etext); | ||||
(41 lines not shown) | moea64_scan_pmap() |||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
return (sizeof(struct lpte) * moea64_pteg_count * 8); | return (sizeof(struct lpte) * moea64_pteg_count * 8); | ||||
} | } | ||||
static struct dump_context dump_ctx; | static struct dump_context dump_ctx; | ||||
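/* Prepare the context handed back to dumpsys() for walking all PTEG slots in blksz-sized chunks. */ | /* Prepare the context handed back to dumpsys() for walking all PTEG slots in blksz-sized chunks. */ | ||||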
static void * | static void * | ||||
moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs) | moea64_dump_pmap_init(unsigned blkpgs) | ||||
{ | { | ||||
dump_ctx.ptex = 0; | dump_ctx.ptex = 0; | ||||
dump_ctx.ptex_end = moea64_pteg_count * 8; | dump_ctx.ptex_end = moea64_pteg_count * 8; | ||||
dump_ctx.blksz = blkpgs * PAGE_SIZE; | dump_ctx.blksz = blkpgs * PAGE_SIZE; | ||||
return (&dump_ctx); | return (&dump_ctx); | ||||
} | } | ||||
#else | #else | ||||
static size_t | static size_t | ||||
moea64_scan_pmap(mmu_t mmu) | moea64_scan_pmap(void) | ||||
{ | { | ||||
return (0); | return (0); | ||||
} | } | ||||
static void * | static void * | ||||
moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs) | moea64_dump_pmap_init(unsigned blkpgs) | ||||
{ | { | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef __powerpc64__ | #ifdef __powerpc64__ | ||||
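/* Map a physically contiguous range into the kernel, using large pages whenever the alignment and remaining length allow it. */ | /* Map a physically contiguous range into the kernel, using large pages whenever the alignment and remaining length allow it. */ | ||||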
static void | static void | ||||
moea64_map_range(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_size_t npages) | moea64_map_range(vm_offset_t va, vm_paddr_t pa, vm_size_t npages) | ||||
{ | { | ||||
for (; npages > 0; --npages) { | for (; npages > 0; --npages) { | ||||
if (moea64_large_page_size != 0 && | if (moea64_large_page_size != 0 && | ||||
(pa & moea64_large_page_mask) == 0 && | (pa & moea64_large_page_mask) == 0 && | ||||
(va & moea64_large_page_mask) == 0 && | (va & moea64_large_page_mask) == 0 && | ||||
npages >= (moea64_large_page_size >> PAGE_SHIFT)) { | npages >= (moea64_large_page_size >> PAGE_SHIFT)) { | ||||
PMAP_LOCK(kernel_pmap); | PMAP_LOCK(kernel_pmap); | ||||
moea64_kenter_large(mmu, va, pa, 0, 0); | moea64_kenter_large(va, pa, 0, 0); | ||||
PMAP_UNLOCK(kernel_pmap); | PMAP_UNLOCK(kernel_pmap); | ||||
pa += moea64_large_page_size; | pa += moea64_large_page_size; | ||||
va += moea64_large_page_size; | va += moea64_large_page_size; | ||||
npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1; | npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1; | ||||
} else { | } else { | ||||
moea64_kenter(mmu, va, pa); | moea64_kenter(va, pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
va += PAGE_SIZE; | va += PAGE_SIZE; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
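/* Allocate and map the vm_page array during boot; with multiple NUMA domains, each domain's page structures are backed by memory allocated from that domain. */ | /* Allocate and map the vm_page array during boot; with multiple NUMA domains, each domain's page structures are backed by memory allocated from that domain. */ | ||||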
static void | static void | ||||
moea64_page_array_startup(mmu_t mmu, long pages) | moea64_page_array_startup(long pages) | ||||
{ | { | ||||
long dom_pages[MAXMEMDOM]; | long dom_pages[MAXMEMDOM]; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
vm_offset_t va, vm_page_base; | vm_offset_t va, vm_page_base; | ||||
vm_size_t needed, size; | vm_size_t needed, size; | ||||
long page; | long page; | ||||
int domain; | int domain; | ||||
int i; | int i; | ||||
vm_page_base = 0xd000000000000000ULL; | vm_page_base = 0xd000000000000000ULL; | ||||
/* Short-circuit single-domain systems. */ | /* Short-circuit single-domain systems. */ | ||||
if (vm_ndomains == 1) { | if (vm_ndomains == 1) { | ||||
size = round_page(pages * sizeof(struct vm_page)); | size = round_page(pages * sizeof(struct vm_page)); | ||||
pa = vm_phys_early_alloc(0, size); | pa = vm_phys_early_alloc(0, size); | ||||
vm_page_base = moea64_map(mmu, &vm_page_base, | vm_page_base = moea64_map(&vm_page_base, | ||||
pa, pa + size, VM_PROT_READ | VM_PROT_WRITE); | pa, pa + size, VM_PROT_READ | VM_PROT_WRITE); | ||||
vm_page_array_size = pages; | vm_page_array_size = pages; | ||||
vm_page_array = (vm_page_t)vm_page_base; | vm_page_array = (vm_page_t)vm_page_base; | ||||
return; | return; | ||||
} | } | ||||
page = 0; | page = 0; | ||||
for (i = 0; i < MAXMEMDOM; i++) | for (i = 0; i < MAXMEMDOM; i++) | ||||
(27 lines not shown) | for (i = 0; i < MAXMEMDOM && vm_page_array_size < pages; i++) { |||||
if (dom_pages[i] == 0) | if (dom_pages[i] == 0) | ||||
continue; | continue; | ||||
size = ulmin(pages - vm_page_array_size, dom_pages[i]); | size = ulmin(pages - vm_page_array_size, dom_pages[i]); | ||||
size = round_page(size * sizeof(struct vm_page)); | size = round_page(size * sizeof(struct vm_page)); | ||||
needed = size; | needed = size; | ||||
size = roundup2(size, moea64_large_page_size); | size = roundup2(size, moea64_large_page_size); | ||||
pa = vm_phys_early_alloc(i, size); | pa = vm_phys_early_alloc(i, size); | ||||
vm_page_array_size += size / sizeof(struct vm_page); | vm_page_array_size += size / sizeof(struct vm_page); | ||||
moea64_map_range(mmu, va, pa, size >> PAGE_SHIFT); | moea64_map_range(va, pa, size >> PAGE_SHIFT); | ||||
/* Scoot up domain 0, to reduce the domain page overlap. */ | /* Scoot up domain 0, to reduce the domain page overlap. */ | ||||
if (i == 0) | if (i == 0) | ||||
vm_page_base += size - needed; | vm_page_base += size - needed; | ||||
va += size; | va += size; | ||||
} | } | ||||
vm_page_array = (vm_page_t)vm_page_base; | vm_page_array = (vm_page_t)vm_page_base; | ||||
vm_page_array_size = pages; | vm_page_array_size = pages; | ||||
} | } | ||||
#endif | #endif | ||||
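/* Default for PTE methods a backend leaves unimplemented. */ | |||||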
static int64_t | |||||
moea64_null_method(void) | |||||
{ | |||||
return (0); | |||||
} | |||||
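/* Default pte_replace: remove the existing PTE and re-insert it, returning the referenced/changed bits collected by the removal. */ | |||||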
static int64_t | |||||
moea64_pte_replace_default(struct pvo_entry *pvo, int flags) | |||||
{ | |||||
int64_t refchg; | |||||
refchg = moea64_pte_unset(pvo); | |||||
moea64_pte_insert(pvo); | |||||
return (refchg); | |||||
} | |||||
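/* Backend PTE method table; the ifunc resolvers below pick each method from here and fall back to the listed default when the table, or the method, is not set. */ | |||||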
struct moea64_funcs *moea64_ops; | |||||
#define DEFINE_OEA64_IFUNC(ret, func, args, def) \ | |||||
DEFINE_IFUNC(, ret, moea64_##func, args) { \ | |||||
moea64_##func##_t f; \ | |||||
if (moea64_ops == NULL) \ | |||||
return ((moea64_##func##_t)def); \ | |||||
f = moea64_ops->func; \ | |||||
return (f != NULL ? f : (moea64_##func##_t)def);\ | |||||
} | |||||
DEFINE_OEA64_IFUNC(int64_t, pte_replace, (struct pvo_entry *, int), | |||||
moea64_pte_replace_default) | |||||
DEFINE_OEA64_IFUNC(int64_t, pte_insert, (struct pvo_entry *), moea64_null_method) | |||||
DEFINE_OEA64_IFUNC(int64_t, pte_unset, (struct pvo_entry *), moea64_null_method) | |||||
DEFINE_OEA64_IFUNC(int64_t, pte_clear, (struct pvo_entry *, uint64_t), | |||||
moea64_null_method) | |||||
DEFINE_OEA64_IFUNC(int64_t, pte_synch, (struct pvo_entry *), moea64_null_method) |
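For readers new to the ifunc-based dispatch introduced by this change, here is a minimal sketch, not part of the patch, of how a backend could register its PTE primitives. The backend name, the stub bodies, and the example_install() hook are hypothetical; the struct moea64_funcs field names are inferred from the DEFINE_OEA64_IFUNC() instantiations above, and the structure itself is assumed to be declared in mmu_oea64.h.

/*
 * Hypothetical backend registration sketch -- illustrative only.
 */
static int64_t
example_pte_insert(struct pvo_entry *pvo)
{
	/* A real backend would install pvo's PTE in its page table here. */
	return (0);
}

static int64_t
example_pte_unset(struct pvo_entry *pvo)
{
	/* A real backend would invalidate the PTE and return its ref/chg bits. */
	return (0);
}

static struct moea64_funcs example_funcs = {
	.pte_insert = example_pte_insert,
	.pte_unset  = example_pte_unset,
	/*
	 * pte_replace is left NULL, so moea64_pte_replace() resolves to
	 * moea64_pte_replace_default(); pte_clear and pte_synch resolve to
	 * moea64_null_method().
	 */
};

static void
example_install(void)
{
	/* Must run before the moea64_pte_* ifuncs are first resolved. */
	moea64_ops = &example_funcs;
}

Because the resolver returns def whenever moea64_ops or the selected member is NULL, a backend only has to fill in the methods it actually overrides, and registration must happen before the moea64_* ifuncs are first resolved.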