head/sys/powerpc/aim/mmu_oea64.c
[first 77 lines of the file elided]
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_dumpset.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/ifunc.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
[12 lines elided]
#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL
/*
 * Locking semantics:
 *
 * There are two locks of interest: the page locks and the pmap locks, which
 * protect their individual PVO lists and are locked in that order. The contents
 * of all PVO entries are protected by the locks of their respective pmaps.
 * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
 * into any list.
[16 lines elided]
#define	PV_LOCKPTR(pa)		((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
#define	PV_LOCK(pa)		mtx_lock(PV_LOCKPTR(pa))
#define	PV_UNLOCK(pa)		mtx_unlock(PV_LOCKPTR(pa))
#define	PV_LOCKASSERT(pa)	mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
#define	PV_PAGE_LOCK(m)		PV_LOCK(VM_PAGE_TO_PHYS(m))
#define	PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
#define	PV_PAGE_LOCKASSERT(m)	PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
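/*
 * Illustrative aside, not part of the change: the page locks above are
 * striped. PV_LOCK_IDX() (defined in lines elided above) hashes a physical
 * address into a fixed pv_lock[] array of mutexes, so unrelated pages may
 * share a lock. A minimal standalone sketch of such an indexing scheme
 * follows; PAGE_SHIFT_DEMO, PV_LOCK_COUNT_DEMO and the modulo hash are
 * assumptions made for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define	PAGE_SHIFT_DEMO		12	/* assumed 4 KB pages */
#define	PV_LOCK_COUNT_DEMO	64	/* assumed table size */
#define	PV_LOCK_IDX_DEMO(pa)	(((pa) >> PAGE_SHIFT_DEMO) % PV_LOCK_COUNT_DEMO)

int
main(void)
{
	uint64_t pa = 0x2000;
	uint64_t pa2 = pa + ((uint64_t)PV_LOCK_COUNT_DEMO << PAGE_SHIFT_DEMO);

	/* Pages PV_LOCK_COUNT_DEMO frames apart collide on the same lock. */
	printf("idx(%#jx)=%d idx(%#jx)=%d\n",
	    (uintmax_t)pa, (int)PV_LOCK_IDX_DEMO(pa),
	    (uintmax_t)pa2, (int)PV_LOCK_IDX_DEMO(pa2));
	return (0);
}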
/* Superpage PV lock */

#define	PV_LOCK_SIZE		(1<<PDRSHIFT)

static __always_inline void
moea64_sp_pv_lock(vm_paddr_t pa)
{
	vm_paddr_t pa_end;

	/* Note: breaking when pa_end is reached to avoid overflows */
	pa_end = pa + (HPT_SP_SIZE - PV_LOCK_SIZE);
	for (;;) {
		mtx_lock_flags(PV_LOCKPTR(pa), MTX_DUPOK);
		if (pa == pa_end)
			break;
		pa += PV_LOCK_SIZE;
	}
}

static __always_inline void
moea64_sp_pv_unlock(vm_paddr_t pa)
{
	vm_paddr_t pa_end;

	/* Note: breaking when pa_end is reached to avoid overflows */
	pa_end = pa;
	pa += HPT_SP_SIZE - PV_LOCK_SIZE;
	for (;;) {
		mtx_unlock_flags(PV_LOCKPTR(pa), MTX_DUPOK);
		if (pa == pa_end)
			break;
		pa -= PV_LOCK_SIZE;
	}
}

#define	SP_PV_LOCK_ALIGNED(pa)		moea64_sp_pv_lock(pa)
#define	SP_PV_UNLOCK_ALIGNED(pa)	moea64_sp_pv_unlock(pa)
#define	SP_PV_LOCK(pa)			moea64_sp_pv_lock((pa) & ~HPT_SP_MASK)
#define	SP_PV_UNLOCK(pa)		moea64_sp_pv_unlock((pa) & ~HPT_SP_MASK)
#define	SP_PV_PAGE_LOCK(m)		SP_PV_LOCK(VM_PAGE_TO_PHYS(m))
#define	SP_PV_PAGE_UNLOCK(m)		SP_PV_UNLOCK(VM_PAGE_TO_PHYS(m))
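/*
 * Illustrative aside, not part of the change: the "breaking when pa_end is
 * reached" comments above matter because computing one-past-the-end of the
 * last superpage region could wrap around the top of the physical address
 * space, making a "pa < pa_end" loop exit immediately. A standalone model
 * of the same loop shape, shrunk to 16-bit addresses so the wraparound is
 * easy to see (the 0xf000/0x400/0x1000 values are arbitrary):
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/*
	 * pa + size == 0x10000 wraps to 0 in 16 bits, so "pa < pa + size"
	 * would fail at once; comparing against the last valid stripe
	 * (pa_end = pa + size - stride) sidesteps the overflow.
	 */
	uint16_t pa = 0xf000, stride = 0x400, size = 0x1000;
	uint16_t pa_end = pa + (size - stride);
	int locks = 0;

	for (;;) {
		locks++;		/* stand-in for mtx_lock_flags() */
		if (pa == pa_end)
			break;
		pa += stride;
	}
	printf("locked %d stripes\n", locks);	/* prints 4 */
	return (0);
}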
struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	uint64_t om_pa;
	cell_t	om_mode;
};

extern unsigned char _etext[];
[72 lines elided]
vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
struct mtx	moea64_scratchpage_mtx;

uint64_t	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;
bool		moea64_has_lp_4k_16m = false;
/*
 * PVO calls.
 */
static int	moea64_pvo_enter(struct pvo_entry *pvo,
		    struct pvo_head *pvo_head, struct pvo_entry **oldpvo);
static void	moea64_pvo_remove_from_pmap(struct pvo_entry *pvo);
static void	moea64_pvo_remove_from_page(struct pvo_entry *pvo);
static void	moea64_pvo_remove_from_page_locked(
		    struct pvo_entry *pvo, vm_page_t m);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(vm_page_t, uint64_t);
static u_int		moea64_clear_bit(vm_page_t, uint64_t);
static void		moea64_kremove(vm_offset_t);
static void		moea64_syncicache(pmap_t pmap, vm_offset_t va,
			    vm_paddr_t pa, vm_size_t sz);
static void		moea64_pmap_init_qpages(void);
static void		moea64_remove_locked(pmap_t, vm_offset_t,
			    vm_offset_t, struct pvo_dlist *);
/*
 * Superpages data and routines.
 */

/*
 * PVO flags (in vaddr) that must match for promotion to succeed.
 * Note that protection bits are checked separately, as they reside in
 * another field.
 */
#define	PVO_FLAGS_PROMOTE	(PVO_WIRED | PVO_MANAGED | PVO_PTEGIDX_VALID)

#define	PVO_IS_SP(pvo)		(((pvo)->pvo_vaddr & PVO_LARGE) && \
				 (pvo)->pvo_pmap != kernel_pmap)

/* Get physical address from PVO. */
#define	PVO_PADDR(pvo)		moea64_pvo_paddr(pvo)

/* MD page flag indicating that the page is a superpage. */
#define	MDPG_ATTR_SP		0x40000000

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0,
    "VM/pmap parameters");

static int superpages_enabled = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled, CTLFLAG_RDTUN,
    &superpages_enabled, 0, "Enable support for transparent superpages");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, sp, CTLFLAG_RD, 0,
    "SP page mapping counters");

static u_long sp_demotions;
SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, demotions, CTLFLAG_RD,
    &sp_demotions, 0, "SP page demotions");

static u_long sp_mappings;
SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, mappings, CTLFLAG_RD,
    &sp_mappings, 0, "SP page mappings");

static u_long sp_p_failures;
SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_failures, CTLFLAG_RD,
    &sp_p_failures, 0, "SP page promotion failures");

static u_long sp_p_fail_pa;
SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_pa, CTLFLAG_RD,
    &sp_p_fail_pa, 0, "SP page promotion failure: PAs don't match");

static u_long sp_p_fail_flags;
SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_flags, CTLFLAG_RD,
    &sp_p_fail_flags, 0, "SP page promotion failure: page flags don't match");

static u_long sp_p_fail_prot;
SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_prot, CTLFLAG_RD,
    &sp_p_fail_prot, 0,
    "SP page promotion failure: page protections don't match");

static u_long sp_p_fail_wimg;
SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_wimg, CTLFLAG_RD,
    &sp_p_fail_wimg, 0, "SP page promotion failure: WIMG bits don't match");

static u_long sp_promotions;
SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, promotions, CTLFLAG_RD,
    &sp_promotions, 0, "SP page promotions");
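/*
 * Usage note (aside, not part of the change): superpage support is
 * controlled by the loader tunable vm.pmap.superpages_enabled declared
 * above (CTLFLAG_RDTUN), and the counters above it are exported read-only,
 * so promotion/demotion behavior can be observed at runtime with e.g.
 * "sysctl vm.pmap.sp" (vm.pmap.sp.promotions, vm.pmap.sp.demotions,
 * vm.pmap.sp.mappings and the vm.pmap.sp.p_fail_* failure breakdown).
 */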
static bool moea64_ps_enabled(pmap_t);
static void moea64_align_superpage(vm_object_t, vm_ooffset_t,
    vm_offset_t *, vm_size_t);

static int moea64_sp_enter(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind);
static struct pvo_entry *moea64_sp_remove(struct pvo_entry *sp,
    struct pvo_dlist *tofree);

static void moea64_sp_promote(pmap_t pmap, vm_offset_t va, vm_page_t m);
static void moea64_sp_demote_aligned(struct pvo_entry *sp);
static void moea64_sp_demote(struct pvo_entry *pvo);

static struct pvo_entry *moea64_sp_unwire(struct pvo_entry *sp);
static struct pvo_entry *moea64_sp_protect(struct pvo_entry *sp,
    vm_prot_t prot);

static int64_t moea64_sp_query(struct pvo_entry *pvo, uint64_t ptebit);
static int64_t moea64_sp_clear(struct pvo_entry *pvo, vm_page_t m,
    uint64_t ptebit);

static __inline bool moea64_sp_pvo_in_range(struct pvo_entry *pvo,
    vm_offset_t sva, vm_offset_t eva);

/*
 * Kernel MMU interface
 */
void moea64_clear_modify(vm_page_t);
void moea64_copy_page(vm_page_t, vm_page_t);
void moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea64_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    u_int flags, int8_t psind);
[89 lines elided; resumes inside "static struct pmap_funcs moea64_methods = {"]
	.deactivate = moea64_deactivate,
	.page_set_memattr = moea64_page_set_memattr,
	.quick_enter_page = moea64_quick_enter_page,
	.quick_remove_page = moea64_quick_remove_page,
	.page_is_mapped = moea64_page_is_mapped,
#ifdef __powerpc64__
	.page_array_startup = moea64_page_array_startup,
#endif
	.ps_enabled = moea64_ps_enabled,
	.align_superpage = moea64_align_superpage,

	/* Internal interfaces */
	.mapdev = moea64_mapdev,
	.mapdev_attr = moea64_mapdev_attr,
	.unmapdev = moea64_unmapdev,
	.kextract = moea64_kextract,
	.kenter = moea64_kenter,
	.kenter_attr = moea64_kenter_attr,
	.dev_direct_mapped = moea64_dev_direct_mapped,
	.dumpsys_pa_init = moea64_scan_init,
	.dumpsys_scan_pmap = moea64_scan_pmap,
	.dumpsys_dump_pmap_init = moea64_dump_pmap_init,
	.dumpsys_map_chunk = moea64_dumpsys_map,
	.map_user_ptr = moea64_map_user_ptr,
	.decode_kernel_ptr = moea64_decode_kernel_ptr,
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods);
/*
 * Get physical address from PVO.
 *
 * For superpages, the lower bits are not stored in pvo_pte.pa and must be
 * obtained from VA.
 */
static __always_inline vm_paddr_t
moea64_pvo_paddr(struct pvo_entry *pvo)
{
	vm_paddr_t pa;

	pa = (pvo)->pvo_pte.pa & LPTE_RPGN;

	if (PVO_IS_SP(pvo)) {
		pa &= ~HPT_SP_MASK; /* This is needed to clear LPTE_LP bits. */
		pa |= PVO_VADDR(pvo) & HPT_SP_MASK;
	}
	return (pa);
}
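/*
 * Illustrative aside, not part of the change: for a superpage PVO the bits
 * of pvo_pte.pa within HPT_SP_MASK carry LPTE_LP encoding rather than
 * address bits, so moea64_pvo_paddr() above rebuilds the physical address
 * from the superpage frame plus the offset taken from the VA. A standalone
 * rerun of that arithmetic, with HPT_SP_MASK assumed to be 16 MB - 1:
 */
#include <stdio.h>
#include <stdint.h>

#define	HPT_SP_SIZE_DEMO	(16UL * 1024 * 1024)
#define	HPT_SP_MASK_DEMO	(HPT_SP_SIZE_DEMO - 1)

int
main(void)
{
	uint64_t pte_pa = 0x40000012;	/* SP frame + low LP encoding bits */
	uint64_t va = 0x10003000;	/* VA of one 4 KB page inside the SP */
	uint64_t pa;

	pa = (pte_pa & ~HPT_SP_MASK_DEMO) | (va & HPT_SP_MASK_DEMO);
	printf("pa = %#jx\n", (uintmax_t)pa);	/* prints 0x40003000 */
	return (0);
}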
static struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{
	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
	return (&m->md.mdpg_pvoh);
}
[31 lines elided; resumes inside init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)]
	pvo->pvo_pmap = pmap;
	va &= ~ADDR_POFF;
	pvo->pvo_vaddr |= va;
	vsid = va_to_vsid(pmap, va);
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);

	if (pmap == kernel_pmap && (pvo->pvo_vaddr & PVO_LARGE) != 0)
		shift = moea64_large_page_shift;
	else
		shift = ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
	pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
}
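/*
 * Illustrative aside, not part of the change: the slot computed above is
 * the classic PowerPC hashed page table primary hash, VSID XOR page index,
 * masked by the PTEG count and scaled by 8 (eight PTEs per group). A
 * standalone sketch; ADDR_PIDX_DEMO/ADDR_PIDX_SHFT_DEMO model ADDR_PIDX
 * for 4 KB pages in a 256 MB segment and the PTEG mask is an arbitrary
 * assumption, not the real machine-dependent values.
 */
#include <stdio.h>
#include <stdint.h>

#define	ADDR_PIDX_SHFT_DEMO	12
#define	ADDR_PIDX_DEMO		0x000000000ffff000UL
#define	VSID_HASH_MASK_DEMO	0x0000007fffffffffUL

int
main(void)
{
	uint64_t vsid = 0x123456, va = 0x10007000, pteg_mask = 0xfff;
	uint64_t hash, slot;

	hash = (vsid & VSID_HASH_MASK_DEMO) ^
	    ((va & ADDR_PIDX_DEMO) >> ADDR_PIDX_SHFT_DEMO);
	slot = (hash & pteg_mask) << 3;
	printf("hash=%#jx slot=%#jx\n", (uintmax_t)hash, (uintmax_t)slot);
	return (0);
}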
static void
free_pvo_entry(struct pvo_entry *pvo)
{
[327 lines elided]
void
moea64_early_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int i, j;
	vm_size_t physsz, hwphyssz;
	vm_paddr_t kernelphysstart, kernelphysend;
	int rm_pavail;

	/* Level 0 reservations consist of 4096 pages (16MB superpage). */
	vm_level_0_order = 12;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
[415 lines elided; resumes inside moea64_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)]
	vm_page_t m;
	int64_t refchg;

	key.pvo_vaddr = sva;
	PMAP_LOCK(pm);
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if (PVO_IS_SP(pvo)) {
			if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
				pvo = moea64_sp_unwire(pvo);
				continue;
			} else {
				CTR1(KTR_PMAP, "%s: demote before unwire",
				    __func__);
				moea64_sp_demote(pvo);
			}
		}

		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
			    pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		refchg = moea64_pte_replace(pvo, 0 /* No invalidation */);
		if ((pvo->pvo_vaddr & PVO_MANAGED) &&
		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
			if (refchg < 0)
[269 lines elided]
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
int
moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int flags, int8_t psind)
{
	struct pvo_entry *pvo, *oldpvo, *tpvo;
	struct pvo_head *pvo_head;
	uint64_t pte_lo;
	int error;
	vm_paddr_t pa;

	if ((m->oflags & VPO_UNMANAGED) == 0) {
		if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
			VM_PAGE_OBJECT_BUSY_ASSERT(m);
		else
			VM_OBJECT_ASSERT_LOCKED(m->object);
	}

	if (psind > 0)
		return (moea64_sp_enter(pmap, va, m, prot, flags, psind));

	pvo = alloc_pvo_entry(0);
	if (pvo == NULL)
		return (KERN_RESOURCE_SHORTAGE);
	pvo->pvo_pmap = NULL; /* to be filled in later */
	pvo->pvo_pte.prot = prot;

	pa = VM_PAGE_TO_PHYS(m);
	pte_lo = moea64_calc_wimg(pa, pmap_page_get_memattr(m));
	pvo->pvo_pte.pa = pa | pte_lo;

	if ((flags & PMAP_ENTER_WIRED) != 0)
		pvo->pvo_vaddr |= PVO_WIRED;

	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
		pvo_head = NULL;
	} else {
		pvo_head = &m->md.mdpg_pvoh;
		pvo->pvo_vaddr |= PVO_MANAGED;
	}

	PV_LOCK(pa);
	PMAP_LOCK(pmap);
	if (pvo->pvo_pmap == NULL)
		init_pvo_entry(pvo, pmap, va);

	if (moea64_ps_enabled(pmap) &&
	    (tpvo = moea64_pvo_find_va(pmap, va & ~HPT_SP_MASK)) != NULL &&
	    PVO_IS_SP(tpvo)) {
		/* Demote SP before entering a regular page */
		CTR2(KTR_PMAP, "%s: demote before enter: va=%#jx",
		    __func__, (uintmax_t)va);
		moea64_sp_demote_aligned(tpvo);
	}

	if (prot & VM_PROT_WRITE)
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);

	error = moea64_pvo_enter(pvo, pvo_head, &oldpvo);
	if (error == EEXIST) {
		if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
		    oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
		    oldpvo->pvo_pte.prot == prot) {
			/* Identical mapping already exists */
			error = 0;

			/* If not in page table, reinsert it */
			if (moea64_pte_synch(oldpvo) < 0) {
				STAT_MOEA64(moea64_pte_overflow--);
				moea64_pte_insert(oldpvo);
			}

			/* Then just clean up and go home */
			PMAP_UNLOCK(pmap);
			PV_UNLOCK(pa);
			free_pvo_entry(pvo);
			pvo = NULL;
			goto out;
		} else {
			/* Otherwise, need to kill it first */
			KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
			    "mapping does not match new mapping"));
			moea64_pvo_remove_from_pmap(oldpvo);
			moea64_pvo_enter(pvo, pvo_head, NULL);
		}
	}
	PMAP_UNLOCK(pmap);
	PV_UNLOCK(pa);

	/* Free any dead pages */
	if (error == EEXIST) {
		moea64_pvo_remove_from_page(oldpvo);
		free_pvo_entry(oldpvo);
	}

out:
	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 &&
	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		vm_page_aflag_set(m, PGA_EXECUTABLE);
		moea64_syncicache(pmap, va, pa, PAGE_SIZE);
	}

	/*
	 * Try to promote pages.
	 *
	 * If the VA of the entered page is not aligned with its PA,
	 * don't try page promotion as it is not possible.
	 * This reduces the number of promotion failures dramatically.
	 */
	if (moea64_ps_enabled(pmap) && pmap != kernel_pmap && pvo != NULL &&
	    (pvo->pvo_vaddr & PVO_MANAGED) != 0 &&
	    (va & HPT_SP_MASK) == (pa & HPT_SP_MASK) &&
	    (m->flags & PG_FICTITIOUS) == 0 &&
	    vm_reserv_level_iffullpop(m) == 0)
		moea64_sp_promote(pmap, va, m);

	return (KERN_SUCCESS);
}
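/*
 * Illustrative aside, not part of the change: the promotion attempt above
 * requires VA and PA to be congruent modulo the superpage size, because a
 * single 16 MB mapping keeps a fixed VA-to-PA offset across the whole
 * superpage, so an aligned VA range can only be backed by an equally
 * aligned PA range. A standalone check, with HPT_SP_MASK assumed to be
 * 16 MB - 1:
 */
#include <stdio.h>
#include <stdint.h>

#define	HPT_SP_MASK_DEMO	((16UL * 1024 * 1024) - 1)

static int
promotable_alignment(uint64_t va, uint64_t pa)
{
	return ((va & HPT_SP_MASK_DEMO) == (pa & HPT_SP_MASK_DEMO));
}

int
main(void)
{
	/* Same offset within 16 MB: promotion may be attempted. */
	printf("%d\n", promotable_alignment(0x10003000, 0x20003000)); /* 1 */
	/* Offsets differ: no 16 MB mapping could ever cover this pair. */
	printf("%d\n", promotable_alignment(0x10004000, 0x20003000)); /* 0 */
	return (0);
}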
static void
moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_size_t sz)
{
[42 lines elided]
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;
	vm_offset_t va;
	int8_t psind;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	m = m_start;
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		va = start + ptoa(diff);
		if ((va & HPT_SP_MASK) == 0 && va + HPT_SP_SIZE <= end &&
		    m->psind == 1 && moea64_ps_enabled(pm))
			psind = 1;
		else
			psind = 0;
		moea64_enter(pm, va, m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE),
		    PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, psind);
		if (psind == 1)
			m = &m[HPT_SP_SIZE / PAGE_SIZE - 1];
		m = TAILQ_NEXT(m, listq);
	}
}
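/*
 * Illustrative aside, not part of the change: the loop above uses a 16 MB
 * entry (psind == 1) only when the VA is superpage-aligned, the whole
 * superpage fits below 'end', and the backing page starts a fully
 * populated reservation (m->psind == 1); it then skips ahead by
 * HPT_SP_SIZE / PAGE_SIZE - 1 == 4095 pages before the normal
 * TAILQ_NEXT() step. A standalone sketch of the decision, with assumed
 * 4 KB base pages and 16 MB superpages:
 */
#include <stdio.h>
#include <stdint.h>

#define	PAGE_SIZE_DEMO		4096UL
#define	HPT_SP_SIZE_DEMO	(16UL * 1024 * 1024)
#define	HPT_SP_MASK_DEMO	(HPT_SP_SIZE_DEMO - 1)

static int
pick_psind(uint64_t va, uint64_t end, int m_psind, int ps_enabled)
{
	return ((va & HPT_SP_MASK_DEMO) == 0 &&
	    va + HPT_SP_SIZE_DEMO <= end && m_psind == 1 && ps_enabled);
}

int
main(void)
{
	uint64_t start = 0x10000000, end = 0x12000000;

	printf("psind=%d\n", pick_psind(start, end, 1, 1));	/* 1 */
	printf("psind=%d\n",
	    pick_psind(start + PAGE_SIZE_DEMO, end, 1, 1));	/* 0 */
	printf("skip %lu pages\n", HPT_SP_SIZE_DEMO / PAGE_SIZE_DEMO - 1);
	return (0);
}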
void
moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{
[97 lines elided]
{
	CTR0(KTR_PMAP, "moea64_init");

	moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	/*
	 * Are large page mappings enabled?
	 */
	TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
	if (superpages_enabled) {
		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
		    ("moea64_init: can't assign to pagesizes[1]"));

		if (moea64_large_page_size == 0) {
			printf("mmu_oea64: HW does not support large pages. "
			    "Disabling superpages...\n");
			superpages_enabled = 0;
		} else if (!moea64_has_lp_4k_16m) {
			printf("mmu_oea64: "
			    "HW does not support mixed 4KB/16MB page sizes. "
			    "Disabling superpages...\n");
			superpages_enabled = 0;
		} else
			pagesizes[1] = HPT_SP_SIZE;
	}

	if (!hw_direct_map) {
		uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
	}

#ifdef COMPAT_FREEBSD32
	elf32_nxstack = 1;
#endif
[63 lines elided; resumes inside moea64_remove_write(vm_page_t m)]
	int64_t refchg, ret;
	pmap_t pmap;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_remove_write: page %p is not managed", m));
	vm_page_assert_busied(m);

	if (!pmap_page_is_write_mapped(m))
		return;

	powerpc_sync();
	PV_PAGE_LOCK(m);
	refchg = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD) &&
		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
			if (PVO_IS_SP(pvo)) {
				CTR1(KTR_PMAP, "%s: demote before remwr",
				    __func__);
				moea64_sp_demote(pvo);
			}
			pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
			ret = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
			if (ret < 0)
				ret = LPTE_CHG;
			refchg |= ret;
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
[32 lines elided]
void
moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
	struct pvo_entry *pvo;
	int64_t refchg;
	pmap_t pmap;
	uint64_t lo;

	CTR3(KTR_PMAP, "%s: pa=%#jx, ma=%#x",
	    __func__, (uintmax_t)VM_PAGE_TO_PHYS(m), ma);

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD)) {
			if (PVO_IS_SP(pvo)) {
				CTR1(KTR_PMAP,
				    "%s: demote before set_memattr", __func__);
				moea64_sp_demote(pvo);
			}
			pvo->pvo_pte.pa &= ~LPTE_WIMG;
			pvo->pvo_pte.pa |= lo;
			refchg = moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
			if (refchg < 0)
				refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
				    LPTE_CHG : 0;
			if ((pvo->pvo_vaddr & PVO_MANAGED) &&
			    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
[436 lines elided; resumes inside "if (refchg & LPTE_REF)"]
			vm_page_aflag_set(pg, PGA_REFERENCED);
	}
}

void
moea64_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct pvo_entry *pvo, key;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
	    sva, eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea64_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea64_remove(pm, sva, eva);
		return;
	}

	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if (PVO_IS_SP(pvo)) {
			if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
				pvo = moea64_sp_protect(pvo, prot);
				continue;
			} else {
				CTR1(KTR_PMAP, "%s: demote before protect",
				    __func__);
				moea64_sp_demote(pvo);
			}
		}
		moea64_pvo_protect(pm, pvo, prot);
	}
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
[83 lines elided; resumes inside moea64_remove_pages(pmap_t pm)]
	while (!SLIST_EMPTY(&tofree)) {
		pvo = SLIST_FIRST(&tofree);
		SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
		moea64_pvo_remove_from_page(pvo);
		free_pvo_entry(pvo);
	}
}
static void
moea64_remove_locked(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    struct pvo_dlist *tofree)
{
	struct pvo_entry *pvo, *tpvo, key;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		if (PVO_IS_SP(pvo)) {
			if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
				tpvo = moea64_sp_remove(pvo, tofree);
				continue;
			} else {
				CTR1(KTR_PMAP, "%s: demote before remove",
				    __func__);
				moea64_sp_demote(pvo);
			}
		}
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);

		/*
		 * For locking reasons, remove this from the page table and
		 * pmap, but save delinking from the vm_page for a second
		 * pass
		 */
		moea64_pvo_remove_from_pmap(pvo);
		SLIST_INSERT_HEAD(tofree, pvo, pvo_dlink);
	}
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry *pvo;
	struct pvo_dlist tofree;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pm->pm_stats.resident_count == 0)
		return;

	SLIST_INIT(&tofree);
	PMAP_LOCK(pm);
	moea64_remove_locked(pm, sva, eva, &tofree);
	PMAP_UNLOCK(pm);

	while (!SLIST_EMPTY(&tofree)) {
		pvo = SLIST_FIRST(&tofree);
		SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
		moea64_pvo_remove_from_page(pvo);
		free_pvo_entry(pvo);
	}
}
[13 lines elided; resumes inside moea64_remove_all(vm_page_t m)]
	LIST_INIT(&freequeue);

	PV_PAGE_LOCK(m);
	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		wasdead = (pvo->pvo_vaddr & PVO_DEAD);
		if (!wasdead) {
			if (PVO_IS_SP(pvo)) {
				CTR1(KTR_PMAP, "%s: demote before remove_all",
				    __func__);
				moea64_sp_demote(pvo);
			}
			moea64_pvo_remove_from_pmap(pvo);
		}
		moea64_pvo_remove_from_page_locked(pvo, m);
		if (!wasdead)
			LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
		PMAP_UNLOCK(pmap);
	}
	KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
	KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable"));
[216 lines elided]
}

static boolean_t
moea64_query_bit(vm_page_t m, uint64_t ptebit)
{
	struct pvo_entry *pvo;
	int64_t ret;
	boolean_t rv;
	vm_page_t sp;

	/*
	 * See if this bit is stored in the page already.
	 *
	 * For superpages, the bit is stored in the first vm page.
	 */
	if ((m->md.mdpg_attrs & ptebit) != 0 ||
	    ((sp = PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK)) != NULL &&
	    (sp->md.mdpg_attrs & (ptebit | MDPG_ATTR_SP)) ==
	    (ptebit | MDPG_ATTR_SP)))
		return (TRUE);

	/*
	 * Examine each PTE.  Sync so that any pending REF/CHG bits are
	 * flushed to the PTEs.
	 */
	rv = FALSE;
	powerpc_sync();
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (PVO_IS_SP(pvo)) {
			ret = moea64_sp_query(pvo, ptebit);
			/*
			 * If SP was not demoted, check its REF/CHG bits here.
			 */
			if (ret != -1) {
				if ((ret & ptebit) != 0) {
					rv = TRUE;
					break;
				}
				continue;
			}
			/* else, fallthrough */
		}

		ret = 0;

		/*
		 * See if this pvo has a valid PTE.  if so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
		PMAP_LOCK(pvo->pvo_pmap);
[29 lines elided; resumes inside moea64_clear_bit(vm_page_t m, u_int64_t ptebit)]
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pte's ptebit.
	 */
	count = 0;
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (PVO_IS_SP(pvo)) {
			if ((ret = moea64_sp_clear(pvo, m, ptebit)) != -1) {
				count += ret;
				continue;
			}
		}
		ret = 0;

		PMAP_LOCK(pvo->pvo_pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD))
			ret = moea64_pte_clear(pvo, ptebit);
		PMAP_UNLOCK(pvo->pvo_pmap);

		if (ret > 0 && (ret & ptebit))
[387 lines elided]
DEFINE_OEA64_IFUNC(int64_t, pte_replace, (struct pvo_entry *, int),
    moea64_pte_replace_default)
DEFINE_OEA64_IFUNC(int64_t, pte_insert, (struct pvo_entry *), moea64_null_method)
DEFINE_OEA64_IFUNC(int64_t, pte_unset, (struct pvo_entry *), moea64_null_method)
DEFINE_OEA64_IFUNC(int64_t, pte_clear, (struct pvo_entry *, uint64_t),
    moea64_null_method)
DEFINE_OEA64_IFUNC(int64_t, pte_synch, (struct pvo_entry *), moea64_null_method)
DEFINE_OEA64_IFUNC(int64_t, pte_insert_sp, (struct pvo_entry *), moea64_null_method)
DEFINE_OEA64_IFUNC(int64_t, pte_unset_sp, (struct pvo_entry *), moea64_null_method)
DEFINE_OEA64_IFUNC(int64_t, pte_replace_sp, (struct pvo_entry *), moea64_null_method)
/* Superpage functions */

/* MMU interface */

static bool
moea64_ps_enabled(pmap_t pmap)
{
	return (superpages_enabled);
}

static void
moea64_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t sp_offset;

	if (size < HPT_SP_SIZE)
		return;

	CTR4(KTR_PMAP, "%s: offs=%#jx, addr=%p, size=%#jx",
	    __func__, (uintmax_t)offset, addr, (uintmax_t)size);

	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	sp_offset = offset & HPT_SP_MASK;
	if (size - ((HPT_SP_SIZE - sp_offset) & HPT_SP_MASK) < HPT_SP_SIZE ||
	    (*addr & HPT_SP_MASK) == sp_offset)
		return;
	if ((*addr & HPT_SP_MASK) < sp_offset)
		*addr = (*addr & ~HPT_SP_MASK) + sp_offset;
	else
		*addr = ((*addr + HPT_SP_MASK) & ~HPT_SP_MASK) + sp_offset;
}
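/*
 * Illustrative aside, not part of the change: moea64_align_superpage()
 * shifts *addr to the next address whose offset within a superpage equals
 * the mapping's object offset, so later entries can be promoted. A
 * standalone rerun of the final arithmetic with assumed 16 MB superpages
 * (object coloring and the size check are omitted here):
 */
#include <stdio.h>
#include <stdint.h>

#define	HPT_SP_MASK_DEMO	((16UL * 1024 * 1024) - 1)

static uint64_t
align_demo(uint64_t addr, uint64_t sp_offset)
{
	if ((addr & HPT_SP_MASK_DEMO) == sp_offset)
		return (addr);
	if ((addr & HPT_SP_MASK_DEMO) < sp_offset)
		return ((addr & ~HPT_SP_MASK_DEMO) + sp_offset);
	return (((addr + HPT_SP_MASK_DEMO) & ~HPT_SP_MASK_DEMO) + sp_offset);
}

int
main(void)
{
	/* Offset below target: stay in the same 16 MB region. */
	printf("%#jx\n", (uintmax_t)align_demo(0x10200000, 0x300000));
	/* prints 0x10300000 */

	/* Offset above target: round up to the next 16 MB region. */
	printf("%#jx\n", (uintmax_t)align_demo(0x10500000, 0x300000));
	/* prints 0x11300000 */
	return (0);
}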
/* Helpers */

static __inline void
moea64_pvo_cleanup(struct pvo_dlist *tofree)
{
	struct pvo_entry *pvo;

	/* clean up */
	while (!SLIST_EMPTY(tofree)) {
		pvo = SLIST_FIRST(tofree);
		SLIST_REMOVE_HEAD(tofree, pvo_dlink);
		if (pvo->pvo_vaddr & PVO_DEAD)
			moea64_pvo_remove_from_page(pvo);
		free_pvo_entry(pvo);
	}
}

static __inline uint16_t
pvo_to_vmpage_flags(struct pvo_entry *pvo)
{
	uint16_t flags;

	flags = 0;
	if ((pvo->pvo_pte.prot & VM_PROT_WRITE) != 0)
		flags |= PGA_WRITEABLE;
	if ((pvo->pvo_pte.prot & VM_PROT_EXECUTE) != 0)
		flags |= PGA_EXECUTABLE;

	return (flags);
}

/*
 * Check if the given pvo and its superpage are in sva-eva range.
 */
static __inline bool
moea64_sp_pvo_in_range(struct pvo_entry *pvo, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t spva;

	spva = PVO_VADDR(pvo) & ~HPT_SP_MASK;
	if (spva >= sva && spva + HPT_SP_SIZE <= eva) {
		/*
		 * Because this function is intended to be called from loops
		 * that iterate over ordered pvo entries, if the condition
		 * above is true then the pvo must be the first of its
		 * superpage.
		 */
		KASSERT(PVO_VADDR(pvo) == spva,
		    ("%s: unexpected unaligned superpage pvo", __func__));
		return (true);
	}
	return (false);
}

/*
 * Update vm about the REF/CHG bits if the superpage is managed and
 * has (or had) write access.
 */
static void
moea64_sp_refchg_process(struct pvo_entry *sp, vm_page_t m,
    int64_t sp_refchg, vm_prot_t prot)
{
	vm_page_t m_end;
	int64_t refchg;

	if ((sp->pvo_vaddr & PVO_MANAGED) != 0 && (prot & VM_PROT_WRITE) != 0) {
		for (m_end = &m[HPT_SP_PAGES]; m < m_end; m++) {
			refchg = sp_refchg |
			    atomic_readandclear_32(&m->md.mdpg_attrs);
			if (refchg & LPTE_CHG)
				vm_page_dirty(m);
			if (refchg & LPTE_REF)
				vm_page_aflag_set(m, PGA_REFERENCED);
		}
	}
}
/* Superpage ops */

static int
moea64_sp_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int flags, int8_t psind)
{
	struct pvo_entry *pvo, **pvos;
	struct pvo_head *pvo_head;
	vm_offset_t sva;
	vm_page_t sm;
	vm_paddr_t pa, spa;
	bool sync;
	struct pvo_dlist tofree;
	int error, i;
	uint16_t aflags;

	KASSERT((va & HPT_SP_MASK) == 0, ("%s: va %#jx unaligned",
	    __func__, (uintmax_t)va));
	KASSERT(psind == 1, ("%s: invalid psind: %d", __func__, psind));
	KASSERT(m->psind == 1, ("%s: invalid m->psind: %d",
	    __func__, m->psind));
	KASSERT(pmap != kernel_pmap,
	    ("%s: function called with kernel pmap", __func__));

	CTR5(KTR_PMAP, "%s: va=%#jx, pa=%#jx, prot=%#x, flags=%#x, psind=1",
	    __func__, (uintmax_t)va, (uintmax_t)VM_PAGE_TO_PHYS(m),
	    prot, flags);

	SLIST_INIT(&tofree);

	sva = va;
	sm = m;
	spa = pa = VM_PAGE_TO_PHYS(sm);

	/* Try to allocate all PVOs first, to make failure handling easier. */
	pvos = malloc(HPT_SP_PAGES * sizeof(struct pvo_entry *), M_TEMP,
	    M_NOWAIT);
	if (pvos == NULL) {
		CTR1(KTR_PMAP, "%s: failed to alloc pvo array", __func__);
		return (KERN_RESOURCE_SHORTAGE);
	}
	for (i = 0; i < HPT_SP_PAGES; i++) {
		pvos[i] = alloc_pvo_entry(0);
		if (pvos[i] == NULL) {
			CTR1(KTR_PMAP, "%s: failed to alloc pvo", __func__);
			for (i = i - 1; i >= 0; i--)
				free_pvo_entry(pvos[i]);
			free(pvos, M_TEMP);
			return (KERN_RESOURCE_SHORTAGE);
		}
	}

	SP_PV_LOCK_ALIGNED(spa);
	PMAP_LOCK(pmap);

	/* Note: moea64_remove_locked() also clears cached REF/CHG bits. */
	moea64_remove_locked(pmap, va, va + HPT_SP_SIZE, &tofree);

	/* Enter pages */
	for (i = 0; i < HPT_SP_PAGES;
	    i++, va += PAGE_SIZE, pa += PAGE_SIZE, m++) {
		pvo = pvos[i];

		pvo->pvo_pte.prot = prot;
		pvo->pvo_pte.pa = (pa & ~LPTE_LP_MASK) | LPTE_LP_4K_16M |
		    moea64_calc_wimg(pa, pmap_page_get_memattr(m));

		if ((flags & PMAP_ENTER_WIRED) != 0)
			pvo->pvo_vaddr |= PVO_WIRED;
		pvo->pvo_vaddr |= PVO_LARGE;

		if ((m->oflags & VPO_UNMANAGED) != 0)
			pvo_head = NULL;
		else {
			pvo_head = &m->md.mdpg_pvoh;
			pvo->pvo_vaddr |= PVO_MANAGED;
		}

		init_pvo_entry(pvo, pmap, va);

		error = moea64_pvo_enter(pvo, pvo_head, NULL);
		/*
		 * All superpage PVOs were previously removed, so no errors
		 * should occur while inserting the new ones.
		 */
		KASSERT(error == 0, ("%s: unexpected error "
		    "when inserting superpage PVO: %d",
		    __func__, error));
	}

	PMAP_UNLOCK(pmap);
	SP_PV_UNLOCK_ALIGNED(spa);

	sync = (sm->a.flags & PGA_EXECUTABLE) == 0;
	/* Note: moea64_pvo_cleanup() also clears page prot. flags. */
	moea64_pvo_cleanup(&tofree);
	pvo = pvos[0];

	/* Set vm page flags */
	aflags = pvo_to_vmpage_flags(pvo);
	if (aflags != 0)
		for (m = sm; m < &sm[HPT_SP_PAGES]; m++)
			vm_page_aflag_set(m, aflags);

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if (sync && (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
		moea64_syncicache(pmap, sva, spa, HPT_SP_SIZE);

	atomic_add_long(&sp_mappings, 1);
	CTR3(KTR_PMAP, "%s: SP success for va %#jx in pmap %p",
	    __func__, (uintmax_t)sva, pmap);

	free(pvos, M_TEMP);
	return (KERN_SUCCESS);
}
static void
moea64_sp_promote(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	struct pvo_entry *first, *pvo;
	vm_paddr_t pa, pa_end;
	vm_offset_t sva, va_end;
	int64_t sp_refchg;

	/* This CTR may generate a lot of output. */
	/* CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)va); */

	va &= ~HPT_SP_MASK;
	sva = va;
	/* Get superpage */
	pa = VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK;
	m = PHYS_TO_VM_PAGE(pa);

	PMAP_LOCK(pmap);

	/*
	 * Check if all pages meet promotion criteria.
	 *
	 * XXX In some cases the loop below may be executed for each or most
	 * of the entered pages of a superpage, which can be expensive
	 * (although it was not profiled) and may need some optimization.
	 *
	 * Some cases where this seems to happen are:
	 * - When a superpage is first entered read-only and later becomes
	 *   read-write.
	 * - When some of the superpage's virtual addresses map to previously
	 *   wired/cached pages while others map to pages allocated from a
	 *   different physical address range. A common scenario where this
	 *   happens is when mmap'ing a file that is already present in FS
	 *   block cache and doesn't fill a superpage.
	 */
	first = pvo = moea64_pvo_find_va(pmap, sva);
	for (pa_end = pa + HPT_SP_SIZE;
	    pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE) {
		if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
			CTR3(KTR_PMAP,
			    "%s: NULL or dead PVO: pmap=%p, va=%#jx",
			    __func__, pmap, (uintmax_t)va);
			goto error;
		}
		if (PVO_PADDR(pvo) != pa) {
			CTR5(KTR_PMAP, "%s: PAs don't match: "
			    "pmap=%p, va=%#jx, pvo_pa=%#jx, exp_pa=%#jx",
			    __func__, pmap, (uintmax_t)va,
			    (uintmax_t)PVO_PADDR(pvo), (uintmax_t)pa);
			atomic_add_long(&sp_p_fail_pa, 1);
			goto error;
		}
		if ((first->pvo_vaddr & PVO_FLAGS_PROMOTE) !=
		    (pvo->pvo_vaddr & PVO_FLAGS_PROMOTE)) {
			CTR5(KTR_PMAP, "%s: PVO flags don't match: "
			    "pmap=%p, va=%#jx, pvo_flags=%#jx, exp_flags=%#jx",
			    __func__, pmap, (uintmax_t)va,
			    (uintmax_t)(pvo->pvo_vaddr & PVO_FLAGS_PROMOTE),
			    (uintmax_t)(first->pvo_vaddr & PVO_FLAGS_PROMOTE));
			atomic_add_long(&sp_p_fail_flags, 1);
			goto error;
		}
		if (first->pvo_pte.prot != pvo->pvo_pte.prot) {
			CTR5(KTR_PMAP, "%s: PVO protections don't match: "
			    "pmap=%p, va=%#jx, pvo_prot=%#x, exp_prot=%#x",
			    __func__, pmap, (uintmax_t)va,
			    pvo->pvo_pte.prot, first->pvo_pte.prot);
			atomic_add_long(&sp_p_fail_prot, 1);
			goto error;
		}
		if ((first->pvo_pte.pa & LPTE_WIMG) !=
		    (pvo->pvo_pte.pa & LPTE_WIMG)) {
			CTR5(KTR_PMAP, "%s: WIMG bits don't match: "
			    "pmap=%p, va=%#jx, pvo_wimg=%#jx, exp_wimg=%#jx",
			    __func__, pmap, (uintmax_t)va,
			    (uintmax_t)(pvo->pvo_pte.pa & LPTE_WIMG),
			    (uintmax_t)(first->pvo_pte.pa & LPTE_WIMG));
			atomic_add_long(&sp_p_fail_wimg, 1);
			goto error;
		}

		pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo);
	}

	/* All OK, promote. */

	/*
	 * Handle superpage REF/CHG bits. If REF or CHG is set in
	 * any page, then it must be set in the superpage.
	 *
	 * Instead of querying each page, we take advantage of two facts:
	 * 1- If a page is being promoted, it was referenced.
	 * 2- If promoted pages are writable, they were modified.
	 */
	sp_refchg = LPTE_REF |
	    ((first->pvo_pte.prot & VM_PROT_WRITE) != 0 ? LPTE_CHG : 0);

	/* Promote pages */
	for (pvo = first, va_end = PVO_VADDR(pvo) + HPT_SP_SIZE;
	    pvo != NULL && PVO_VADDR(pvo) < va_end;
	    pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
		pvo->pvo_pte.pa &= ~LPTE_LP_MASK;
		pvo->pvo_pte.pa |= LPTE_LP_4K_16M;
		pvo->pvo_vaddr |= PVO_LARGE;
	}
	moea64_pte_replace_sp(first);

	/* Send REF/CHG bits to VM */
	moea64_sp_refchg_process(first, m, sp_refchg, first->pvo_pte.prot);

	/* Use first page to cache REF/CHG bits */
	atomic_set_32(&m->md.mdpg_attrs, sp_refchg | MDPG_ATTR_SP);

	PMAP_UNLOCK(pmap);

	atomic_add_long(&sp_mappings, 1);
	atomic_add_long(&sp_promotions, 1);
	CTR3(KTR_PMAP, "%s: success for va %#jx in pmap %p",
	    __func__, (uintmax_t)sva, pmap);
	return;

error:
	atomic_add_long(&sp_p_failures, 1);
	PMAP_UNLOCK(pmap);
}
static void
moea64_sp_demote_aligned(struct pvo_entry *sp)
{
	struct pvo_entry *pvo;
	vm_offset_t va, va_end;
	vm_paddr_t pa;
	vm_page_t m;
	pmap_t pmap;
	int64_t refchg;

	CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp));

	pmap = sp->pvo_pmap;
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	pvo = sp;

	/* Demote pages */
	va = PVO_VADDR(pvo);
	pa = PVO_PADDR(pvo);
	m = PHYS_TO_VM_PAGE(pa);

	for (pvo = sp, va_end = va + HPT_SP_SIZE;
	    pvo != NULL && PVO_VADDR(pvo) < va_end;
	    pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo),
	    va += PAGE_SIZE, pa += PAGE_SIZE) {
		KASSERT(pvo && PVO_VADDR(pvo) == va,
		    ("%s: missing PVO for va %#jx", __func__, (uintmax_t)va));

		pvo->pvo_vaddr &= ~PVO_LARGE;
		pvo->pvo_pte.pa &= ~LPTE_RPGN;
		pvo->pvo_pte.pa |= pa;
	}
	refchg = moea64_pte_replace_sp(sp);

	/*
	 * Clear SP flag
	 *
	 * XXX It is possible that another pmap has this page mapped as
	 * part of a superpage, but as the SP flag is used only for
	 * caching SP REF/CHG bits, which are queried directly when not
	 * set in the cache, it should be ok to clear it here.
	 */
	atomic_clear_32(&m->md.mdpg_attrs, MDPG_ATTR_SP);

	/*
	 * Handle superpage REF/CHG bits. A bit set in the superpage
	 * means all pages should consider it set.
	 */
	moea64_sp_refchg_process(sp, m, refchg, sp->pvo_pte.prot);

	atomic_add_long(&sp_demotions, 1);
	CTR3(KTR_PMAP, "%s: success for va %#jx in pmap %p",
	    __func__, (uintmax_t)PVO_VADDR(sp), pmap);
}

static void
moea64_sp_demote(struct pvo_entry *pvo)
{
	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) {
		pvo = moea64_pvo_find_va(pvo->pvo_pmap,
		    PVO_VADDR(pvo) & ~HPT_SP_MASK);
		KASSERT(pvo != NULL, ("%s: missing PVO for va %#jx",
		    __func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK)));
	}
	moea64_sp_demote_aligned(pvo);
}
static struct pvo_entry * | |||||
moea64_sp_unwire(struct pvo_entry *sp) | |||||
{ | |||||
struct pvo_entry *pvo, *prev; | |||||
vm_offset_t eva; | |||||
pmap_t pm; | |||||
int64_t ret, refchg; | |||||
CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp)); | |||||
pm = sp->pvo_pmap; | |||||
PMAP_LOCK_ASSERT(pm, MA_OWNED); | |||||
eva = PVO_VADDR(sp) + HPT_SP_SIZE; | |||||
refchg = 0; | |||||
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva; | |||||
prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { | |||||
if ((pvo->pvo_vaddr & PVO_WIRED) == 0) | |||||
panic("%s: pvo %p is missing PVO_WIRED", | |||||
__func__, pvo); | |||||
pvo->pvo_vaddr &= ~PVO_WIRED; | |||||
ret = moea64_pte_replace(pvo, 0 /* No invalidation */); | |||||
if (ret < 0) | |||||
refchg |= LPTE_CHG; | |||||
else | |||||
refchg |= ret; | |||||
pm->pm_stats.wired_count--; | |||||
} | |||||
/* Send REF/CHG bits to VM */ | |||||
moea64_sp_refchg_process(sp, PHYS_TO_VM_PAGE(PVO_PADDR(sp)), | |||||
refchg, sp->pvo_pte.prot); | |||||
return (prev); | |||||
} | |||||
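/* | |||||
* Apply "prot" to all 4KB PVOs of a superpage and forward the | |||||
* accumulated REF/CHG bits to the VM layer under the old protection. | |||||
* If the superpage became executable, PGA_EXECUTABLE is set on each | |||||
* managed page and the instruction cache is synchronized. Returns | |||||
* the last PVO processed, as moea64_sp_unwire() does. | |||||
*/ | |||||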
static struct pvo_entry * | |||||
moea64_sp_protect(struct pvo_entry *sp, vm_prot_t prot) | |||||
{ | |||||
struct pvo_entry *pvo, *prev; | |||||
vm_offset_t eva; | |||||
pmap_t pm; | |||||
vm_page_t m, m_end; | |||||
int64_t ret, refchg; | |||||
vm_prot_t oldprot; | |||||
CTR3(KTR_PMAP, "%s: va=%#jx, prot=%x", | |||||
__func__, (uintmax_t)PVO_VADDR(sp), prot); | |||||
pm = sp->pvo_pmap; | |||||
PMAP_LOCK_ASSERT(pm, MA_OWNED); | |||||
oldprot = sp->pvo_pte.prot; | |||||
m = PHYS_TO_VM_PAGE(PVO_PADDR(sp)); | |||||
KASSERT(m != NULL, ("%s: missing vm page for pa %#jx", | |||||
__func__, (uintmax_t)PVO_PADDR(sp))); | |||||
eva = PVO_VADDR(sp) + HPT_SP_SIZE; | |||||
refchg = 0; | |||||
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva; | |||||
prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { | |||||
pvo->pvo_pte.prot = prot; | |||||
/* | |||||
* If the PVO is in the page table, update the mapping. | |||||
*/ | |||||
ret = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE); | |||||
if (ret < 0) | |||||
refchg |= LPTE_CHG; | |||||
else | |||||
refchg |= ret; | |||||
} | |||||
/* Send REF/CHG bits to VM */ | |||||
moea64_sp_refchg_process(sp, m, refchg, oldprot); | |||||
/* Handle pages that became executable */ | |||||
if ((m->a.flags & PGA_EXECUTABLE) == 0 && | |||||
(sp->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { | |||||
if ((m->oflags & VPO_UNMANAGED) == 0) | |||||
for (m_end = &m[HPT_SP_PAGES]; m < m_end; m++) | |||||
vm_page_aflag_set(m, PGA_EXECUTABLE); | |||||
moea64_syncicache(pm, PVO_VADDR(sp), PVO_PADDR(sp), | |||||
HPT_SP_SIZE); | |||||
} | |||||
return (prev); | |||||
} | |||||
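/* | |||||
* Remove all PVOs of a superpage from the page table and from their | |||||
* pmap, queueing them on "tofree" so the caller can unlink them from | |||||
* their vm_page in a second pass. Returns the first PVO after the | |||||
* superpage, or NULL if there is none. | |||||
*/ | |||||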
static struct pvo_entry * | |||||
moea64_sp_remove(struct pvo_entry *sp, struct pvo_dlist *tofree) | |||||
{ | |||||
struct pvo_entry *pvo, *tpvo; | |||||
vm_offset_t eva; | |||||
pmap_t pm; | |||||
CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp)); | |||||
pm = sp->pvo_pmap; | |||||
PMAP_LOCK_ASSERT(pm, MA_OWNED); | |||||
eva = PVO_VADDR(sp) + HPT_SP_SIZE; | |||||
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { | |||||
tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); | |||||
/* | |||||
* For locking reasons, remove this from the page table and | |||||
* pmap, but save delinking from the vm_page for a second | |||||
* pass. | |||||
*/ | |||||
moea64_pvo_remove_from_pmap(pvo); | |||||
SLIST_INSERT_HEAD(tofree, pvo, pvo_dlink); | |||||
} | |||||
/* | |||||
* Clear SP bit | |||||
* | |||||
* XXX See comment in moea64_sp_demote_aligned() for why it's | |||||
* OK to always clear the SP bit on remove/demote. | |||||
*/ | |||||
atomic_clear_32(&PHYS_TO_VM_PAGE(PVO_PADDR(sp))->md.mdpg_attrs, | |||||
MDPG_ATTR_SP); | |||||
return (tpvo); | |||||
} | |||||
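/* | |||||
* With the pmap lock held, OR together the REF/CHG bits of every | |||||
* 4KB PVO of the superpage containing "pvo", stopping early once | |||||
* "ptebit" is seen. Any bits found are cached in the first page's | |||||
* mdpg_attrs along with MDPG_ATTR_SP. | |||||
*/ | |||||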
static int64_t | |||||
moea64_sp_query_locked(struct pvo_entry *pvo, uint64_t ptebit) | |||||
{ | |||||
int64_t refchg, ret; | |||||
vm_offset_t eva; | |||||
vm_page_t m; | |||||
pmap_t pmap; | |||||
struct pvo_entry *sp; | |||||
pmap = pvo->pvo_pmap; | |||||
PMAP_LOCK_ASSERT(pmap, MA_OWNED); | |||||
/* Get first SP PVO */ | |||||
if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) { | |||||
sp = moea64_pvo_find_va(pmap, PVO_VADDR(pvo) & ~HPT_SP_MASK); | |||||
KASSERT(sp != NULL, ("%s: missing PVO for va %#jx", | |||||
__func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK))); | |||||
} else | |||||
sp = pvo; | |||||
eva = PVO_VADDR(sp) + HPT_SP_SIZE; | |||||
refchg = 0; | |||||
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva; | |||||
pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) { | |||||
ret = moea64_pte_synch(pvo); | |||||
if (ret > 0) { | |||||
refchg |= ret & (LPTE_CHG | LPTE_REF); | |||||
if ((refchg & ptebit) != 0) | |||||
break; | |||||
} | |||||
} | |||||
/* Save results */ | |||||
if (refchg != 0) { | |||||
m = PHYS_TO_VM_PAGE(PVO_PADDR(sp)); | |||||
atomic_set_32(&m->md.mdpg_attrs, refchg | MDPG_ATTR_SP); | |||||
} | |||||
return (refchg); | |||||
} | |||||
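/* | |||||
* Lock-acquiring wrapper around moea64_sp_query_locked(). Returns | |||||
* -1 if the superpage was demoted or removed before the pmap lock | |||||
* was acquired. A sketch of the intended use (hypothetical caller): | |||||
* | |||||
* if (PVO_IS_SP(pvo) && (ret = moea64_sp_query(pvo, ptebit)) != -1) | |||||
* return (ret); | |||||
* (otherwise fall back to querying the single 4KB mapping) | |||||
*/ | |||||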
static int64_t | |||||
moea64_sp_query(struct pvo_entry *pvo, uint64_t ptebit) | |||||
{ | |||||
int64_t refchg; | |||||
pmap_t pmap; | |||||
pmap = pvo->pvo_pmap; | |||||
PMAP_LOCK(pmap); | |||||
/* | |||||
* Check if SP was demoted/removed before pmap lock was acquired. | |||||
*/ | |||||
if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { | |||||
CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx", | |||||
__func__, (uintmax_t)PVO_PADDR(pvo)); | |||||
PMAP_UNLOCK(pmap); | |||||
return (-1); | |||||
} | |||||
refchg = moea64_sp_query_locked(pvo, ptebit); | |||||
PMAP_UNLOCK(pmap); | |||||
CTR4(KTR_PMAP, "%s: va=%#jx, pa=%#jx: refchg=%#jx", | |||||
__func__, (uintmax_t)PVO_VADDR(pvo), | |||||
(uintmax_t)PVO_PADDR(pvo), (uintmax_t)refchg); | |||||
return (refchg); | |||||
} | |||||
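/* | |||||
* Clear "ptebit" in every 4KB PTE of the superpage containing "pvo" | |||||
* and in the cached attributes of its first page. Returns the | |||||
* accumulated REF/CHG bits observed while clearing, or -1 if the | |||||
* superpage was demoted or removed before the pmap lock was | |||||
* acquired. | |||||
*/ | |||||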
static int64_t | |||||
moea64_sp_pvo_clear(struct pvo_entry *pvo, uint64_t ptebit) | |||||
{ | |||||
int64_t refchg, ret; | |||||
pmap_t pmap; | |||||
struct pvo_entry *sp; | |||||
vm_offset_t eva; | |||||
vm_page_t m; | |||||
pmap = pvo->pvo_pmap; | |||||
PMAP_LOCK(pmap); | |||||
/* | |||||
* Check if SP was demoted/removed before pmap lock was acquired. | |||||
*/ | |||||
if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { | |||||
CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx", | |||||
__func__, (uintmax_t)PVO_PADDR(pvo)); | |||||
PMAP_UNLOCK(pmap); | |||||
return (-1); | |||||
} | |||||
/* Get first SP PVO */ | |||||
if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) { | |||||
sp = moea64_pvo_find_va(pmap, PVO_VADDR(pvo) & ~HPT_SP_MASK); | |||||
KASSERT(sp != NULL, ("%s: missing PVO for va %#jx", | |||||
__func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK))); | |||||
} else | |||||
sp = pvo; | |||||
eva = PVO_VADDR(sp) + HPT_SP_SIZE; | |||||
refchg = 0; | |||||
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva; | |||||
pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) { | |||||
ret = moea64_pte_clear(pvo, ptebit); | |||||
if (ret > 0) | |||||
refchg |= ret & (LPTE_CHG | LPTE_REF); | |||||
} | |||||
m = PHYS_TO_VM_PAGE(PVO_PADDR(sp)); | |||||
atomic_clear_32(&m->md.mdpg_attrs, ptebit); | |||||
PMAP_UNLOCK(pmap); | |||||
CTR4(KTR_PMAP, "%s: va=%#jx, pa=%#jx: refchg=%#jx", | |||||
__func__, (uintmax_t)PVO_VADDR(sp), | |||||
(uintmax_t)PVO_PADDR(sp), (uintmax_t)refchg); | |||||
return (refchg); | |||||
} | |||||
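/* | |||||
* Superpage-aware clearing of "ptebit" for the mapping of "m" at | |||||
* "pvo". For LPTE_REF, a hash (see below) selects whether this call | |||||
* really clears the bit; for LPTE_CHG, the superpage is demoted | |||||
* first, so only one 4KB mapping loses the bit. Returns the number | |||||
* of set bits found (at most 1 per call), or -1 if the superpage was | |||||
* demoted or removed concurrently. | |||||
*/ | |||||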
static int64_t | |||||
moea64_sp_clear(struct pvo_entry *pvo, vm_page_t m, uint64_t ptebit) | |||||
{ | |||||
int64_t count, ret; | |||||
pmap_t pmap; | |||||
count = 0; | |||||
pmap = pvo->pvo_pmap; | |||||
/* | |||||
* Since this reference bit is shared by 4096 4KB pages, it | |||||
* should not be cleared every time it is tested. Apply a | |||||
* simple "hash" function on the physical page number, the | |||||
* virtual superpage number, and the pmap address to select | |||||
* one 4KB page out of the 4096 on which testing the | |||||
* reference bit will result in clearing that reference bit. | |||||
* This function is designed to avoid the selection of the | |||||
* same 4KB page for every 16MB page mapping. | |||||
* | |||||
* Always leave the reference bit of a wired mapping set, as | |||||
* the current state of its reference bit won't affect page | |||||
* replacement. | |||||
*/ | |||||
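/* | |||||
* Concretely, with 16MB superpages (HPT_SP_PAGES == 4096): the low | |||||
* 12 bits of (pa >> PAGE_SHIFT) ^ (va >> HPT_SP_SHIFT) ^ pmap select | |||||
* one of the 4096 base pages, and only when they are all zero (and | |||||
* the mapping is unwired) is the REF bit actually cleared below. | |||||
*/ | |||||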
if (ptebit == LPTE_REF && (((VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) ^ | |||||
(PVO_VADDR(pvo) >> HPT_SP_SHIFT) ^ (uintptr_t)pmap) & | |||||
(HPT_SP_PAGES - 1)) == 0 && (pvo->pvo_vaddr & PVO_WIRED) == 0) { | |||||
if ((ret = moea64_sp_pvo_clear(pvo, ptebit)) == -1) | |||||
return (-1); | |||||
if ((ret & ptebit) != 0) | |||||
count++; | |||||
/* | |||||
* If this page was not selected by the hash function, then assume | |||||
* its REF bit was set. | |||||
*/ | |||||
} else if (ptebit == LPTE_REF) { | |||||
count++; | |||||
/* | |||||
* To clear the CHG bit of a single SP page, the superpage must first | |||||
* be demoted. But if no CHG bit is set, there is nothing to clear, | |||||
* so no demotion is needed. | |||||
*/ | |||||
} else { | |||||
CTR4(KTR_PMAP, "%s: ptebit=%#jx, va=%#jx, pa=%#jx", | |||||
__func__, (uintmax_t)ptebit, (uintmax_t)PVO_VADDR(pvo), | |||||
(uintmax_t)PVO_PADDR(pvo)); | |||||
PMAP_LOCK(pmap); | |||||
/* | |||||
* Make sure SP wasn't demoted/removed before pmap lock | |||||
* was acquired. | |||||
*/ | |||||
if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { | |||||
CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx", | |||||
__func__, (uintmax_t)PVO_PADDR(pvo)); | |||||
PMAP_UNLOCK(pmap); | |||||
return (-1); | |||||
} | |||||
ret = moea64_sp_query_locked(pvo, ptebit); | |||||
if ((ret & ptebit) != 0) | |||||
count++; | |||||
else { | |||||
PMAP_UNLOCK(pmap); | |||||
return (0); | |||||
} | |||||
moea64_sp_demote(pvo); | |||||
moea64_pte_clear(pvo, ptebit); | |||||
/* | |||||
* Write protect the mapping to a single page so that a | |||||
* subsequent write access may repromote. | |||||
*/ | |||||
if ((pvo->pvo_vaddr & PVO_WIRED) == 0) | |||||
moea64_pvo_protect(pmap, pvo, | |||||
pvo->pvo_pte.prot & ~VM_PROT_WRITE); | |||||
PMAP_UNLOCK(pmap); | |||||
} | |||||
return (count); | |||||
} |