Changeset View
Changeset View
Standalone View
Standalone View
sys/powerpc/aim/moea64_native.c
Show First 20 Lines • Show All 352 Lines • ▼ Show 20 Lines | moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo) | ||||
/* Keep statistics */ | /* Keep statistics */ | ||||
STAT_MOEA64(moea64_pte_valid--); | STAT_MOEA64(moea64_pte_valid--); | ||||
return (ptelo & (LPTE_CHG | LPTE_REF)); | return (ptelo & (LPTE_CHG | LPTE_REF)); | ||||
} | } | ||||
static int64_t | static int64_t | ||||
moea64_pte_replace_inval_native(mmu_t mmu, struct pvo_entry *pvo, | |||||
volatile struct lpte *pt) | |||||
{ | |||||
struct lpte properpt; | |||||
uint64_t ptelo; | |||||
moea64_pte_from_pvo(pvo, &properpt); | |||||
rw_rlock(&moea64_eviction_lock); | |||||
if ((be64toh(pt->pte_hi & LPTE_AVPN_MASK)) != | |||||
(properpt.pte_hi & LPTE_AVPN_MASK)) { | |||||
/* Evicted */ | |||||
STAT_MOEA64(moea64_pte_overflow--); | |||||
rw_runlock(&moea64_eviction_lock); | |||||
return (-1); | |||||
} | |||||
/* | |||||
* Replace the pte, briefly locking it to collect RC bits. No | |||||
* atomics needed since this is protected against eviction by the lock. | |||||
*/ | |||||
isync(); | |||||
critical_enter(); | |||||
pt->pte_hi = be64toh((pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED); | |||||
PTESYNC(); | |||||
TLBIE(pvo->pvo_vpn); | |||||
ptelo = be64toh(pt->pte_lo); | |||||
EIEIO(); | |||||
pt->pte_lo = htobe64(properpt.pte_lo); | |||||
EIEIO(); | |||||
pt->pte_hi = htobe64(properpt.pte_hi); /* Release lock */ | |||||
PTESYNC(); | |||||
critical_exit(); | |||||
rw_runlock(&moea64_eviction_lock); | |||||
return (ptelo & (LPTE_CHG | LPTE_REF)); | |||||
} | |||||
luporl: This doesn't look correct.
In the old code `moea64_pte_unset_native()` would decrement `moea64_pte_valid` and `moea64_pte_insert_native()` would increment it again. In this case, these two lines should be removed.
static int64_t | |||||
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags) | moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags) | ||||
{ | { | ||||
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; | volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; | ||||
Not Done Inline ActionsIt seems pt assignment can be moved to the if (flags == 0) { } block, to save a few more cycles. luporl: It seems pt assignment can be moved to the if (flags == 0) { } block, to save a few more cycles. | |||||
struct lpte properpt; | struct lpte properpt; | ||||
int64_t ptelo; | int64_t ptelo; | ||||
if (flags == 0) { | if (flags == 0) { | ||||
/* Just some software bits changing. */ | /* Just some software bits changing. */ | ||||
moea64_pte_from_pvo(pvo, &properpt); | moea64_pte_from_pvo(pvo, &properpt); | ||||
rw_rlock(&moea64_eviction_lock); | rw_rlock(&moea64_eviction_lock); | ||||
if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != | if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != | ||||
(properpt.pte_hi & LPTE_AVPN_MASK)) { | (properpt.pte_hi & LPTE_AVPN_MASK)) { | ||||
rw_runlock(&moea64_eviction_lock); | rw_runlock(&moea64_eviction_lock); | ||||
return (-1); | return (-1); | ||||
} | } | ||||
pt->pte_hi = htobe64(properpt.pte_hi); | pt->pte_hi = htobe64(properpt.pte_hi); | ||||
ptelo = be64toh(pt->pte_lo); | ptelo = be64toh(pt->pte_lo); | ||||
rw_runlock(&moea64_eviction_lock); | rw_runlock(&moea64_eviction_lock); | ||||
} else { | } else { | ||||
/* Otherwise, need reinsertion and deletion */ | /* Otherwise, need reinsertion and deletion */ | ||||
ptelo = moea64_pte_unset_native(mmu, pvo); | ptelo = moea64_pte_replace_inval_native(mmu, pvo, pt); | ||||
moea64_pte_insert_native(mmu, pvo); | |||||
} | } | ||||
return (ptelo); | return (ptelo); | ||||
} | } | ||||
static void | static void | ||||
moea64_cpu_bootstrap_native(mmu_t mmup, int ap) | moea64_cpu_bootstrap_native(mmu_t mmup, int ap) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 360 Lines • Show Last 20 Lines |
This doesn't look correct.
In the old code moea64_pte_unset_native() would decrement moea64_pte_valid and moea64_pte_insert_native() would increment it again.
In this case, these two lines should be removed.