
D54936.id.diff

diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -123,7 +123,7 @@
*/
#define PV_LOCK_COUNT MAXCPU
-static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
+static struct rwlock __exclusive_cache_line pv_lock[PV_LOCK_COUNT];
#define PV_LOCK_SHIFT HPT_SP_SHIFT
#define pa_index(pa) ((pa) >> PV_LOCK_SHIFT)
@@ -138,11 +138,30 @@
#else
#define PV_LOCK_IDX(pa) (pa_index(pa) % PV_LOCK_COUNT)
#endif
-#define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
-#define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa))
-#define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa))
-#define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
-#define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m))
+#define PV_LOCKPTR(pa) ((struct rwlock *)(&pv_lock[PV_LOCK_IDX(pa)]))
+
+#define PV_WR_LOCK(pa) rw_wlock(PV_LOCKPTR(pa))
+#define PV_RD_LOCK(pa) rw_rlock(PV_LOCKPTR(pa))
+#define PV_UNLOCK(pa) rw_unlock(PV_LOCKPTR(pa))
+#define PV_LOCKASSERT(pa) rw_assert(PV_LOCKPTR(pa), RA_LOCKED)
+#define PV_LOCK_RD_ASSERT(pa) rw_assert(PV_LOCKPTR(pa), RA_RLOCKED)
+#define PV_LOCK_WR_ASSERT(pa) rw_assert(PV_LOCKPTR(pa), RA_WLOCKED)
+
+#define CHANGE_PV_LIST_WR_LOCK_TO_PHYS(lockp, pa) do { \
+ struct rwlock **_lockp = (lockp); \
+ struct rwlock *_new_lock; \
+ \
+ _new_lock = PV_LOCKPTR(pa); \
+ if (_new_lock != *_lockp) { \
+ if (*_lockp != NULL) \
+ rw_unlock(*_lockp); \
+ *_lockp = _new_lock; \
+ rw_wlock(*_lockp); \
+ } \
+} while (0)
+
+#define PV_PAGE_WR_LOCK(m) PV_WR_LOCK(VM_PAGE_TO_PHYS(m))
+#define PV_PAGE_RD_LOCK(m) PV_RD_LOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_UNLOCK(m) PV_UNLOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_LOCKASSERT(m) PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
@@ -244,6 +263,8 @@
struct pvo_head *pvo_head, struct pvo_entry **oldpvo);
static void moea64_pvo_remove_from_pmap(struct pvo_entry *pvo);
static void moea64_pvo_remove_from_page(struct pvo_entry *pvo);
+static void moea64_pvo_remove_from_page_single_locked(struct pvo_entry *pvo,
+ struct rwlock **lockp);
static void moea64_pvo_remove_from_page_locked(
struct pvo_entry *pvo, vm_page_t m);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
@@ -322,8 +343,9 @@
static void moea64_align_superpage(vm_object_t, vm_ooffset_t,
vm_offset_t *, vm_size_t);
-static int moea64_sp_enter(pmap_t pmap, vm_offset_t va,
- vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind);
+static int moea64_sp_enter_single(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind,
+ struct rwlock **lockp);
static struct pvo_entry *moea64_sp_remove(struct pvo_entry *sp,
struct pvo_dlist *tofree);
@@ -354,6 +376,8 @@
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
void moea64_copy_pages_dmap(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
+static int moea64_enter_single(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
+ u_int flags, int8_t psind, struct rwlock **lockp);
int moea64_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
u_int flags, int8_t psind);
void moea64_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
@@ -500,7 +524,7 @@
vm_page_to_pvoh(vm_page_t m)
{
- mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
+ rw_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), RA_LOCKED);
return (&m->md.mdpg_pvoh);
}
@@ -1028,7 +1052,7 @@
*/
mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
for (i = 0; i < PV_LOCK_COUNT; i++)
- mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
+ rw_init(&pv_lock[i], "pv lock");
/*
* Initialise the bootstrap pvo pool.
@@ -1605,8 +1629,8 @@
*/
int
-moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, u_int flags, int8_t psind)
+moea64_enter_single(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, u_int flags, int8_t psind, struct rwlock **lockp)
{
struct pvo_entry *pvo, *oldpvo, *tpvo;
struct pvo_head *pvo_head;
@@ -1622,7 +1646,8 @@
}
if (psind > 0)
- return (moea64_sp_enter(pmap, va, m, prot, flags, psind));
+ return (moea64_sp_enter_single(pmap, va, m, prot, flags,
+ psind, lockp));
pvo = alloc_pvo_entry(0);
if (pvo == NULL)
@@ -1644,7 +1669,8 @@
pvo->pvo_vaddr |= PVO_MANAGED;
}
- PV_LOCK(pa);
+ CHANGE_PV_LIST_WR_LOCK_TO_PHYS(lockp, pa);
+
PMAP_LOCK(pmap);
if (pvo->pvo_pmap == NULL)
init_pvo_entry(pvo, pmap, va);
@@ -1679,7 +1705,6 @@
/* Then just clean up and go home */
PMAP_UNLOCK(pmap);
- PV_UNLOCK(pa);
free_pvo_entry(pvo);
pvo = NULL;
goto out;
@@ -1692,11 +1717,10 @@
}
}
PMAP_UNLOCK(pmap);
- PV_UNLOCK(pa);
/* Free any dead pages */
if (error == EEXIST) {
- moea64_pvo_remove_from_page(oldpvo);
+ moea64_pvo_remove_from_page_single_locked(oldpvo, lockp);
free_pvo_entry(oldpvo);
}
@@ -1734,6 +1758,20 @@
return (KERN_SUCCESS);
}
+int
+moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, u_int flags, int8_t psind)
+{
+ struct rwlock *lock = NULL;
+ int ret;
+
+ ret = moea64_enter_single(pmap, va, m, prot, flags, psind, &lock);
+ if (lock != NULL)
+ rw_unlock(lock);
+
+ return (ret);
+}
+
static void
moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
vm_size_t sz)
@@ -1787,6 +1825,7 @@
moea64_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
vm_page_t m_start, vm_prot_t prot)
{
+ struct rwlock *lock = NULL;
struct pctrie_iter pages;
vm_page_t m;
vm_offset_t va;
@@ -1797,6 +1836,7 @@
vm_page_iter_limit_init(&pages, m_start->object,
m_start->pindex + atop(end - start));
m = vm_radix_iter_lookup(&pages, m_start->pindex);
+
while (m != NULL) {
va = start + ptoa(m->pindex - m_start->pindex);
if ((va & HPT_SP_MASK) == 0 && va + HPT_SP_SIZE <= end &&
@@ -1804,14 +1844,16 @@
psind = 1;
else
psind = 0;
- moea64_enter(pm, va, m, prot &
+ moea64_enter_single(pm, va, m, prot &
(VM_PROT_READ | VM_PROT_EXECUTE),
- PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, psind);
+ PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, psind, &lock);
if (psind == 1)
m = vm_radix_iter_jump(&pages, HPT_SP_SIZE / PAGE_SIZE);
else
m = vm_radix_iter_step(&pages);
}
+ if (lock != NULL)
+ rw_unlock(lock);
}
void
@@ -2028,7 +2070,7 @@
return;
powerpc_sync();
- PV_PAGE_LOCK(m);
+ PV_PAGE_WR_LOCK(m);
refchg = 0;
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
pmap = pvo->pvo_pmap;
@@ -2101,7 +2143,7 @@
lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
- PV_PAGE_LOCK(m);
+ PV_PAGE_WR_LOCK(m);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
@@ -2360,7 +2402,7 @@
("moea64_page_exists_quick: page %p is not managed", m));
loops = 0;
rv = false;
- PV_PAGE_LOCK(m);
+ PV_PAGE_RD_LOCK(m);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
rv = true;
@@ -2395,7 +2437,7 @@
count = 0;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (count);
- PV_PAGE_LOCK(m);
+ PV_PAGE_RD_LOCK(m);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
count++;
@@ -2664,6 +2706,7 @@
{
struct pvo_entry *pvo, *tpvo;
struct pvo_dlist tofree;
+ struct rwlock *lock = NULL;
SLIST_INIT(&tofree);
@@ -2685,9 +2728,12 @@
while (!SLIST_EMPTY(&tofree)) {
pvo = SLIST_FIRST(&tofree);
SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
- moea64_pvo_remove_from_page(pvo);
+ moea64_pvo_remove_from_page_single_locked(pvo, &lock);
free_pvo_entry(pvo);
}
+
+ if (lock != NULL)
+ rw_unlock(lock);
}
static void
@@ -2731,6 +2777,7 @@
{
struct pvo_entry *pvo;
struct pvo_dlist tofree;
+ struct rwlock *lock = NULL;
/*
* Perform an unsynchronized read. This is, however, safe.
@@ -2746,9 +2793,12 @@
while (!SLIST_EMPTY(&tofree)) {
pvo = SLIST_FIRST(&tofree);
SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
- moea64_pvo_remove_from_page(pvo);
+ moea64_pvo_remove_from_page_single_locked(pvo, &lock);
free_pvo_entry(pvo);
}
+
+ if (lock != NULL)
+ rw_unlock(lock);
}
/*
@@ -2765,7 +2815,7 @@
LIST_INIT(&freequeue);
- PV_PAGE_LOCK(m);
+ PV_PAGE_WR_LOCK(m);
LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
@@ -2988,11 +3038,24 @@
if (pvo->pvo_vaddr & PVO_MANAGED)
pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
- PV_LOCK(PVO_PADDR(pvo));
+ PV_WR_LOCK(PVO_PADDR(pvo));
moea64_pvo_remove_from_page_locked(pvo, pg);
PV_UNLOCK(PVO_PADDR(pvo));
}
+static void
+moea64_pvo_remove_from_page_single_locked(struct pvo_entry *pvo,
+ struct rwlock **lockp)
+{
+ vm_page_t pg = NULL;
+
+ if (pvo->pvo_vaddr & PVO_MANAGED)
+ pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
+
+ CHANGE_PV_LIST_WR_LOCK_TO_PHYS(lockp, PVO_PADDR(pvo));
+ moea64_pvo_remove_from_page_locked(pvo, pg);
+}
+
static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
@@ -3029,7 +3092,7 @@
*/
rv = false;
powerpc_sync();
- PV_PAGE_LOCK(m);
+ PV_PAGE_RD_LOCK(m);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
if (PVO_IS_SP(pvo)) {
ret = moea64_sp_query(pvo, ptebit);
@@ -3089,7 +3152,7 @@
* For each pvo entry, clear the pte's ptebit.
*/
count = 0;
- PV_PAGE_LOCK(m);
+ PV_PAGE_WR_LOCK(m);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
if (PVO_IS_SP(pvo)) {
if ((ret = moea64_sp_clear(pvo, m, ptebit)) != -1) {
@@ -3568,7 +3631,7 @@
/* Helpers */
static __inline void
-moea64_pvo_cleanup(struct pvo_dlist *tofree)
+moea64_pvo_cleanup_locked(struct pvo_dlist *tofree, struct rwlock **lockp)
{
struct pvo_entry *pvo;
@@ -3577,7 +3640,7 @@
pvo = SLIST_FIRST(tofree);
SLIST_REMOVE_HEAD(tofree, pvo_dlink);
if (pvo->pvo_vaddr & PVO_DEAD)
- moea64_pvo_remove_from_page(pvo);
+ moea64_pvo_remove_from_page_single_locked(pvo, lockp);
free_pvo_entry(pvo);
}
}
@@ -3645,8 +3708,8 @@
/* Superpage ops */
static int
-moea64_sp_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_prot_t prot, u_int flags, int8_t psind)
+moea64_sp_enter_single(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, u_int flags, int8_t psind, struct rwlock **lockp)
{
struct pvo_entry *pvo, **pvos;
struct pvo_head *pvo_head;
@@ -3695,7 +3758,7 @@
}
}
- PV_LOCK(spa);
+ CHANGE_PV_LIST_WR_LOCK_TO_PHYS(lockp, spa);
PMAP_LOCK(pmap);
/* Note: moea64_remove_locked() also clears cached REF/CHG bits. */
@@ -3734,11 +3797,11 @@
}
PMAP_UNLOCK(pmap);
- PV_UNLOCK(spa);
sync = (sm->a.flags & PGA_EXECUTABLE) == 0;
- /* Note: moea64_pvo_cleanup() also clears page prot. flags. */
- moea64_pvo_cleanup(&tofree);
+ /* Note: moea64_pvo_cleanup_locked() also clears page prot. flags. */
+ moea64_pvo_cleanup_locked(&tofree, lockp);
+
pvo = pvos[0];
/* Set vm page flags */
@@ -4099,6 +4162,8 @@
pmap_t pmap;
struct pvo_entry *sp;
+ PV_LOCKASSERT(PVO_PADDR(pvo));
+
pmap = pvo->pvo_pmap;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -4131,12 +4196,18 @@
return (refchg);
}
+/*
+ * Note: this assumes the pv lock of the vm_page represented by the
+ * given pvo is held, at least in read mode.
+ */
static int64_t
moea64_sp_query(struct pvo_entry *pvo, uint64_t ptebit)
{
int64_t refchg;
pmap_t pmap;
+ PV_LOCKASSERT(PVO_PADDR(pvo));
+
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
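
Note on the locking scheme: instead of each pv operation taking and
dropping its own mutex, callers now thread a struct rwlock pointer
through the batched entry points (moea64_enter_single,
moea64_pvo_remove_from_page_single_locked, moea64_pvo_cleanup_locked),
and CHANGE_PV_LIST_WR_LOCK_TO_PHYS switches locks only when the next
physical address hashes to a different pv_lock[] entry, while
read-only paths such as moea64_page_exists_quick and
moea64_page_wired_mappings take the lock shared. Below is a minimal
userland sketch of that chaining idiom, with pthread_rwlock_t standing
in for the kernel's struct rwlock; the helper names (pa_to_lock,
chain_wlock) and the constants are illustrative, not part of the
patch.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define	LOCK_COUNT	16
#define	LOCK_SHIFT	24		/* stand-in for PV_LOCK_SHIFT */

static pthread_rwlock_t locks[LOCK_COUNT];

static pthread_rwlock_t *
pa_to_lock(uint64_t pa)
{
	/* Hash a physical address to one lock, as PV_LOCK_IDX() does. */
	return (&locks[(pa >> LOCK_SHIFT) % LOCK_COUNT]);
}

/*
 * Acquire the write lock covering pa.  If the caller already holds
 * the right lock this is a no-op; otherwise the old lock is dropped
 * and the new one taken, so a batch of operations on nearby pages
 * pays for only a handful of lock switches.
 */
static void
chain_wlock(pthread_rwlock_t **lockp, uint64_t pa)
{
	pthread_rwlock_t *new_lock;

	new_lock = pa_to_lock(pa);
	if (new_lock != *lockp) {
		if (*lockp != NULL)
			pthread_rwlock_unlock(*lockp);
		*lockp = new_lock;
		pthread_rwlock_wrlock(*lockp);
	}
}

int
main(void)
{
	pthread_rwlock_t *lock = NULL;
	uint64_t pa;
	int i;

	for (i = 0; i < LOCK_COUNT; i++)
		pthread_rwlock_init(&locks[i], NULL);

	/* Walk a run of pages; consecutive ones reuse the held lock. */
	for (pa = 0; pa < (4ULL << LOCK_SHIFT);
	    pa += 1ULL << (LOCK_SHIFT - 1)) {
		chain_wlock(&lock, pa);
		printf("pa 0x%jx -> lock %td\n", (uintmax_t)pa,
		    pa_to_lock(pa) - locks);
	}
	if (lock != NULL)	/* mirror moea64_enter()'s final unlock */
		pthread_rwlock_unlock(lock);
	return (0);
}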
