diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -342,7 +342,6 @@
 #define	PV_STAT(x)	do { } while (0)
 #endif
 
-#undef pa_index
 #ifdef NUMA
 #define	pa_index(pa)	({					\
 	KASSERT((pa) <= vm_phys_segs[vm_phys_nsegs - 1].end,	\
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -125,6 +125,9 @@
 #define PV_LOCK_COUNT	MAXCPU
 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
 
+#define	PV_LOCK_SHIFT	21
+#define	pa_index(pa)	((pa) >> PV_LOCK_SHIFT)
+
 /*
  * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
  * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the
@@ -145,7 +148,7 @@
 
 /* Superpage PV lock */
-#define	PV_LOCK_SIZE		(1<<PDRSHIFT)
+#define	PV_LOCK_SIZE		(1<<PV_LOCK_SHIFT)
 
 static __always_inline void
 moea64_sp_pv_lock(vm_paddr_t pa)
 {
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -305,11 +305,12 @@
 #define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
+#define	pa_index(pa)		((pa) >> L2_SHIFT)
 #define	pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
 
 #define	NPV_LIST_LOCKS	MAXCPU
 
 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
-			(&pv_list_locks[pmap_l2_pindex(pa) % NPV_LIST_LOCKS])
+			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
 
 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
 	struct rwlock **_lockp = (lockp);		\
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -336,14 +336,6 @@
 extern vm_page_t bogus_page;
 
 #endif				/* _KERNEL */
 
-#if defined(__arm__)
-#define	PDRSHIFT	PDR_SHIFT
-#elif !defined(PDRSHIFT)
-#define	PDRSHIFT	21
-#endif
-
-#define	pa_index(pa)	((pa) >> PDRSHIFT)
-
 /*
  * The vm_page's aflags are updated using atomic operations. To set or clear
  * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
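The per-pmap definitions above all implement the same scheme: pa_index() shifts a physical address right by a superpage-sized shift (21 bits, i.e. 2 MB chunks, on every configuration touched here), and PV-lock selection reduces that chunk number modulo the size of a padded lock array. The following is a minimal userspace C sketch of that hashing, for illustration only: PV_LOCK_IDX and the PV_LOCK_COUNT value of 64 are hypothetical stand-ins for the kernel's PHYS_TO_PV_LIST_LOCK()/PV_LOCKPTR() macros and their MAXCPU-sized arrays of struct rwlock / struct mtx_padalign; only PV_LOCK_SHIFT == 21 and the shape of pa_index() come from the diff.

/*
 * Userspace sketch of pa_index()-based PV lock hashing; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;

#define	PV_LOCK_SHIFT	21		/* 2 MB superpage shift, from the diff */
#define	PV_LOCK_COUNT	64		/* hypothetical stand-in for MAXCPU */

/* pa_index(): physical address -> 2 MB chunk number, as in the diff */
#define	pa_index(pa)	((pa) >> PV_LOCK_SHIFT)

/* PV_LOCK_IDX(): hypothetical stand-in for PHYS_TO_PV_LIST_LOCK()'s hash */
#define	PV_LOCK_IDX(pa)	(pa_index(pa) % PV_LOCK_COUNT)

int
main(void)
{
	vm_paddr_t pa1 = 0x40000000;		/* 1 GB boundary */
	vm_paddr_t pa2 = 0x40000000 + 4096;	/* same 2 MB chunk as pa1 */
	vm_paddr_t pa3 = 0x40200000;		/* next 2 MB chunk */

	/* Pages in one 2 MB chunk share a lock; the next chunk gets another. */
	printf("%ju %ju %ju\n",
	    (uintmax_t)PV_LOCK_IDX(pa1),
	    (uintmax_t)PV_LOCK_IDX(pa2),
	    (uintmax_t)PV_LOCK_IDX(pa3));
	return (0);
}

Compiled and run, this prints "0 0 1": addresses within one superpage-sized chunk map to the same lock index, so a single lock covers a whole 2 MB worth of pv entries, while the modulo spreads distinct chunks across the array to limit contention. That is the property the PV_LOCK_SIZE and PHYS_TO_PV_LIST_LOCK macros above rely on, and why each pmap can now pick its own shift instead of inheriting PDRSHIFT from vm_page.h.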