Index: sys/amd64/include/vmparam.h
===================================================================
--- sys/amd64/include/vmparam.h
+++ sys/amd64/include/vmparam.h
@@ -149,10 +149,6 @@
 #define	VM_LEVEL_0_ORDER	9
 #endif
 
-#ifdef	SMP
-#define	PA_LOCK_COUNT	256
-#endif
-
 /*
  * Kernel physical load address for non-UEFI boot and for legacy UEFI loader.
  * Newer UEFI loader loads kernel anywhere below 4G, with memory allocated
Index: sys/compat/linuxkpi/common/include/linux/highmem.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/highmem.h
+++ sys/compat/linuxkpi/common/include/linux/highmem.h
@@ -79,9 +79,7 @@
 	vm_memattr_t attr = pgprot2cachemode(prot);
 
 	if (attr != VM_MEMATTR_DEFAULT) {
-		vm_page_lock(page);
 		page->flags |= PG_FICTITIOUS;
-		vm_page_unlock(page);
 		pmap_page_set_memattr(page, attr);
 	}
 	return (kmap(page));
Index: sys/compat/linuxkpi/common/include/linux/mm.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/mm.h
+++ sys/compat/linuxkpi/common/include/linux/mm.h
@@ -406,14 +406,14 @@
 static inline int
 trylock_page(struct page *page)
 {
-	return (vm_page_trylock(page));
+	return (vm_page_tryxbusy(page));
 }
 
 static inline void
 unlock_page(struct page *page)
 {
 
-	vm_page_unlock(page);
+	vm_page_xunbusy(page);
 }
 
 extern int is_vmalloc_addr(const void *addr);
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -122,7 +122,7 @@
  *
  */
 
-#define	PV_LOCK_COUNT	PA_LOCK_COUNT
+#define	PV_LOCK_COUNT	MAXCPU
 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
 
 /*
Index: sys/powerpc/include/vmparam.h
===================================================================
--- sys/powerpc/include/vmparam.h
+++ sys/powerpc/include/vmparam.h
@@ -216,12 +216,6 @@
 #define	VM_LEVEL_0_ORDER_MAX	12
 #endif
 
-#ifdef __powerpc64__
-#ifdef SMP
-#define	PA_LOCK_COUNT	256
-#endif
-#endif
-
 #ifndef	VM_INITIAL_PAGEIN
 #define	VM_INITIAL_PAGEIN	16
 #endif
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -1005,7 +1005,6 @@
 	vm_page_t ma[vm_pageout_page_count];
 	int count, runlen;
 
-	vm_page_lock_assert(p, MA_NOTOWNED);
 	vm_page_assert_xbusied(p);
 	ma[0] = p;
 	runlen = vm_radix_iter_lookup_range(pages, p->pindex + 1,
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -336,8 +336,6 @@
 extern vm_page_t bogus_page;
 #endif				/* _KERNEL */
 
-extern struct mtx_padalign pa_lock[];
-
 #if defined(__arm__)
 #define	PDRSHIFT	PDR_SHIFT
 #elif !defined(PDRSHIFT)
@@ -345,40 +343,6 @@
 #endif
 #define	pa_index(pa)	((pa) >> PDRSHIFT)
 
-#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
-#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
-#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
-#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
-#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
-#define	PA_UNLOCK_COND(pa)			\
-	do {					\
-		if ((pa) != 0) {		\
-			PA_UNLOCK((pa));	\
-			(pa) = 0;		\
-		}				\
-	} while (0)
-
-#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
-
-#if defined(KLD_MODULE) && !defined(KLD_TIED)
-#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
-#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
-#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
-#else	/* !KLD_MODULE */
-#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
-#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
-#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
-#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
-#endif
-#if defined(INVARIANTS)
-#define	vm_page_assert_locked(m)		\
-    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
-#define	vm_page_lock_assert(m, a)		\
-    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
-#else
-#define	vm_page_assert_locked(m)
-#define	vm_page_lock_assert(m, a)
-#endif
 
 /*
  * The vm_page's aflags are updated using atomic operations. To set or clear
@@ -713,13 +677,6 @@
 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
 void vm_page_dirty_KBI(vm_page_t m);
-void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
-void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
-int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
-#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
-void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
-void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
-#endif
 
 #define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -114,8 +114,6 @@
 
 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
 
-struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
-
 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
 /* The following fields are protected by the domainset lock. */
 domainset_t __exclusive_cache_line vm_min_domains;
@@ -586,8 +584,6 @@
 	 * Initialize the page and queue locks.
 	 */
 	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
-	for (i = 0; i < PA_LOCK_COUNT; i++)
-		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 	for (i = 0; i < vm_ndomains; i++)
 		vm_page_domain_init(i);
 
@@ -5809,43 +5805,6 @@
 	vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL);
 }
 
-void
-vm_page_lock_KBI(vm_page_t m, const char *file, int line)
-{
-
-	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
-}
-
-void
-vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
-{
-
-	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
-}
-
-int
-vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
-{
-
-	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
-}
-
-#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
-void
-vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
-{
-
-	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
-}
-
-void
-vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
-{
-
-	mtx_assert_(vm_page_lockptr(m), a, file, line);
-}
-#endif
-
 #ifdef INVARIANTS
 void
 vm_page_object_busy_assert(vm_page_t m)
Index: sys/vm/vm_param.h
===================================================================
--- sys/vm/vm_param.h
+++ sys/vm/vm_param.h
@@ -112,14 +112,6 @@
 #define KERN_OUT_OF_BOUNDS		9
 #define KERN_RESTART			10
 
-#ifndef PA_LOCK_COUNT
-#ifdef SMP
-#define	PA_LOCK_COUNT	32
-#else
-#define	PA_LOCK_COUNT	1
-#endif	/* !SMP */
-#endif	/* !PA_LOCK_COUNT */
-
 #ifndef KSTACK_MAX_PAGES
 #define KSTACK_MAX_PAGES 32
 #endif
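
A minimal sketch, not part of the patch, of the pattern that takes over from the removed vm_page_trylock()/vm_page_unlock() pair: callers rely on the page's exclusive busy state, acquired with vm_page_tryxbusy() and released with vm_page_xunbusy(), exactly as the linuxkpi trylock_page()/unlock_page() shims above now do. The helper name frob_page_attrs() is hypothetical and only illustrates the calling convention.

/*
 * Sketch only: exclusive-busy a page, operate on it, then drop the
 * busy state.  frob_page_attrs() is a hypothetical example helper.
 */
#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static bool
frob_page_attrs(vm_page_t m)
{

	if (!vm_page_tryxbusy(m))	/* previously vm_page_trylock(m) */
		return (false);
	/* The page is exclusively busied here; safe to modify it. */
	vm_page_xunbusy(m);		/* previously vm_page_unlock(m) */
	return (true);
}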