Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -316,13 +316,12 @@
 #define	PV_STAT(x)	do { } while (0)
 #endif
 
-#define	pa_index(pa)	((pa) >> PDRSHIFT)
-#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
-
 #define	NPV_LIST_LOCKS	MAXCPU
 
-#define	PHYS_TO_PV_LIST_LOCK(pa)	\
-			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
+#define	pa_index(pa)	((pa) >> PDRSHIFT)
+#define	pa_to_pmdp(pa)	(pv_table[pa_index(pa)])
+#define	pa_to_pvh(pa)	(&(pa_to_pmdp(pa)->pv_page))
+#define	PHYS_TO_PV_LIST_LOCK(pa)	(&(pa_to_pmdp(pa)->pv_lock))
 
 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
 	struct rwlock **_lockp = (lockp);		\
@@ -405,9 +404,13 @@
  */
 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
 static struct mtx __exclusive_cache_line pv_chunks_mutex;
-static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
 static u_long pv_invl_gen[NPV_LIST_LOCKS];
-static struct md_page *pv_table;
+
+struct pmap_large_md_page {
+	struct rwlock	pv_lock;
+	struct md_page	pv_page;
+};
+static struct pmap_large_md_page **pv_table;
 static struct md_page pv_dummy;
 
 /*
@@ -1800,6 +1803,49 @@
 	m->md.pat_mode = PAT_WRITE_BACK;
 }
 
+/*
+ * Initialize pv head table for superpages.
+ */
+static void
+pmap_init_pv_table(void)
+{
+	struct pmap_large_md_page *pvd;
+	vm_paddr_t start, end;
+	vm_size_t s;
+	int domain, i, j, k, pv_npg;
+	int pages;
+
+	/*
+	 * Calculate the size of the pointer array.
+	 */
+	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
+	s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page *);
+	s = round_page(s);
+	pv_table = (struct pmap_large_md_page **)kmem_malloc(s,
+	    M_WAITOK | M_ZERO);
+
+	/*
+	 * Iterate physical segments to assign pointers to respective pages.
+	 */
+	for (i = 0; i < vm_phys_nsegs; i++) {
+		start = vm_phys_segs[i].start;
+		end = vm_phys_segs[i].end;
+		domain = vm_phys_segs[i].domain;
+
+		pages = (end - 1) / NBPDR - start / NBPDR + 1;
+		s = round_page(pages * sizeof(*pvd));
+
+		pvd = (struct pmap_large_md_page *)
+		    kmem_malloc_domainset(DOMAINSET_PREF(domain), s,
+		    M_WAITOK | M_ZERO);
+		for (j = start / NBPDR, k = 0; k < pages; j++, k++) {
+			rw_init(&pvd[k].pv_lock, "pmap pv list");
+			TAILQ_INIT(&pvd[k].pv_page.pv_list);
+			pv_table[j] = &pvd[k];
+		}
+	}
+	TAILQ_INIT(&pv_dummy.pv_list);
+}
+
 /*
  * Initialize the pmap module.
  * Called by vm_init, to initialize any structures that the pmap
@@ -1810,8 +1856,7 @@
 {
 	struct pmap_preinit_mapping *ppim;
 	vm_page_t m, mpte;
-	vm_size_t s;
-	int error, i, pv_npg, ret, skz63;
+	int error, i, ret, skz63;
 
 	/* L1TF, reserve page @0 unconditionally */
 	vm_page_blacklist_add(0, bootverbose);
@@ -1899,26 +1944,7 @@
 	 */
 	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
 
-	/*
-	 * Initialize the pool of pv list locks.
-	 */
-	for (i = 0; i < NPV_LIST_LOCKS; i++)
-		rw_init(&pv_list_locks[i], "pmap pv list");
-
-	/*
-	 * Calculate the size of the pv head table for superpages.
-	 */
-	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
-
-	/*
-	 * Allocate memory for the pv head table for superpages.
-	 */
-	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
-	s = round_page(s);
-	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
-	for (i = 0; i < pv_npg; i++)
-		TAILQ_INIT(&pv_table[i].pv_list);
-	TAILQ_INIT(&pv_dummy.pv_list);
+	pmap_init_pv_table();
 
 	pmap_initialized = 1;
 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {