Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -178,8 +178,32 @@
 #endif
 
 #define pmap_l2_pindex(v)       ((v) >> L2_SHIFT)
-#define pa_to_pvh(pa)           (&pv_table[pmap_l2_pindex(pa)])
+static struct md_page *
+pa_to_pvh(vm_paddr_t pa)
+{
+        struct vm_phys_seg *seg;
+        int segind;
+
+        for (segind = 0; segind < vm_phys_nsegs; segind++) {
+                seg = &vm_phys_segs[segind];
+                if (pa >= seg->start && pa < seg->end)
+                        return ((struct md_page *)seg->md_first +
+                            pmap_l2_pindex(pa) - pmap_l2_pindex(seg->start));
+        }
+        panic("pa 0x%jx not within vm_phys_segs", (uintmax_t)pa);
+}
+
+static struct md_page *
+page_to_pvh(vm_page_t m)
+{
+        struct vm_phys_seg *seg;
+
+        seg = &vm_phys_segs[m->segind];
+        return ((struct md_page *)seg->md_first +
+            pmap_l2_pindex(VM_PAGE_TO_PHYS(m)) - pmap_l2_pindex(seg->start));
+}
+
 #define NPV_LIST_LOCKS  MAXCPU
 
 #define PHYS_TO_PV_LIST_LOCK(pa)        \
@@ -1049,6 +1073,8 @@
 void
 pmap_init(void)
 {
+        struct vm_phys_seg *seg, *next_seg;
+        struct md_page *pvh;
         vm_size_t s;
         uint64_t mmfr1;
         int i, pv_npg, vmid_bits;
@@ -1093,7 +1119,12 @@
         /*
          * Calculate the size of the pv head table for superpages.
          */
-        pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
+        pv_npg = 0;
+        for (i = 0; i < vm_phys_nsegs; i++) {
+                seg = &vm_phys_segs[i];
+                pv_npg += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
+                    pmap_l2_pindex(seg->start);
+        }
 
         /*
          * Allocate memory for the pv head table for superpages.
@@ -1105,6 +1136,31 @@
                 TAILQ_INIT(&pv_table[i].pv_list);
         TAILQ_INIT(&pv_dummy.pv_list);
 
+        /*
+         * Set pointers from vm_phys_segs to pv_table.
+         */
+        for (i = 0, pvh = pv_table; i < vm_phys_nsegs; i++) {
+                seg = &vm_phys_segs[i];
+                seg->md_first = pvh;
+                pvh += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
+                    pmap_l2_pindex(seg->start);
+
+                /*
+                 * If there is a following segment, and the final
+                 * superpage of this segment and the initial superpage
+                 * of the next segment are the same then adjust the
+                 * pv_table entry for that next segment down by one so
+                 * that the pv_table entries will be shared.
+                 */
+                if (i + 1 < vm_phys_nsegs) {
+                        next_seg = &vm_phys_segs[i + 1];
+                        if (pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - 1 ==
+                            pmap_l2_pindex(next_seg->start)) {
+                                pvh--;
+                        }
+                }
+        }
+
         vm_initialized = 1;
 }
@@ -2247,7 +2303,7 @@
                 m->md.pv_gen++;
                 if (TAILQ_EMPTY(&m->md.pv_list) &&
                     (m->flags & PG_FICTITIOUS) == 0) {
-                        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                        pvh = page_to_pvh(m);
                         if (TAILQ_EMPTY(&pvh->pv_list)) {
                                 vm_page_aflag_clear(m,
                                     PGA_WRITEABLE);
@@ -2788,7 +2844,7 @@
                 pmap_pvh_free(&m->md, pmap, va);
                 if (TAILQ_EMPTY(&m->md.pv_list) &&
                     (m->flags & PG_FICTITIOUS) == 0) {
-                        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                        pvh = page_to_pvh(m);
                         if (TAILQ_EMPTY(&pvh->pv_list))
                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
                 }
@@ -2858,7 +2914,7 @@
                         pmap_pvh_free(&m->md, pmap, sva);
                         if (TAILQ_EMPTY(&m->md.pv_list) &&
                             (m->flags & PG_FICTITIOUS) == 0) {
-                                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                                pvh = page_to_pvh(m);
                                 if (TAILQ_EMPTY(&pvh->pv_list))
                                         vm_page_aflag_clear(m, PGA_WRITEABLE);
                         }
@@ -2997,8 +3053,7 @@
             ("pmap_remove_all: page %p is not managed", m));
         SLIST_INIT(&free);
         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
-        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
-            pa_to_pvh(VM_PAGE_TO_PHYS(m));
+        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
 retry:
         rw_wlock(lock);
         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
@@ -4480,7 +4535,7 @@
                         break;
         }
         if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
-                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                pvh = page_to_pvh(m);
                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
                         if (PV_PMAP(pv) == pmap) {
                                 rv = TRUE;
@@ -4535,7 +4590,7 @@
                 PMAP_UNLOCK(pmap);
         }
         if ((m->flags & PG_FICTITIOUS) == 0) {
-                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                pvh = page_to_pvh(m);
                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
                         pmap = PV_PMAP(pv);
                         if (!PMAP_TRYLOCK(pmap)) {
@@ -4577,7 +4632,7 @@
         rw_rlock(lock);
         rv = !TAILQ_EMPTY(&m->md.pv_list) ||
             ((m->flags & PG_FICTITIOUS) == 0 &&
-            !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+            !TAILQ_EMPTY(&page_to_pvh(m)->pv_list));
         rw_runlock(lock);
         return (rv);
 }
@@ -4740,8 +4795,7 @@
                                 if ((m->a.flags & PGA_WRITEABLE) != 0 &&
                                     TAILQ_EMPTY(&m->md.pv_list) &&
                                     (m->flags & PG_FICTITIOUS) == 0) {
-                                        pvh = pa_to_pvh(
-                                            VM_PAGE_TO_PHYS(m));
+                                        pvh = page_to_pvh(m);
                                         if (TAILQ_EMPTY(&pvh->pv_list))
                                                 vm_page_aflag_clear(m,
                                                     PGA_WRITEABLE);
@@ -4818,7 +4872,7 @@
                                 goto out;
                 }
                 if ((m->flags & PG_FICTITIOUS) == 0) {
-                        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+                        pvh = page_to_pvh(m);
                         TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
                                 pmap = PV_PMAP(pv);
                                 PMAP_ASSERT_STAGE1(pmap);
@@ -4938,8 +4992,7 @@
         if (!pmap_page_is_write_mapped(m))
                 return;
         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
-        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
-            pa_to_pvh(VM_PAGE_TO_PHYS(m));
+        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
retry_pv_loop:
         rw_wlock(lock);
         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
@@ -5035,7 +5088,7 @@
         cleared = 0;
         pa = VM_PAGE_TO_PHYS(m);
         lock = PHYS_TO_PV_LIST_LOCK(pa);
-        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
+        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
         rw_wlock(lock);
 retry:
         not_cleared = 0;
@@ -5312,8 +5365,7 @@
         if (!pmap_page_is_write_mapped(m))
                 return;
-        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
-            pa_to_pvh(VM_PAGE_TO_PHYS(m));
+        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
         rw_wlock(lock);
 restart:
Index: head/sys/vm/vm_phys.h
===================================================================
--- head/sys/vm/vm_phys.h
+++ head/sys/vm/vm_phys.h
@@ -72,6 +72,7 @@
 #if VM_NRESERVLEVEL > 0
         vm_reserv_t first_reserv;
 #endif
+        void *md_first;
         int domain;
         struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
 };
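
The pv_table sharing arithmetic in the pmap_init() hunk above is subtle enough to merit a worked example. The following stand-alone sketch is not part of the patch: it replays the same per-segment sizing, md_first assignment, and boundary-superpage adjustment in user space, using made-up segment addresses, a hypothetical struct seg whose "first" index stands in for md_first, and L2_SHIFT assumed to be 21 (2 MiB superpages with a 4 KiB granule, as on arm64). It demonstrates that when one segment's final superpage is also the next segment's initial superpage, a pa_to_pvh()-style lookup from either segment resolves to the same pv_table slot.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel definitions; values are made up. */
#define L2_SHIFT        21                      /* 2 MiB superpages */
#define L2_SIZE         (UINT64_C(1) << L2_SHIFT)
#define pmap_l2_pindex(v)       ((v) >> L2_SHIFT)
#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))

struct seg {
        uint64_t start, end;    /* physical address range */
        long first;             /* index of this segment's first pv_table slot */
};

int
main(void)
{
        /* Two DRAM ranges whose boundary falls inside one superpage. */
        struct seg segs[] = {
                { 0x80000000, 0x801f0000, 0 }, /* ends inside a superpage */
                { 0x801f0000, 0x90000000, 0 }, /* begins in the same one */
        };
        int i, nsegs = 2;
        long pv_npg = 0, pvh = 0, slot0, slot1;

        /* Size the table as the new pmap_init() does: per-segment counts. */
        for (i = 0; i < nsegs; i++)
                pv_npg += pmap_l2_pindex(roundup2(segs[i].end, L2_SIZE)) -
                    pmap_l2_pindex(segs[i].start);

        /* Assign md_first equivalents, sharing the boundary superpage. */
        for (i = 0; i < nsegs; i++) {
                segs[i].first = pvh;
                pvh += pmap_l2_pindex(roundup2(segs[i].end, L2_SIZE)) -
                    pmap_l2_pindex(segs[i].start);
                if (i + 1 < nsegs &&
                    pmap_l2_pindex(roundup2(segs[i].end, L2_SIZE)) - 1 ==
                    pmap_l2_pindex(segs[i + 1].start))
                        pvh--;  /* next segment reuses the shared slot */
        }

        /* pa_to_pvh() equivalent: last byte of seg 0, first byte of seg 1. */
        slot0 = segs[0].first + (long)(pmap_l2_pindex(segs[0].end - 1) -
            pmap_l2_pindex(segs[0].start));
        slot1 = segs[1].first + (long)(pmap_l2_pindex(segs[1].start) -
            pmap_l2_pindex(segs[1].start));
        assert(slot0 == slot1);
        printf("allocated %ld entries, used %ld; shared slot %ld\n",
            pv_npg, pvh, slot0);
        return (0);
}

With these values the sketch prints "allocated 129 entries, used 128; shared slot 0": pv_npg counts the boundary superpage once per segment, so the table stays one entry larger per shared boundary than strictly needed, and the md_first adjustment merely makes both segments reference a single entry for that superpage.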