Index: head/sys/amd64/amd64/pmap.c
===================================================================
--- head/sys/amd64/amd64/pmap.c
+++ head/sys/amd64/amd64/pmap.c
@@ -372,6 +372,8 @@
 static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
 static int		ndmpdpphys;	/* number of DMPDPphys pages */
 
+static vm_paddr_t	KERNend;	/* phys addr of end of bootstrap data */
+
 /*
  * pmap_mapdev support pre initialization (i.e. console)
  */
@@ -998,8 +1000,9 @@
 	/* Map from zero to end of allocations under 2M pages */
 	/* This replaces some of the KPTphys entries above */
 	for (i = 0; (i << PDRSHIFT) < *firstaddr; i++)
+		/* Preset PG_M and PG_A because demotion expects it. */
 		pd_p[i] = (i << PDRSHIFT) | X86_PG_V | PG_PS | pg_g |
-		    bootaddr_rwx(i << PDRSHIFT);
+		    X86_PG_M | X86_PG_A | bootaddr_rwx(i << PDRSHIFT);
 
 	/*
 	 * Because we map the physical blocks in 2M pages, adjust firstaddr
@@ -1091,6 +1094,8 @@
 	pt_entry_t *pte;
 	int i;
 
+	KERNend = *firstaddr;
+
 	if (!pti)
 		pg_g = X86_PG_G;
 
@@ -1323,6 +1328,7 @@
 	 * Initialize the vm page array entries for the kernel pmap's
 	 * page table pages.
 	 */
+	PMAP_LOCK(kernel_pmap);
 	for (i = 0; i < nkpt; i++) {
 		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
 		KASSERT(mpte >= vm_page_array &&
@@ -1331,7 +1337,11 @@
 		mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
 		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
 		mpte->wire_count = 1;
+		if (i << PDRSHIFT < KERNend &&
+		    pmap_insert_pt_page(kernel_pmap, mpte))
+			panic("pmap_init: pmap_insert_pt_page failed");
 	}
+	PMAP_UNLOCK(kernel_pmap);
 	vm_wire_add(nkpt);
 
 	/*
Index: head/sys/i386/i386/pmap.c
===================================================================
--- head/sys/i386/i386/pmap.c
+++ head/sys/i386/i386/pmap.c
@@ -931,6 +932,7 @@
 	 * Initialize the vm page array entries for the kernel pmap's
 	 * page table pages.
 	 */
+	PMAP_LOCK(kernel_pmap);
 	for (i = 0; i < NKPT; i++) {
 		mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i));
 		KASSERT(mpte >= vm_page_array &&
@@ -938,7 +939,14 @@
 		    ("pmap_init: page table page is out of range"));
 		mpte->pindex = i + KPTDI;
 		mpte->phys_addr = KPTphys + ptoa(i);
+		mpte->wire_count = 1;
+		if (pseflag != 0 &&
+		    KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend &&
+		    pmap_insert_pt_page(kernel_pmap, mpte))
+			panic("pmap_init: pmap_insert_pt_page failed");
 	}
+	PMAP_UNLOCK(kernel_pmap);
+	vm_wire_add(NKPT);
 
 	/*
 	 * Initialize the address space (zone) for the pv entries.  Set a
Index: head/sys/vm/vm_kern.c
===================================================================
--- head/sys/vm/vm_kern.c
+++ head/sys/vm/vm_kern.c
@@ -700,16 +700,15 @@
 {
 #if defined(__i386__) || defined(__amd64__)
 	struct vm_domain *vmd;
-	vm_offset_t end;
+	vm_offset_t end, va;
 	vm_paddr_t pa;
 	vm_page_t m;
 
 	end = trunc_page(start + size);
 	start = round_page(start);
 
-	(void)vm_map_remove(kernel_map, start, end);
-	for (; start < end; start += PAGE_SIZE) {
-		pa = pmap_kextract(start);
+	for (va = start; va < end; va += PAGE_SIZE) {
+		pa = pmap_kextract(va);
 		m = PHYS_TO_VM_PAGE(pa);
 
 		vmd = vm_pagequeue_domain(m);
@@ -717,6 +716,8 @@
 		vm_phys_free_pages(m, 0);
 		vm_domain_free_unlock(vmd);
 	}
+	pmap_remove(kernel_pmap, start, end);
+	(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
 #endif
 }
 
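
Note on the create_pagetables() hunk: the new X86_PG_M | X86_PG_A bits are preset because, as the added comment says, the demotion path expects them; pmap_demote_pde() derives the 512 replacement 4K PTEs from the 2M PDE's attribute bits, and a kernel mapping never takes the lazy accessed/dirty faults that would set those bits after the fact. Below is a minimal standalone sketch, not kernel code, that composes such a PDE using the architecture-defined x86 paging-bit values the X86_PG_* macros expand to; the loop bound and PG_RW stand-in for bootaddr_rwx() are illustrative.

/*
 * Standalone sketch (illustrative, not kernel code): compose bootstrap
 * 2M PDEs the way the amended create_pagetables() loop does.  The PG_*
 * values are the architecture-defined x86 paging bits that the
 * kernel's X86_PG_* macros correspond to.
 */
#include <stdint.h>
#include <stdio.h>

#define PG_V	0x001ULL	/* present */
#define PG_RW	0x002ULL	/* writable; stands in for bootaddr_rwx() */
#define PG_A	0x020ULL	/* accessed */
#define PG_M	0x040ULL	/* modified (dirty) */
#define PG_PS	0x080ULL	/* page size: this PDE maps a 2M page */
#define PDRSHIFT 21		/* log2 of the 2M superpage size */

int
main(void)
{
	uint64_t pde;
	unsigned i;

	for (i = 0; i < 3; i++) {
		/* Preset PG_A and PG_M so demotion never has to. */
		pde = ((uint64_t)i << PDRSHIFT) | PG_V | PG_PS | PG_RW |
		    PG_A | PG_M;
		printf("pd[%u] = %#018jx\n", i, (uintmax_t)pde);
	}
	return (0);
}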
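
The pmap_insert_pt_page() calls in pmap_init() register the bootstrap page table pages (those covering the kernel mappings below KERNend) with the kernel pmap, so that a later demotion of one of the bootstrap 2M mappings can find a preallocated page table page rather than allocating one mid-demotion. A hypothetical userspace mock of that bookkeeping follows; register_pt_page() and lookup_pt_page() are invented names standing in for the kernel's radix-tree insert and lookup, not kernel APIs.

/*
 * Hypothetical mock (invented names, not kernel code) of the
 * bookkeeping pmap_insert_pt_page() provides: page table pages are
 * recorded by pindex so the demotion path can look one up instead of
 * allocating a page in the middle of pmap_demote_pde().
 */
#include <stddef.h>
#include <stdio.h>

struct pt_page {
	unsigned long pindex;	/* which 2M span this page table maps */
	void *page;		/* stand-in for the vm_page_t */
};

static struct pt_page table[128];
static size_t ntable;

/* Mock of pmap_insert_pt_page(): record the page, keyed by pindex. */
static int
register_pt_page(unsigned long pindex, void *page)
{
	if (ntable == sizeof(table) / sizeof(table[0]))
		return (1);	/* nonzero means failure, as in the diff */
	table[ntable].pindex = pindex;
	table[ntable].page = page;
	ntable++;
	return (0);
}

/* Mock of the lookup demotion performs to find a preallocated page. */
static void *
lookup_pt_page(unsigned long pindex)
{
	size_t i;

	for (i = 0; i < ntable; i++)
		if (table[i].pindex == pindex)
			return (table[i].page);
	return (NULL);	/* would force an allocation during demotion */
}

int
main(void)
{
	static char pt0[4096], pt1[4096];

	register_pt_page(0, pt0);
	register_pt_page(1, pt1);
	printf("pindex 1 -> %p\n", lookup_pt_page(1));
	printf("pindex 7 -> %p\n", lookup_pt_page(7));
	return (0);
}

In the patch itself the insertions run under PMAP_LOCK(kernel_pmap), matching the locking the insert path normally expects, and on i386 they additionally apply only when PSE superpages are enabled (the pseflag check) and only for the KVA range between KERNBASE and KERNend.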