Index: sys/amd64/amd64/pmap.c =================================================================== --- sys/amd64/amd64/pmap.c +++ sys/amd64/amd64/pmap.c @@ -1396,20 +1396,29 @@ KPTphys = allocpages(firstaddr, nkpt); KPDphys = allocpages(firstaddr, nkpdpe); - /* Fill in the underlying page table pages */ - /* XXX not fully used, underneath 2M pages */ + /* + * Fill in the underlying page table pages. The tables up to + * atop(KERNend) are only used when mappings of pre-loaded + * memory are demoted. + */ pt_p = (pt_entry_t *)KPTphys; - for (i = 0; ptoa(i) < *firstaddr; i++) + for (i = 0; ptoa(i) < round_2mpage(KERNend); i++) pt_p[i] = ptoa(i) | X86_PG_V | pg_g | bootaddr_rwx(ptoa(i)); - /* Now map the page tables at their location within PTmap */ + /* + * Now connect the page tables to page directory pages. This + * implicitly maps the page tables at their location within PTmap. + */ pd_p = (pd_entry_t *)KPDphys; for (i = 0; i < nkpt; i++) pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V; - /* Map from zero to end of allocations under 2M pages */ - /* This replaces some of the KPTphys entries above */ - for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) + /* + * Map from zero to end of loader-preallocated memory under 2M pages. + * This replaces some of the KPTphys entries above, implementing + * promotion. + */ + for (i = 0; (i << PDRSHIFT) < round_2mpage(KERNend); i++) /* Preset PG_M and PG_A because demotion expects it. */ pd_p[i] = (i << PDRSHIFT) | X86_PG_V | PG_PS | pg_g | X86_PG_M | X86_PG_A | bootaddr_rwx(i << PDRSHIFT);