Changeset View
Changeset View
Standalone View
Standalone View
head/sys/amd64/amd64/pmap.c
- This file is larger than 256 KB, so syntax highlighting is disabled by default.
Show First 20 Lines • Show All 377 Lines • ▼ Show 20 Lines | |||||
static u_int64_t KPDphys; /* phys addr of kernel level 2 */ | static u_int64_t KPDphys; /* phys addr of kernel level 2 */ | ||||
u_int64_t KPDPphys; /* phys addr of kernel level 3 */ | u_int64_t KPDPphys; /* phys addr of kernel level 3 */ | ||||
u_int64_t KPML4phys; /* phys addr of kernel level 4 */ | u_int64_t KPML4phys; /* phys addr of kernel level 4 */ | ||||
static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */ | static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */ | ||||
static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */ | static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */ | ||||
static int ndmpdpphys; /* number of DMPDPphys pages */ | static int ndmpdpphys; /* number of DMPDPphys pages */ | ||||
static uint64_t PAPDPphys; /* phys addr of page array level 3 */ | |||||
static int npapdpphys; /* number of PAPDPphys pages */ | |||||
static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */ | static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */ | ||||
/* | /* | ||||
* pmap_mapdev support pre initialization (i.e. console) | * pmap_mapdev support pre initialization (i.e. console) | ||||
*/ | */ | ||||
#define PMAP_PREINIT_MAPPING_COUNT 8 | #define PMAP_PREINIT_MAPPING_COUNT 8 | ||||
static struct pmap_preinit_mapping { | static struct pmap_preinit_mapping { | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
▲ Show 20 Lines • Show All 1,028 Lines • ▼ Show 20 Lines | |||||
create_pagetables(vm_paddr_t *firstaddr) | create_pagetables(vm_paddr_t *firstaddr) | ||||
{ | { | ||||
int i, j, ndm1g, nkpdpe, nkdmpde; | int i, j, ndm1g, nkpdpe, nkdmpde; | ||||
pd_entry_t *pd_p; | pd_entry_t *pd_p; | ||||
pdp_entry_t *pdp_p; | pdp_entry_t *pdp_p; | ||||
pml4_entry_t *p4_p; | pml4_entry_t *p4_p; | ||||
uint64_t DMPDkernphys; | uint64_t DMPDkernphys; | ||||
npapdpphys = howmany(ptoa(Maxmem) / sizeof(struct vm_page), NBPML4); | |||||
if (npapdpphys > NPAPML4E) { | |||||
printf("NDMPML4E limits system to %lu GB\n", | |||||
(NDMPML4E * 512) * (PAGE_SIZE / sizeof(struct vm_page))); | |||||
npapdpphys = NPAPML4E; | |||||
Maxmem = atop(NPAPML4E * NBPML4 * | |||||
(PAGE_SIZE / sizeof(struct vm_page))); | |||||
} | |||||
PAPDPphys = allocpages(firstaddr, npapdpphys); | |||||
/* Allocate page table pages for the direct map */ | /* Allocate page table pages for the direct map */ | ||||
ndmpdp = howmany(ptoa(Maxmem), NBPDP); | ndmpdp = howmany(ptoa(Maxmem), NBPDP); | ||||
if (ndmpdp < 4) /* Minimum 4GB of dirmap */ | if (ndmpdp < 4) /* Minimum 4GB of dirmap */ | ||||
ndmpdp = 4; | ndmpdp = 4; | ||||
ndmpdpphys = howmany(ndmpdp, NPDPEPG); | ndmpdpphys = howmany(ndmpdp, NPDPEPG); | ||||
if (ndmpdpphys > NDMPML4E) { | if (ndmpdpphys > NDMPML4E) { | ||||
/* | /* | ||||
* Each NDMPML4E allows 512 GB, so limit to that, | * Each NDMPML4E allows 512 GB, so limit to that, | ||||
▲ Show 20 Lines • Show All 130 Lines • ▼ Show 20 Lines | for (i = 0; i < ndmpdpphys; i++) { | ||||
p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V; | p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V; | ||||
} | } | ||||
/* Connect the KVA slots up to the PML4 */ | /* Connect the KVA slots up to the PML4 */ | ||||
for (i = 0; i < NKPML4E; i++) { | for (i = 0; i < NKPML4E; i++) { | ||||
p4_p[KPML4BASE + i] = KPDPphys + ptoa(i); | p4_p[KPML4BASE + i] = KPDPphys + ptoa(i); | ||||
p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V; | p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V; | ||||
} | } | ||||
/* Connect the page array slots up to the pml4. */ | |||||
for (i = 0; i < npapdpphys; i++) { | |||||
p4_p[PAPML4I + i] = PAPDPphys + ptoa(i); | |||||
p4_p[PAPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx; | |||||
} | } | ||||
} | |||||
/* | /* | ||||
* Bootstrap the system enough to run with virtual memory. | * Bootstrap the system enough to run with virtual memory. | ||||
* | * | ||||
* On amd64 this is called after mapping has already been enabled | * On amd64 this is called after mapping has already been enabled | ||||
* and just syncs the pmap module with what has already been done. | * and just syncs the pmap module with what has already been done. | ||||
* [We can't call it easily with mapping off since the kernel is not | * [We can't call it easily with mapping off since the kernel is not | ||||
* mapped with PA == VA, hence we would have to relocate every address | * mapped with PA == VA, hence we would have to relocate every address | ||||
▲ Show 20 Lines • Show All 1,797 Lines • ▼ Show 20 Lines | for (i = 0; i < NKPML4E; i++) { | ||||
pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW | | pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW | | ||||
X86_PG_V; | X86_PG_V; | ||||
} | } | ||||
for (i = 0; i < ndmpdpphys; i++) { | for (i = 0; i < ndmpdpphys; i++) { | ||||
pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW | | pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW | | ||||
X86_PG_V; | X86_PG_V; | ||||
} | } | ||||
for (i = 0; i < npapdpphys; i++) { | |||||
pm_pml4[PAPML4I + i] = (PAPDPphys + ptoa(i)) | X86_PG_RW | | |||||
X86_PG_V; | |||||
} | |||||
/* install self-referential address mapping entry(s) */ | /* install self-referential address mapping entry(s) */ | ||||
pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW | | pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW | | ||||
X86_PG_A | X86_PG_M; | X86_PG_A | X86_PG_M; | ||||
/* install large map entries if configured */ | /* install large map entries if configured */ | ||||
for (i = 0; i < lm_ents; i++) | for (i = 0; i < lm_ents; i++) | ||||
pm_pml4[LMSPML4I + i] = kernel_pmap->pm_pml4[LMSPML4I + i]; | pm_pml4[LMSPML4I + i] = kernel_pmap->pm_pml4[LMSPML4I + i]; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 340 Lines • ▼ Show 20 Lines | KASSERT(CPU_EMPTY(&pmap->pm_active), | ||||
("releasing active pmap %p", pmap)); | ("releasing active pmap %p", pmap)); | ||||
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4)); | m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4)); | ||||
for (i = 0; i < NKPML4E; i++) /* KVA */ | for (i = 0; i < NKPML4E; i++) /* KVA */ | ||||
pmap->pm_pml4[KPML4BASE + i] = 0; | pmap->pm_pml4[KPML4BASE + i] = 0; | ||||
for (i = 0; i < ndmpdpphys; i++)/* Direct Map */ | for (i = 0; i < ndmpdpphys; i++)/* Direct Map */ | ||||
pmap->pm_pml4[DMPML4I + i] = 0; | pmap->pm_pml4[DMPML4I + i] = 0; | ||||
for (i = 0; i < npapdpphys; i++) | |||||
pmap->pm_pml4[PAPML4I + i] = 0; | |||||
pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */ | pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */ | ||||
for (i = 0; i < lm_ents; i++) /* Large Map */ | for (i = 0; i < lm_ents; i++) /* Large Map */ | ||||
pmap->pm_pml4[LMSPML4I + i] = 0; | pmap->pm_pml4[LMSPML4I + i] = 0; | ||||
vm_page_unwire_noq(m); | vm_page_unwire_noq(m); | ||||
vm_page_free_zero(m); | vm_page_free_zero(m); | ||||
if (pmap->pm_pml4u != NULL) { | if (pmap->pm_pml4u != NULL) { | ||||
Show All 20 Lines | |||||
kvm_free(SYSCTL_HANDLER_ARGS) | kvm_free(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; | unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; | ||||
return sysctl_handle_long(oidp, &kfree, 0, req); | return sysctl_handle_long(oidp, &kfree, 0, req); | ||||
} | } | ||||
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, | SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, | ||||
0, 0, kvm_free, "LU", "Amount of KVM free"); | 0, 0, kvm_free, "LU", "Amount of KVM free"); | ||||
/*
 * Allocate and map the backing pages for the vm_page array at boot.
 *
 * Maps 'pages' * sizeof(struct vm_page) bytes of KVA starting at
 * PA_MIN_ADDRESS using 2MB (NBPDR) superpages, allocating the backing
 * memory from the NUMA domain that owns the physical pages each slice
 * of the array will describe (pfn -> domain lookup below), so that each
 * domain's vm_page structures live in local memory.
 *
 * Called once during early boot, before the regular VM allocators are
 * available, hence the use of vm_phys_early_alloc().
 */
void
pmap_page_array_startup(long pages)
{
	pdp_entry_t *pdpe;
	pd_entry_t *pde, newpdir;
	vm_offset_t va, start, end;
	vm_paddr_t pa;
	long pfn;
	int domain, i;

	vm_page_array_size = pages;

	start = va = PA_MIN_ADDRESS;
	end = va + (pages * sizeof(struct vm_page));
	while (va < end) {
		/*
		 * Find the first page frame this slice of the array will
		 * describe and allocate from that frame's domain.
		 */
		pfn = first_page + ((va - start) / sizeof(struct vm_page));
		domain = _vm_phys_domain(ctob(pfn));
		pdpe = pmap_pdpe(kernel_pmap, va);
		if ((*pdpe & X86_PG_V) == 0) {
			/*
			 * No page-directory page yet for this 1GB region:
			 * install a zeroed one, then retry the same va so
			 * the PDE below is populated on the next pass.
			 */
			pa = vm_phys_early_alloc(domain, PAGE_SIZE);
			bzero((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
			*pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
			    X86_PG_A | X86_PG_M);
			continue; /* try again */
		}
		pde = pmap_pdpe_to_pde(pdpe, va);
		if ((*pde & X86_PG_V) != 0)
			panic("Unexpected pde");
		/* Back this 2MB of the array with one superpage. */
		pa = vm_phys_early_alloc(domain, NBPDR);
		/* Include every constituent 4K page in kernel dumps. */
		for (i = 0; i < NPDEPG; i++)
			dump_add_page(pa + (i * PAGE_SIZE));
		/*
		 * Map it global (pg_g) and non-executable (pg_nx); the
		 * array is data only.
		 */
		newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
		    X86_PG_M | PG_PS | pg_g | pg_nx);
		pde_store(pde, newpdir);
		va += NBPDR;
	}
}
/* | /* | ||||
* grow the number of kernel page table entries, if needed | * grow the number of kernel page table entries, if needed | ||||
*/ | */ | ||||
void | void | ||||
pmap_growkernel(vm_offset_t addr) | pmap_growkernel(vm_offset_t addr) | ||||
{ | { | ||||
vm_paddr_t paddr; | vm_paddr_t paddr; | ||||
▲ Show 20 Lines • Show All 6,172 Lines • Show Last 20 Lines |