Changeset View
Changeset View
Standalone View
Standalone View
sys/amd64/amd64/pmap.c
- This file is larger than 256 KB, so syntax highlighting is disabled by default.
Show First 20 Lines • Show All 116 Lines • ▼ Show 20 Lines | |||||
#include <sys/bus.h> | #include <sys/bus.h> | ||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#include <sys/counter.h> | #include <sys/counter.h> | ||||
#include <sys/kernel.h> | #include <sys/kernel.h> | ||||
#include <sys/ktr.h> | #include <sys/ktr.h> | ||||
#include <sys/lock.h> | #include <sys/lock.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/mman.h> | #include <sys/mman.h> | ||||
#include <sys/msan.h> | |||||
#include <sys/mutex.h> | #include <sys/mutex.h> | ||||
#include <sys/proc.h> | #include <sys/proc.h> | ||||
#include <sys/rangeset.h> | #include <sys/rangeset.h> | ||||
#include <sys/rwlock.h> | #include <sys/rwlock.h> | ||||
#include <sys/sbuf.h> | #include <sys/sbuf.h> | ||||
#include <sys/smr.h> | #include <sys/smr.h> | ||||
#include <sys/sx.h> | #include <sys/sx.h> | ||||
#include <sys/turnstile.h> | #include <sys/turnstile.h> | ||||
Show All 23 Lines | |||||
#include <vm/uma.h> | #include <vm/uma.h> | ||||
#include <machine/asan.h> | #include <machine/asan.h> | ||||
#include <machine/intr_machdep.h> | #include <machine/intr_machdep.h> | ||||
#include <x86/apicvar.h> | #include <x86/apicvar.h> | ||||
#include <x86/ifunc.h> | #include <x86/ifunc.h> | ||||
#include <machine/cpu.h> | #include <machine/cpu.h> | ||||
#include <machine/cputypes.h> | #include <machine/cputypes.h> | ||||
#include <machine/intr_machdep.h> | |||||
#include <machine/md_var.h> | #include <machine/md_var.h> | ||||
#include <machine/msan.h> | |||||
#include <machine/pcb.h> | #include <machine/pcb.h> | ||||
#include <machine/specialreg.h> | #include <machine/specialreg.h> | ||||
#ifdef SMP | #ifdef SMP | ||||
#include <machine/smp.h> | #include <machine/smp.h> | ||||
#endif | #endif | ||||
#include <machine/sysarch.h> | #include <machine/sysarch.h> | ||||
#include <machine/tss.h> | #include <machine/tss.h> | ||||
▲ Show 20 Lines • Show All 251 Lines • ▼ Show 20 Lines | |||||
static u_int64_t KPDPphys; /* phys addr of kernel level 3 */ | static u_int64_t KPDPphys; /* phys addr of kernel level 3 */ | ||||
u_int64_t KPML4phys; /* phys addr of kernel level 4 */ | u_int64_t KPML4phys; /* phys addr of kernel level 4 */ | ||||
u_int64_t KPML5phys; /* phys addr of kernel level 5, | u_int64_t KPML5phys; /* phys addr of kernel level 5, | ||||
if supported */ | if supported */ | ||||
#ifdef KASAN | #ifdef KASAN | ||||
static uint64_t KASANPDPphys; | static uint64_t KASANPDPphys; | ||||
#endif | #endif | ||||
#ifdef KMSAN | |||||
static uint64_t KMSANSHADPDPphys; | |||||
static uint64_t KMSANORIGPDPphys; | |||||
#endif | |||||
static pml4_entry_t *kernel_pml4; | static pml4_entry_t *kernel_pml4; | ||||
alc: Currently, wouldn't "is necessary" be more accurate than "can be useful"? | |||||
static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */ | static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */ | ||||
static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */ | static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */ | ||||
static int ndmpdpphys; /* number of DMPDPphys pages */ | static int ndmpdpphys; /* number of DMPDPphys pages */ | ||||
static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */ | static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */ | ||||
/* | /* | ||||
* pmap_mapdev support pre initialization (i.e. console) | * pmap_mapdev support pre initialization (i.e. console) | ||||
▲ Show 20 Lines • Show All 1,231 Lines • ▼ Show 20 Lines | if ((amd_feature & AMDID_PAGE1GB) != 0) { | ||||
*/ | */ | ||||
nkdmpde = howmany((vm_offset_t)(brwsection - KERNBASE), NBPDP); | nkdmpde = howmany((vm_offset_t)(brwsection - KERNBASE), NBPDP); | ||||
DMPDkernphys = allocpages(firstaddr, nkdmpde); | DMPDkernphys = allocpages(firstaddr, nkdmpde); | ||||
} | } | ||||
if (ndm1g < ndmpdp) | if (ndm1g < ndmpdp) | ||||
DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g); | DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g); | ||||
dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; | dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; | ||||
/* Allocate pages */ | /* Allocate pages. */ | ||||
KPML4phys = allocpages(firstaddr, 1); | KPML4phys = allocpages(firstaddr, 1); | ||||
KPDPphys = allocpages(firstaddr, NKPML4E); | KPDPphys = allocpages(firstaddr, NKPML4E); | ||||
#ifdef KASAN | #ifdef KASAN | ||||
KASANPDPphys = allocpages(firstaddr, NKASANPML4E); | KASANPDPphys = allocpages(firstaddr, NKASANPML4E); | ||||
KASANPDphys = allocpages(firstaddr, 1); | KASANPDphys = allocpages(firstaddr, 1); | ||||
#endif | #endif | ||||
#ifdef KMSAN | |||||
/* | |||||
* The KMSAN shadow maps are initially left unpopulated, since there is | |||||
* no need to shadow memory above KERNBASE. | |||||
*/ | |||||
KMSANSHADPDPphys = allocpages(firstaddr, NKMSANSHADPML4E); | |||||
KMSANORIGPDPphys = allocpages(firstaddr, NKMSANORIGPML4E); | |||||
#endif | |||||
/* | /* | ||||
* Allocate the initial number of kernel page table pages required to | * Allocate the initial number of kernel page table pages required to | ||||
* bootstrap. We defer this until after all memory-size dependent | * bootstrap. We defer this until after all memory-size dependent | ||||
* allocations are done (e.g. direct map), so that we don't have to | * allocations are done (e.g. direct map), so that we don't have to | ||||
* build in too much slop in our estimate. | * build in too much slop in our estimate. | ||||
* | * | ||||
* Note that when NKPML4E > 1, we have an empty page underneath | * Note that when NKPML4E > 1, we have an empty page underneath | ||||
▲ Show 20 Lines • Show All 114 Lines • ▼ Show 20 Lines | |||||
#ifdef KASAN | #ifdef KASAN | ||||
/* Connect the KASAN shadow map slots up to the PML4. */ | /* Connect the KASAN shadow map slots up to the PML4. */ | ||||
for (i = 0; i < NKASANPML4E; i++) { | for (i = 0; i < NKASANPML4E; i++) { | ||||
p4_p[KASANPML4I + i] = KASANPDPphys + ptoa(i); | p4_p[KASANPML4I + i] = KASANPDPphys + ptoa(i); | ||||
p4_p[KASANPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx; | p4_p[KASANPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx; | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef KMSAN | |||||
/* Connect the KMSAN shadow map slots up to the PML4. */ | |||||
for (i = 0; i < NKMSANSHADPML4E; i++) { | |||||
p4_p[KMSANSHADPML4I + i] = KMSANSHADPDPphys + ptoa(i); | |||||
p4_p[KMSANSHADPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx; | |||||
} | |||||
/* Connect the KMSAN origin map slots up to the PML4. */ | |||||
for (i = 0; i < NKMSANORIGPML4E; i++) { | |||||
p4_p[KMSANORIGPML4I + i] = KMSANORIGPDPphys + ptoa(i); | |||||
p4_p[KMSANORIGPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx; | |||||
} | |||||
#endif | |||||
/* Connect the Direct Map slots up to the PML4. */ | /* Connect the Direct Map slots up to the PML4. */ | ||||
for (i = 0; i < ndmpdpphys; i++) { | for (i = 0; i < ndmpdpphys; i++) { | ||||
p4_p[DMPML4I + i] = DMPDPphys + ptoa(i); | p4_p[DMPML4I + i] = DMPDPphys + ptoa(i); | ||||
p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx; | p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx; | ||||
} | } | ||||
/* Connect the KVA slots up to the PML4 */ | /* Connect the KVA slots up to the PML4 */ | ||||
for (i = 0; i < NKPML4E; i++) { | for (i = 0; i < NKPML4E; i++) { | ||||
▲ Show 20 Lines • Show All 665 Lines • ▼ Show 20 Lines | error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, | ||||
(vmem_addr_t *)&qframe); | (vmem_addr_t *)&qframe); | ||||
if (error != 0) | if (error != 0) | ||||
panic("qframe allocation failed"); | panic("qframe allocation failed"); | ||||
lm_ents = 8; | lm_ents = 8; | ||||
TUNABLE_INT_FETCH("vm.pmap.large_map_pml4_entries", &lm_ents); | TUNABLE_INT_FETCH("vm.pmap.large_map_pml4_entries", &lm_ents); | ||||
if (lm_ents > LMEPML4I - LMSPML4I + 1) | if (lm_ents > LMEPML4I - LMSPML4I + 1) | ||||
lm_ents = LMEPML4I - LMSPML4I + 1; | lm_ents = LMEPML4I - LMSPML4I + 1; | ||||
#ifdef KMSAN | |||||
if (lm_ents > KMSANORIGPML4I - LMSPML4I) { | |||||
printf("pmap: shrinking large map for KMSAN\n"); | |||||
kib (inline comment, done): Print how large the squeeze is?
lm_ents = KMSANORIGPML4I - LMSPML4I; | |||||
kib (inline comment, not done): Could this underflow?
markj (author, inline comment, done): Not unless one changes the values at compile time. Perhaps this should also be asserted?
} | |||||
#endif | |||||
if (bootverbose) | if (bootverbose) | ||||
printf("pmap: large map %u PML4 slots (%lu GB)\n", | printf("pmap: large map %u PML4 slots (%lu GB)\n", | ||||
lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024)); | lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024)); | ||||
if (lm_ents != 0) { | if (lm_ents != 0) { | ||||
large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS, | large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS, | ||||
(vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK); | (vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK); | ||||
if (large_vmem == NULL) { | if (large_vmem == NULL) { | ||||
printf("pmap: cannot create large map\n"); | printf("pmap: cannot create large map\n"); | ||||
▲ Show 20 Lines • Show All 1,673 Lines • ▼ Show 20 Lines | pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW | | ||||
X86_PG_V; | X86_PG_V; | ||||
} | } | ||||
#ifdef KASAN | #ifdef KASAN | ||||
for (i = 0; i < NKASANPML4E; i++) { | for (i = 0; i < NKASANPML4E; i++) { | ||||
pm_pml4[KASANPML4I + i] = (KASANPDPphys + ptoa(i)) | X86_PG_RW | | pm_pml4[KASANPML4I + i] = (KASANPDPphys + ptoa(i)) | X86_PG_RW | | ||||
X86_PG_V | pg_nx; | X86_PG_V | pg_nx; | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef KMSAN | |||||
for (i = 0; i < NKMSANSHADPML4E; i++) { | |||||
pm_pml4[KMSANSHADPML4I + i] = (KMSANSHADPDPphys + ptoa(i)) | | |||||
X86_PG_RW | X86_PG_V | pg_nx; | |||||
} | |||||
for (i = 0; i < NKMSANORIGPML4E; i++) { | |||||
pm_pml4[KMSANORIGPML4I + i] = (KMSANORIGPDPphys + ptoa(i)) | | |||||
X86_PG_RW | X86_PG_V | pg_nx; | |||||
} | |||||
#endif | |||||
for (i = 0; i < ndmpdpphys; i++) { | for (i = 0; i < ndmpdpphys; i++) { | ||||
pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW | | pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW | | ||||
X86_PG_V; | X86_PG_V; | ||||
} | } | ||||
/* install self-referential address mapping entry(s) */ | /* install self-referential address mapping entry(s) */ | ||||
pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW | | pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW | | ||||
X86_PG_A | X86_PG_M; | X86_PG_A | X86_PG_M; | ||||
▲ Show 20 Lines • Show All 570 Lines • ▼ Show 20 Lines | if (pmap_is_la57(pmap)) { | ||||
pmap->pm_pmltop[PML5PML5I] = 0; | pmap->pm_pmltop[PML5PML5I] = 0; | ||||
} else { | } else { | ||||
for (i = 0; i < NKPML4E; i++) /* KVA */ | for (i = 0; i < NKPML4E; i++) /* KVA */ | ||||
pmap->pm_pmltop[KPML4BASE + i] = 0; | pmap->pm_pmltop[KPML4BASE + i] = 0; | ||||
#ifdef KASAN | #ifdef KASAN | ||||
for (i = 0; i < NKASANPML4E; i++) /* KASAN shadow map */ | for (i = 0; i < NKASANPML4E; i++) /* KASAN shadow map */ | ||||
pmap->pm_pmltop[KASANPML4I + i] = 0; | pmap->pm_pmltop[KASANPML4I + i] = 0; | ||||
#endif | #endif | ||||
#ifdef KMSAN | |||||
for (i = 0; i < NKMSANSHADPML4E; i++) /* KMSAN shadow map */ | |||||
pmap->pm_pmltop[KMSANSHADPML4I + i] = 0; | |||||
for (i = 0; i < NKMSANORIGPML4E; i++) /* KMSAN shadow map */ | |||||
pmap->pm_pmltop[KMSANORIGPML4I + i] = 0; | |||||
#endif | |||||
for (i = 0; i < ndmpdpphys; i++)/* Direct Map */ | for (i = 0; i < ndmpdpphys; i++)/* Direct Map */ | ||||
pmap->pm_pmltop[DMPML4I + i] = 0; | pmap->pm_pmltop[DMPML4I + i] = 0; | ||||
pmap->pm_pmltop[PML4PML4I] = 0; /* Recursive Mapping */ | pmap->pm_pmltop[PML4PML4I] = 0; /* Recursive Mapping */ | ||||
for (i = 0; i < lm_ents; i++) /* Large Map */ | for (i = 0; i < lm_ents; i++) /* Large Map */ | ||||
pmap->pm_pmltop[LMSPML4I + i] = 0; | pmap->pm_pmltop[LMSPML4I + i] = 0; | ||||
} | } | ||||
pmap_free_pt_page(NULL, m, true); | pmap_free_pt_page(NULL, m, true); | ||||
Show All 25 Lines | kvm_free(SYSCTL_HANDLER_ARGS) | ||||
unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; | unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; | ||||
return sysctl_handle_long(oidp, &kfree, 0, req); | return sysctl_handle_long(oidp, &kfree, 0, req); | ||||
} | } | ||||
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE, | SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE, | ||||
0, 0, kvm_free, "LU", | 0, 0, kvm_free, "LU", | ||||
"Amount of KVM free"); | "Amount of KVM free"); | ||||
#ifdef KMSAN | |||||
/*
 * Map a KMSAN shadow region covering the vm_page array using "dummy"
 * page-table pages: a single physical data page is aliased by every PTE,
 * and a single page-table page is aliased by every PDE, so the whole
 * shadow of the page array is backed by only a handful of real pages.
 * pdppa is the physical address of the shadow map's page of PDP entries;
 * size is the length of the region to cover, in bytes.
 */
static void
pmap_kmsan_shadow_map_page_array(vm_paddr_t pdppa, vm_size_t size)
{
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	pt_entry_t *pte;
	vm_paddr_t dummypa, dummypd, dummypt;
	int i, npde, npdpg;

	npdpg = howmany(size, NBPDP);	/* 1GB slots -> PD pages needed */
	npde = size / NBPDR;		/* 2MB slots -> PDEs needed */

	/* The single data page aliased by every 4KB shadow mapping. */
	dummypa = vm_phys_early_alloc(-1, PAGE_SIZE);
	pagezero((void *)PHYS_TO_DMAP(dummypa));

	/* The single page-table page aliased by every PDE. */
	dummypt = vm_phys_early_alloc(-1, PAGE_SIZE);
	pagezero((void *)PHYS_TO_DMAP(dummypt));

	/* One page-directory page per 1GB of shadow. */
	dummypd = vm_phys_early_alloc(-1, PAGE_SIZE * npdpg);
	for (i = 0; i < npdpg; i++)
		pagezero((void *)PHYS_TO_DMAP(dummypd + ptoa(i)));

	/* Point every PTE in the shared page table at the data page. */
	pte = (pt_entry_t *)PHYS_TO_DMAP(dummypt);
	for (i = 0; i < NPTEPG; i++)
		pte[i] = (pt_entry_t)(dummypa | X86_PG_V | X86_PG_RW |
		    X86_PG_A | X86_PG_M | pg_nx);

	/* Point every needed PDE at the shared page table. */
	pde = (pd_entry_t *)PHYS_TO_DMAP(dummypd);
	for (i = 0; i < npde; i++)
		pde[i] = (pd_entry_t)(dummypt | X86_PG_V | X86_PG_RW | pg_nx);

	/* Install the page-directory pages into the shadow map's PDPEs. */
	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(pdppa);
	for (i = 0; i < npdpg; i++)
		pdpe[i] = (pdp_entry_t)(dummypd + ptoa(i) | X86_PG_V |
		    X86_PG_RW | pg_nx);
}
static void | |||||
pmap_kmsan_page_array_startup(vm_offset_t start, vm_offset_t end) | |||||
{ | |||||
vm_size_t size; | |||||
KASSERT(start % NBPDP == 0, ("unaligned page array start address")); | |||||
/* | /* | ||||
* The end of the page array's KVA region is 2MB aligned, see | |||||
* kmem_init(). | |||||
*/ | |||||
size = round_2mpage(end) - start; | |||||
pmap_kmsan_shadow_map_page_array(KMSANSHADPDPphys, size); | |||||
pmap_kmsan_shadow_map_page_array(KMSANORIGPDPphys, size); | |||||
} | |||||
#endif | |||||
/* | |||||
* Allocate physical memory for the vm_page array and map it into KVA, | * Allocate physical memory for the vm_page array and map it into KVA, | ||||
* attempting to back the vm_pages with domain-local memory. | * attempting to back the vm_pages with domain-local memory. | ||||
*/ | */ | ||||
void | void | ||||
pmap_page_array_startup(long pages) | pmap_page_array_startup(long pages) | ||||
{ | { | ||||
pdp_entry_t *pdpe; | pdp_entry_t *pdpe; | ||||
pd_entry_t *pde, newpdir; | pd_entry_t *pde, newpdir; | ||||
Show All 23 Lines | for (va = start; va < end; va += NBPDR) { | ||||
pa = vm_phys_early_alloc(domain, NBPDR); | pa = vm_phys_early_alloc(domain, NBPDR); | ||||
for (i = 0; i < NPDEPG; i++) | for (i = 0; i < NPDEPG; i++) | ||||
dump_add_page(pa + i * PAGE_SIZE); | dump_add_page(pa + i * PAGE_SIZE); | ||||
newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A | | newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A | | ||||
X86_PG_M | PG_PS | pg_g | pg_nx); | X86_PG_M | PG_PS | pg_g | pg_nx); | ||||
pde_store(pde, newpdir); | pde_store(pde, newpdir); | ||||
} | } | ||||
vm_page_array = (vm_page_t)start; | vm_page_array = (vm_page_t)start; | ||||
#ifdef KMSAN | |||||
pmap_kmsan_page_array_startup(start, round_page(end)); | |||||
alc (inline comment, done): Is there a reason to round "end" both here and inside pmap_kmsan_page_array_startup()?
#endif | |||||
} | } | ||||
/* | /* | ||||
* grow the number of kernel page table entries, if needed | * grow the number of kernel page table entries, if needed | ||||
*/ | */ | ||||
void | void | ||||
pmap_growkernel(vm_offset_t addr) | pmap_growkernel(vm_offset_t addr) | ||||
{ | { | ||||
Show All 22 Lines | pmap_growkernel(vm_offset_t addr) | ||||
if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR) | if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR) | ||||
return; | return; | ||||
addr = roundup2(addr, NBPDR); | addr = roundup2(addr, NBPDR); | ||||
if (addr - 1 >= vm_map_max(kernel_map)) | if (addr - 1 >= vm_map_max(kernel_map)) | ||||
addr = vm_map_max(kernel_map); | addr = vm_map_max(kernel_map); | ||||
if (kernel_vm_end < addr) | if (kernel_vm_end < addr) | ||||
kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end); | kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end); | ||||
if (kernel_vm_end < addr) | |||||
kmsan_shadow_map(kernel_vm_end, addr - kernel_vm_end); | |||||
while (kernel_vm_end < addr) { | while (kernel_vm_end < addr) { | ||||
pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end); | pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end); | ||||
if ((*pdpe & X86_PG_V) == 0) { | if ((*pdpe & X86_PG_V) == 0) { | ||||
/* We need a new PDP entry */ | /* We need a new PDP entry */ | ||||
nkpg = pmap_alloc_pt_page(kernel_pmap, | nkpg = pmap_alloc_pt_page(kernel_pmap, | ||||
kernel_vm_end >> PDPSHIFT, VM_ALLOC_WIRED | | kernel_vm_end >> PDPSHIFT, VM_ALLOC_WIRED | | ||||
VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO); | VM_ALLOC_INTERRUPT | VM_ALLOC_ZERO); | ||||
if (nkpg == NULL) | if (nkpg == NULL) | ||||
▲ Show 20 Lines • Show All 3,024 Lines • ▼ Show 20 Lines | |||||
/*
 * Copy the contents of physical page msrc to physical page mdst using the
 * direct map; no temporary KVA mappings are required.
 */
void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t srcva, dstva;

	srcva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
	dstva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
	pagecopy((void *)srcva, (void *)dstva);
}
/*
 * Unmapped buffer I/O is disabled under KMSAN, which depends on buffers
 * being mapped so that their contents can be shadowed.
 * NOTE(review): consider moving this policy into bufinit() in an MI way.
 */
#ifdef KMSAN
int	unmapped_buf_allowed = 0;
#else
int	unmapped_buf_allowed = 1;
#endif
kib (inline comment, done): Can you patch this somewhere in bufinit(), in a machine-independent (MI) way?
void | void | ||||
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], | pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], | ||||
vm_offset_t b_offset, int xfersize) | vm_offset_t b_offset, int xfersize) | ||||
{ | { | ||||
void *a_cp, *b_cp; | void *a_cp, *b_cp; | ||||
vm_page_t pages[2]; | vm_page_t pages[2]; | ||||
vm_offset_t vaddr[2], a_pg_offset, b_pg_offset; | vm_offset_t vaddr[2], a_pg_offset, b_pg_offset; | ||||
▲ Show 20 Lines • Show All 3,383 Lines • ▼ Show 20 Lines | if (m != NULL) { | ||||
X86_PG_V | pg_nx); | X86_PG_V | pg_nx); | ||||
} | } | ||||
} | } | ||||
if ((*pde & X86_PG_PS) != 0) | if ((*pde & X86_PG_PS) != 0) | ||||
return; | return; | ||||
pte = pmap_pde_to_pte(pde, va); | pte = pmap_pde_to_pte(pde, va); | ||||
if ((*pte & X86_PG_V) != 0) | if ((*pte & X86_PG_V) != 0) | ||||
return; | return; | ||||
m = pmap_kasan_enter_alloc_4k(); | |||||
*pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V | | |||||
X86_PG_M | X86_PG_A | pg_nx); | |||||
} | |||||
#endif | |||||
#ifdef KMSAN | |||||
static vm_page_t | |||||
pmap_kmsan_enter_alloc_4k(void) | |||||
{ | |||||
vm_page_t m; | |||||
m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | | |||||
VM_ALLOC_WIRED | VM_ALLOC_ZERO); | |||||
if (m == NULL) | |||||
panic("%s: no memory to grow shadow map", __func__); | |||||
if ((m->flags & PG_ZERO) == 0) | |||||
pmap_zero_page(m); | |||||
return (m); | |||||
} | |||||
static vm_page_t | |||||
pmap_kmsan_enter_alloc_2m(void) | |||||
{ | |||||
vm_page_t m; | |||||
m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | | |||||
VM_ALLOC_WIRED, NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT); | |||||
if (m != NULL) | |||||
memset((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 0, NBPDR); | |||||
return (m); | |||||
} | |||||
/* | |||||
* Grow the shadow or origin maps by at least one 4KB page at the specified | |||||
* address. Use 2MB pages when possible. | |||||
*/ | |||||
void | |||||
pmap_kmsan_enter(vm_offset_t va) | |||||
{ | |||||
pdp_entry_t *pdpe; | |||||
pd_entry_t *pde; | |||||
pt_entry_t *pte; | |||||
vm_page_t m; | |||||
mtx_assert(&kernel_map->system_mtx, MA_OWNED); | |||||
pdpe = pmap_pdpe(kernel_pmap, va); | |||||
if ((*pdpe & X86_PG_V) == 0) { | |||||
m = pmap_kmsan_enter_alloc_4k(); | |||||
*pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | | |||||
X86_PG_V | pg_nx); | |||||
} | |||||
pde = pmap_pdpe_to_pde(pdpe, va); | |||||
if ((*pde & X86_PG_V) == 0) { | |||||
m = pmap_kmsan_enter_alloc_2m(); | |||||
if (m != NULL) { | |||||
*pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | | |||||
X86_PG_PS | X86_PG_V | pg_nx); | |||||
Done Inline ActionsIs there a reason why you preset PG_M and PG_A for the 4KB mapping below, but not for the 2MB mapping here? alc: Is there a reason why you preset PG_M and PG_A for the 4KB mapping below, but not for the 2MB… | |||||
} else { | |||||
m = pmap_kmsan_enter_alloc_4k(); | |||||
*pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | | |||||
X86_PG_V | pg_nx); | |||||
} | |||||
} | |||||
if ((*pde & X86_PG_PS) != 0) | |||||
return; | |||||
pte = pmap_pde_to_pte(pde, va); | |||||
if ((*pte & X86_PG_V) != 0) | |||||
return; | |||||
KASSERT((*pte & X86_PG_V) == 0, | KASSERT((*pte & X86_PG_V) == 0, | ||||
("%s: shadow address %#lx is already mapped", __func__, va)); | ("%s: shadow address %#lx is already mapped", __func__, va)); | ||||
Done Inline ActionsGiven the preceding "if" statement, I don't see the reason for this assertion. alc: Given the preceding "if" statement, I don't see the reason for this assertion. | |||||
m = pmap_kasan_enter_alloc_4k(); | m = pmap_kmsan_enter_alloc_4k(); | ||||
*pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V | | *pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V | | ||||
X86_PG_M | X86_PG_A | pg_nx); | X86_PG_M | X86_PG_A | pg_nx); | ||||
} | } | ||||
#endif | #endif | ||||
/* | /* | ||||
* Track a range of the kernel's virtual address space that is contiguous | * Track a range of the kernel's virtual address space that is contiguous | ||||
* in various mapping attributes. | * in various mapping attributes. | ||||
▲ Show 20 Lines • Show All 169 Lines • ▼ Show 20 Lines | case PML4PML4I: | ||||
sbuf_printf(sb, "\nRecursive map:\n"); | sbuf_printf(sb, "\nRecursive map:\n"); | ||||
break; | break; | ||||
case DMPML4I: | case DMPML4I: | ||||
sbuf_printf(sb, "\nDirect map:\n"); | sbuf_printf(sb, "\nDirect map:\n"); | ||||
break; | break; | ||||
#ifdef KASAN | #ifdef KASAN | ||||
case KASANPML4I: | case KASANPML4I: | ||||
sbuf_printf(sb, "\nKASAN shadow map:\n"); | sbuf_printf(sb, "\nKASAN shadow map:\n"); | ||||
break; | |||||
#endif | |||||
#ifdef KMSAN | |||||
case KMSANSHADPML4I: | |||||
sbuf_printf(sb, "\nKMSAN shadow map:\n"); | |||||
break; | |||||
case KMSANORIGPML4I: | |||||
sbuf_printf(sb, "\nKMSAN origin map:\n"); | |||||
break; | break; | ||||
#endif | #endif | ||||
case KPML4BASE: | case KPML4BASE: | ||||
sbuf_printf(sb, "\nKernel map:\n"); | sbuf_printf(sb, "\nKernel map:\n"); | ||||
break; | break; | ||||
case LMSPML4I: | case LMSPML4I: | ||||
sbuf_printf(sb, "\nLarge map:\n"); | sbuf_printf(sb, "\nLarge map:\n"); | ||||
break; | break; | ||||
▲ Show 20 Lines • Show All 258 Lines • Show Last 20 Lines |
Currently, wouldn't "is necessary" be more accurate than "can be useful"?