Changeset View
Standalone View
sys/amd64/amd64/pmap.c
- This file is larger than 256 KB, so syntax highlighting is disabled by default.
Show First 20 Lines • Show All 2,336 Lines • ▼ Show 20 Lines | for (i = 0; i < vm_phys_nsegs; i++) { | ||||
start = highest + 1; | start = highest + 1; | ||||
pvd = &pv_table[start]; | pvd = &pv_table[start]; | ||||
pages = end - start + 1; | pages = end - start + 1; | ||||
s = round_page(pages * sizeof(*pvd)); | s = round_page(pages * sizeof(*pvd)); | ||||
highest = start + (s / sizeof(*pvd)) - 1; | highest = start + (s / sizeof(*pvd)) - 1; | ||||
for (j = 0; j < s; j += PAGE_SIZE) { | for (j = 0; j < s; j += PAGE_SIZE) { | ||||
vm_page_t m = vm_page_alloc_domain(NULL, 0, | vm_page_t m = vm_page_alloc_noobj_domain(domain, | ||||
domain, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ); | VM_ALLOC_NORMAL); | ||||
if (m == NULL) | if (m == NULL) | ||||
panic("vm_page_alloc_domain failed for %lx\n", (vm_offset_t)pvd + j); | panic("failed to allocate PV table page"); | ||||
pmap_qenter((vm_offset_t)pvd + j, &m, 1); | pmap_qenter((vm_offset_t)pvd + j, &m, 1); | ||||
} | } | ||||
for (j = 0; j < s / sizeof(*pvd); j++) { | for (j = 0; j < s / sizeof(*pvd); j++) { | ||||
rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW); | rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW); | ||||
TAILQ_INIT(&pvd->pv_page.pv_list); | TAILQ_INIT(&pvd->pv_page.pv_list); | ||||
pvd->pv_page.pv_gen = 0; | pvd->pv_page.pv_gen = 0; | ||||
pvd->pv_page.pat_mode = 0; | pvd->pv_page.pat_mode = 0; | ||||
▲ Show 20 Lines • Show All 1,949 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
/* Allocate a page table page and do related bookkeeping */ | /* Allocate a page table page and do related bookkeeping */ | ||||
static vm_page_t | static vm_page_t | ||||
pmap_alloc_pt_page(pmap_t pmap, vm_pindex_t pindex, int flags) | pmap_alloc_pt_page(pmap_t pmap, vm_pindex_t pindex, int flags) | ||||
{ | { | ||||
vm_page_t m; | vm_page_t m; | ||||
m = vm_page_alloc(NULL, pindex, flags | VM_ALLOC_NOOBJ); | m = vm_page_alloc_noobj(flags); | ||||
if (__predict_false(m == NULL)) | if (__predict_false(m == NULL)) | ||||
return (NULL); | return (NULL); | ||||
m->pindex = pindex; | |||||
pmap_pt_page_count_adj(pmap, 1); | pmap_pt_page_count_adj(pmap, 1); | ||||
if ((flags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) | |||||
pmap_zero_page(m); | |||||
return (m); | return (m); | ||||
} | } | ||||
static void | static void | ||||
pmap_free_pt_page(pmap_t pmap, vm_page_t m, bool zerofilled) | pmap_free_pt_page(pmap_t pmap, vm_page_t m, bool zerofilled) | ||||
{ | { | ||||
/* | /* | ||||
* This function assumes the page will need to be unwired, | * This function assumes the page will need to be unwired, | ||||
Show All 19 Lines | |||||
int | int | ||||
pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags) | pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags) | ||||
{ | { | ||||
vm_page_t pmltop_pg, pmltop_pgu; | vm_page_t pmltop_pg, pmltop_pgu; | ||||
vm_paddr_t pmltop_phys; | vm_paddr_t pmltop_phys; | ||||
int i; | int i; | ||||
/* | /* | ||||
* allocate the page directory page | * allocate the page directory page | ||||
alc: As an aside, this comment should explain why 'pmap' isn't passed to 'pmap_alloc_pt_page'. | |||||
Done Inline Actions: Is it only because the pmap stats fields are not initialized at this point? If so, I wonder if we should instead initialize them first and count this page (with corresponding changes to pmap_release()). markj: Is it only because the pmap stats fields are not initialized at this point? If so, I wonder if… | |||||
Not Done Inline Actions: No. Think about the early termination optimization in pmap_remove(). For it to work, the root-level PTPs allocated in this function can't be counted in the pmap's resident count. alc: No. Think about the early termination optimization in pmap_remove(). For it to work, the root… | |||||
*/ | */ | ||||
pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_NORMAL | | pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_NORMAL | | ||||
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK); | VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK); | ||||
pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg); | pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg); | ||||
pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys); | pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys); | ||||
CPU_FOREACH(i) { | CPU_FOREACH(i) { | ||||
Show All 15 Lines | pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags) | ||||
switch (pm_type) { | switch (pm_type) { | ||||
case PT_X86: | case PT_X86: | ||||
pmap->pm_cr3 = pmltop_phys; | pmap->pm_cr3 = pmltop_phys; | ||||
if (pmap_is_la57(pmap)) | if (pmap_is_la57(pmap)) | ||||
pmap_pinit_pml5(pmltop_pg); | pmap_pinit_pml5(pmltop_pg); | ||||
else | else | ||||
pmap_pinit_pml4(pmltop_pg); | pmap_pinit_pml4(pmltop_pg); | ||||
if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) { | if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) { | ||||
pmltop_pgu = pmap_alloc_pt_page(NULL, 0, | pmltop_pgu = pmap_alloc_pt_page(NULL, 0, | ||||
VM_ALLOC_WIRED | VM_ALLOC_NORMAL | | VM_ALLOC_WIRED | VM_ALLOC_NORMAL | | ||||
VM_ALLOC_WAITOK); | VM_ALLOC_WAITOK); | ||||
Not Done Inline Actions: As an aside, as above this deserves a comment (as part of a separate patch) so that readers don't think it is an accounting bug. alc: As an aside, as above this deserves a comment (as part of a separate patch) so that readers… | |||||
Done Inline Actions: Here it makes a bit more sense that the page isn't counted. markj: Here it makes a bit more sense that the page isn't counted. | |||||
Not Done Inline Actions: I do not see why; it is reasonable to account for all page table pages, even if we have two page tables. kib: I do not see why; it is reasonable to account for all page table pages, even if we have two… | |||||
Done Inline Actions: Ah, pmap_resident_count_adj() asserts that the pmap mutex is held, but at this point it is not. And it doesn't seem very useful to manually adjust the count. markj: Ah, pmap_resident_count_adj() asserts that the pmap mutex is held, but at this point it is not. | |||||
pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP( | pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP( | ||||
VM_PAGE_TO_PHYS(pmltop_pgu)); | VM_PAGE_TO_PHYS(pmltop_pgu)); | ||||
if (pmap_is_la57(pmap)) | if (pmap_is_la57(pmap)) | ||||
pmap_pinit_pml5_pti(pmltop_pgu); | pmap_pinit_pml5_pti(pmltop_pgu); | ||||
Not Done Inline Actions: I feel like I'm missing something here. We only appear to initialize one PTE within the pml5 page and the contents of the rest of the PTEs within the page are unknown. alc: I feel like I'm missing something here. We only appear to initialize one PTE within the pml5… | |||||
Done Inline Actions: I think this is a bug. markj: I think this is a bug. | |||||
Not Done Inline Actions: Yes, it is a bug. Would you add VM_ALLOC_ZERO as part of this patch or should we do a separate patch? kib: Yes, it is a bug. Would you add VM_ALLOC_ZERO as part of this patch or should we do a separate… | |||||
else | else | ||||
pmap_pinit_pml4_pti(pmltop_pgu); | pmap_pinit_pml4_pti(pmltop_pgu); | ||||
pmap->pm_ucr3 = VM_PAGE_TO_PHYS(pmltop_pgu); | pmap->pm_ucr3 = VM_PAGE_TO_PHYS(pmltop_pgu); | ||||
} | } | ||||
if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) { | if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) { | ||||
rangeset_init(&pmap->pm_pkru, pkru_dup_range, | rangeset_init(&pmap->pm_pkru, pkru_dup_range, | ||||
pkru_free_range, pmap, M_NOWAIT); | pkru_free_range, pmap, M_NOWAIT); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 1,069 Lines • ▼ Show 20 Lines | if (field < _NPCM) { | ||||
pc_list); | pc_list); | ||||
} | } | ||||
PV_STAT(counter_u64_add(pv_entry_count, 1)); | PV_STAT(counter_u64_add(pv_entry_count, 1)); | ||||
PV_STAT(counter_u64_add(pv_entry_spare, -1)); | PV_STAT(counter_u64_add(pv_entry_spare, -1)); | ||||
return (pv); | return (pv); | ||||
} | } | ||||
} | } | ||||
/* No free items, allocate another chunk */ | /* No free items, allocate another chunk */ | ||||
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | | m = vm_page_alloc_noobj(VM_ALLOC_NORMAL | VM_ALLOC_WIRED); | ||||
VM_ALLOC_WIRED); | |||||
if (m == NULL) { | if (m == NULL) { | ||||
if (lockp == NULL) { | if (lockp == NULL) { | ||||
PV_STAT(counter_u64_add(pc_chunk_tryfail, 1)); | PV_STAT(counter_u64_add(pc_chunk_tryfail, 1)); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
m = reclaim_pv_chunk(pmap, lockp); | m = reclaim_pv_chunk(pmap, lockp); | ||||
if (m == NULL) | if (m == NULL) | ||||
goto retry; | goto retry; | ||||
▲ Show 20 Lines • Show All 86 Lines • ▼ Show 20 Lines | #endif | ||||
free = popcnt_pc_map_pq(pc->pc_map); | free = popcnt_pc_map_pq(pc->pc_map); | ||||
if (free == 0) | if (free == 0) | ||||
break; | break; | ||||
avail += free; | avail += free; | ||||
if (avail >= needed) | if (avail >= needed) | ||||
break; | break; | ||||
} | } | ||||
for (reclaimed = false; avail < needed; avail += _NPCPV) { | for (reclaimed = false; avail < needed; avail += _NPCPV) { | ||||
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | | m = vm_page_alloc_noobj(VM_ALLOC_NORMAL | VM_ALLOC_WIRED); | ||||
VM_ALLOC_WIRED); | |||||
if (m == NULL) { | if (m == NULL) { | ||||
m = reclaim_pv_chunk(pmap, lockp); | m = reclaim_pv_chunk(pmap, lockp); | ||||
if (m == NULL) | if (m == NULL) | ||||
goto retry; | goto retry; | ||||
reclaimed = true; | reclaimed = true; | ||||
} else | } else | ||||
counter_u64_add(pv_page_count, 1); | counter_u64_add(pv_page_count, 1); | ||||
PV_STAT(counter_u64_add(pc_chunk_count, 1)); | PV_STAT(counter_u64_add(pc_chunk_count, 1)); | ||||
▲ Show 20 Lines • Show All 5,792 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
#ifdef KASAN | #ifdef KASAN | ||||
static vm_page_t | static vm_page_t | ||||
pmap_kasan_enter_alloc_4k(void) | pmap_kasan_enter_alloc_4k(void) | ||||
{ | { | ||||
vm_page_t m; | vm_page_t m; | ||||
m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | | m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | | ||||
VM_ALLOC_WIRED | VM_ALLOC_ZERO); | VM_ALLOC_ZERO); | ||||
if (m == NULL) | if (m == NULL) | ||||
Done Inline Actions: This doesn't look right. alc: This doesn't look right. | |||||
panic("%s: no memory to grow shadow map", __func__); | panic("%s: no memory to grow shadow map", __func__); | ||||
if ((m->flags & PG_ZERO) == 0) | |||||
pmap_zero_page(m); | |||||
return (m); | return (m); | ||||
} | } | ||||
static vm_page_t | static vm_page_t | ||||
pmap_kasan_enter_alloc_2m(void) | pmap_kasan_enter_alloc_2m(void) | ||||
{ | { | ||||
vm_page_t m; | vm_page_t m; | ||||
▲ Show 20 Lines • Show All 48 Lines • ▼ Show 20 Lines | |||||
#endif | #endif | ||||
#ifdef KMSAN | #ifdef KMSAN | ||||
static vm_page_t | static vm_page_t | ||||
pmap_kmsan_enter_alloc_4k(void) | pmap_kmsan_enter_alloc_4k(void) | ||||
{ | { | ||||
vm_page_t m; | vm_page_t m; | ||||
m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | | m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | | ||||
VM_ALLOC_WIRED | VM_ALLOC_ZERO); | VM_ALLOC_ZERO); | ||||
if (m == NULL) | if (m == NULL) | ||||
panic("%s: no memory to grow shadow map", __func__); | panic("%s: no memory to grow shadow map", __func__); | ||||
if ((m->flags & PG_ZERO) == 0) | |||||
pmap_zero_page(m); | |||||
return (m); | return (m); | ||||
} | } | ||||
static vm_page_t | static vm_page_t | ||||
pmap_kmsan_enter_alloc_2m(void) | pmap_kmsan_enter_alloc_2m(void) | ||||
{ | { | ||||
vm_page_t m; | vm_page_t m; | ||||
▲ Show 20 Lines • Show All 504 Lines • Show Last 20 Lines |
As an aside, this comment should explain why 'pmap' isn't passed to 'pmap_alloc_pt_page'.