diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1958,11 +1958,16 @@
 	kernel_pmap->pm_pmltop = kernel_pml4;
 	kernel_pmap->pm_cr3 = KPML4phys;
 	kernel_pmap->pm_ucr3 = PMAP_NO_CR3;
-	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
 	kernel_pmap->pm_stats.resident_count = res;
 	kernel_pmap->pm_flags = pmap_flags;
 
+	/*
+	 * The kernel pmap is always active on all CPUs.  Once CPUs are
+	 * enumerated, the mask will be set equal to all_cpus.
+	 */
+	CPU_FILL(&kernel_pmap->pm_active);
+
 	/*
 	 * Initialize the TLB invalidations generation number lock.
 	 */
@@ -3010,12 +3015,6 @@
 	smr_wait(pmap->pm_eptsmr, goal);
 }
 
-static cpuset_t
-pmap_invalidate_cpu_mask(pmap_t pmap)
-{
-	return (pmap == kernel_pmap ? all_cpus : pmap->pm_active);
-}
-
 static inline void
 pmap_invalidate_preipi_pcid(pmap_t pmap)
 {
@@ -10925,7 +10924,14 @@
 	pti_finalized = true;
 	VM_OBJECT_WUNLOCK(pti_obj);
 }
-SYSINIT(pmap_pti, SI_SUB_CPU + 1, SI_ORDER_ANY, pmap_pti_init, NULL);
+
+static void
+pmap_cpu_init(void *arg __unused)
+{
+	CPU_COPY(&all_cpus, &kernel_pmap->pm_active);
+	pmap_pti_init();
+}
+SYSINIT(pmap_cpu, SI_SUB_CPU + 1, SI_ORDER_ANY, pmap_cpu_init, NULL);
 
 static pdp_entry_t *
 pmap_pti_pdpe(vm_offset_t va)
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -532,6 +532,12 @@
 void	pmap_san_enter(vm_offset_t);
 #endif
 
+static __inline cpuset_t
+pmap_invalidate_cpu_mask(pmap_t pmap)
+{
+	return (pmap->pm_active);
+}
+
 #endif	/* _KERNEL */
 
 /* Return various clipped indexes for a given VA */
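
For context, here is a minimal userspace sketch of the invariant this diff establishes: kernel_pmap->pm_active starts fully filled at bootstrap, is narrowed to all_cpus once CPUs are enumerated (the new pmap_cpu_init() SYSINIT at SI_SUB_CPU + 1), and from then on pmap_invalidate_cpu_mask() can return pm_active unconditionally instead of special-casing kernel_pmap. This is only a model: cpu_mask_t and all model_* names are hypothetical stand-ins, not kernel API, with a plain 64-bit mask substituted for cpuset_t.

/*
 * Userspace model of the pm_active lifecycle after this change.
 * All names here are illustrative; none of this is kernel code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpu_mask_t;		/* stand-in for cpuset_t */

struct model_pmap {
	cpu_mask_t pm_active;
};

static struct model_pmap model_kernel_pmap;
static cpu_mask_t model_all_cpus;

/* Models pmap_bootstrap(): mark the kernel pmap active everywhere. */
static void
model_bootstrap(void)
{
	model_kernel_pmap.pm_active = ~(cpu_mask_t)0;	/* CPU_FILL() */
}

/* Models the SI_SUB_CPU + 1 SYSINIT: CPUs enumerated, narrow to all_cpus. */
static void
model_cpu_init(int ncpus)
{
	model_all_cpus = (ncpus >= 64) ? ~(cpu_mask_t)0 :
	    (((cpu_mask_t)1 << ncpus) - 1);
	model_kernel_pmap.pm_active = model_all_cpus;	/* CPU_COPY() */
}

/* The now-branchless equivalent of pmap_invalidate_cpu_mask(). */
static cpu_mask_t
model_invalidate_cpu_mask(struct model_pmap *pmap)
{
	return (pmap->pm_active);
}

int
main(void)
{
	model_bootstrap();
	model_cpu_init(8);
	/* Post-SYSINIT, the unconditional lookup equals all_cpus. */
	assert(model_invalidate_cpu_mask(&model_kernel_pmap) ==
	    model_all_cpus);
	printf("invalidate mask: %#llx\n", (unsigned long long)
	    model_invalidate_cpu_mask(&model_kernel_pmap));
	return (0);
}

Between model_bootstrap() and model_cpu_init(), the mask is a superset of all_cpus, so IPI targeting stays correct during early boot; the SYSINIT merely tightens it. That ordering is what lets the header inline drop the kernel_pmap branch on every invalidation path.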