Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -7302,8 +7302,8 @@
 	CRITICAL_ASSERT(curthread);
 	gen = PCPU_GET(pcid_gen);
-	if (!pti && (pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN ||
-	    pmap->pm_pcids[cpuid].pm_gen == gen))
+	if ((!pti && pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN) ||
+	    pmap->pm_pcids[cpuid].pm_gen == gen)
 		return (CR3_PCID_SAVE);
 	pcid_next = PCPU_GET(pcid_next);
 	KASSERT((!pti && pcid_next <= PMAP_PCID_OVERMAX) ||
@@ -7330,7 +7330,7 @@
 {
 	pmap_t oldpmap, pmap;
 	struct invpcid_descr d;
-	uint64_t cached, cr3, kcr3, ucr3;
+	uint64_t cached, cr3, kcr3, kern_pti_cached, ucr3;
 	register_t rflags;
 	u_int cpuid;
 
@@ -7379,11 +7379,10 @@
 		if (!invpcid_works)
 			rflags = intr_disable();
-		if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3) {
+		kern_pti_cached = pti ? 0 : cached;
+		if (!kern_pti_cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3) {
 			load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
-			    cached);
-			if (cached)
-				PCPU_INC(pm_save_cnt);
+			    kern_pti_cached);
 		}
 		PCPU_SET(curpmap, pmap);
 		if (pti) {
@@ -7391,13 +7390,15 @@
 			ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[cpuid].pm_pcid |
 			    PMAP_PCID_USER_PT;
-			/*
-			 * Manually invalidate translations cached
-			 * from the user page table, which are not
-			 * flushed by reload of cr3 with the kernel
-			 * page table pointer above.
-			 */
-			if (pmap->pm_ucr3 != PMAP_NO_CR3) {
+			if (cached)
+				PCPU_INC(pm_save_cnt);
+			else if (pmap->pm_ucr3 != PMAP_NO_CR3) {
+				/*
+				 * Manually invalidate translations cached
+				 * from the user page table, which are not
+				 * flushed by reload of cr3 with the kernel
+				 * page table pointer above.
+				 */
 				if (invpcid_works) {
 					d.pcid = PMAP_PCID_USER_PT |
 					    pmap->pm_pcids[cpuid].pm_pcid;
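
For readers skimming the patch, the standalone sketch below restates the two decisions the diff rearranges: when a still-valid PCID generation lets the caller report CR3_PCID_SAVE, and when the no-flush bit is actually OR'ed into the kernel-mode %cr3 load. This is an illustration only, not kernel code: the names pti, cached, PMAP_PCID_KERN and CR3_PCID_SAVE mirror the diff, while the helper functions, the placeholder value of PMAP_PCID_KERN and the main() driver are made up for the example.

/*
 * Standalone illustration (not kernel code) of the logic the patch
 * rearranges in pmap_pcid_alloc() and pmap_activate_sw().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	PMAP_PCID_KERN	0		/* placeholder value for the example */
#define	CR3_PCID_SAVE	(1ULL << 63)	/* %cr3 bit 63: do not flush this PCID */

/*
 * Before the patch, a matching generation only short-circuited the
 * allocation when PTI was off.  After it, a matching generation is
 * sufficient on its own; only the kernel-PCID shortcut stays gated
 * on !pti.
 */
static bool
pcid_still_valid(bool pti, uint32_t pm_pcid, uint32_t pm_gen, uint32_t gen)
{
	return ((!pti && pm_pcid == PMAP_PCID_KERN) || pm_gen == gen);
}

/*
 * In pmap_activate_sw(), the no-flush bit is OR'ed into the
 * kernel-mode %cr3 value only when PTI is off; with PTI the kernel
 * page table is reloaded without it, and "cached" instead decides
 * whether the user page-table PCID must be invalidated by hand.
 */
static uint64_t
kernel_cr3_bits(bool pti, uint64_t cached)
{
	uint64_t kern_pti_cached;

	kern_pti_cached = pti ? 0 : cached;
	return (kern_pti_cached);
}

int
main(void)
{
	/* PTI on, generation matches: reuse is now reported... */
	printf("pti, gen match: save=%d\n",
	    pcid_still_valid(true, 5, 42, 42));
	/* ...but the kernel %cr3 load still omits CR3_PCID_SAVE. */
	printf("pti, cached: cr3 bits=%#llx\n",
	    (unsigned long long)kernel_cr3_bits(true, CR3_PCID_SAVE));
	return (0);
}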