Index: sys/amd64/amd64/cpu_switch.S
===================================================================
--- sys/amd64/amd64/cpu_switch.S
+++ sys/amd64/amd64/cpu_switch.S
@@ -216,9 +216,15 @@
 	movq	%r8,PCPU(CURPCB)
 	/* Update the TSS_RSP0 pointer for the next interrupt */
 	cmpb	$0,pti(%rip)
-	jne	1f
-	movq	%r8,TSS_RSP0(%rdx)
-1:	movq	%r12,PCPU(CURTHREAD)		/* into next thread */
+	je	1f
+	cmpq	$~0,PCPU(UCR3)
+	je	1f
+	movq	PCPU(PRVSPACE),%rax
+	addq	$PC_PTI_STACK+PC_PTI_STACK_SZ*8,%rax
+	movq	%rax,TSS_RSP0(%rdx)
+	jmp	2f
+1:	movq	%r8,TSS_RSP0(%rdx)
+2:	movq	%r12,PCPU(CURTHREAD)		/* into next thread */
 
 	/* Test if debug registers should be restored. */
 	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
@@ -295,12 +301,7 @@
 	shrq	$8,%rcx
 	movl	%ecx,8(%rax)
 	movb	$0x89,5(%rax)	/* unset busy */
-	cmpb	$0,pti(%rip)
-	je	1f
-	movq	PCPU(PRVSPACE),%rax
-	addq	$PC_PTI_STACK+PC_PTI_STACK_SZ*8,%rax
-	movq	%rax,TSS_RSP0(%rdx)
-1:	movl	$TSSSEL,%eax
+	movl	$TSSSEL,%eax
 	ltr	%ax
 	jmp	done_tss
Index: sys/amd64/amd64/exception.S
===================================================================
--- sys/amd64/amd64/exception.S
+++ sys/amd64/amd64/exception.S
@@ -297,12 +297,14 @@
 	testb	$SEL_RPL_MASK,PTI_CS-2*8(%rsp)
 	jz	Xpage
 	swapgs
+	cmpq	$~0,PCPU(UCR3)
+	je	1f
 	pushq	%rax
 	pushq	%rdx
 	movq	%cr3,%rax
 	movq	%rax,PCPU(SAVED_UCR3)
 	PTI_UUENTRY has_err=1
-	subq	$TF_ERR,%rsp
+1:	subq	$TF_ERR,%rsp
 	movq	%rdi,TF_RDI(%rsp)
 	movq	%rax,TF_RAX(%rsp)
 	movq	%rdx,TF_RDX(%rsp)
@@ -347,13 +349,15 @@
 	pushq	%rax
 	pushq	%rdx
 	swapgs
+	cmpq	$~0,PCPU(UCR3)
+	je	1f
 	movq	PCPU(KCR3),%rax
 	movq	%rax,%cr3
 	movq	PCPU(RSP0),%rax
 	subq	$2*PTI_SIZE-3*8,%rax /* no err, %rax, %rdx in faulted frame */
 	MOVE_STACKS	(PTI_SIZE / 4 - 3)
 	movq	%rax,%rsp
-	popq	%rdx
+1:	popq	%rdx
 	popq	%rax
 	swapgs
 	jmp	X\name
@@ -431,6 +435,8 @@
 IDTVEC(fast_syscall_pti)
 	swapgs
 	movq	%rax,PCPU(SCRATCH_RAX)
+	cmpq	$~0,PCPU(UCR3)
+	je	fast_syscall_common
 	movq	PCPU(KCR3),%rax
 	movq	%rax,%cr3
 	jmp	fast_syscall_common
@@ -498,6 +504,8 @@
 	movq	TF_RSP(%rsp),%rsp	/* user stack pointer */
 	cmpb	$0,pti
 	je	2f
+	cmpq	$~0,PCPU(UCR3)
+	je	2f
 	movq	PCPU(UCR3),%r9
 	movq	%r9,%cr3
 	xorl	%r9d,%r9d
@@ -1006,6 +1014,8 @@
 	call	handle_ibrs_exit_rs
 	cmpb	$0,pti
 	je	1f
+	cmpq	$~0,PCPU(UCR3)
+	je	1f
 	pushq	%rdx
 	movq	PCPU(PRVSPACE),%rdx
 	addq	$PC_PTI_STACK+PC_PTI_STACK_SZ*8-PTI_SIZE,%rdx
Index: sys/amd64/amd64/machdep.c
===================================================================
--- sys/amd64/amd64/machdep.c
+++ sys/amd64/amd64/machdep.c
@@ -1795,8 +1795,7 @@
 	rsp0 = (vm_offset_t)thread0.td_pcb;
 	/* Ensure the stack is aligned to 16 bytes */
 	rsp0 &= ~0xFul;
-	common_tss[0].tss_rsp0 = pti ? ((vm_offset_t)PCPU_PTR(pti_stack) +
-	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful : rsp0;
+	common_tss[0].tss_rsp0 = rsp0;
 	PCPU_SET(rsp0, rsp0);
 	PCPU_SET(curpcb, thread0.td_pcb);
 
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -2783,7 +2783,8 @@
 			 * the kernel-mode page table active on return
 			 * to user space.
 			 */
-			*pml4 |= pg_nx;
+			if (pmap->pm_ucr3 != PMAP_NO_CR3)
+				*pml4 |= pg_nx;
 
 			pml4u = &pmap->pm_pml4u[pml4index];
 			*pml4u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
@@ -7439,6 +7440,15 @@
 		PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
 		PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
+		if (pmap->pm_ucr3 != PMAP_NO_CR3) {
+			u_int64_t rsp0;
+			struct amd64tss *tssp;
+
+			rsp0 = (vm_offset_t)PCPU_PTR(pti_stack) +
+			    PC_PTI_STACK_SZ * sizeof(uint64_t);
+			tssp = PCPU_GET(tssp);
+			tssp->tss_rsp0 = rsp0;
+		}
 	}
 	if (!invpcid_works)
 		intr_restore(rflags);
 
Index: sys/amd64/include/asmacros.h
===================================================================
--- sys/amd64/include/asmacros.h
+++ sys/amd64/include/asmacros.h
@@ -196,9 +196,12 @@
 
 	.macro	PTI_UENTRY has_err
 	swapgs
+	cmpq	$~0,PCPU(UCR3)
+	je	1f
 	pushq	%rax
 	pushq	%rdx
 	PTI_UUENTRY \has_err
+1:
 	.endm
 
 	.macro	PTI_ENTRY name, cont, has_err=0