Page MenuHomeFreeBSD

D14180.id38885.diff
No OneTemporary

D14180.id38885.diff

This file is larger than 256 KB, so syntax highlighting was skipped.
Index: Makefile.inc1
===================================================================
--- Makefile.inc1
+++ Makefile.inc1
@@ -1374,6 +1374,14 @@
.endif
.endfor
+_cleankernobj_fast_depend_hack: .PHONY
+# 20170202 remove stale generated assym.s after renaming to .S in e64bf19
+.if exists(${OBJTOP}/sys/${KERNCONF}/assym.s)
+ @echo Removing stale generated assym files
+ @rm -f ${OBJTOP}/sys/${KERNCONF}/assym.* \
+ ${OBJTOP}/sys/${KERNCONF}/.depend.assym.*
+.endif
+
${WMAKE_TGTS:N_worldtmp:Nbuild${libcompat}} ${.ALLTARGETS:M_*:N_worldtmp}: .MAKE .PHONY
#
@@ -1409,6 +1417,8 @@
@echo ">>> stage 2.1: cleaning up the object tree"
@echo "--------------------------------------------------------------"
${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} ${CLEANDIR}
+.else
+ ${_+_}cd ${.CURDIR}; ${WMAKE} _cleankernobj_fast_depend_hack
.endif
.if !defined(NO_KERNELOBJ)
@echo
Index: sys/amd64/acpica/acpi_wakecode.S
===================================================================
--- sys/amd64/acpica/acpi_wakecode.S
+++ sys/amd64/acpica/acpi_wakecode.S
@@ -34,7 +34,7 @@
#include <machine/specialreg.h>
#include <machine/timerreg.h>
-#include "assym.s"
+#include "assym.S"
/*
* Resume entry point for real mode.
Index: sys/amd64/amd64/apic_vector.S
===================================================================
--- sys/amd64/amd64/apic_vector.S
+++ sys/amd64/amd64/apic_vector.S
@@ -44,7 +44,7 @@
#include "opt_smp.h"
-#include "assym.s"
+#include "assym.S"
#include <machine/asmacros.h>
#include <machine/specialreg.h>
Index: sys/amd64/amd64/apic_vector.S.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/apic_vector.S.orig
@@ -0,0 +1,307 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 2014-2018 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by
+ * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: vector.s, 386BSD 0.1 unknown origin
+ * $FreeBSD$
+ */
+
+/*
+ * Interrupt entry points for external interrupts triggered by I/O APICs
+ * as well as IPI handlers.
+ */
+
+#include "opt_smp.h"
+
+#include "assym.S"
+
+#include <machine/asmacros.h>
+#include <machine/specialreg.h>
+#include <x86/apicreg.h>
+
+#ifdef SMP
+#define LK lock ;
+#else
+#define LK
+#endif
+
+ .text
+ SUPERALIGN_TEXT
+ /* End Of Interrupt to APIC */
+as_lapic_eoi:
+ cmpl $0,x2apic_mode
+ jne 1f
+ movq lapic_map,%rax
+ movl $0,LA_EOI(%rax)
+ ret
+1:
+ movl $MSR_APIC_EOI,%ecx
+ xorl %eax,%eax
+ xorl %edx,%edx
+ wrmsr
+ ret
+
+/*
+ * I/O Interrupt Entry Point. Rather than having one entry point for
+ * each interrupt source, we use one entry point for each 32-bit word
+ * in the ISR. The handler determines the highest bit set in the ISR,
+ * translates that into a vector, and passes the vector to the
+ * lapic_handle_intr() function.
+ */
+ .macro ISR_VEC index, vec_name
+ INTR_HANDLER \vec_name
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ cmpl $0,x2apic_mode
+ je 1f
+ movl $(MSR_APIC_ISR0 + \index),%ecx
+ rdmsr
+ jmp 2f
+1:
+ movq lapic_map, %rdx /* pointer to local APIC */
+ movl LA_ISR + 16 * (\index)(%rdx), %eax /* load ISR */
+2:
+ bsrl %eax, %eax /* index of highest set bit in ISR */
+ jz 3f
+ addl $(32 * \index),%eax
+ movq %rsp, %rsi
+ movl %eax, %edi /* pass the IRQ */
+ call lapic_handle_intr
+3:
+ MEXITCOUNT
+ jmp doreti
+ .endm
+
+/*
+ * Handle "spurious INTerrupts".
+ * Notes:
+ * This is different than the "spurious INTerrupt" generated by an
+ * 8259 PIC for missing INTs. See the APIC documentation for details.
+ * This routine should NOT do an 'EOI' cycle.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(spuriousint)
+ /* No EOI cycle used here */
+ jmp doreti_iret
+
+ ISR_VEC 1, apic_isr1
+ ISR_VEC 2, apic_isr2
+ ISR_VEC 3, apic_isr3
+ ISR_VEC 4, apic_isr4
+ ISR_VEC 5, apic_isr5
+ ISR_VEC 6, apic_isr6
+ ISR_VEC 7, apic_isr7
+
+/*
+ * Local APIC periodic timer handler.
+ */
+ INTR_HANDLER timerint
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ movq %rsp, %rdi
+ call lapic_handle_timer
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Local APIC CMCI handler.
+ */
+ INTR_HANDLER cmcint
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ call lapic_handle_cmc
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Local APIC error interrupt handler.
+ */
+ INTR_HANDLER errorint
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ call lapic_handle_error
+ MEXITCOUNT
+ jmp doreti
+
+#ifdef XENHVM
+/*
+ * Xen event channel upcall interrupt handler.
+ * Only used when the hypervisor supports direct vector callbacks.
+ */
+ INTR_HANDLER xen_intr_upcall
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ movq %rsp, %rdi
+ call xen_intr_handle_upcall
+ MEXITCOUNT
+ jmp doreti
+#endif
+
+#ifdef SMP
+/*
+ * Global address space TLB shootdown.
+ */
+ .text
+
+ SUPERALIGN_TEXT
+invltlb_ret:
+ call as_lapic_eoi
+ jmp ld_regs
+
+ SUPERALIGN_TEXT
+ INTR_HANDLER invltlb
+ call invltlb_handler
+ jmp invltlb_ret
+
+ INTR_HANDLER invltlb_pcid
+ call invltlb_pcid_handler
+ jmp invltlb_ret
+
+	INTR_HANDLER invltlb_invpcid_nopti
+	call	invltlb_invpcid_handler
+	jmp	invltlb_ret
+
+ INTR_HANDLER invltlb_invpcid_pti
+ call invltlb_invpcid_pti_handler
+ jmp invltlb_ret
+
+/*
+ * Single page TLB shootdown
+ */
+ INTR_HANDLER invlpg
+ call invlpg_handler
+ jmp invltlb_ret
+
+	INTR_HANDLER invlpg_invpcid
+	call	invlpg_invpcid_handler
+	jmp	invltlb_ret
+
+	INTR_HANDLER invlpg_pcid
+	call	invlpg_pcid_handler
+ jmp invltlb_ret
+
+/*
+ * Page range TLB shootdown.
+ */
+ INTR_HANDLER invlrng
+ call invlrng_handler
+ jmp invltlb_ret
+
+	INTR_HANDLER invlrng_invpcid
+	call	invlrng_invpcid_handler
+	jmp	invltlb_ret
+
+	INTR_HANDLER invlrng_pcid
+	call	invlrng_pcid_handler
+ jmp invltlb_ret
+
+/*
+ * Invalidate cache.
+ */
+ INTR_HANDLER invlcache
+ call invlcache_handler
+ jmp invltlb_ret
+
+/*
+ * Handler for IPIs sent via the per-cpu IPI bitmap.
+ */
+ INTR_HANDLER ipi_intr_bitmap_handler
+ call as_lapic_eoi
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ call ipi_bitmap_handler
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Executed by a CPU when it receives an IPI_STOP from another CPU.
+ */
+ INTR_HANDLER cpustop
+ call as_lapic_eoi
+ call cpustop_handler
+ jmp doreti
+
+/*
+ * Executed by a CPU when it receives an IPI_SUSPEND from another CPU.
+ */
+ INTR_HANDLER cpususpend
+ call cpususpend_handler
+ call as_lapic_eoi
+ jmp doreti
+
+/*
+ * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
+ *
+ * - Calls the generic rendezvous action function.
+ */
+ INTR_HANDLER rendezvous
+#ifdef COUNT_IPIS
+ movl PCPU(CPUID), %eax
+ movq ipi_rendezvous_counts(,%rax,8), %rax
+ incq (%rax)
+#endif
+ call smp_rendezvous_action
+ call as_lapic_eoi
+ jmp doreti
+
+/*
+ * IPI handler whose purpose is to interrupt the CPU with minimum overhead.
+ * This is used by bhyve to force a host cpu executing in guest context to
+ * trap into the hypervisor.
+ *
+ * This handler is different from other IPI handlers in the following aspects:
+ *
+ * 1. It doesn't push a trapframe on the stack.
+ *
+ * This implies that a DDB backtrace involving 'justreturn' will skip the
+ * function that was interrupted by this handler.
+ *
+ * 2. It doesn't 'swapgs' when userspace is interrupted.
+ *
+ * The 'justreturn' handler does not access any pcpu data so it is not an
+ * issue. Moreover the 'justreturn' handler can only be interrupted by an NMI
+ * whose handler already doesn't trust GS.base when kernel code is interrupted.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(justreturn)
+ pushq %rax
+ pushq %rcx
+ pushq %rdx
+ call as_lapic_eoi
+ popq %rdx
+ popq %rcx
+ popq %rax
+ jmp doreti_iret
+
+ INTR_HANDLER justreturn1
+ call as_lapic_eoi
+ jmp doreti
+
+#endif /* SMP */
Index: sys/amd64/amd64/atpic_vector.S
===================================================================
--- sys/amd64/amd64/atpic_vector.S
+++ sys/amd64/amd64/atpic_vector.S
@@ -36,7 +36,7 @@
* master and slave interrupt controllers.
*/
-#include "assym.s"
+#include "assym.S"
#include <machine/asmacros.h>
/*
Index: sys/amd64/amd64/atpic_vector.S.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/atpic_vector.S.orig
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: vector.s, 386BSD 0.1 unknown origin
+ * $FreeBSD$
+ */
+
+/*
+ * Interrupt entry points for external interrupts triggered by the 8259A
+ * master and slave interrupt controllers.
+ */
+
+#include <machine/asmacros.h>
+
+#include "assym.S"
+
+/*
+ * Macros for interrupt entry, call to handler, and exit.
+ */
+#define INTR(irq_num, vec_name) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ FAKE_MCOUNT(TF_RIP(%rsp)) ; \
+ movq %rsp, %rsi ; \
+ movl $irq_num, %edi; /* pass the IRQ */ \
+ call atpic_handle_intr ; \
+ MEXITCOUNT ; \
+ jmp doreti
+
+ INTR(0, atpic_intr0)
+ INTR(1, atpic_intr1)
+ INTR(2, atpic_intr2)
+ INTR(3, atpic_intr3)
+ INTR(4, atpic_intr4)
+ INTR(5, atpic_intr5)
+ INTR(6, atpic_intr6)
+ INTR(7, atpic_intr7)
+ INTR(8, atpic_intr8)
+ INTR(9, atpic_intr9)
+ INTR(10, atpic_intr10)
+ INTR(11, atpic_intr11)
+ INTR(12, atpic_intr12)
+ INTR(13, atpic_intr13)
+ INTR(14, atpic_intr14)
+ INTR(15, atpic_intr15)
Index: sys/amd64/amd64/cpu_switch.S
===================================================================
--- sys/amd64/amd64/cpu_switch.S
+++ sys/amd64/amd64/cpu_switch.S
@@ -36,7 +36,7 @@
#include <machine/asmacros.h>
#include <machine/specialreg.h>
-#include "assym.s"
+#include "assym.S"
#include "opt_sched.h"
/*****************************************************************************/
Index: sys/amd64/amd64/cpu_switch.S.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/cpu_switch.S.orig
@@ -0,0 +1,499 @@
+/*-
+ * Copyright (c) 2003 Peter Wemm.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <machine/asmacros.h>
+#include <machine/specialreg.h>
+
+#include "assym.S"
+#include "opt_sched.h"
+
+/*****************************************************************************/
+/* Scheduling */
+/*****************************************************************************/
+
+ .text
+
+#ifdef SMP
+#define LK lock ;
+#else
+#define LK
+#endif
+
+#if defined(SCHED_ULE) && defined(SMP)
+#define SETLK xchgq
+#else
+#define SETLK movq
+#endif
+
+/*
+ * cpu_throw()
+ *
+ * This is the second half of cpu_switch(). It is used when the current
+ * thread is either a dummy or slated to die, and we no longer care
+ * about its state. This is only a slight optimization and is probably
+ * not worth it anymore. Note that we need to clear the pm_active bits so
+ * we do need the old proc if it still exists.
+ * %rdi = oldtd
+ * %rsi = newtd
+ */
+ENTRY(cpu_throw)
+ movq %rsi,%r12
+ movq %rsi,%rdi
+ call pmap_activate_sw
+ jmp sw1
+END(cpu_throw)
+
+/*
+ * cpu_switch(old, new, mtx)
+ *
+ * Save the current thread state, then select the next thread to run
+ * and load its state.
+ * %rdi = oldtd
+ * %rsi = newtd
+ * %rdx = mtx
+ */
+ENTRY(cpu_switch)
+ /* Switch to new thread. First, save context. */
+ movq TD_PCB(%rdi),%r8
+
+ movq (%rsp),%rax /* Hardware registers */
+ movq %r15,PCB_R15(%r8)
+ movq %r14,PCB_R14(%r8)
+ movq %r13,PCB_R13(%r8)
+ movq %r12,PCB_R12(%r8)
+ movq %rbp,PCB_RBP(%r8)
+ movq %rsp,PCB_RSP(%r8)
+ movq %rbx,PCB_RBX(%r8)
+ movq %rax,PCB_RIP(%r8)
+
+ testl $PCB_FULL_IRET,PCB_FLAGS(%r8)
+ jnz 2f
+ orl $PCB_FULL_IRET,PCB_FLAGS(%r8)
+ testl $TDP_KTHREAD,TD_PFLAGS(%rdi)
+ jnz 2f
+ testb $CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
+ jz 2f
+ movl %fs,%eax
+ cmpl $KUF32SEL,%eax
+ jne 1f
+ rdfsbase %rax
+ movq %rax,PCB_FSBASE(%r8)
+1: movl %gs,%eax
+ cmpl $KUG32SEL,%eax
+ jne 2f
+ movq %rdx,%r12
+ movl $MSR_KGSBASE,%ecx /* Read user gs base */
+ rdmsr
+ shlq $32,%rdx
+ orq %rdx,%rax
+ movq %rax,PCB_GSBASE(%r8)
+ movq %r12,%rdx
+
+2:
+ testl $PCB_DBREGS,PCB_FLAGS(%r8)
+ jnz store_dr /* static predict not taken */
+done_store_dr:
+
+ /* have we used fp, and need a save? */
+ cmpq %rdi,PCPU(FPCURTHREAD)
+ jne 3f
+ movq PCB_SAVEFPU(%r8),%r8
+ clts
+ cmpl $0,use_xsave
+ jne 1f
+ fxsave (%r8)
+ jmp 2f
+1: movq %rdx,%rcx
+ movl xsave_mask,%eax
+ movl xsave_mask+4,%edx
+ .globl ctx_switch_xsave
+ctx_switch_xsave:
+ /* This is patched to xsaveopt if supported, see fpuinit_bsp1() */
+ xsave (%r8)
+ movq %rcx,%rdx
+2: smsw %ax
+ orb $CR0_TS,%al
+ lmsw %ax
+ xorl %eax,%eax
+ movq %rax,PCPU(FPCURTHREAD)
+3:
+ /* Save is done. Now fire up new thread. Leave old vmspace. */
+ movq %rsi,%r12
+ movq %rdi,%r13
+ movq %rdx,%r15
+ movq %rsi,%rdi
+ callq pmap_activate_sw
+ SETLK %r15,TD_LOCK(%r13) /* Release the old thread */
+sw1:
+ movq TD_PCB(%r12),%r8
+#if defined(SCHED_ULE) && defined(SMP)
+ /* Wait for the new thread to become unblocked */
+ movq $blocked_lock, %rdx
+1:
+ movq TD_LOCK(%r12),%rcx
+ cmpq %rcx, %rdx
+ pause
+ je 1b
+#endif
+ /*
+ * At this point, we've switched address spaces and are ready
+ * to load up the rest of the next context.
+ */
+
+ /* Skip loading LDT and user fsbase/gsbase for kthreads */
+ testl $TDP_KTHREAD,TD_PFLAGS(%r12)
+ jnz do_kthread
+
+ /*
+ * Load ldt register
+ */
+ movq TD_PROC(%r12),%rcx
+ cmpq $0, P_MD+MD_LDT(%rcx)
+ jne do_ldt
+ xorl %eax,%eax
+ld_ldt: lldt %ax
+
+ /* Restore fs base in GDT */
+ movl PCB_FSBASE(%r8),%eax
+ movq PCPU(FS32P),%rdx
+ movw %ax,2(%rdx)
+ shrl $16,%eax
+ movb %al,4(%rdx)
+ shrl $8,%eax
+ movb %al,7(%rdx)
+
+ /* Restore gs base in GDT */
+ movl PCB_GSBASE(%r8),%eax
+ movq PCPU(GS32P),%rdx
+ movw %ax,2(%rdx)
+ shrl $16,%eax
+ movb %al,4(%rdx)
+ shrl $8,%eax
+ movb %al,7(%rdx)
+
+do_kthread:
+ /* Do we need to reload tss ? */
+ movq PCPU(TSSP),%rax
+ movq PCB_TSSP(%r8),%rdx
+ testq %rdx,%rdx
+ cmovzq PCPU(COMMONTSSP),%rdx
+ cmpq %rax,%rdx
+ jne do_tss
+done_tss:
+ movq %r8,PCPU(RSP0)
+ movq %r8,PCPU(CURPCB)
+ /* Update the TSS_RSP0 pointer for the next interrupt */
+ movq %r8,TSS_RSP0(%rdx)
+ movq %r12,PCPU(CURTHREAD) /* into next thread */
+
+ /* Test if debug registers should be restored. */
+ testl $PCB_DBREGS,PCB_FLAGS(%r8)
+ jnz load_dr /* static predict not taken */
+done_load_dr:
+
+ /* Restore context. */
+ movq PCB_R15(%r8),%r15
+ movq PCB_R14(%r8),%r14
+ movq PCB_R13(%r8),%r13
+ movq PCB_R12(%r8),%r12
+ movq PCB_RBP(%r8),%rbp
+ movq PCB_RSP(%r8),%rsp
+ movq PCB_RBX(%r8),%rbx
+ movq PCB_RIP(%r8),%rax
+ movq %rax,(%rsp)
+ ret
+
+ /*
+ * We order these strangely for several reasons.
+ * 1: I wanted to use static branch prediction hints
+ * 2: Most athlon64/opteron cpus don't have them. They define
+ * a forward branch as 'predict not taken'. Intel cores have
+ * the 'rep' prefix to invert this.
+ * So, to make it work on both forms of cpu we do the detour.
+ * We use jumps rather than call in order to avoid the stack.
+ */
+
+store_dr:
+ movq %dr7,%rax /* yes, do the save */
+ movq %dr0,%r15
+ movq %dr1,%r14
+ movq %dr2,%r13
+ movq %dr3,%r12
+ movq %dr6,%r11
+ movq %r15,PCB_DR0(%r8)
+ movq %r14,PCB_DR1(%r8)
+ movq %r13,PCB_DR2(%r8)
+ movq %r12,PCB_DR3(%r8)
+ movq %r11,PCB_DR6(%r8)
+ movq %rax,PCB_DR7(%r8)
+ andq $0x0000fc00, %rax /* disable all watchpoints */
+ movq %rax,%dr7
+ jmp done_store_dr
+
+load_dr:
+ movq %dr7,%rax
+ movq PCB_DR0(%r8),%r15
+ movq PCB_DR1(%r8),%r14
+ movq PCB_DR2(%r8),%r13
+ movq PCB_DR3(%r8),%r12
+ movq PCB_DR6(%r8),%r11
+ movq PCB_DR7(%r8),%rcx
+ movq %r15,%dr0
+ movq %r14,%dr1
+ /* Preserve reserved bits in %dr7 */
+ andq $0x0000fc00,%rax
+ andq $~0x0000fc00,%rcx
+ movq %r13,%dr2
+ movq %r12,%dr3
+ orq %rcx,%rax
+ movq %r11,%dr6
+ movq %rax,%dr7
+ jmp done_load_dr
+
+do_tss: movq %rdx,PCPU(TSSP)
+ movq %rdx,%rcx
+ movq PCPU(TSS),%rax
+ movw %cx,2(%rax)
+ shrq $16,%rcx
+ movb %cl,4(%rax)
+ shrq $8,%rcx
+ movb %cl,7(%rax)
+ shrq $8,%rcx
+ movl %ecx,8(%rax)
+ movb $0x89,5(%rax) /* unset busy */
+ movl $TSSSEL,%eax
+ ltr %ax
+ jmp done_tss
+
+do_ldt: movq PCPU(LDT),%rax
+ movq P_MD+MD_LDT_SD(%rcx),%rdx
+ movq %rdx,(%rax)
+ movq P_MD+MD_LDT_SD+8(%rcx),%rdx
+ movq %rdx,8(%rax)
+ movl $LDTSEL,%eax
+ jmp ld_ldt
+END(cpu_switch)
+
+/*
+ * savectx(pcb)
+ * Update pcb, saving current processor state.
+ */
+ENTRY(savectx)
+ /* Save caller's return address. */
+ movq (%rsp),%rax
+ movq %rax,PCB_RIP(%rdi)
+
+ movq %rbx,PCB_RBX(%rdi)
+ movq %rsp,PCB_RSP(%rdi)
+ movq %rbp,PCB_RBP(%rdi)
+ movq %r12,PCB_R12(%rdi)
+ movq %r13,PCB_R13(%rdi)
+ movq %r14,PCB_R14(%rdi)
+ movq %r15,PCB_R15(%rdi)
+
+ movq %cr0,%rax
+ movq %rax,PCB_CR0(%rdi)
+ movq %cr2,%rax
+ movq %rax,PCB_CR2(%rdi)
+ movq %cr3,%rax
+ movq %rax,PCB_CR3(%rdi)
+ movq %cr4,%rax
+ movq %rax,PCB_CR4(%rdi)
+
+ movq %dr0,%rax
+ movq %rax,PCB_DR0(%rdi)
+ movq %dr1,%rax
+ movq %rax,PCB_DR1(%rdi)
+ movq %dr2,%rax
+ movq %rax,PCB_DR2(%rdi)
+ movq %dr3,%rax
+ movq %rax,PCB_DR3(%rdi)
+ movq %dr6,%rax
+ movq %rax,PCB_DR6(%rdi)
+ movq %dr7,%rax
+ movq %rax,PCB_DR7(%rdi)
+
+ movl $MSR_FSBASE,%ecx
+ rdmsr
+ movl %eax,PCB_FSBASE(%rdi)
+ movl %edx,PCB_FSBASE+4(%rdi)
+ movl $MSR_GSBASE,%ecx
+ rdmsr
+ movl %eax,PCB_GSBASE(%rdi)
+ movl %edx,PCB_GSBASE+4(%rdi)
+ movl $MSR_KGSBASE,%ecx
+ rdmsr
+ movl %eax,PCB_KGSBASE(%rdi)
+ movl %edx,PCB_KGSBASE+4(%rdi)
+ movl $MSR_EFER,%ecx
+ rdmsr
+ movl %eax,PCB_EFER(%rdi)
+ movl %edx,PCB_EFER+4(%rdi)
+ movl $MSR_STAR,%ecx
+ rdmsr
+ movl %eax,PCB_STAR(%rdi)
+ movl %edx,PCB_STAR+4(%rdi)
+ movl $MSR_LSTAR,%ecx
+ rdmsr
+ movl %eax,PCB_LSTAR(%rdi)
+ movl %edx,PCB_LSTAR+4(%rdi)
+ movl $MSR_CSTAR,%ecx
+ rdmsr
+ movl %eax,PCB_CSTAR(%rdi)
+ movl %edx,PCB_CSTAR+4(%rdi)
+ movl $MSR_SF_MASK,%ecx
+ rdmsr
+ movl %eax,PCB_SFMASK(%rdi)
+ movl %edx,PCB_SFMASK+4(%rdi)
+
+ sgdt PCB_GDT(%rdi)
+ sidt PCB_IDT(%rdi)
+ sldt PCB_LDT(%rdi)
+ str PCB_TR(%rdi)
+
+ movl $1,%eax
+ ret
+END(savectx)
+
+/*
+ * resumectx(pcb)
+ * Resuming processor state from pcb.
+ */
+ENTRY(resumectx)
+ /* Switch to KPML4phys. */
+ movq KPML4phys,%rax
+ movq %rax,%cr3
+
+ /* Force kernel segment registers. */
+ movl $KDSEL,%eax
+ movw %ax,%ds
+ movw %ax,%es
+ movw %ax,%ss
+ movl $KUF32SEL,%eax
+ movw %ax,%fs
+ movl $KUG32SEL,%eax
+ movw %ax,%gs
+
+ movl $MSR_FSBASE,%ecx
+ movl PCB_FSBASE(%rdi),%eax
+ movl 4 + PCB_FSBASE(%rdi),%edx
+ wrmsr
+ movl $MSR_GSBASE,%ecx
+ movl PCB_GSBASE(%rdi),%eax
+ movl 4 + PCB_GSBASE(%rdi),%edx
+ wrmsr
+ movl $MSR_KGSBASE,%ecx
+ movl PCB_KGSBASE(%rdi),%eax
+ movl 4 + PCB_KGSBASE(%rdi),%edx
+ wrmsr
+
+ /* Restore EFER one more time. */
+ movl $MSR_EFER,%ecx
+ movl PCB_EFER(%rdi),%eax
+ wrmsr
+
+ /* Restore fast syscall stuff. */
+ movl $MSR_STAR,%ecx
+ movl PCB_STAR(%rdi),%eax
+ movl 4 + PCB_STAR(%rdi),%edx
+ wrmsr
+ movl $MSR_LSTAR,%ecx
+ movl PCB_LSTAR(%rdi),%eax
+ movl 4 + PCB_LSTAR(%rdi),%edx
+ wrmsr
+ movl $MSR_CSTAR,%ecx
+ movl PCB_CSTAR(%rdi),%eax
+ movl 4 + PCB_CSTAR(%rdi),%edx
+ wrmsr
+ movl $MSR_SF_MASK,%ecx
+ movl PCB_SFMASK(%rdi),%eax
+ wrmsr
+
+ /* Restore CR0, CR2, CR4 and CR3. */
+ movq PCB_CR0(%rdi),%rax
+ movq %rax,%cr0
+ movq PCB_CR2(%rdi),%rax
+ movq %rax,%cr2
+ movq PCB_CR4(%rdi),%rax
+ movq %rax,%cr4
+ movq PCB_CR3(%rdi),%rax
+ movq %rax,%cr3
+
+ /* Restore descriptor tables. */
+ lidt PCB_IDT(%rdi)
+ lldt PCB_LDT(%rdi)
+
+#define SDT_SYSTSS 9
+#define SDT_SYSBSY 11
+
+ /* Clear "task busy" bit and reload TR. */
+ movq PCPU(TSS),%rax
+ andb $(~SDT_SYSBSY | SDT_SYSTSS),5(%rax)
+ movw PCB_TR(%rdi),%ax
+ ltr %ax
+
+#undef SDT_SYSTSS
+#undef SDT_SYSBSY
+
+ /* Restore debug registers. */
+ movq PCB_DR0(%rdi),%rax
+ movq %rax,%dr0
+ movq PCB_DR1(%rdi),%rax
+ movq %rax,%dr1
+ movq PCB_DR2(%rdi),%rax
+ movq %rax,%dr2
+ movq PCB_DR3(%rdi),%rax
+ movq %rax,%dr3
+ movq PCB_DR6(%rdi),%rax
+ movq %rax,%dr6
+ movq PCB_DR7(%rdi),%rax
+ movq %rax,%dr7
+
+ /* Restore other callee saved registers. */
+ movq PCB_R15(%rdi),%r15
+ movq PCB_R14(%rdi),%r14
+ movq PCB_R13(%rdi),%r13
+ movq PCB_R12(%rdi),%r12
+ movq PCB_RBP(%rdi),%rbp
+ movq PCB_RSP(%rdi),%rsp
+ movq PCB_RBX(%rdi),%rbx
+
+ /* Restore return address. */
+ movq PCB_RIP(%rdi),%rax
+ movq %rax,(%rsp)
+
+ xorl %eax,%eax
+ ret
+END(resumectx)
Index: sys/amd64/amd64/exception.S
===================================================================
--- sys/amd64/amd64/exception.S
+++ sys/amd64/amd64/exception.S
@@ -42,7 +42,7 @@
#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"
-#include "assym.s"
+#include "assym.S"
#include <machine/asmacros.h>
#include <machine/psl.h>
Index: sys/amd64/amd64/exception.S.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/exception.S.orig
@@ -0,0 +1,969 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_atpic.h"
+#include "opt_compat.h"
+#include "opt_hwpmc_hooks.h"
+
+#include <machine/asmacros.h>
+#include <machine/psl.h>
+#include <machine/trap.h>
+#include <machine/specialreg.h>
+
+#include "assym.S"
+
+#ifdef KDTRACE_HOOKS
+ .bss
+ .globl dtrace_invop_jump_addr
+ .align 8
+ .type dtrace_invop_jump_addr,@object
+ .size dtrace_invop_jump_addr,8
+dtrace_invop_jump_addr:
+ .zero 8
+ .globl dtrace_invop_calltrap_addr
+ .align 8
+ .type dtrace_invop_calltrap_addr,@object
+ .size dtrace_invop_calltrap_addr,8
+dtrace_invop_calltrap_addr:
+ .zero 8
+#endif
+ .text
+#ifdef HWPMC_HOOKS
+ ENTRY(start_exceptions)
+#endif
+
+/*****************************************************************************/
+/* Trap handling */
+/*****************************************************************************/
+/*
+ * Trap and fault vector routines.
+ *
+ * All traps are 'interrupt gates', SDT_SYSIGT. An interrupt gate pushes
+ * state on the stack but also disables interrupts. This is important for
+ * us for the use of the swapgs instruction. We cannot be interrupted
+ * until the GS.base value is correct. For most traps, we automatically
+ * then enable interrupts if the interrupted context had them enabled.
+ * This is equivalent to the i386 port's use of SDT_SYS386TGT.
+ *
+ * The cpu will push a certain amount of state onto the kernel stack for
+ * the current process. See amd64/include/frame.h.
+ * This includes the current RFLAGS (status register, which includes
+ * the interrupt disable state prior to the trap), the code segment register,
+ * and the return instruction pointer are pushed by the cpu. The cpu
+ * will also push an 'error' code for certain traps. We push a dummy
+ * error code for those traps where the cpu doesn't in order to maintain
+ * a consistent frame. We also push a contrived 'trap number'.
+ *
+ * The CPU does not push the general registers, so we must do that, and we
+ * must restore them prior to calling 'iret'. The CPU adjusts %cs and %ss
+ * but does not mess with %ds, %es, %gs or %fs. We swap the %gs base
+ * for the kernel mode operation shortly, without changes to the selector
+ * loaded. Since superuser long mode works with any selectors loaded into
+ * segment registers other than %cs, which makes them mostly unused in long
+ * mode, and kernel does not reference %fs, leave them alone. The segment
+ * registers are reloaded on return to the usermode.
+ */
+
+MCOUNT_LABEL(user)
+MCOUNT_LABEL(btrap)
+
+/* Traps that we leave interrupts disabled for.. */
+#define TRAP_NOEN(a) \
+ subq $TF_RIP,%rsp; \
+ movl $(a),TF_TRAPNO(%rsp) ; \
+ movq $0,TF_ADDR(%rsp) ; \
+ movq $0,TF_ERR(%rsp) ; \
+ jmp alltraps_noen
+IDTVEC(dbg)
+ TRAP_NOEN(T_TRCTRAP)
+IDTVEC(bpt)
+ TRAP_NOEN(T_BPTFLT)
+#ifdef KDTRACE_HOOKS
+IDTVEC(dtrace_ret)
+ TRAP_NOEN(T_DTRACE_RET)
+#endif
+
+/* Regular traps; The cpu does not supply tf_err for these. */
+#define TRAP(a) \
+ subq $TF_RIP,%rsp; \
+ movl $(a),TF_TRAPNO(%rsp) ; \
+ movq $0,TF_ADDR(%rsp) ; \
+ movq $0,TF_ERR(%rsp) ; \
+ jmp alltraps
+IDTVEC(div)
+ TRAP(T_DIVIDE)
+IDTVEC(ofl)
+ TRAP(T_OFLOW)
+IDTVEC(bnd)
+ TRAP(T_BOUND)
+IDTVEC(ill)
+ TRAP(T_PRIVINFLT)
+IDTVEC(dna)
+ TRAP(T_DNA)
+IDTVEC(fpusegm)
+ TRAP(T_FPOPFLT)
+IDTVEC(mchk)
+ TRAP(T_MCHK)
+IDTVEC(rsvd)
+ TRAP(T_RESERVED)
+IDTVEC(fpu)
+ TRAP(T_ARITHTRAP)
+IDTVEC(xmm)
+ TRAP(T_XMMFLT)
+
+/* This group of traps have tf_err already pushed by the cpu */
+#define TRAP_ERR(a) \
+ subq $TF_ERR,%rsp; \
+ movl $(a),TF_TRAPNO(%rsp) ; \
+ movq $0,TF_ADDR(%rsp) ; \
+ jmp alltraps
+IDTVEC(tss)
+ TRAP_ERR(T_TSSFLT)
+IDTVEC(missing)
+ subq $TF_ERR,%rsp
+ movl $T_SEGNPFLT,TF_TRAPNO(%rsp)
+ jmp prot_addrf
+IDTVEC(stk)
+ subq $TF_ERR,%rsp
+ movl $T_STKFLT,TF_TRAPNO(%rsp)
+ jmp prot_addrf
+IDTVEC(align)
+ TRAP_ERR(T_ALIGNFLT)
+
+ /*
+ * alltraps entry point. Use swapgs if this is the first time in the
+ * kernel from userland. Reenable interrupts if they were enabled
+ * before the trap. This approximates SDT_SYS386TGT on the i386 port.
+ */
+ SUPERALIGN_TEXT
+ .globl alltraps
+ .type alltraps,@function
+alltraps:
+ movq %rdi,TF_RDI(%rsp)
+ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
+ jz alltraps_testi /* already running with kernel GS.base */
+ swapgs
+ movq PCPU(CURPCB),%rdi
+ andl $~PCB_FULL_IRET,PCB_FLAGS(%rdi)
+ movw %fs,TF_FS(%rsp)
+ movw %gs,TF_GS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+alltraps_testi:
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz alltraps_pushregs_no_rdi
+ sti
+alltraps_pushregs_no_rdi:
+ movq %rdx,TF_RDX(%rsp)
+ movq %rax,TF_RAX(%rsp)
+alltraps_pushregs_no_rax:
+ movq %rsi,TF_RSI(%rsp)
+ movq %rcx,TF_RCX(%rsp)
+ movq %r8,TF_R8(%rsp)
+ movq %r9,TF_R9(%rsp)
+ movq %rbx,TF_RBX(%rsp)
+ movq %rbp,TF_RBP(%rsp)
+ movq %r10,TF_R10(%rsp)
+ movq %r11,TF_R11(%rsp)
+ movq %r12,TF_R12(%rsp)
+ movq %r13,TF_R13(%rsp)
+ movq %r14,TF_R14(%rsp)
+ movq %r15,TF_R15(%rsp)
+ movl $TF_HASSEGS,TF_FLAGS(%rsp)
+ cld
+ FAKE_MCOUNT(TF_RIP(%rsp))
+#ifdef KDTRACE_HOOKS
+ /*
+ * DTrace Function Boundary Trace (fbt) probes are triggered
+ * by int3 (0xcc) which causes the #BP (T_BPTFLT) breakpoint
+ * interrupt. For all other trap types, just handle them in
+ * the usual way.
+ */
+ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
+ jnz calltrap /* ignore userland traps */
+ cmpl $T_BPTFLT,TF_TRAPNO(%rsp)
+ jne calltrap
+
+ /* Check if there is no DTrace hook registered. */
+ cmpq $0,dtrace_invop_jump_addr
+ je calltrap
+
+ /*
+ * Set our jump address for the jump back in the event that
+ * the breakpoint wasn't caused by DTrace at all.
+ */
+ movq $calltrap,dtrace_invop_calltrap_addr(%rip)
+
+ /* Jump to the code hooked in by DTrace. */
+ jmpq *dtrace_invop_jump_addr
+#endif
+ .globl calltrap
+ .type calltrap,@function
+calltrap:
+ movq %rsp,%rdi
+ call trap_check
+ MEXITCOUNT
+ jmp doreti /* Handle any pending ASTs */
+
+ /*
+ * alltraps_noen entry point. Unlike alltraps above, we want to
+ * leave the interrupts disabled. This corresponds to
+ * SDT_SYS386IGT on the i386 port.
+ */
+ SUPERALIGN_TEXT
+ .globl alltraps_noen
+ .type alltraps_noen,@function
+alltraps_noen:
+ movq %rdi,TF_RDI(%rsp)
+ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
+ jz 1f /* already running with kernel GS.base */
+ swapgs
+ movq PCPU(CURPCB),%rdi
+ andl $~PCB_FULL_IRET,PCB_FLAGS(%rdi)
+1: movw %fs,TF_FS(%rsp)
+ movw %gs,TF_GS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+ jmp alltraps_pushregs_no_rdi
+
+IDTVEC(dblfault)
+ subq $TF_ERR,%rsp
+ movl $T_DOUBLEFLT,TF_TRAPNO(%rsp)
+ movq $0,TF_ADDR(%rsp)
+ movq $0,TF_ERR(%rsp)
+ movq %rdi,TF_RDI(%rsp)
+ movq %rsi,TF_RSI(%rsp)
+ movq %rdx,TF_RDX(%rsp)
+ movq %rcx,TF_RCX(%rsp)
+ movq %r8,TF_R8(%rsp)
+ movq %r9,TF_R9(%rsp)
+ movq %rax,TF_RAX(%rsp)
+ movq %rbx,TF_RBX(%rsp)
+ movq %rbp,TF_RBP(%rsp)
+ movq %r10,TF_R10(%rsp)
+ movq %r11,TF_R11(%rsp)
+ movq %r12,TF_R12(%rsp)
+ movq %r13,TF_R13(%rsp)
+ movq %r14,TF_R14(%rsp)
+ movq %r15,TF_R15(%rsp)
+ movw %fs,TF_FS(%rsp)
+ movw %gs,TF_GS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+ movl $TF_HASSEGS,TF_FLAGS(%rsp)
+ cld
+ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
+ jz 1f /* already running with kernel GS.base */
+ swapgs
+1:
+ movq %rsp,%rdi
+ call dblfault_handler
+2:
+ hlt
+ jmp 2b
+
+IDTVEC(page)
+ subq $TF_ERR,%rsp
+ movl $T_PAGEFLT,TF_TRAPNO(%rsp)
+ movq %rdi,TF_RDI(%rsp) /* free up a GP register */
+ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
+ jz 1f /* already running with kernel GS.base */
+ swapgs
+ movq PCPU(CURPCB),%rdi
+ andl $~PCB_FULL_IRET,PCB_FLAGS(%rdi)
+1: movq %cr2,%rdi /* preserve %cr2 before .. */
+ movq %rdi,TF_ADDR(%rsp) /* enabling interrupts. */
+ movw %fs,TF_FS(%rsp)
+ movw %gs,TF_GS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz alltraps_pushregs_no_rdi
+ sti
+ jmp alltraps_pushregs_no_rdi
+
+ /*
+ * We have to special-case this one. If we get a trap in doreti() at
+ * the iretq stage, we'll reenter with the wrong gs state. We'll have
+ * to do a special swapgs in this case even when coming from the kernel.
+ * XXX linux has a trap handler for their equivalent of load_gs().
+ */
+IDTVEC(prot)
+ subq $TF_ERR,%rsp
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+prot_addrf:
+ movq $0,TF_ADDR(%rsp)
+ movq %rdi,TF_RDI(%rsp) /* free up a GP register */
+ movq %rax,TF_RAX(%rsp)
+ movq %rdx,TF_RDX(%rsp)
+ movw %fs,TF_FS(%rsp)
+ movw %gs,TF_GS(%rsp)
+ leaq doreti_iret(%rip),%rdi
+ cmpq %rdi,TF_RIP(%rsp)
+ je 5f /* kernel but with user gsbase!! */
+ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
+ jz 6f /* already running with kernel GS.base */
+ testb $CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
+ jz 2f
+ cmpw $KUF32SEL,TF_FS(%rsp)
+ jne 1f
+ rdfsbase %rax
+1: cmpw $KUG32SEL,TF_GS(%rsp)
+ jne 2f
+ rdgsbase %rdx
+2: swapgs
+ movq PCPU(CURPCB),%rdi
+ testb $CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
+ jz 4f
+ cmpw $KUF32SEL,TF_FS(%rsp)
+ jne 3f
+ movq %rax,PCB_FSBASE(%rdi)
+3: cmpw $KUG32SEL,TF_GS(%rsp)
+ jne 4f
+ movq %rdx,PCB_GSBASE(%rdi)
+4: orl $PCB_FULL_IRET,PCB_FLAGS(%rdi) /* always full iret from GPF */
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz alltraps_pushregs_no_rax
+ sti
+ jmp alltraps_pushregs_no_rax
+
+5: swapgs
+6: movq PCPU(CURPCB),%rdi
+ jmp 4b
+
+/*
+ * Fast syscall entry point. We enter here with just our new %cs/%ss set,
+ * and the new privilege level. We are still running on the old user stack
+ * pointer. We have to juggle a few things around to find our stack etc.
+ * swapgs gives us access to our PCPU space only.
+ *
+ * We do not support invoking this from custom segment registers,
+ * esp. %cs, %ss, %fs, %gs, e.g. using entries from an LDT.
+ */
+IDTVEC(fast_syscall)
+ swapgs
+ movq %rsp,PCPU(SCRATCH_RSP)
+ movq PCPU(RSP0),%rsp
+ /* Now emulate a trapframe. Make the 8 byte alignment odd for call. */
+ subq $TF_SIZE,%rsp
+ /* defer TF_RSP till we have a spare register */
+ movq %r11,TF_RFLAGS(%rsp)
+ movq %rcx,TF_RIP(%rsp) /* %rcx original value is in %r10 */
+ movq PCPU(SCRATCH_RSP),%r11 /* %r11 already saved */
+ movq %r11,TF_RSP(%rsp) /* user stack pointer */
+ movw %fs,TF_FS(%rsp)
+ movw %gs,TF_GS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+ movq PCPU(CURPCB),%r11
+ andl $~PCB_FULL_IRET,PCB_FLAGS(%r11)
+ sti
+ movq $KUDSEL,TF_SS(%rsp)
+ movq $KUCSEL,TF_CS(%rsp)
+ movq $2,TF_ERR(%rsp)
+ movq %rdi,TF_RDI(%rsp) /* arg 1 */
+ movq %rsi,TF_RSI(%rsp) /* arg 2 */
+ movq %rdx,TF_RDX(%rsp) /* arg 3 */
+ movq %r10,TF_RCX(%rsp) /* arg 4 */
+ movq %r8,TF_R8(%rsp) /* arg 5 */
+ movq %r9,TF_R9(%rsp) /* arg 6 */
+ movq %rax,TF_RAX(%rsp) /* syscall number */
+ movq %rbx,TF_RBX(%rsp) /* C preserved */
+ movq %rbp,TF_RBP(%rsp) /* C preserved */
+ movq %r12,TF_R12(%rsp) /* C preserved */
+ movq %r13,TF_R13(%rsp) /* C preserved */
+ movq %r14,TF_R14(%rsp) /* C preserved */
+ movq %r15,TF_R15(%rsp) /* C preserved */
+ movl $TF_HASSEGS,TF_FLAGS(%rsp)
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ movq PCPU(CURTHREAD),%rdi
+ movq %rsp,TD_FRAME(%rdi)
+ movl TF_RFLAGS(%rsp),%esi
+ andl $PSL_T,%esi
+ call amd64_syscall
+1: movq PCPU(CURPCB),%rax
+ /* Disable interrupts before testing PCB_FULL_IRET. */
+ cli
+ testl $PCB_FULL_IRET,PCB_FLAGS(%rax)
+ jnz 3f
+ /* Check for and handle AST's on return to userland. */
+ movq PCPU(CURTHREAD),%rax
+ testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
+ jne 2f
+ /* Restore preserved registers. */
+ MEXITCOUNT
+ movq TF_RDI(%rsp),%rdi /* bonus; preserve arg 1 */
+ movq TF_RSI(%rsp),%rsi /* bonus: preserve arg 2 */
+ movq TF_RDX(%rsp),%rdx /* return value 2 */
+ movq TF_RAX(%rsp),%rax /* return value 1 */
+ movq TF_RFLAGS(%rsp),%r11 /* original %rflags */
+ movq TF_RIP(%rsp),%rcx /* original %rip */
+ movq TF_RSP(%rsp),%rsp /* user stack pointer */
+ swapgs
+ sysretq
+
+2: /* AST scheduled. */
+ sti
+ movq %rsp,%rdi
+ call ast
+ jmp 1b
+
+3: /* Requested full context restore, use doreti for that. */
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Here for CYA insurance, in case a "syscall" instruction gets
+ * issued from 32 bit compatibility mode. MSR_CSTAR has to point
+ * to *something* if EFER_SCE is enabled.
+ */
+IDTVEC(fast_syscall32)
+ sysret
+
+/*
+ * NMI handling is special.
+ *
+ * First, NMIs do not respect the state of the processor's RFLAGS.IF
+ * bit. The NMI handler may be entered at any time, including when
+ * the processor is in a critical section with RFLAGS.IF == 0.
+ * The processor's GS.base value could be invalid on entry to the
+ * handler.
+ *
+ * Second, the processor treats NMIs specially, blocking further NMIs
+ * until an 'iretq' instruction is executed. We thus need to execute
+ * the NMI handler with interrupts disabled, to prevent a nested interrupt
+ * from executing an 'iretq' instruction and inadvertently taking the
+ * processor out of NMI mode.
+ *
+ * Third, the NMI handler runs on its own stack (tss_ist2). The canonical
+ * GS.base value for the processor is stored just above the bottom of its
+ * NMI stack. For NMIs taken from kernel mode, the current value in
+ * the processor's GS.base is saved at entry to C-preserved register %r12,
+ * the canonical value for GS.base is then loaded into the processor, and
+ * the saved value is restored at exit time. For NMIs taken from user mode,
+ * the cheaper 'SWAPGS' instructions are used for swapping GS.base.
+ */
+
+IDTVEC(nmi)
+ subq $TF_RIP,%rsp
+ movl $(T_NMI),TF_TRAPNO(%rsp)
+ movq $0,TF_ADDR(%rsp)
+ movq $0,TF_ERR(%rsp)
+ movq %rdi,TF_RDI(%rsp)
+ movq %rsi,TF_RSI(%rsp)
+ movq %rdx,TF_RDX(%rsp)
+ movq %rcx,TF_RCX(%rsp)
+ movq %r8,TF_R8(%rsp)
+ movq %r9,TF_R9(%rsp)
+ movq %rax,TF_RAX(%rsp)
+ movq %rbx,TF_RBX(%rsp)
+ movq %rbp,TF_RBP(%rsp)
+ movq %r10,TF_R10(%rsp)
+ movq %r11,TF_R11(%rsp)
+ movq %r12,TF_R12(%rsp)
+ movq %r13,TF_R13(%rsp)
+ movq %r14,TF_R14(%rsp)
+ movq %r15,TF_R15(%rsp)
+ movw %fs,TF_FS(%rsp)
+ movw %gs,TF_GS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+ movl $TF_HASSEGS,TF_FLAGS(%rsp)
+ cld
+ xorl %ebx,%ebx
+ testb $SEL_RPL_MASK,TF_CS(%rsp)
+ jnz nmi_fromuserspace
+ /*
+ * We've interrupted the kernel. Preserve GS.base in %r12.
+ */
+ movl $MSR_GSBASE,%ecx
+ rdmsr
+ movq %rax,%r12
+ shlq $32,%rdx
+ orq %rdx,%r12
+ /* Retrieve and load the canonical value for GS.base. */
+ movq TF_SIZE(%rsp),%rdx
+ movl %edx,%eax
+ shrq $32,%rdx
+ wrmsr
+ jmp nmi_calltrap
+nmi_fromuserspace:
+ incl %ebx
+ swapgs
+ testb $CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
+ jz 2f
+ movq PCPU(CURPCB),%rdi
+ testq %rdi,%rdi
+ jz 2f
+ cmpw $KUF32SEL,TF_FS(%rsp)
+ jne 1f
+ rdfsbase %rax
+ movq %rax,PCB_FSBASE(%rdi)
+1: cmpw $KUG32SEL,TF_GS(%rsp)
+ jne 2f
+ movl $MSR_KGSBASE,%ecx
+ rdmsr
+ shlq $32,%rdx
+ orq %rdx,%rax
+ movq %rax,PCB_GSBASE(%rdi)
+2:
+/* Note: this label is also used by ddb and gdb: */
+nmi_calltrap:
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ movq %rsp,%rdi
+ call trap
+ MEXITCOUNT
+#ifdef HWPMC_HOOKS
+ /*
+ * Capture a userspace callchain if needed.
+ *
+ * - Check if the current trap was from user mode.
+ * - Check if the current thread is valid.
+ * - Check if the thread requires a user call chain to be
+ * captured.
+ *
+ * We are still in NMI mode at this point.
+ */
+ testl %ebx,%ebx
+ jz nocallchain /* not from userspace */
+ movq PCPU(CURTHREAD),%rax
+ orq %rax,%rax /* curthread present? */
+ jz nocallchain
+ testl $TDP_CALLCHAIN,TD_PFLAGS(%rax) /* flagged for capture? */
+ jz nocallchain
+ /*
+ * A user callchain is to be captured, so:
+ * - Move execution to the regular kernel stack, to allow for
+ * nested NMI interrupts.
+ * - Take the processor out of "NMI" mode by faking an "iret".
+ * - Enable interrupts, so that copyin() can work.
+ */
+ movq %rsp,%rsi /* source stack pointer */
+ movq $TF_SIZE,%rcx
+ movq PCPU(RSP0),%rdx
+ subq %rcx,%rdx
+ movq %rdx,%rdi /* destination stack pointer */
+
+ shrq $3,%rcx /* trap frame size in long words */
+ cld
+ rep
+ movsq /* copy trapframe */
+
+ movl %ss,%eax
+ pushq %rax /* tf_ss */
+ pushq %rdx /* tf_rsp (on kernel stack) */
+ pushfq /* tf_rflags */
+ movl %cs,%eax
+ pushq %rax /* tf_cs */
+ pushq $outofnmi /* tf_rip */
+ iretq
+outofnmi:
+ /*
+ * At this point the processor has exited NMI mode and is running
+ * with interrupts turned off on the normal kernel stack.
+ *
+ * If a pending NMI gets recognized at or after this point, it
+ * will cause a kernel callchain to be traced.
+ *
+ * We turn interrupts back on, and call the user callchain capture hook.
+ */
+ movq pmc_hook,%rax
+ orq %rax,%rax
+ jz nocallchain
+ movq PCPU(CURTHREAD),%rdi /* thread */
+ movq $PMC_FN_USER_CALLCHAIN,%rsi /* command */
+ movq %rsp,%rdx /* frame */
+ sti
+ call *%rax
+ cli
+nocallchain:
+#endif
+ testl %ebx,%ebx
+ jnz doreti_exit
+nmi_kernelexit:
+ /*
+ * Put back the preserved MSR_GSBASE value.
+ */
+ movl $MSR_GSBASE,%ecx
+ movq %r12,%rdx
+ movl %edx,%eax
+ shrq $32,%rdx
+ wrmsr
+nmi_restoreregs:
+ movq TF_RDI(%rsp),%rdi
+ movq TF_RSI(%rsp),%rsi
+ movq TF_RDX(%rsp),%rdx
+ movq TF_RCX(%rsp),%rcx
+ movq TF_R8(%rsp),%r8
+ movq TF_R9(%rsp),%r9
+ movq TF_RAX(%rsp),%rax
+ movq TF_RBX(%rsp),%rbx
+ movq TF_RBP(%rsp),%rbp
+ movq TF_R10(%rsp),%r10
+ movq TF_R11(%rsp),%r11
+ movq TF_R12(%rsp),%r12
+ movq TF_R13(%rsp),%r13
+ movq TF_R14(%rsp),%r14
+ movq TF_R15(%rsp),%r15
+ addq $TF_RIP,%rsp
+ jmp doreti_iret
+
+ENTRY(fork_trampoline)
+ movq %r12,%rdi /* function */
+ movq %rbx,%rsi /* arg1 */
+ movq %rsp,%rdx /* trapframe pointer */
+ call fork_exit
+ MEXITCOUNT
+ jmp doreti /* Handle any ASTs */
+
+/*
+ * To efficiently implement classification of trap and interrupt handlers
+ * for profiling, there must be only trap handlers between the labels btrap
+ * and bintr, and only interrupt handlers between the labels bintr and
+ * eintr. This is implemented (partly) by including files that contain
+ * some of the handlers. Before including the files, set up a normal asm
+ * environment so that the included files don't need to know that they are
+ * included.
+ */
+
+#ifdef COMPAT_FREEBSD32
+ .data
+ .p2align 4
+ .text
+ SUPERALIGN_TEXT
+
+#include <amd64/ia32/ia32_exception.S>
+#endif
+
+ .data
+ .p2align 4
+ .text
+ SUPERALIGN_TEXT
+MCOUNT_LABEL(bintr)
+
+#include <amd64/amd64/apic_vector.S>
+
+#ifdef DEV_ATPIC
+ .data
+ .p2align 4
+ .text
+ SUPERALIGN_TEXT
+
+#include <amd64/amd64/atpic_vector.S>
+#endif
+
+ .text
+MCOUNT_LABEL(eintr)
+
+/*
+ * void doreti(struct trapframe)
+ *
+ * Handle return from interrupts, traps and syscalls.
+ */
+ .text
+ SUPERALIGN_TEXT
+ .type doreti,@function
+ .globl doreti
+doreti:
+ FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */
+ /*
+ * Check if ASTs can be handled now.
+ */
+ testb $SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */
+ jz doreti_exit /* can't handle ASTs now if not */
+
+doreti_ast:
+ /*
+ * Check for ASTs atomically with returning. Disabling CPU
+ * interrupts provides sufficient locking even in the SMP case,
+ * since we will be informed of any new ASTs by an IPI.
+ */
+ cli
+ movq PCPU(CURTHREAD),%rax
+ testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
+ je doreti_exit
+ sti
+ movq %rsp,%rdi /* pass a pointer to the trapframe */
+ call ast
+ jmp doreti_ast
+
+ /*
+ * doreti_exit: pop registers, iret.
+ *
+ * The segment register pop is a special case, since it may
+ * fault if (for example) a sigreturn specifies bad segment
+ * registers. The fault is handled in trap.c.
+ */
+doreti_exit:
+ MEXITCOUNT
+ movq PCPU(CURPCB),%r8
+
+ /*
+ * Do not reload segment registers for kernel.
+ * Since we do not reload segments registers with sane
+ * values on kernel entry, descriptors referenced by
+ * segments registers might be not valid. This is fatal
+ * for user mode, but is not a problem for the kernel.
+ */
+ testb $SEL_RPL_MASK,TF_CS(%rsp)
+ jz ld_regs
+ testl $PCB_FULL_IRET,PCB_FLAGS(%r8)
+ jz ld_regs
+ andl $~PCB_FULL_IRET,PCB_FLAGS(%r8)
+ testl $TF_HASSEGS,TF_FLAGS(%rsp)
+ je set_segs
+
+do_segs:
+ /* Restore %fs and fsbase */
+ movw TF_FS(%rsp),%ax
+ .globl ld_fs
+ld_fs:
+ movw %ax,%fs
+ cmpw $KUF32SEL,%ax
+ jne 1f
+ movl $MSR_FSBASE,%ecx
+ movl PCB_FSBASE(%r8),%eax
+ movl PCB_FSBASE+4(%r8),%edx
+ .globl ld_fsbase
+ld_fsbase:
+ wrmsr
+1:
+ /* Restore %gs and gsbase */
+ movw TF_GS(%rsp),%si
+ pushfq
+ cli
+ movl $MSR_GSBASE,%ecx
+ /* Save current kernel %gs base into %r12d:%r13d */
+ rdmsr
+ movl %eax,%r12d
+ movl %edx,%r13d
+ .globl ld_gs
+ld_gs:
+ movw %si,%gs
+ /* Save user %gs base into %r14d:%r15d */
+ rdmsr
+ movl %eax,%r14d
+ movl %edx,%r15d
+ /* Restore kernel %gs base */
+ movl %r12d,%eax
+ movl %r13d,%edx
+ wrmsr
+ popfq
+ /*
+ * Restore user %gs base, either from PCB if used for TLS, or
+ * from the previously saved msr read.
+ */
+ movl $MSR_KGSBASE,%ecx
+ cmpw $KUG32SEL,%si
+ jne 1f
+ movl PCB_GSBASE(%r8),%eax
+ movl PCB_GSBASE+4(%r8),%edx
+ jmp ld_gsbase
+1:
+ movl %r14d,%eax
+ movl %r15d,%edx
+ .globl ld_gsbase
+ld_gsbase:
+ wrmsr /* May trap if non-canonical, but only for TLS. */
+ .globl ld_es
+ld_es:
+ movw TF_ES(%rsp),%es
+ .globl ld_ds
+ld_ds:
+ movw TF_DS(%rsp),%ds
+ld_regs:
+ movq TF_RDI(%rsp),%rdi
+ movq TF_RSI(%rsp),%rsi
+ movq TF_RDX(%rsp),%rdx
+ movq TF_RCX(%rsp),%rcx
+ movq TF_R8(%rsp),%r8
+ movq TF_R9(%rsp),%r9
+ movq TF_RAX(%rsp),%rax
+ movq TF_RBX(%rsp),%rbx
+ movq TF_RBP(%rsp),%rbp
+ movq TF_R10(%rsp),%r10
+ movq TF_R11(%rsp),%r11
+ movq TF_R12(%rsp),%r12
+ movq TF_R13(%rsp),%r13
+ movq TF_R14(%rsp),%r14
+ movq TF_R15(%rsp),%r15
+ testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
+ jz 1f /* keep running with kernel GS.base */
+ cli
+ swapgs
+1:
+ addq $TF_RIP,%rsp /* skip over tf_err, tf_trapno */
+ .globl doreti_iret
+doreti_iret:
+ iretq
+
+set_segs:
+ movw $KUDSEL,%ax
+ movw %ax,TF_DS(%rsp)
+ movw %ax,TF_ES(%rsp)
+ movw $KUF32SEL,TF_FS(%rsp)
+ movw $KUG32SEL,TF_GS(%rsp)
+ jmp do_segs
+
+ /*
+ * doreti_iret_fault. Alternative return code for
+ * the case where we get a fault in the doreti_exit code
+ * above. trap() (amd64/amd64/trap.c) catches this specific
+ * case, sends the process a signal and continues in the
+ * corresponding place in the code below.
+ */
+ ALIGN_TEXT
+ .globl doreti_iret_fault
+doreti_iret_fault:
+ subq $TF_RIP,%rsp /* space including tf_err, tf_trapno */
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz 1f
+ sti
+1:
+ movw %fs,TF_FS(%rsp)
+ movw %gs,TF_GS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+ movl $TF_HASSEGS,TF_FLAGS(%rsp)
+ movq %rdi,TF_RDI(%rsp)
+ movq %rsi,TF_RSI(%rsp)
+ movq %rdx,TF_RDX(%rsp)
+ movq %rcx,TF_RCX(%rsp)
+ movq %r8,TF_R8(%rsp)
+ movq %r9,TF_R9(%rsp)
+ movq %rax,TF_RAX(%rsp)
+ movq %rbx,TF_RBX(%rsp)
+ movq %rbp,TF_RBP(%rsp)
+ movq %r10,TF_R10(%rsp)
+ movq %r11,TF_R11(%rsp)
+ movq %r12,TF_R12(%rsp)
+ movq %r13,TF_R13(%rsp)
+ movq %r14,TF_R14(%rsp)
+ movq %r15,TF_R15(%rsp)
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+ movq $0,TF_ERR(%rsp) /* XXX should be the error code */
+ movq $0,TF_ADDR(%rsp)
+ FAKE_MCOUNT(TF_RIP(%rsp))
+ jmp calltrap
+
+ ALIGN_TEXT
+ .globl ds_load_fault
+ds_load_fault:
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz 1f
+ sti
+1:
+ movq %rsp,%rdi
+ call trap
+ movw $KUDSEL,TF_DS(%rsp)
+ jmp doreti
+
+ ALIGN_TEXT
+ .globl es_load_fault
+es_load_fault:
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz 1f
+ sti
+1:
+ movq %rsp,%rdi
+ call trap
+ movw $KUDSEL,TF_ES(%rsp)
+ jmp doreti
+
+ ALIGN_TEXT
+ .globl fs_load_fault
+fs_load_fault:
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz 1f
+ sti
+1:
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+ movq %rsp,%rdi
+ call trap
+ movw $KUF32SEL,TF_FS(%rsp)
+ jmp doreti
+
+ ALIGN_TEXT
+ .globl gs_load_fault
+gs_load_fault:
+ popfq
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz 1f
+ sti
+1:
+ movq %rsp,%rdi
+ call trap
+ movw $KUG32SEL,TF_GS(%rsp)
+ jmp doreti
+
+ ALIGN_TEXT
+ .globl fsbase_load_fault
+fsbase_load_fault:
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz 1f
+ sti
+1:
+ movq %rsp,%rdi
+ call trap
+ movq PCPU(CURTHREAD),%r8
+ movq TD_PCB(%r8),%r8
+ movq $0,PCB_FSBASE(%r8)
+ jmp doreti
+
+ ALIGN_TEXT
+ .globl gsbase_load_fault
+gsbase_load_fault:
+ movl $T_PROTFLT,TF_TRAPNO(%rsp)
+ testl $PSL_I,TF_RFLAGS(%rsp)
+ jz 1f
+ sti
+1:
+ movq %rsp,%rdi
+ call trap
+ movq PCPU(CURTHREAD),%r8
+ movq TD_PCB(%r8),%r8
+ movq $0,PCB_GSBASE(%r8)
+ jmp doreti
+
+#ifdef HWPMC_HOOKS
+ ENTRY(end_exceptions)
+#endif
Index: sys/amd64/amd64/genassym.c.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/genassym.c.orig
@@ -0,0 +1,233 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1982, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_compat.h"
+#include "opt_hwpmc_hooks.h"
+#include "opt_kstack_pages.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/assym.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/proc.h>
+#ifdef HWPMC_HOOKS
+#include <sys/pmckern.h>
+#endif
+#include <sys/errno.h>
+#include <sys/mount.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/resourcevar.h>
+#include <sys/ucontext.h>
+#include <machine/tss.h>
+#include <sys/vmmeter.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <sys/proc.h>
+#include <x86/apicreg.h>
+#include <machine/cpu.h>
+#include <machine/pcb.h>
+#include <machine/sigframe.h>
+#include <machine/proc.h>
+#include <machine/segments.h>
+
+ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
+ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
+ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
+
+ASSYM(P_MD, offsetof(struct proc, p_md));
+ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));
+ASSYM(MD_LDT_SD, offsetof(struct mdproc, md_ldt_sd));
+
+ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
+ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
+ASSYM(TD_PFLAGS, offsetof(struct thread, td_pflags));
+ASSYM(TD_PROC, offsetof(struct thread, td_proc));
+ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
+
+ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
+
+ASSYM(TDP_CALLCHAIN, TDP_CALLCHAIN);
+ASSYM(TDP_KTHREAD, TDP_KTHREAD);
+
+ASSYM(PAGE_SIZE, PAGE_SIZE);
+ASSYM(NPTEPG, NPTEPG);
+ASSYM(NPDEPG, NPDEPG);
+ASSYM(addr_PTmap, addr_PTmap);
+ASSYM(addr_PDmap, addr_PDmap);
+ASSYM(addr_PDPmap, addr_PDPmap);
+ASSYM(addr_PML4map, addr_PML4map);
+ASSYM(addr_PML4pml4e, addr_PML4pml4e);
+ASSYM(PDESIZE, sizeof(pd_entry_t));
+ASSYM(PTESIZE, sizeof(pt_entry_t));
+ASSYM(PAGE_SHIFT, PAGE_SHIFT);
+ASSYM(PAGE_MASK, PAGE_MASK);
+ASSYM(PDRSHIFT, PDRSHIFT);
+ASSYM(PDPSHIFT, PDPSHIFT);
+ASSYM(PML4SHIFT, PML4SHIFT);
+ASSYM(val_KPDPI, KPDPI);
+ASSYM(val_KPML4I, KPML4I);
+ASSYM(val_PML4PML4I, PML4PML4I);
+ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
+ASSYM(KERNBASE, KERNBASE);
+ASSYM(DMAP_MIN_ADDRESS, DMAP_MIN_ADDRESS);
+ASSYM(DMAP_MAX_ADDRESS, DMAP_MAX_ADDRESS);
+
+ASSYM(PCB_R15, offsetof(struct pcb, pcb_r15));
+ASSYM(PCB_R14, offsetof(struct pcb, pcb_r14));
+ASSYM(PCB_R13, offsetof(struct pcb, pcb_r13));
+ASSYM(PCB_R12, offsetof(struct pcb, pcb_r12));
+ASSYM(PCB_RBP, offsetof(struct pcb, pcb_rbp));
+ASSYM(PCB_RSP, offsetof(struct pcb, pcb_rsp));
+ASSYM(PCB_RBX, offsetof(struct pcb, pcb_rbx));
+ASSYM(PCB_RIP, offsetof(struct pcb, pcb_rip));
+ASSYM(PCB_FSBASE, offsetof(struct pcb, pcb_fsbase));
+ASSYM(PCB_GSBASE, offsetof(struct pcb, pcb_gsbase));
+ASSYM(PCB_KGSBASE, offsetof(struct pcb, pcb_kgsbase));
+ASSYM(PCB_CR0, offsetof(struct pcb, pcb_cr0));
+ASSYM(PCB_CR2, offsetof(struct pcb, pcb_cr2));
+ASSYM(PCB_CR3, offsetof(struct pcb, pcb_cr3));
+ASSYM(PCB_CR4, offsetof(struct pcb, pcb_cr4));
+ASSYM(PCB_DR0, offsetof(struct pcb, pcb_dr0));
+ASSYM(PCB_DR1, offsetof(struct pcb, pcb_dr1));
+ASSYM(PCB_DR2, offsetof(struct pcb, pcb_dr2));
+ASSYM(PCB_DR3, offsetof(struct pcb, pcb_dr3));
+ASSYM(PCB_DR6, offsetof(struct pcb, pcb_dr6));
+ASSYM(PCB_DR7, offsetof(struct pcb, pcb_dr7));
+ASSYM(PCB_GDT, offsetof(struct pcb, pcb_gdt));
+ASSYM(PCB_IDT, offsetof(struct pcb, pcb_idt));
+ASSYM(PCB_LDT, offsetof(struct pcb, pcb_ldt));
+ASSYM(PCB_TR, offsetof(struct pcb, pcb_tr));
+ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
+ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
+ASSYM(PCB_TSSP, offsetof(struct pcb, pcb_tssp));
+ASSYM(PCB_SAVEFPU, offsetof(struct pcb, pcb_save));
+ASSYM(PCB_EFER, offsetof(struct pcb, pcb_efer));
+ASSYM(PCB_STAR, offsetof(struct pcb, pcb_star));
+ASSYM(PCB_LSTAR, offsetof(struct pcb, pcb_lstar));
+ASSYM(PCB_CSTAR, offsetof(struct pcb, pcb_cstar));
+ASSYM(PCB_SFMASK, offsetof(struct pcb, pcb_sfmask));
+ASSYM(PCB_SIZE, sizeof(struct pcb));
+ASSYM(PCB_FULL_IRET, PCB_FULL_IRET);
+ASSYM(PCB_DBREGS, PCB_DBREGS);
+ASSYM(PCB_32BIT, PCB_32BIT);
+
+ASSYM(TSS_RSP0, offsetof(struct amd64tss, tss_rsp0));
+
+ASSYM(TF_R15, offsetof(struct trapframe, tf_r15));
+ASSYM(TF_R14, offsetof(struct trapframe, tf_r14));
+ASSYM(TF_R13, offsetof(struct trapframe, tf_r13));
+ASSYM(TF_R12, offsetof(struct trapframe, tf_r12));
+ASSYM(TF_R11, offsetof(struct trapframe, tf_r11));
+ASSYM(TF_R10, offsetof(struct trapframe, tf_r10));
+ASSYM(TF_R9, offsetof(struct trapframe, tf_r9));
+ASSYM(TF_R8, offsetof(struct trapframe, tf_r8));
+ASSYM(TF_RDI, offsetof(struct trapframe, tf_rdi));
+ASSYM(TF_RSI, offsetof(struct trapframe, tf_rsi));
+ASSYM(TF_RBP, offsetof(struct trapframe, tf_rbp));
+ASSYM(TF_RBX, offsetof(struct trapframe, tf_rbx));
+ASSYM(TF_RDX, offsetof(struct trapframe, tf_rdx));
+ASSYM(TF_RCX, offsetof(struct trapframe, tf_rcx));
+ASSYM(TF_RAX, offsetof(struct trapframe, tf_rax));
+ASSYM(TF_TRAPNO, offsetof(struct trapframe, tf_trapno));
+ASSYM(TF_ADDR, offsetof(struct trapframe, tf_addr));
+ASSYM(TF_ERR, offsetof(struct trapframe, tf_err));
+ASSYM(TF_RIP, offsetof(struct trapframe, tf_rip));
+ASSYM(TF_CS, offsetof(struct trapframe, tf_cs));
+ASSYM(TF_RFLAGS, offsetof(struct trapframe, tf_rflags));
+ASSYM(TF_RSP, offsetof(struct trapframe, tf_rsp));
+ASSYM(TF_SS, offsetof(struct trapframe, tf_ss));
+ASSYM(TF_DS, offsetof(struct trapframe, tf_ds));
+ASSYM(TF_ES, offsetof(struct trapframe, tf_es));
+ASSYM(TF_FS, offsetof(struct trapframe, tf_fs));
+ASSYM(TF_GS, offsetof(struct trapframe, tf_gs));
+ASSYM(TF_FLAGS, offsetof(struct trapframe, tf_flags));
+ASSYM(TF_SIZE, sizeof(struct trapframe));
+ASSYM(TF_HASSEGS, TF_HASSEGS);
+
+ASSYM(SIGF_HANDLER, offsetof(struct sigframe, sf_ahu.sf_handler));
+ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc));
+ASSYM(UC_EFLAGS, offsetof(ucontext_t, uc_mcontext.mc_rflags));
+ASSYM(ENOENT, ENOENT);
+ASSYM(EFAULT, EFAULT);
+ASSYM(ENAMETOOLONG, ENAMETOOLONG);
+ASSYM(MAXCOMLEN, MAXCOMLEN);
+ASSYM(MAXPATHLEN, MAXPATHLEN);
+ASSYM(PC_SIZEOF, sizeof(struct pcpu));
+ASSYM(PC_PRVSPACE, offsetof(struct pcpu, pc_prvspace));
+ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
+ASSYM(PC_FPCURTHREAD, offsetof(struct pcpu, pc_fpcurthread));
+ASSYM(PC_IDLETHREAD, offsetof(struct pcpu, pc_idlethread));
+ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
+ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
+ASSYM(PC_SCRATCH_RSP, offsetof(struct pcpu, pc_scratch_rsp));
+ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));
+ASSYM(PC_TSSP, offsetof(struct pcpu, pc_tssp));
+ASSYM(PC_RSP0, offsetof(struct pcpu, pc_rsp0));
+ASSYM(PC_FS32P, offsetof(struct pcpu, pc_fs32p));
+ASSYM(PC_GS32P, offsetof(struct pcpu, pc_gs32p));
+ASSYM(PC_LDT, offsetof(struct pcpu, pc_ldt));
+ASSYM(PC_COMMONTSSP, offsetof(struct pcpu, pc_commontssp));
+ASSYM(PC_TSS, offsetof(struct pcpu, pc_tss));
+ASSYM(PC_PM_SAVE_CNT, offsetof(struct pcpu, pc_pm_save_cnt));
+
+ASSYM(LA_EOI, LAPIC_EOI * LAPIC_MEM_MUL);
+ASSYM(LA_ISR, LAPIC_ISR0 * LAPIC_MEM_MUL);
+
+ASSYM(KCSEL, GSEL(GCODE_SEL, SEL_KPL));
+ASSYM(KDSEL, GSEL(GDATA_SEL, SEL_KPL));
+ASSYM(KUCSEL, GSEL(GUCODE_SEL, SEL_UPL));
+ASSYM(KUDSEL, GSEL(GUDATA_SEL, SEL_UPL));
+ASSYM(KUC32SEL, GSEL(GUCODE32_SEL, SEL_UPL));
+ASSYM(KUF32SEL, GSEL(GUFS32_SEL, SEL_UPL));
+ASSYM(KUG32SEL, GSEL(GUGS32_SEL, SEL_UPL));
+ASSYM(TSSSEL, GSEL(GPROC0_SEL, SEL_KPL));
+ASSYM(LDTSEL, GSEL(GUSERLDT_SEL, SEL_KPL));
+ASSYM(SEL_RPL_MASK, SEL_RPL_MASK);
+
+ASSYM(__FreeBSD_version, __FreeBSD_version);
+
+#ifdef HWPMC_HOOKS
+ASSYM(PMC_FN_USER_CALLCHAIN, PMC_FN_USER_CALLCHAIN);
+#endif
Index: sys/amd64/amd64/locore.S
===================================================================
--- sys/amd64/amd64/locore.S
+++ sys/amd64/amd64/locore.S
@@ -31,7 +31,7 @@
#include <machine/pmap.h>
#include <machine/specialreg.h>
-#include "assym.s"
+#include "assym.S"
/*
* Compiled KERNBASE location
Index: sys/amd64/amd64/machdep.c.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/machdep.c.orig
@@ -0,0 +1,2618 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 2003 Peter Wemm.
+ * Copyright (c) 1992 Terrence R. Lambert.
+ * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_atpic.h"
+#include "opt_compat.h"
+#include "opt_cpu.h"
+#include "opt_ddb.h"
+#include "opt_inet.h"
+#include "opt_isa.h"
+#include "opt_kstack_pages.h"
+#include "opt_maxmem.h"
+#include "opt_mp_watchdog.h"
+#include "opt_platform.h"
+#include "opt_sched.h"
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/callout.h>
+#include <sys/cons.h>
+#include <sys/cpu.h>
+#include <sys/efi.h>
+#include <sys/eventhandler.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/memrange.h>
+#include <sys/msgbuf.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/ptrace.h>
+#include <sys/reboot.h>
+#include <sys/rwlock.h>
+#include <sys/sched.h>
+#include <sys/signalvar.h>
+#ifdef SMP
+#include <sys/smp.h>
+#endif
+#include <sys/syscallsubr.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <sys/ucontext.h>
+#include <sys/vmmeter.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_param.h>
+
+#ifdef DDB
+#ifndef KDB
+#error KDB must be enabled in order for DDB to work!
+#endif
+#include <ddb/ddb.h>
+#include <ddb/db_sym.h>
+#endif
+
+#include <net/netisr.h>
+
+#include <machine/clock.h>
+#include <machine/cpu.h>
+#include <machine/cputypes.h>
+#include <machine/frame.h>
+#include <machine/intr_machdep.h>
+#include <x86/mca.h>
+#include <machine/md_var.h>
+#include <machine/metadata.h>
+#include <machine/mp_watchdog.h>
+#include <machine/pc/bios.h>
+#include <machine/pcb.h>
+#include <machine/proc.h>
+#include <machine/reg.h>
+#include <machine/sigframe.h>
+#include <machine/specialreg.h>
+#include <machine/tss.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+#ifdef FDT
+#include <x86/fdt.h>
+#endif
+
+#ifdef DEV_ATPIC
+#include <x86/isa/icu.h>
+#else
+#include <x86/apicvar.h>
+#endif
+
+#include <isa/isareg.h>
+#include <isa/rtc.h>
+#include <x86/init.h>
+
+/* Sanity check for __curthread() */
+CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
+
+/*
+ * The PTI trampoline stack needs enough space for a hardware trapframe and a
+ * couple of scratch registers, as well as the trapframe left behind after an
+ * iret fault.
+ */
+CTASSERT(PC_PTI_STACK_SZ * sizeof(register_t) >= 2 * sizeof(struct pti_frame) -
+ offsetof(struct pti_frame, pti_rip));
+
+extern u_int64_t hammer_time(u_int64_t, u_int64_t);
+
+#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
+#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
+
+static void cpu_startup(void *);
+static void get_fpcontext(struct thread *td, mcontext_t *mcp,
+ char *xfpusave, size_t xfpusave_len);
+static int set_fpcontext(struct thread *td, mcontext_t *mcp,
+ char *xfpustate, size_t xfpustate_len);
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
+
+/* Preload data parse function */
+static caddr_t native_parse_preload_data(u_int64_t);
+
+/* Native function to fetch and parse the e820 map */
+static void native_parse_memmap(caddr_t, vm_paddr_t *, int *);
+
+/* Default init_ops implementation. */
+struct init_ops init_ops = {
+ .parse_preload_data = native_parse_preload_data,
+ .early_clock_source_init = i8254_init,
+ .early_delay = i8254_delay,
+ .parse_memmap = native_parse_memmap,
+#ifdef SMP
+ .mp_bootaddress = mp_bootaddress,
+ .start_all_aps = native_start_all_aps,
+#endif
+ .msi_init = msi_init,
+};
+
+/*
+ * The file "conf/ldscript.amd64" defines the symbol "kernphys". Its value is
+ * the physical address at which the kernel is loaded.
+ */
+extern char kernphys[];
+
+/*
+ * Physical address of the EFI System Table. Stashed from the metadata hints
+ * passed into the kernel and used by the EFI code to call runtime services.
+ */
+vm_paddr_t efi_systbl_phys;
+
+/* Intel ICH registers */
+#define ICH_PMBASE 0x400
+#define ICH_SMI_EN ICH_PMBASE + 0x30
+
+int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel;
+
+int cold = 1;
+
+long Maxmem = 0;
+long realmem = 0;
+
+/*
+ * The number of PHYSMAP entries must be one less than the number of
+ * PHYSSEG entries because the PHYSMAP entry that spans the largest
+ * physical address that is accessible by ISA DMA is split into two
+ * PHYSSEG entries.
+ */
+#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
+
+vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
+vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
+
+/* must be 2 less so 0 0 can signal end of chunks */
+#define PHYS_AVAIL_ARRAY_END (nitems(phys_avail) - 2)
+#define DUMP_AVAIL_ARRAY_END (nitems(dump_avail) - 2)
+
+struct kva_md_info kmi;
+
+static struct trapframe proc0_tf;
+struct region_descriptor r_gdt, r_idt;
+
+struct pcpu __pcpu[MAXCPU];
+
+struct mtx icu_lock;
+
+struct mem_range_softc mem_range_softc;
+
+struct mtx dt_lock; /* lock for GDT and LDT */
+
+void (*vmm_resume_p)(void);
+
+static void
+cpu_startup(dummy)
+ void *dummy;
+{
+ uintmax_t memsize;
+ char *sysenv;
+
+ /*
+ * On MacBooks, we need to disallow the legacy USB circuit to
+ * generate an SMI# because this can cause several problems,
+ * namely: incorrect CPU frequency detection and failure to
+ * start the APs.
+ * We do this by disabling a bit in the SMI_EN (SMI Control and
+ * Enable register) of the Intel ICH LPC Interface Bridge.
+ */
+ sysenv = kern_getenv("smbios.system.product");
+ if (sysenv != NULL) {
+ if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
+ strncmp(sysenv, "MacBook3,1", 10) == 0 ||
+ strncmp(sysenv, "MacBook4,1", 10) == 0 ||
+ strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
+ strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
+ strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
+ strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
+ strncmp(sysenv, "Macmini1,1", 10) == 0) {
+ if (bootverbose)
+ printf("Disabling LEGACY_USB_EN bit on "
+ "Intel ICH.\n");
+ outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
+ }
+ freeenv(sysenv);
+ }
+
+ /*
+ * Good {morning,afternoon,evening,night}.
+ */
+ startrtclock();
+ printcpuinfo();
+
+ /*
+ * Display physical memory if SMBIOS reports reasonable amount.
+ */
+ memsize = 0;
+ sysenv = kern_getenv("smbios.memory.enabled");
+ if (sysenv != NULL) {
+ memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
+ freeenv(sysenv);
+ }
+ if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
+ memsize = ptoa((uintmax_t)Maxmem);
+ printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
+ realmem = atop(memsize);
+
+ /*
+ * Display any holes after the first chunk of extended memory.
+ */
+ if (bootverbose) {
+ int indx;
+
+ printf("Physical memory chunk(s):\n");
+ for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
+ vm_paddr_t size;
+
+ size = phys_avail[indx + 1] - phys_avail[indx];
+ printf(
+ "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
+ (uintmax_t)phys_avail[indx],
+ (uintmax_t)phys_avail[indx + 1] - 1,
+ (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
+ }
+ }
+
+ vm_ksubmap_init(&kmi);
+
+ printf("avail memory = %ju (%ju MB)\n",
+ ptoa((uintmax_t)vm_cnt.v_free_count),
+ ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
+
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
+ vm_pager_bufferinit();
+
+ cpu_setregs();
+}
+
+/*
+ * Send an interrupt to process.
+ *
+ * Stack is set up to allow sigcode stored
+ * at top to call routine, followed by call
+ * to sigreturn routine below. After sigreturn
+ * resets the signal mask, the stack, and the
+ * frame pointer, it returns to the user
+ * specified pc, psl.
+ */
+void
+sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
+{
+ struct sigframe sf, *sfp;
+ struct pcb *pcb;
+ struct proc *p;
+ struct thread *td;
+ struct sigacts *psp;
+ char *sp;
+ struct trapframe *regs;
+ char *xfpusave;
+ size_t xfpusave_len;
+ int sig;
+ int oonstack;
+
+ td = curthread;
+ pcb = td->td_pcb;
+ p = td->td_proc;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+ sig = ksi->ksi_signo;
+ psp = p->p_sigacts;
+ mtx_assert(&psp->ps_mtx, MA_OWNED);
+ regs = td->td_frame;
+ oonstack = sigonstack(regs->tf_rsp);
+
+ if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
+ xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
+ xfpusave = __builtin_alloca(xfpusave_len);
+ } else {
+ xfpusave_len = 0;
+ xfpusave = NULL;
+ }
+
+ /* Save user context. */
+ bzero(&sf, sizeof(sf));
+ sf.sf_uc.uc_sigmask = *mask;
+ sf.sf_uc.uc_stack = td->td_sigstk;
+ sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
+ ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
+ sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
+ bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
+ sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
+ get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
+ fpstate_drop(td);
+ update_pcb_bases(pcb);
+ sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
+ sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
+ bzero(sf.sf_uc.uc_mcontext.mc_spare,
+ sizeof(sf.sf_uc.uc_mcontext.mc_spare));
+ bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
+
+ /* Allocate space for the signal handler context. */
+ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
+ SIGISMEMBER(psp->ps_sigonstack, sig)) {
+ sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
+#if defined(COMPAT_43)
+ td->td_sigstk.ss_flags |= SS_ONSTACK;
+#endif
+ } else
+ sp = (char *)regs->tf_rsp - 128;
+ if (xfpusave != NULL) {
+ sp -= xfpusave_len;
+ sp = (char *)((unsigned long)sp & ~0x3Ful);
+ sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
+ }
+ sp -= sizeof(struct sigframe);
+ /* Align to 16 bytes. */
+ sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);
+
+ /* Build the argument list for the signal handler. */
+ regs->tf_rdi = sig; /* arg 1 in %rdi */
+ regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */
+ bzero(&sf.sf_si, sizeof(sf.sf_si));
+ if (SIGISMEMBER(psp->ps_siginfo, sig)) {
+ /* Signal handler installed with SA_SIGINFO. */
+ regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */
+ sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
+
+ /* Fill in POSIX parts */
+ sf.sf_si = ksi->ksi_info;
+ sf.sf_si.si_signo = sig; /* maybe a translated signal */
+ regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
+ } else {
+ /* Old FreeBSD-style arguments. */
+ regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */
+ regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
+ sf.sf_ahu.sf_handler = catcher;
+ }
+ mtx_unlock(&psp->ps_mtx);
+ PROC_UNLOCK(p);
+
+ /*
+ * Copy the sigframe out to the user's stack.
+ */
+ if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
+ (xfpusave != NULL && copyout(xfpusave,
+ (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
+ != 0)) {
+#ifdef DEBUG
+ printf("process %ld has trashed its stack\n", (long)p->p_pid);
+#endif
+ PROC_LOCK(p);
+ sigexit(td, SIGILL);
+ }
+
+ regs->tf_rsp = (long)sfp;
+ regs->tf_rip = p->p_sysent->sv_sigcode_base;
+ regs->tf_rflags &= ~(PSL_T | PSL_D);
+ regs->tf_cs = _ucodesel;
+ regs->tf_ds = _udatasel;
+ regs->tf_ss = _udatasel;
+ regs->tf_es = _udatasel;
+ regs->tf_fs = _ufssel;
+ regs->tf_gs = _ugssel;
+ regs->tf_flags = TF_HASSEGS;
+ PROC_LOCK(p);
+ mtx_lock(&psp->ps_mtx);
+}
+
+/*
+ * System call to cleanup state after a signal
+ * has been taken. Reset signal mask and
+ * stack state from context left by sendsig (above).
+ * Return to previous pc and psl as specified by
+ * context left by sendsig. Check carefully to
+ * make sure that the user has not modified the
+ * state to gain improper privileges.
+ *
+ * MPSAFE
+ */
+int
+sys_sigreturn(td, uap)
+ struct thread *td;
+ struct sigreturn_args /* {
+ const struct __ucontext *sigcntxp;
+ } */ *uap;
+{
+ ucontext_t uc;
+ struct pcb *pcb;
+ struct proc *p;
+ struct trapframe *regs;
+ ucontext_t *ucp;
+ char *xfpustate;
+ size_t xfpustate_len;
+ long rflags;
+ int cs, error, ret;
+ ksiginfo_t ksi;
+
+ pcb = td->td_pcb;
+ p = td->td_proc;
+
+ error = copyin(uap->sigcntxp, &uc, sizeof(uc));
+ if (error != 0) {
+ uprintf("pid %d (%s): sigreturn copyin failed\n",
+ p->p_pid, td->td_name);
+ return (error);
+ }
+ ucp = &uc;
+ if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
+ uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
+ td->td_name, ucp->uc_mcontext.mc_flags);
+ return (EINVAL);
+ }
+ regs = td->td_frame;
+ rflags = ucp->uc_mcontext.mc_rflags;
+ /*
+ * Don't allow users to change privileged or reserved flags.
+ */
+ if (!EFL_SECURE(rflags, regs->tf_rflags)) {
+ uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
+ td->td_name, rflags);
+ return (EINVAL);
+ }
+
+ /*
+ * Don't allow users to load a valid privileged %cs. Let the
+ * hardware check for invalid selectors, excess privilege in
+ * other selectors, invalid %eip's and invalid %esp's.
+ */
+ cs = ucp->uc_mcontext.mc_cs;
+ if (!CS_SECURE(cs)) {
+ uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
+ td->td_name, cs);
+ ksiginfo_init_trap(&ksi);
+ ksi.ksi_signo = SIGBUS;
+ ksi.ksi_code = BUS_OBJERR;
+ ksi.ksi_trapno = T_PROTFLT;
+ ksi.ksi_addr = (void *)regs->tf_rip;
+ trapsignal(td, &ksi);
+ return (EINVAL);
+ }
+
+ if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
+ xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
+ if (xfpustate_len > cpu_max_ext_state_size -
+ sizeof(struct savefpu)) {
+ uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
+ p->p_pid, td->td_name, xfpustate_len);
+ return (EINVAL);
+ }
+ xfpustate = __builtin_alloca(xfpustate_len);
+ error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
+ xfpustate, xfpustate_len);
+ if (error != 0) {
+ uprintf(
+ "pid %d (%s): sigreturn copying xfpustate failed\n",
+ p->p_pid, td->td_name);
+ return (error);
+ }
+ } else {
+ xfpustate = NULL;
+ xfpustate_len = 0;
+ }
+ ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len);
+ if (ret != 0) {
+ uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
+ p->p_pid, td->td_name, ret);
+ return (ret);
+ }
+ bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
+ update_pcb_bases(pcb);
+ pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
+ pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;
+
+#if defined(COMPAT_43)
+ if (ucp->uc_mcontext.mc_onstack & 1)
+ td->td_sigstk.ss_flags |= SS_ONSTACK;
+ else
+ td->td_sigstk.ss_flags &= ~SS_ONSTACK;
+#endif
+
+ kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
+ return (EJUSTRETURN);
+}
+
+#ifdef COMPAT_FREEBSD4
+int
+freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
+{
+
+ return sys_sigreturn(td, (struct sigreturn_args *)uap);
+}
+#endif
+
+/*
+ * Reset registers to default values on exec.
+ */
+void
+exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
+{
+ struct trapframe *regs = td->td_frame;
+ struct pcb *pcb = td->td_pcb;
+
+ if (td->td_proc->p_md.md_ldt != NULL)
+ user_ldt_free(td);
+
+ update_pcb_bases(pcb);
+ pcb->pcb_fsbase = 0;
+ pcb->pcb_gsbase = 0;
+ clear_pcb_flags(pcb, PCB_32BIT);
+ pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
+
+ bzero((char *)regs, sizeof(struct trapframe));
+ regs->tf_rip = imgp->entry_addr;
+ regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
+ regs->tf_rdi = stack; /* argv */
+ regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
+ regs->tf_ss = _udatasel;
+ regs->tf_cs = _ucodesel;
+ regs->tf_ds = _udatasel;
+ regs->tf_es = _udatasel;
+ regs->tf_fs = _ufssel;
+ regs->tf_gs = _ugssel;
+ regs->tf_flags = TF_HASSEGS;
+
+ /*
+ * Reset the hardware debug registers if they were in use.
+ * They won't have any meaning for the newly exec'd process.
+ */
+ if (pcb->pcb_flags & PCB_DBREGS) {
+ pcb->pcb_dr0 = 0;
+ pcb->pcb_dr1 = 0;
+ pcb->pcb_dr2 = 0;
+ pcb->pcb_dr3 = 0;
+ pcb->pcb_dr6 = 0;
+ pcb->pcb_dr7 = 0;
+ if (pcb == curpcb) {
+ /*
+ * Clear the debug registers on the running
+ * CPU, otherwise they will end up affecting
+ * the next process we switch to.
+ */
+ reset_dbregs();
+ }
+ clear_pcb_flags(pcb, PCB_DBREGS);
+ }
+
+ /*
+ * Drop the FP state if we hold it, so that the process gets a
+ * clean FP state if it uses the FPU again.
+ */
+ fpstate_drop(td);
+}
+
+void
+cpu_setregs(void)
+{
+ register_t cr0;
+
+ cr0 = rcr0();
+ /*
+ * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
+ * BSP. See the comments there about why we set them.
+ */
+ cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
+ load_cr0(cr0);
+}
+
+/*
+ * Initialize amd64 and configure to run kernel
+ */
+
+/*
+ * Initialize segments & interrupt table
+ */
+
+struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
+static struct gate_descriptor idt0[NIDT];
+struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
+
+static char dblfault_stack[PAGE_SIZE] __aligned(16);
+
+static char nmi0_stack[PAGE_SIZE] __aligned(16);
+CTASSERT(sizeof(struct nmi_pcpu) == 16);
+
+struct amd64tss common_tss[MAXCPU];
+
+/*
+ * Software prototypes -- in more palatable form.
+ *
+ * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
+ * slots as corresponding segments for i386 kernel.
+ */
+struct soft_segment_descriptor gdt_segs[] = {
+/* GNULL_SEL 0 Null Descriptor */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0x0,
+ .ssd_type = 0,
+ .ssd_dpl = 0,
+ .ssd_p = 0,
+ .ssd_long = 0,
+ .ssd_def32 = 0,
+ .ssd_gran = 0 },
+/* GNULL2_SEL 1 Null Descriptor */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0x0,
+ .ssd_type = 0,
+ .ssd_dpl = 0,
+ .ssd_p = 0,
+ .ssd_long = 0,
+ .ssd_def32 = 0,
+ .ssd_gran = 0 },
+/* GUFS32_SEL 2 32 bit %gs Descriptor for user */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0xfffff,
+ .ssd_type = SDT_MEMRWA,
+ .ssd_dpl = SEL_UPL,
+ .ssd_p = 1,
+ .ssd_long = 0,
+ .ssd_def32 = 1,
+ .ssd_gran = 1 },
+/* GUGS32_SEL 3 32 bit %fs Descriptor for user */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0xfffff,
+ .ssd_type = SDT_MEMRWA,
+ .ssd_dpl = SEL_UPL,
+ .ssd_p = 1,
+ .ssd_long = 0,
+ .ssd_def32 = 1,
+ .ssd_gran = 1 },
+/* GCODE_SEL 4 Code Descriptor for kernel */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0xfffff,
+ .ssd_type = SDT_MEMERA,
+ .ssd_dpl = SEL_KPL,
+ .ssd_p = 1,
+ .ssd_long = 1,
+ .ssd_def32 = 0,
+ .ssd_gran = 1 },
+/* GDATA_SEL 5 Data Descriptor for kernel */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0xfffff,
+ .ssd_type = SDT_MEMRWA,
+ .ssd_dpl = SEL_KPL,
+ .ssd_p = 1,
+ .ssd_long = 1,
+ .ssd_def32 = 0,
+ .ssd_gran = 1 },
+/* GUCODE32_SEL 6 32 bit Code Descriptor for user */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0xfffff,
+ .ssd_type = SDT_MEMERA,
+ .ssd_dpl = SEL_UPL,
+ .ssd_p = 1,
+ .ssd_long = 0,
+ .ssd_def32 = 1,
+ .ssd_gran = 1 },
+/* GUDATA_SEL 7 32/64 bit Data Descriptor for user */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0xfffff,
+ .ssd_type = SDT_MEMRWA,
+ .ssd_dpl = SEL_UPL,
+ .ssd_p = 1,
+ .ssd_long = 0,
+ .ssd_def32 = 1,
+ .ssd_gran = 1 },
+/* GUCODE_SEL 8 64 bit Code Descriptor for user */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0xfffff,
+ .ssd_type = SDT_MEMERA,
+ .ssd_dpl = SEL_UPL,
+ .ssd_p = 1,
+ .ssd_long = 1,
+ .ssd_def32 = 0,
+ .ssd_gran = 1 },
+/* GPROC0_SEL 9 Proc 0 Tss Descriptor */
+{ .ssd_base = 0x0,
+ .ssd_limit = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE - 1,
+ .ssd_type = SDT_SYSTSS,
+ .ssd_dpl = SEL_KPL,
+ .ssd_p = 1,
+ .ssd_long = 0,
+ .ssd_def32 = 0,
+ .ssd_gran = 0 },
+/* Actually, the TSS is a system descriptor which is double size */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0x0,
+ .ssd_type = 0,
+ .ssd_dpl = 0,
+ .ssd_p = 0,
+ .ssd_long = 0,
+ .ssd_def32 = 0,
+ .ssd_gran = 0 },
+/* GUSERLDT_SEL 11 LDT Descriptor */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0x0,
+ .ssd_type = 0,
+ .ssd_dpl = 0,
+ .ssd_p = 0,
+ .ssd_long = 0,
+ .ssd_def32 = 0,
+ .ssd_gran = 0 },
+/* GUSERLDT_SEL 12 LDT Descriptor, double size */
+{ .ssd_base = 0x0,
+ .ssd_limit = 0x0,
+ .ssd_type = 0,
+ .ssd_dpl = 0,
+ .ssd_p = 0,
+ .ssd_long = 0,
+ .ssd_def32 = 0,
+ .ssd_gran = 0 },
+};
+
+void
+setidt(int idx, inthand_t *func, int typ, int dpl, int ist)
+{
+ struct gate_descriptor *ip;
+
+ ip = idt + idx;
+ ip->gd_looffset = (uintptr_t)func;
+ ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
+ ip->gd_ist = ist;
+ ip->gd_xx = 0;
+ ip->gd_type = typ;
+ ip->gd_dpl = dpl;
+ ip->gd_p = 1;
+ ip->gd_hioffset = ((uintptr_t)func)>>16 ;
+}
+
+extern inthand_t
+ IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
+ IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
+ IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
+ IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
+ IDTVEC(xmm), IDTVEC(dblfault),
+ IDTVEC(div_pti), IDTVEC(dbg_pti), IDTVEC(bpt_pti),
+ IDTVEC(ofl_pti), IDTVEC(bnd_pti), IDTVEC(ill_pti), IDTVEC(dna_pti),
+ IDTVEC(fpusegm_pti), IDTVEC(tss_pti), IDTVEC(missing_pti),
+ IDTVEC(stk_pti), IDTVEC(prot_pti), IDTVEC(page_pti), IDTVEC(mchk_pti),
+ IDTVEC(rsvd_pti), IDTVEC(fpu_pti), IDTVEC(align_pti),
+ IDTVEC(xmm_pti),
+#ifdef KDTRACE_HOOKS
+ IDTVEC(dtrace_ret), IDTVEC(dtrace_ret_pti),
+#endif
+#ifdef XENHVM
+ IDTVEC(xen_intr_upcall), IDTVEC(xen_intr_upcall_pti),
+#endif
+ IDTVEC(fast_syscall), IDTVEC(fast_syscall32),
+ IDTVEC(fast_syscall_pti);
+
+#ifdef DDB
+/*
+ * Display the index and function name of any IDT entries that don't use
+ * the default 'rsvd' entry point.
+ */
+DB_SHOW_COMMAND(idt, db_show_idt)
+{
+ struct gate_descriptor *ip;
+ int idx;
+ uintptr_t func;
+
+ ip = idt;
+ for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
+ func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
+ if (func != (uintptr_t)&IDTVEC(rsvd)) {
+ db_printf("%3d\t", idx);
+ db_printsym(func, DB_STGY_PROC);
+ db_printf("\n");
+ }
+ ip++;
+ }
+}
+
+/* Show privileged registers. */
+DB_SHOW_COMMAND(sysregs, db_show_sysregs)
+{
+ struct {
+ uint16_t limit;
+ uint64_t base;
+ } __packed idtr, gdtr;
+ uint16_t ldt, tr;
+
+ __asm __volatile("sidt %0" : "=m" (idtr));
+ db_printf("idtr\t0x%016lx/%04x\n",
+ (u_long)idtr.base, (u_int)idtr.limit);
+ __asm __volatile("sgdt %0" : "=m" (gdtr));
+ db_printf("gdtr\t0x%016lx/%04x\n",
+ (u_long)gdtr.base, (u_int)gdtr.limit);
+ __asm __volatile("sldt %0" : "=r" (ldt));
+ db_printf("ldtr\t0x%04x\n", ldt);
+ __asm __volatile("str %0" : "=r" (tr));
+ db_printf("tr\t0x%04x\n", tr);
+ db_printf("cr0\t0x%016lx\n", rcr0());
+ db_printf("cr2\t0x%016lx\n", rcr2());
+ db_printf("cr3\t0x%016lx\n", rcr3());
+ db_printf("cr4\t0x%016lx\n", rcr4());
+ if (rcr4() & CR4_XSAVE)
+ db_printf("xcr0\t0x%016lx\n", rxcr(0));
+ db_printf("EFER\t0x%016lx\n", rdmsr(MSR_EFER));
+ if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
+ db_printf("FEATURES_CTL\t%016lx\n",
+ rdmsr(MSR_IA32_FEATURE_CONTROL));
+ db_printf("DEBUG_CTL\t0x%016lx\n", rdmsr(MSR_DEBUGCTLMSR));
+ db_printf("PAT\t0x%016lx\n", rdmsr(MSR_PAT));
+ db_printf("GSBASE\t0x%016lx\n", rdmsr(MSR_GSBASE));
+}
+
+DB_SHOW_COMMAND(dbregs, db_show_dbregs)
+{
+
+ db_printf("dr0\t0x%016lx\n", rdr0());
+ db_printf("dr1\t0x%016lx\n", rdr1());
+ db_printf("dr2\t0x%016lx\n", rdr2());
+ db_printf("dr3\t0x%016lx\n", rdr3());
+ db_printf("dr6\t0x%016lx\n", rdr6());
+ db_printf("dr7\t0x%016lx\n", rdr7());
+}
+#endif
+
+void
+sdtossd(sd, ssd)
+ struct user_segment_descriptor *sd;
+ struct soft_segment_descriptor *ssd;
+{
+
+ ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
+ ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
+ ssd->ssd_type = sd->sd_type;
+ ssd->ssd_dpl = sd->sd_dpl;
+ ssd->ssd_p = sd->sd_p;
+ ssd->ssd_long = sd->sd_long;
+ ssd->ssd_def32 = sd->sd_def32;
+ ssd->ssd_gran = sd->sd_gran;
+}
+
+void
+ssdtosd(ssd, sd)
+ struct soft_segment_descriptor *ssd;
+ struct user_segment_descriptor *sd;
+{
+
+ sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
+ sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
+ sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
+ sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
+ sd->sd_type = ssd->ssd_type;
+ sd->sd_dpl = ssd->ssd_dpl;
+ sd->sd_p = ssd->ssd_p;
+ sd->sd_long = ssd->ssd_long;
+ sd->sd_def32 = ssd->ssd_def32;
+ sd->sd_gran = ssd->ssd_gran;
+}
+
+void
+ssdtosyssd(ssd, sd)
+ struct soft_segment_descriptor *ssd;
+ struct system_segment_descriptor *sd;
+{
+
+ sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
+ sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
+ sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
+ sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
+ sd->sd_type = ssd->ssd_type;
+ sd->sd_dpl = ssd->ssd_dpl;
+ sd->sd_p = ssd->ssd_p;
+ sd->sd_gran = ssd->ssd_gran;
+}
+
+#if !defined(DEV_ATPIC) && defined(DEV_ISA)
+#include <isa/isavar.h>
+#include <isa/isareg.h>
+/*
+ * Return a bitmap of the current interrupt requests. This is 8259-specific
+ * and is only suitable for use at probe time.
+ * This is only here to pacify sio. It is NOT FATAL if this doesn't work.
+ * It shouldn't be here. There should probably be an APIC centric
+ * implementation in the apic driver code, if at all.
+ */
+intrmask_t
+isa_irq_pending(void)
+{
+ u_char irr1;
+ u_char irr2;
+
+ irr1 = inb(IO_ICU1);
+ irr2 = inb(IO_ICU2);
+ return ((irr2 << 8) | irr1);
+}
+#endif
+
+u_int basemem;
+
+static int
+add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
+ int *physmap_idxp)
+{
+ int i, insert_idx, physmap_idx;
+
+ physmap_idx = *physmap_idxp;
+
+ if (length == 0)
+ return (1);
+
+ /*
+ * Find insertion point while checking for overlap. Start off by
+ * assuming the new entry will be added to the end.
+ *
+ * NB: physmap_idx points to the next free slot.
+ */
+ insert_idx = physmap_idx;
+ for (i = 0; i <= physmap_idx; i += 2) {
+ if (base < physmap[i + 1]) {
+ if (base + length <= physmap[i]) {
+ insert_idx = i;
+ break;
+ }
+ if (boothowto & RB_VERBOSE)
+ printf(
+ "Overlapping memory regions, ignoring second region\n");
+ return (1);
+ }
+ }
+
+ /* See if we can prepend to the next entry. */
+ if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
+ physmap[insert_idx] = base;
+ return (1);
+ }
+
+ /* See if we can append to the previous entry. */
+ if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
+ physmap[insert_idx - 1] += length;
+ return (1);
+ }
+
+ physmap_idx += 2;
+ *physmap_idxp = physmap_idx;
+ if (physmap_idx == PHYSMAP_SIZE) {
+ printf(
+ "Too many segments in the physical address map, giving up\n");
+ return (0);
+ }
+
+ /*
+ * Move the last 'N' entries down to make room for the new
+ * entry if needed.
+ */
+ for (i = (physmap_idx - 2); i > insert_idx; i -= 2) {
+ physmap[i] = physmap[i - 2];
+ physmap[i + 1] = physmap[i - 1];
+ }
+
+ /* Insert the new entry. */
+ physmap[insert_idx] = base;
+ physmap[insert_idx + 1] = base + length;
+ return (1);
+}
+
+void
+bios_add_smap_entries(struct bios_smap *smapbase, u_int32_t smapsize,
+ vm_paddr_t *physmap, int *physmap_idx)
+{
+ struct bios_smap *smap, *smapend;
+
+ smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
+
+ for (smap = smapbase; smap < smapend; smap++) {
+ if (boothowto & RB_VERBOSE)
+ printf("SMAP type=%02x base=%016lx len=%016lx\n",
+ smap->type, smap->base, smap->length);
+
+ if (smap->type != SMAP_TYPE_MEMORY)
+ continue;
+
+ if (!add_physmap_entry(smap->base, smap->length, physmap,
+ physmap_idx))
+ break;
+ }
+}
+
+static void
+add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
+ int *physmap_idx)
+{
+ struct efi_md *map, *p;
+ const char *type;
+ size_t efisz;
+ int ndesc, i;
+
+ static const char *types[] = {
+ "Reserved",
+ "LoaderCode",
+ "LoaderData",
+ "BootServicesCode",
+ "BootServicesData",
+ "RuntimeServicesCode",
+ "RuntimeServicesData",
+ "ConventionalMemory",
+ "UnusableMemory",
+ "ACPIReclaimMemory",
+ "ACPIMemoryNVS",
+ "MemoryMappedIO",
+ "MemoryMappedIOPortSpace",
+ "PalCode",
+ "PersistentMemory"
+ };
+
+ /*
+ * Memory map data provided by UEFI via the GetMemoryMap
+ * Boot Services API.
+ */
+ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
+ map = (struct efi_md *)((uint8_t *)efihdr + efisz);
+
+ if (efihdr->descriptor_size == 0)
+ return;
+ ndesc = efihdr->memory_size / efihdr->descriptor_size;
+
+ if (boothowto & RB_VERBOSE)
+ printf("%23s %12s %12s %8s %4s\n",
+ "Type", "Physical", "Virtual", "#Pages", "Attr");
+
+ for (i = 0, p = map; i < ndesc; i++,
+ p = efi_next_descriptor(p, efihdr->descriptor_size)) {
+ if (boothowto & RB_VERBOSE) {
+ if (p->md_type < nitems(types))
+ type = types[p->md_type];
+ else
+ type = "<INVALID>";
+ printf("%23s %012lx %12p %08lx ", type, p->md_phys,
+ p->md_virt, p->md_pages);
+ if (p->md_attr & EFI_MD_ATTR_UC)
+ printf("UC ");
+ if (p->md_attr & EFI_MD_ATTR_WC)
+ printf("WC ");
+ if (p->md_attr & EFI_MD_ATTR_WT)
+ printf("WT ");
+ if (p->md_attr & EFI_MD_ATTR_WB)
+ printf("WB ");
+ if (p->md_attr & EFI_MD_ATTR_UCE)
+ printf("UCE ");
+ if (p->md_attr & EFI_MD_ATTR_WP)
+ printf("WP ");
+ if (p->md_attr & EFI_MD_ATTR_RP)
+ printf("RP ");
+ if (p->md_attr & EFI_MD_ATTR_XP)
+ printf("XP ");
+ if (p->md_attr & EFI_MD_ATTR_NV)
+ printf("NV ");
+ if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
+ printf("MORE_RELIABLE ");
+ if (p->md_attr & EFI_MD_ATTR_RO)
+ printf("RO ");
+ if (p->md_attr & EFI_MD_ATTR_RT)
+ printf("RUNTIME");
+ printf("\n");
+ }
+
+ switch (p->md_type) {
+ case EFI_MD_TYPE_CODE:
+ case EFI_MD_TYPE_DATA:
+ case EFI_MD_TYPE_BS_CODE:
+ case EFI_MD_TYPE_BS_DATA:
+ case EFI_MD_TYPE_FREE:
+ /*
+ * We're allowed to use any entry with these types.
+ */
+ break;
+ default:
+ continue;
+ }
+
+ if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
+ physmap, physmap_idx))
+ break;
+ }
+}
+
+static char bootmethod[16] = "";
+SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
+ "System firmware boot method");
+
+static void
+native_parse_memmap(caddr_t kmdp, vm_paddr_t *physmap, int *physmap_idx)
+{
+ struct bios_smap *smap;
+ struct efi_map_header *efihdr;
+ u_int32_t size;
+
+ /*
+ * Memory map from INT 15:E820.
+ *
+ * subr_module.c says:
+ * "Consumer may safely assume that size value precedes data."
+ * ie: an int32_t immediately precedes smap.
+ */
+
+ efihdr = (struct efi_map_header *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_EFI_MAP);
+ smap = (struct bios_smap *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_SMAP);
+ if (efihdr == NULL && smap == NULL)
+ panic("No BIOS smap or EFI map info from loader!");
+
+ if (efihdr != NULL) {
+ add_efi_map_entries(efihdr, physmap, physmap_idx);
+ strlcpy(bootmethod, "UEFI", sizeof(bootmethod));
+ } else {
+ size = *((u_int32_t *)smap - 1);
+ bios_add_smap_entries(smap, size, physmap, physmap_idx);
+ strlcpy(bootmethod, "BIOS", sizeof(bootmethod));
+ }
+}
+
+#define PAGES_PER_GB (1024 * 1024 * 1024 / PAGE_SIZE)
+
+/*
+ * Populate the (physmap) array with base/bound pairs describing the
+ * available physical memory in the system, then test this memory and
+ * build the phys_avail array describing the actually-available memory.
+ *
+ * Total memory size may be set by the kernel environment variable
+ * hw.physmem or the compile-time define MAXMEM.
+ *
+ * XXX first should be vm_paddr_t.
+ */
+static void
+getmemsize(caddr_t kmdp, u_int64_t first)
+{
+ int i, physmap_idx, pa_indx, da_indx;
+ vm_paddr_t pa, physmap[PHYSMAP_SIZE];
+ u_long physmem_start, physmem_tunable, memtest;
+ pt_entry_t *pte;
+ quad_t dcons_addr, dcons_size;
+ int page_counter;
+
+ bzero(physmap, sizeof(physmap));
+ physmap_idx = 0;
+
+ init_ops.parse_memmap(kmdp, physmap, &physmap_idx);
+ physmap_idx -= 2;
+
+ /*
+ * Find the 'base memory' segment for SMP
+ */
+ basemem = 0;
+ for (i = 0; i <= physmap_idx; i += 2) {
+ if (physmap[i] <= 0xA0000) {
+ basemem = physmap[i + 1] / 1024;
+ break;
+ }
+ }
+ if (basemem == 0 || basemem > 640) {
+ if (bootverbose)
+ printf(
+ "Memory map doesn't contain a basemem segment, faking it");
+ basemem = 640;
+ }
+
+ /*
+ * Make hole for "AP -> long mode" bootstrap code. The
+ * mp_bootaddress vector is only available when the kernel
+ * is configured to support APs and APs for the system start
+ * in 32bit mode (e.g. SMP bare metal).
+ */
+ if (init_ops.mp_bootaddress) {
+ if (physmap[1] >= 0x100000000)
+ panic(
+ "Basemem segment is not suitable for AP bootstrap code!");
+ physmap[1] = init_ops.mp_bootaddress(physmap[1] / 1024);
+ }
+
+ /*
+ * Maxmem isn't the "maximum memory", it's one larger than the
+ * highest page of the physical address space. It should be
+ * called something like "Maxphyspage". We may adjust this
+ * based on ``hw.physmem'' and the results of the memory test.
+ */
+ Maxmem = atop(physmap[physmap_idx + 1]);
+
+#ifdef MAXMEM
+ Maxmem = MAXMEM / 4;
+#endif
+
+ if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
+ Maxmem = atop(physmem_tunable);
+
+ /*
+ * The boot memory test is disabled by default, as it takes a
+ * significant amount of time on large-memory systems, and is
+ * unfriendly to virtual machines as it unnecessarily touches all
+ * pages.
+ *
+ * A general name is used as the code may be extended to support
+ * additional tests beyond the current "page present" test.
+ */
+ memtest = 0;
+ TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
+
+ /*
+ * Don't allow MAXMEM or hw.physmem to extend the amount of memory
+ * in the system.
+ */
+ if (Maxmem > atop(physmap[physmap_idx + 1]))
+ Maxmem = atop(physmap[physmap_idx + 1]);
+
+ if (atop(physmap[physmap_idx + 1]) != Maxmem &&
+ (boothowto & RB_VERBOSE))
+ printf("Physical memory use set to %ldK\n", Maxmem * 4);
+
+ /* call pmap initialization to make new kernel address space */
+ pmap_bootstrap(&first);
+
+ /*
+ * Size up each available chunk of physical memory.
+ *
+ * XXX Some BIOSes corrupt low 64KB between suspend and resume.
+ * By default, mask off the first 16 pages unless we appear to be
+ * running in a VM.
+ */
+ physmem_start = (vm_guest > VM_GUEST_NO ? 1 : 16) << PAGE_SHIFT;
+ TUNABLE_ULONG_FETCH("hw.physmem.start", &physmem_start);
+ if (physmap[0] < physmem_start) {
+ if (physmem_start < PAGE_SIZE)
+ physmap[0] = PAGE_SIZE;
+ else if (physmem_start >= physmap[1])
+ physmap[0] = round_page(physmap[1] - PAGE_SIZE);
+ else
+ physmap[0] = round_page(physmem_start);
+ }
+ pa_indx = 0;
+ da_indx = 1;
+ phys_avail[pa_indx++] = physmap[0];
+ phys_avail[pa_indx] = physmap[0];
+ dump_avail[da_indx] = physmap[0];
+ pte = CMAP1;
+
+ /*
+ * Get dcons buffer address
+ */
+ if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
+ getenv_quad("dcons.size", &dcons_size) == 0)
+ dcons_addr = 0;
+
+ /*
+ * physmap is in bytes, so when converting to page boundaries,
+ * round up the start address and round down the end address.
+ */
+ page_counter = 0;
+ if (memtest != 0)
+ printf("Testing system memory");
+ for (i = 0; i <= physmap_idx; i += 2) {
+ vm_paddr_t end;
+
+ end = ptoa((vm_paddr_t)Maxmem);
+ if (physmap[i + 1] < end)
+ end = trunc_page(physmap[i + 1]);
+ for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
+ int tmp, page_bad, full;
+ int *ptr = (int *)CADDR1;
+
+ full = FALSE;
+ /*
+ * block out kernel memory as not available.
+ */
+ if (pa >= (vm_paddr_t)kernphys && pa < first)
+ goto do_dump_avail;
+
+ /*
+ * block out dcons buffer
+ */
+ if (dcons_addr > 0
+ && pa >= trunc_page(dcons_addr)
+ && pa < dcons_addr + dcons_size)
+ goto do_dump_avail;
+
+ page_bad = FALSE;
+ if (memtest == 0)
+ goto skip_memtest;
+
+ /*
+ * Print a "." every GB to show we're making
+ * progress.
+ */
+ page_counter++;
+ if ((page_counter % PAGES_PER_GB) == 0)
+ printf(".");
+
+ /*
+ * map page into kernel: valid, read/write,non-cacheable
+ */
+ *pte = pa | PG_V | PG_RW | PG_NC_PWT | PG_NC_PCD;
+ invltlb();
+
+ tmp = *(int *)ptr;
+ /*
+ * Test for alternating 1's and 0's
+ */
+ *(volatile int *)ptr = 0xaaaaaaaa;
+ if (*(volatile int *)ptr != 0xaaaaaaaa)
+ page_bad = TRUE;
+ /*
+ * Test for alternating 0's and 1's
+ */
+ *(volatile int *)ptr = 0x55555555;
+ if (*(volatile int *)ptr != 0x55555555)
+ page_bad = TRUE;
+ /*
+ * Test for all 1's
+ */
+ *(volatile int *)ptr = 0xffffffff;
+ if (*(volatile int *)ptr != 0xffffffff)
+ page_bad = TRUE;
+ /*
+ * Test for all 0's
+ */
+ *(volatile int *)ptr = 0x0;
+ if (*(volatile int *)ptr != 0x0)
+ page_bad = TRUE;
+ /*
+ * Restore original value.
+ */
+ *(int *)ptr = tmp;
+
+skip_memtest:
+ /*
+ * Adjust array of valid/good pages.
+ */
+ if (page_bad == TRUE)
+ continue;
+ /*
+ * If this good page is a continuation of the
+ * previous set of good pages, then just increase
+ * the end pointer. Otherwise start a new chunk.
+			 * Note that "end" points one past the last page,
+ * making the range >= start and < end.
+ * If we're also doing a speculative memory
+			 * test and we are at or past the end, bump up Maxmem
+ * so that we keep going. The first bad page
+ * will terminate the loop.
+ */
+ if (phys_avail[pa_indx] == pa) {
+ phys_avail[pa_indx] += PAGE_SIZE;
+ } else {
+ pa_indx++;
+ if (pa_indx == PHYS_AVAIL_ARRAY_END) {
+ printf(
+ "Too many holes in the physical address space, giving up\n");
+ pa_indx--;
+ full = TRUE;
+ goto do_dump_avail;
+ }
+ phys_avail[pa_indx++] = pa; /* start */
+ phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
+ }
+ physmem++;
+do_dump_avail:
+ if (dump_avail[da_indx] == pa) {
+ dump_avail[da_indx] += PAGE_SIZE;
+ } else {
+ da_indx++;
+ if (da_indx == DUMP_AVAIL_ARRAY_END) {
+ da_indx--;
+ goto do_next;
+ }
+ dump_avail[da_indx++] = pa; /* start */
+ dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
+ }
+do_next:
+ if (full)
+ break;
+ }
+ }
+ *pte = 0;
+ invltlb();
+ if (memtest != 0)
+ printf("\n");
+
+ /*
+ * XXX
+ * The last chunk must contain at least one page plus the message
+ * buffer to avoid complicating other code (message buffer address
+ * calculation, etc.).
+ */
+ while (phys_avail[pa_indx - 1] + PAGE_SIZE +
+ round_page(msgbufsize) >= phys_avail[pa_indx]) {
+ physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
+ phys_avail[pa_indx--] = 0;
+ phys_avail[pa_indx--] = 0;
+ }
+
+ Maxmem = atop(phys_avail[pa_indx]);
+
+ /* Trim off space for the message buffer. */
+ phys_avail[pa_indx] -= round_page(msgbufsize);
+
+ /* Map the message buffer. */
+ msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]);
+}
+
+static caddr_t
+native_parse_preload_data(u_int64_t modulep)
+{
+ caddr_t kmdp;
+ char *envp;
+#ifdef DDB
+ vm_offset_t ksym_start;
+ vm_offset_t ksym_end;
+#endif
+
+ preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
+ preload_bootstrap_relocate(KERNBASE);
+ kmdp = preload_search_by_type("elf kernel");
+ if (kmdp == NULL)
+ kmdp = preload_search_by_type("elf64 kernel");
+ boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
+ envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
+ if (envp != NULL)
+ envp += KERNBASE;
+ init_static_kenv(envp, 0);
+#ifdef DDB
+ ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
+ ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
+ db_fetch_ksymtab(ksym_start, ksym_end);
+#endif
+ efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);
+
+ return (kmdp);
+}
+
+static void
+amd64_kdb_init(void)
+{
+ kdb_init();
+#ifdef KDB
+ if (boothowto & RB_KDB)
+ kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
+#endif
+}
+
+/* Set up the fast syscall stuff */
+void
+amd64_conf_fast_syscall(void)
+{
+ uint64_t msr;
+
+ msr = rdmsr(MSR_EFER) | EFER_SCE;
+ wrmsr(MSR_EFER, msr);
+ wrmsr(MSR_LSTAR, pti ? (u_int64_t)IDTVEC(fast_syscall_pti) :
+ (u_int64_t)IDTVEC(fast_syscall));
+ wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
+ msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
+ ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
+ wrmsr(MSR_STAR, msr);
+ wrmsr(MSR_SF_MASK, PSL_NT | PSL_T | PSL_I | PSL_C | PSL_D);
+}
+
+u_int64_t
+hammer_time(u_int64_t modulep, u_int64_t physfree)
+{
+ caddr_t kmdp;
+ int gsel_tss, x;
+ struct pcpu *pc;
+ struct nmi_pcpu *np;
+ struct xstate_hdr *xhdr;
+ u_int64_t rsp0;
+ char *env;
+ size_t kstack0_sz;
+ int late_console;
+
+ TSRAW(&thread0, TS_ENTER, __func__, NULL);
+
+ /*
+ * This may be done better later if it gets more high level
+ * components in it. If so just link td->td_proc here.
+ */
+ proc_linkup0(&proc0, &thread0);
+
+ kmdp = init_ops.parse_preload_data(modulep);
+
+ identify_cpu1();
+ identify_hypervisor();
+
+ /* Init basic tunables, hz etc */
+ init_param1();
+
+ thread0.td_kstack = physfree + KERNBASE;
+ thread0.td_kstack_pages = kstack_pages;
+ kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
+ bzero((void *)thread0.td_kstack, kstack0_sz);
+ physfree += kstack0_sz;
+
+ /*
+ * make gdt memory segments
+ */
+ for (x = 0; x < NGDT; x++) {
+ if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
+ x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1)
+ ssdtosd(&gdt_segs[x], &gdt[x]);
+ }
+ gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
+ ssdtosyssd(&gdt_segs[GPROC0_SEL],
+ (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
+
+ r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
+ r_gdt.rd_base = (long) gdt;
+ lgdt(&r_gdt);
+ pc = &__pcpu[0];
+
+ wrmsr(MSR_FSBASE, 0); /* User value */
+ wrmsr(MSR_GSBASE, (u_int64_t)pc);
+ wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */
+
+ pcpu_init(pc, 0, sizeof(struct pcpu));
+ dpcpu_init((void *)(physfree + KERNBASE), 0);
+ physfree += DPCPU_SIZE;
+ PCPU_SET(prvspace, pc);
+ PCPU_SET(curthread, &thread0);
+ /* Non-late cninit() and printf() can be moved up to here. */
+ PCPU_SET(tssp, &common_tss[0]);
+ PCPU_SET(commontssp, &common_tss[0]);
+ PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
+ PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
+ PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
+ PCPU_SET(gs32p, &gdt[GUGS32_SEL]);
+
+ /*
+ * Initialize mutexes.
+ *
+ * icu_lock: in order to allow an interrupt to occur in a critical
+ * section, to set pcpu->ipending (etc...) properly, we
+ * must be able to get the icu lock, so it can't be
+ * under witness.
+ */
+ mutex_init();
+ mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
+ mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);
+
+ /* exceptions */
+ TUNABLE_INT_FETCH("vm.pmap.pti", &pti);
+
+ for (x = 0; x < NIDT; x++)
+ setidt(x, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_DE, pti ? &IDTVEC(div_pti) : &IDTVEC(div), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_DB, pti ? &IDTVEC(dbg_pti) : &IDTVEC(dbg), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2);
+ setidt(IDT_BP, pti ? &IDTVEC(bpt_pti) : &IDTVEC(bpt), SDT_SYSIGT,
+ SEL_UPL, 0);
+ setidt(IDT_OF, pti ? &IDTVEC(ofl_pti) : &IDTVEC(ofl), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_BR, pti ? &IDTVEC(bnd_pti) : &IDTVEC(bnd), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_UD, pti ? &IDTVEC(ill_pti) : &IDTVEC(ill), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_NM, pti ? &IDTVEC(dna_pti) : &IDTVEC(dna), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
+ setidt(IDT_FPUGP, pti ? &IDTVEC(fpusegm_pti) : &IDTVEC(fpusegm),
+ SDT_SYSIGT, SEL_KPL, 0);
+ setidt(IDT_TS, pti ? &IDTVEC(tss_pti) : &IDTVEC(tss), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_NP, pti ? &IDTVEC(missing_pti) : &IDTVEC(missing),
+ SDT_SYSIGT, SEL_KPL, 0);
+ setidt(IDT_SS, pti ? &IDTVEC(stk_pti) : &IDTVEC(stk), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_GP, pti ? &IDTVEC(prot_pti) : &IDTVEC(prot), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_PF, pti ? &IDTVEC(page_pti) : &IDTVEC(page), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_MF, pti ? &IDTVEC(fpu_pti) : &IDTVEC(fpu), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_AC, pti ? &IDTVEC(align_pti) : &IDTVEC(align), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_MC, pti ? &IDTVEC(mchk_pti) : &IDTVEC(mchk), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IDT_XF, pti ? &IDTVEC(xmm_pti) : &IDTVEC(xmm), SDT_SYSIGT,
+ SEL_KPL, 0);
+#ifdef KDTRACE_HOOKS
+ setidt(IDT_DTRACE_RET, pti ? &IDTVEC(dtrace_ret_pti) :
+ &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0);
+#endif
+#ifdef XENHVM
+ setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYSIGT, SEL_UPL, 0);
+#endif
+ r_idt.rd_limit = sizeof(idt0) - 1;
+ r_idt.rd_base = (long) idt;
+ lidt(&r_idt);
+
+ /*
+ * Initialize the clock before the console so that console
+ * initialization can use DELAY().
+ */
+ clock_init();
+
+ /*
+ * Use vt(4) by default for UEFI boot (during the sc(4)/vt(4)
+ * transition).
+ * Once bootblocks have updated, we can test directly for
+ * efi_systbl != NULL here...
+ */
+ if (preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP)
+ != NULL)
+ vty_set_preferred(VTY_VT);
+
+ finishidentcpu(); /* Final stage of CPU initialization */
+ initializecpu(); /* Initialize CPU registers */
+ initializecpucache();
+
+ /* doublefault stack space, runs on ist1 */
+ common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
+
+ /*
+ * NMI stack, runs on ist2. The pcpu pointer is stored just
+ * above the start of the ist2 stack.
+ */
+ np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
+ np->np_pcpu = (register_t) pc;
+ common_tss[0].tss_ist2 = (long) np;
+
+ /* Set the IO permission bitmap (empty due to tss seg limit) */
+ common_tss[0].tss_iobase = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE;
+
+ gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
+ ltr(gsel_tss);
+
+ amd64_conf_fast_syscall();
+
+ /*
+	 * Temporarily forge a valid pointer to PCB, for exception
+ * handlers. It is reinitialized properly below after FPU is
+ * set up. Also set up td_critnest to short-cut the page
+ * fault handler.
+ */
+ cpu_max_ext_state_size = sizeof(struct savefpu);
+ thread0.td_pcb = get_pcb_td(&thread0);
+ thread0.td_critnest = 1;
+
+ /*
+ * The console and kdb should be initialized even earlier than here,
+ * but some console drivers don't work until after getmemsize().
+ * Default to late console initialization to support these drivers.
+ * This loses mainly printf()s in getmemsize() and early debugging.
+ */
+ late_console = 1;
+ TUNABLE_INT_FETCH("debug.late_console", &late_console);
+ if (!late_console) {
+ cninit();
+ amd64_kdb_init();
+ }
+
+ getmemsize(kmdp, physfree);
+ init_param2(physmem);
+
+	/* now running on new page tables, configured, and u/iom is accessible */
+
+ if (late_console)
+ cninit();
+
+#ifdef DEV_ISA
+#ifdef DEV_ATPIC
+ elcr_probe();
+ atpic_startup();
+#else
+ /* Reset and mask the atpics and leave them shut down. */
+ atpic_reset();
+
+ /*
+ * Point the ICU spurious interrupt vectors at the APIC spurious
+ * interrupt handler.
+ */
+ setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
+ setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
+#endif
+#else
+#error "have you forgotten the isa device?";
+#endif
+
+ if (late_console)
+ amd64_kdb_init();
+
+ msgbufinit(msgbufp, msgbufsize);
+ fpuinit();
+
+ /*
+ * Set up thread0 pcb after fpuinit calculated pcb + fpu save
+ * area size. Zero out the extended state header in fpu save
+ * area.
+ */
+ thread0.td_pcb = get_pcb_td(&thread0);
+ thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0);
+ bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
+ if (use_xsave) {
+ xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
+ 1);
+ xhdr->xstate_bv = xsave_mask;
+ }
+ /* make an initial tss so cpu can get interrupt stack on syscall! */
+ rsp0 = (vm_offset_t)thread0.td_pcb;
+ /* Ensure the stack is aligned to 16 bytes */
+ rsp0 &= ~0xFul;
+ common_tss[0].tss_rsp0 = pti ? ((vm_offset_t)PCPU_PTR(pti_stack) +
+ PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful : rsp0;
+ PCPU_SET(rsp0, rsp0);
+ PCPU_SET(curpcb, thread0.td_pcb);
+
+ /* transfer to user mode */
+
+ _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
+ _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
+ _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
+ _ufssel = GSEL(GUFS32_SEL, SEL_UPL);
+ _ugssel = GSEL(GUGS32_SEL, SEL_UPL);
+
+ load_ds(_udatasel);
+ load_es(_udatasel);
+ load_fs(_ufssel);
+
+ /* setup proc 0's pcb */
+ thread0.td_pcb->pcb_flags = 0;
+ thread0.td_frame = &proc0_tf;
+
+ env = kern_getenv("kernelname");
+ if (env != NULL)
+ strlcpy(kernelname, env, sizeof(kernelname));
+
+ cpu_probe_amdc1e();
+
+#ifdef FDT
+ x86_init_fdt();
+#endif
+ thread0.td_critnest = 0;
+
+ TSEXIT();
+
+ /* Location of kernel stack for locore */
+ return ((u_int64_t)thread0.td_pcb);
+}
+
+void
+cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+{
+
+ pcpu->pc_acpi_id = 0xffffffff;
+}
+
+static int
+smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct bios_smap *smapbase;
+ struct bios_smap_xattr smap;
+ caddr_t kmdp;
+ uint32_t *smapattr;
+ int count, error, i;
+
+ /* Retrieve the system memory map from the loader. */
+ kmdp = preload_search_by_type("elf kernel");
+ if (kmdp == NULL)
+ kmdp = preload_search_by_type("elf64 kernel");
+ smapbase = (struct bios_smap *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_SMAP);
+ if (smapbase == NULL)
+ return (0);
+ smapattr = (uint32_t *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
+ count = *((uint32_t *)smapbase - 1) / sizeof(*smapbase);
+ error = 0;
+ for (i = 0; i < count; i++) {
+ smap.base = smapbase[i].base;
+ smap.length = smapbase[i].length;
+ smap.type = smapbase[i].type;
+ if (smapattr != NULL)
+ smap.xattr = smapattr[i];
+ else
+ smap.xattr = 0;
+ error = SYSCTL_OUT(req, &smap, sizeof(smap));
+ }
+ return (error);
+}
+SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
+ smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");
+
+static int
+efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct efi_map_header *efihdr;
+ caddr_t kmdp;
+ uint32_t efisize;
+
+ kmdp = preload_search_by_type("elf kernel");
+ if (kmdp == NULL)
+ kmdp = preload_search_by_type("elf64 kernel");
+ efihdr = (struct efi_map_header *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_EFI_MAP);
+ if (efihdr == NULL)
+ return (0);
+ efisize = *((uint32_t *)efihdr - 1);
+ return (SYSCTL_OUT(req, efihdr, efisize));
+}
+SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
+ efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map");
+
+void
+spinlock_enter(void)
+{
+ struct thread *td;
+ register_t flags;
+
+ td = curthread;
+ if (td->td_md.md_spinlock_count == 0) {
+ flags = intr_disable();
+ td->td_md.md_spinlock_count = 1;
+ td->td_md.md_saved_flags = flags;
+ critical_enter();
+ } else
+ td->td_md.md_spinlock_count++;
+}
+
+void
+spinlock_exit(void)
+{
+ struct thread *td;
+ register_t flags;
+
+ td = curthread;
+ flags = td->td_md.md_saved_flags;
+ td->td_md.md_spinlock_count--;
+ if (td->td_md.md_spinlock_count == 0) {
+ critical_exit();
+ intr_restore(flags);
+ }
+}
+
+/*
+ * Construct a PCB from a trapframe. This is called from kdb_trap() where
+ * we want to start a backtrace from the function that caused us to enter
+ * the debugger. We have the context in the trapframe, but base the trace
+ * on the PCB. The PCB doesn't have to be perfect, as long as it contains
+ * enough for a backtrace.
+ */
+void
+makectx(struct trapframe *tf, struct pcb *pcb)
+{
+
+ pcb->pcb_r12 = tf->tf_r12;
+ pcb->pcb_r13 = tf->tf_r13;
+ pcb->pcb_r14 = tf->tf_r14;
+ pcb->pcb_r15 = tf->tf_r15;
+ pcb->pcb_rbp = tf->tf_rbp;
+ pcb->pcb_rbx = tf->tf_rbx;
+ pcb->pcb_rip = tf->tf_rip;
+ pcb->pcb_rsp = tf->tf_rsp;
+}
+
+int
+ptrace_set_pc(struct thread *td, unsigned long addr)
+{
+
+ td->td_frame->tf_rip = addr;
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
+ return (0);
+}
+
+int
+ptrace_single_step(struct thread *td)
+{
+ td->td_frame->tf_rflags |= PSL_T;
+ return (0);
+}
+
+int
+ptrace_clear_single_step(struct thread *td)
+{
+ td->td_frame->tf_rflags &= ~PSL_T;
+ return (0);
+}
+
+int
+fill_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *tp;
+
+ tp = td->td_frame;
+ return (fill_frame_regs(tp, regs));
+}
+
+int
+fill_frame_regs(struct trapframe *tp, struct reg *regs)
+{
+ regs->r_r15 = tp->tf_r15;
+ regs->r_r14 = tp->tf_r14;
+ regs->r_r13 = tp->tf_r13;
+ regs->r_r12 = tp->tf_r12;
+ regs->r_r11 = tp->tf_r11;
+ regs->r_r10 = tp->tf_r10;
+ regs->r_r9 = tp->tf_r9;
+ regs->r_r8 = tp->tf_r8;
+ regs->r_rdi = tp->tf_rdi;
+ regs->r_rsi = tp->tf_rsi;
+ regs->r_rbp = tp->tf_rbp;
+ regs->r_rbx = tp->tf_rbx;
+ regs->r_rdx = tp->tf_rdx;
+ regs->r_rcx = tp->tf_rcx;
+ regs->r_rax = tp->tf_rax;
+ regs->r_rip = tp->tf_rip;
+ regs->r_cs = tp->tf_cs;
+ regs->r_rflags = tp->tf_rflags;
+ regs->r_rsp = tp->tf_rsp;
+ regs->r_ss = tp->tf_ss;
+ if (tp->tf_flags & TF_HASSEGS) {
+ regs->r_ds = tp->tf_ds;
+ regs->r_es = tp->tf_es;
+ regs->r_fs = tp->tf_fs;
+ regs->r_gs = tp->tf_gs;
+ } else {
+ regs->r_ds = 0;
+ regs->r_es = 0;
+ regs->r_fs = 0;
+ regs->r_gs = 0;
+ }
+ return (0);
+}
+
+int
+set_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *tp;
+ register_t rflags;
+
+ tp = td->td_frame;
+ rflags = regs->r_rflags & 0xffffffff;
+ if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
+ return (EINVAL);
+ tp->tf_r15 = regs->r_r15;
+ tp->tf_r14 = regs->r_r14;
+ tp->tf_r13 = regs->r_r13;
+ tp->tf_r12 = regs->r_r12;
+ tp->tf_r11 = regs->r_r11;
+ tp->tf_r10 = regs->r_r10;
+ tp->tf_r9 = regs->r_r9;
+ tp->tf_r8 = regs->r_r8;
+ tp->tf_rdi = regs->r_rdi;
+ tp->tf_rsi = regs->r_rsi;
+ tp->tf_rbp = regs->r_rbp;
+ tp->tf_rbx = regs->r_rbx;
+ tp->tf_rdx = regs->r_rdx;
+ tp->tf_rcx = regs->r_rcx;
+ tp->tf_rax = regs->r_rax;
+ tp->tf_rip = regs->r_rip;
+ tp->tf_cs = regs->r_cs;
+ tp->tf_rflags = rflags;
+ tp->tf_rsp = regs->r_rsp;
+ tp->tf_ss = regs->r_ss;
+ if (0) { /* XXXKIB */
+ tp->tf_ds = regs->r_ds;
+ tp->tf_es = regs->r_es;
+ tp->tf_fs = regs->r_fs;
+ tp->tf_gs = regs->r_gs;
+ tp->tf_flags = TF_HASSEGS;
+ }
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
+ return (0);
+}
+
+/* XXX check all this stuff! */
+/* externalize from sv_xmm */
+static void
+fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
+{
+ struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
+ struct envxmm *penv_xmm = &sv_xmm->sv_env;
+ int i;
+
+ /* pcb -> fpregs */
+ bzero(fpregs, sizeof(*fpregs));
+
+ /* FPU control/status */
+ penv_fpreg->en_cw = penv_xmm->en_cw;
+ penv_fpreg->en_sw = penv_xmm->en_sw;
+ penv_fpreg->en_tw = penv_xmm->en_tw;
+ penv_fpreg->en_opcode = penv_xmm->en_opcode;
+ penv_fpreg->en_rip = penv_xmm->en_rip;
+ penv_fpreg->en_rdp = penv_xmm->en_rdp;
+ penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
+ penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;
+
+ /* FPU registers */
+ for (i = 0; i < 8; ++i)
+ bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);
+
+ /* SSE registers */
+ for (i = 0; i < 16; ++i)
+ bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
+}
+
+/* internalize from fpregs into sv_xmm */
+static void
+set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
+{
+ struct envxmm *penv_xmm = &sv_xmm->sv_env;
+ struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
+ int i;
+
+ /* fpregs -> pcb */
+ /* FPU control/status */
+ penv_xmm->en_cw = penv_fpreg->en_cw;
+ penv_xmm->en_sw = penv_fpreg->en_sw;
+ penv_xmm->en_tw = penv_fpreg->en_tw;
+ penv_xmm->en_opcode = penv_fpreg->en_opcode;
+ penv_xmm->en_rip = penv_fpreg->en_rip;
+ penv_xmm->en_rdp = penv_fpreg->en_rdp;
+ penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
+ penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;
+
+ /* FPU registers */
+ for (i = 0; i < 8; ++i)
+ bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);
+
+ /* SSE registers */
+ for (i = 0; i < 16; ++i)
+ bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
+}
+
+/* externalize from td->pcb */
+int
+fill_fpregs(struct thread *td, struct fpreg *fpregs)
+{
+
+ KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
+ P_SHOULDSTOP(td->td_proc),
+ ("not suspended thread %p", td));
+ fpugetregs(td);
+ fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);
+ return (0);
+}
+
+/* internalize to td->pcb */
+int
+set_fpregs(struct thread *td, struct fpreg *fpregs)
+{
+
+ set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));
+ fpuuserinited(td);
+ return (0);
+}
+
+/*
+ * Get machine context.
+ */
+int
+get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
+{
+ struct pcb *pcb;
+ struct trapframe *tp;
+
+ pcb = td->td_pcb;
+ tp = td->td_frame;
+ PROC_LOCK(curthread->td_proc);
+ mcp->mc_onstack = sigonstack(tp->tf_rsp);
+ PROC_UNLOCK(curthread->td_proc);
+ mcp->mc_r15 = tp->tf_r15;
+ mcp->mc_r14 = tp->tf_r14;
+ mcp->mc_r13 = tp->tf_r13;
+ mcp->mc_r12 = tp->tf_r12;
+ mcp->mc_r11 = tp->tf_r11;
+ mcp->mc_r10 = tp->tf_r10;
+ mcp->mc_r9 = tp->tf_r9;
+ mcp->mc_r8 = tp->tf_r8;
+ mcp->mc_rdi = tp->tf_rdi;
+ mcp->mc_rsi = tp->tf_rsi;
+ mcp->mc_rbp = tp->tf_rbp;
+ mcp->mc_rbx = tp->tf_rbx;
+ mcp->mc_rcx = tp->tf_rcx;
+ mcp->mc_rflags = tp->tf_rflags;
+ if (flags & GET_MC_CLEAR_RET) {
+ mcp->mc_rax = 0;
+ mcp->mc_rdx = 0;
+ mcp->mc_rflags &= ~PSL_C;
+ } else {
+ mcp->mc_rax = tp->tf_rax;
+ mcp->mc_rdx = tp->tf_rdx;
+ }
+ mcp->mc_rip = tp->tf_rip;
+ mcp->mc_cs = tp->tf_cs;
+ mcp->mc_rsp = tp->tf_rsp;
+ mcp->mc_ss = tp->tf_ss;
+ mcp->mc_ds = tp->tf_ds;
+ mcp->mc_es = tp->tf_es;
+ mcp->mc_fs = tp->tf_fs;
+ mcp->mc_gs = tp->tf_gs;
+ mcp->mc_flags = tp->tf_flags;
+ mcp->mc_len = sizeof(*mcp);
+ get_fpcontext(td, mcp, NULL, 0);
+ update_pcb_bases(pcb);
+ mcp->mc_fsbase = pcb->pcb_fsbase;
+ mcp->mc_gsbase = pcb->pcb_gsbase;
+ mcp->mc_xfpustate = 0;
+ mcp->mc_xfpustate_len = 0;
+ bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
+ return (0);
+}
+
+/*
+ * Set machine context.
+ *
+ * However, we don't set any but the user modifiable flags, and we won't
+ * touch the cs selector.
+ */
+int
+set_mcontext(struct thread *td, mcontext_t *mcp)
+{
+ struct pcb *pcb;
+ struct trapframe *tp;
+ char *xfpustate;
+ long rflags;
+ int ret;
+
+ pcb = td->td_pcb;
+ tp = td->td_frame;
+ if (mcp->mc_len != sizeof(*mcp) ||
+ (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
+ return (EINVAL);
+ rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
+ (tp->tf_rflags & ~PSL_USERCHANGE);
+ if (mcp->mc_flags & _MC_HASFPXSTATE) {
+ if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
+ sizeof(struct savefpu))
+ return (EINVAL);
+ xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
+ ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
+ mcp->mc_xfpustate_len);
+ if (ret != 0)
+ return (ret);
+ } else
+ xfpustate = NULL;
+ ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
+ if (ret != 0)
+ return (ret);
+ tp->tf_r15 = mcp->mc_r15;
+ tp->tf_r14 = mcp->mc_r14;
+ tp->tf_r13 = mcp->mc_r13;
+ tp->tf_r12 = mcp->mc_r12;
+ tp->tf_r11 = mcp->mc_r11;
+ tp->tf_r10 = mcp->mc_r10;
+ tp->tf_r9 = mcp->mc_r9;
+ tp->tf_r8 = mcp->mc_r8;
+ tp->tf_rdi = mcp->mc_rdi;
+ tp->tf_rsi = mcp->mc_rsi;
+ tp->tf_rbp = mcp->mc_rbp;
+ tp->tf_rbx = mcp->mc_rbx;
+ tp->tf_rdx = mcp->mc_rdx;
+ tp->tf_rcx = mcp->mc_rcx;
+ tp->tf_rax = mcp->mc_rax;
+ tp->tf_rip = mcp->mc_rip;
+ tp->tf_rflags = rflags;
+ tp->tf_rsp = mcp->mc_rsp;
+ tp->tf_ss = mcp->mc_ss;
+ tp->tf_flags = mcp->mc_flags;
+ if (tp->tf_flags & TF_HASSEGS) {
+ tp->tf_ds = mcp->mc_ds;
+ tp->tf_es = mcp->mc_es;
+ tp->tf_fs = mcp->mc_fs;
+ tp->tf_gs = mcp->mc_gs;
+ }
+ set_pcb_flags(pcb, PCB_FULL_IRET);
+ if (mcp->mc_flags & _MC_HASBASES) {
+ pcb->pcb_fsbase = mcp->mc_fsbase;
+ pcb->pcb_gsbase = mcp->mc_gsbase;
+ }
+ return (0);
+}
+
+static void
+get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
+ size_t xfpusave_len)
+{
+ size_t max_len, len;
+
+ mcp->mc_ownedfp = fpugetregs(td);
+ bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
+ sizeof(mcp->mc_fpstate));
+ mcp->mc_fpformat = fpuformat();
+ if (!use_xsave || xfpusave_len == 0)
+ return;
+ max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
+ len = xfpusave_len;
+ if (len > max_len) {
+ len = max_len;
+ bzero(xfpusave + max_len, len - max_len);
+ }
+ mcp->mc_flags |= _MC_HASFPXSTATE;
+ mcp->mc_xfpustate_len = len;
+ bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
+}
+
+static int
+set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
+ size_t xfpustate_len)
+{
+ int error;
+
+ if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
+ return (0);
+ else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
+ return (EINVAL);
+ else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
+ /* We don't care what state is left in the FPU or PCB. */
+ fpstate_drop(td);
+ error = 0;
+ } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
+ mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
+ error = fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate,
+ xfpustate, xfpustate_len);
+ } else
+ return (EINVAL);
+ return (error);
+}
+
+void
+fpstate_drop(struct thread *td)
+{
+
+ KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
+ critical_enter();
+ if (PCPU_GET(fpcurthread) == td)
+ fpudrop();
+ /*
+ * XXX force a full drop of the fpu. The above only drops it if we
+ * owned it.
+ *
+ * XXX I don't much like fpugetuserregs()'s semantics of doing a full
+ * drop. Dropping only to the pcb matches fnsave's behaviour.
+ * We only need to drop to !PCB_INITDONE in sendsig(). But
+ * sendsig() is the only caller of fpugetuserregs()... perhaps we just
+ * have too many layers.
+ */
+ clear_pcb_flags(curthread->td_pcb,
+ PCB_FPUINITDONE | PCB_USERFPUINITDONE);
+ critical_exit();
+}
+
+int
+fill_dbregs(struct thread *td, struct dbreg *dbregs)
+{
+ struct pcb *pcb;
+
+ if (td == NULL) {
+ dbregs->dr[0] = rdr0();
+ dbregs->dr[1] = rdr1();
+ dbregs->dr[2] = rdr2();
+ dbregs->dr[3] = rdr3();
+ dbregs->dr[6] = rdr6();
+ dbregs->dr[7] = rdr7();
+ } else {
+ pcb = td->td_pcb;
+ dbregs->dr[0] = pcb->pcb_dr0;
+ dbregs->dr[1] = pcb->pcb_dr1;
+ dbregs->dr[2] = pcb->pcb_dr2;
+ dbregs->dr[3] = pcb->pcb_dr3;
+ dbregs->dr[6] = pcb->pcb_dr6;
+ dbregs->dr[7] = pcb->pcb_dr7;
+ }
+ dbregs->dr[4] = 0;
+ dbregs->dr[5] = 0;
+ dbregs->dr[8] = 0;
+ dbregs->dr[9] = 0;
+ dbregs->dr[10] = 0;
+ dbregs->dr[11] = 0;
+ dbregs->dr[12] = 0;
+ dbregs->dr[13] = 0;
+ dbregs->dr[14] = 0;
+ dbregs->dr[15] = 0;
+ return (0);
+}
+
+int
+set_dbregs(struct thread *td, struct dbreg *dbregs)
+{
+ struct pcb *pcb;
+ int i;
+
+ if (td == NULL) {
+ load_dr0(dbregs->dr[0]);
+ load_dr1(dbregs->dr[1]);
+ load_dr2(dbregs->dr[2]);
+ load_dr3(dbregs->dr[3]);
+ load_dr6(dbregs->dr[6]);
+ load_dr7(dbregs->dr[7]);
+ } else {
+ /*
+ * Don't let an illegal value for dr7 get set. Specifically,
+ * check for undefined settings. Setting these bit patterns
+ * results in undefined behaviour and can lead to an unexpected
+ * TRCTRAP or a general protection fault right here.
+ * Upper bits of dr6 and dr7 must not be set
+ */
+ for (i = 0; i < 4; i++) {
+ if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
+ return (EINVAL);
+ if (td->td_frame->tf_cs == _ucode32sel &&
+ DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
+ return (EINVAL);
+ }
+ if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
+ (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
+ return (EINVAL);
+
+ pcb = td->td_pcb;
+
+ /*
+ * Don't let a process set a breakpoint that is not within the
+ * process's address space. If a process could do this, it
+ * could halt the system by setting a breakpoint in the kernel
+ * (if ddb was enabled). Thus, we need to check to make sure
+ * that no breakpoints are being enabled for addresses outside
+ * process's address space.
+ *
+ * XXX - what about when the watched area of the user's
+ * address space is written into from within the kernel
+ * ... wouldn't that still cause a breakpoint to be generated
+ * from within kernel mode?
+ */
+
+ if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
+ /* dr0 is enabled */
+ if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
+ return (EINVAL);
+ }
+ if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
+ /* dr1 is enabled */
+ if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
+ return (EINVAL);
+ }
+ if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
+ /* dr2 is enabled */
+ if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
+ return (EINVAL);
+ }
+ if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
+ /* dr3 is enabled */
+ if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
+ return (EINVAL);
+ }
+
+ pcb->pcb_dr0 = dbregs->dr[0];
+ pcb->pcb_dr1 = dbregs->dr[1];
+ pcb->pcb_dr2 = dbregs->dr[2];
+ pcb->pcb_dr3 = dbregs->dr[3];
+ pcb->pcb_dr6 = dbregs->dr[6];
+ pcb->pcb_dr7 = dbregs->dr[7];
+
+ set_pcb_flags(pcb, PCB_DBREGS);
+ }
+
+ return (0);
+}
+
+void
+reset_dbregs(void)
+{
+
+ load_dr7(0); /* Turn off the control bits first */
+ load_dr0(0);
+ load_dr1(0);
+ load_dr2(0);
+ load_dr3(0);
+ load_dr6(0);
+}
+
+/*
+ * Return > 0 if a hardware breakpoint has been hit, and the
+ * breakpoint was in user space. Return 0, otherwise.
+ */
+int
+user_dbreg_trap(void)
+{
+ u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */
+ u_int64_t bp; /* breakpoint bits extracted from dr6 */
+ int nbp; /* number of breakpoints that triggered */
+ caddr_t addr[4]; /* breakpoint addresses */
+ int i;
+
+ dr7 = rdr7();
+ if ((dr7 & 0x000000ff) == 0) {
+ /*
+ * all GE and LE bits in the dr7 register are zero,
+ * thus the trap couldn't have been caused by the
+ * hardware debug registers
+ */
+ return 0;
+ }
+
+ nbp = 0;
+ dr6 = rdr6();
+ bp = dr6 & 0x0000000f;
+
+ if (!bp) {
+ /*
+ * None of the breakpoint bits are set meaning this
+ * trap was not caused by any of the debug registers
+ */
+ return 0;
+ }
+
+ /*
+ * at least one of the breakpoints were hit, check to see
+ * which ones and if any of them are user space addresses
+ */
+
+ if (bp & 0x01) {
+ addr[nbp++] = (caddr_t)rdr0();
+ }
+ if (bp & 0x02) {
+ addr[nbp++] = (caddr_t)rdr1();
+ }
+ if (bp & 0x04) {
+ addr[nbp++] = (caddr_t)rdr2();
+ }
+ if (bp & 0x08) {
+ addr[nbp++] = (caddr_t)rdr3();
+ }
+
+ for (i = 0; i < nbp; i++) {
+ if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
+ /*
+ * addr[i] is in user space
+ */
+ return nbp;
+ }
+ }
+
+ /*
+ * None of the breakpoints are in user space.
+ */
+ return 0;
+}
+
+/*
+ * The pcb_flags is only modified by current thread, or by other threads
+ * when current thread is stopped. However, current thread may change it
+ * from the interrupt context in cpu_switch(), or in the trap handler.
+ * When we read-modify-write pcb_flags from C sources, compiler may generate
+ * code that is not atomic regarding the interrupt handler. If a trap or
+ * interrupt happens and any flag is modified from the handler, it can be
+ * clobbered with the cached value later. Therefore, we implement setting
+ * and clearing flags with single-instruction functions, which do not race
+ * with possible modification of the flags from the trap or interrupt context,
+ * because traps and interrupts are executed only on instruction boundary.
+ */
+void
+set_pcb_flags_raw(struct pcb *pcb, const u_int flags)
+{
+
+ __asm __volatile("orl %1,%0"
+ : "=m" (pcb->pcb_flags) : "ir" (flags), "m" (pcb->pcb_flags)
+ : "cc", "memory");
+
+}
+
+/*
+ * The support for RDFSBASE, WRFSBASE and similar instructions for %gs
+ * base requires that kernel saves MSR_FSBASE and MSR_{K,}GSBASE into
+ * pcb if user space modified the bases. We must save on the context
+ * switch or if the return to usermode happens through the doreti.
+ *
+ * Tracking of both events is performed by the pcb flag PCB_FULL_IRET,
+ * which have a consequence that the base MSRs must be saved each time
+ * the PCB_FULL_IRET flag is set. We disable interrupts to sync with
+ * context switches.
+ */
+void
+set_pcb_flags(struct pcb *pcb, const u_int flags)
+{
+ register_t r;
+
+ if (curpcb == pcb &&
+ (flags & PCB_FULL_IRET) != 0 &&
+ (pcb->pcb_flags & PCB_FULL_IRET) == 0 &&
+ (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE) != 0) {
+ r = intr_disable();
+ if ((pcb->pcb_flags & PCB_FULL_IRET) == 0) {
+ if (rfs() == _ufssel)
+ pcb->pcb_fsbase = rdfsbase();
+ if (rgs() == _ugssel)
+ pcb->pcb_gsbase = rdmsr(MSR_KGSBASE);
+ }
+ set_pcb_flags_raw(pcb, flags);
+ intr_restore(r);
+ } else {
+ set_pcb_flags_raw(pcb, flags);
+ }
+}
+
+void
+clear_pcb_flags(struct pcb *pcb, const u_int flags)
+{
+
+ __asm __volatile("andl %1,%0"
+ : "=m" (pcb->pcb_flags) : "ir" (~flags), "m" (pcb->pcb_flags)
+ : "cc", "memory");
+}
+
+#ifdef KDB
+
+/*
+ * Provide inb() and outb() as functions. They are normally only available as
+ * inline functions, thus cannot be called from the debugger.
+ */
+
+/* silence compiler warnings */
+u_char inb_(u_short);
+void outb_(u_short, u_char);
+
+u_char
+inb_(u_short port)
+{
+ return inb(port);
+}
+
+void
+outb_(u_short port, u_char data)
+{
+ outb(port, data);
+}
+
+#endif /* KDB */
Index: sys/amd64/amd64/mp_machdep.c.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/mp_machdep.c.orig
@@ -0,0 +1,638 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 1996, by Steve Passe
+ * Copyright (c) 2003, by Peter Wemm
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. The name of the developer may NOT be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_cpu.h"
+#include "opt_ddb.h"
+#include "opt_kstack_pages.h"
+#include "opt_sched.h"
+#include "opt_smp.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpuset.h>
+#ifdef GPROF
+#include <sys/gmon.h>
+#endif
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/memrange.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_extern.h>
+
+#include <x86/apicreg.h>
+#include <machine/clock.h>
+#include <machine/cputypes.h>
+#include <machine/cpufunc.h>
+#include <x86/mca.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/psl.h>
+#include <machine/smp.h>
+#include <machine/specialreg.h>
+#include <machine/tss.h>
+#include <machine/cpu.h>
+#include <x86/init.h>
+
+#define WARMBOOT_TARGET 0
+#define WARMBOOT_OFF (KERNBASE + 0x0467)
+#define WARMBOOT_SEG (KERNBASE + 0x0469)
+
+#define CMOS_REG (0x70)
+#define CMOS_DATA (0x71)
+#define BIOS_RESET (0x0f)
+#define BIOS_WARM (0x0a)
+
+extern struct pcpu __pcpu[];
+
+/* Temporary variables for init_secondary() */
+char *doublefault_stack;
+char *mce_stack;
+char *nmi_stack;
+
+/*
+ * Local data and functions.
+ */
+
+static int start_ap(int apic_id);
+
+static u_int bootMP_size;
+static u_int boot_address;
+
+/*
+ * Calculate usable address in base memory for AP trampoline code.
+ */
+u_int
+mp_bootaddress(u_int basemem)
+{
+
+ bootMP_size = mptramp_end - mptramp_start;
+ boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
+ if (((basemem * 1024) - boot_address) < bootMP_size)
+ boot_address -= PAGE_SIZE; /* not enough, lower by 4k */
+ /* 3 levels of page table pages */
+ mptramp_pagetables = boot_address - (PAGE_SIZE * 3);
+
+ return mptramp_pagetables;
+}
+
+/*
+ * Initialize the IPI handlers and start up the AP's.
+ */
+void
+cpu_mp_start(void)
+{
+ int i;
+
+ /* Initialize the logical ID to APIC ID table. */
+ for (i = 0; i < MAXCPU; i++) {
+ cpu_apic_ids[i] = -1;
+ cpu_ipi_pending[i] = 0;
+ }
+
+ /* Install an inter-CPU IPI for TLB invalidation */
+ if (pmap_pcid_enabled) {
+ if (invpcid_works) {
+ setidt(IPI_INVLTLB, pti ?
+ IDTVEC(invltlb_invpcid_pti_pti) :
+ IDTVEC(invltlb_invpcid_nopti), SDT_SYSIGT,
+ SEL_KPL, 0);
+ setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_invpcid_pti) :
+ IDTVEC(invlpg_invpcid), SDT_SYSIGT, SEL_KPL, 0);
+ setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_invpcid_pti) :
+ IDTVEC(invlrng_invpcid), SDT_SYSIGT, SEL_KPL, 0);
+ } else {
+ setidt(IPI_INVLTLB, pti ? IDTVEC(invltlb_pcid_pti) :
+ IDTVEC(invltlb_pcid), SDT_SYSIGT, SEL_KPL, 0);
+ setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_pcid_pti) :
+ IDTVEC(invlpg_pcid), SDT_SYSIGT, SEL_KPL, 0);
+ setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_pcid_pti) :
+ IDTVEC(invlrng_pcid), SDT_SYSIGT, SEL_KPL, 0);
+ }
+ } else {
+ setidt(IPI_INVLTLB, pti ? IDTVEC(invltlb_pti) : IDTVEC(invltlb),
+ SDT_SYSIGT, SEL_KPL, 0);
+ setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_pti) : IDTVEC(invlpg),
+ SDT_SYSIGT, SEL_KPL, 0);
+ setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_pti) : IDTVEC(invlrng),
+ SDT_SYSIGT, SEL_KPL, 0);
+ }
+
+ /* Install an inter-CPU IPI for cache invalidation. */
+ setidt(IPI_INVLCACHE, pti ? IDTVEC(invlcache_pti) : IDTVEC(invlcache),
+ SDT_SYSIGT, SEL_KPL, 0);
+
+ /* Install an inter-CPU IPI for all-CPU rendezvous */
+ setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
+ IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
+
+ /* Install generic inter-CPU IPI handler */
+ setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
+ IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);
+
+ /* Install an inter-CPU IPI for CPU stop/restart */
+ setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
+ SDT_SYSIGT, SEL_KPL, 0);
+
+ /* Install an inter-CPU IPI for CPU suspend/resume */
+ setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
+ SDT_SYSIGT, SEL_KPL, 0);
+
+ /* Set boot_cpu_id if needed. */
+ if (boot_cpu_id == -1) {
+ boot_cpu_id = PCPU_GET(apic_id);
+ cpu_info[boot_cpu_id].cpu_bsp = 1;
+ } else
+ KASSERT(boot_cpu_id == PCPU_GET(apic_id),
+ ("BSP's APIC ID doesn't match boot_cpu_id"));
+
+ /* Probe logical/physical core configuration. */
+ topo_probe();
+
+ assign_cpu_ids();
+
+ /* Start each Application Processor */
+ init_ops.start_all_aps();
+
+ set_interrupt_apic_ids();
+}
+
+
+/*
+ * AP CPU's call this to initialize themselves.
+ */
+void
+init_secondary(void)
+{
+ struct pcpu *pc;
+ struct nmi_pcpu *np;
+ u_int64_t cr0;
+ int cpu, gsel_tss, x;
+ struct region_descriptor ap_gdt;
+
+ /* Set by the startup code for us to use */
+ cpu = bootAP;
+
+ /* Init tss */
+ common_tss[cpu] = common_tss[0];
+ common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
+ IOPERM_BITMAP_SIZE;
+ common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];
+
+ /* The NMI stack runs on IST2. */
+ np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
+ common_tss[cpu].tss_ist2 = (long) np;
+
+ /* The MC# stack runs on IST3. */
+ np = ((struct nmi_pcpu *) &mce_stack[PAGE_SIZE]) - 1;
+ common_tss[cpu].tss_ist3 = (long) np;
+
+ /* Prepare private GDT */
+ gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
+ for (x = 0; x < NGDT; x++) {
+ if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
+ x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
+ ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
+ }
+ ssdtosyssd(&gdt_segs[GPROC0_SEL],
+ (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
+ ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
+ ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
+ lgdt(&ap_gdt); /* does magic intra-segment return */
+
+ /* Get per-cpu data */
+ pc = &__pcpu[cpu];
+
+ /* prime data page for it to use */
+ pcpu_init(pc, cpu, sizeof(struct pcpu));
+ dpcpu_init(dpcpu, cpu);
+ pc->pc_apic_id = cpu_apic_ids[cpu];
+ pc->pc_prvspace = pc;
+ pc->pc_curthread = 0;
+ pc->pc_tssp = &common_tss[cpu];
+ pc->pc_commontssp = &common_tss[cpu];
+ pc->pc_rsp0 = 0;
+ pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
+ GPROC0_SEL];
+ pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
+ pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
+ pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
+ GUSERLDT_SEL];
+ pc->pc_curpmap = kernel_pmap;
+ pc->pc_pcid_gen = 1;
+ pc->pc_pcid_next = PMAP_PCID_KERN + 1;
+ common_tss[cpu].tss_rsp0 = pti ? ((vm_offset_t)&pc->pc_pti_stack +
+ PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful : 0;
+
+ /* Save the per-cpu pointer for use by the NMI handler. */
+ np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
+ np->np_pcpu = (register_t) pc;
+
+ /* Save the per-cpu pointer for use by the MC# handler. */
+ np = ((struct nmi_pcpu *) &mce_stack[PAGE_SIZE]) - 1;
+ np->np_pcpu = (register_t) pc;
+
+ wrmsr(MSR_FSBASE, 0); /* User value */
+ wrmsr(MSR_GSBASE, (u_int64_t)pc);
+ wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */
+ fix_cpuid();
+
+ lidt(&r_idt);
+
+ gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
+ ltr(gsel_tss);
+
+ /*
+ * Set to a known state:
+ * Set by mpboot.s: CR0_PG, CR0_PE
+ * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
+ */
+ cr0 = rcr0();
+ cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
+ load_cr0(cr0);
+
+ amd64_conf_fast_syscall();
+
+ /* signal our startup to the BSP. */
+ mp_naps++;
+
+ /* Spin until the BSP releases the AP's. */
+ while (atomic_load_acq_int(&aps_ready) == 0)
+ ia32_pause();
+
+ init_secondary_tail();
+}
+
+/*******************************************************************
+ * local functions and data
+ */
+
+/*
+ * start each AP in our list
+ */
+int
+native_start_all_aps(void)
+{
+ vm_offset_t va = boot_address + KERNBASE;
+ u_int64_t *pt4, *pt3, *pt2;
+ u_int32_t mpbioswarmvec;
+ int apic_id, cpu, i;
+ u_char mpbiosreason;
+
+ mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
+
+ /* install the AP 1st level boot code */
+ pmap_kenter(va, boot_address);
+ pmap_invalidate_page(kernel_pmap, va);
+ bcopy(mptramp_start, (void *)va, bootMP_size);
+
+ /* Locate the page tables, they'll be below the trampoline */
+ pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
+ pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
+ pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
+
+ /* Create the initial 1GB replicated page tables */
+ for (i = 0; i < 512; i++) {
+ /* Each slot of the level 4 pages points to the same level 3 page */
+ pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
+ pt4[i] |= PG_V | PG_RW | PG_U;
+
+ /* Each slot of the level 3 pages points to the same level 2 page */
+ pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
+ pt3[i] |= PG_V | PG_RW | PG_U;
+
+ /* The level 2 page slots are mapped with 2MB pages for 1GB. */
+ pt2[i] = i * (2 * 1024 * 1024);
+ pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
+ }
+
+ /* save the current value of the warm-start vector */
+ mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
+ outb(CMOS_REG, BIOS_RESET);
+ mpbiosreason = inb(CMOS_DATA);
+
+ /* setup a vector to our boot code */
+ *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
+ *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
+ outb(CMOS_REG, BIOS_RESET);
+ outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
+
+ /* start each AP */
+ for (cpu = 1; cpu < mp_ncpus; cpu++) {
+ apic_id = cpu_apic_ids[cpu];
+
+ /* allocate and set up an idle stack data page */
+ bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
+ kstack_pages * PAGE_SIZE, M_WAITOK | M_ZERO);
+ doublefault_stack = (char *)kmem_malloc(kernel_arena,
+ PAGE_SIZE, M_WAITOK | M_ZERO);
+ mce_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
+ M_WAITOK | M_ZERO);
+ nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
+ M_WAITOK | M_ZERO);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
+
+ bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
+ bootAP = cpu;
+
+ /* attempt to start the Application Processor */
+ if (!start_ap(apic_id)) {
+ /* restore the warmstart vector */
+ *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
+ panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
+ }
+
+ CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
+ }
+
+ /* restore the warmstart vector */
+ *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
+
+ outb(CMOS_REG, BIOS_RESET);
+ outb(CMOS_DATA, mpbiosreason);
+
+ /* number of APs actually started */
+ return mp_naps;
+}
+
+
+/*
+ * This function starts the AP (application processor) identified
+ * by the APIC ID 'physicalCpu'. It does quite a "song and dance"
+ * to accomplish this. This is necessary because of the nuances
+ * of the different hardware we might encounter. It isn't pretty,
+ * but it seems to work.
+ */
+static int
+start_ap(int apic_id)
+{
+ int vector, ms;
+ int cpus;
+
+ /* calculate the vector */
+ vector = (boot_address >> 12) & 0xff;
+
+ /* used as a watchpoint to signal AP startup */
+ cpus = mp_naps;
+
+ ipi_startup(apic_id, vector);
+
+ /* Wait up to 5 seconds for it to start. */
+ for (ms = 0; ms < 5000; ms++) {
+ if (mp_naps > cpus)
+ return 1; /* return SUCCESS */
+ DELAY(1000);
+ }
+ return 0; /* return FAILURE */
+}
+
+void
+invltlb_invpcid_handler(void)
+{
+ struct invpcid_descr d;
+ uint32_t generation;
+
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_gbl[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ generation = smp_tlb_generation;
+ d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
+ d.pad = 0;
+ d.addr = 0;
+ invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
+ INVPCID_CTX);
+ PCPU_SET(smp_tlb_done, generation);
+}
+
+void
+invltlb_invpcid_pti_handler(void)
+{
+ struct invpcid_descr d;
+ uint32_t generation;
+
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_gbl[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ generation = smp_tlb_generation;
+ d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
+ d.pad = 0;
+ d.addr = 0;
+ if (smp_tlb_pmap == kernel_pmap) {
+ /*
+ * This invalidation actually needs to clear kernel
+ * mappings from the TLB in the current pmap, but
+ * since we were asked for the flush in the kernel
+ * pmap, achieve it by performing global flush.
+ */
+ invpcid(&d, INVPCID_CTXGLOB);
+ } else {
+ invpcid(&d, INVPCID_CTX);
+ d.pcid |= PMAP_PCID_USER_PT;
+ invpcid(&d, INVPCID_CTX);
+ }
+ PCPU_SET(smp_tlb_done, generation);
+}
+
+void
+invltlb_pcid_handler(void)
+{
+ uint64_t kcr3, ucr3;
+ uint32_t generation, pcid;
+
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_gbl[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ generation = smp_tlb_generation; /* Overlap with serialization */
+ if (smp_tlb_pmap == kernel_pmap) {
+ invltlb_glob();
+ } else {
+ /*
+ * The current pmap might not be equal to
+ * smp_tlb_pmap. The clearing of the pm_gen in
+ * pmap_invalidate_all() takes care of TLB
+ * invalidation when switching to the pmap on this
+ * CPU.
+ */
+ if (PCPU_GET(curpmap) == smp_tlb_pmap) {
+ pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
+ kcr3 = smp_tlb_pmap->pm_cr3 | pcid;
+ ucr3 = smp_tlb_pmap->pm_ucr3;
+ if (ucr3 != PMAP_NO_CR3) {
+ ucr3 |= PMAP_PCID_USER_PT | pcid;
+ pmap_pti_pcid_invalidate(ucr3, kcr3);
+ } else
+ load_cr3(kcr3);
+ }
+ }
+ PCPU_SET(smp_tlb_done, generation);
+}
+
+void
+invlpg_invpcid_handler(void)
+{
+ struct invpcid_descr d;
+ uint32_t generation;
+
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_pg[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ generation = smp_tlb_generation; /* Overlap with serialization */
+ invlpg(smp_tlb_addr1);
+ if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3) {
+ d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
+ PMAP_PCID_USER_PT;
+ d.pad = 0;
+ d.addr = smp_tlb_addr1;
+ invpcid(&d, INVPCID_ADDR);
+ }
+ PCPU_SET(smp_tlb_done, generation);
+}
+
+void
+invlpg_pcid_handler(void)
+{
+ uint64_t kcr3, ucr3;
+ uint32_t generation;
+ uint32_t pcid;
+
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_pg[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ generation = smp_tlb_generation; /* Overlap with serialization */
+ invlpg(smp_tlb_addr1);
+ if (smp_tlb_pmap == PCPU_GET(curpmap) &&
+ (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3) {
+ pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
+ kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
+ ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
+ pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
+ }
+ PCPU_SET(smp_tlb_done, generation);
+}
+
+void
+invlrng_invpcid_handler(void)
+{
+ struct invpcid_descr d;
+ vm_offset_t addr, addr2;
+ uint32_t generation;
+
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_rng[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ addr = smp_tlb_addr1;
+ addr2 = smp_tlb_addr2;
+ generation = smp_tlb_generation; /* Overlap with serialization */
+ do {
+ invlpg(addr);
+ addr += PAGE_SIZE;
+ } while (addr < addr2);
+ if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3) {
+ d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
+ PMAP_PCID_USER_PT;
+ d.pad = 0;
+ d.addr = smp_tlb_addr1;
+ do {
+ invpcid(&d, INVPCID_ADDR);
+ d.addr += PAGE_SIZE;
+ } while (d.addr < addr2);
+ }
+ PCPU_SET(smp_tlb_done, generation);
+}
+
+void
+invlrng_pcid_handler(void)
+{
+ vm_offset_t addr, addr2;
+ uint64_t kcr3, ucr3;
+ uint32_t generation;
+ uint32_t pcid;
+
+#ifdef COUNT_XINVLTLB_HITS
+ xhits_rng[PCPU_GET(cpuid)]++;
+#endif /* COUNT_XINVLTLB_HITS */
+#ifdef COUNT_IPIS
+ (*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
+#endif /* COUNT_IPIS */
+
+ addr = smp_tlb_addr1;
+ addr2 = smp_tlb_addr2;
+ generation = smp_tlb_generation; /* Overlap with serialization */
+ do {
+ invlpg(addr);
+ addr += PAGE_SIZE;
+ } while (addr < addr2);
+ if (smp_tlb_pmap == PCPU_GET(curpmap) &&
+ (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3) {
+ pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
+ kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
+ ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
+ pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
+ }
+ PCPU_SET(smp_tlb_done, generation);
+}
Index: sys/amd64/amd64/mpboot.S
===================================================================
--- sys/amd64/amd64/mpboot.S
+++ sys/amd64/amd64/mpboot.S
@@ -29,7 +29,7 @@
#include <machine/asmacros.h> /* miscellaneous asm macros */
#include <machine/specialreg.h>
-#include "assym.s"
+#include "assym.S"
.data /* So we can modify it */
Index: sys/amd64/amd64/pmap.c.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/pmap.c.orig
@@ -0,0 +1,8146 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ * Copyright (c) 2003 Peter Wemm
+ * All rights reserved.
+ * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ * Copyright (c) 2014-2018 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Portions of this software were developed by
+ * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ */
+/*-
+ * Copyright (c) 2003 Networks Associates Technology, Inc.
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Jake Burkholder,
+ * Safeport Network Services, and Network Associates Laboratories, the
+ * Security Research Division of Network Associates, Inc. under
+ * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
+ * CHATS research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define AMD64_NPT_AWARE
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Manages physical address maps.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include "opt_pmap.h"
+#include "opt_vm.h"
+
+#include <sys/param.h>
+#include <sys/bitstring.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/sx.h>
+#include <sys/turnstile.h>
+#include <sys/vmem.h>
+#include <sys/vmmeter.h>
+#include <sys/sched.h>
+#include <sys/sysctl.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_radix.h>
+#include <vm/vm_reserv.h>
+#include <vm/uma.h>
+
+#include <machine/intr_machdep.h>
+#include <x86/apicvar.h>
+#include <machine/cpu.h>
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/specialreg.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+#include <machine/tss.h>
+
+static __inline boolean_t
+pmap_type_guest(pmap_t pmap)
+{
+
+ return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
+}
+
+static __inline boolean_t
+pmap_emulate_ad_bits(pmap_t pmap)
+{
+
+ return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0);
+}
+
+static __inline pt_entry_t
+pmap_valid_bit(pmap_t pmap)
+{
+ pt_entry_t mask;
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ case PT_RVI:
+ mask = X86_PG_V;
+ break;
+ case PT_EPT:
+ if (pmap_emulate_ad_bits(pmap))
+ mask = EPT_PG_EMUL_V;
+ else
+ mask = EPT_PG_READ;
+ break;
+ default:
+ panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type);
+ }
+
+ return (mask);
+}
+
+static __inline pt_entry_t
+pmap_rw_bit(pmap_t pmap)
+{
+ pt_entry_t mask;
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ case PT_RVI:
+ mask = X86_PG_RW;
+ break;
+ case PT_EPT:
+ if (pmap_emulate_ad_bits(pmap))
+ mask = EPT_PG_EMUL_RW;
+ else
+ mask = EPT_PG_WRITE;
+ break;
+ default:
+ panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type);
+ }
+
+ return (mask);
+}
+
+static pt_entry_t pg_g;
+
+static __inline pt_entry_t
+pmap_global_bit(pmap_t pmap)
+{
+ pt_entry_t mask;
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ mask = pg_g;
+ break;
+ case PT_RVI:
+ case PT_EPT:
+ mask = 0;
+ break;
+ default:
+ panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type);
+ }
+
+ return (mask);
+}
+
+static __inline pt_entry_t
+pmap_accessed_bit(pmap_t pmap)
+{
+ pt_entry_t mask;
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ case PT_RVI:
+ mask = X86_PG_A;
+ break;
+ case PT_EPT:
+ if (pmap_emulate_ad_bits(pmap))
+ mask = EPT_PG_READ;
+ else
+ mask = EPT_PG_A;
+ break;
+ default:
+ panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);
+ }
+
+ return (mask);
+}
+
+static __inline pt_entry_t
+pmap_modified_bit(pmap_t pmap)
+{
+ pt_entry_t mask;
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ case PT_RVI:
+ mask = X86_PG_M;
+ break;
+ case PT_EPT:
+ if (pmap_emulate_ad_bits(pmap))
+ mask = EPT_PG_WRITE;
+ else
+ mask = EPT_PG_M;
+ break;
+ default:
+ panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
+ }
+
+ return (mask);
+}
+
+#if !defined(DIAGNOSTIC)
+#ifdef __GNUC_GNU_INLINE__
+#define PMAP_INLINE __attribute__((__gnu_inline__)) inline
+#else
+#define PMAP_INLINE extern inline
+#endif
+#else
+#define PMAP_INLINE
+#endif
+
+#ifdef PV_STATS
+#define PV_STAT(x) do { x ; } while (0)
+#else
+#define PV_STAT(x) do { } while (0)
+#endif
+
+#define pa_index(pa) ((pa) >> PDRSHIFT)
+#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+
+#define NPV_LIST_LOCKS MAXCPU
+
+#define PHYS_TO_PV_LIST_LOCK(pa) \
+ (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
+
+#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
+ struct rwlock **_lockp = (lockp); \
+ struct rwlock *_new_lock; \
+ \
+ _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
+ if (_new_lock != *_lockp) { \
+ if (*_lockp != NULL) \
+ rw_wunlock(*_lockp); \
+ *_lockp = _new_lock; \
+ rw_wlock(*_lockp); \
+ } \
+} while (0)
+
+#define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
+
+#define RELEASE_PV_LIST_LOCK(lockp) do { \
+ struct rwlock **_lockp = (lockp); \
+ \
+ if (*_lockp != NULL) { \
+ rw_wunlock(*_lockp); \
+ *_lockp = NULL; \
+ } \
+} while (0)
+
+#define VM_PAGE_TO_PV_LIST_LOCK(m) \
+ PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
+
+struct pmap kernel_pmap_store;
+
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+
+int nkpt;
+SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
+ "Number of kernel page table pages allocated on bootup");
+
+static int ndmpdp;
+vm_paddr_t dmaplimit;
+vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
+pt_entry_t pg_nx;
+
+static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
+
+static int pat_works = 1;
+SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
+ "Is page attribute table fully functional?");
+
+static int pg_ps_enabled = 1;
+SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+ &pg_ps_enabled, 0, "Are large page mappings enabled?");
+
+#define PAT_INDEX_SIZE 8
+static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
+
+static u_int64_t KPTphys; /* phys addr of kernel level 1 */
+static u_int64_t KPDphys; /* phys addr of kernel level 2 */
+u_int64_t KPDPphys; /* phys addr of kernel level 3 */
+u_int64_t KPML4phys; /* phys addr of kernel level 4 */
+
+static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
+static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
+static int ndmpdpphys; /* number of DMPDPphys pages */
+
+/*
+ * pmap_mapdev support pre initialization (i.e. console)
+ */
+#define PMAP_PREINIT_MAPPING_COUNT 8
+static struct pmap_preinit_mapping {
+ vm_paddr_t pa;
+ vm_offset_t va;
+ vm_size_t sz;
+ int mode;
+} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
+static int pmap_initialized;
+
+/*
+ * Data for the pv entry allocation mechanism.
+ * Updates to pv_invl_gen are protected by the pv_list_locks[]
+ * elements, but reads are not.
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+static struct mtx __exclusive_cache_line pv_chunks_mutex;
+static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
+static u_long pv_invl_gen[NPV_LIST_LOCKS];
+static struct md_page *pv_table;
+static struct md_page pv_dummy;
+
+/*
+ * All those kernel PT submaps that BSD is so fond of
+ */
+pt_entry_t *CMAP1 = NULL;
+caddr_t CADDR1 = 0;
+static vm_offset_t qframe = 0;
+static struct mtx qframe_mtx;
+
+static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
+
+int pmap_pcid_enabled = 1;
+SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+ &pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?");
+int invpcid_works = 0;
+SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
+ "Is the invpcid instruction available ?");
+
+int pti = 1;
+SYSCTL_INT(_vm_pmap, OID_AUTO, pti, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+ &pti, 0,
+ "Page Table Isolation enabled");
+static vm_object_t pti_obj;
+static pml4_entry_t *pti_pml4;
+static vm_pindex_t pti_pg_idx;
+static bool pti_finalized;
+
+static int
+pmap_pcid_save_cnt_proc(SYSCTL_HANDLER_ARGS)
+{
+ int i;
+ uint64_t res;
+
+ res = 0;
+ CPU_FOREACH(i) {
+ res += cpuid_to_pcpu[i]->pc_pm_save_cnt;
+ }
+ return (sysctl_handle_64(oidp, &res, 0, req));
+}
+SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RW |
+ CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU",
+ "Count of saved TLB context on switch");
+
+static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
+ LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
+static struct mtx invl_gen_mtx;
+static u_long pmap_invl_gen = 0;
+/* Fake lock object to satisfy turnstiles interface. */
+static struct lock_object invl_gen_ts = {
+ .lo_name = "invlts",
+};
+
+static bool
+pmap_not_in_di(void)
+{
+
+ return (curthread->td_md.md_invl_gen.gen == 0);
+}
+
+#define PMAP_ASSERT_NOT_IN_DI() \
+ KASSERT(pmap_not_in_di(), ("DI already started"))
+
+/*
+ * Start a new Delayed Invalidation (DI) block of code, executed by
+ * the current thread. Within a DI block, the current thread may
+ * destroy both the page table and PV list entries for a mapping and
+ * then release the corresponding PV list lock before ensuring that
+ * the mapping is flushed from the TLBs of any processors with the
+ * pmap active.
+ */
+static void
+pmap_delayed_invl_started(void)
+{
+ struct pmap_invl_gen *invl_gen;
+ u_long currgen;
+
+ invl_gen = &curthread->td_md.md_invl_gen;
+ PMAP_ASSERT_NOT_IN_DI();
+ mtx_lock(&invl_gen_mtx);
+ if (LIST_EMPTY(&pmap_invl_gen_tracker))
+ currgen = pmap_invl_gen;
+ else
+ currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
+ invl_gen->gen = currgen + 1;
+ LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
+ mtx_unlock(&invl_gen_mtx);
+}
+
+/*
+ * Finish the DI block, previously started by the current thread. All
+ * required TLB flushes for the pages marked by
+ * pmap_delayed_invl_page() must be finished before this function is
+ * called.
+ *
+ * This function works by bumping the global DI generation number to
+ * the generation number of the current thread's DI, unless there is a
+ * pending DI that started earlier. In the latter case, bumping the
+ * global DI generation number would incorrectly signal that the
+ * earlier DI had finished. Instead, this function bumps the earlier
+ * DI's generation number to match the generation number of the
+ * current thread's DI.
+ */
+static void
+pmap_delayed_invl_finished(void)
+{
+ struct pmap_invl_gen *invl_gen, *next;
+ struct turnstile *ts;
+
+ invl_gen = &curthread->td_md.md_invl_gen;
+ KASSERT(invl_gen->gen != 0, ("missed invl_started"));
+ mtx_lock(&invl_gen_mtx);
+ next = LIST_NEXT(invl_gen, link);
+ if (next == NULL) {
+ turnstile_chain_lock(&invl_gen_ts);
+ ts = turnstile_lookup(&invl_gen_ts);
+ pmap_invl_gen = invl_gen->gen;
+ if (ts != NULL) {
+ turnstile_broadcast(ts, TS_SHARED_QUEUE);
+ turnstile_unpend(ts, TS_SHARED_LOCK);
+ }
+ turnstile_chain_unlock(&invl_gen_ts);
+ } else {
+ next->gen = invl_gen->gen;
+ }
+ LIST_REMOVE(invl_gen, link);
+ mtx_unlock(&invl_gen_mtx);
+ invl_gen->gen = 0;
+}
+
+#ifdef PV_STATS
+static long invl_wait;
+SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait, CTLFLAG_RD, &invl_wait, 0,
+ "Number of times DI invalidation blocked pmap_remove_all/write");
+#endif
+
+static u_long *
+pmap_delayed_invl_genp(vm_page_t m)
+{
+
+ return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);
+}
+
+/*
+ * Ensure that all currently executing DI blocks, that need to flush
+ * TLB for the given page m, actually flushed the TLB at the time the
+ * function returned. If the page m has an empty PV list and we call
+ * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
+ * valid mapping for the page m in either its page table or TLB.
+ *
+ * This function works by blocking until the global DI generation
+ * number catches up with the generation number associated with the
+ * given page m and its PV list. Since this function's callers
+ * typically own an object lock and sometimes own a page lock, it
+ * cannot sleep. Instead, it blocks on a turnstile to relinquish the
+ * processor.
+ */
+static void
+pmap_delayed_invl_wait(vm_page_t m)
+{
+ struct turnstile *ts;
+ u_long *m_gen;
+#ifdef PV_STATS
+ bool accounted = false;
+#endif
+
+ m_gen = pmap_delayed_invl_genp(m);
+ while (*m_gen > pmap_invl_gen) {
+#ifdef PV_STATS
+ if (!accounted) {
+ atomic_add_long(&invl_wait, 1);
+ accounted = true;
+ }
+#endif
+ ts = turnstile_trywait(&invl_gen_ts);
+ if (*m_gen > pmap_invl_gen)
+ turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
+ else
+ turnstile_cancel(ts);
+ }
+}
+
+/*
+ * Mark the page m's PV list as participating in the current thread's
+ * DI block. Any threads concurrently using m's PV list to remove or
+ * restrict all mappings to m will wait for the current thread's DI
+ * block to complete before proceeding.
+ *
+ * The function works by setting the DI generation number for m's PV
+ * list to at least the DI generation number of the current thread.
+ * This forces a caller of pmap_delayed_invl_wait() to block until
+ * current thread calls pmap_delayed_invl_finished().
+ */
+static void
+pmap_delayed_invl_page(vm_page_t m)
+{
+ u_long gen, *m_gen;
+
+ rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED);
+ gen = curthread->td_md.md_invl_gen.gen;
+ if (gen == 0)
+ return;
+ m_gen = pmap_delayed_invl_genp(m);
+ if (*m_gen < gen)
+ *m_gen = gen;
+}
+
+/*
+ * Crashdump maps.
+ */
+static caddr_t crashdumpmap;
+
+/*
+ * Internal flags for pmap_enter()'s helper functions.
+ */
+#define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
+#define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
+
+static void free_pv_chunk(struct pv_chunk *pc);
+static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
+static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
+static int popcnt_pc_map_pq(uint64_t *map);
+static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
+static void reserve_pv_entries(pmap_t pmap, int needed,
+ struct rwlock **lockp);
+static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+ struct rwlock **lockp);
+static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
+ u_int flags, struct rwlock **lockp);
+#if VM_NRESERVLEVEL > 0
+static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+ struct rwlock **lockp);
+#endif
+static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
+static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
+ vm_offset_t va);
+
+static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
+static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
+static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
+ vm_offset_t va, struct rwlock **lockp);
+static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
+ vm_offset_t va);
+static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, struct rwlock **lockp);
+static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
+ u_int flags, vm_page_t m, struct rwlock **lockp);
+static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
+static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
+static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
+ pd_entry_t pde);
+static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
+static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
+#if VM_NRESERVLEVEL > 0
+static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
+ struct rwlock **lockp);
+#endif
+static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
+ vm_prot_t prot);
+static void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask);
+static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
+ bool exec);
+static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
+static pd_entry_t *pmap_pti_pde(vm_offset_t va);
+static void pmap_pti_wire_pte(void *pte);
+static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
+ struct spglist *free, struct rwlock **lockp);
+static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
+ pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
+static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
+static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
+ struct spglist *free);
+static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+ pd_entry_t *pde, struct spglist *free,
+ struct rwlock **lockp);
+static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, struct rwlock **lockp);
+static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
+ pd_entry_t newpde);
+static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
+
+static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
+ struct rwlock **lockp);
+static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va,
+ struct rwlock **lockp);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
+ struct rwlock **lockp);
+
+static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ struct spglist *free);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
+static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
+
+/*
+ * Move the kernel virtual free pointer to the next
+ * 2MB. This is used to help improve performance
+ * by using a large (2MB) page for much of the kernel
+ * (.text, .data, .bss)
+ */
+static vm_offset_t
+pmap_kmem_choose(vm_offset_t addr)
+{
+ vm_offset_t newaddr = addr;
+
+ newaddr = roundup2(addr, NBPDR);
+ return (newaddr);
+}
+
+/********************/
+/* Inline functions */
+/********************/
+
+/* Return a non-clipped PD index for a given VA */
+static __inline vm_pindex_t
+pmap_pde_pindex(vm_offset_t va)
+{
+ return (va >> PDRSHIFT);
+}
+
+
+/* Return a pointer to the PML4 slot that corresponds to a VA */
+static __inline pml4_entry_t *
+pmap_pml4e(pmap_t pmap, vm_offset_t va)
+{
+
+ return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
+}
+
+/* Return a pointer to the PDP slot that corresponds to a VA */
+static __inline pdp_entry_t *
+pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
+{
+ pdp_entry_t *pdpe;
+
+ pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
+ return (&pdpe[pmap_pdpe_index(va)]);
+}
+
+/* Return a pointer to the PDP slot that corresponds to a VA */
+static __inline pdp_entry_t *
+pmap_pdpe(pmap_t pmap, vm_offset_t va)
+{
+ pml4_entry_t *pml4e;
+ pt_entry_t PG_V;
+
+ PG_V = pmap_valid_bit(pmap);
+ pml4e = pmap_pml4e(pmap, va);
+ if ((*pml4e & PG_V) == 0)
+ return (NULL);
+ return (pmap_pml4e_to_pdpe(pml4e, va));
+}
+
+/* Return a pointer to the PD slot that corresponds to a VA */
+static __inline pd_entry_t *
+pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
+{
+ pd_entry_t *pde;
+
+ pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
+ return (&pde[pmap_pde_index(va)]);
+}
+
+/* Return a pointer to the PD slot that corresponds to a VA */
+static __inline pd_entry_t *
+pmap_pde(pmap_t pmap, vm_offset_t va)
+{
+ pdp_entry_t *pdpe;
+ pt_entry_t PG_V;
+
+ PG_V = pmap_valid_bit(pmap);
+ pdpe = pmap_pdpe(pmap, va);
+ if (pdpe == NULL || (*pdpe & PG_V) == 0)
+ return (NULL);
+ return (pmap_pdpe_to_pde(pdpe, va));
+}
+
+/* Return a pointer to the PT slot that corresponds to a VA */
+static __inline pt_entry_t *
+pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
+{
+ pt_entry_t *pte;
+
+ pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
+ return (&pte[pmap_pte_index(va)]);
+}
+
+/* Return a pointer to the PT slot that corresponds to a VA */
+static __inline pt_entry_t *
+pmap_pte(pmap_t pmap, vm_offset_t va)
+{
+ pd_entry_t *pde;
+ pt_entry_t PG_V;
+
+ PG_V = pmap_valid_bit(pmap);
+ pde = pmap_pde(pmap, va);
+ if (pde == NULL || (*pde & PG_V) == 0)
+ return (NULL);
+ if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
+ return ((pt_entry_t *)pde);
+ return (pmap_pde_to_pte(pde, va));
+}
+
+static __inline void
+pmap_resident_count_inc(pmap_t pmap, int count)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ pmap->pm_stats.resident_count += count;
+}
+
+static __inline void
+pmap_resident_count_dec(pmap_t pmap, int count)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT(pmap->pm_stats.resident_count >= count,
+ ("pmap %p resident count underflow %ld %d", pmap,
+ pmap->pm_stats.resident_count, count));
+ pmap->pm_stats.resident_count -= count;
+}
+
+PMAP_INLINE pt_entry_t *
+vtopte(vm_offset_t va)
+{
+ u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
+
+ KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va));
+
+ return (PTmap + ((va >> PAGE_SHIFT) & mask));
+}
+
+static __inline pd_entry_t *
+vtopde(vm_offset_t va)
+{
+ u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
+
+ KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va));
+
+ return (PDmap + ((va >> PDRSHIFT) & mask));
+}
+
+static u_int64_t
+allocpages(vm_paddr_t *firstaddr, int n)
+{
+ u_int64_t ret;
+
+ ret = *firstaddr;
+ bzero((void *)ret, n * PAGE_SIZE);
+ *firstaddr += n * PAGE_SIZE;
+ return (ret);
+}
+
+CTASSERT(powerof2(NDMPML4E));
+
+/* number of kernel PDP slots */
+#define NKPDPE(ptpgs) howmany(ptpgs, NPDEPG)
+
+static void
+nkpt_init(vm_paddr_t addr)
+{
+ int pt_pages;
+
+#ifdef NKPT
+ pt_pages = NKPT;
+#else
+ pt_pages = howmany(addr, 1 << PDRSHIFT);
+ pt_pages += NKPDPE(pt_pages);
+
+ /*
+ * Add some slop beyond the bare minimum required for bootstrapping
+ * the kernel.
+ *
+ * This is quite important when allocating KVA for kernel modules.
+ * The modules are required to be linked in the negative 2GB of
+ * the address space. If we run out of KVA in this region then
+ * pmap_growkernel() will need to allocate page table pages to map
+ * the entire 512GB of KVA space which is an unnecessary tax on
+ * physical memory.
+ *
+ * Secondly, device memory mapped as part of setting up the low-
+ * level console(s) is taken from KVA, starting at virtual_avail.
+ * This is because cninit() is called after pmap_bootstrap() but
+ * before vm_init() and pmap_init(). 20MB for a frame buffer is
+ * not uncommon.
+ */
+ pt_pages += 32; /* 64MB additional slop. */
+#endif
+ nkpt = pt_pages;
+}
+
+static void
+create_pagetables(vm_paddr_t *firstaddr)
+{
+ int i, j, ndm1g, nkpdpe;
+ pt_entry_t *pt_p;
+ pd_entry_t *pd_p;
+ pdp_entry_t *pdp_p;
+ pml4_entry_t *p4_p;
+
+ /* Allocate page table pages for the direct map */
+ ndmpdp = howmany(ptoa(Maxmem), NBPDP);
+ if (ndmpdp < 4) /* Minimum 4GB of dirmap */
+ ndmpdp = 4;
+ ndmpdpphys = howmany(ndmpdp, NPDPEPG);
+ if (ndmpdpphys > NDMPML4E) {
+ /*
+ * Each NDMPML4E allows 512 GB, so limit to that,
+ * and then readjust ndmpdp and ndmpdpphys.
+ */
+ printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
+ Maxmem = atop(NDMPML4E * NBPML4);
+ ndmpdpphys = NDMPML4E;
+ ndmpdp = NDMPML4E * NPDEPG;
+ }
+ DMPDPphys = allocpages(firstaddr, ndmpdpphys);
+ ndm1g = 0;
+ if ((amd_feature & AMDID_PAGE1GB) != 0)
+ ndm1g = ptoa(Maxmem) >> PDPSHIFT;
+ if (ndm1g < ndmpdp)
+ DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
+ dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
+
+ /* Allocate pages */
+ KPML4phys = allocpages(firstaddr, 1);
+ KPDPphys = allocpages(firstaddr, NKPML4E);
+
+ /*
+ * Allocate the initial number of kernel page table pages required to
+ * bootstrap. We defer this until after all memory-size dependent
+ * allocations are done (e.g. direct map), so that we don't have to
+ * build in too much slop in our estimate.
+ *
+ * Note that when NKPML4E > 1, we have an empty page underneath
+ * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed)
+ * pages. (pmap_enter requires a PD page to exist for each KPML4E.)
+ */
+ nkpt_init(*firstaddr);
+ nkpdpe = NKPDPE(nkpt);
+
+ KPTphys = allocpages(firstaddr, nkpt);
+ KPDphys = allocpages(firstaddr, nkpdpe);
+
+ /* Fill in the underlying page table pages */
+ /* Nominally read-only (but really R/W) from zero to physfree */
+ /* XXX not fully used, underneath 2M pages */
+ pt_p = (pt_entry_t *)KPTphys;
+ for (i = 0; ptoa(i) < *firstaddr; i++)
+ pt_p[i] = ptoa(i) | X86_PG_RW | X86_PG_V | pg_g;
+
+ /* Now map the page tables at their location within PTmap */
+ pd_p = (pd_entry_t *)KPDphys;
+ for (i = 0; i < nkpt; i++)
+ pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
+
+ /* Map from zero to end of allocations under 2M pages */
+ /* This replaces some of the KPTphys entries above */
+ for (i = 0; (i << PDRSHIFT) < *firstaddr; i++)
+ pd_p[i] = (i << PDRSHIFT) | X86_PG_RW | X86_PG_V | PG_PS |
+ pg_g;
+
+ /* And connect up the PD to the PDP (leaving room for L4 pages) */
+ pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE));
+ for (i = 0; i < nkpdpe; i++)
+ pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V |
+ PG_U;
+
+ /*
+ * Now, set up the direct map region using 2MB and/or 1GB pages. If
+ * the end of physical memory is not aligned to a 1GB page boundary,
+ * then the residual physical memory is mapped with 2MB pages. Later,
+ * if pmap_mapdev{_attr}() uses the direct map for non-write-back
+ * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
+ * that are partially used.
+ */
+ pd_p = (pd_entry_t *)DMPDphys;
+ for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
+ pd_p[j] = (vm_paddr_t)i << PDRSHIFT;
+ /* Preset PG_M and PG_A because demotion expects it. */
+ pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
+ X86_PG_M | X86_PG_A | pg_nx;
+ }
+ pdp_p = (pdp_entry_t *)DMPDPphys;
+ for (i = 0; i < ndm1g; i++) {
+ pdp_p[i] = (vm_paddr_t)i << PDPSHIFT;
+ /* Preset PG_M and PG_A because demotion expects it. */
+ pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
+ X86_PG_M | X86_PG_A | pg_nx;
+ }
+ for (j = 0; i < ndmpdp; i++, j++) {
+ pdp_p[i] = DMPDphys + ptoa(j);
+ pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_U;
+ }
+
+ /* And recursively map PML4 to itself in order to get PTmap */
+ p4_p = (pml4_entry_t *)KPML4phys;
+ p4_p[PML4PML4I] = KPML4phys;
+ p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | PG_U;
+
+ /* Connect the Direct Map slot(s) up to the PML4. */
+ for (i = 0; i < ndmpdpphys; i++) {
+ p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
+ p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | PG_U;
+ }
+
+ /* Connect the KVA slots up to the PML4 */
+ for (i = 0; i < NKPML4E; i++) {
+ p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
+ p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V | PG_U;
+ }
+}
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ *
+ * On amd64 this is called after mapping has already been enabled
+ * and just syncs the pmap module with what has already been done.
+ * [We can't call it easily with mapping off since the kernel is not
+ * mapped with PA == VA, hence we would have to relocate every address
+ * from the linked base (virtual) address "KERNBASE" to the actual
+ * (physical) address starting relative to 0]
+ */
+void
+pmap_bootstrap(vm_paddr_t *firstaddr)
+{
+ vm_offset_t va;
+ pt_entry_t *pte;
+ int i;
+
+ if (!pti)
+ pg_g = X86_PG_G;
+
+ /*
+ * Create an initial set of page tables to run the kernel in.
+ */
+ create_pagetables(firstaddr);
+
+ /*
+ * Add a physical memory segment (vm_phys_seg) corresponding to the
+ * preallocated kernel page table pages so that vm_page structures
+ * representing these pages will be created. The vm_page structures
+ * are required for promotion of the corresponding kernel virtual
+ * addresses to superpage mappings.
+ */
+ vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
+
+ virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;
+ virtual_avail = pmap_kmem_choose(virtual_avail);
+
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+
+ /* XXX do %cr0 as well */
+ load_cr4(rcr4() | CR4_PGE);
+ load_cr3(KPML4phys);
+ if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
+ load_cr4(rcr4() | CR4_SMEP);
+
+ /*
+ * Initialize the kernel pmap (which is statically allocated).
+ */
+ PMAP_LOCK_INIT(kernel_pmap);
+ kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
+ kernel_pmap->pm_cr3 = KPML4phys;
+ kernel_pmap->pm_ucr3 = PMAP_NO_CR3;
+ CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
+ TAILQ_INIT(&kernel_pmap->pm_pvchunk);
+ kernel_pmap->pm_flags = pmap_flags;
+
+ /*
+ * Initialize the TLB invalidations generation number lock.
+ */
+ mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
+
+ /*
+ * Reserve some special page table entries/VA space for temporary
+ * mapping of pages.
+ */
+#define SYSMAP(c, p, v, n) \
+ v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
+
+ va = virtual_avail;
+ pte = vtopte(va);
+
+ /*
+ * Crashdump maps. The first page is reused as CMAP1 for the
+ * memory test.
+ */
+ SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
+ CADDR1 = crashdumpmap;
+
+ virtual_avail = va;
+
+ /*
+ * Initialize the PAT MSR.
+ * pmap_init_pat() clears and sets CR4_PGE, which, as a
+ * side-effect, invalidates stale PG_G TLB entries that might
+ * have been created in our pre-boot environment.
+ */
+ pmap_init_pat();
+
+ /* Initialize TLB Context Id. */
+ TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled);
+ if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) {
+ /* Check for INVPCID support */
+ invpcid_works = (cpu_stdext_feature & CPUID_STDEXT_INVPCID)
+ != 0;
+ for (i = 0; i < MAXCPU; i++) {
+ kernel_pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN;
+ kernel_pmap->pm_pcids[i].pm_gen = 1;
+ }
+ PCPU_SET(pcid_next, PMAP_PCID_KERN + 1);
+ PCPU_SET(pcid_gen, 1);
+ /*
+ * pcpu area for APs is zeroed during AP startup.
+ * pc_pcid_next and pc_pcid_gen are initialized by AP
+ * during pcpu setup.
+ */
+ load_cr4(rcr4() | CR4_PCIDE);
+ } else {
+ pmap_pcid_enabled = 0;
+ }
+}
+
+/*
+ * Setup the PAT MSR.
+ */
+void
+pmap_init_pat(void)
+{
+ int pat_table[PAT_INDEX_SIZE];
+ uint64_t pat_msr;
+ u_long cr0, cr4;
+ int i;
+
+ /* Bail if this CPU doesn't implement PAT. */
+ if ((cpu_feature & CPUID_PAT) == 0)
+ panic("no PAT??");
+
+ /* Set default PAT index table. */
+ for (i = 0; i < PAT_INDEX_SIZE; i++)
+ pat_table[i] = -1;
+ pat_table[PAT_WRITE_BACK] = 0;
+ pat_table[PAT_WRITE_THROUGH] = 1;
+ pat_table[PAT_UNCACHEABLE] = 3;
+ pat_table[PAT_WRITE_COMBINING] = 3;
+ pat_table[PAT_WRITE_PROTECTED] = 3;
+ pat_table[PAT_UNCACHED] = 3;
+
+ /* Initialize default PAT entries. */
+ pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
+ PAT_VALUE(1, PAT_WRITE_THROUGH) |
+ PAT_VALUE(2, PAT_UNCACHED) |
+ PAT_VALUE(3, PAT_UNCACHEABLE) |
+ PAT_VALUE(4, PAT_WRITE_BACK) |
+ PAT_VALUE(5, PAT_WRITE_THROUGH) |
+ PAT_VALUE(6, PAT_UNCACHED) |
+ PAT_VALUE(7, PAT_UNCACHEABLE);
+
+ if (pat_works) {
+ /*
+ * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
+ * Program 5 and 6 as WP and WC.
+ * Leave 4 and 7 as WB and UC.
+ */
+ pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
+ pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
+ PAT_VALUE(6, PAT_WRITE_COMBINING);
+ pat_table[PAT_UNCACHED] = 2;
+ pat_table[PAT_WRITE_PROTECTED] = 5;
+ pat_table[PAT_WRITE_COMBINING] = 6;
+ } else {
+ /*
+ * Just replace PAT Index 2 with WC instead of UC-.
+ */
+ pat_msr &= ~PAT_MASK(2);
+ pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
+ pat_table[PAT_WRITE_COMBINING] = 2;
+ }
+
+ /* Disable PGE. */
+ cr4 = rcr4();
+ load_cr4(cr4 & ~CR4_PGE);
+
+ /* Disable caches (CD = 1, NW = 0). */
+ cr0 = rcr0();
+ load_cr0((cr0 & ~CR0_NW) | CR0_CD);
+
+ /* Flushes caches and TLBs. */
+ wbinvd();
+ invltlb();
+
+ /* Update PAT and index table. */
+ wrmsr(MSR_PAT, pat_msr);
+ for (i = 0; i < PAT_INDEX_SIZE; i++)
+ pat_index[i] = pat_table[i];
+
+ /* Flush caches and TLBs again. */
+ wbinvd();
+ invltlb();
+
+ /* Restore caches and PGE. */
+ load_cr0(cr0);
+ load_cr4(cr4);
+}
+
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pat_mode = PAT_WRITE_BACK;
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init(void)
+{
+ struct pmap_preinit_mapping *ppim;
+ vm_page_t mpte;
+ vm_size_t s;
+ int error, i, pv_npg;
+
+ /*
+ * Initialize the vm page array entries for the kernel pmap's
+ * page table pages.
+ */
+ for (i = 0; i < nkpt; i++) {
+ mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
+ KASSERT(mpte >= vm_page_array &&
+ mpte < &vm_page_array[vm_page_array_size],
+ ("pmap_init: page table page is out of range"));
+ mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
+ mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
+ }
+
+ /*
+ * If the kernel is running on a virtual machine, then it must assume
+ * that MCA is enabled by the hypervisor. Moreover, the kernel must
+ * be prepared for the hypervisor changing the vendor and family that
+ * are reported by CPUID. Consequently, the workaround for AMD Family
+ * 10h Erratum 383 is enabled if the processor's feature set does not
+ * include at least one feature that is only supported by older Intel
+ * or newer AMD processors.
+ */
+ if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
+ (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
+ CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
+ AMDID2_FMA4)) == 0)
+ workaround_erratum383 = 1;
+
+ /*
+ * Are large page mappings enabled?
+ */
+ TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
+ if (pg_ps_enabled) {
+ KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
+ ("pmap_init: can't assign to pagesizes[1]"));
+ pagesizes[1] = NBPDR;
+ }
+
+ /*
+ * Initialize the pv chunk list mutex.
+ */
+ mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
+
+ /*
+ * Initialize the pool of pv list locks.
+ */
+ for (i = 0; i < NPV_LIST_LOCKS; i++)
+ rw_init(&pv_list_locks[i], "pmap pv list");
+
+ /*
+ * Calculate the size of the pv head table for superpages.
+ */
+ pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
+
+ /*
+ * Allocate memory for the pv head table for superpages.
+ */
+ s = (vm_size_t)(pv_npg * sizeof(struct md_page));
+ s = round_page(s);
+ pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+ M_WAITOK | M_ZERO);
+ for (i = 0; i < pv_npg; i++)
+ TAILQ_INIT(&pv_table[i].pv_list);
+ TAILQ_INIT(&pv_dummy.pv_list);
+
+ pmap_initialized = 1;
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == 0)
+ continue;
+ /* Make the direct map consistent */
+ if (ppim->pa < dmaplimit && ppim->pa + ppim->sz < dmaplimit) {
+ (void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
+ ppim->sz, ppim->mode);
+ }
+ if (!bootverbose)
+ continue;
+ printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
+ ppim->pa, ppim->va, ppim->sz, ppim->mode);
+ }
+
+ mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
+ error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
+ (vmem_addr_t *)&qframe);
+ if (error != 0)
+ panic("qframe allocation failed");
+}
+
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
+ "2MB page mapping counters");
+
+static u_long pmap_pde_demotions;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
+ &pmap_pde_demotions, 0, "2MB page demotions");
+
+static u_long pmap_pde_mappings;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
+ &pmap_pde_mappings, 0, "2MB page mappings");
+
+static u_long pmap_pde_p_failures;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
+ &pmap_pde_p_failures, 0, "2MB page promotion failures");
+
+static u_long pmap_pde_promotions;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
+ &pmap_pde_promotions, 0, "2MB page promotions");
+
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD, 0,
+ "1GB page mapping counters");
+
+static u_long pmap_pdpe_demotions;
+SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
+ &pmap_pdpe_demotions, 0, "1GB page demotions");
+
+/***************************************************
+ * Low level helper routines.....
+ ***************************************************/
+
+static pt_entry_t
+pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
+{
+ int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ case PT_RVI:
+ /* Verify that both PAT bits are not set at the same time */
+ KASSERT((entry & x86_pat_bits) != x86_pat_bits,
+ ("Invalid PAT bits in entry %#lx", entry));
+
+ /* Swap the PAT bits if one of them is set */
+ if ((entry & x86_pat_bits) != 0)
+ entry ^= x86_pat_bits;
+ break;
+ case PT_EPT:
+ /*
+ * Nothing to do - the memory attributes are represented
+ * the same way for regular pages and superpages.
+ */
+ break;
+ default:
+ panic("pmap_switch_pat_bits: bad pm_type %d", pmap->pm_type);
+ }
+
+ return (entry);
+}
+
+/*
+ * Determine the appropriate bits to set in a PTE or PDE for a specified
+ * caching mode.
+ */
+int
+pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
+{
+ int cache_bits, pat_flag, pat_idx;
+
+ if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
+ panic("Unknown caching mode %d\n", mode);
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ case PT_RVI:
+ /* The PAT bit is different for PTE's and PDE's. */
+ pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
+
+ /* Map the caching mode to a PAT index. */
+ pat_idx = pat_index[mode];
+
+ /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
+ cache_bits = 0;
+ if (pat_idx & 0x4)
+ cache_bits |= pat_flag;
+ if (pat_idx & 0x2)
+ cache_bits |= PG_NC_PCD;
+ if (pat_idx & 0x1)
+ cache_bits |= PG_NC_PWT;
+ break;
+
+ case PT_EPT:
+ cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
+ break;
+
+ default:
+ panic("unsupported pmap type %d", pmap->pm_type);
+ }
+
+ return (cache_bits);
+}
+
+static int
+pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
+{
+ int mask;
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ case PT_RVI:
+ mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
+ break;
+ case PT_EPT:
+ mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
+ break;
+ default:
+ panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type);
+ }
+
+ return (mask);
+}
+
+bool
+pmap_ps_enabled(pmap_t pmap)
+{
+
+ return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
+}
+
+static void
+pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
+{
+
+ switch (pmap->pm_type) {
+ case PT_X86:
+ break;
+ case PT_RVI:
+ case PT_EPT:
+ /*
+ * XXX
+ * This is a little bogus since the generation number is
+ * supposed to be bumped up when a region of the address
+ * space is invalidated in the page tables.
+ *
+ * In this case the old PDE entry is valid but yet we want
+ * to make sure that any mappings using the old entry are
+ * invalidated in the TLB.
+ *
+ * The reason this works as expected is because we rendezvous
+ * "all" host cpus and force any vcpu context to exit as a
+ * side-effect.
+ */
+ atomic_add_acq_long(&pmap->pm_eptgen, 1);
+ break;
+ default:
+ panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type);
+ }
+ pde_store(pde, newpde);
+}
+
+/*
+ * After changing the page size for the specified virtual address in the page
+ * table, flush the corresponding entries from the processor's TLB. Only the
+ * calling processor's TLB is affected.
+ *
+ * The calling thread must be pinned to a processor.
+ */
+static void
+pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
+{
+ pt_entry_t PG_G;
+
+ if (pmap_type_guest(pmap))
+ return;
+
+ KASSERT(pmap->pm_type == PT_X86,
+ ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type));
+
+ PG_G = pmap_global_bit(pmap);
+
+ if ((newpde & PG_PS) == 0)
+ /* Demotion: flush a specific 2MB page mapping. */
+ invlpg(va);
+ else if ((newpde & PG_G) == 0)
+ /*
+ * Promotion: flush every 4KB page mapping from the TLB
+ * because there are too many to flush individually.
+ */
+ invltlb();
+ else {
+ /*
+ * Promotion: flush every 4KB page mapping from the TLB,
+ * including any global (PG_G) mappings.
+ */
+ invltlb_glob();
+ }
+}
+#ifdef SMP
+
+/*
+ * For SMP, these functions have to use the IPI mechanism for coherence.
+ *
+ * N.B.: Before calling any of the following TLB invalidation functions,
+ * the calling processor must ensure that all stores updating a non-
+ * kernel page table are globally performed. Otherwise, another
+ * processor could cache an old, pre-update entry without being
+ * invalidated. This can happen one of two ways: (1) The pmap becomes
+ * active on another processor after its pm_active field is checked by
+ * one of the following functions but before a store updating the page
+ * table is globally performed. (2) The pmap becomes active on another
+ * processor before its pm_active field is checked but due to
+ * speculative loads one of the following functions stills reads the
+ * pmap as inactive on the other processor.
+ *
+ * The kernel page table is exempt because its pm_active field is
+ * immutable. The kernel page table is always active on every
+ * processor.
+ */
+
+/*
+ * Interrupt the cpus that are executing in the guest context.
+ * This will force the vcpu to exit and the cached EPT mappings
+ * will be invalidated by the host before the next vmresume.
+ */
+static __inline void
+pmap_invalidate_ept(pmap_t pmap)
+{
+ int ipinum;
+
+ sched_pin();
+ KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
+ ("pmap_invalidate_ept: absurd pm_active"));
+
+ /*
+ * The TLB mappings associated with a vcpu context are not
+ * flushed each time a different vcpu is chosen to execute.
+ *
+ * This is in contrast with a process's vtop mappings that
+ * are flushed from the TLB on each context switch.
+ *
+ * Therefore we need to do more than just a TLB shootdown on
+ * the active cpus in 'pmap->pm_active'. To do this we keep
+ * track of the number of invalidations performed on this pmap.
+ *
+ * Each vcpu keeps a cache of this counter and compares it
+ * just before a vmresume. If the counter is out-of-date an
+ * invept will be done to flush stale mappings from the TLB.
+ */
+ atomic_add_acq_long(&pmap->pm_eptgen, 1);
+
+ /*
+ * Force the vcpu to exit and trap back into the hypervisor.
+ */
+ ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK;
+ ipi_selected(pmap->pm_active, ipinum);
+ sched_unpin();
+}
+
+void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+ cpuset_t *mask;
+ struct invpcid_descr d;
+ uint64_t kcr3, ucr3;
+<<<<<<< HEAD
+=======
+ uint32_t pcid;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ u_int cpuid, i;
+
+ if (pmap_type_guest(pmap)) {
+ pmap_invalidate_ept(pmap);
+ return;
+ }
+
+ KASSERT(pmap->pm_type == PT_X86,
+ ("pmap_invalidate_page: invalid type %d", pmap->pm_type));
+
+ sched_pin();
+ if (pmap == kernel_pmap) {
+ invlpg(va);
+ mask = &all_cpus;
+ } else {
+ cpuid = PCPU_GET(cpuid);
+ if (pmap == PCPU_GET(curpmap)) {
+ invlpg(va);
+<<<<<<< HEAD
+ if (pmap_pcid_enabled && pti) {
+ critical_enter();
+ if (invpcid_works) {
+ d.pcid = pmap->pm_pcids[cpuid].pm_pcid |
+ PMAP_PCID_USER_PT;
+=======
+ if (pmap_pcid_enabled && pmap->pm_ucr3 != PMAP_NO_CR3) {
+ /*
+ * Disable context switching. pm_pcid
+ * is recalculated on switch, which
+ * might make us use wrong pcid below.
+ */
+ critical_enter();
+ pcid = pmap->pm_pcids[cpuid].pm_pcid;
+
+ if (invpcid_works) {
+ d.pcid = pcid | PMAP_PCID_USER_PT;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ d.pad = 0;
+ d.addr = va;
+ invpcid(&d, INVPCID_ADDR);
+ } else {
+<<<<<<< HEAD
+ kcr3 = pmap->pm_cr3 | pmap->pm_pcids[
+ cpuid].pm_pcid | CR3_PCID_SAVE;
+ ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[
+ cpuid].pm_pcid | PMAP_PCID_USER_PT |
+ CR3_PCID_SAVE;
+=======
+ kcr3 = pmap->pm_cr3 | pcid |
+ CR3_PCID_SAVE;
+ ucr3 = pmap->pm_ucr3 | pcid |
+ PMAP_PCID_USER_PT | CR3_PCID_SAVE;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ pmap_pti_pcid_invlpg(ucr3, kcr3, va);
+ }
+ critical_exit();
+ }
+ } else if (pmap_pcid_enabled)
+ pmap->pm_pcids[cpuid].pm_gen = 0;
+ if (pmap_pcid_enabled) {
+ CPU_FOREACH(i) {
+ if (cpuid != i)
+ pmap->pm_pcids[i].pm_gen = 0;
+ }
+ }
+ mask = &pmap->pm_active;
+ }
+ smp_masked_invlpg(*mask, va, pmap);
+ sched_unpin();
+}
+
+/* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
+#define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE)
+
+void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ cpuset_t *mask;
+ struct invpcid_descr d;
+ vm_offset_t addr;
+ uint64_t kcr3, ucr3;
+<<<<<<< HEAD
+=======
+ uint32_t pcid;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ u_int cpuid, i;
+
+ if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
+ pmap_invalidate_all(pmap);
+ return;
+ }
+
+ if (pmap_type_guest(pmap)) {
+ pmap_invalidate_ept(pmap);
+ return;
+ }
+
+ KASSERT(pmap->pm_type == PT_X86,
+ ("pmap_invalidate_range: invalid type %d", pmap->pm_type));
+
+ sched_pin();
+ cpuid = PCPU_GET(cpuid);
+ if (pmap == kernel_pmap) {
+ for (addr = sva; addr < eva; addr += PAGE_SIZE)
+ invlpg(addr);
+ mask = &all_cpus;
+ } else {
+ if (pmap == PCPU_GET(curpmap)) {
+ for (addr = sva; addr < eva; addr += PAGE_SIZE)
+ invlpg(addr);
+<<<<<<< HEAD
+ if (pmap_pcid_enabled && pti) {
+ critical_enter();
+ if (invpcid_works) {
+ d.pcid = pmap->pm_pcids[cpuid].
+ pm_pcid | PMAP_PCID_USER_PT;
+=======
+ if (pmap_pcid_enabled && pmap->pm_ucr3 != PMAP_NO_CR3) {
+ critical_enter();
+ pcid = pmap->pm_pcids[cpuid].pm_pcid;
+ if (invpcid_works) {
+ d.pcid = pcid | PMAP_PCID_USER_PT;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ d.pad = 0;
+ d.addr = sva;
+ for (; d.addr < eva; d.addr +=
+ PAGE_SIZE)
+ invpcid(&d, INVPCID_ADDR);
+ } else {
+<<<<<<< HEAD
+ kcr3 = pmap->pm_cr3 | pmap->pm_pcids[
+ cpuid].pm_pcid | CR3_PCID_SAVE;
+ ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[
+ cpuid].pm_pcid | PMAP_PCID_USER_PT |
+ CR3_PCID_SAVE;
+=======
+ kcr3 = pmap->pm_cr3 | pcid |
+ CR3_PCID_SAVE;
+ ucr3 = pmap->pm_ucr3 | pcid |
+ PMAP_PCID_USER_PT | CR3_PCID_SAVE;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ pmap_pti_pcid_invlrng(ucr3, kcr3, sva,
+ eva);
+ }
+ critical_exit();
+ }
+<<<<<<< HEAD
+
+=======
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ } else if (pmap_pcid_enabled) {
+ pmap->pm_pcids[cpuid].pm_gen = 0;
+ }
+ if (pmap_pcid_enabled) {
+ CPU_FOREACH(i) {
+ if (cpuid != i)
+ pmap->pm_pcids[i].pm_gen = 0;
+ }
+ }
+ mask = &pmap->pm_active;
+ }
+ smp_masked_invlpg_range(*mask, sva, eva, pmap);
+ sched_unpin();
+}
+
+void
+pmap_invalidate_all(pmap_t pmap)
+{
+ cpuset_t *mask;
+ struct invpcid_descr d;
+ uint64_t kcr3, ucr3;
+<<<<<<< HEAD
+=======
+ uint32_t pcid;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ u_int cpuid, i;
+
+ if (pmap_type_guest(pmap)) {
+ pmap_invalidate_ept(pmap);
+ return;
+ }
+
+ KASSERT(pmap->pm_type == PT_X86,
+ ("pmap_invalidate_all: invalid type %d", pmap->pm_type));
+
+ sched_pin();
+ if (pmap == kernel_pmap) {
+ if (pmap_pcid_enabled && invpcid_works) {
+ bzero(&d, sizeof(d));
+ invpcid(&d, INVPCID_CTXGLOB);
+ } else {
+ invltlb_glob();
+ }
+ mask = &all_cpus;
+ } else {
+ cpuid = PCPU_GET(cpuid);
+ if (pmap == PCPU_GET(curpmap)) {
+ if (pmap_pcid_enabled) {
+ critical_enter();
+<<<<<<< HEAD
+=======
+ pcid = pmap->pm_pcids[cpuid].pm_pcid;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ if (invpcid_works) {
+ d.pcid = pcid;
+ d.pad = 0;
+ d.addr = 0;
+ invpcid(&d, INVPCID_CTX);
+<<<<<<< HEAD
+ if (pti) {
+=======
+ if (pmap->pm_ucr3 != PMAP_NO_CR3) {
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ d.pcid |= PMAP_PCID_USER_PT;
+ invpcid(&d, INVPCID_CTX);
+ }
+ } else {
+<<<<<<< HEAD
+ kcr3 = pmap->pm_cr3 | pmap->pm_pcids[
+ cpuid].pm_pcid;
+ if (pti) {
+ ucr3 = pmap->pm_ucr3 |
+ pmap->pm_pcids[cpuid].
+ pm_pcid | PMAP_PCID_USER_PT;
+ pmap_pti_pcid_invalidate(ucr3,
+ kcr3);
+ } else
+ load_cr3(kcr3);
+
+=======
+ kcr3 = pmap->pm_cr3 | pcid;
+ ucr3 = pmap->pm_ucr3;
+ if (ucr3 != PMAP_NO_CR3) {
+ ucr3 |= pcid | PMAP_PCID_USER_PT;
+ pmap_pti_pcid_invalidate(ucr3,
+ kcr3);
+ } else {
+ load_cr3(kcr3);
+ }
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ }
+ critical_exit();
+ } else {
+ invltlb();
+ }
+ } else if (pmap_pcid_enabled) {
+ pmap->pm_pcids[cpuid].pm_gen = 0;
+ }
+ if (pmap_pcid_enabled) {
+ CPU_FOREACH(i) {
+ if (cpuid != i)
+ pmap->pm_pcids[i].pm_gen = 0;
+ }
+ }
+ mask = &pmap->pm_active;
+ }
+ smp_masked_invltlb(*mask, pmap);
+ sched_unpin();
+}
+
+void
+pmap_invalidate_cache(void)
+{
+
+ sched_pin();
+ wbinvd();
+ smp_cache_flush();
+ sched_unpin();
+}
+
+struct pde_action {
+ cpuset_t invalidate; /* processors that invalidate their TLB */
+ pmap_t pmap;
+ vm_offset_t va;
+ pd_entry_t *pde;
+ pd_entry_t newpde;
+ u_int store; /* processor that updates the PDE */
+};
+
+static void
+pmap_update_pde_action(void *arg)
+{
+ struct pde_action *act = arg;
+
+ if (act->store == PCPU_GET(cpuid))
+ pmap_update_pde_store(act->pmap, act->pde, act->newpde);
+}
+
+static void
+pmap_update_pde_teardown(void *arg)
+{
+ struct pde_action *act = arg;
+
+ if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
+ pmap_update_pde_invalidate(act->pmap, act->va, act->newpde);
+}
+
+/*
+ * Change the page size for the specified virtual address in a way that
+ * prevents any possibility of the TLB ever having two entries that map the
+ * same virtual address using different page sizes. This is the recommended
+ * workaround for Erratum 383 on AMD Family 10h processors. It prevents a
+ * machine check exception for a TLB state that is improperly diagnosed as a
+ * hardware error.
+ */
+static void
+pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
+{
+ struct pde_action act;
+ cpuset_t active, other_cpus;
+ u_int cpuid;
+
+ sched_pin();
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (pmap == kernel_pmap || pmap_type_guest(pmap))
+ active = all_cpus;
+ else {
+ active = pmap->pm_active;
+ }
+ if (CPU_OVERLAP(&active, &other_cpus)) {
+ act.store = cpuid;
+ act.invalidate = active;
+ act.va = va;
+ act.pmap = pmap;
+ act.pde = pde;
+ act.newpde = newpde;
+ CPU_SET(cpuid, &active);
+ smp_rendezvous_cpus(active,
+ smp_no_rendezvous_barrier, pmap_update_pde_action,
+ pmap_update_pde_teardown, &act);
+ } else {
+ pmap_update_pde_store(pmap, pde, newpde);
+ if (CPU_ISSET(cpuid, &active))
+ pmap_update_pde_invalidate(pmap, va, newpde);
+ }
+ sched_unpin();
+}
+#else /* !SMP */
+/*
+ * Normal, non-SMP, invalidation functions.
+ */
+void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+ struct invpcid_descr d;
+ uint64_t kcr3, ucr3;
+<<<<<<< HEAD
+=======
+ uint32_t pcid;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+
+ if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
+ pmap->pm_eptgen++;
+ return;
+ }
+ KASSERT(pmap->pm_type == PT_X86,
+ ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
+
+ if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
+ invlpg(va);
+<<<<<<< HEAD
+ if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled && pti) {
+ critical_enter();
+ if (invpcid_works) {
+ d.pcid = pmap->pm_pcids[0].pm_pcid |
+ PMAP_PCID_USER_PT;
+=======
+ if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
+ pmap->pm_ucr3 != PMAP_NO_CR3) {
+ critical_enter();
+ pcid = pmap->pm_pcids[0].pm_pcid;
+ if (invpcid_works) {
+ d.pcid = pcid | PMAP_PCID_USER_PT;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ d.pad = 0;
+ d.addr = va;
+ invpcid(&d, INVPCID_ADDR);
+ } else {
+<<<<<<< HEAD
+ kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].
+ pm_pcid | CR3_PCID_SAVE;
+ ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[0].
+ pm_pcid | PMAP_PCID_USER_PT |
+ CR3_PCID_SAVE;
+=======
+ kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
+ ucr3 = pmap->pm_ucr3 | pcid |
+ PMAP_PCID_USER_PT | CR3_PCID_SAVE;
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ pmap_pti_pcid_invlpg(ucr3, kcr3, va);
+ }
+ critical_exit();
+ }
+ } else if (pmap_pcid_enabled)
+ pmap->pm_pcids[0].pm_gen = 0;
+}
+
+void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ struct invpcid_descr d;
+ vm_offset_t addr;
+ uint64_t kcr3, ucr3;
+
+ if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
+ pmap->pm_eptgen++;
+ return;
+ }
+ KASSERT(pmap->pm_type == PT_X86,
+ ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
+
+ if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
+ for (addr = sva; addr < eva; addr += PAGE_SIZE)
+ invlpg(addr);
+<<<<<<< HEAD
+ if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled && pti) {
+=======
+ if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
+ pmap->pm_ucr3 != PMAP_NO_CR3) {
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ critical_enter();
+ if (invpcid_works) {
+ d.pcid = pmap->pm_pcids[0].pm_pcid |
+ PMAP_PCID_USER_PT;
+ d.pad = 0;
+ d.addr = sva;
+ for (; d.addr < eva; d.addr += PAGE_SIZE)
+ invpcid(&d, INVPCID_ADDR);
+ } else {
+ kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].
+ pm_pcid | CR3_PCID_SAVE;
+ ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[0].
+ pm_pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
+ pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
+ }
+ critical_exit();
+ }
+ } else if (pmap_pcid_enabled) {
+ pmap->pm_pcids[0].pm_gen = 0;
+ }
+}
+
+void
+pmap_invalidate_all(pmap_t pmap)
+{
+ struct invpcid_descr d;
+ uint64_t kcr3, ucr3;
+
+ if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
+ pmap->pm_eptgen++;
+ return;
+ }
+ KASSERT(pmap->pm_type == PT_X86,
+ ("pmap_invalidate_all: unknown type %d", pmap->pm_type));
+
+ if (pmap == kernel_pmap) {
+ if (pmap_pcid_enabled && invpcid_works) {
+ bzero(&d, sizeof(d));
+ invpcid(&d, INVPCID_CTXGLOB);
+ } else {
+ invltlb_glob();
+ }
+ } else if (pmap == PCPU_GET(curpmap)) {
+ if (pmap_pcid_enabled) {
+ critical_enter();
+ if (invpcid_works) {
+ d.pcid = pmap->pm_pcids[0].pm_pcid;
+ d.pad = 0;
+ d.addr = 0;
+ invpcid(&d, INVPCID_CTX);
+<<<<<<< HEAD
+ if (pti) {
+=======
+ if (pmap->pm_ucr3 != PMAP_NO_CR3) {
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ d.pcid |= PMAP_PCID_USER_PT;
+ invpcid(&d, INVPCID_CTX);
+ }
+ } else {
+ kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].pm_pcid;
+<<<<<<< HEAD
+ if (pti) {
+=======
+ if (pmap->pm_ucr3 != PMAP_NO_CR3) {
+>>>>>>> b0792bd72799... Update to current PTI-PCID patch
+ ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[
+ 0].pm_pcid | PMAP_PCID_USER_PT;
+ pmap_pti_pcid_invalidate(ucr3, kcr3);
+ } else
+ load_cr3(kcr3);
+ }
+ critical_exit();
+ } else {
+ invltlb();
+ }
+ } else if (pmap_pcid_enabled) {
+ pmap->pm_pcids[0].pm_gen = 0;
+ }
+}
+
+PMAP_INLINE void
+pmap_invalidate_cache(void)
+{
+
+ wbinvd();
+}
+
+static void
+pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
+{
+
+ pmap_update_pde_store(pmap, pde, newpde);
+ if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
+ pmap_update_pde_invalidate(pmap, va, newpde);
+ else
+ pmap->pm_pcids[0].pm_gen = 0;
+}
+#endif /* !SMP */
+
+static void
+pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
+{
+
+ /*
+ * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
+ * by a promotion that did not invalidate the 512 4KB page mappings
+ * that might exist in the TLB. Consequently, at this point, the TLB
+ * may hold both 4KB and 2MB page mappings for the address range [va,
+ * va + NBPDR). Therefore, the entire range must be invalidated here.
+ * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
+ * 4KB page mappings for the address range [va, va + NBPDR), and so a
+ * single INVLPG suffices to invalidate the 2MB page mapping from the
+ * TLB.
+ */
+ if ((pde & PG_PROMOTED) != 0)
+ pmap_invalidate_range(pmap, va, va + NBPDR - 1);
+ else
+ pmap_invalidate_page(pmap, va);
+}
+
+#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
+
+void
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
+{
+
+ if (force) {
+ sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+ } else {
+ KASSERT((sva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: sva not page-aligned"));
+ KASSERT((eva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: eva not page-aligned"));
+ }
+
+ if ((cpu_feature & CPUID_SS) != 0 && !force)
+ ; /* If "Self Snoop" is supported and allowed, do nothing. */
+ else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
+ eva - sva < PMAP_CLFLUSH_THRESHOLD) {
+ /*
+ * XXX: Some CPUs fault, hang, or trash the local APIC
+ * registers if we use CLFLUSH on the local APIC
+ * range. The local APIC is always uncached, so we
+ * don't need to flush for that range anyway.
+ */
+ if (pmap_kextract(sva) == lapic_paddr)
+ return;
+
+ /*
+ * Otherwise, do per-cache line flush. Use the sfence
+ * instruction to insure that previous stores are
+ * included in the write-back. The processor
+ * propagates flush to other processors in the cache
+ * coherence domain.
+ */
+ sfence();
+ for (; sva < eva; sva += cpu_clflush_line_size)
+ clflushopt(sva);
+ sfence();
+ } else if ((cpu_feature & CPUID_CLFSH) != 0 &&
+ eva - sva < PMAP_CLFLUSH_THRESHOLD) {
+ if (pmap_kextract(sva) == lapic_paddr)
+ return;
+ /*
+ * Writes are ordered by CLFLUSH on Intel CPUs.
+ */
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ for (; sva < eva; sva += cpu_clflush_line_size)
+ clflush(sva);
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ } else {
+
+ /*
+ * No targeted cache flush methods are supported by CPU,
+ * or the supplied range is bigger than 2MB.
+ * Globally invalidate cache.
+ */
+ pmap_invalidate_cache();
+ }
+}
+
+/*
+ * Remove the specified set of pages from the data and instruction caches.
+ *
+ * In contrast to pmap_invalidate_cache_range(), this function does not
+ * rely on the CPU's self-snoop feature, because it is intended for use
+ * when moving pages into a different cache domain.
+ */
+void
+pmap_invalidate_cache_pages(vm_page_t *pages, int count)
+{
+ vm_offset_t daddr, eva;
+ int i;
+ bool useclflushopt;
+
+ useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
+ if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
+ ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
+ pmap_invalidate_cache();
+ else {
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ for (i = 0; i < count; i++) {
+ daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
+ eva = daddr + PAGE_SIZE;
+ for (; daddr < eva; daddr += cpu_clflush_line_size) {
+ if (useclflushopt)
+ clflushopt(daddr);
+ else
+ clflush(daddr);
+ }
+ }
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ }
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+vm_paddr_t
+pmap_extract(pmap_t pmap, vm_offset_t va)
+{
+ pdp_entry_t *pdpe;
+ pd_entry_t *pde;
+ pt_entry_t *pte, PG_V;
+ vm_paddr_t pa;
+
+ pa = 0;
+ PG_V = pmap_valid_bit(pmap);
+ PMAP_LOCK(pmap);
+ pdpe = pmap_pdpe(pmap, va);
+ if (pdpe != NULL && (*pdpe & PG_V) != 0) {
+ if ((*pdpe & PG_PS) != 0)
+ pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
+ else {
+ pde = pmap_pdpe_to_pde(pdpe, va);
+ if ((*pde & PG_V) != 0) {
+ if ((*pde & PG_PS) != 0) {
+ pa = (*pde & PG_PS_FRAME) |
+ (va & PDRMASK);
+ } else {
+ pte = pmap_pde_to_pte(pde, va);
+ pa = (*pte & PG_FRAME) |
+ (va & PAGE_MASK);
+ }
+ }
+ }
+ }
+ PMAP_UNLOCK(pmap);
+ return (pa);
+}
+
+/*
+ * Routine: pmap_extract_and_hold
+ * Function:
+ * Atomically extract and hold the physical page
+ * with the given pmap and virtual address pair
+ * if that mapping permits the given protection.
+ * Returns NULL if there is no such mapping.
+ */
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+ pd_entry_t pde, *pdep;
+ pt_entry_t pte, PG_RW, PG_V;
+ vm_paddr_t pa;
+ vm_page_t m;
+
+ pa = 0;
+ m = NULL;
+ PG_RW = pmap_rw_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PMAP_LOCK(pmap);
+retry:
+ pdep = pmap_pde(pmap, va);
+ if (pdep != NULL && (pde = *pdep)) {
+ /* 2MB mapping: permission bits live in the PDE itself. */
+ if (pde & PG_PS) {
+ if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
+ /*
+ * If the page lock had to be changed, the
+ * page table walk must be restarted.
+ */
+ if (vm_page_pa_tryrelock(pmap, (pde &
+ PG_PS_FRAME) | (va & PDRMASK), &pa))
+ goto retry;
+ m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
+ (va & PDRMASK));
+ vm_page_hold(m);
+ }
+ } else {
+ pte = *pmap_pde_to_pte(pdep, va);
+ if ((pte & PG_V) &&
+ ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
+ if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
+ &pa))
+ goto retry;
+ m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
+ vm_page_hold(m);
+ }
+ }
+ }
+ PA_UNLOCK_COND(pa);
+ PMAP_UNLOCK(pmap);
+ return (m);
+}
+
+/*
+ * Extract the physical address backing the given kernel virtual address.
+ * Direct map addresses are translated arithmetically; other kernel
+ * addresses are looked up in the kernel page tables without locking.
+ */
+vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+ pd_entry_t pde;
+ vm_paddr_t pa;
+
+ if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
+ pa = DMAP_TO_PHYS(va);
+ } else {
+ pde = *vtopde(va);
+ if (pde & PG_PS) {
+ pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
+ } else {
+ /*
+ * Beware of a concurrent promotion that changes the
+ * PDE at this point! For example, vtopte() must not
+ * be used to access the PTE because it would use the
+ * new PDE. It is, however, safe to use the old PDE
+ * because the page table page is preserved by the
+ * promotion.
+ */
+ pa = *pmap_pde_to_pte(&pde, va);
+ pa = (pa & PG_FRAME) | (va & PAGE_MASK);
+ }
+ }
+ return (pa);
+}
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+/*
+ * Add a wired page to the kva.
+ * The mapping is created read/write and global; the caller is
+ * responsible for any required TLB invalidation.
+ * Note: not SMP coherent.
+ */
+PMAP_INLINE void
+pmap_kenter(vm_offset_t va, vm_paddr_t pa)
+{
+ pt_entry_t *pte;
+
+ pte = vtopte(va);
+ pte_store(pte, pa | X86_PG_RW | X86_PG_V | pg_g);
+}
+
+/*
+ * Like pmap_kenter(), but additionally encode the requested cache
+ * mode ("mode") into the PTE via pmap_cache_bits().
+ */
+static __inline void
+pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
+{
+ pt_entry_t *pte;
+ int cache_bits;
+
+ pte = vtopte(va);
+ cache_bits = pmap_cache_bits(kernel_pmap, mode, 0);
+ pte_store(pte, pa | X86_PG_RW | X86_PG_V | pg_g | cache_bits);
+}
+
+/*
+ * Remove a page from the kernel pagetables.
+ * Only clears the PTE; the caller performs any TLB invalidation.
+ * Note: not SMP coherent.
+ */
+PMAP_INLINE void
+pmap_kremove(vm_offset_t va)
+{
+ pt_entry_t *pte;
+
+ pte = vtopte(va);
+ pte_clear(pte);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
+ *
+ * On amd64, the direct map always covers the range, so the direct
+ * map address of 'start' is returned and '*virt' is left untouched.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
+{
+ return PHYS_TO_DMAP(start);
+}
+
+
+/*
+ * Add a list of wired pages to the kva
+ * this routine is only used for temporary
+ * kernel mappings that do not need to have
+ * page modification or references recorded.
+ * Note that old mappings are simply written
+ * over. The page *must* be wired.
+ * Note: SMP coherent. Uses a ranged shootdown IPI.
+ */
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+ pt_entry_t *endpte, oldpte, pa, *pte;
+ vm_page_t m;
+ int cache_bits;
+
+ oldpte = 0;
+ pte = vtopte(sva);
+ endpte = pte + count;
+ while (pte < endpte) {
+ m = *ma++;
+ cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
+ pa = VM_PAGE_TO_PHYS(m) | cache_bits;
+ /*
+ * Skip the store when the PTE already maps this frame
+ * with the same cache attributes.
+ */
+ if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
+ oldpte |= *pte;
+ pte_store(pte, pa | pg_g | X86_PG_RW | X86_PG_V);
+ }
+ pte++;
+ }
+ /* Invalidate only if at least one replaced PTE was valid. */
+ if (__predict_false((oldpte & X86_PG_V) != 0))
+ pmap_invalidate_range(kernel_pmap, sva, sva + count *
+ PAGE_SIZE);
+}
+
+/*
+ * This routine tears out page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ * Note: SMP coherent. Uses a ranged shootdown IPI.
+ */
+void
+pmap_qremove(vm_offset_t sva, int count)
+{
+ vm_offset_t va;
+
+ va = sva;
+ while (count-- > 0) {
+ /* Guard against being handed a user address by mistake. */
+ KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
+ pmap_kremove(va);
+ va += PAGE_SIZE;
+ }
+ /* One ranged invalidation for the whole span. */
+ pmap_invalidate_range(kernel_pmap, sva, va);
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+/*
+ * Free every page on the given list, then subtract the total from the
+ * global wire count in a single atomic update.
+ */
+static __inline void
+pmap_free_zero_pages(struct spglist *free)
+{
+ vm_page_t m;
+ int count;
+
+ for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
+ SLIST_REMOVE_HEAD(free, plinks.s.ss);
+ /* Preserve the page's PG_ZERO setting. */
+ vm_page_free_toq(m);
+ }
+ atomic_subtract_int(&vm_cnt.v_wire_count, count);
+}
+
+/*
+ * Schedule the specified unused page table page to be freed. Specifically,
+ * add the page to the specified list of pages that will be released to the
+ * physical memory manager after the TLB has been updated.
+ *
+ * "set_PG_ZERO" records whether the page is known to be zeroed so the
+ * flag is accurate when the page is eventually freed.
+ */
+static __inline void
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+ boolean_t set_PG_ZERO)
+{
+
+ if (set_PG_ZERO)
+ m->flags |= PG_ZERO;
+ else
+ m->flags &= ~PG_ZERO;
+ SLIST_INSERT_HEAD(free, m, plinks.s.ss);
+}
+
+/*
+ * Inserts the specified page table page into the specified pmap's collection
+ * of idle page table pages. Each of a pmap's page table pages is responsible
+ * for mapping a distinct range of virtual addresses. The pmap's collection is
+ * ordered by this virtual address range.
+ *
+ * Returns the result of vm_radix_insert() (0 on success).
+ */
+static __inline int
+pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ return (vm_radix_insert(&pmap->pm_root, mpte));
+}
+
+/*
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns it.
+ * Otherwise, returns NULL if there is no page table page corresponding to the
+ * specified virtual address.
+ *
+ * The collection is keyed by pmap_pde_pindex(va).
+ */
+static __inline vm_page_t
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
+}
+
+/*
+ * Decrements a page table page's wire count, which is used to record the
+ * number of valid page table entries within the page. If the wire count
+ * drops to zero, then the page table page is unmapped. Returns TRUE if the
+ * page table page was unmapped and FALSE otherwise.
+ */
+static inline boolean_t
+pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
+{
+
+ --m->wire_count;
+ if (m->wire_count == 0) {
+ /* Last PTE gone: unmap and queue the page for freeing. */
+ _pmap_unwire_ptp(pmap, va, m, free);
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+/*
+ * Unmap a page table page whose wire count has reached zero.  The page's
+ * pindex encodes its level: >= NUPDE + NUPDPE means a PDP page, >= NUPDE
+ * means a PD page, and anything lower is a PTE page.  Unmapping a lower
+ * level page also drops a reference on its parent, which may recurse.
+ */
+static void
+_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ /*
+ * unmap the page table page
+ */
+ if (m->pindex >= (NUPDE + NUPDPE)) {
+ /* PDP page */
+ pml4_entry_t *pml4;
+ pml4 = pmap_pml4e(pmap, va);
+ *pml4 = 0;
+ /* With PTI, clear the user PML4 entry as well. */
+ if (pmap->pm_pml4u != NULL && va <= VM_MAXUSER_ADDRESS) {
+ pml4 = &pmap->pm_pml4u[pmap_pml4e_index(va)];
+ *pml4 = 0;
+ }
+ } else if (m->pindex >= NUPDE) {
+ /* PD page */
+ pdp_entry_t *pdp;
+ pdp = pmap_pdpe(pmap, va);
+ *pdp = 0;
+ } else {
+ /* PTE page */
+ pd_entry_t *pd;
+ pd = pmap_pde(pmap, va);
+ *pd = 0;
+ }
+ pmap_resident_count_dec(pmap, 1);
+ if (m->pindex < NUPDE) {
+ /* We just released a PT, unhold the matching PD */
+ vm_page_t pdpg;
+
+ pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
+ pmap_unwire_ptp(pmap, va, pdpg, free);
+ }
+ if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
+ /* We just released a PD, unhold the matching PDP */
+ vm_page_t pdppg;
+
+ pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
+ pmap_unwire_ptp(pmap, va, pdppg, free);
+ }
+
+ /*
+ * Put page on a list so that it is released after
+ * *ALL* TLB shootdown is done
+ */
+ pmap_add_delayed_free_list(m, free, TRUE);
+}
+
+/*
+ * After removing a page table entry, this routine is used to
+ * conditionally free the page, and manage the hold/wire counts.
+ *
+ * Kernel mappings (va >= VM_MAXUSER_ADDRESS) never free their page
+ * table pages; 0 is returned in that case.
+ */
+static int
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
+ struct spglist *free)
+{
+ vm_page_t mpte;
+
+ if (va >= VM_MAXUSER_ADDRESS)
+ return (0);
+ KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
+ mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
+ return (pmap_unwire_ptp(pmap, va, mpte, free));
+}
+
+/*
+ * Initialize a statically preallocated pmap to reference the bootstrap
+ * kernel page table (KPML4phys) instead of allocating a fresh PML4,
+ * and activate it on the current CPU.
+ */
+void
+pmap_pinit0(pmap_t pmap)
+{
+ int i;
+
+ PMAP_LOCK_INIT(pmap);
+ pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
+ pmap->pm_pml4u = NULL;
+ pmap->pm_cr3 = KPML4phys;
+ /* hack to keep pmap_pti_pcid_invalidate() alive */
+ pmap->pm_ucr3 = PMAP_NO_CR3;
+ pmap->pm_root.rt_root = 0;
+ CPU_ZERO(&pmap->pm_active);
+ TAILQ_INIT(&pmap->pm_pvchunk);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+ pmap->pm_flags = pmap_flags;
+ CPU_FOREACH(i) {
+ pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
+ pmap->pm_pcids[i].pm_gen = 0;
+ /* Without PTI there is no separate kernel CR3 per CPU. */
+ if (!pti)
+ __pcpu[i].pc_kcr3 = PMAP_NO_CR3;
+ }
+ PCPU_SET(curpmap, kernel_pmap);
+ pmap_activate(curthread);
+ CPU_FILL(&kernel_pmap->pm_active);
+}
+
+/*
+ * Populate a freshly allocated PML4 page with the kernel's global
+ * entries (KVA and direct map) and the recursive self-mapping.
+ */
+void
+pmap_pinit_pml4(vm_page_t pml4pg)
+{
+ pml4_entry_t *pm_pml4;
+ int i;
+
+ pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
+
+ /* Wire in kernel global address entries. */
+ for (i = 0; i < NKPML4E; i++) {
+ pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
+ X86_PG_V | PG_U;
+ }
+ for (i = 0; i < ndmpdpphys; i++) {
+ pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
+ X86_PG_V | PG_U;
+ }
+
+ /* install self-referential address mapping entry(s) */
+ pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
+ X86_PG_A | X86_PG_M;
+}
+
+/*
+ * Populate a user-mode (PTI) PML4 page by copying the global
+ * pti_pml4 template wholesale.
+ */
+static void
+pmap_pinit_pml4_pti(vm_page_t pml4pg)
+{
+ pml4_entry_t *pm_pml4;
+ int i;
+
+ pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
+ for (i = 0; i < NPML4EPG; i++)
+ pm_pml4[i] = pti_pml4[i];
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ *
+ * "pm_type" selects native x86 versus other page table formats;
+ * always returns 1.
+ */
+int
+pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
+{
+ vm_page_t pml4pg, pml4pgu;
+ vm_paddr_t pml4phys;
+ int i;
+
+ /*
+ * allocate the page directory page
+ */
+ pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
+
+ pml4phys = VM_PAGE_TO_PHYS(pml4pg);
+ pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys);
+ CPU_FOREACH(i) {
+ pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
+ pmap->pm_pcids[i].pm_gen = 0;
+ }
+ pmap->pm_cr3 = PMAP_NO_CR3; /* initialize to an invalid value */
+ pmap->pm_ucr3 = PMAP_NO_CR3;
+ pmap->pm_pml4u = NULL;
+
+ pmap->pm_type = pm_type;
+ if ((pml4pg->flags & PG_ZERO) == 0)
+ pagezero(pmap->pm_pml4);
+
+ /*
+ * Do not install the host kernel mappings in the nested page
+ * tables. These mappings are meaningless in the guest physical
+ * address space.
+ * Install minimal kernel mappings in PTI case.
+ */
+ if (pm_type == PT_X86) {
+ pmap->pm_cr3 = pml4phys;
+ pmap_pinit_pml4(pml4pg);
+ if (pti) {
+ /* Second PML4 page for the user-mode page table. */
+ pml4pgu = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
+ pmap->pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(
+ VM_PAGE_TO_PHYS(pml4pgu));
+ pmap_pinit_pml4_pti(pml4pgu);
+ pmap->pm_ucr3 = VM_PAGE_TO_PHYS(pml4pgu);
+ }
+ }
+
+ pmap->pm_root.rt_root = 0;
+ CPU_ZERO(&pmap->pm_active);
+ TAILQ_INIT(&pmap->pm_pvchunk);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+ pmap->pm_flags = flags;
+ pmap->pm_eptgen = 0;
+
+ return (1);
+}
+
+/*
+ * Default pmap initialization: native x86 page table format with the
+ * machine-wide default flags.
+ */
+int
+pmap_pinit(pmap_t pmap)
+{
+
+ return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
+}
+
+/*
+ * This routine is called if the desired page table page does not exist.
+ *
+ * If page table page allocation fails, this routine may sleep before
+ * returning NULL. It sleeps only if a lock pointer was given.
+ *
+ * Note: If a page allocation fails at page table level two or three,
+ * one or two pages may be held during the wait, only to be released
+ * afterwards. This conservative approach is easily argued to avoid
+ * race conditions.
+ *
+ * "ptepindex" encodes the level of the wanted page: values at or above
+ * NUPDE + NUPDPE request a PDP page, values at or above NUPDE request a
+ * PD page, and lower values request a PTE page.
+ */
+static vm_page_t
+_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
+{
+ vm_page_t m, pdppg, pdpg;
+ pt_entry_t PG_A, PG_M, PG_RW, PG_V;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ /*
+ * Allocate a page table page.
+ */
+ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+ if (lockp != NULL) {
+ RELEASE_PV_LIST_LOCK(lockp);
+ PMAP_UNLOCK(pmap);
+ PMAP_ASSERT_NOT_IN_DI();
+ VM_WAIT;
+ PMAP_LOCK(pmap);
+ }
+
+ /*
+ * Indicate the need to retry. While waiting, the page table
+ * page may have been allocated.
+ */
+ return (NULL);
+ }
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+
+ /*
+ * Map the pagetable page into the process address space, if
+ * it isn't already there.
+ */
+
+ if (ptepindex >= (NUPDE + NUPDPE)) {
+ pml4_entry_t *pml4, *pml4u;
+ vm_pindex_t pml4index;
+
+ /* Wire up a new PDPE page */
+ pml4index = ptepindex - (NUPDE + NUPDPE);
+ pml4 = &pmap->pm_pml4[pml4index];
+ *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
+ if (pmap->pm_pml4u != NULL && pml4index < NUPML4E) {
+ /*
+ * PTI: Make all user-space mappings in the
+ * kernel-mode page table no-execute so that
+ * we detect any programming errors that leave
+ * the kernel-mode page table active on return
+ * to user space.
+ */
+ *pml4 |= pg_nx;
+
+ pml4u = &pmap->pm_pml4u[pml4index];
+ *pml4u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
+ PG_A | PG_M;
+ }
+
+ } else if (ptepindex >= NUPDE) {
+ vm_pindex_t pml4index;
+ vm_pindex_t pdpindex;
+ pml4_entry_t *pml4;
+ pdp_entry_t *pdp;
+
+ /* Wire up a new PDE page */
+ pdpindex = ptepindex - NUPDE;
+ pml4index = pdpindex >> NPML4EPGSHIFT;
+
+ pml4 = &pmap->pm_pml4[pml4index];
+ if ((*pml4 & PG_V) == 0) {
+ /* Have to allocate a new pdp, recurse */
+ if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
+ lockp) == NULL) {
+ /* Undo the allocation of "m" above. */
+ --m->wire_count;
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ vm_page_free_zero(m);
+ return (NULL);
+ }
+ } else {
+ /* Add reference to pdp page */
+ pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
+ pdppg->wire_count++;
+ }
+ pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
+
+ /* Now find the pdp page */
+ pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
+ *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
+
+ } else {
+ vm_pindex_t pml4index;
+ vm_pindex_t pdpindex;
+ pml4_entry_t *pml4;
+ pdp_entry_t *pdp;
+ pd_entry_t *pd;
+
+ /* Wire up a new PTE page */
+ pdpindex = ptepindex >> NPDPEPGSHIFT;
+ pml4index = pdpindex >> NPML4EPGSHIFT;
+
+ /* First, find the pdp and check that its valid. */
+ pml4 = &pmap->pm_pml4[pml4index];
+ if ((*pml4 & PG_V) == 0) {
+ /* Have to allocate a new pd, recurse */
+ if (_pmap_allocpte(pmap, NUPDE + pdpindex,
+ lockp) == NULL) {
+ --m->wire_count;
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ vm_page_free_zero(m);
+ return (NULL);
+ }
+ pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
+ pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
+ } else {
+ pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
+ pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
+ if ((*pdp & PG_V) == 0) {
+ /* Have to allocate a new pd, recurse */
+ if (_pmap_allocpte(pmap, NUPDE + pdpindex,
+ lockp) == NULL) {
+ --m->wire_count;
+ atomic_subtract_int(&vm_cnt.v_wire_count,
+ 1);
+ vm_page_free_zero(m);
+ return (NULL);
+ }
+ } else {
+ /* Add reference to the pd page */
+ pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
+ pdpg->wire_count++;
+ }
+ }
+ pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
+
+ /* Now we know where the page directory page is */
+ pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
+ *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
+ }
+
+ pmap_resident_count_inc(pmap, 1);
+
+ return (m);
+}
+
+/*
+ * Return the page directory page backing "va", allocating it (and any
+ * missing upper-level pages) if necessary.  A reference is added to the
+ * returned page.  Returns NULL only when allocation fails and no lock
+ * pointer was supplied.
+ */
+static vm_page_t
+pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
+{
+ vm_pindex_t pdpindex, ptepindex;
+ pdp_entry_t *pdpe, PG_V;
+ vm_page_t pdpg;
+
+ PG_V = pmap_valid_bit(pmap);
+
+retry:
+ pdpe = pmap_pdpe(pmap, va);
+ if (pdpe != NULL && (*pdpe & PG_V) != 0) {
+ /* Add a reference to the pd page. */
+ pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
+ pdpg->wire_count++;
+ } else {
+ /* Allocate a pd page. */
+ ptepindex = pmap_pde_pindex(va);
+ pdpindex = ptepindex >> NPDPEPGSHIFT;
+ pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
+ /* _pmap_allocpte() may have slept; start over. */
+ if (pdpg == NULL && lockp != NULL)
+ goto retry;
+ }
+ return (pdpg);
+}
+
+/*
+ * Return the page table page backing "va", allocating it if necessary.
+ * A reference is added to the returned page.  An existing 2MB mapping
+ * is demoted first so that a 4KB page table exists.
+ */
+static vm_page_t
+pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
+{
+ vm_pindex_t ptepindex;
+ pd_entry_t *pd, PG_V;
+ vm_page_t m;
+
+ PG_V = pmap_valid_bit(pmap);
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = pmap_pde_pindex(va);
+retry:
+ /*
+ * Get the page directory entry
+ */
+ pd = pmap_pde(pmap, va);
+
+ /*
+ * This supports switching from a 2MB page to a
+ * normal 4K page.
+ */
+ if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
+ if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
+ /*
+ * Invalidation of the 2MB page mapping may have caused
+ * the deallocation of the underlying PD page.
+ */
+ pd = NULL;
+ }
+ }
+
+ /*
+ * If the page table page is mapped, we just increment the
+ * hold count, and activate it.
+ */
+ if (pd != NULL && (*pd & PG_V) != 0) {
+ m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
+ m->wire_count++;
+ } else {
+ /*
+ * Here if the pte page isn't mapped, or if it has been
+ * deallocated.
+ */
+ m = _pmap_allocpte(pmap, ptepindex, lockp);
+ /* _pmap_allocpte() may have slept; start over. */
+ if (m == NULL && lockp != NULL)
+ goto retry;
+ }
+ return (m);
+}
+
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+ vm_page_t m;
+ int i;
+
+ KASSERT(pmap->pm_stats.resident_count == 0,
+ ("pmap_release: pmap resident count %ld != 0",
+ pmap->pm_stats.resident_count));
+ KASSERT(vm_radix_is_empty(&pmap->pm_root),
+ ("pmap_release: pmap has reserved page table page(s)"));
+ KASSERT(CPU_EMPTY(&pmap->pm_active),
+ ("releasing active pmap %p", pmap));
+
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4));
+
+ /* Clear the shared kernel entries before freeing the PML4 page. */
+ for (i = 0; i < NKPML4E; i++) /* KVA */
+ pmap->pm_pml4[KPML4BASE + i] = 0;
+ for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
+ pmap->pm_pml4[DMPML4I + i] = 0;
+ pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */
+
+ m->wire_count--;
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ vm_page_free_zero(m);
+
+ /* Free the user-mode (PTI) PML4 page, if any. */
+ if (pmap->pm_pml4u != NULL) {
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4u));
+ m->wire_count--;
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ vm_page_free(m);
+ }
+}
+
+/* Sysctl handler: report the total size of kernel virtual memory. */
+static int
+kvm_size(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+
+ return sysctl_handle_long(oidp, &ksize, 0, req);
+}
+SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
+ 0, 0, kvm_size, "LU", "Size of KVM");
+
+/* Sysctl handler: report how much kernel virtual memory remains unused. */
+static int
+kvm_free(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
+
+ return sysctl_handle_long(oidp, &kfree, 0, req);
+}
+SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
+ 0, 0, kvm_free, "LU", "Amount of KVM free");
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+ vm_paddr_t paddr;
+ vm_page_t nkpg;
+ pd_entry_t *pde, newpdir;
+ pdp_entry_t *pdpe;
+
+ mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+
+ /*
+ * Return if "addr" is within the range of kernel page table pages
+ * that were preallocated during pmap bootstrap. Moreover, leave
+ * "kernel_vm_end" and the kernel page table as they were.
+ *
+ * The correctness of this action is based on the following
+ * argument: vm_map_insert() allocates contiguous ranges of the
+ * kernel virtual address space. It calls this function if a range
+ * ends after "kernel_vm_end". If the kernel is mapped between
+ * "kernel_vm_end" and "addr", then the range cannot begin at
+ * "kernel_vm_end". In fact, its beginning address cannot be less
+ * than the kernel. Thus, there is no immediate need to allocate
+ * any new kernel page table pages between "kernel_vm_end" and
+ * "KERNBASE".
+ */
+ if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
+ return;
+
+ addr = roundup2(addr, NBPDR);
+ if (addr - 1 >= kernel_map->max_offset)
+ addr = kernel_map->max_offset;
+ while (kernel_vm_end < addr) {
+ pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
+ if ((*pdpe & X86_PG_V) == 0) {
+ /* We need a new PDP entry */
+ nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
+ VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if (nkpg == NULL)
+ panic("pmap_growkernel: no memory to grow kernel");
+ if ((nkpg->flags & PG_ZERO) == 0)
+ pmap_zero_page(nkpg);
+ paddr = VM_PAGE_TO_PHYS(nkpg);
+ *pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
+ X86_PG_A | X86_PG_M);
+ continue; /* try again */
+ }
+ pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
+ if ((*pde & X86_PG_V) != 0) {
+ /* PDE already valid; advance to the next 2MB slot. */
+ kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ continue;
+ }
+
+ /* Allocate and install a new page table page. */
+ nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
+ VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+ if (nkpg == NULL)
+ panic("pmap_growkernel: no memory to grow kernel");
+ if ((nkpg->flags & PG_ZERO) == 0)
+ pmap_zero_page(nkpg);
+ paddr = VM_PAGE_TO_PHYS(nkpg);
+ newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
+ pde_store(pde, newpdir);
+
+ kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ }
+}
+
+
+/***************************************************
+ * page management routines.
+ ***************************************************/
+
+/* Compile-time checks on the pv_chunk layout. */
+CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
+CTASSERT(_NPCM == 3);
+CTASSERT(_NPCPV == 168);
+
+/*
+ * Map a pv entry back to its containing chunk; chunks are
+ * page-sized and page-aligned, so masking the offset suffices.
+ */
+static __inline struct pv_chunk *
+pv_to_chunk(pv_entry_t pv)
+{
+
+ return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
+}
+
+#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
+
+/* Bitmap values of pc_map[] when every pv entry in the chunk is free. */
+#define PC_FREE0 0xfffffffffffffffful
+#define PC_FREE1 0xfffffffffffffffful
+#define PC_FREE2 0x000000fffffffffful
+
+static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
+
+#ifdef PV_STATS
+static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
+ "Current number of pv entry chunks");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
+ "Current number of pv entry chunks allocated");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
+ "Current number of pv entry chunks frees");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
+ "Number of times tried to get a chunk page but failed.");
+
+static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
+static int pv_entry_spare;
+
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
+ "Current number of pv entry frees");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
+ "Current number of pv entry allocs");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
+ "Current number of pv entries");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
+ "Current number of spare pv entries");
+#endif
+
+/*
+ * Finish processing a pmap during PV chunk reclamation: flush its TLB,
+ * drop its lock unless it is the caller's already-locked pmap, and end
+ * the delayed-invalidation block when the caller started one.
+ */
+static void
+reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
+{
+
+ if (pmap == NULL)
+ return;
+ pmap_invalidate_all(pmap);
+ if (pmap != locked_pmap)
+ PMAP_UNLOCK(pmap);
+ if (start_di)
+ pmap_delayed_invl_finished();
+}
+
+/*
+ * We are in a serious low memory condition. Resort to
+ * drastic measures to free some pages so we can allocate
+ * another pv entry chunk.
+ *
+ * Returns NULL if PV entries were reclaimed from the specified pmap.
+ *
+ * We do not, however, unmap 2mpages because subsequent accesses will
+ * allocate per-page pv entries until repromotion occurs, thereby
+ * exacerbating the shortage of free pv entries.
+ */
+static vm_page_t
+reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
+{
+ struct pv_chunk *pc, *pc_marker, *pc_marker_end;
+ struct pv_chunk_header pc_marker_b, pc_marker_end_b;
+ struct md_page *pvh;
+ pd_entry_t *pde;
+ pmap_t next_pmap, pmap;
+ pt_entry_t *pte, tpte;
+ pt_entry_t PG_G, PG_A, PG_M, PG_RW;
+ pv_entry_t pv;
+ vm_offset_t va;
+ vm_page_t m, m_pc;
+ struct spglist free;
+ uint64_t inuse;
+ int bit, field, freed;
+ bool start_di;
+ static int active_reclaims = 0;
+
+ PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
+ KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
+ pmap = NULL;
+ m_pc = NULL;
+ PG_G = PG_A = PG_M = PG_RW = 0;
+ SLIST_INIT(&free);
+ /*
+ * Two zeroed markers (pc_pmap == NULL) bracket this invocation's
+ * scan of the pv_chunks list, so concurrent reclaims and list
+ * rotations do not disturb our position.
+ */
+ bzero(&pc_marker_b, sizeof(pc_marker_b));
+ bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
+ pc_marker = (struct pv_chunk *)&pc_marker_b;
+ pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
+
+ /*
+ * A delayed invalidation block should already be active if
+ * pmap_advise() or pmap_remove() called this function by way
+ * of pmap_demote_pde_locked().
+ */
+ start_di = pmap_not_in_di();
+
+ mtx_lock(&pv_chunks_mutex);
+ active_reclaims++;
+ TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
+ TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
+ while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
+ SLIST_EMPTY(&free)) {
+ next_pmap = pc->pc_pmap;
+ if (next_pmap == NULL) {
+ /*
+ * The next chunk is a marker. However, it is
+ * not our marker, so active_reclaims must be
+ * > 1. Consequently, the next_chunk code
+ * will not rotate the pv_chunks list.
+ */
+ goto next_chunk;
+ }
+ mtx_unlock(&pv_chunks_mutex);
+
+ /*
+ * A pv_chunk can only be removed from the pc_lru list
+ * when both pc_chunks_mutex is owned and the
+ * corresponding pmap is locked.
+ */
+ if (pmap != next_pmap) {
+ reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
+ start_di);
+ pmap = next_pmap;
+ /* Avoid deadlock and lock recursion. */
+ if (pmap > locked_pmap) {
+ RELEASE_PV_LIST_LOCK(lockp);
+ PMAP_LOCK(pmap);
+ if (start_di)
+ pmap_delayed_invl_started();
+ mtx_lock(&pv_chunks_mutex);
+ continue;
+ } else if (pmap != locked_pmap) {
+ if (PMAP_TRYLOCK(pmap)) {
+ if (start_di)
+ pmap_delayed_invl_started();
+ mtx_lock(&pv_chunks_mutex);
+ continue;
+ } else {
+ pmap = NULL; /* pmap is not locked */
+ mtx_lock(&pv_chunks_mutex);
+ pc = TAILQ_NEXT(pc_marker, pc_lru);
+ if (pc == NULL ||
+ pc->pc_pmap != next_pmap)
+ continue;
+ goto next_chunk;
+ }
+ } else if (start_di)
+ pmap_delayed_invl_started();
+ PG_G = pmap_global_bit(pmap);
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ }
+
+ /*
+ * Destroy every non-wired, 4 KB page mapping in the chunk.
+ */
+ freed = 0;
+ for (field = 0; field < _NPCM; field++) {
+ for (inuse = ~pc->pc_map[field] & pc_freemask[field];
+ inuse != 0; inuse &= ~(1UL << bit)) {
+ bit = bsfq(inuse);
+ pv = &pc->pc_pventry[field * 64 + bit];
+ va = pv->pv_va;
+ pde = pmap_pde(pmap, va);
+ if ((*pde & PG_PS) != 0)
+ continue;
+ pte = pmap_pde_to_pte(pde, va);
+ if ((*pte & PG_W) != 0)
+ continue;
+ tpte = pte_load_clear(pte);
+ if ((tpte & PG_G) != 0)
+ pmap_invalidate_page(pmap, va);
+ m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
+ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ if ((tpte & PG_A) != 0)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list)) {
+ vm_page_aflag_clear(m,
+ PGA_WRITEABLE);
+ }
+ }
+ pmap_delayed_invl_page(m);
+ pc->pc_map[field] |= 1UL << bit;
+ pmap_unuse_pt(pmap, va, *pde, &free);
+ freed++;
+ }
+ }
+ if (freed == 0) {
+ mtx_lock(&pv_chunks_mutex);
+ goto next_chunk;
+ }
+ /* Every freed mapping is for a 4 KB page. */
+ pmap_resident_count_dec(pmap, freed);
+ PV_STAT(atomic_add_long(&pv_entry_frees, freed));
+ PV_STAT(atomic_add_int(&pv_entry_spare, freed));
+ PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
+ pc->pc_map[2] == PC_FREE2) {
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
+ PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
+ /* Entire chunk is free; return it. */
+ m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+ dump_drop_page(m_pc->phys_addr);
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ break;
+ }
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ mtx_lock(&pv_chunks_mutex);
+ /* One freed pv entry in locked_pmap is sufficient. */
+ if (pmap == locked_pmap)
+ break;
+next_chunk:
+ TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
+ TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
+ if (active_reclaims == 1 && pmap != NULL) {
+ /*
+ * Rotate the pv chunks list so that we do not
+ * scan the same pv chunks that could not be
+ * freed (because they contained a wired
+ * and/or superpage mapping) on every
+ * invocation of reclaim_pv_chunk().
+ */
+ while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
+ MPASS(pc->pc_pmap != NULL);
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+ }
+ }
+ }
+ TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
+ TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
+ active_reclaims--;
+ mtx_unlock(&pv_chunks_mutex);
+ reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
+ if (m_pc == NULL && !SLIST_EMPTY(&free)) {
+ m_pc = SLIST_FIRST(&free);
+ SLIST_REMOVE_HEAD(&free, plinks.s.ss);
+ /* Recycle a freed page table page. */
+ m_pc->wire_count = 1;
+ }
+ pmap_free_zero_pages(&free);
+ return (m_pc);
+}
+
+/*
+ * free the pv_entry back to the free list
+ *
+ * The entry's bit is set in its chunk's free bitmap; a fully free
+ * chunk is returned to the system via free_pv_chunk().
+ */
+static void
+free_pv_entry(pmap_t pmap, pv_entry_t pv)
+{
+ struct pv_chunk *pc;
+ int idx, field, bit;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PV_STAT(atomic_add_long(&pv_entry_frees, 1));
+ PV_STAT(atomic_add_int(&pv_entry_spare, 1));
+ PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
+ pc = pv_to_chunk(pv);
+ idx = pv - &pc->pc_pventry[0];
+ field = idx / 64;
+ bit = idx % 64;
+ pc->pc_map[field] |= 1ul << bit;
+ if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
+ pc->pc_map[2] != PC_FREE2) {
+ /* 98% of the time, pc is already at the head of the list. */
+ if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ }
+ return;
+ }
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ free_pv_chunk(pc);
+}
+
+/*
+ * Return a completely free pv chunk's page to the physical memory
+ * allocator, removing it from the global LRU list and adjusting the
+ * PV statistics and minidump page set.
+ */
+static void
+free_pv_chunk(struct pv_chunk *pc)
+{
+ vm_page_t m;
+
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ mtx_unlock(&pv_chunks_mutex);
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
+ PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
+ /* entire chunk is free, return it */
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+ dump_drop_page(m->phys_addr);
+ vm_page_unwire(m, PQ_NONE);
+ vm_page_free(m);
+}
+
+/*
+ * Returns a new PV entry, allocating a new PV chunk from the system when
+ * needed.  If this PV chunk allocation fails and a PV list lock pointer was
+ * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
+ * returned.
+ *
+ * The given PV list lock may be released.
+ */
+static pv_entry_t
+get_pv_entry(pmap_t pmap, struct rwlock **lockp)
+{
+ int bit, field;
+ pv_entry_t pv;
+ struct pv_chunk *pc;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
+retry:
+ /* First try to carve an entry out of this pmap's head chunk. */
+ pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+ if (pc != NULL) {
+ for (field = 0; field < _NPCM; field++) {
+ if (pc->pc_map[field]) {
+ bit = bsfq(pc->pc_map[field]);
+ break;
+ }
+ }
+ if (field < _NPCM) {
+ pv = &pc->pc_pventry[field * 64 + bit];
+ pc->pc_map[field] &= ~(1ul << bit);
+ /* If this was the last item, move it to tail */
+ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
+ pc->pc_map[2] == 0) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
+ pc_list);
+ }
+ PV_STAT(atomic_add_long(&pv_entry_count, 1));
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
+ return (pv);
+ }
+ }
+ /* No free items, allocate another chunk */
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED);
+ if (m == NULL) {
+ if (lockp == NULL) {
+ /* Reclamation is disabled; report failure. */
+ PV_STAT(pc_chunk_tryfail++);
+ return (NULL);
+ }
+ m = reclaim_pv_chunk(pmap, lockp);
+ if (m == NULL)
+ goto retry;
+ }
+ PV_STAT(atomic_add_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
+ dump_add_page(m->phys_addr);
+ pc = (void *)PHYS_TO_DMAP(m->phys_addr);
+ pc->pc_pmap = pmap;
+ pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
+ pc->pc_map[1] = PC_FREE1;
+ pc->pc_map[2] = PC_FREE2;
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+ mtx_unlock(&pv_chunks_mutex);
+ pv = &pc->pc_pventry[0];
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ PV_STAT(atomic_add_long(&pv_entry_count, 1));
+ PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
+ return (pv);
+}
+
+/*
+ * Returns the number of one bits within the given PV chunk map.
+ *
+ * The erratas for Intel processors state that "POPCNT Instruction May
+ * Take Longer to Execute Than Expected".  It is believed that the
+ * issue is the spurious dependency on the destination register.
+ * Provide a hint to the register rename logic that the destination
+ * value is overwritten, by clearing it, as suggested in the
+ * optimization manual.  It should be cheap for unaffected processors
+ * as well.
+ *
+ * Reference numbers for erratas are
+ * 4th Gen Core: HSD146
+ * 5th Gen Core: BDM85
+ * 6th Gen Core: SKL029
+ */
+static int
+popcnt_pc_map_pq(uint64_t *map)
+{
+ u_long result, tmp;
+
+ /* Each xorl clears the popcntq destination to break the false dep. */
+ __asm __volatile("xorl %k0,%k0;popcntq %2,%0;"
+ "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;"
+ "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0"
+ : "=&r" (result), "=&r" (tmp)
+ : "m" (map[0]), "m" (map[1]), "m" (map[2]));
+ return (result);
+}
+
+/*
+ * Ensure that the number of spare PV entries in the specified pmap meets or
+ * exceeds the given count, "needed".
+ *
+ * The given PV list lock may be released.
+ */
+static void
+reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
+{
+ struct pch new_tail;
+ struct pv_chunk *pc;
+ int avail, free;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
+
+ /*
+ * Newly allocated PV chunks must be stored in a private list until
+ * the required number of PV chunks have been allocated.  Otherwise,
+ * reclaim_pv_chunk() could recycle one of these chunks.  In
+ * contrast, these chunks must be added to the pmap upon allocation.
+ */
+ TAILQ_INIT(&new_tail);
+retry:
+ /* Count the spare entries already available in this pmap's chunks. */
+ avail = 0;
+ TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
+#ifndef __POPCNT__
+ if ((cpu_feature2 & CPUID2_POPCNT) == 0)
+ bit_count((bitstr_t *)pc->pc_map, 0,
+ sizeof(pc->pc_map) * NBBY, &free);
+ else
+#endif
+ free = popcnt_pc_map_pq(pc->pc_map);
+ if (free == 0)
+ break;
+ avail += free;
+ if (avail >= needed)
+ break;
+ }
+ /* Allocate whole chunks until the deficit is covered. */
+ for (; avail < needed; avail += _NPCPV) {
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED);
+ if (m == NULL) {
+ m = reclaim_pv_chunk(pmap, lockp);
+ if (m == NULL)
+ goto retry;
+ }
+ PV_STAT(atomic_add_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
+ dump_add_page(m->phys_addr);
+ pc = (void *)PHYS_TO_DMAP(m->phys_addr);
+ pc->pc_pmap = pmap;
+ pc->pc_map[0] = PC_FREE0;
+ pc->pc_map[1] = PC_FREE1;
+ pc->pc_map[2] = PC_FREE2;
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
+ PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
+ }
+ if (!TAILQ_EMPTY(&new_tail)) {
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
+ mtx_unlock(&pv_chunks_mutex);
+ }
+}
+
+/*
+ * First find and then remove the pv entry for the specified pmap and virtual
+ * address from the specified pv list.  Returns the pv entry if found and NULL
+ * otherwise.  This operation can be performed on pv lists for either 4KB or
+ * 2MB page mappings.
+ */
+static __inline pv_entry_t
+pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ /* Bump the generation so lock-droppers see the change. */
+ pvh->pv_gen++;
+ break;
+ }
+ }
+ return (pv);
+}
+
+/*
+ * After demotion from a 2MB page mapping to 512 4KB page mappings,
+ * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
+ * entries for each of the 4KB page mappings.
+ *
+ * The required spare entries must have been reserved beforehand (see
+ * reserve_pv_entries()); the KASSERT below fires if a spare is missing.
+ */
+static void
+pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+    struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ struct pv_chunk *pc;
+ pv_entry_t pv;
+ vm_offset_t va_last;
+ vm_page_t m;
+ int bit, field;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((pa & PDRMASK) == 0,
+     ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
+
+ /*
+ * Transfer the 2mpage's pv entry for this mapping to the first
+ * page's pv list.  Once this transfer begins, the pv list lock
+ * must not be released until the last pv entry is reinstantiated.
+ */
+ pvh = pa_to_pvh(pa);
+ va = trunc_2mpage(va);
+ pv = pmap_pvh_remove(pvh, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
+ m = PHYS_TO_VM_PAGE(pa);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ /* Instantiate the remaining NPTEPG - 1 pv entries. */
+ PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
+ va_last = va + NBPDR - PAGE_SIZE;
+ for (;;) {
+ pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+ KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
+     pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare"));
+ for (field = 0; field < _NPCM; field++) {
+ while (pc->pc_map[field]) {
+ bit = bsfq(pc->pc_map[field]);
+ pc->pc_map[field] &= ~(1ul << bit);
+ pv = &pc->pc_pventry[field * 64 + bit];
+ va += PAGE_SIZE;
+ pv->pv_va = va;
+ m++;
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+     ("pmap_pv_demote_pde: page %p is not managed", m));
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ if (va == va_last)
+ goto out;
+ }
+ }
+ /* This chunk is exhausted; rotate it to the tail and continue. */
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
+ }
+out:
+ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
+ }
+ PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
+}
+
+#if VM_NRESERVLEVEL > 0
+/*
+ * After promotion from 512 4KB page mappings to a single 2MB page mapping,
+ * replace the many pv entries for the 4KB page mappings by a single pv entry
+ * for the 2MB page mapping.
+ */
+static void
+pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+    struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ vm_offset_t va_last;
+ vm_page_t m;
+
+ KASSERT((pa & PDRMASK) == 0,
+     ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
+
+ /*
+ * Transfer the first page's pv entry for this mapping to the 2mpage's
+ * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
+ * a transfer avoids the possibility that get_pv_entry() calls
+ * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
+ * mappings that is being promoted.
+ */
+ m = PHYS_TO_VM_PAGE(pa);
+ va = trunc_2mpage(va);
+ pv = pmap_pvh_remove(&m->md, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
+ pvh = pa_to_pvh(pa);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ /* Free the remaining NPTEPG - 1 pv entries. */
+ va_last = va + NBPDR - PAGE_SIZE;
+ do {
+ m++;
+ va += PAGE_SIZE;
+ pmap_pvh_free(&m->md, pmap, va);
+ } while (va < va_last);
+}
+#endif /* VM_NRESERVLEVEL > 0 */
+
+/*
+ * First find and then destroy the pv entry for the specified pmap and virtual
+ * address.  This operation can be performed on pv lists for either 4KB or 2MB
+ * page mappings.  Panics (via KASSERT) if the entry does not exist.
+ */
+static void
+pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ pv = pmap_pvh_remove(pvh, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
+ free_pv_entry(pmap, pv);
+}
+
+/*
+ * Conditionally create the PV entry for a 4KB page mapping if the required
+ * memory can be allocated without resorting to reclamation.
+ *
+ * Returns TRUE on success and FALSE if no PV entry could be allocated.
+ */
+static boolean_t
+pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
+    struct rwlock **lockp)
+{
+ pv_entry_t pv;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ /* Pass NULL instead of the lock pointer to disable reclamation. */
+ if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
+ pv->pv_va = va;
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+/*
+ * Create the PV entry for a 2MB page mapping.  Always returns true unless the
+ * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
+ * false if the PV entry cannot be allocated without resorting to reclamation.
+ */
+static bool
+pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
+    struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ vm_paddr_t pa;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ /* Pass NULL instead of the lock pointer to disable reclamation. */
+ if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
+     NULL : lockp)) == NULL)
+ return (false);
+ pv->pv_va = va;
+ pa = pde & PG_PS_FRAME;
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
+ pvh = pa_to_pvh(pa);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ return (true);
+}
+
+/*
+ * Fills a page table page with mappings to consecutive physical pages:
+ * each successive PTE is "newpte" advanced by PAGE_SIZE.
+ */
+static void
+pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
+{
+ pt_entry_t *pte;
+
+ for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
+ *pte = newpte;
+ newpte += PAGE_SIZE;
+ }
+}
+
+/*
+ * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
+ * mapping is invalidated.
+ *
+ * Convenience wrapper around pmap_demote_pde_locked() that manages the
+ * PV list lock locally.
+ */
+static boolean_t
+pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
+{
+ struct rwlock *lock;
+ boolean_t rv;
+
+ lock = NULL;
+ rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
+ if (lock != NULL)
+ rw_wunlock(lock);
+ return (rv);
+}
+
+/*
+ * Demote a 2MB page mapping to 512 4KB page mappings, holding the PV
+ * list lock through *lockp.  Returns TRUE on success; on failure the
+ * 2MB mapping has been removed and FALSE is returned.
+ */
+static boolean_t
+pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
+    struct rwlock **lockp)
+{
+ pd_entry_t newpde, oldpde;
+ pt_entry_t *firstpte, newpte;
+ pt_entry_t PG_A, PG_G, PG_M, PG_RW, PG_V;
+ vm_paddr_t mptepa;
+ vm_page_t mpte;
+ struct spglist free;
+ vm_offset_t sva;
+ int PG_PTE_CACHE;
+
+ PG_G = pmap_global_bit(pmap);
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ oldpde = *pde;
+ KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
+     ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
+ if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
+     NULL) {
+ KASSERT((oldpde & PG_W) == 0,
+     ("pmap_demote_pde: page table page for a wired mapping"
+     " is missing"));
+
+ /*
+ * Invalidate the 2MB page mapping and return "failure" if the
+ * mapping was never accessed or the allocation of the new
+ * page table page fails.  If the 2MB page mapping belongs to
+ * the direct map region of the kernel's address space, then
+ * the page allocation request specifies the highest possible
+ * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
+ * normal.  Page table pages are preallocated for every other
+ * part of the kernel address space, so the direct map region
+ * is the only part of the kernel address space that must be
+ * handled here.
+ */
+ if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
+     pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
+     DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
+     VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+ SLIST_INIT(&free);
+ sva = trunc_2mpage(va);
+ pmap_remove_pde(pmap, pde, sva, &free, lockp);
+ if ((oldpde & PG_G) == 0)
+ pmap_invalidate_pde_page(pmap, sva, oldpde);
+ pmap_free_zero_pages(&free);
+ CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
+     " in pmap %p", va, pmap);
+ return (FALSE);
+ }
+ if (va < VM_MAXUSER_ADDRESS)
+ pmap_resident_count_inc(pmap, 1);
+ }
+ mptepa = VM_PAGE_TO_PHYS(mpte);
+ firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
+ newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
+ KASSERT((oldpde & PG_A) != 0,
+     ("pmap_demote_pde: oldpde is missing PG_A"));
+ KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
+     ("pmap_demote_pde: oldpde is missing PG_M"));
+ newpte = oldpde & ~PG_PS;
+ newpte = pmap_swap_pat(pmap, newpte);
+
+ /*
+ * If the page table page is new, initialize it.
+ */
+ if (mpte->wire_count == 1) {
+ mpte->wire_count = NPTEPG;
+ pmap_fill_ptp(firstpte, newpte);
+ }
+ KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
+     ("pmap_demote_pde: firstpte and newpte map different physical"
+     " addresses"));
+
+ /*
+ * If the mapping has changed attributes, update the page table
+ * entries.
+ */
+ if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
+ pmap_fill_ptp(firstpte, newpte);
+
+ /*
+ * The spare PV entries must be reserved prior to demoting the
+ * mapping, that is, prior to changing the PDE.  Otherwise, the state
+ * of the PDE and the PV lists will be inconsistent, which can result
+ * in reclaim_pv_chunk() attempting to remove a PV entry from the
+ * wrong PV list and pmap_pv_demote_pde() failing to find the expected
+ * PV entry for the 2MB page mapping that is being demoted.
+ */
+ if ((oldpde & PG_MANAGED) != 0)
+ reserve_pv_entries(pmap, NPTEPG - 1, lockp);
+
+ /*
+ * Demote the mapping.  This pmap is locked.  The old PDE has
+ * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
+ * set.  Thus, there is no danger of a race with another
+ * processor changing the setting of PG_A and/or PG_M between
+ * the read above and the store below.
+ */
+ if (workaround_erratum383)
+ pmap_update_pde(pmap, va, pde, newpde);
+ else
+ pde_store(pde, newpde);
+
+ /*
+ * Invalidate a stale recursive mapping of the page table page.
+ */
+ if (va >= VM_MAXUSER_ADDRESS)
+ pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
+
+ /*
+ * Demote the PV entry.
+ */
+ if ((oldpde & PG_MANAGED) != 0)
+ pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
+
+ atomic_add_long(&pmap_pde_demotions, 1);
+ CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx"
+     " in pmap %p", va, pmap);
+ return (TRUE);
+}
+
+/*
+ * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
+ *
+ * The replacement page table page is zeroed before being installed, so
+ * the kernel PDE remains valid but maps no 4KB pages.
+ */
+static void
+pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
+{
+ pd_entry_t newpde;
+ vm_paddr_t mptepa;
+ vm_page_t mpte;
+
+ KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ mpte = pmap_remove_pt_page(pmap, va);
+ if (mpte == NULL)
+ panic("pmap_remove_kernel_pde: Missing pt page.");
+
+ mptepa = VM_PAGE_TO_PHYS(mpte);
+ newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
+
+ /*
+ * Initialize the page table page.
+ */
+ pagezero((void *)PHYS_TO_DMAP(mptepa));
+
+ /*
+ * Demote the mapping.
+ */
+ if (workaround_erratum383)
+ pmap_update_pde(pmap, va, pde, newpde);
+ else
+ pde_store(pde, newpde);
+
+ /*
+ * Invalidate a stale recursive mapping of the page table page.
+ */
+ pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
+}
+
+/*
+ * pmap_remove_pde: do the things to unmap a superpage in a process
+ *
+ * Clears the PDE, adjusts resident/wired counts, releases the PV entry
+ * and dirty/referenced state for each constituent 4KB page, and frees
+ * (or, for the kernel pmap, reinitializes) the page table page.
+ */
+static int
+pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
+    struct spglist *free, struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ pd_entry_t oldpde;
+ vm_offset_t eva, va;
+ vm_page_t m, mpte;
+ pt_entry_t PG_G, PG_A, PG_M, PG_RW;
+
+ PG_G = pmap_global_bit(pmap);
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((sva & PDRMASK) == 0,
+     ("pmap_remove_pde: sva is not 2mpage aligned"));
+ oldpde = pte_load_clear(pdq);
+ if (oldpde & PG_W)
+ pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
+ if ((oldpde & PG_G) != 0)
+ pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
+ pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
+ if (oldpde & PG_MANAGED) {
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
+ pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
+ pmap_pvh_free(pvh, pmap, sva);
+ eva = sva + NBPDR;
+ /* Propagate dirty/referenced state to each 4KB page. */
+ for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
+     va < eva; va += PAGE_SIZE, m++) {
+ if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ if (oldpde & PG_A)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+     TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ pmap_delayed_invl_page(m);
+ }
+ }
+ if (pmap == kernel_pmap) {
+ pmap_remove_kernel_pde(pmap, pdq, sva);
+ } else {
+ mpte = pmap_remove_pt_page(pmap, sva);
+ if (mpte != NULL) {
+ pmap_resident_count_dec(pmap, 1);
+ KASSERT(mpte->wire_count == NPTEPG,
+     ("pmap_remove_pde: pte page wire count error"));
+ mpte->wire_count = 0;
+ pmap_add_delayed_free_list(mpte, free, FALSE);
+ }
+ }
+ return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
+}
+
+/*
+ * pmap_remove_pte: do the things to unmap a page in a process
+ *
+ * Clears the PTE, adjusts wired/resident counts, and for managed pages
+ * transfers dirty/referenced state to the vm_page and frees its PV entry.
+ */
+static int
+pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
+    pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
+{
+ struct md_page *pvh;
+ pt_entry_t oldpte, PG_A, PG_M, PG_RW;
+ vm_page_t m;
+
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ oldpte = pte_load_clear(ptq);
+ if (oldpte & PG_W)
+ pmap->pm_stats.wired_count -= 1;
+ pmap_resident_count_dec(pmap, 1);
+ if (oldpte & PG_MANAGED) {
+ m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
+ if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ if (oldpte & PG_A)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
+ pmap_pvh_free(&m->md, pmap, va);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+     (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+ pmap_delayed_invl_page(m);
+ }
+ return (pmap_unuse_pt(pmap, va, ptepde, free));
+}
+
+/*
+ * Remove a single page from a process address space.
+ *
+ * Silently returns if either the PDE or PTE for "va" is invalid;
+ * otherwise removes the 4KB mapping and invalidates its TLB entry.
+ */
+static void
+pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
+    struct spglist *free)
+{
+ struct rwlock *lock;
+ pt_entry_t *pte, PG_V;
+
+ PG_V = pmap_valid_bit(pmap);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ if ((*pde & PG_V) == 0)
+ return;
+ pte = pmap_pde_to_pte(pde, va);
+ if ((*pte & PG_V) == 0)
+ return;
+ lock = NULL;
+ pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
+ if (lock != NULL)
+ rw_wunlock(lock);
+ pmap_invalidate_page(pmap, va);
+}
+
+/*
+ * Removes the specified range of addresses from the page table page.
+ *
+ * Returns true if any removed mapping was non-global, i.e. the caller
+ * must perform a full TLB invalidation.  Global mappings are invalidated
+ * here, range by range, via pmap_invalidate_range().
+ */
+static bool
+pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+    pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
+{
+ pt_entry_t PG_G, *pte;
+ vm_offset_t va;
+ bool anyvalid;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PG_G = pmap_global_bit(pmap);
+ anyvalid = false;
+ /* "va" tracks the start of a pending run of global mappings. */
+ va = eva;
+ for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
+     sva += PAGE_SIZE) {
+ if (*pte == 0) {
+ if (va != eva) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = eva;
+ }
+ continue;
+ }
+ if ((*pte & PG_G) == 0)
+ anyvalid = true;
+ else if (va == eva)
+ va = sva;
+ if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
+ sva += PAGE_SIZE;
+ break;
+ }
+ }
+ if (va != eva)
+ pmap_invalidate_range(pmap, va, sva);
+ return (anyvalid);
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ struct rwlock *lock;
+ vm_offset_t va_next;
+ pml4_entry_t *pml4e;
+ pdp_entry_t *pdpe;
+ pd_entry_t ptpaddr, *pde;
+ pt_entry_t PG_G, PG_V;
+ struct spglist free;
+ int anyvalid;
+
+ PG_G = pmap_global_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+
+ /*
+ * Perform an unsynchronized read.  This is, however, safe.
+ */
+ if (pmap->pm_stats.resident_count == 0)
+ return;
+
+ anyvalid = 0;
+ SLIST_INIT(&free);
+
+ pmap_delayed_invl_started();
+ PMAP_LOCK(pmap);
+
+ /*
+ * special handling of removing one page.  a very
+ * common operation and easy to short circuit some
+ * code.
+ */
+ if (sva + PAGE_SIZE == eva) {
+ pde = pmap_pde(pmap, sva);
+ if (pde && (*pde & PG_PS) == 0) {
+ pmap_remove_page(pmap, sva, pde, &free);
+ goto out;
+ }
+ }
+
+ /* Walk the range one page table (2MB span) at a time. */
+ lock = NULL;
+ for (; sva < eva; sva = va_next) {
+
+ if (pmap->pm_stats.resident_count == 0)
+ break;
+
+ pml4e = pmap_pml4e(pmap, sva);
+ if ((*pml4e & PG_V) == 0) {
+ va_next = (sva + NBPML4) & ~PML4MASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
+ if ((*pdpe & PG_V) == 0) {
+ va_next = (sva + NBPDP) & ~PDPMASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ /*
+ * Calculate index for next page table.
+ */
+ va_next = (sva + NBPDR) & ~PDRMASK;
+ if (va_next < sva)
+ va_next = eva;
+
+ pde = pmap_pdpe_to_pde(pdpe, sva);
+ ptpaddr = *pde;
+
+ /*
+ * Weed out invalid mappings.
+ */
+ if (ptpaddr == 0)
+ continue;
+
+ /*
+ * Check for large page.
+ */
+ if ((ptpaddr & PG_PS) != 0) {
+ /*
+ * Are we removing the entire large page?  If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == va_next && eva >= va_next) {
+ /*
+ * The TLB entry for a PG_G mapping is
+ * invalidated by pmap_remove_pde().
+ */
+ if ((ptpaddr & PG_G) == 0)
+ anyvalid = 1;
+ pmap_remove_pde(pmap, pde, sva, &free, &lock);
+ continue;
+ } else if (!pmap_demote_pde_locked(pmap, pde, sva,
+     &lock)) {
+ /* The large page mapping was destroyed. */
+ continue;
+ } else
+ ptpaddr = *pde;
+ }
+
+ /*
+ * Limit our scan to either the end of the va represented
+ * by the current page table page, or to the end of the
+ * range being removed.
+ */
+ if (va_next > eva)
+ va_next = eva;
+
+ if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
+ anyvalid = 1;
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+out:
+ if (anyvalid)
+ pmap_invalidate_all(pmap);
+ PMAP_UNLOCK(pmap);
+ pmap_delayed_invl_finished();
+ pmap_free_zero_pages(&free);
+}
+
+/*
+ * Routine:	pmap_remove_all
+ * Function:
+ *		Removes this physical page from
+ *		all physical maps in which it resides.
+ *		Reflects back modify bits to the pager.
+ *
+ * Notes:
+ *		Original versions of this routine were very
+ *		inefficient because they iteratively called
+ *		pmap_remove (slow...)
+ *
+ * Any 2MB mappings of the page are first demoted; the generation
+ * counters (pv_gen) are rechecked after each lock drop/reacquire, and
+ * the scan restarts from "retry" if the PV lists changed meanwhile.
+ */
+
+void
+pmap_remove_all(vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ pmap_t pmap;
+ struct rwlock *lock;
+ pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
+ pd_entry_t *pde;
+ vm_offset_t va;
+ struct spglist free;
+ int pvh_gen, md_gen;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+     ("pmap_remove_all: page %p is not managed", m));
+ SLIST_INIT(&free);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
+     pa_to_pvh(VM_PAGE_TO_PHYS(m));
+retry:
+ rw_wlock(lock);
+ /* Demote every 2MB mapping that contains this page. */
+ while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ va = pv->pv_va;
+ pde = pmap_pde(pmap, va);
+ (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
+ PMAP_UNLOCK(pmap);
+ }
+ /* Now tear down each remaining 4KB mapping of the page. */
+ while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ md_gen = m->md.pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ pmap_resident_count_dec(pmap, 1);
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
+     " a 2mpage in page %p's pv list", m));
+ pte = pmap_pde_to_pte(pde, pv->pv_va);
+ tpte = pte_load_clear(pte);
+ if (tpte & PG_W)
+ pmap->pm_stats.wired_count--;
+ if (tpte & PG_A)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+
+ /*
+ * Update the vm_page_t clean and reference bits.
+ */
+ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ free_pv_entry(pmap, pv);
+ PMAP_UNLOCK(pmap);
+ }
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ rw_wunlock(lock);
+ pmap_delayed_invl_wait(m);
+ pmap_free_zero_pages(&free);
+}
+
+/*
+ * pmap_protect_pde: do the things to protect a 2mpage in a process
+ *
+ * Returns TRUE if the caller must perform a full TLB invalidation
+ * (i.e. a non-global PDE was changed); global PDEs are invalidated here.
+ */
+static boolean_t
+pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
+{
+ pd_entry_t newpde, oldpde;
+ vm_offset_t eva, va;
+ vm_page_t m;
+ boolean_t anychanged;
+ pt_entry_t PG_G, PG_M, PG_RW;
+
+ PG_G = pmap_global_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((sva & PDRMASK) == 0,
+     ("pmap_protect_pde: sva is not 2mpage aligned"));
+ anychanged = FALSE;
+retry:
+ oldpde = newpde = *pde;
+ if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
+     (PG_MANAGED | PG_M | PG_RW)) {
+ /* Propagate the dirty bit to every constituent 4KB page. */
+ eva = sva + NBPDR;
+ for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
+     va < eva; va += PAGE_SIZE, m++)
+ vm_page_dirty(m);
+ }
+ if ((prot & VM_PROT_WRITE) == 0)
+ newpde &= ~(PG_RW | PG_M);
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ newpde |= pg_nx;
+ if (newpde != oldpde) {
+ /*
+ * As an optimization to future operations on this PDE, clear
+ * PG_PROMOTED.  The impending invalidation will remove any
+ * lingering 4KB page mappings from the TLB.
+ */
+ if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
+ goto retry;
+ if ((oldpde & PG_G) != 0)
+ pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
+ else
+ anychanged = TRUE;
+ }
+ return (anychanged);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ *
+ * VM_PROT_NONE degenerates to pmap_remove(); a request that removes
+ * neither write nor execute permission is a no-op.
+ */
+void
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ vm_offset_t va_next;
+ pml4_entry_t *pml4e;
+ pdp_entry_t *pdpe;
+ pd_entry_t ptpaddr, *pde;
+ pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
+ boolean_t anychanged;
+
+ KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
+ if (prot == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+
+ if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
+     (VM_PROT_WRITE|VM_PROT_EXECUTE))
+ return;
+
+ PG_G = pmap_global_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ anychanged = FALSE;
+
+ /*
+ * Although this function delays and batches the invalidation
+ * of stale TLB entries, it does not need to call
+ * pmap_delayed_invl_started() and
+ * pmap_delayed_invl_finished(), because it does not
+ * ordinarily destroy mappings.  Stale TLB entries from
+ * protection-only changes need only be invalidated before the
+ * pmap lock is released, because protection-only changes do
+ * not destroy PV entries.  Even operations that iterate over
+ * a physical page's PV list of mappings, like
+ * pmap_remove_write(), acquire the pmap lock for each
+ * mapping.  Consequently, for protection-only changes, the
+ * pmap lock suffices to synchronize both page table and TLB
+ * updates.
+ *
+ * This function only destroys a mapping if pmap_demote_pde()
+ * fails.  In that case, stale TLB entries are immediately
+ * invalidated.
+ */
+
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = va_next) {
+
+ pml4e = pmap_pml4e(pmap, sva);
+ if ((*pml4e & PG_V) == 0) {
+ va_next = (sva + NBPML4) & ~PML4MASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
+ if ((*pdpe & PG_V) == 0) {
+ va_next = (sva + NBPDP) & ~PDPMASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ va_next = (sva + NBPDR) & ~PDRMASK;
+ if (va_next < sva)
+ va_next = eva;
+
+ pde = pmap_pdpe_to_pde(pdpe, sva);
+ ptpaddr = *pde;
+
+ /*
+ * Weed out invalid mappings.
+ */
+ if (ptpaddr == 0)
+ continue;
+
+ /*
+ * Check for large page.
+ */
+ if ((ptpaddr & PG_PS) != 0) {
+ /*
+ * Are we protecting the entire large page?  If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == va_next && eva >= va_next) {
+ /*
+ * The TLB entry for a PG_G mapping is
+ * invalidated by pmap_protect_pde().
+ */
+ if (pmap_protect_pde(pmap, pde, sva, prot))
+ anychanged = TRUE;
+ continue;
+ } else if (!pmap_demote_pde(pmap, pde, sva)) {
+ /*
+ * The large page mapping was destroyed.
+ */
+ continue;
+ }
+ }
+
+ if (va_next > eva)
+ va_next = eva;
+
+ for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
+     sva += PAGE_SIZE) {
+ pt_entry_t obits, pbits;
+ vm_page_t m;
+
+retry:
+ obits = pbits = *pte;
+ if ((pbits & PG_V) == 0)
+ continue;
+
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
+     (PG_MANAGED | PG_M | PG_RW)) {
+ m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
+ vm_page_dirty(m);
+ }
+ pbits &= ~(PG_RW | PG_M);
+ }
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ pbits |= pg_nx;
+
+ /* Retry on CAS failure: a racing update changed the PTE. */
+ if (pbits != obits) {
+ if (!atomic_cmpset_long(pte, obits, pbits))
+ goto retry;
+ if (obits & PG_G)
+ pmap_invalidate_page(pmap, sva);
+ else
+ anychanged = TRUE;
+ }
+ }
+ }
+ if (anychanged)
+ pmap_invalidate_all(pmap);
+ PMAP_UNLOCK(pmap);
+}
+
+#if VM_NRESERVLEVEL > 0
+/*
+ * Tries to promote the 512, contiguous 4KB page mappings that are within a
+ * single page table page (PTP) to a single 2MB page mapping. For promotion
+ * to occur, two conditions must be met: (1) the 4KB page mappings must map
+ * aligned, contiguous physical memory and (2) the 4KB page mappings must have
+ * identical characteristics.
+ */
+static void
+pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
+ struct rwlock **lockp)
+{
+ pd_entry_t newpde;
+ pt_entry_t *firstpte, oldpte, pa, *pte;
+ pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V;
+ vm_page_t mpte;
+ int PG_PTE_CACHE;
+
+ /* The PG_* bit positions depend on the pmap type; look them up. */
+ PG_A = pmap_accessed_bit(pmap);
+ PG_G = pmap_global_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * Examine the first PTE in the specified PTP. Abort if this PTE is
+ * either invalid, unused, or does not map the first 4KB physical page
+ * within a 2MB page.
+ */
+ firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
+ /* Re-read the PTE if the opportunistic PG_RW clearing below races. */
+setpde:
+ newpde = *firstpte;
+ if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
+ atomic_add_long(&pmap_pde_p_failures, 1);
+ CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return;
+ }
+ if ((newpde & (PG_M | PG_RW)) == PG_RW) {
+ /*
+ * When PG_M is already clear, PG_RW can be cleared without
+ * a TLB invalidation.
+ */
+ if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
+ goto setpde;
+ newpde &= ~PG_RW;
+ }
+
+ /*
+ * Examine each of the other PTEs in the specified PTP. Abort if this
+ * PTE maps an unexpected 4KB physical page or does not have identical
+ * characteristics to the first PTE.
+ */
+ pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
+ for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
+ /* Re-read this PTE if the PG_RW clearing below races. */
+setpte:
+ oldpte = *pte;
+ if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
+ atomic_add_long(&pmap_pde_p_failures, 1);
+ CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return;
+ }
+ if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
+ /*
+ * When PG_M is already clear, PG_RW can be cleared
+ * without a TLB invalidation.
+ */
+ if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
+ goto setpte;
+ oldpte &= ~PG_RW;
+ CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
+ " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
+ (va & ~PDRMASK), pmap);
+ }
+ if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
+ atomic_add_long(&pmap_pde_p_failures, 1);
+ CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return;
+ }
+ pa -= PAGE_SIZE;
+ }
+
+ /*
+ * Save the page table page in its current state until the PDE
+ * mapping the superpage is demoted by pmap_demote_pde() or
+ * destroyed by pmap_remove_pde().
+ */
+ mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+ KASSERT(mpte >= vm_page_array &&
+ mpte < &vm_page_array[vm_page_array_size],
+ ("pmap_promote_pde: page table page is out of range"));
+ KASSERT(mpte->pindex == pmap_pde_pindex(va),
+ ("pmap_promote_pde: page table page's pindex is wrong"));
+ if (pmap_insert_pt_page(pmap, mpte)) {
+ atomic_add_long(&pmap_pde_p_failures, 1);
+ CTR2(KTR_PMAP,
+ "pmap_promote_pde: failure for va %#lx in pmap %p", va,
+ pmap);
+ return;
+ }
+
+ /*
+ * Promote the pv entries.
+ */
+ if ((newpde & PG_MANAGED) != 0)
+ pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
+
+ /*
+ * Propagate the PAT index to its proper position.
+ */
+ newpde = pmap_swap_pat(pmap, newpde);
+
+ /*
+ * Map the superpage.
+ */
+ if (workaround_erratum383)
+ pmap_update_pde(pmap, va, pde, PG_PS | newpde);
+ else
+ pde_store(pde, PG_PROMOTED | PG_PS | newpde);
+
+ atomic_add_long(&pmap_pde_promotions, 1);
+ CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
+ " in pmap %p", va, pmap);
+}
+#endif /* VM_NRESERVLEVEL > 0 */
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ *
+ * When destroying both a page table and PV entry, this function
+ * performs the TLB invalidation before releasing the PV list
+ * lock, so we do not need pmap_delayed_invl_page() calls here.
+ */
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind)
+{
+ struct rwlock *lock;
+ pd_entry_t *pde;
+ pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V;
+ pt_entry_t newpte, origpte;
+ pv_entry_t pv;
+ vm_paddr_t opa, pa;
+ vm_page_t mpte, om;
+ int rv;
+ boolean_t nosleep;
+
+ /* Pmap-type-specific PTE bit positions. */
+ PG_A = pmap_accessed_bit(pmap);
+ PG_G = pmap_global_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ va = trunc_page(va);
+ KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+ KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
+ ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
+ va));
+ KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
+ va >= kmi.clean_eva,
+ ("pmap_enter: managed mapping within the clean submap"));
+ if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+ VM_OBJECT_ASSERT_LOCKED(m->object);
+ KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
+ ("pmap_enter: flags %u has reserved bits set", flags));
+ pa = VM_PAGE_TO_PHYS(m);
+ /* Build the new PTE from the physical address and requested bits. */
+ newpte = (pt_entry_t)(pa | PG_A | PG_V);
+ if ((flags & VM_PROT_WRITE) != 0)
+ newpte |= PG_M;
+ if ((prot & VM_PROT_WRITE) != 0)
+ newpte |= PG_RW;
+ KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
+ ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ newpte |= pg_nx;
+ if ((flags & PMAP_ENTER_WIRED) != 0)
+ newpte |= PG_W;
+ if (va < VM_MAXUSER_ADDRESS)
+ newpte |= PG_U;
+ if (pmap == kernel_pmap)
+ newpte |= PG_G;
+ newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
+
+ /*
+ * Set modified bit gratuitously for writeable mappings if
+ * the page is unmanaged. We do not want to take a fault
+ * to do the dirty bit accounting for these mappings.
+ */
+ if ((m->oflags & VPO_UNMANAGED) != 0) {
+ if ((newpte & PG_RW) != 0)
+ newpte |= PG_M;
+ } else
+ newpte |= PG_MANAGED;
+
+ lock = NULL;
+ PMAP_LOCK(pmap);
+ if (psind == 1) {
+ /* Assert the required virtual and physical alignment. */
+ KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
+ KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
+ rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
+ goto out;
+ }
+ mpte = NULL;
+
+ /*
+ * In the case that a page table page is not
+ * resident, we are creating it here.
+ */
+retry:
+ pde = pmap_pde(pmap, va);
+ if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
+ pmap_demote_pde_locked(pmap, pde, va, &lock))) {
+ pte = pmap_pde_to_pte(pde, va);
+ if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
+ mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+ mpte->wire_count++;
+ }
+ } else if (va < VM_MAXUSER_ADDRESS) {
+ /*
+ * Here if the pte page isn't mapped, or if it has been
+ * deallocated.
+ */
+ nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+ mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
+ nosleep ? NULL : &lock);
+ if (mpte == NULL && nosleep) {
+ rv = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ goto retry;
+ } else
+ panic("pmap_enter: invalid page directory va=%#lx", va);
+
+ origpte = *pte;
+
+ /*
+ * Is the specified virtual address already mapped?
+ */
+ if ((origpte & PG_V) != 0) {
+ /*
+ * Wiring change, just update stats. We don't worry about
+ * wiring PT pages as they remain resident as long as there
+ * are valid mappings in them. Hence, if a user page is wired,
+ * the PT page will be also.
+ */
+ if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
+ pmap->pm_stats.wired_count++;
+ else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
+ pmap->pm_stats.wired_count--;
+
+ /*
+ * Remove the extra PT page reference.
+ */
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ KASSERT(mpte->wire_count > 0,
+ ("pmap_enter: missing reference to page table page,"
+ " va: 0x%lx", va));
+ }
+
+ /*
+ * Has the physical page changed?
+ */
+ opa = origpte & PG_FRAME;
+ if (opa == pa) {
+ /*
+ * No, might be a protection or wiring change.
+ */
+ if ((origpte & PG_MANAGED) != 0 &&
+ (newpte & PG_RW) != 0)
+ vm_page_aflag_set(m, PGA_WRITEABLE);
+ if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
+ goto unchanged;
+ goto validate;
+ }
+ } else {
+ /*
+ * Increment the counters.
+ */
+ if ((newpte & PG_W) != 0)
+ pmap->pm_stats.wired_count++;
+ pmap_resident_count_inc(pmap, 1);
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((newpte & PG_MANAGED) != 0) {
+ pv = get_pv_entry(pmap, &lock);
+ pv->pv_va = va;
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ if ((newpte & PG_RW) != 0)
+ vm_page_aflag_set(m, PGA_WRITEABLE);
+ }
+
+ /*
+ * Update the PTE.
+ */
+ if ((origpte & PG_V) != 0) {
+ /* The old mapping was valid: swap in the new PTE and reconcile. */
+validate:
+ origpte = pte_load_store(pte, newpte);
+ opa = origpte & PG_FRAME;
+ if (opa != pa) {
+ if ((origpte & PG_MANAGED) != 0) {
+ om = PHYS_TO_VM_PAGE(opa);
+ if ((origpte & (PG_M | PG_RW)) == (PG_M |
+ PG_RW))
+ vm_page_dirty(om);
+ if ((origpte & PG_A) != 0)
+ vm_page_aflag_set(om, PGA_REFERENCED);
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+ pmap_pvh_free(&om->md, pmap, va);
+ if ((om->aflags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&om->md.pv_list) &&
+ ((om->flags & PG_FICTITIOUS) != 0 ||
+ TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
+ }
+ } else if ((newpte & PG_M) == 0 && (origpte & (PG_M |
+ PG_RW)) == (PG_M | PG_RW)) {
+ if ((origpte & PG_MANAGED) != 0)
+ vm_page_dirty(m);
+
+ /*
+ * Although the PTE may still have PG_RW set, TLB
+ * invalidation may nonetheless be required because
+ * the PTE no longer has PG_M set.
+ */
+ } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
+ /*
+ * This PTE change does not require TLB invalidation.
+ */
+ goto unchanged;
+ }
+ if ((origpte & PG_A) != 0)
+ pmap_invalidate_page(pmap, va);
+ } else
+ pte_store(pte, newpte);
+
+ /* At this point any required TLB invalidation has been performed. */
+unchanged:
+
+#if VM_NRESERVLEVEL > 0
+ /*
+ * If both the page table page and the reservation are fully
+ * populated, then attempt promotion.
+ */
+ if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
+ pmap_ps_enabled(pmap) &&
+ (m->flags & PG_FICTITIOUS) == 0 &&
+ vm_reserv_level_iffullpop(m) == 0)
+ pmap_promote_pde(pmap, pde, va, &lock);
+#endif
+
+ rv = KERN_SUCCESS;
+out:
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+ return (rv);
+}
+
+/*
+ * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
+ * if successful. Returns false if (1) a page table page cannot be allocated
+ * without sleeping, (2) a mapping already exists at the specified virtual
+ * address, or (3) a PV entry cannot be allocated without reclaiming another
+ * PV entry.
+ */
+static bool
+pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ struct rwlock **lockp)
+{
+ pd_entry_t newpde;
+ pt_entry_t PG_V;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PG_V = pmap_valid_bit(pmap);
+ newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
+ PG_PS | PG_V;
+ if ((m->oflags & VPO_UNMANAGED) == 0)
+ newpde |= PG_MANAGED;
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ newpde |= pg_nx;
+ if (va < VM_MAXUSER_ADDRESS)
+ newpde |= PG_U;
+ /* PG_RW is deliberately not set: the new mapping is read-only. */
+ return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
+ PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
+ KERN_SUCCESS);
+}
+
+/*
+ * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
+ * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
+ * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
+ * a mapping already exists at the specified virtual address. Returns
+ * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
+ * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
+ * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
+ *
+ * The parameter "m" is only used when creating a managed, writeable mapping.
+ */
+static int
+pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
+ vm_page_t m, struct rwlock **lockp)
+{
+ struct spglist free;
+ pd_entry_t oldpde, *pde;
+ pt_entry_t PG_G, PG_RW, PG_V;
+ vm_page_t mt, pdpg;
+
+ PG_G = pmap_global_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
+ ("pmap_enter_pde: newpde is missing PG_M"));
+ PG_V = pmap_valid_bit(pmap);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /* Find or allocate the page directory page that will hold the PDE. */
+ if ((pdpg = pmap_allocpde(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
+ NULL : lockp)) == NULL) {
+ CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
+ pde = &pde[pmap_pde_index(va)];
+ oldpde = *pde;
+ if ((oldpde & PG_V) != 0) {
+ KASSERT(pdpg->wire_count > 1,
+ ("pmap_enter_pde: pdpg's wire count is too low"));
+ if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
+ /* Drop the reference taken by pmap_allocpde() above. */
+ pdpg->wire_count--;
+ CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (KERN_FAILURE);
+ }
+ /* Break the existing mapping(s). */
+ SLIST_INIT(&free);
+ if ((oldpde & PG_PS) != 0) {
+ /*
+ * The reference to the PD page that was acquired by
+ * pmap_allocpde() ensures that it won't be freed.
+ * However, if the PDE resulted from a promotion, then
+ * a reserved PT page could be freed.
+ */
+ (void)pmap_remove_pde(pmap, pde, va, &free, lockp);
+ if ((oldpde & PG_G) == 0)
+ pmap_invalidate_pde_page(pmap, va, oldpde);
+ } else {
+ pmap_delayed_invl_started();
+ if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
+ lockp))
+ pmap_invalidate_all(pmap);
+ pmap_delayed_invl_finished();
+ }
+ pmap_free_zero_pages(&free);
+ if (va >= VM_MAXUSER_ADDRESS) {
+ mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+ if (pmap_insert_pt_page(pmap, mt)) {
+ /*
+ * XXX Currently, this can't happen because
+ * we do not perform pmap_enter(psind == 1)
+ * on the kernel pmap.
+ */
+ panic("pmap_enter_pde: trie insert failed");
+ }
+ } else
+ KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
+ pde));
+ }
+ if ((newpde & PG_MANAGED) != 0) {
+ /*
+ * Abort this mapping if its PV entry could not be created.
+ */
+ if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
+ SLIST_INIT(&free);
+ if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
+ /*
+ * Although "va" is not mapped, paging-
+ * structure caches could nonetheless have
+ * entries that refer to the freed page table
+ * pages. Invalidate those entries.
+ */
+ pmap_invalidate_page(pmap, va);
+ pmap_free_zero_pages(&free);
+ }
+ CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ if ((newpde & PG_RW) != 0) {
+ /* Mark every 4KB constituent page as writeable. */
+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+ vm_page_aflag_set(mt, PGA_WRITEABLE);
+ }
+ }
+
+ /*
+ * Increment counters.
+ */
+ if ((newpde & PG_W) != 0)
+ pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
+ pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
+
+ /*
+ * Map the superpage. (This is not a promoted mapping; there will not
+ * be any lingering 4KB page mappings in the TLB.)
+ */
+ pde_store(pde, newpde);
+
+ atomic_add_long(&pmap_pde_mappings, 1);
+ CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
+ " in pmap %p", va, pmap);
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start. This page is
+ * mapped at the given virtual address start. Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object. The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end. Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
+void
+pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
+ vm_page_t m_start, vm_prot_t prot)
+{
+ struct rwlock *lock;
+ vm_offset_t va;
+ vm_page_t m, mpte;
+ vm_pindex_t diff, psize;
+
+ VM_OBJECT_ASSERT_LOCKED(m_start->object);
+
+ psize = atop(end - start);
+ mpte = NULL;
+ m = m_start;
+ lock = NULL;
+ PMAP_LOCK(pmap);
+ /* A successful 2MB mapping consumes NBPDR / PAGE_SIZE pages at once. */
+ while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
+ va = start + ptoa(diff);
+ if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
+ m->psind == 1 && pmap_ps_enabled(pmap) &&
+ pmap_enter_2mpage(pmap, va, m, prot, &lock))
+ m = &m[NBPDR / PAGE_SIZE - 1];
+ else
+ mpte = pmap_enter_quick_locked(pmap, va, m, prot,
+ mpte, &lock);
+ m = TAILQ_NEXT(m, listq);
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. Current pmap & pmap exists.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * but is *MUCH* faster than pmap_enter...
+ */
+
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+ struct rwlock *lock;
+
+ lock = NULL;
+ PMAP_LOCK(pmap);
+ /* The result is discarded: failing to create this mapping is OK. */
+ (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Attempt to create a 4KB read-only mapping for "m" at "va" without ever
+ * sleeping. "mpte" is the page table page returned by the previous call,
+ * if any; it is reused when it also covers "va". Returns the page table
+ * page for "va" for a user address on success; returns NULL for kernel
+ * addresses, when the PTE is already valid, or when an allocation fails.
+ */
+static vm_page_t
+pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
+{
+ struct spglist free;
+ pt_entry_t *pte, PG_V;
+ vm_paddr_t pa;
+
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+ (m->oflags & VPO_UNMANAGED) != 0,
+ ("pmap_enter_quick_locked: managed mapping within the clean submap"));
+ PG_V = pmap_valid_bit(pmap);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * In the case that a page table page is not
+ * resident, we are creating it here.
+ */
+ if (va < VM_MAXUSER_ADDRESS) {
+ vm_pindex_t ptepindex;
+ pd_entry_t *ptepa;
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = pmap_pde_pindex(va);
+ if (mpte && (mpte->pindex == ptepindex)) {
+ mpte->wire_count++;
+ } else {
+ /*
+ * Get the page directory entry
+ */
+ ptepa = pmap_pde(pmap, va);
+
+ /*
+ * If the page table page is mapped, we just increment
+ * the hold count, and activate it. Otherwise, we
+ * attempt to allocate a page table page. If this
+ * attempt fails, we don't retry. Instead, we give up.
+ */
+ if (ptepa && (*ptepa & PG_V) != 0) {
+ if (*ptepa & PG_PS)
+ return (NULL);
+ mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
+ mpte->wire_count++;
+ } else {
+ /*
+ * Pass NULL instead of the PV list lock
+ * pointer, because we don't intend to sleep.
+ */
+ mpte = _pmap_allocpte(pmap, ptepindex, NULL);
+ if (mpte == NULL)
+ return (mpte);
+ }
+ }
+ pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
+ pte = &pte[pmap_pte_index(va)];
+ } else {
+ /* Kernel VA: the page table is reached through vtopte(). */
+ mpte = NULL;
+ pte = vtopte(va);
+ }
+ if (*pte) {
+ /* A mapping already exists; undo the PTP reference and bail. */
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ mpte = NULL;
+ }
+ return (mpte);
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0 &&
+ !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
+ if (mpte != NULL) {
+ SLIST_INIT(&free);
+ if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
+ /*
+ * Although "va" is not mapped, paging-
+ * structure caches could nonetheless have
+ * entries that refer to the freed page table
+ * pages. Invalidate those entries.
+ */
+ pmap_invalidate_page(pmap, va);
+ pmap_free_zero_pages(&free);
+ }
+ mpte = NULL;
+ }
+ return (mpte);
+ }
+
+ /*
+ * Increment counters
+ */
+ pmap_resident_count_inc(pmap, 1);
+
+ pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0);
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ pa |= pg_nx;
+
+ /*
+ * Now validate mapping with RO protection
+ */
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ pte_store(pte, pa | PG_V | PG_U);
+ else
+ pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+ return (mpte);
+}
+
+/*
+ * Make a temporary mapping for a physical address. This is only intended
+ * to be used for panic dumps.
+ */
+void *
+pmap_kenter_temporary(vm_paddr_t pa, int i)
+{
+ vm_offset_t va;
+
+ /* crashdumpmap is a preallocated KVA window; "i" selects a page slot. */
+ va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
+ pmap_kenter(va, pa);
+ /* Flush any stale TLB entry for the reused slot. */
+ invlpg(va);
+ return ((void *)crashdumpmap);
+}
+
+/*
+ * This code maps large physical mmap regions into the
+ * processor address space. Note that some shortcuts
+ * are taken, but the code works.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size)
+{
+ pd_entry_t *pde;
+ pt_entry_t PG_A, PG_M, PG_RW, PG_V;
+ vm_paddr_t pa, ptepa;
+ vm_page_t p, pdpg;
+ int pat_mode;
+
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
+ ("pmap_object_init_pt: non-device object"));
+ /* Only 2MB-aligned, 2MB-multiple requests are handled; else no-op. */
+ if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
+ if (!pmap_ps_enabled(pmap))
+ return;
+ if (!vm_object_populate(object, pindex, pindex + atop(size)))
+ return;
+ p = vm_page_lookup(object, pindex);
+ KASSERT(p->valid == VM_PAGE_BITS_ALL,
+ ("pmap_object_init_pt: invalid page %p", p));
+ pat_mode = p->md.pat_mode;
+
+ /*
+ * Abort the mapping if the first page is not physically
+ * aligned to a 2MB page boundary.
+ */
+ ptepa = VM_PAGE_TO_PHYS(p);
+ if (ptepa & (NBPDR - 1))
+ return;
+
+ /*
+ * Skip the first page. Abort the mapping if the rest of
+ * the pages are not physically contiguous or have differing
+ * memory attributes.
+ */
+ p = TAILQ_NEXT(p, listq);
+ for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
+ pa += PAGE_SIZE) {
+ KASSERT(p->valid == VM_PAGE_BITS_ALL,
+ ("pmap_object_init_pt: invalid page %p", p));
+ if (pa != VM_PAGE_TO_PHYS(p) ||
+ pat_mode != p->md.pat_mode)
+ return;
+ p = TAILQ_NEXT(p, listq);
+ }
+
+ /*
+ * Map using 2MB pages. Since "ptepa" is 2M aligned and
+ * "size" is a multiple of 2M, adding the PAT setting to "pa"
+ * will not affect the termination of this loop.
+ */
+ PMAP_LOCK(pmap);
+ for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
+ pa < ptepa + size; pa += NBPDR) {
+ pdpg = pmap_allocpde(pmap, addr, NULL);
+ if (pdpg == NULL) {
+ /*
+ * The creation of mappings below is only an
+ * optimization. If a page directory page
+ * cannot be allocated without blocking,
+ * continue on to the next mapping rather than
+ * blocking.
+ */
+ addr += NBPDR;
+ continue;
+ }
+ pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
+ pde = &pde[pmap_pde_index(addr)];
+ if ((*pde & PG_V) == 0) {
+ pde_store(pde, pa | PG_PS | PG_M | PG_A |
+ PG_U | PG_RW | PG_V);
+ pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
+ atomic_add_long(&pmap_pde_mappings, 1);
+ } else {
+ /* Continue on if the PDE is already valid. */
+ pdpg->wire_count--;
+ KASSERT(pdpg->wire_count > 0,
+ ("pmap_object_init_pt: missing reference "
+ "to page directory page, va: 0x%lx", addr));
+ }
+ addr += NBPDR;
+ }
+ PMAP_UNLOCK(pmap);
+ }
+}
+
+/*
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware
+ * feature, so there is no need to invalidate any TLB entries.
+ * Since pmap_demote_pde() for the wired entry must never fail,
+ * pmap_delayed_invl_started()/finished() calls around the
+ * function are not needed.
+ */
+void
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t va_next;
+ pml4_entry_t *pml4e;
+ pdp_entry_t *pdpe;
+ pd_entry_t *pde;
+ pt_entry_t *pte, PG_V;
+
+ PG_V = pmap_valid_bit(pmap);
+ PMAP_LOCK(pmap);
+ /* Walk the range one paging-structure span at a time. */
+ for (; sva < eva; sva = va_next) {
+ pml4e = pmap_pml4e(pmap, sva);
+ if ((*pml4e & PG_V) == 0) {
+ va_next = (sva + NBPML4) & ~PML4MASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+ pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
+ if ((*pdpe & PG_V) == 0) {
+ va_next = (sva + NBPDP) & ~PDPMASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+ va_next = (sva + NBPDR) & ~PDRMASK;
+ if (va_next < sva)
+ va_next = eva;
+ pde = pmap_pdpe_to_pde(pdpe, sva);
+ if ((*pde & PG_V) == 0)
+ continue;
+ if ((*pde & PG_PS) != 0) {
+ if ((*pde & PG_W) == 0)
+ panic("pmap_unwire: pde %#jx is missing PG_W",
+ (uintmax_t)*pde);
+
+ /*
+ * Are we unwiring the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == va_next && eva >= va_next) {
+ atomic_clear_long(pde, PG_W);
+ pmap->pm_stats.wired_count -= NBPDR /
+ PAGE_SIZE;
+ continue;
+ } else if (!pmap_demote_pde(pmap, pde, sva))
+ panic("pmap_unwire: demotion failed");
+ }
+ if (va_next > eva)
+ va_next = eva;
+ /* Unwire each 4KB mapping in [sva, va_next). */
+ for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & PG_V) == 0)
+ continue;
+ if ((*pte & PG_W) == 0)
+ panic("pmap_unwire: pte %#jx is missing PG_W",
+ (uintmax_t)*pte);
+
+ /*
+ * PG_W must be cleared atomically. Although the pmap
+ * lock synchronizes access to PG_W, another processor
+ * could be setting PG_M and/or PG_A concurrently.
+ */
+ atomic_clear_long(pte, PG_W);
+ pmap->pm_stats.wired_count--;
+ }
+ }
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
+ vm_offset_t src_addr)
+{
+ struct rwlock *lock;
+ struct spglist free;
+ vm_offset_t addr;
+ vm_offset_t end_addr = src_addr + len;
+ vm_offset_t va_next;
+ vm_page_t dst_pdpg, dstmpte, srcmpte;
+ pt_entry_t PG_A, PG_M, PG_V;
+
+ /* Only same-VA copies between same-type pmaps are attempted. */
+ if (dst_addr != src_addr)
+ return;
+
+ if (dst_pmap->pm_type != src_pmap->pm_type)
+ return;
+
+ /*
+ * EPT page table entries that require emulation of A/D bits are
+ * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although
+ * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit
+ * (aka EPT_PG_EXECUTE) could still be set. Since some EPT
+ * implementations flag an EPT misconfiguration for exec-only
+ * mappings we skip this function entirely for emulated pmaps.
+ */
+ if (pmap_emulate_ad_bits(dst_pmap))
+ return;
+
+ lock = NULL;
+ /* Acquire the two pmap locks in a consistent (address) order. */
+ if (dst_pmap < src_pmap) {
+ PMAP_LOCK(dst_pmap);
+ PMAP_LOCK(src_pmap);
+ } else {
+ PMAP_LOCK(src_pmap);
+ PMAP_LOCK(dst_pmap);
+ }
+
+ PG_A = pmap_accessed_bit(dst_pmap);
+ PG_M = pmap_modified_bit(dst_pmap);
+ PG_V = pmap_valid_bit(dst_pmap);
+
+ for (addr = src_addr; addr < end_addr; addr = va_next) {
+ pt_entry_t *src_pte, *dst_pte;
+ pml4_entry_t *pml4e;
+ pdp_entry_t *pdpe;
+ pd_entry_t srcptepaddr, *pde;
+
+ KASSERT(addr < UPT_MIN_ADDRESS,
+ ("pmap_copy: invalid to pmap_copy page tables"));
+
+ pml4e = pmap_pml4e(src_pmap, addr);
+ if ((*pml4e & PG_V) == 0) {
+ va_next = (addr + NBPML4) & ~PML4MASK;
+ if (va_next < addr)
+ va_next = end_addr;
+ continue;
+ }
+
+ pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
+ if ((*pdpe & PG_V) == 0) {
+ va_next = (addr + NBPDP) & ~PDPMASK;
+ if (va_next < addr)
+ va_next = end_addr;
+ continue;
+ }
+
+ va_next = (addr + NBPDR) & ~PDRMASK;
+ if (va_next < addr)
+ va_next = end_addr;
+
+ pde = pmap_pdpe_to_pde(pdpe, addr);
+ srcptepaddr = *pde;
+ if (srcptepaddr == 0)
+ continue;
+
+ if (srcptepaddr & PG_PS) {
+ /* Only copy a 2MB mapping that fits entirely in range. */
+ if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
+ continue;
+ dst_pdpg = pmap_allocpde(dst_pmap, addr, NULL);
+ if (dst_pdpg == NULL)
+ break;
+ pde = (pd_entry_t *)
+ PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
+ pde = &pde[pmap_pde_index(addr)];
+ if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
+ pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
+ PMAP_ENTER_NORECLAIM, &lock))) {
+ *pde = srcptepaddr & ~PG_W;
+ pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE);
+ atomic_add_long(&pmap_pde_mappings, 1);
+ } else
+ dst_pdpg->wire_count--;
+ continue;
+ }
+
+ srcptepaddr &= PG_FRAME;
+ srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
+ KASSERT(srcmpte->wire_count > 0,
+ ("pmap_copy: source page table page is unused"));
+
+ if (va_next > end_addr)
+ va_next = end_addr;
+
+ src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
+ src_pte = &src_pte[pmap_pte_index(addr)];
+ dstmpte = NULL;
+ while (addr < va_next) {
+ pt_entry_t ptetemp;
+ ptetemp = *src_pte;
+ /*
+ * we only virtual copy managed pages
+ */
+ if ((ptetemp & PG_MANAGED) != 0) {
+ if (dstmpte != NULL &&
+ dstmpte->pindex == pmap_pde_pindex(addr))
+ dstmpte->wire_count++;
+ else if ((dstmpte = pmap_allocpte(dst_pmap,
+ addr, NULL)) == NULL)
+ goto out;
+ dst_pte = (pt_entry_t *)
+ PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
+ dst_pte = &dst_pte[pmap_pte_index(addr)];
+ if (*dst_pte == 0 &&
+ pmap_try_insert_pv_entry(dst_pmap, addr,
+ PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
+ &lock)) {
+ /*
+ * Clear the wired, modified, and
+ * accessed (referenced) bits
+ * during the copy.
+ */
+ *dst_pte = ptetemp & ~(PG_W | PG_M |
+ PG_A);
+ pmap_resident_count_inc(dst_pmap, 1);
+ } else {
+ SLIST_INIT(&free);
+ if (pmap_unwire_ptp(dst_pmap, addr,
+ dstmpte, &free)) {
+ /*
+ * Although "addr" is not
+ * mapped, paging-structure
+ * caches could nonetheless
+ * have entries that refer to
+ * the freed page table pages.
+ * Invalidate those entries.
+ */
+ pmap_invalidate_page(dst_pmap,
+ addr);
+ pmap_free_zero_pages(&free);
+ }
+ goto out;
+ }
+ /*
+ * Stop once the destination PTP holds as
+ * many mappings as the source PTP.
+ */
+ if (dstmpte->wire_count >= srcmpte->wire_count)
+ break;
+ }
+ addr += PAGE_SIZE;
+ src_pte++;
+ }
+ }
+out:
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(src_pmap);
+ PMAP_UNLOCK(dst_pmap);
+}
+
+/*
+ * Zero the specified hardware page.
+ */
+void
+pmap_zero_page(vm_page_t m)
+{
+ /* Reach the page through the direct map; no transient mapping needed. */
+ vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+
+ pagezero((void *)va);
+}
+
+/*
+ * Zero an area within a single hardware page. off and size must not
+ * cover an area beyond a single hardware page.
+ */
+void
+pmap_zero_page_area(vm_page_t m, int off, int size)
+{
+ vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+
+ /* Use the optimized whole-page path when the area is the full page. */
+ if (off == 0 && size == PAGE_SIZE)
+ pagezero((void *)va);
+ else
+ bzero((char *)va + off, size);
+}
+
+/*
+ * Copy 1 specified hardware page to another.
+ */
+void
+pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
+{
+ /* Both pages are reached through the direct map. */
+ vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
+ vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
+
+ pagecopy((void *)src, (void *)dst);
+}
+
+/*
+ * Non-zero advertises support for I/O on unmapped buffers (presumably
+ * consumed by the buffer cache; the consumer is outside this file).
+ */
+int unmapped_buf_allowed = 1;
+
+/*
+ * Copy "xfersize" bytes from the pages "ma" starting at byte offset
+ * "a_offset" to the pages "mb" starting at byte offset "b_offset",
+ * splitting the copy at page boundaries. Transient mappings are torn
+ * down only when pmap_map_io_transient() reports that it created them.
+ */
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_page_t pages[2];
+ vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
+ int cnt;
+ boolean_t mapped;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ pages[0] = ma[a_offset >> PAGE_SHIFT];
+ b_pg_offset = b_offset & PAGE_MASK;
+ pages[1] = mb[b_offset >> PAGE_SHIFT];
+ /* Clamp this chunk so it stays within both current pages. */
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE);
+ a_cp = (char *)vaddr[0] + a_pg_offset;
+ b_cp = (char *)vaddr[1] + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ if (__predict_false(mapped))
+ pmap_unmap_io_transient(pages, vaddr, 2, FALSE);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+ struct md_page *pvh;
+ struct rwlock *lock;
+ pv_entry_t pv;
+ int loops = 0;
+ boolean_t rv;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_page_exists_quick: page %p is not managed", m));
+ rv = FALSE;
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ if (PV_PMAP(pv) == pmap) {
+ rv = TRUE;
+ break;
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ /* Also scan the pv list of the containing 2MB page, if any. */
+ if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ if (PV_PMAP(pv) == pmap) {
+ rv = TRUE;
+ break;
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ }
+ rw_runlock(lock);
+ return (rv);
+}
+
+/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ struct rwlock *lock;
+ struct md_page *pvh;
+ pmap_t pmap;
+ pt_entry_t *pte;
+ pv_entry_t pv;
+ int count, md_gen, pvh_gen;
+
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ return (0);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+/*
+ * Restart the whole scan whenever a generation count shows that a pv
+ * list changed while the pv list lock was dropped below.
+ */
+restart:
+ count = 0;
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ /* Drop the pv list lock before blocking on the pmap. */
+ md_gen = m->md.pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ pte = pmap_pte(pmap, pv->pv_va);
+ if ((*pte & PG_W) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ if ((m->flags & PG_FICTITIOUS) == 0) {
+ /* Count wired 2MB mappings through the containing page's pvh. */
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ pvh_gen = pvh->pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen ||
+ pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ pte = pmap_pde(pmap, pv->pv_va);
+ if ((*pte & PG_W) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ }
+ rw_runlock(lock);
+ return (count);
+}
+
+/*
+ * Returns TRUE if the given page is mapped individually or as part of
+ * a 2mpage. Otherwise, returns FALSE.
+ */
+boolean_t
+pmap_page_is_mapped(vm_page_t m)
+{
+ struct rwlock *lock;
+ boolean_t rv;
+
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ return (FALSE);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+ rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+ ((m->flags & PG_FICTITIOUS) == 0 &&
+ !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+ rw_runlock(lock);
+ return (rv);
+}
+
+/*
+ * Destroy all managed, non-wired mappings in the given user-space
+ * pmap. This pmap cannot be active on any processor besides the
+ * caller.
+ *
+ * This function cannot be applied to the kernel pmap. Moreover, it
+ * is not intended for general use. It is only to be used during
+ * process termination. Consequently, it can be implemented in ways
+ * that make it faster than pmap_remove(). First, it can more quickly
+ * destroy mappings by iterating over the pmap's collection of PV
+ * entries, rather than searching the page table. Second, it doesn't
+ * have to test and clear the page table entries atomically, because
+ * no processor is currently accessing the user address space. In
+ * particular, a page table entry's dirty bit won't change state once
+ * this function starts.
+ *
+ * Although this function destroys all of the pmap's managed,
+ * non-wired mappings, it can delay and batch the invalidation of TLB
+ * entries without calling pmap_delayed_invl_started() and
+ * pmap_delayed_invl_finished(). Because the pmap is not active on
+ * any other processor, none of these TLB entries will ever be used
+ * before their eventual invalidation. Consequently, there is no need
+ * for either pmap_remove_all() or pmap_remove_write() to wait for
+ * that eventual TLB invalidation.
+ */
+void
+pmap_remove_pages(pmap_t pmap)
+{
+ pd_entry_t ptepde;
+ pt_entry_t *pte, tpte;
+ pt_entry_t PG_M, PG_RW, PG_V;
+ struct spglist free;
+ vm_page_t m, mpte, mt;
+ pv_entry_t pv;
+ struct md_page *pvh;
+ struct pv_chunk *pc, *npc;
+ struct rwlock *lock;
+ int64_t bit;
+ uint64_t inuse, bitmask;
+ int allfree, field, freed, idx;
+ boolean_t superpage;
+ vm_paddr_t pa;
+
+ /*
+ * Assert that the given pmap is only active on the current
+ * CPU. Unfortunately, we cannot block another CPU from
+ * activating the pmap while this function is executing.
+ */
+ KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
+#ifdef INVARIANTS
+ {
+ cpuset_t other_cpus;
+
+ other_cpus = all_cpus;
+ critical_enter();
+ CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+ CPU_AND(&other_cpus, &pmap->pm_active);
+ critical_exit();
+ KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
+ }
+#endif
+
+ lock = NULL;
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ SLIST_INIT(&free);
+ PMAP_LOCK(pmap);
+ TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
+ allfree = 1;
+ freed = 0;
+ for (field = 0; field < _NPCM; field++) {
+ inuse = ~pc->pc_map[field] & pc_freemask[field];
+ while (inuse != 0) {
+ bit = bsfq(inuse);
+ bitmask = 1UL << bit;
+ idx = field * 64 + bit;
+ pv = &pc->pc_pventry[idx];
+ inuse &= ~bitmask;
+
+ pte = pmap_pdpe(pmap, pv->pv_va);
+ ptepde = *pte;
+ pte = pmap_pdpe_to_pde(pte, pv->pv_va);
+ tpte = *pte;
+ if ((tpte & (PG_PS | PG_V)) == PG_V) {
+ superpage = FALSE;
+ ptepde = tpte;
+ pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
+ PG_FRAME);
+ pte = &pte[pmap_pte_index(pv->pv_va)];
+ tpte = *pte;
+ } else {
+ /*
+				 * Keep track of whether 'tpte' is a
+ * superpage explicitly instead of
+ * relying on PG_PS being set.
+ *
+ * This is because PG_PS is numerically
+ * identical to PG_PTE_PAT and thus a
+ * regular page could be mistaken for
+ * a superpage.
+ */
+ superpage = TRUE;
+ }
+
+ if ((tpte & PG_V) == 0) {
+ panic("bad pte va %lx pte %lx",
+ pv->pv_va, tpte);
+ }
+
+/*
+ * We cannot remove wired pages from a process' mapping at this time
+ */
+ if (tpte & PG_W) {
+ allfree = 0;
+ continue;
+ }
+
+ if (superpage)
+ pa = tpte & PG_PS_FRAME;
+ else
+ pa = tpte & PG_FRAME;
+
+ m = PHYS_TO_VM_PAGE(pa);
+ KASSERT(m->phys_addr == pa,
+ ("vm_page_t %p phys_addr mismatch %016jx %016jx",
+ m, (uintmax_t)m->phys_addr,
+ (uintmax_t)tpte));
+
+ KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+ m < &vm_page_array[vm_page_array_size],
+ ("pmap_remove_pages: bad tpte %#jx",
+ (uintmax_t)tpte));
+
+ pte_clear(pte);
+
+ /*
+ * Update the vm_page_t clean/reference bits.
+ */
+ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ if (superpage) {
+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+ vm_page_dirty(mt);
+ } else
+ vm_page_dirty(m);
+ }
+
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
+
+ /* Mark free */
+ pc->pc_map[field] |= bitmask;
+ if (superpage) {
+ pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
+ pvh = pa_to_pvh(tpte & PG_PS_FRAME);
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ if (TAILQ_EMPTY(&pvh->pv_list)) {
+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+ if ((mt->aflags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&mt->md.pv_list))
+ vm_page_aflag_clear(mt, PGA_WRITEABLE);
+ }
+ mpte = pmap_remove_pt_page(pmap, pv->pv_va);
+ if (mpte != NULL) {
+ pmap_resident_count_dec(pmap, 1);
+ KASSERT(mpte->wire_count == NPTEPG,
+ ("pmap_remove_pages: pte page wire count error"));
+ mpte->wire_count = 0;
+ pmap_add_delayed_free_list(mpte, &free, FALSE);
+ }
+ } else {
+ pmap_resident_count_dec(pmap, 1);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ if ((m->aflags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+ }
+ pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
+ freed++;
+ }
+ }
+ PV_STAT(atomic_add_long(&pv_entry_frees, freed));
+ PV_STAT(atomic_add_int(&pv_entry_spare, freed));
+ PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
+ if (allfree) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ free_pv_chunk(pc);
+ }
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ pmap_invalidate_all(pmap);
+ PMAP_UNLOCK(pmap);
+ pmap_free_zero_pages(&free);
+}
+
+static boolean_t
+pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
+{
+ struct rwlock *lock;
+ pv_entry_t pv;
+ struct md_page *pvh;
+ pt_entry_t *pte, mask;
+ pt_entry_t PG_A, PG_M, PG_RW, PG_V;
+ pmap_t pmap;
+ int md_gen, pvh_gen;
+ boolean_t rv;
+
+ rv = FALSE;
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+restart:
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ pte = pmap_pte(pmap, pv->pv_va);
+ mask = 0;
+ if (modified) {
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ mask |= PG_RW | PG_M;
+ }
+ if (accessed) {
+ PG_A = pmap_accessed_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ mask |= PG_V | PG_A;
+ }
+ rv = (*pte & mask) == mask;
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ goto out;
+ }
+ if ((m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ pvh_gen = pvh->pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen ||
+ pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ pte = pmap_pde(pmap, pv->pv_va);
+ mask = 0;
+ if (modified) {
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ mask |= PG_RW | PG_M;
+ }
+ if (accessed) {
+ PG_A = pmap_accessed_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ mask |= PG_V | PG_A;
+ }
+ rv = (*pte & mask) == mask;
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ goto out;
+ }
+ }
+out:
+ rw_runlock(lock);
+ return (rv);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page was modified
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_is_modified: page %p is not managed", m));
+
+ /*
+ * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
+ * is clear, no PTEs can have PG_M set.
+ */
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+ return (FALSE);
+ return (pmap_page_test_mappings(m, FALSE, TRUE));
+}
+
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ pd_entry_t *pde;
+ pt_entry_t *pte, PG_V;
+ boolean_t rv;
+
+ PG_V = pmap_valid_bit(pmap);
+ rv = FALSE;
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, addr);
+ if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
+ pte = pmap_pde_to_pte(pde, addr);
+ rv = (*pte & PG_V) == 0;
+ }
+ PMAP_UNLOCK(pmap);
+ return (rv);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_is_referenced: page %p is not managed", m));
+ return (pmap_page_test_mappings(m, TRUE, FALSE));
+}
+
+/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+pmap_remove_write(vm_page_t m)
+{
+ struct md_page *pvh;
+ pmap_t pmap;
+ struct rwlock *lock;
+ pv_entry_t next_pv, pv;
+ pd_entry_t *pde;
+ pt_entry_t oldpte, *pte, PG_M, PG_RW;
+ vm_offset_t va;
+ int pvh_gen, md_gen;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+ * set by another thread while the object is locked. Thus,
+ * if PGA_WRITEABLE is clear, no page table entries need updating.
+ */
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+ return;
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
+ pa_to_pvh(VM_PAGE_TO_PHYS(m));
+retry_pv_loop:
+ rw_wlock(lock);
+ TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ rw_wunlock(lock);
+ goto retry_pv_loop;
+ }
+ }
+ PG_RW = pmap_rw_bit(pmap);
+ va = pv->pv_va;
+ pde = pmap_pde(pmap, va);
+ if ((*pde & PG_RW) != 0)
+ (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
+ KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
+ ("inconsistent pv lock %p %p for page %p",
+ lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
+ PMAP_UNLOCK(pmap);
+ }
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ md_gen = m->md.pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen ||
+ md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ rw_wunlock(lock);
+ goto retry_pv_loop;
+ }
+ }
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0,
+ ("pmap_remove_write: found a 2mpage in page %p's pv list",
+ m));
+ pte = pmap_pde_to_pte(pde, pv->pv_va);
+retry:
+ oldpte = *pte;
+ if (oldpte & PG_RW) {
+ if (!atomic_cmpset_long(pte, oldpte, oldpte &
+ ~(PG_RW | PG_M)))
+ goto retry;
+ if ((oldpte & PG_M) != 0)
+ vm_page_dirty(m);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ rw_wunlock(lock);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ pmap_delayed_invl_wait(m);
+}
+
+static __inline boolean_t
+safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
+{
+
+ if (!pmap_emulate_ad_bits(pmap))
+ return (TRUE);
+
+ KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
+
+ /*
+ * XWR = 010 or 110 will cause an unconditional EPT misconfiguration
+ * so we don't allow the referenced (aka EPT_PG_READ) bit to be cleared
+ * if the EPT_PG_WRITE bit is set.
+ */
+ if ((pte & EPT_PG_WRITE) != 0)
+ return (FALSE);
+
+ /*
+ * XWR = 100 is allowed only if the PMAP_SUPPORTS_EXEC_ONLY is set.
+ */
+ if ((pte & EPT_PG_EXECUTE) == 0 ||
+ ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
+ return (TRUE);
+ else
+ return (FALSE);
+}
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return a count of reference bits for a page, clearing those bits.
+ * It is not necessary for every reference bit to be cleared, but it
+ * is necessary that 0 only be returned when there are truly no
+ * reference bits set.
+ *
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits. This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified(). However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages. Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
+ *
+ * A DI block is not needed within this function, because
+ * invalidations are performed before the PV list lock is
+ * released.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t pv, pvf;
+ pmap_t pmap;
+ struct rwlock *lock;
+ pd_entry_t oldpde, *pde;
+ pt_entry_t *pte, PG_A, PG_M, PG_RW;
+ vm_offset_t va;
+ vm_paddr_t pa;
+ int cleared, md_gen, not_cleared, pvh_gen;
+ struct spglist free;
+ boolean_t demoted;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_ts_referenced: page %p is not managed", m));
+ SLIST_INIT(&free);
+ cleared = 0;
+ pa = VM_PAGE_TO_PHYS(m);
+ lock = PHYS_TO_PV_LIST_LOCK(pa);
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
+ rw_wlock(lock);
+retry:
+ not_cleared = 0;
+ if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
+ goto small_mappings;
+ pv = pvf;
+ do {
+ if (pvf == NULL)
+ pvf = pv;
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ va = pv->pv_va;
+ pde = pmap_pde(pmap, pv->pv_va);
+ oldpde = *pde;
+ if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ /*
+ * Although "oldpde" is mapping a 2MB page, because
+ * this function is called at a 4KB page granularity,
+ * we only update the 4KB page under test.
+ */
+ vm_page_dirty(m);
+ }
+ if ((oldpde & PG_A) != 0) {
+ /*
+ * Since this reference bit is shared by 512 4KB
+ * pages, it should not be cleared every time it is
+ * tested. Apply a simple "hash" function on the
+ * physical page number, the virtual superpage number,
+ * and the pmap address to select one 4KB page out of
+ * the 512 on which testing the reference bit will
+ * result in clearing that reference bit. This
+ * function is designed to avoid the selection of the
+ * same 4KB page for every 2MB page mapping.
+ *
+ * On demotion, a mapping that hasn't been referenced
+ * is simply destroyed. To avoid the possibility of a
+ * subsequent page fault on a demoted wired mapping,
+ * always leave its reference bit set. Moreover,
+ * since the superpage is wired, the current state of
+ * its reference bit won't affect page replacement.
+ */
+ if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
+ (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
+ (oldpde & PG_W) == 0) {
+ if (safe_to_clear_referenced(pmap, oldpde)) {
+ atomic_clear_long(pde, PG_A);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ demoted = FALSE;
+ } else if (pmap_demote_pde_locked(pmap, pde,
+ pv->pv_va, &lock)) {
+ /*
+ * Remove the mapping to a single page
+ * so that a subsequent access may
+ * repromote. Since the underlying
+ * page table page is fully populated,
+ * this removal never frees a page
+ * table page.
+ */
+ demoted = TRUE;
+ va += VM_PAGE_TO_PHYS(m) - (oldpde &
+ PG_PS_FRAME);
+ pte = pmap_pde_to_pte(pde, va);
+ pmap_remove_pte(pmap, pte, va, *pde,
+ NULL, &lock);
+ pmap_invalidate_page(pmap, va);
+ } else
+ demoted = TRUE;
+
+ if (demoted) {
+ /*
+ * The superpage mapping was removed
+ * entirely and therefore 'pv' is no
+ * longer valid.
+ */
+ if (pvf == pv)
+ pvf = NULL;
+ pv = NULL;
+ }
+ cleared++;
+ KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
+ ("inconsistent pv lock %p %p for page %p",
+ lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
+ } else
+ not_cleared++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ }
+ if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
+ goto out;
+ } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
+small_mappings:
+ if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
+ goto out;
+ pv = pvf;
+ do {
+ if (pvf == NULL)
+ pvf = pv;
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ md_gen = m->md.pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0,
+ ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
+ m));
+ pte = pmap_pde_to_pte(pde, pv->pv_va);
+ if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ if ((*pte & PG_A) != 0) {
+ if (safe_to_clear_referenced(pmap, *pte)) {
+ atomic_clear_long(pte, PG_A);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ cleared++;
+ } else if ((*pte & PG_W) == 0) {
+ /*
+ * Wired pages cannot be paged out so
+ * doing accessed bit emulation for
+ * them is wasted effort. We do the
+ * hard work for unwired pages only.
+ */
+ pmap_remove_pte(pmap, pte, pv->pv_va,
+ *pde, &free, &lock);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ cleared++;
+ if (pvf == pv)
+ pvf = NULL;
+ pv = NULL;
+ KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
+ ("inconsistent pv lock %p %p for page %p",
+ lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
+ } else
+ not_cleared++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ }
+ } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
+ not_cleared < PMAP_TS_REFERENCED_MAX);
+out:
+ rw_wunlock(lock);
+ pmap_free_zero_pages(&free);
+ return (cleared + not_cleared);
+}
+
+/*
+ * Apply the given advice to the specified range of addresses within the
+ * given pmap. Depending on the advice, clear the referenced and/or
+ * modified flags in each mapping and set the mapped page's dirty field.
+ */
+void
+pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
+{
+ struct rwlock *lock;
+ pml4_entry_t *pml4e;
+ pdp_entry_t *pdpe;
+ pd_entry_t oldpde, *pde;
+ pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
+ vm_offset_t va, va_next;
+ vm_page_t m;
+ boolean_t anychanged;
+
+ if (advice != MADV_DONTNEED && advice != MADV_FREE)
+ return;
+
+ /*
+ * A/D bit emulation requires an alternate code path when clearing
+ * the modified and accessed bits below. Since this function is
+ * advisory in nature we skip it entirely for pmaps that require
+ * A/D bit emulation.
+ */
+ if (pmap_emulate_ad_bits(pmap))
+ return;
+
+ PG_A = pmap_accessed_bit(pmap);
+ PG_G = pmap_global_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ anychanged = FALSE;
+ pmap_delayed_invl_started();
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = va_next) {
+ pml4e = pmap_pml4e(pmap, sva);
+ if ((*pml4e & PG_V) == 0) {
+ va_next = (sva + NBPML4) & ~PML4MASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+ pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
+ if ((*pdpe & PG_V) == 0) {
+ va_next = (sva + NBPDP) & ~PDPMASK;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+ va_next = (sva + NBPDR) & ~PDRMASK;
+ if (va_next < sva)
+ va_next = eva;
+ pde = pmap_pdpe_to_pde(pdpe, sva);
+ oldpde = *pde;
+ if ((oldpde & PG_V) == 0)
+ continue;
+ else if ((oldpde & PG_PS) != 0) {
+ if ((oldpde & PG_MANAGED) == 0)
+ continue;
+ lock = NULL;
+ if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) {
+ if (lock != NULL)
+ rw_wunlock(lock);
+
+ /*
+ * The large page mapping was destroyed.
+ */
+ continue;
+ }
+
+ /*
+ * Unless the page mappings are wired, remove the
+ * mapping to a single page so that a subsequent
+ * access may repromote. Since the underlying page
+ * table page is fully populated, this removal never
+ * frees a page table page.
+ */
+ if ((oldpde & PG_W) == 0) {
+ pte = pmap_pde_to_pte(pde, sva);
+ KASSERT((*pte & PG_V) != 0,
+ ("pmap_advise: invalid PTE"));
+ pmap_remove_pte(pmap, pte, sva, *pde, NULL,
+ &lock);
+ anychanged = TRUE;
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ }
+ if (va_next > eva)
+ va_next = eva;
+ va = va_next;
+ for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
+ goto maybe_invlrng;
+ else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ if (advice == MADV_DONTNEED) {
+ /*
+ * Future calls to pmap_is_modified()
+ * can be avoided by making the page
+ * dirty now.
+ */
+ m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
+ vm_page_dirty(m);
+ }
+ atomic_clear_long(pte, PG_M | PG_A);
+ } else if ((*pte & PG_A) != 0)
+ atomic_clear_long(pte, PG_A);
+ else
+ goto maybe_invlrng;
+
+ if ((*pte & PG_G) != 0) {
+ if (va == va_next)
+ va = sva;
+ } else
+ anychanged = TRUE;
+ continue;
+maybe_invlrng:
+ if (va != va_next) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = va_next;
+ }
+ }
+ if (va != va_next)
+ pmap_invalidate_range(pmap, va, sva);
+ }
+ if (anychanged)
+ pmap_invalidate_all(pmap);
+ PMAP_UNLOCK(pmap);
+ pmap_delayed_invl_finished();
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+ struct md_page *pvh;
+ pmap_t pmap;
+ pv_entry_t next_pv, pv;
+ pd_entry_t oldpde, *pde;
+ pt_entry_t oldpte, *pte, PG_M, PG_RW, PG_V;
+ struct rwlock *lock;
+ vm_offset_t va;
+ int md_gen, pvh_gen;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_clear_modify: page %p is not managed", m));
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ KASSERT(!vm_page_xbusied(m),
+ ("pmap_clear_modify: page %p is exclusive busied", m));
+
+ /*
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
+ * If the object containing the page is locked and the page is not
+ * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
+ */
+ if ((m->aflags & PGA_WRITEABLE) == 0)
+ return;
+ pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
+ pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_wlock(lock);
+restart:
+ TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ va = pv->pv_va;
+ pde = pmap_pde(pmap, va);
+ oldpde = *pde;
+ if ((oldpde & PG_RW) != 0) {
+ if (pmap_demote_pde_locked(pmap, pde, va, &lock)) {
+ if ((oldpde & PG_W) == 0) {
+ /*
+ * Write protect the mapping to a
+ * single page so that a subsequent
+ * write access may repromote.
+ */
+ va += VM_PAGE_TO_PHYS(m) - (oldpde &
+ PG_PS_FRAME);
+ pte = pmap_pde_to_pte(pde, va);
+ oldpte = *pte;
+ if ((oldpte & PG_V) != 0) {
+ while (!atomic_cmpset_long(pte,
+ oldpte,
+ oldpte & ~(PG_M | PG_RW)))
+ oldpte = *pte;
+ vm_page_dirty(m);
+ pmap_invalidate_page(pmap, va);
+ }
+ }
+ }
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ pvh_gen = pvh->pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
+ " a 2mpage in page %p's pv list", m));
+ pte = pmap_pde_to_pte(pde, pv->pv_va);
+ if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ atomic_clear_long(pte, PG_M);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ rw_wunlock(lock);
+}
+
+/*
+ * Miscellaneous support routines follow
+ */
+
+/* Adjust the cache mode for a 4KB page mapped via a PTE. */
+static __inline void
+pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask)
+{
+ u_int opte, npte;
+
+ /*
+ * The cache mode bits are all in the low 32-bits of the
+ * PTE, so we can just spin on updating the low 32-bits.
+ */
+ do {
+ opte = *(u_int *)pte;
+ npte = opte & ~mask;
+ npte |= cache_bits;
+ } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
+}
+
+/* Adjust the cache mode for a 2MB page mapped via a PDE. */
+static __inline void
+pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask)
+{
+ u_int opde, npde;
+
+ /*
+ * The cache mode bits are all in the low 32-bits of the
+ * PDE, so we can just spin on updating the low 32-bits.
+ */
+ do {
+ opde = *(u_int *)pde;
+ npde = opde & ~mask;
+ npde |= cache_bits;
+ } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
+}
+
+/*
+ * Map a set of physical memory pages into the kernel virtual
+ * address space. Return a pointer to where it is mapped. This
+ * routine is intended to be used for mapping device memory,
+ * NOT real memory.
+ */
+void *
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
+{
+ struct pmap_preinit_mapping *ppim;
+ vm_offset_t va, offset;
+ vm_size_t tmpsize;
+ int i;
+
+ offset = pa & PAGE_MASK;
+ size = round_page(offset + size);
+ pa = trunc_page(pa);
+
+ if (!pmap_initialized) {
+ va = 0;
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == 0) {
+ ppim->pa = pa;
+ ppim->sz = size;
+ ppim->mode = mode;
+ ppim->va = virtual_avail;
+ virtual_avail += size;
+ va = ppim->va;
+ break;
+ }
+ }
+ if (va == 0)
+ panic("%s: too many preinit mappings", __func__);
+ } else {
+ /*
+ * If we have a preinit mapping, re-use it.
+ */
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->pa == pa && ppim->sz == size &&
+ ppim->mode == mode)
+ return ((void *)(ppim->va + offset));
+ }
+ /*
+ * If the specified range of physical addresses fits within
+ * the direct map window, use the direct map.
+ */
+ if (pa < dmaplimit && pa + size < dmaplimit) {
+ va = PHYS_TO_DMAP(pa);
+ if (!pmap_change_attr(va, size, mode))
+ return ((void *)(va + offset));
+ }
+ va = kva_alloc(size);
+ if (va == 0)
+ panic("%s: Couldn't allocate KVA", __func__);
+ }
+ for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
+ pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
+ pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
+ pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
+ return ((void *)(va + offset));
+}
+
+void *
+pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
+}
+
+void *
+pmap_mapbios(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
+}
+
+void
+pmap_unmapdev(vm_offset_t va, vm_size_t size)
+{
+ struct pmap_preinit_mapping *ppim;
+ vm_offset_t offset;
+ int i;
+
+	/* If we handed out a direct map region in pmap_mapdev, do nothing */
+ if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
+ return;
+ offset = va & PAGE_MASK;
+ size = round_page(offset + size);
+ va = trunc_page(va);
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == va && ppim->sz == size) {
+ if (pmap_initialized)
+ return;
+ ppim->pa = 0;
+ ppim->va = 0;
+ ppim->sz = 0;
+ ppim->mode = 0;
+ if (va + size == virtual_avail)
+ virtual_avail = va;
+ return;
+ }
+ }
+ if (pmap_initialized)
+ kva_free(va, size);
+}
+
+/*
+ * Tries to demote a 1GB page mapping.
+ */
+static boolean_t
+pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
+{
+ pdp_entry_t newpdpe, oldpdpe;
+ pd_entry_t *firstpde, newpde, *pde;
+ pt_entry_t PG_A, PG_M, PG_RW, PG_V;
+ vm_paddr_t pdpgpa;
+ vm_page_t pdpg;
+
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ oldpdpe = *pdpe;
+ KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
+ ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
+ if ((pdpg = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+ CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (FALSE);
+ }
+ pdpgpa = VM_PAGE_TO_PHYS(pdpg);
+ firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
+ newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
+ KASSERT((oldpdpe & PG_A) != 0,
+ ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
+ KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
+ ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
+ newpde = oldpdpe;
+
+ /*
+ * Initialize the page directory page.
+ */
+ for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
+ *pde = newpde;
+ newpde += NBPDR;
+ }
+
+ /*
+ * Demote the mapping.
+ */
+ *pdpe = newpdpe;
+
+ /*
+ * Invalidate a stale recursive mapping of the page directory page.
+ */
+ pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
+
+ pmap_pdpe_demotions++;
+ CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
+ " in pmap %p", va, pmap);
+ return (TRUE);
+}
+
+/*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+ m->md.pat_mode = ma;
+
+ /*
+ * If "m" is a normal page, update its direct mapping. This update
+ * can be relied upon to perform any cache operations that are
+ * required for data coherence.
+ */
+ if ((m->flags & PG_FICTITIOUS) == 0 &&
+ pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
+ m->md.pat_mode))
+ panic("memory attribute change on the direct map failed");
+}
+
+/*
+ * Changes the specified virtual address range's memory type to that given by
+ * the parameter "mode". The specified virtual address range must be
+ * completely contained within either the direct map or the kernel map. If
+ * the virtual address range is contained within the kernel map, then the
+ * memory type for each of the corresponding ranges of the direct map is also
+ * changed. (The corresponding ranges of the direct map are those ranges that
+ * map the same physical pages as the specified virtual address range.) These
+ * changes to the direct map are necessary because Intel describes the
+ * behavior of their processors as "undefined" if two or more mappings to the
+ * same physical page have different memory types.
+ *
+ * Returns zero if the change completed successfully, and either EINVAL or
+ * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
+ * of the virtual address range was not mapped, and ENOMEM is returned if
+ * there was insufficient memory available to complete the change. In the
+ * latter case, the memory type may have been changed on some part of the
+ * virtual address range or the direct map.
+ */
+int
+pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
+{
+ int error;
+
+ PMAP_LOCK(kernel_pmap);
+ error = pmap_change_attr_locked(va, size, mode);
+ PMAP_UNLOCK(kernel_pmap);
+ return (error);
+}
+
+static int
+pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
+{
+ vm_offset_t base, offset, tmpva;
+ vm_paddr_t pa_start, pa_end, pa_end1;
+ pdp_entry_t *pdpe;
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ int cache_bits_pte, cache_bits_pde, error;
+ boolean_t changed;
+
+ PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
+ base = trunc_page(va);
+ offset = va & PAGE_MASK;
+ size = round_page(offset + size);
+
+ /*
+ * Only supported on kernel virtual addresses, including the direct
+ * map but excluding the recursive map.
+ */
+ if (base < DMAP_MIN_ADDRESS)
+ return (EINVAL);
+
+ cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
+ cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
+ changed = FALSE;
+
+ /*
+ * Pages that aren't mapped aren't supported. Also break down 2MB pages
+ * into 4KB pages if required.
+ */
+ for (tmpva = base; tmpva < base + size; ) {
+ pdpe = pmap_pdpe(kernel_pmap, tmpva);
+ if (pdpe == NULL || *pdpe == 0)
+ return (EINVAL);
+ if (*pdpe & PG_PS) {
+ /*
+ * If the current 1GB page already has the required
+ * memory type, then we need not demote this page. Just
+ * increment tmpva to the next 1GB page frame.
+ */
+ if ((*pdpe & X86_PG_PDE_CACHE) == cache_bits_pde) {
+ tmpva = trunc_1gpage(tmpva) + NBPDP;
+ continue;
+ }
+
+ /*
+ * If the current offset aligns with a 1GB page frame
+ * and there is at least 1GB left within the range, then
+ * we need not break down this page into 2MB pages.
+ */
+ if ((tmpva & PDPMASK) == 0 &&
+ tmpva + PDPMASK < base + size) {
+ tmpva += NBPDP;
+ continue;
+ }
+ if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
+ return (ENOMEM);
+ }
+ pde = pmap_pdpe_to_pde(pdpe, tmpva);
+ if (*pde == 0)
+ return (EINVAL);
+ if (*pde & PG_PS) {
+ /*
+ * If the current 2MB page already has the required
+ * memory type, then we need not demote this page. Just
+ * increment tmpva to the next 2MB page frame.
+ */
+ if ((*pde & X86_PG_PDE_CACHE) == cache_bits_pde) {
+ tmpva = trunc_2mpage(tmpva) + NBPDR;
+ continue;
+ }
+
+ /*
+ * If the current offset aligns with a 2MB page frame
+ * and there is at least 2MB left within the range, then
+ * we need not break down this page into 4KB pages.
+ */
+ if ((tmpva & PDRMASK) == 0 &&
+ tmpva + PDRMASK < base + size) {
+ tmpva += NBPDR;
+ continue;
+ }
+ if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
+ return (ENOMEM);
+ }
+ pte = pmap_pde_to_pte(pde, tmpva);
+ if (*pte == 0)
+ return (EINVAL);
+ tmpva += PAGE_SIZE;
+ }
+ error = 0;
+
+ /*
+ * Ok, all the pages exist, so run through them updating their
+ * cache mode if required.
+ */
+ pa_start = pa_end = 0;
+ for (tmpva = base; tmpva < base + size; ) {
+ pdpe = pmap_pdpe(kernel_pmap, tmpva);
+ if (*pdpe & PG_PS) {
+ if ((*pdpe & X86_PG_PDE_CACHE) != cache_bits_pde) {
+ pmap_pde_attr(pdpe, cache_bits_pde,
+ X86_PG_PDE_CACHE);
+ changed = TRUE;
+ }
+ if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+ (*pdpe & PG_PS_FRAME) < dmaplimit) {
+ if (pa_start == pa_end) {
+ /* Start physical address run. */
+ pa_start = *pdpe & PG_PS_FRAME;
+ pa_end = pa_start + NBPDP;
+ } else if (pa_end == (*pdpe & PG_PS_FRAME))
+ pa_end += NBPDP;
+ else {
+ /* Run ended, update direct map. */
+ error = pmap_change_attr_locked(
+ PHYS_TO_DMAP(pa_start),
+ pa_end - pa_start, mode);
+ if (error != 0)
+ break;
+ /* Start physical address run. */
+ pa_start = *pdpe & PG_PS_FRAME;
+ pa_end = pa_start + NBPDP;
+ }
+ }
+ tmpva = trunc_1gpage(tmpva) + NBPDP;
+ continue;
+ }
+ pde = pmap_pdpe_to_pde(pdpe, tmpva);
+ if (*pde & PG_PS) {
+ if ((*pde & X86_PG_PDE_CACHE) != cache_bits_pde) {
+ pmap_pde_attr(pde, cache_bits_pde,
+ X86_PG_PDE_CACHE);
+ changed = TRUE;
+ }
+ if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+ (*pde & PG_PS_FRAME) < dmaplimit) {
+ if (pa_start == pa_end) {
+ /* Start physical address run. */
+ pa_start = *pde & PG_PS_FRAME;
+ pa_end = pa_start + NBPDR;
+ } else if (pa_end == (*pde & PG_PS_FRAME))
+ pa_end += NBPDR;
+ else {
+ /* Run ended, update direct map. */
+ error = pmap_change_attr_locked(
+ PHYS_TO_DMAP(pa_start),
+ pa_end - pa_start, mode);
+ if (error != 0)
+ break;
+ /* Start physical address run. */
+ pa_start = *pde & PG_PS_FRAME;
+ pa_end = pa_start + NBPDR;
+ }
+ }
+ tmpva = trunc_2mpage(tmpva) + NBPDR;
+ } else {
+ pte = pmap_pde_to_pte(pde, tmpva);
+ if ((*pte & X86_PG_PTE_CACHE) != cache_bits_pte) {
+ pmap_pte_attr(pte, cache_bits_pte,
+ X86_PG_PTE_CACHE);
+ changed = TRUE;
+ }
+ if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
+ (*pte & PG_FRAME) < dmaplimit) {
+ if (pa_start == pa_end) {
+ /* Start physical address run. */
+ pa_start = *pte & PG_FRAME;
+ pa_end = pa_start + PAGE_SIZE;
+ } else if (pa_end == (*pte & PG_FRAME))
+ pa_end += PAGE_SIZE;
+ else {
+ /* Run ended, update direct map. */
+ error = pmap_change_attr_locked(
+ PHYS_TO_DMAP(pa_start),
+ pa_end - pa_start, mode);
+ if (error != 0)
+ break;
+ /* Start physical address run. */
+ pa_start = *pte & PG_FRAME;
+ pa_end = pa_start + PAGE_SIZE;
+ }
+ }
+ tmpva += PAGE_SIZE;
+ }
+ }
+ if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
+ pa_end1 = MIN(pa_end, dmaplimit);
+ if (pa_start != pa_end1)
+ error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
+ pa_end1 - pa_start, mode);
+ }
+
+ /*
+ * Flush CPU caches if required to make sure any data isn't cached that
+ * shouldn't be, etc.
+ */
+ if (changed) {
+ pmap_invalidate_range(kernel_pmap, base, tmpva);
+ pmap_invalidate_cache_range(base, tmpva, FALSE);
+ }
+ return (error);
+}
+
+/*
+ * Demotes any mapping within the direct map region that covers more than the
+ * specified range of physical addresses. This range's size must be a power
+ * of two and its starting address must be a multiple of its size. Since the
+ * demotion does not change any attributes of the mapping, a TLB invalidation
+ * is not mandatory. The caller may, however, request a TLB invalidation.
+ */
+void
+pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
+{
+ pdp_entry_t *pdpe;
+ pd_entry_t *pde;
+ vm_offset_t va;
+ boolean_t changed;
+
+ if (len == 0)
+ return;
+ KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
+ KASSERT((base & (len - 1)) == 0,
+ ("pmap_demote_DMAP: base is not a multiple of len"));
+ if (len < NBPDP && base < dmaplimit) {
+ va = PHYS_TO_DMAP(base);
+ changed = FALSE;
+ PMAP_LOCK(kernel_pmap);
+ pdpe = pmap_pdpe(kernel_pmap, va);
+ if ((*pdpe & X86_PG_V) == 0)
+ panic("pmap_demote_DMAP: invalid PDPE");
+ if ((*pdpe & PG_PS) != 0) {
+ if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
+ panic("pmap_demote_DMAP: PDPE failed");
+ changed = TRUE;
+ }
+ if (len < NBPDR) {
+ pde = pmap_pdpe_to_pde(pdpe, va);
+ if ((*pde & X86_PG_V) == 0)
+ panic("pmap_demote_DMAP: invalid PDE");
+ if ((*pde & PG_PS) != 0) {
+ if (!pmap_demote_pde(kernel_pmap, pde, va))
+ panic("pmap_demote_DMAP: PDE failed");
+ changed = TRUE;
+ }
+ }
+ if (changed && invalidate)
+ pmap_invalidate_page(kernel_pmap, va);
+ PMAP_UNLOCK(kernel_pmap);
+ }
+}
+
+/*
+ * perform the pmap work for mincore
+ */
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+{
+ pd_entry_t *pdep;
+ pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
+ vm_paddr_t pa;
+ int val;
+
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ PMAP_LOCK(pmap);
+retry:
+ pdep = pmap_pde(pmap, addr);
+ if (pdep != NULL && (*pdep & PG_V)) {
+ if (*pdep & PG_PS) {
+ pte = *pdep;
+ /* Compute the physical address of the 4KB page. */
+ pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
+ PG_FRAME;
+ val = MINCORE_SUPER;
+ } else {
+ pte = *pmap_pde_to_pte(pdep, addr);
+ pa = pte & PG_FRAME;
+ val = 0;
+ }
+ } else {
+ pte = 0;
+ pa = 0;
+ val = 0;
+ }
+ if ((pte & PG_V) != 0) {
+ val |= MINCORE_INCORE;
+ if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+ if ((pte & PG_A) != 0)
+ val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+ }
+ if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+ (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+ (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
+ /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+ if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+ goto retry;
+ } else
+ PA_UNLOCK_COND(*locked_pa);
+ PMAP_UNLOCK(pmap);
+ return (val);
+}
+
+static uint64_t
+pmap_pcid_alloc(pmap_t pmap, u_int cpuid)
+{
+ uint32_t gen, new_gen, pcid_next;
+
+ CRITICAL_ASSERT(curthread);
+ gen = PCPU_GET(pcid_gen);
+ if (!pti && (pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN ||
+ pmap->pm_pcids[cpuid].pm_gen == gen))
+ return (CR3_PCID_SAVE);
+ pcid_next = PCPU_GET(pcid_next);
+ KASSERT((!pti && pcid_next <= PMAP_PCID_OVERMAX) ||
+ (pti && pcid_next <= PMAP_PCID_OVERMAX_KERN),
+ ("cpu %d pcid_next %#x", cpuid, pcid_next));
+ if ((!pti && pcid_next == PMAP_PCID_OVERMAX) ||
+ (pti && pcid_next == PMAP_PCID_OVERMAX_KERN)) {
+ new_gen = gen + 1;
+ if (new_gen == 0)
+ new_gen = 1;
+ PCPU_SET(pcid_gen, new_gen);
+ pcid_next = PMAP_PCID_KERN + 1;
+ } else {
+ new_gen = gen;
+ }
+ pmap->pm_pcids[cpuid].pm_pcid = pcid_next;
+ pmap->pm_pcids[cpuid].pm_gen = new_gen;
+ PCPU_SET(pcid_next, pcid_next + 1);
+ return (0);
+}
+
+void
+pmap_activate_sw(struct thread *td)
+{
+ pmap_t oldpmap, pmap;
+ struct invpcid_descr d;
+ uint64_t cached, cr3, kcr3, ucr3;
+ register_t rflags;
+ u_int cpuid;
+
+ oldpmap = PCPU_GET(curpmap);
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ if (oldpmap == pmap)
+ return;
+ cpuid = PCPU_GET(cpuid);
+#ifdef SMP
+ CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
+#else
+ CPU_SET(cpuid, &pmap->pm_active);
+#endif
+ cr3 = rcr3();
+ if (pmap_pcid_enabled) {
+ cached = pmap_pcid_alloc(pmap, cpuid);
+ KASSERT(pmap->pm_pcids[cpuid].pm_pcid >= 0 &&
+ pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX,
+ ("pmap %p cpu %d pcid %#x", pmap, cpuid,
+ pmap->pm_pcids[cpuid].pm_pcid));
+ KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN ||
+ pmap == kernel_pmap,
+ ("non-kernel pmap thread %p pmap %p cpu %d pcid %#x",
+ td, pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid));
+
+ /*
+ * If the INVPCID instruction is not available,
+		 * invltlb_pcid_handler() is used to handle
+ * invalidate_all IPI, which checks for curpmap ==
+ * smp_tlb_pmap. Below operations sequence has a
+ * window where %CR3 is loaded with the new pmap's
+ * PML4 address, but curpmap value is not yet updated.
+ * This causes invltlb IPI handler, called between the
+ * updates, to execute as NOP, which leaves stale TLB
+ * entries.
+ *
+ * Note that the most typical use of
+ * pmap_activate_sw(), from the context switch, is
+ * immune to this race, because interrupts are
+ * disabled (while the thread lock is owned), and IPI
+		 * happens after curpmap is updated. Protect other
+ * callers in a similar way, by disabling interrupts
+ * around the %cr3 register reload and curpmap
+ * assignment.
+ */
+ if (!invpcid_works)
+ rflags = intr_disable();
+
+ if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3) {
+ load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
+ cached);
+ if (cached)
+ PCPU_INC(pm_save_cnt);
+ }
+ PCPU_SET(curpmap, pmap);
+ if (pti) {
+			kcr3 = pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid;
+			ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[cpuid].pm_pcid |
+			    PMAP_PCID_USER_PT;
+
+			/*
+			 * Manually invalidate translations cached
+			 * from the user page table, which are not
+			 * flushed by reload of cr3 with the kernel
+			 * page table pointer above.
+			 */
+			if (pmap->pm_ucr3 != PMAP_NO_CR3) {
+				if (invpcid_works) {
+					d.pcid = PMAP_PCID_USER_PT |
+					    pmap->pm_pcids[cpuid].pm_pcid;
+					d.pad = 0;
+					d.addr = 0;
+					invpcid(&d, INVPCID_CTX);
+				} else {
+					pmap_pti_pcid_invalidate(ucr3, kcr3);
+				}
+			}
+
+			PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
+			PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
+ }
+ if (!invpcid_works)
+ intr_restore(rflags);
+ } else if (cr3 != pmap->pm_cr3) {
+ load_cr3(pmap->pm_cr3);
+ PCPU_SET(curpmap, pmap);
+ if (pti) {
+ PCPU_SET(kcr3, pmap->pm_cr3);
+ PCPU_SET(ucr3, pmap->pm_ucr3);
+ }
+ }
+#ifdef SMP
+ CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+#else
+ CPU_CLR(cpuid, &oldpmap->pm_active);
+#endif
+}
+
+void
+pmap_activate(struct thread *td)
+{
+
+ critical_enter();
+ pmap_activate_sw(td);
+ critical_exit();
+}
+
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
+/*
+ * Increase the starting virtual address of the given mapping if a
+ * different alignment might result in more superpage mappings.
+ */
+void
+pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
+ vm_offset_t *addr, vm_size_t size)
+{
+ vm_offset_t superpage_offset;
+
+ if (size < NBPDR)
+ return;
+ if (object != NULL && (object->flags & OBJ_COLORED) != 0)
+ offset += ptoa(object->pg_color);
+ superpage_offset = offset & PDRMASK;
+ if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
+ (*addr & PDRMASK) == superpage_offset)
+ return;
+ if ((*addr & PDRMASK) < superpage_offset)
+ *addr = (*addr & ~PDRMASK) + superpage_offset;
+ else
+ *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
+}
+
+#ifdef INVARIANTS
+static unsigned long num_dirty_emulations;
+SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW,
+ &num_dirty_emulations, 0, NULL);
+
+static unsigned long num_accessed_emulations;
+SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW,
+ &num_accessed_emulations, 0, NULL);
+
+static unsigned long num_superpage_accessed_emulations;
+SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW,
+ &num_superpage_accessed_emulations, 0, NULL);
+
+static unsigned long ad_emulation_superpage_promotions;
+SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW,
+ &ad_emulation_superpage_promotions, 0, NULL);
+#endif /* INVARIANTS */
+
+int
+pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
+{
+ int rv;
+ struct rwlock *lock;
+#if VM_NRESERVLEVEL > 0
+ vm_page_t m, mpte;
+#endif
+ pd_entry_t *pde;
+ pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;
+
+ KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE,
+ ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype));
+
+ if (!pmap_emulate_ad_bits(pmap))
+ return (-1);
+
+ PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_V = pmap_valid_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
+
+ rv = -1;
+ lock = NULL;
+ PMAP_LOCK(pmap);
+
+ pde = pmap_pde(pmap, va);
+ if (pde == NULL || (*pde & PG_V) == 0)
+ goto done;
+
+ if ((*pde & PG_PS) != 0) {
+ if (ftype == VM_PROT_READ) {
+#ifdef INVARIANTS
+ atomic_add_long(&num_superpage_accessed_emulations, 1);
+#endif
+ *pde |= PG_A;
+ rv = 0;
+ }
+ goto done;
+ }
+
+ pte = pmap_pde_to_pte(pde, va);
+ if ((*pte & PG_V) == 0)
+ goto done;
+
+ if (ftype == VM_PROT_WRITE) {
+ if ((*pte & PG_RW) == 0)
+ goto done;
+ /*
+ * Set the modified and accessed bits simultaneously.
+ *
+ * Intel EPT PTEs that do software emulation of A/D bits map
+ * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively.
+ * An EPT misconfiguration is triggered if the PTE is writable
+ * but not readable (WR=10). This is avoided by setting PG_A
+ * and PG_M simultaneously.
+ */
+ *pte |= PG_M | PG_A;
+ } else {
+ *pte |= PG_A;
+ }
+
+#if VM_NRESERVLEVEL > 0
+ /* try to promote the mapping */
+ if (va < VM_MAXUSER_ADDRESS)
+ mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+ else
+ mpte = NULL;
+
+ m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
+
+ if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
+ pmap_ps_enabled(pmap) &&
+ (m->flags & PG_FICTITIOUS) == 0 &&
+ vm_reserv_level_iffullpop(m) == 0) {
+ pmap_promote_pde(pmap, pde, va, &lock);
+#ifdef INVARIANTS
+ atomic_add_long(&ad_emulation_superpage_promotions, 1);
+#endif
+ }
+#endif
+
+#ifdef INVARIANTS
+ if (ftype == VM_PROT_WRITE)
+ atomic_add_long(&num_dirty_emulations, 1);
+ else
+ atomic_add_long(&num_accessed_emulations, 1);
+#endif
+ rv = 0; /* success */
+done:
+ if (lock != NULL)
+ rw_wunlock(lock);
+ PMAP_UNLOCK(pmap);
+ return (rv);
+}
+
+void
+pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
+{
+ pml4_entry_t *pml4;
+ pdp_entry_t *pdp;
+ pd_entry_t *pde;
+ pt_entry_t *pte, PG_V;
+ int idx;
+
+ idx = 0;
+ PG_V = pmap_valid_bit(pmap);
+ PMAP_LOCK(pmap);
+
+ pml4 = pmap_pml4e(pmap, va);
+ ptr[idx++] = *pml4;
+ if ((*pml4 & PG_V) == 0)
+ goto done;
+
+ pdp = pmap_pml4e_to_pdpe(pml4, va);
+ ptr[idx++] = *pdp;
+ if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0)
+ goto done;
+
+ pde = pmap_pdpe_to_pde(pdp, va);
+ ptr[idx++] = *pde;
+ if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
+ goto done;
+
+ pte = pmap_pde_to_pte(pde, va);
+ ptr[idx++] = *pte;
+
+done:
+ PMAP_UNLOCK(pmap);
+ *num = idx;
+}
+
+/**
+ * Get the kernel virtual address of a set of physical pages. If there are
+ * physical addresses not covered by the DMAP perform a transient mapping
+ * that will be removed when calling pmap_unmap_io_transient.
+ *
+ * \param page The pages the caller wishes to obtain the virtual
+ * address on the kernel memory map.
+ * \param vaddr On return contains the kernel virtual memory address
+ * of the pages passed in the page parameter.
+ * \param count Number of pages passed in.
+ * \param can_fault TRUE if the thread using the mapped pages can take
+ * page faults, FALSE otherwise.
+ *
+ * \returns TRUE if the caller must call pmap_unmap_io_transient when
+ * finished or FALSE otherwise.
+ *
+ */
+boolean_t
+pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+ boolean_t can_fault)
+{
+ vm_paddr_t paddr;
+ boolean_t needs_mapping;
+ pt_entry_t *pte;
+ int cache_bits, error, i;
+
+ /*
+ * Allocate any KVA space that we need, this is done in a separate
+ * loop to prevent calling vmem_alloc while pinned.
+ */
+ needs_mapping = FALSE;
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (__predict_false(paddr >= dmaplimit)) {
+ error = vmem_alloc(kernel_arena, PAGE_SIZE,
+ M_BESTFIT | M_WAITOK, &vaddr[i]);
+ KASSERT(error == 0, ("vmem_alloc failed: %d", error));
+ needs_mapping = TRUE;
+ } else {
+ vaddr[i] = PHYS_TO_DMAP(paddr);
+ }
+ }
+
+ /* Exit early if everything is covered by the DMAP */
+ if (!needs_mapping)
+ return (FALSE);
+
+ /*
+ * NB: The sequence of updating a page table followed by accesses
+ * to the corresponding pages used in the !DMAP case is subject to
+ * the situation described in the "AMD64 Architecture Programmer's
+ * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
+ * Coherency Considerations". Therefore, issuing the INVLPG right
+ * after modifying the PTE bits is crucial.
+ */
+ if (!can_fault)
+ sched_pin();
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (paddr >= dmaplimit) {
+ if (can_fault) {
+ /*
+ * Slow path, since we can get page faults
+ * while mappings are active don't pin the
+ * thread to the CPU and instead add a global
+ * mapping visible to all CPUs.
+ */
+ pmap_qenter(vaddr[i], &page[i], 1);
+ } else {
+ pte = vtopte(vaddr[i]);
+ cache_bits = pmap_cache_bits(kernel_pmap,
+ page[i]->md.pat_mode, 0);
+ pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
+ cache_bits);
+ invlpg(vaddr[i]);
+ }
+ }
+ }
+
+ return (needs_mapping);
+}
+
+void
+pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+ boolean_t can_fault)
+{
+ vm_paddr_t paddr;
+ int i;
+
+ if (!can_fault)
+ sched_unpin();
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (paddr >= dmaplimit) {
+ if (can_fault)
+ pmap_qremove(vaddr[i], 1);
+ vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
+ }
+ }
+}
+
+vm_offset_t
+pmap_quick_enter_page(vm_page_t m)
+{
+ vm_paddr_t paddr;
+
+ paddr = VM_PAGE_TO_PHYS(m);
+ if (paddr < dmaplimit)
+ return (PHYS_TO_DMAP(paddr));
+ mtx_lock_spin(&qframe_mtx);
+ KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
+ pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
+ X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0));
+ return (qframe);
+}
+
+void
+pmap_quick_remove_page(vm_offset_t addr)
+{
+
+ if (addr != qframe)
+ return;
+ pte_store(vtopte(qframe), 0);
+ invlpg(qframe);
+ mtx_unlock_spin(&qframe_mtx);
+}
+
+static vm_page_t
+pmap_pti_alloc_page(void)
+{
+ vm_page_t m;
+
+ VM_OBJECT_ASSERT_WLOCKED(pti_obj);
+ m = vm_page_grab(pti_obj, pti_pg_idx++, VM_ALLOC_NOBUSY |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ return (m);
+}
+
+static bool
+pmap_pti_free_page(vm_page_t m)
+{
+
+ KASSERT(m->wire_count > 0, ("page %p not wired", m));
+ m->wire_count--;
+ if (m->wire_count != 0)
+ return (false);
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ vm_page_free_zero(m);
+ return (true);
+}
+
+static void
+pmap_pti_init(void)
+{
+ vm_page_t pml4_pg;
+ pdp_entry_t *pdpe;
+ vm_offset_t va;
+ int i;
+
+ if (!pti)
+ return;
+ pti_obj = vm_pager_allocate(OBJT_PHYS, NULL, 0, VM_PROT_ALL, 0, NULL);
+ VM_OBJECT_WLOCK(pti_obj);
+ pml4_pg = pmap_pti_alloc_page();
+ pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
+ for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
+ va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
+ pdpe = pmap_pti_pdpe(va);
+ pmap_pti_wire_pte(pdpe);
+ }
+ pmap_pti_add_kva_locked((vm_offset_t)&__pcpu[0],
+ (vm_offset_t)&__pcpu[0] + sizeof(__pcpu[0]) * MAXCPU, false);
+ pmap_pti_add_kva_locked((vm_offset_t)gdt, (vm_offset_t)gdt +
+ sizeof(struct user_segment_descriptor) * NGDT * MAXCPU, false);
+ pmap_pti_add_kva_locked((vm_offset_t)idt, (vm_offset_t)idt +
+ sizeof(struct gate_descriptor) * NIDT, false);
+ pmap_pti_add_kva_locked((vm_offset_t)common_tss,
+ (vm_offset_t)common_tss + sizeof(struct amd64tss) * MAXCPU, false);
+ CPU_FOREACH(i) {
+ /* Doublefault stack IST 1 */
+ va = common_tss[i].tss_ist1;
+ pmap_pti_add_kva_locked(va - PAGE_SIZE, va, false);
+ /* NMI stack IST 2 */
+ va = common_tss[i].tss_ist2 + sizeof(struct nmi_pcpu);
+ pmap_pti_add_kva_locked(va - PAGE_SIZE, va, false);
+ /* MC# stack IST 3 */
+ va = common_tss[i].tss_ist3 + sizeof(struct nmi_pcpu);
+ pmap_pti_add_kva_locked(va - PAGE_SIZE, va, false);
+ }
+ pmap_pti_add_kva_locked((vm_offset_t)kernphys + KERNBASE,
+ (vm_offset_t)etext, true);
+ pti_finalized = true;
+ VM_OBJECT_WUNLOCK(pti_obj);
+}
+SYSINIT(pmap_pti, SI_SUB_CPU + 1, SI_ORDER_ANY, pmap_pti_init, NULL);
+
+static pdp_entry_t *
+pmap_pti_pdpe(vm_offset_t va)
+{
+ pml4_entry_t *pml4e;
+ pdp_entry_t *pdpe;
+ vm_page_t m;
+ vm_pindex_t pml4_idx;
+ vm_paddr_t mphys;
+
+ VM_OBJECT_ASSERT_WLOCKED(pti_obj);
+
+ pml4_idx = pmap_pml4e_index(va);
+ pml4e = &pti_pml4[pml4_idx];
+ m = NULL;
+ if (*pml4e == 0) {
+ if (pti_finalized)
+ panic("pml4 alloc after finalization\n");
+ m = pmap_pti_alloc_page();
+ if (*pml4e != 0) {
+ pmap_pti_free_page(m);
+ mphys = *pml4e & ~PAGE_MASK;
+ } else {
+ mphys = VM_PAGE_TO_PHYS(m);
+ *pml4e = mphys | X86_PG_RW | X86_PG_V;
+ }
+ } else {
+ mphys = *pml4e & ~PAGE_MASK;
+ }
+ pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va);
+ return (pdpe);
+}
+
+static void
+pmap_pti_wire_pte(void *pte)
+{
+ vm_page_t m;
+
+ VM_OBJECT_ASSERT_WLOCKED(pti_obj);
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
+ m->wire_count++;
+}
+
+static void
+pmap_pti_unwire_pde(void *pde, bool only_ref)
+{
+ vm_page_t m;
+
+ VM_OBJECT_ASSERT_WLOCKED(pti_obj);
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
+ MPASS(m->wire_count > 0);
+ MPASS(only_ref || m->wire_count > 1);
+ pmap_pti_free_page(m);
+}
+
+static void
+pmap_pti_unwire_pte(void *pte, vm_offset_t va)
+{
+ vm_page_t m;
+ pd_entry_t *pde;
+
+ VM_OBJECT_ASSERT_WLOCKED(pti_obj);
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
+ MPASS(m->wire_count > 0);
+ if (pmap_pti_free_page(m)) {
+ pde = pmap_pti_pde(va);
+ MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);
+ *pde = 0;
+ pmap_pti_unwire_pde(pde, false);
+ }
+}
+
+static pd_entry_t *
+pmap_pti_pde(vm_offset_t va)
+{
+ pdp_entry_t *pdpe;
+ pd_entry_t *pde;
+ vm_page_t m;
+ vm_pindex_t pd_idx;
+ vm_paddr_t mphys;
+
+ VM_OBJECT_ASSERT_WLOCKED(pti_obj);
+
+ pdpe = pmap_pti_pdpe(va);
+ if (*pdpe == 0) {
+ m = pmap_pti_alloc_page();
+ if (*pdpe != 0) {
+ pmap_pti_free_page(m);
+ MPASS((*pdpe & X86_PG_PS) == 0);
+ mphys = *pdpe & ~PAGE_MASK;
+ } else {
+ mphys = VM_PAGE_TO_PHYS(m);
+ *pdpe = mphys | X86_PG_RW | X86_PG_V;
+ }
+ } else {
+ MPASS((*pdpe & X86_PG_PS) == 0);
+ mphys = *pdpe & ~PAGE_MASK;
+ }
+
+ pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
+ pd_idx = pmap_pde_index(va);
+ pde += pd_idx;
+ return (pde);
+}
+
+static pt_entry_t *
+pmap_pti_pte(vm_offset_t va, bool *unwire_pde)
+{
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ vm_page_t m;
+ vm_paddr_t mphys;
+
+ VM_OBJECT_ASSERT_WLOCKED(pti_obj);
+
+ pde = pmap_pti_pde(va);
+ if (unwire_pde != NULL) {
+ *unwire_pde = true;
+ pmap_pti_wire_pte(pde);
+ }
+ if (*pde == 0) {
+ m = pmap_pti_alloc_page();
+ if (*pde != 0) {
+ pmap_pti_free_page(m);
+ MPASS((*pde & X86_PG_PS) == 0);
+ mphys = *pde & ~(PAGE_MASK | pg_nx);
+ } else {
+ mphys = VM_PAGE_TO_PHYS(m);
+ *pde = mphys | X86_PG_RW | X86_PG_V;
+ if (unwire_pde != NULL)
+ *unwire_pde = false;
+ }
+ } else {
+ MPASS((*pde & X86_PG_PS) == 0);
+ mphys = *pde & ~(PAGE_MASK | pg_nx);
+ }
+
+ pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
+ pte += pmap_pte_index(va);
+
+ return (pte);
+}
+
+static void
+pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva, bool exec)
+{
+ vm_paddr_t pa;
+ pd_entry_t *pde;
+ pt_entry_t *pte, ptev;
+ bool unwire_pde;
+
+ VM_OBJECT_ASSERT_WLOCKED(pti_obj);
+
+ sva = trunc_page(sva);
+ MPASS(sva > VM_MAXUSER_ADDRESS);
+ eva = round_page(eva);
+ MPASS(sva < eva);
+ for (; sva < eva; sva += PAGE_SIZE) {
+ pte = pmap_pti_pte(sva, &unwire_pde);
+ pa = pmap_kextract(sva);
+ ptev = pa | X86_PG_RW | X86_PG_V | X86_PG_A |
+ (exec ? 0 : pg_nx) | pmap_cache_bits(kernel_pmap,
+ VM_MEMATTR_DEFAULT, FALSE);
+ if (*pte == 0) {
+ pte_store(pte, ptev);
+ pmap_pti_wire_pte(pte);
+ } else {
+ KASSERT(!pti_finalized,
+ ("pti overlap after fin %#lx %#lx %#lx",
+ sva, *pte, ptev));
+ KASSERT(*pte == ptev,
+ ("pti non-identical pte after fin %#lx %#lx %#lx",
+ sva, *pte, ptev));
+ }
+ if (unwire_pde) {
+ pde = pmap_pti_pde(sva);
+ pmap_pti_unwire_pde(pde, true);
+ }
+ }
+}
+
+void
+pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec)
+{
+
+ if (!pti)
+ return;
+ VM_OBJECT_WLOCK(pti_obj);
+ pmap_pti_add_kva_locked(sva, eva, exec);
+ VM_OBJECT_WUNLOCK(pti_obj);
+}
+
+void
+pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva)
+{
+ pt_entry_t *pte;
+ vm_offset_t va;
+
+ if (!pti)
+ return;
+ sva = rounddown2(sva, PAGE_SIZE);
+ MPASS(sva > VM_MAXUSER_ADDRESS);
+ eva = roundup2(eva, PAGE_SIZE);
+ MPASS(sva < eva);
+ VM_OBJECT_WLOCK(pti_obj);
+ for (va = sva; va < eva; va += PAGE_SIZE) {
+ pte = pmap_pti_pte(va, NULL);
+ KASSERT((*pte & X86_PG_V) != 0,
+ ("invalid pte va %#lx pte %#lx pt %#lx", va,
+ (u_long)pte, *pte));
+ pte_clear(pte);
+ pmap_pti_unwire_pte(pte, va);
+ }
+ pmap_invalidate_range(kernel_pmap, sva, eva);
+ VM_OBJECT_WUNLOCK(pti_obj);
+}
+
+#include "opt_ddb.h"
+#ifdef DDB
+#include <sys/kdb.h>
+#include <ddb/ddb.h>
+
+DB_SHOW_COMMAND(pte, pmap_print_pte)
+{
+ pmap_t pmap;
+ pml4_entry_t *pml4;
+ pdp_entry_t *pdp;
+ pd_entry_t *pde;
+ pt_entry_t *pte, PG_V;
+ vm_offset_t va;
+
+ if (!have_addr) {
+ db_printf("show pte addr\n");
+ return;
+ }
+ va = (vm_offset_t)addr;
+
+ if (kdb_thread != NULL)
+ pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
+ else
+ pmap = PCPU_GET(curpmap);
+
+ PG_V = pmap_valid_bit(pmap);
+ pml4 = pmap_pml4e(pmap, va);
+ db_printf("VA %#016lx pml4e %#016lx", va, *pml4);
+ if ((*pml4 & PG_V) == 0) {
+ db_printf("\n");
+ return;
+ }
+ pdp = pmap_pml4e_to_pdpe(pml4, va);
+ db_printf(" pdpe %#016lx", *pdp);
+ if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
+ db_printf("\n");
+ return;
+ }
+ pde = pmap_pdpe_to_pde(pdp, va);
+ db_printf(" pde %#016lx", *pde);
+ if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
+ db_printf("\n");
+ return;
+ }
+ pte = pmap_pde_to_pte(pde, va);
+ db_printf(" pte %#016lx\n", *pte);
+}
+
+DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
+{
+ vm_paddr_t a;
+
+ if (have_addr) {
+ a = (vm_paddr_t)addr;
+ db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
+ } else {
+ db_printf("show phys2dmap addr\n");
+ }
+}
+#endif
Index: sys/amd64/amd64/sigtramp.S
===================================================================
--- sys/amd64/amd64/sigtramp.S
+++ sys/amd64/amd64/sigtramp.S
@@ -30,7 +30,7 @@
#include <machine/asmacros.h>
-#include "assym.s"
+#include "assym.S"
.text
/**********************************************************************
Index: sys/amd64/amd64/support.S
===================================================================
--- sys/amd64/amd64/support.S
+++ sys/amd64/amd64/support.S
@@ -36,7 +36,7 @@
#include <machine/specialreg.h>
#include <machine/pmap.h>
-#include "assym.s"
+#include "assym.S"
.text
Index: sys/amd64/amd64/support.S.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/support.S.orig
@@ -0,0 +1,861 @@
+/*-
+ * Copyright (c) 2003 Peter Wemm.
+ * Copyright (c) 1993 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_ddb.h"
+
+#include <machine/asmacros.h>
+#include <machine/pmap.h>
+
+#include "assym.S"
+
+ .text
+
+/*
+ * bcopy family
+ * void bzero(void *buf, u_int len)
+ */
+
+/* done */
+ENTRY(bzero)
+ PUSH_FRAME_POINTER
+ movq %rsi,%rcx
+ xorl %eax,%eax
+ shrq $3,%rcx
+ rep
+ stosq
+ movq %rsi,%rcx
+ andq $7,%rcx
+ rep
+ stosb
+ POP_FRAME_POINTER
+ ret
+END(bzero)
+
+/* Address: %rdi */
+ENTRY(pagezero)
+ PUSH_FRAME_POINTER
+ movq $PAGE_SIZE/8,%rcx
+ xorl %eax,%eax
+ rep
+ stosq
+ POP_FRAME_POINTER
+ ret
+END(pagezero)
+
+/* Address: %rdi */
+ENTRY(sse2_pagezero)
+ PUSH_FRAME_POINTER
+ movq $-PAGE_SIZE,%rdx
+ subq %rdx,%rdi
+ xorl %eax,%eax
+ jmp 1f
+ /*
+ * The loop takes 29 bytes. Ensure that it doesn't cross a 32-byte
+ * cache line.
+ */
+ .p2align 5,0x90
+1:
+ movnti %rax,(%rdi,%rdx)
+ movnti %rax,8(%rdi,%rdx)
+ movnti %rax,16(%rdi,%rdx)
+ movnti %rax,24(%rdi,%rdx)
+ addq $32,%rdx
+ jne 1b
+ sfence
+ POP_FRAME_POINTER
+ ret
+END(sse2_pagezero)
+
+ENTRY(bcmp)
+ PUSH_FRAME_POINTER
+ movq %rdx,%rcx
+ shrq $3,%rcx
+ repe
+ cmpsq
+ jne 1f
+
+ movq %rdx,%rcx
+ andq $7,%rcx
+ repe
+ cmpsb
+1:
+ setne %al
+ movsbl %al,%eax
+ POP_FRAME_POINTER
+ ret
+END(bcmp)
+
+/*
+ * bcopy(src, dst, cnt)
+ * rdi, rsi, rdx
+ * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
+ */
+ENTRY(bcopy)
+ PUSH_FRAME_POINTER
+ xchgq %rsi,%rdi
+ movq %rdx,%rcx
+
+ movq %rdi,%rax
+ subq %rsi,%rax
+ cmpq %rcx,%rax /* overlapping && src < dst? */
+ jb 1f
+
+ shrq $3,%rcx /* copy by 64-bit words */
+ rep
+ movsq
+ movq %rdx,%rcx
+ andq $7,%rcx /* any bytes left? */
+ rep
+ movsb
+ POP_FRAME_POINTER
+ ret
+
+ /* ALIGN_TEXT */
+1:
+ addq %rcx,%rdi /* copy backwards */
+ addq %rcx,%rsi
+ decq %rdi
+ decq %rsi
+ andq $7,%rcx /* any fractional bytes? */
+ std
+ rep
+ movsb
+ movq %rdx,%rcx /* copy remainder by 32-bit words */
+ shrq $3,%rcx
+ subq $7,%rsi
+ subq $7,%rdi
+ rep
+ movsq
+ cld
+ POP_FRAME_POINTER
+ ret
+END(bcopy)
+
+/*
+ * Note: memcpy does not support overlapping copies
+ */
+ENTRY(memcpy)
+ PUSH_FRAME_POINTER
+ movq %rdi,%rax
+ movq %rdx,%rcx
+ shrq $3,%rcx /* copy by 64-bit words */
+ rep
+ movsq
+ movq %rdx,%rcx
+ andq $7,%rcx /* any bytes left? */
+ rep
+ movsb
+ POP_FRAME_POINTER
+ ret
+END(memcpy)
+
+/*
+ * pagecopy(%rdi=from, %rsi=to)
+ */
+ENTRY(pagecopy)
+ PUSH_FRAME_POINTER
+ movq $-PAGE_SIZE,%rax
+ movq %rax,%rdx
+ subq %rax,%rdi
+ subq %rax,%rsi
+1:
+ prefetchnta (%rdi,%rax)
+ addq $64,%rax
+ jne 1b
+2:
+ movq (%rdi,%rdx),%rax
+ movnti %rax,(%rsi,%rdx)
+ movq 8(%rdi,%rdx),%rax
+ movnti %rax,8(%rsi,%rdx)
+ movq 16(%rdi,%rdx),%rax
+ movnti %rax,16(%rsi,%rdx)
+ movq 24(%rdi,%rdx),%rax
+ movnti %rax,24(%rsi,%rdx)
+ addq $32,%rdx
+ jne 2b
+ sfence
+ POP_FRAME_POINTER
+ ret
+END(pagecopy)
+
+/* fillw(pat, base, cnt) */
+/* %rdi,%rsi, %rdx */
+ENTRY(fillw)
+ PUSH_FRAME_POINTER
+ movq %rdi,%rax
+ movq %rsi,%rdi
+ movq %rdx,%rcx
+ rep
+ stosw
+ POP_FRAME_POINTER
+ ret
+END(fillw)
+
+/*****************************************************************************/
+/* copyout and fubyte family */
+/*****************************************************************************/
+/*
+ * Access user memory from inside the kernel. These routines should be
+ * the only places that do this.
+ *
+ * These routines set curpcb->pcb_onfault for the time they execute. When a
+ * protection violation occurs inside the functions, the trap handler
+ * returns to *curpcb->pcb_onfault instead of the function.
+ */
+
+/*
+ * copyout(from_kernel, to_user, len)
+ * %rdi, %rsi, %rdx
+ */
+ENTRY(copyout)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rax
+ movq $copyout_fault,PCB_ONFAULT(%rax)
+ testq %rdx,%rdx /* anything to do? */
+ jz done_copyout
+
+ /*
+ * Check explicitly for non-user addresses. This check is essential
+ * because it prevents usermode from writing into the kernel. We do
+ * not verify anywhere else that the user did not specify a rogue
+ * address.
+ */
+ /*
+ * First, prevent address wrapping.
+ */
+ movq %rsi,%rax
+ addq %rdx,%rax
+ jc copyout_fault
+/*
+ * XXX STOP USING VM_MAXUSER_ADDRESS.
+ * It is an end address, not a max, so every time it is used correctly it
+ * looks like there is an off by one error, and of course it caused an off
+ * by one error in several places.
+ */
+ movq $VM_MAXUSER_ADDRESS,%rcx
+ cmpq %rcx,%rax
+ ja copyout_fault
+
+ xchgq %rdi,%rsi
+ /* bcopy(%rsi, %rdi, %rdx) */
+ movq %rdx,%rcx
+
+ shrq $3,%rcx
+ rep
+ movsq
+ movb %dl,%cl
+ andb $7,%cl
+ rep
+ movsb
+
+done_copyout:
+ xorl %eax,%eax
+ movq PCPU(CURPCB),%rdx
+ movq %rax,PCB_ONFAULT(%rdx)
+ POP_FRAME_POINTER
+ ret
+
+ ALIGN_TEXT
+copyout_fault:
+ movq PCPU(CURPCB),%rdx
+ movq $0,PCB_ONFAULT(%rdx)
+ movq $EFAULT,%rax
+ POP_FRAME_POINTER
+ ret
+END(copyout)
+
+/*
+ * copyin(from_user, to_kernel, len)
+ * %rdi, %rsi, %rdx
+ */
+ENTRY(copyin)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rax
+ movq $copyin_fault,PCB_ONFAULT(%rax)
+ testq %rdx,%rdx /* anything to do? */
+ jz done_copyin
+
+ /*
+ * make sure address is valid
+ */
+ movq %rdi,%rax
+ addq %rdx,%rax
+ jc copyin_fault
+ movq $VM_MAXUSER_ADDRESS,%rcx
+ cmpq %rcx,%rax
+ ja copyin_fault
+
+ xchgq %rdi,%rsi
+ movq %rdx,%rcx
+ movb %cl,%al
+ shrq $3,%rcx /* copy longword-wise */
+ rep
+ movsq
+ movb %al,%cl
+ andb $7,%cl /* copy remaining bytes */
+ rep
+ movsb
+
+done_copyin:
+ xorl %eax,%eax
+ movq PCPU(CURPCB),%rdx
+ movq %rax,PCB_ONFAULT(%rdx)
+ POP_FRAME_POINTER
+ ret
+
+ ALIGN_TEXT
+copyin_fault:
+ movq PCPU(CURPCB),%rdx
+ movq $0,PCB_ONFAULT(%rdx)
+ movq $EFAULT,%rax
+ POP_FRAME_POINTER
+ ret
+END(copyin)
+
+/*
+ * casueword32. Compare and set user integer. Returns -1 on fault,
+ * 0 if access was successful. Old value is written to *oldp.
+ * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
+ */
+ENTRY(casueword32)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%r8
+ movq $fusufault,PCB_ONFAULT(%r8)
+
+ movq $VM_MAXUSER_ADDRESS-4,%rax
+ cmpq %rax,%rdi /* verify address is valid */
+ ja fusufault
+
+ movl %esi,%eax /* old */
+#ifdef SMP
+ lock
+#endif
+ cmpxchgl %ecx,(%rdi) /* new = %ecx */
+
+ /*
+ * The old value is in %eax. If the store succeeded it will be the
+ * value we expected (old) from before the store, otherwise it will
+ * be the current value. Save %eax into %esi to prepare the return
+ * value.
+ */
+ movl %eax,%esi
+ xorl %eax,%eax
+ movq %rax,PCB_ONFAULT(%r8)
+
+ /*
+ * Access the oldp after the pcb_onfault is cleared, to correctly
+ * catch corrupted pointer.
+ */
+ movl %esi,(%rdx) /* oldp = %rdx */
+ POP_FRAME_POINTER
+ ret
+END(casueword32)
+
+/*
+ * casueword. Compare and set user long. Returns -1 on fault,
+ * 0 if access was successful. Old value is written to *oldp.
+ * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
+ */
+ENTRY(casueword)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%r8
+ movq $fusufault,PCB_ONFAULT(%r8)
+
+ movq $VM_MAXUSER_ADDRESS-4,%rax
+ cmpq %rax,%rdi /* verify address is valid */
+ ja fusufault
+
+ movq %rsi,%rax /* old */
+#ifdef SMP
+ lock
+#endif
+ cmpxchgq %rcx,(%rdi) /* new = %rcx */
+
+ /*
+ * The old value is in %rax. If the store succeeded it will be the
+ * value we expected (old) from before the store, otherwise it will
+ * be the current value.
+ */
+ movq %rax,%rsi
+ xorl %eax,%eax
+ movq %rax,PCB_ONFAULT(%r8)
+ movq %rsi,(%rdx)
+ POP_FRAME_POINTER
+ ret
+END(casueword)
+
+/*
+ * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
+ * byte from user memory.
+ * addr = %rdi, valp = %rsi
+ */
+
+ALTENTRY(fueword64)
+ENTRY(fueword)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rcx
+ movq $fusufault,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS-8,%rax
+ cmpq %rax,%rdi /* verify address is valid */
+ ja fusufault
+
+ xorl %eax,%eax
+ movq (%rdi),%r11
+ movq %rax,PCB_ONFAULT(%rcx)
+ movq %r11,(%rsi)
+ POP_FRAME_POINTER
+ ret
+END(fueword64)
+END(fueword)
+
+ENTRY(fueword32)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rcx
+ movq $fusufault,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS-4,%rax
+ cmpq %rax,%rdi /* verify address is valid */
+ ja fusufault
+
+ xorl %eax,%eax
+ movl (%rdi),%r11d
+ movq %rax,PCB_ONFAULT(%rcx)
+ movl %r11d,(%rsi)
+ POP_FRAME_POINTER
+ ret
+END(fueword32)
+
+/*
+ * fuswintr() and suswintr() are specialized variants of fuword16() and
+ * suword16(), respectively. They are called from the profiling code,
+ * potentially at interrupt time. If they fail, that's okay; good things
+ * will happen later. They always fail for now, until the trap code is
+ * able to deal with this.
+ */
+ALTENTRY(suswintr)
+ENTRY(fuswintr)
+ movq $-1,%rax
+ ret
+END(suswintr)
+END(fuswintr)
+
+ENTRY(fuword16)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rcx
+ movq $fusufault,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS-2,%rax
+ cmpq %rax,%rdi
+ ja fusufault
+
+ movzwl (%rdi),%eax
+ movq $0,PCB_ONFAULT(%rcx)
+ POP_FRAME_POINTER
+ ret
+END(fuword16)
+
+ENTRY(fubyte)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rcx
+ movq $fusufault,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS-1,%rax
+ cmpq %rax,%rdi
+ ja fusufault
+
+ movzbl (%rdi),%eax
+ movq $0,PCB_ONFAULT(%rcx)
+ POP_FRAME_POINTER
+ ret
+END(fubyte)
+
+ ALIGN_TEXT
+fusufault:
+ movq PCPU(CURPCB),%rcx
+ xorl %eax,%eax
+ movq %rax,PCB_ONFAULT(%rcx)
+ decq %rax
+ POP_FRAME_POINTER
+ ret
+
+/*
+ * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
+ * user memory.
+ * addr = %rdi, value = %rsi
+ */
+ALTENTRY(suword64)
+ENTRY(suword)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rcx
+ movq $fusufault,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS-8,%rax
+ cmpq %rax,%rdi /* verify address validity */
+ ja fusufault
+
+ movq %rsi,(%rdi)
+ xorl %eax,%eax
+ movq PCPU(CURPCB),%rcx
+ movq %rax,PCB_ONFAULT(%rcx)
+ POP_FRAME_POINTER
+ ret
+END(suword64)
+END(suword)
+
+ENTRY(suword32)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rcx
+ movq $fusufault,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS-4,%rax
+ cmpq %rax,%rdi /* verify address validity */
+ ja fusufault
+
+ movl %esi,(%rdi)
+ xorl %eax,%eax
+ movq PCPU(CURPCB),%rcx
+ movq %rax,PCB_ONFAULT(%rcx)
+ POP_FRAME_POINTER
+ ret
+END(suword32)
+
+ENTRY(suword16)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rcx
+ movq $fusufault,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS-2,%rax
+ cmpq %rax,%rdi /* verify address validity */
+ ja fusufault
+
+ movw %si,(%rdi)
+ xorl %eax,%eax
+ movq PCPU(CURPCB),%rcx /* restore trashed register */
+ movq %rax,PCB_ONFAULT(%rcx)
+ POP_FRAME_POINTER
+ ret
+END(suword16)
+
+ENTRY(subyte)
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%rcx
+ movq $fusufault,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS-1,%rax
+ cmpq %rax,%rdi /* verify address validity */
+ ja fusufault
+
+ movl %esi,%eax
+ movb %al,(%rdi)
+ xorl %eax,%eax
+ movq PCPU(CURPCB),%rcx /* restore trashed register */
+ movq %rax,PCB_ONFAULT(%rcx)
+ POP_FRAME_POINTER
+ ret
+END(subyte)
+
+/*
+ * copyinstr(from, to, maxlen, int *lencopied)
+ * %rdi, %rsi, %rdx, %rcx
+ *
+ * copy a string from 'from' to 'to', stop when a 0 character is reached.
+ * return ENAMETOOLONG if string is longer than maxlen, and
+ * EFAULT on protection violations. If lencopied is non-zero,
+ * return the actual length in *lencopied.
+ */
+ENTRY(copyinstr)
+ PUSH_FRAME_POINTER
+ movq %rdx,%r8 /* %r8 = maxlen */
+ movq %rcx,%r9 /* %r9 = *len */
+ xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
+ movq PCPU(CURPCB),%rcx
+ movq $cpystrflt,PCB_ONFAULT(%rcx)
+
+ movq $VM_MAXUSER_ADDRESS,%rax
+
+ /* make sure 'from' is within bounds */
+ subq %rsi,%rax
+ jbe cpystrflt
+
+ /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
+ cmpq %rdx,%rax
+ jae 1f
+ movq %rax,%rdx
+ movq %rax,%r8
+1:
+ incq %rdx
+
+2:
+ decq %rdx
+ jz 3f
+
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 2b
+
+ /* Success -- 0 byte reached */
+ decq %rdx
+ xorl %eax,%eax
+ jmp cpystrflt_x
+3:
+ /* rdx is zero - return ENAMETOOLONG or EFAULT */
+ movq $VM_MAXUSER_ADDRESS,%rax
+ cmpq %rax,%rsi
+ jae cpystrflt
+4:
+ movq $ENAMETOOLONG,%rax
+ jmp cpystrflt_x
+
+cpystrflt:
+ movq $EFAULT,%rax
+
+cpystrflt_x:
+ /* set *lencopied and return %eax */
+ movq PCPU(CURPCB),%rcx
+ movq $0,PCB_ONFAULT(%rcx)
+
+ testq %r9,%r9
+ jz 1f
+ subq %rdx,%r8
+ movq %r8,(%r9)
+1:
+ POP_FRAME_POINTER
+ ret
+END(copyinstr)
+
+/*
+ * copystr(from, to, maxlen, int *lencopied)
+ * %rdi, %rsi, %rdx, %rcx
+ */
+ENTRY(copystr)
+ PUSH_FRAME_POINTER
+ movq %rdx,%r8 /* %r8 = maxlen */
+
+ xchgq %rdi,%rsi
+ incq %rdx
+1:
+ decq %rdx
+ jz 4f
+ lodsb
+ stosb
+ orb %al,%al
+ jnz 1b
+
+ /* Success -- 0 byte reached */
+ decq %rdx
+ xorl %eax,%eax
+ jmp 6f
+4:
+ /* rdx is zero -- return ENAMETOOLONG */
+ movq $ENAMETOOLONG,%rax
+
+6:
+
+ testq %rcx,%rcx
+ jz 7f
+ /* set *lencopied and return %rax */
+ subq %rdx,%r8
+ movq %r8,(%rcx)
+7:
+ POP_FRAME_POINTER
+ ret
+END(copystr)
+
+/*
+ * Handling of special amd64 registers and descriptor tables etc
+ */
+/* void lgdt(struct region_descriptor *rdp); */
+ENTRY(lgdt)
+ /* reload the descriptor table */
+ lgdt (%rdi)
+
+ /* flush the prefetch q */
+ jmp 1f
+ nop
+1:
+ movl $KDSEL,%eax
+ movl %eax,%ds
+ movl %eax,%es
+ movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */
+ movl %eax,%gs
+ movl %eax,%ss
+
+ /* reload code selector by turning return into intersegmental return */
+ popq %rax
+ pushq $KCSEL
+ pushq %rax
+ MEXITCOUNT
+ lretq
+END(lgdt)
+
+/*****************************************************************************/
+/* setjump, longjump */
+/*****************************************************************************/
+
+ENTRY(setjmp)
+ movq %rbx,0(%rdi) /* save rbx */
+ movq %rsp,8(%rdi) /* save rsp */
+ movq %rbp,16(%rdi) /* save rbp */
+ movq %r12,24(%rdi) /* save r12 */
+ movq %r13,32(%rdi) /* save r13 */
+ movq %r14,40(%rdi) /* save r14 */
+ movq %r15,48(%rdi) /* save r15 */
+ movq 0(%rsp),%rdx /* get rta */
+ movq %rdx,56(%rdi) /* save rip */
+ xorl %eax,%eax /* return(0); */
+ ret
+END(setjmp)
+
+ENTRY(longjmp)
+ movq 0(%rdi),%rbx /* restore rbx */
+ movq 8(%rdi),%rsp /* restore rsp */
+ movq 16(%rdi),%rbp /* restore rbp */
+ movq 24(%rdi),%r12 /* restore r12 */
+ movq 32(%rdi),%r13 /* restore r13 */
+ movq 40(%rdi),%r14 /* restore r14 */
+ movq 48(%rdi),%r15 /* restore r15 */
+ movq 56(%rdi),%rdx /* get rta */
+ movq %rdx,0(%rsp) /* put in return frame */
+ xorl %eax,%eax /* return(1); */
+ incl %eax
+ ret
+END(longjmp)
+
+/*
+ * Support for reading MSRs in the safe manner. (Instead of panic on #gp,
+ * return an error.)
+ */
+ENTRY(rdmsr_safe)
+/* int rdmsr_safe(u_int msr, uint64_t *data) */
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%r8
+ movq $msr_onfault,PCB_ONFAULT(%r8)
+ movl %edi,%ecx
+ rdmsr /* Read MSR pointed by %ecx. Returns
+ hi byte in edx, lo in %eax */
+ salq $32,%rdx /* sign-shift %rdx left */
+ movl %eax,%eax /* zero-extend %eax -> %rax */
+ orq %rdx,%rax
+ movq %rax,(%rsi)
+ xorq %rax,%rax
+ movq %rax,PCB_ONFAULT(%r8)
+ POP_FRAME_POINTER
+ ret
+
+/*
+ * Support for writing MSRs in the safe manner. (Instead of panic on #gp,
+ * return an error.)
+ */
+ENTRY(wrmsr_safe)
+/* int wrmsr_safe(u_int msr, uint64_t data) */
+ PUSH_FRAME_POINTER
+ movq PCPU(CURPCB),%r8
+ movq $msr_onfault,PCB_ONFAULT(%r8)
+ movl %edi,%ecx
+ movl %esi,%eax
+ sarq $32,%rsi
+ movl %esi,%edx
+ wrmsr /* Write MSR pointed by %ecx. Accepts
+ hi byte in edx, lo in %eax. */
+ xorq %rax,%rax
+ movq %rax,PCB_ONFAULT(%r8)
+ POP_FRAME_POINTER
+ ret
+
+/*
+ * MSR operations fault handler
+ */
+ ALIGN_TEXT
+msr_onfault:
+ movq $0,PCB_ONFAULT(%r8)
+ movl $EFAULT,%eax
+ POP_FRAME_POINTER
+ ret
+
+/*
+ * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
+ * Invalidates address space addressed by ucr3, then returns to kcr3.
+ * Done in assembler to ensure no other memory accesses happen while
+ * on ucr3.
+ */
+ ALIGN_TEXT
+ENTRY(pmap_pti_pcid_invalidate)
+ pushfq
+ cli
+ movq %rdi,%cr3 /* to user page table */
+ movq %rsi,%cr3 /* back to kernel */
+ popfq
+ retq
+
+/*
+ * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
+ * Invalidates virtual address va in address space ucr3, then returns to kcr3.
+ */
+ ALIGN_TEXT
+ENTRY(pmap_pti_pcid_invlpg)
+ pushfq
+ cli
+ movq %rdi,%cr3 /* to user page table */
+ invlpg (%rdx)
+ movq %rsi,%cr3 /* back to kernel */
+ popfq
+ retq
+
+/*
+ * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
+ * vm_offset_t eva);
+ * Invalidates virtual addresses between sva and eva in address space ucr3,
+ * then returns to kcr3.
+ */
+ ALIGN_TEXT
+ENTRY(pmap_pti_pcid_invlrng)
+ pushfq
+ cli
+ movq %rdi,%cr3 /* to user page table */
+1: invlpg (%rdx)
+ addq $PAGE_SIZE,%rdx
+ cmpq %rdx,%rcx
+ ja 1b
+ movq %rsi,%cr3 /* back to kernel */
+ popfq
+ retq
Index: sys/amd64/amd64/sys_machdep.c.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/sys_machdep.c.orig
@@ -0,0 +1,748 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2003 Peter Wemm.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_capsicum.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/capsicum.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+#include <sys/sysproto.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h> /* for kernel_map */
+#include <vm/vm_extern.h>
+
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/specialreg.h>
+#include <machine/sysarch.h>
+#include <machine/tss.h>
+#include <machine/vmparam.h>
+
+#include <security/audit/audit.h>
+
+#define MAX_LD 8192
+
+int max_ldt_segment = 512;
+SYSCTL_INT(_machdep, OID_AUTO, max_ldt_segment, CTLFLAG_RDTUN,
+ &max_ldt_segment, 0,
+ "Maximum number of allowed LDT segments in the single address space");
+
+static void
+max_ldt_segment_init(void *arg __unused)
+{
+
+ if (max_ldt_segment <= 0)
+ max_ldt_segment = 1;
+ if (max_ldt_segment > MAX_LD)
+ max_ldt_segment = MAX_LD;
+}
+SYSINIT(maxldt, SI_SUB_VM_CONF, SI_ORDER_ANY, max_ldt_segment_init, NULL);
+
+static void user_ldt_derefl(struct proc_ldt *pldt);
+
+#ifndef _SYS_SYSPROTO_H_
+struct sysarch_args {
+ int op;
+ char *parms;
+};
+#endif
+
+int
+sysarch_ldt(struct thread *td, struct sysarch_args *uap, int uap_space)
+{
+ struct i386_ldt_args *largs, la;
+ struct user_segment_descriptor *lp;
+ int error = 0;
+
+ /*
+ * XXXKIB check that the BSM generation code knows to encode
+ * the op argument.
+ */
+ AUDIT_ARG_CMD(uap->op);
+ if (uap_space == UIO_USERSPACE) {
+ error = copyin(uap->parms, &la, sizeof(struct i386_ldt_args));
+ if (error != 0)
+ return (error);
+ largs = &la;
+ } else
+ largs = (struct i386_ldt_args *)uap->parms;
+
+ switch (uap->op) {
+ case I386_GET_LDT:
+ error = amd64_get_ldt(td, largs);
+ break;
+ case I386_SET_LDT:
+ if (largs->descs != NULL && largs->num > max_ldt_segment)
+ return (EINVAL);
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
+ if (largs->descs != NULL) {
+ lp = malloc(largs->num * sizeof(struct
+ user_segment_descriptor), M_TEMP, M_WAITOK);
+ error = copyin(largs->descs, lp, largs->num *
+ sizeof(struct user_segment_descriptor));
+ if (error == 0)
+ error = amd64_set_ldt(td, largs, lp);
+ free(lp, M_TEMP);
+ } else {
+ error = amd64_set_ldt(td, largs, NULL);
+ }
+ break;
+ }
+ return (error);
+}
+
+void
+update_gdt_gsbase(struct thread *td, uint32_t base)
+{
+ struct user_segment_descriptor *sd;
+
+ if (td != curthread)
+ return;
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
+ critical_enter();
+ sd = PCPU_GET(gs32p);
+ sd->sd_lobase = base & 0xffffff;
+ sd->sd_hibase = (base >> 24) & 0xff;
+ critical_exit();
+}
+
+void
+update_gdt_fsbase(struct thread *td, uint32_t base)
+{
+ struct user_segment_descriptor *sd;
+
+ if (td != curthread)
+ return;
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
+ critical_enter();
+ sd = PCPU_GET(fs32p);
+ sd->sd_lobase = base & 0xffffff;
+ sd->sd_hibase = (base >> 24) & 0xff;
+ critical_exit();
+}
+
+int
+sysarch(struct thread *td, struct sysarch_args *uap)
+{
+ int error = 0;
+ struct pcb *pcb = curthread->td_pcb;
+ uint32_t i386base;
+ uint64_t a64base;
+ struct i386_ioperm_args iargs;
+ struct i386_get_xfpustate i386xfpu;
+ struct amd64_get_xfpustate a64xfpu;
+
+#ifdef CAPABILITY_MODE
+ /*
+ * When adding new operations, add a new case statement here to
+ * explicitly indicate whether or not the operation is safe to
+ * perform in capability mode.
+ */
+ if (IN_CAPABILITY_MODE(td)) {
+ switch (uap->op) {
+ case I386_GET_LDT:
+ case I386_SET_LDT:
+ case I386_GET_IOPERM:
+ case I386_GET_FSBASE:
+ case I386_SET_FSBASE:
+ case I386_GET_GSBASE:
+ case I386_SET_GSBASE:
+ case I386_GET_XFPUSTATE:
+ case AMD64_GET_FSBASE:
+ case AMD64_SET_FSBASE:
+ case AMD64_GET_GSBASE:
+ case AMD64_SET_GSBASE:
+ case AMD64_GET_XFPUSTATE:
+ break;
+
+ case I386_SET_IOPERM:
+ default:
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CAPFAIL))
+ ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
+#endif
+ return (ECAPMODE);
+ }
+ }
+#endif
+
+ if (uap->op == I386_GET_LDT || uap->op == I386_SET_LDT)
+ return (sysarch_ldt(td, uap, UIO_USERSPACE));
+ /*
+ * XXXKIB check that the BSM generation code knows to encode
+ * the op argument.
+ */
+ AUDIT_ARG_CMD(uap->op);
+ switch (uap->op) {
+ case I386_GET_IOPERM:
+ case I386_SET_IOPERM:
+ if ((error = copyin(uap->parms, &iargs,
+ sizeof(struct i386_ioperm_args))) != 0)
+ return (error);
+ break;
+ case I386_GET_XFPUSTATE:
+ if ((error = copyin(uap->parms, &i386xfpu,
+ sizeof(struct i386_get_xfpustate))) != 0)
+ return (error);
+ a64xfpu.addr = (void *)(uintptr_t)i386xfpu.addr;
+ a64xfpu.len = i386xfpu.len;
+ break;
+ case AMD64_GET_XFPUSTATE:
+ if ((error = copyin(uap->parms, &a64xfpu,
+ sizeof(struct amd64_get_xfpustate))) != 0)
+ return (error);
+ break;
+ default:
+ break;
+ }
+
+ switch (uap->op) {
+ case I386_GET_IOPERM:
+ error = amd64_get_ioperm(td, &iargs);
+ if (error == 0)
+ error = copyout(&iargs, uap->parms,
+ sizeof(struct i386_ioperm_args));
+ break;
+ case I386_SET_IOPERM:
+ error = amd64_set_ioperm(td, &iargs);
+ break;
+ case I386_GET_FSBASE:
+ update_pcb_bases(pcb);
+ i386base = pcb->pcb_fsbase;
+ error = copyout(&i386base, uap->parms, sizeof(i386base));
+ break;
+ case I386_SET_FSBASE:
+ error = copyin(uap->parms, &i386base, sizeof(i386base));
+ if (!error) {
+ set_pcb_flags(pcb, PCB_FULL_IRET);
+ pcb->pcb_fsbase = i386base;
+ td->td_frame->tf_fs = _ufssel;
+ update_gdt_fsbase(td, i386base);
+ }
+ break;
+ case I386_GET_GSBASE:
+ update_pcb_bases(pcb);
+ i386base = pcb->pcb_gsbase;
+ error = copyout(&i386base, uap->parms, sizeof(i386base));
+ break;
+ case I386_SET_GSBASE:
+ error = copyin(uap->parms, &i386base, sizeof(i386base));
+ if (!error) {
+ set_pcb_flags(pcb, PCB_FULL_IRET);
+ pcb->pcb_gsbase = i386base;
+ td->td_frame->tf_gs = _ugssel;
+ update_gdt_gsbase(td, i386base);
+ }
+ break;
+ case AMD64_GET_FSBASE:
+ update_pcb_bases(pcb);
+ error = copyout(&pcb->pcb_fsbase, uap->parms,
+ sizeof(pcb->pcb_fsbase));
+ break;
+
+ case AMD64_SET_FSBASE:
+ error = copyin(uap->parms, &a64base, sizeof(a64base));
+ if (!error) {
+ if (a64base < VM_MAXUSER_ADDRESS) {
+ set_pcb_flags(pcb, PCB_FULL_IRET);
+ pcb->pcb_fsbase = a64base;
+ td->td_frame->tf_fs = _ufssel;
+ } else
+ error = EINVAL;
+ }
+ break;
+
+ case AMD64_GET_GSBASE:
+ update_pcb_bases(pcb);
+ error = copyout(&pcb->pcb_gsbase, uap->parms,
+ sizeof(pcb->pcb_gsbase));
+ break;
+
+ case AMD64_SET_GSBASE:
+ error = copyin(uap->parms, &a64base, sizeof(a64base));
+ if (!error) {
+ if (a64base < VM_MAXUSER_ADDRESS) {
+ set_pcb_flags(pcb, PCB_FULL_IRET);
+ pcb->pcb_gsbase = a64base;
+ td->td_frame->tf_gs = _ugssel;
+ } else
+ error = EINVAL;
+ }
+ break;
+
+ case I386_GET_XFPUSTATE:
+ case AMD64_GET_XFPUSTATE:
+ if (a64xfpu.len > cpu_max_ext_state_size -
+ sizeof(struct savefpu))
+ return (EINVAL);
+ fpugetregs(td);
+ error = copyout((char *)(get_pcb_user_save_td(td) + 1),
+ a64xfpu.addr, a64xfpu.len);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+}
+
+int
+amd64_set_ioperm(td, uap)
+ struct thread *td;
+ struct i386_ioperm_args *uap;
+{
+ char *iomap;
+ struct amd64tss *tssp;
+ struct system_segment_descriptor *tss_sd;
+ struct pcb *pcb;
+ u_int i;
+ int error;
+
+ if ((error = priv_check(td, PRIV_IO)) != 0)
+ return (error);
+ if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
+ return (error);
+ if (uap->start > uap->start + uap->length ||
+ uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
+ return (EINVAL);
+
+ /*
+ * XXX
+ * While this is restricted to root, we should probably figure out
+ * whether any other driver is using this i/o address, as so not to
+ * cause confusion. This probably requires a global 'usage registry'.
+ */
+ pcb = td->td_pcb;
+ if (pcb->pcb_tssp == NULL) {
+ tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
+ ctob(IOPAGES+1), M_WAITOK);
+ iomap = (char *)&tssp[1];
+ memset(iomap, 0xff, IOPERM_BITMAP_SIZE);
+ critical_enter();
+ /* Takes care of tss_rsp0. */
+ memcpy(tssp, &common_tss[PCPU_GET(cpuid)],
+ sizeof(struct amd64tss));
+ tssp->tss_iobase = sizeof(*tssp);
+ pcb->pcb_tssp = tssp;
+ tss_sd = PCPU_GET(tss);
+ tss_sd->sd_lobase = (u_long)tssp & 0xffffff;
+ tss_sd->sd_hibase = ((u_long)tssp >> 24) & 0xfffffffffful;
+ tss_sd->sd_type = SDT_SYSTSS;
+ ltr(GSEL(GPROC0_SEL, SEL_KPL));
+ PCPU_SET(tssp, tssp);
+ critical_exit();
+ } else
+ iomap = (char *)&pcb->pcb_tssp[1];
+ for (i = uap->start; i < uap->start + uap->length; i++) {
+ if (uap->enable)
+ iomap[i >> 3] &= ~(1 << (i & 7));
+ else
+ iomap[i >> 3] |= (1 << (i & 7));
+ }
+ return (error);
+}
+
+int
+amd64_get_ioperm(td, uap)
+ struct thread *td;
+ struct i386_ioperm_args *uap;
+{
+ int i, state;
+ char *iomap;
+
+ if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
+ return (EINVAL);
+ if (td->td_pcb->pcb_tssp == NULL) {
+ uap->length = 0;
+ goto done;
+ }
+
+ iomap = (char *)&td->td_pcb->pcb_tssp[1];
+
+ i = uap->start;
+ state = (iomap[i >> 3] >> (i & 7)) & 1;
+ uap->enable = !state;
+ uap->length = 1;
+
+ for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
+ if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
+ break;
+ uap->length++;
+ }
+
+done:
+ return (0);
+}
+
+/*
+ * Update the GDT entry pointing to the LDT to point to the LDT of the
+ * current process.
+ */
+static void
+set_user_ldt(struct mdproc *mdp)
+{
+
+ *PCPU_GET(ldt) = mdp->md_ldt_sd;
+ lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
+}
+
+static void
+set_user_ldt_rv(struct vmspace *vmsp)
+{
+ struct thread *td;
+
+ td = curthread;
+ if (vmsp != td->td_proc->p_vmspace)
+ return;
+
+ set_user_ldt(&td->td_proc->p_md);
+}
+
+struct proc_ldt *
+user_ldt_alloc(struct proc *p, int force)
+{
+ struct proc_ldt *pldt, *new_ldt;
+ struct mdproc *mdp;
+ struct soft_segment_descriptor sldt;
+
+ mtx_assert(&dt_lock, MA_OWNED);
+ mdp = &p->p_md;
+ if (!force && mdp->md_ldt != NULL)
+ return (mdp->md_ldt);
+ mtx_unlock(&dt_lock);
+ new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
+ max_ldt_segment * sizeof(struct user_segment_descriptor),
+ M_WAITOK | M_ZERO);
+ new_ldt->ldt_refcnt = 1;
+ sldt.ssd_base = (uint64_t)new_ldt->ldt_base;
+ sldt.ssd_limit = max_ldt_segment *
+ sizeof(struct user_segment_descriptor) - 1;
+ sldt.ssd_type = SDT_SYSLDT;
+ sldt.ssd_dpl = SEL_KPL;
+ sldt.ssd_p = 1;
+ sldt.ssd_long = 0;
+ sldt.ssd_def32 = 0;
+ sldt.ssd_gran = 0;
+ mtx_lock(&dt_lock);
+ pldt = mdp->md_ldt;
+ if (pldt != NULL && !force) {
+ kmem_free(kernel_arena, (vm_offset_t)new_ldt->ldt_base,
+ max_ldt_segment * sizeof(struct user_segment_descriptor));
+ free(new_ldt, M_SUBPROC);
+ return (pldt);
+ }
+
+ if (pldt != NULL) {
+ bcopy(pldt->ldt_base, new_ldt->ldt_base, max_ldt_segment *
+ sizeof(struct user_segment_descriptor));
+ user_ldt_derefl(pldt);
+ }
+ critical_enter();
+ ssdtosyssd(&sldt, &p->p_md.md_ldt_sd);
+ atomic_thread_fence_rel();
+ mdp->md_ldt = new_ldt;
+ critical_exit();
+ smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv, NULL,
+ p->p_vmspace);
+
+ return (mdp->md_ldt);
+}
+
+void
+user_ldt_free(struct thread *td)
+{
+ struct proc *p = td->td_proc;
+ struct mdproc *mdp = &p->p_md;
+ struct proc_ldt *pldt;
+
+ mtx_lock(&dt_lock);
+ if ((pldt = mdp->md_ldt) == NULL) {
+ mtx_unlock(&dt_lock);
+ return;
+ }
+
+ critical_enter();
+ mdp->md_ldt = NULL;
+ atomic_thread_fence_rel();
+ bzero(&mdp->md_ldt_sd, sizeof(mdp->md_ldt_sd));
+ if (td == curthread)
+ lldt(GSEL(GNULL_SEL, SEL_KPL));
+ critical_exit();
+ user_ldt_deref(pldt);
+}
+
+static void
+user_ldt_derefl(struct proc_ldt *pldt)
+{
+
+ if (--pldt->ldt_refcnt == 0) {
+ kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
+ max_ldt_segment * sizeof(struct user_segment_descriptor));
+ free(pldt, M_SUBPROC);
+ }
+}
+
+void
+user_ldt_deref(struct proc_ldt *pldt)
+{
+
+ mtx_assert(&dt_lock, MA_OWNED);
+ user_ldt_derefl(pldt);
+ mtx_unlock(&dt_lock);
+}
+
+/*
+ * Note for the authors of compat layers (linux, etc): copyout() in
+ * the function below is not a problem since it presents data in
+ * arch-specific format (i.e. i386-specific in this case), not in
+ * the OS-specific one.
+ */
+int
+amd64_get_ldt(struct thread *td, struct i386_ldt_args *uap)
+{
+ struct proc_ldt *pldt;
+ struct user_segment_descriptor *lp;
+ uint64_t *data;
+ u_int i, num;
+ int error;
+
+#ifdef DEBUG
+ printf("amd64_get_ldt: start=%u num=%u descs=%p\n",
+ uap->start, uap->num, (void *)uap->descs);
+#endif
+
+ pldt = td->td_proc->p_md.md_ldt;
+ if (pldt == NULL || uap->start >= max_ldt_segment || uap->num == 0) {
+ td->td_retval[0] = 0;
+ return (0);
+ }
+ num = min(uap->num, max_ldt_segment - uap->start);
+ lp = &((struct user_segment_descriptor *)(pldt->ldt_base))[uap->start];
+ data = malloc(num * sizeof(struct user_segment_descriptor), M_TEMP,
+ M_WAITOK);
+ mtx_lock(&dt_lock);
+ for (i = 0; i < num; i++)
+ data[i] = ((volatile uint64_t *)lp)[i];
+ mtx_unlock(&dt_lock);
+ error = copyout(data, uap->descs, num *
+ sizeof(struct user_segment_descriptor));
+ free(data, M_TEMP);
+ if (error == 0)
+ td->td_retval[0] = num;
+ return (error);
+}
+
+int
+amd64_set_ldt(struct thread *td, struct i386_ldt_args *uap,
+ struct user_segment_descriptor *descs)
+{
+ struct mdproc *mdp;
+ struct proc_ldt *pldt;
+ struct user_segment_descriptor *dp;
+ struct proc *p;
+ u_int largest_ld, i;
+ int error;
+
+#ifdef DEBUG
+ printf("amd64_set_ldt: start=%u num=%u descs=%p\n",
+ uap->start, uap->num, (void *)uap->descs);
+#endif
+ mdp = &td->td_proc->p_md;
+ error = 0;
+
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
+ p = td->td_proc;
+ if (descs == NULL) {
+ /* Free descriptors */
+ if (uap->start == 0 && uap->num == 0)
+ uap->num = max_ldt_segment;
+ if (uap->num == 0)
+ return (EINVAL);
+ if ((pldt = mdp->md_ldt) == NULL ||
+ uap->start >= max_ldt_segment)
+ return (0);
+ largest_ld = uap->start + uap->num;
+ if (largest_ld > max_ldt_segment)
+ largest_ld = max_ldt_segment;
+ if (largest_ld < uap->start)
+ return (EINVAL);
+ mtx_lock(&dt_lock);
+ for (i = uap->start; i < largest_ld; i++)
+ ((volatile uint64_t *)(pldt->ldt_base))[i] = 0;
+ mtx_unlock(&dt_lock);
+ return (0);
+ }
+
+ if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
+ /* verify range of descriptors to modify */
+ largest_ld = uap->start + uap->num;
+ if (uap->start >= max_ldt_segment ||
+ largest_ld > max_ldt_segment ||
+ largest_ld < uap->start)
+ return (EINVAL);
+ }
+
+ /* Check descriptors for access violations */
+ for (i = 0; i < uap->num; i++) {
+ dp = &descs[i];
+
+ switch (dp->sd_type) {
+ case SDT_SYSNULL: /* system null */
+ dp->sd_p = 0;
+ break;
+ case SDT_SYS286TSS:
+ case SDT_SYSLDT:
+ case SDT_SYS286BSY:
+ case SDT_SYS286CGT:
+ case SDT_SYSTASKGT:
+ case SDT_SYS286IGT:
+ case SDT_SYS286TGT:
+ case SDT_SYSNULL2:
+ case SDT_SYSTSS:
+ case SDT_SYSNULL3:
+ case SDT_SYSBSY:
+ case SDT_SYSCGT:
+ case SDT_SYSNULL4:
+ case SDT_SYSIGT:
+ case SDT_SYSTGT:
+ return (EACCES);
+
+ /* memory segment types */
+ case SDT_MEMEC: /* memory execute only conforming */
+ case SDT_MEMEAC: /* memory execute only accessed conforming */
+ case SDT_MEMERC: /* memory execute read conforming */
+ case SDT_MEMERAC: /* memory execute read accessed conforming */
+ /* Must be "present" if executable and conforming. */
+ if (dp->sd_p == 0)
+ return (EACCES);
+ break;
+ case SDT_MEMRO: /* memory read only */
+ case SDT_MEMROA: /* memory read only accessed */
+ case SDT_MEMRW: /* memory read write */
+ case SDT_MEMRWA: /* memory read write accessed */
+ case SDT_MEMROD: /* memory read only expand dwn limit */
+ case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
+ case SDT_MEMRWD: /* memory read write expand dwn limit */
+ case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
+ case SDT_MEME: /* memory execute only */
+ case SDT_MEMEA: /* memory execute only accessed */
+ case SDT_MEMER: /* memory execute read */
+ case SDT_MEMERA: /* memory execute read accessed */
+ break;
+ default:
+ return(EINVAL);
+ }
+
+ /* Only user (ring-3) descriptors may be present. */
+ if ((dp->sd_p != 0) && (dp->sd_dpl != SEL_UPL))
+ return (EACCES);
+ }
+
+ if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
+ /* Allocate a free slot */
+ mtx_lock(&dt_lock);
+ pldt = user_ldt_alloc(p, 0);
+ if (pldt == NULL) {
+ mtx_unlock(&dt_lock);
+ return (ENOMEM);
+ }
+
+ /*
+ * start scanning a bit up to leave room for NVidia and
+ * Wine, which still use the "Blat" method of allocation.
+ */
+ i = 16;
+ dp = &((struct user_segment_descriptor *)(pldt->ldt_base))[i];
+ for (; i < max_ldt_segment; ++i, ++dp) {
+ if (dp->sd_type == SDT_SYSNULL)
+ break;
+ }
+ if (i >= max_ldt_segment) {
+ mtx_unlock(&dt_lock);
+ return (ENOSPC);
+ }
+ uap->start = i;
+ error = amd64_set_ldt_data(td, i, 1, descs);
+ mtx_unlock(&dt_lock);
+ } else {
+ largest_ld = uap->start + uap->num;
+ if (largest_ld > max_ldt_segment)
+ return (EINVAL);
+ mtx_lock(&dt_lock);
+ if (user_ldt_alloc(p, 0) != NULL) {
+ error = amd64_set_ldt_data(td, uap->start, uap->num,
+ descs);
+ }
+ mtx_unlock(&dt_lock);
+ }
+ if (error == 0)
+ td->td_retval[0] = uap->start;
+ return (error);
+}
+
+int
+amd64_set_ldt_data(struct thread *td, int start, int num,
+ struct user_segment_descriptor *descs)
+{
+ struct mdproc *mdp;
+ struct proc_ldt *pldt;
+ volatile uint64_t *dst, *src;
+ int i;
+
+ mtx_assert(&dt_lock, MA_OWNED);
+
+ mdp = &td->td_proc->p_md;
+ pldt = mdp->md_ldt;
+ dst = (volatile uint64_t *)(pldt->ldt_base);
+ src = (volatile uint64_t *)descs;
+ for (i = 0; i < num; i++)
+ dst[start + i] = src[i];
+ return (0);
+}
Index: sys/amd64/amd64/trap.c.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/trap.c.orig
@@ -0,0 +1,947 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (C) 1994, David Greenman
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the University of Utah, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * AMD64 Trap and System call handling
+ */
+
+#include "opt_clock.h"
+#include "opt_cpu.h"
+#include "opt_hwpmc_hooks.h"
+#include "opt_isa.h"
+#include "opt_kdb.h"
+#include "opt_stack.h"
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/pioctl.h>
+#include <sys/ptrace.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/resourcevar.h>
+#include <sys/signalvar.h>
+#include <sys/syscall.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/uio.h>
+#include <sys/vmmeter.h>
+#ifdef HWPMC_HOOKS
+#include <sys/pmckern.h>
+PMC_SOFT_DEFINE( , , page_fault, all);
+PMC_SOFT_DEFINE( , , page_fault, read);
+PMC_SOFT_DEFINE( , , page_fault, write);
+#endif
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_extern.h>
+
+#include <machine/cpu.h>
+#include <machine/intr_machdep.h>
+#include <x86/mca.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+#include <machine/stack.h>
+#include <machine/tss.h>
+
+#ifdef KDTRACE_HOOKS
+#include <sys/dtrace_bsd.h>
+#endif
+
+void __noinline trap(struct trapframe *frame);
+void trap_check(struct trapframe *frame);
+void dblfault_handler(struct trapframe *frame);
+
+static int trap_pfault(struct trapframe *, int);
+static void trap_fatal(struct trapframe *, vm_offset_t);
+
+#define MAX_TRAP_MSG 32
+static char *trap_msg[] = {
+ "", /* 0 unused */
+ "privileged instruction fault", /* 1 T_PRIVINFLT */
+ "", /* 2 unused */
+ "breakpoint instruction fault", /* 3 T_BPTFLT */
+ "", /* 4 unused */
+ "", /* 5 unused */
+ "arithmetic trap", /* 6 T_ARITHTRAP */
+ "", /* 7 unused */
+ "", /* 8 unused */
+ "general protection fault", /* 9 T_PROTFLT */
+ "trace trap", /* 10 T_TRCTRAP */
+ "", /* 11 unused */
+ "page fault", /* 12 T_PAGEFLT */
+ "", /* 13 unused */
+ "alignment fault", /* 14 T_ALIGNFLT */
+ "", /* 15 unused */
+ "", /* 16 unused */
+ "", /* 17 unused */
+ "integer divide fault", /* 18 T_DIVIDE */
+ "non-maskable interrupt trap", /* 19 T_NMI */
+ "overflow trap", /* 20 T_OFLOW */
+ "FPU bounds check fault", /* 21 T_BOUND */
+ "FPU device not available", /* 22 T_DNA */
+ "double fault", /* 23 T_DOUBLEFLT */
+ "FPU operand fetch fault", /* 24 T_FPOPFLT */
+ "invalid TSS fault", /* 25 T_TSSFLT */
+ "segment not present fault", /* 26 T_SEGNPFLT */
+ "stack fault", /* 27 T_STKFLT */
+ "machine check trap", /* 28 T_MCHK */
+ "SIMD floating-point exception", /* 29 T_XMMFLT */
+ "reserved (unknown) fault", /* 30 T_RESERVED */
+ "", /* 31 unused (reserved) */
+ "DTrace pid return trap", /* 32 T_DTRACE_RET */
+};
+
+static int prot_fault_translation;
+SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
+ &prot_fault_translation, 0,
+ "Select signal to deliver on protection fault");
+static int uprintf_signal;
+SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RWTUN,
+ &uprintf_signal, 0,
+ "Print debugging information on trap signal to ctty");
+
+/*
+ * Exception, fault, and trap interface to the FreeBSD kernel.
+ * This common code is called from assembly language IDT gate entry
+ * routines that prepare a suitable stack frame, and restore this
+ * frame after the exception has been processed.
+ */
+
+void
+trap(struct trapframe *frame)
+{
+ ksiginfo_t ksi;
+ struct thread *td;
+ struct proc *p;
+ register_t addr;
+#ifdef KDB
+ register_t dr6;
+#endif
+ int signo, ucode;
+ u_int type;
+
+ td = curthread;
+ p = td->td_proc;
+ signo = 0;
+ ucode = 0;
+ addr = 0;
+
+ VM_CNT_INC(v_trap);
+ type = frame->tf_trapno;
+
+#ifdef SMP
+ /* Handler for NMI IPIs used for stopping CPUs. */
+ if (type == T_NMI && ipi_nmi_handler() == 0)
+ return;
+#endif
+
+#ifdef KDB
+ if (kdb_active) {
+ kdb_reenter();
+ return;
+ }
+#endif
+
+ if (type == T_RESERVED) {
+ trap_fatal(frame, 0);
+ return;
+ }
+
+ if (type == T_NMI) {
+#ifdef HWPMC_HOOKS
+ /*
+ * CPU PMCs interrupt using an NMI. If the PMC module is
+ * active, pass the 'rip' value to the PMC module's interrupt
+ * handler. A non-zero return value from the handler means that
+ * the NMI was consumed by it and we can return immediately.
+ */
+ if (pmc_intr != NULL &&
+ (*pmc_intr)(PCPU_GET(cpuid), frame) != 0)
+ return;
+#endif
+
+#ifdef STACK
+ if (stack_nmi_handler(frame) != 0)
+ return;
+#endif
+ }
+
+ if (type == T_MCHK) {
+ mca_intr();
+ return;
+ }
+
+ if ((frame->tf_rflags & PSL_I) == 0) {
+ /*
+ * Buggy application or kernel code has disabled
+ * interrupts and then trapped. Enabling interrupts
+ * now is wrong, but it is better than running with
+ * interrupts disabled until they are accidentally
+ * enabled later.
+ */
+ if (TRAPF_USERMODE(frame))
+ uprintf(
+ "pid %ld (%s): trap %d with interrupts disabled\n",
+ (long)curproc->p_pid, curthread->td_name, type);
+ else if (type != T_NMI && type != T_BPTFLT &&
+ type != T_TRCTRAP) {
+ /*
+ * XXX not quite right, since this may be for a
+ * multiple fault in user mode.
+ */
+ printf("kernel trap %d with interrupts disabled\n",
+ type);
+
+ /*
+ * We shouldn't enable interrupts while holding a
+ * spin lock.
+ */
+ if (td->td_md.md_spinlock_count == 0)
+ enable_intr();
+ }
+ }
+
+ if (TRAPF_USERMODE(frame)) {
+ /* user trap */
+
+ td->td_pticks = 0;
+ td->td_frame = frame;
+ addr = frame->tf_rip;
+ if (td->td_cowgen != p->p_cowgen)
+ thread_cow_update(td);
+
+ switch (type) {
+ case T_PRIVINFLT: /* privileged instruction fault */
+ signo = SIGILL;
+ ucode = ILL_PRVOPC;
+ break;
+
+ case T_BPTFLT: /* bpt instruction fault */
+ case T_TRCTRAP: /* trace trap */
+ enable_intr();
+#ifdef KDTRACE_HOOKS
+ if (type == T_BPTFLT) {
+ if (dtrace_pid_probe_ptr != NULL &&
+ dtrace_pid_probe_ptr(frame) == 0)
+ return;
+ }
+#endif
+ frame->tf_rflags &= ~PSL_T;
+ signo = SIGTRAP;
+ ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
+ break;
+
+ case T_ARITHTRAP: /* arithmetic trap */
+ ucode = fputrap_x87();
+ if (ucode == -1)
+ return;
+ signo = SIGFPE;
+ break;
+
+ case T_PROTFLT: /* general protection fault */
+ signo = SIGBUS;
+ ucode = BUS_OBJERR;
+ break;
+ case T_STKFLT: /* stack fault */
+ case T_SEGNPFLT: /* segment not present fault */
+ signo = SIGBUS;
+ ucode = BUS_ADRERR;
+ break;
+ case T_TSSFLT: /* invalid TSS fault */
+ signo = SIGBUS;
+ ucode = BUS_OBJERR;
+ break;
+ case T_ALIGNFLT:
+ signo = SIGBUS;
+ ucode = BUS_ADRALN;
+ break;
+ case T_DOUBLEFLT: /* double fault */
+ default:
+ signo = SIGBUS;
+ ucode = BUS_OBJERR;
+ break;
+
+ case T_PAGEFLT: /* page fault */
+ /*
+ * Emulator can take care about this trap?
+ */
+ if (*p->p_sysent->sv_trap != NULL &&
+ (*p->p_sysent->sv_trap)(td) == 0)
+ return;
+
+ addr = frame->tf_addr;
+ signo = trap_pfault(frame, TRUE);
+ if (signo == -1)
+ return;
+ if (signo == 0)
+ goto userret;
+ if (signo == SIGSEGV) {
+ ucode = SEGV_MAPERR;
+ } else if (prot_fault_translation == 0) {
+ /*
+ * Autodetect. This check also covers
+ * the images without the ABI-tag ELF
+ * note.
+ */
+ if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
+ p->p_osrel >= P_OSREL_SIGSEGV) {
+ signo = SIGSEGV;
+ ucode = SEGV_ACCERR;
+ } else {
+ signo = SIGBUS;
+ ucode = BUS_PAGE_FAULT;
+ }
+ } else if (prot_fault_translation == 1) {
+ /*
+ * Always compat mode.
+ */
+ signo = SIGBUS;
+ ucode = BUS_PAGE_FAULT;
+ } else {
+ /*
+ * Always SIGSEGV mode.
+ */
+ signo = SIGSEGV;
+ ucode = SEGV_ACCERR;
+ }
+ break;
+
+ case T_DIVIDE: /* integer divide fault */
+ ucode = FPE_INTDIV;
+ signo = SIGFPE;
+ break;
+
+#ifdef DEV_ISA
+ case T_NMI:
+ nmi_handle_intr(type, frame);
+ return;
+#endif
+
+ case T_OFLOW: /* integer overflow fault */
+ ucode = FPE_INTOVF;
+ signo = SIGFPE;
+ break;
+
+ case T_BOUND: /* bounds check fault */
+ ucode = FPE_FLTSUB;
+ signo = SIGFPE;
+ break;
+
+ case T_DNA:
+ /* transparent fault (due to context switch "late") */
+ KASSERT(PCB_USER_FPU(td->td_pcb),
+ ("kernel FPU ctx has leaked"));
+ fpudna();
+ return;
+
+ case T_FPOPFLT: /* FPU operand fetch fault */
+ ucode = ILL_COPROC;
+ signo = SIGILL;
+ break;
+
+ case T_XMMFLT: /* SIMD floating-point exception */
+ ucode = fputrap_sse();
+ if (ucode == -1)
+ return;
+ signo = SIGFPE;
+ break;
+#ifdef KDTRACE_HOOKS
+ case T_DTRACE_RET:
+ enable_intr();
+ if (dtrace_return_probe_ptr != NULL)
+ dtrace_return_probe_ptr(frame);
+ return;
+#endif
+ }
+ } else {
+ /* kernel trap */
+
+ KASSERT(cold || td->td_ucred != NULL,
+ ("kernel trap doesn't have ucred"));
+ switch (type) {
+ case T_PAGEFLT: /* page fault */
+ (void) trap_pfault(frame, FALSE);
+ return;
+
+ case T_DNA:
+ if (PCB_USER_FPU(td->td_pcb))
+ panic("Unregistered use of FPU in kernel");
+ fpudna();
+ return;
+
+ case T_ARITHTRAP: /* arithmetic trap */
+ case T_XMMFLT: /* SIMD floating-point exception */
+ case T_FPOPFLT: /* FPU operand fetch fault */
+ /*
+ * For now, supporting kernel handler
+ * registration for FPU traps is overkill.
+ */
+ trap_fatal(frame, 0);
+ return;
+
+ case T_STKFLT: /* stack fault */
+ case T_PROTFLT: /* general protection fault */
+ case T_SEGNPFLT: /* segment not present fault */
+ if (td->td_intr_nesting_level != 0)
+ break;
+
+ /*
+ * Invalid segment selectors and out of bounds
+ * %rip's and %rsp's can be set up in user mode.
+ * This causes a fault in kernel mode when the
+ * kernel tries to return to user mode. We want
+ * to get this fault so that we can fix the
+ * problem here and not have to check all the
+ * selectors and pointers when the user changes
+ * them.
+ */
+ if (frame->tf_rip == (long)doreti_iret) {
+ frame->tf_rip = (long)doreti_iret_fault;
+ return;
+ }
+ if (frame->tf_rip == (long)ld_ds) {
+ frame->tf_rip = (long)ds_load_fault;
+ return;
+ }
+ if (frame->tf_rip == (long)ld_es) {
+ frame->tf_rip = (long)es_load_fault;
+ return;
+ }
+ if (frame->tf_rip == (long)ld_fs) {
+ frame->tf_rip = (long)fs_load_fault;
+ return;
+ }
+ if (frame->tf_rip == (long)ld_gs) {
+ frame->tf_rip = (long)gs_load_fault;
+ return;
+ }
+ if (frame->tf_rip == (long)ld_gsbase) {
+ frame->tf_rip = (long)gsbase_load_fault;
+ return;
+ }
+ if (frame->tf_rip == (long)ld_fsbase) {
+ frame->tf_rip = (long)fsbase_load_fault;
+ return;
+ }
+ if (curpcb->pcb_onfault != NULL) {
+ frame->tf_rip = (long)curpcb->pcb_onfault;
+ return;
+ }
+ break;
+
+ case T_TSSFLT:
+ /*
+ * PSL_NT can be set in user mode and isn't cleared
+ * automatically when the kernel is entered. This
+ * causes a TSS fault when the kernel attempts to
+ * `iret' because the TSS link is uninitialized. We
+ * want to get this fault so that we can fix the
+ * problem here and not every time the kernel is
+ * entered.
+ */
+ if (frame->tf_rflags & PSL_NT) {
+ frame->tf_rflags &= ~PSL_NT;
+ return;
+ }
+ break;
+
+ case T_TRCTRAP: /* trace trap */
+ /*
+ * Ignore debug register trace traps due to
+ * accesses in the user's address space, which
+ * can happen under several conditions such as
+ * if a user sets a watchpoint on a buffer and
+ * then passes that buffer to a system call.
+ * We still want to get TRCTRAPS for addresses
+ * in kernel space because that is useful when
+ * debugging the kernel.
+ */
+ if (user_dbreg_trap()) {
+ /*
+ * Reset breakpoint bits because the
+ * processor doesn't
+ */
+ load_dr6(rdr6() & ~0xf);
+ return;
+ }
+ /*
+ * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
+ */
+ case T_BPTFLT:
+ /*
+ * If KDB is enabled, let it handle the debugger trap.
+ * Otherwise, debugger traps "can't happen".
+ */
+#ifdef KDB
+ /* XXX %dr6 is not quite reentrant. */
+ dr6 = rdr6();
+ load_dr6(dr6 & ~0x4000);
+ if (kdb_trap(type, dr6, frame))
+ return;
+#endif
+ break;
+
+#ifdef DEV_ISA
+ case T_NMI:
+ nmi_handle_intr(type, frame);
+ return;
+#endif
+ }
+
+ trap_fatal(frame, 0);
+ return;
+ }
+
+ /* Translate fault for emulators (e.g. Linux) */
+ if (*p->p_sysent->sv_transtrap != NULL)
+ signo = (*p->p_sysent->sv_transtrap)(signo, type);
+
+ ksiginfo_init_trap(&ksi);
+ ksi.ksi_signo = signo;
+ ksi.ksi_code = ucode;
+ ksi.ksi_trapno = type;
+ ksi.ksi_addr = (void *)addr;
+ if (uprintf_signal) {
+ uprintf("pid %d comm %s: signal %d err %lx code %d type %d "
+ "addr 0x%lx rsp 0x%lx rip 0x%lx "
+ "<%02x %02x %02x %02x %02x %02x %02x %02x>\n",
+ p->p_pid, p->p_comm, signo, frame->tf_err, ucode, type,
+ addr, frame->tf_rsp, frame->tf_rip,
+ fubyte((void *)(frame->tf_rip + 0)),
+ fubyte((void *)(frame->tf_rip + 1)),
+ fubyte((void *)(frame->tf_rip + 2)),
+ fubyte((void *)(frame->tf_rip + 3)),
+ fubyte((void *)(frame->tf_rip + 4)),
+ fubyte((void *)(frame->tf_rip + 5)),
+ fubyte((void *)(frame->tf_rip + 6)),
+ fubyte((void *)(frame->tf_rip + 7)));
+ }
+ KASSERT((read_rflags() & PSL_I) != 0, ("interrupts disabled"));
+ trapsignal(td, &ksi);
+userret:
+ userret(td, frame);
+ KASSERT(PCB_USER_FPU(td->td_pcb),
+ ("Return from trap with kernel FPU ctx leaked"));
+}
+
+/*
+ * Ensure that we ignore any DTrace-induced faults. This function cannot
+ * be instrumented, so it cannot generate such faults itself.
+ */
+void
+trap_check(struct trapframe *frame)
+{
+
+#ifdef KDTRACE_HOOKS
+ if (dtrace_trap_func != NULL &&
+ (*dtrace_trap_func)(frame, frame->tf_trapno) != 0)
+ return;
+#endif
+ trap(frame);
+}
+
+static int
+trap_pfault(struct trapframe *frame, int usermode)
+{
+ struct thread *td;
+ struct proc *p;
+ vm_map_t map;
+ vm_offset_t va;
+ int rv;
+ vm_prot_t ftype;
+ vm_offset_t eva;
+
+ td = curthread;
+ p = td->td_proc;
+ eva = frame->tf_addr;
+
+ if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
+ /*
+ * Due to both processor errata and lazy TLB invalidation when
+ * access restrictions are removed from virtual pages, memory
+ * accesses that are allowed by the physical mapping layer may
+ * nonetheless cause one spurious page fault per virtual page.
+ * When the thread is executing a "no faulting" section that
+ * is bracketed by vm_fault_{disable,enable}_pagefaults(),
+ * every page fault is treated as a spurious page fault,
+ * unless it accesses the same virtual address as the most
+ * recent page fault within the same "no faulting" section.
+ */
+ if (td->td_md.md_spurflt_addr != eva ||
+ (td->td_pflags & TDP_RESETSPUR) != 0) {
+ /*
+ * Do nothing to the TLB. A stale TLB entry is
+ * flushed automatically by a page fault.
+ */
+ td->td_md.md_spurflt_addr = eva;
+ td->td_pflags &= ~TDP_RESETSPUR;
+ return (0);
+ }
+ } else {
+ /*
+ * If we get a page fault while in a critical section, then
+ * it is most likely a fatal kernel page fault. The kernel
+ * is already going to panic trying to get a sleep lock to
+ * do the VM lookup, so just consider it a fatal trap so the
+ * kernel can print out a useful trap message and even get
+ * to the debugger.
+ *
+ * If we get a page fault while holding a non-sleepable
+ * lock, then it is most likely a fatal kernel page fault.
+ * If WITNESS is enabled, then it's going to whine about
+ * bogus LORs with various VM locks, so just skip to the
+ * fatal trap handling directly.
+ */
+ if (td->td_critnest != 0 ||
+ WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
+ "Kernel page fault") != 0) {
+ trap_fatal(frame, eva);
+ return (-1);
+ }
+ }
+ va = trunc_page(eva);
+ if (va >= VM_MIN_KERNEL_ADDRESS) {
+ /*
+ * Don't allow user-mode faults in kernel address space.
+ */
+ if (usermode)
+ return (SIGSEGV);
+
+ map = kernel_map;
+ } else {
+ map = &p->p_vmspace->vm_map;
+
+ /*
+ * When accessing a usermode address, kernel must be
+ * ready to accept the page fault, and provide a
+ * handling routine. Since accessing the address
+ * without the handler is a bug, do not try to handle
+ * it normally, and panic immediately.
+ */
+ if (!usermode && (td->td_intr_nesting_level != 0 ||
+ curpcb->pcb_onfault == NULL)) {
+ trap_fatal(frame, eva);
+ return (-1);
+ }
+ }
+
+ /*
+ * If the trap was caused by errant bits in the PTE then panic.
+ */
+ if (frame->tf_err & PGEX_RSV) {
+ trap_fatal(frame, eva);
+ return (-1);
+ }
+
+ /*
+ * PGEX_I is defined only if the execute disable bit capability is
+ * supported and enabled.
+ */
+ if (frame->tf_err & PGEX_W)
+ ftype = VM_PROT_WRITE;
+ else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
+ ftype = VM_PROT_EXECUTE;
+ else
+ ftype = VM_PROT_READ;
+
+ /* Fault in the page. */
+ rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
+ if (rv == KERN_SUCCESS) {
+#ifdef HWPMC_HOOKS
+ if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
+ PMC_SOFT_CALL_TF( , , page_fault, all, frame);
+ if (ftype == VM_PROT_READ)
+ PMC_SOFT_CALL_TF( , , page_fault, read,
+ frame);
+ else
+ PMC_SOFT_CALL_TF( , , page_fault, write,
+ frame);
+ }
+#endif
+ return (0);
+ }
+ if (!usermode) {
+ if (td->td_intr_nesting_level == 0 &&
+ curpcb->pcb_onfault != NULL) {
+ frame->tf_rip = (long)curpcb->pcb_onfault;
+ return (0);
+ }
+ trap_fatal(frame, eva);
+ return (-1);
+ }
+ return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
+}
+
+static void
+trap_fatal(frame, eva)
+ struct trapframe *frame;
+ vm_offset_t eva;
+{
+ int code, ss;
+ u_int type;
+ struct soft_segment_descriptor softseg;
+ char *msg;
+
+ code = frame->tf_err;
+ type = frame->tf_trapno;
+ sdtossd(&gdt[NGDT * PCPU_GET(cpuid) + IDXSEL(frame->tf_cs & 0xffff)],
+ &softseg);
+
+ if (type <= MAX_TRAP_MSG)
+ msg = trap_msg[type];
+ else
+ msg = "UNKNOWN";
+ printf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
+ TRAPF_USERMODE(frame) ? "user" : "kernel");
+#ifdef SMP
+ /* two separate prints in case of a trap on an unmapped page */
+ printf("cpuid = %d; ", PCPU_GET(cpuid));
+ printf("apic id = %02x\n", PCPU_GET(apic_id));
+#endif
+ if (type == T_PAGEFLT) {
+ printf("fault virtual address = 0x%lx\n", eva);
+ printf("fault code = %s %s %s, %s\n",
+ code & PGEX_U ? "user" : "supervisor",
+ code & PGEX_W ? "write" : "read",
+ code & PGEX_I ? "instruction" : "data",
+ code & PGEX_RSV ? "reserved bits in PTE" :
+ code & PGEX_P ? "protection violation" : "page not present");
+ }
+ printf("instruction pointer = 0x%lx:0x%lx\n",
+ frame->tf_cs & 0xffff, frame->tf_rip);
+ ss = frame->tf_ss & 0xffff;
+ printf("stack pointer = 0x%x:0x%lx\n", ss, frame->tf_rsp);
+ printf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
+ printf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
+ softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
+ printf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
+ softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
+ softseg.ssd_gran);
+ printf("processor eflags = ");
+ if (frame->tf_rflags & PSL_T)
+ printf("trace trap, ");
+ if (frame->tf_rflags & PSL_I)
+ printf("interrupt enabled, ");
+ if (frame->tf_rflags & PSL_NT)
+ printf("nested task, ");
+ if (frame->tf_rflags & PSL_RF)
+ printf("resume, ");
+ printf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
+ printf("current process = %d (%s)\n",
+ curproc->p_pid, curthread->td_name);
+
+#ifdef KDB
+ if (debugger_on_panic || kdb_active)
+ if (kdb_trap(type, 0, frame))
+ return;
+#endif
+ printf("trap number = %d\n", type);
+ if (type <= MAX_TRAP_MSG)
+ panic("%s", trap_msg[type]);
+ else
+ panic("unknown/reserved trap");
+}
+
+/*
+ * Double fault handler. Called when a fault occurs while writing
+ * a frame for a trap/exception onto the stack. This usually occurs
+ * when the stack overflows (such is the case with infinite recursion,
+ * for example).
+ */
+void
+dblfault_handler(struct trapframe *frame)
+{
+#ifdef KDTRACE_HOOKS
+ if (dtrace_doubletrap_func != NULL)
+ (*dtrace_doubletrap_func)();
+#endif
+ printf("\nFatal double fault\n"
+ "rip %#lx rsp %#lx rbp %#lx\n"
+ "rax %#lx rdx %#lx rbx %#lx\n"
+ "rcx %#lx rsi %#lx rdi %#lx\n"
+ "r8 %#lx r9 %#lx r10 %#lx\n"
+ "r11 %#lx r12 %#lx r13 %#lx\n"
+ "r14 %#lx r15 %#lx rflags %#lx\n"
+ "cs %#lx ss %#lx ds %#hx es %#hx fs %#hx gs %#hx\n"
+ "fsbase %#lx gsbase %#lx kgsbase %#lx\n",
+ frame->tf_rip, frame->tf_rsp, frame->tf_rbp,
+ frame->tf_rax, frame->tf_rdx, frame->tf_rbx,
+ frame->tf_rcx, frame->tf_rdi, frame->tf_rsi,
+ frame->tf_r8, frame->tf_r9, frame->tf_r10,
+ frame->tf_r11, frame->tf_r12, frame->tf_r13,
+ frame->tf_r14, frame->tf_r15, frame->tf_rflags,
+ frame->tf_cs, frame->tf_ss, frame->tf_ds, frame->tf_es,
+ frame->tf_fs, frame->tf_gs,
+ rdmsr(MSR_FSBASE), rdmsr(MSR_GSBASE), rdmsr(MSR_KGSBASE));
+#ifdef SMP
+ /* two separate prints in case of a trap on an unmapped page */
+ printf("cpuid = %d; ", PCPU_GET(cpuid));
+ printf("apic id = %02x\n", PCPU_GET(apic_id));
+#endif
+ panic("double fault");
+}
+
+int
+cpu_fetch_syscall_args(struct thread *td)
+{
+ struct proc *p;
+ struct trapframe *frame;
+ register_t *argp;
+ struct syscall_args *sa;
+ caddr_t params;
+ int reg, regcnt, error;
+
+ p = td->td_proc;
+ frame = td->td_frame;
+ sa = &td->td_sa;
+ reg = 0;
+ regcnt = 6;
+
+ params = (caddr_t)frame->tf_rsp + sizeof(register_t);
+ sa->code = frame->tf_rax;
+
+ if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
+ sa->code = frame->tf_rdi;
+ reg++;
+ regcnt--;
+ }
+ if (p->p_sysent->sv_mask)
+ sa->code &= p->p_sysent->sv_mask;
+
+ if (sa->code >= p->p_sysent->sv_size)
+ sa->callp = &p->p_sysent->sv_table[0];
+ else
+ sa->callp = &p->p_sysent->sv_table[sa->code];
+
+ sa->narg = sa->callp->sy_narg;
+ KASSERT(sa->narg <= sizeof(sa->args) / sizeof(sa->args[0]),
+ ("Too many syscall arguments!"));
+ error = 0;
+ argp = &frame->tf_rdi;
+ argp += reg;
+ bcopy(argp, sa->args, sizeof(sa->args[0]) * regcnt);
+ if (sa->narg > regcnt) {
+ KASSERT(params != NULL, ("copyin args with no params!"));
+ error = copyin(params, &sa->args[regcnt],
+ (sa->narg - regcnt) * sizeof(sa->args[0]));
+ }
+
+ if (error == 0) {
+ td->td_retval[0] = 0;
+ td->td_retval[1] = frame->tf_rdx;
+ }
+
+ return (error);
+}
+
+#include "../../kern/subr_syscall.c"
+
+/*
+ * System call handler for native binaries. The trap frame is already
+ * set up by the assembler trampoline and a pointer to it is saved in
+ * td_frame.
+ */
+void
+amd64_syscall(struct thread *td, int traced)
+{
+ int error;
+ ksiginfo_t ksi;
+
+#ifdef DIAGNOSTIC
+ if (!TRAPF_USERMODE(td->td_frame)) {
+ panic("syscall");
+ /* NOT REACHED */
+ }
+#endif
+ error = syscallenter(td);
+
+ /*
+ * Traced syscall.
+ */
+ if (__predict_false(traced)) {
+ td->td_frame->tf_rflags &= ~PSL_T;
+ ksiginfo_init_trap(&ksi);
+ ksi.ksi_signo = SIGTRAP;
+ ksi.ksi_code = TRAP_TRACE;
+ ksi.ksi_addr = (void *)td->td_frame->tf_rip;
+ trapsignal(td, &ksi);
+ }
+
+ KASSERT(PCB_USER_FPU(td->td_pcb),
+ ("System call %s returning with kernel FPU ctx leaked",
+ syscallname(td->td_proc, td->td_sa.code)));
+ KASSERT(td->td_pcb->pcb_save == get_pcb_user_save_td(td),
+ ("System call %s returning with mangled pcb_save",
+ syscallname(td->td_proc, td->td_sa.code)));
+ KASSERT(td->td_md.md_invl_gen.gen == 0,
+ ("System call %s returning with leaked invl_gen %lu",
+ syscallname(td->td_proc, td->td_sa.code),
+ td->td_md.md_invl_gen.gen));
+
+ syscallret(td, error);
+
+ /*
+ * If the user-supplied value of %rip is not a canonical
+ * address, then some CPUs will trigger a ring 0 #GP during
+ * the sysret instruction. However, the fault handler would
+ * execute in ring 0 with the user's %gs and %rsp which would
+ * not be safe. Instead, use the full return path which
+ * catches the problem safely.
+ */
+ if (__predict_false(td->td_frame->tf_rip >= VM_MAXUSER_ADDRESS))
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
+}
Index: sys/amd64/amd64/vm_machdep.c.orig
===================================================================
--- /dev/null
+++ sys/amd64/amd64/vm_machdep.c.orig
@@ -0,0 +1,725 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1982, 1986 The Regents of the University of California.
+ * Copyright (c) 1989, 1990 William Jolitz
+ * Copyright (c) 1994 John Dyson
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department, and William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
+ * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_isa.h"
+#include "opt_cpu.h"
+#include "opt_compat.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/pioctl.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/unistd.h>
+#include <sys/vnode.h>
+#include <sys/vmmeter.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/smp.h>
+#include <machine/specialreg.h>
+#include <machine/tss.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_param.h>
+
+#include <isa/isareg.h>
+
+static void cpu_reset_real(void);
+#ifdef SMP
+static void cpu_reset_proxy(void);
+static u_int cpu_reset_proxyid;
+static volatile u_int cpu_reset_proxy_active;
+#endif
+
+_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
+ "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
+_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
+ "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");
+_Static_assert(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
+ "OFFSETOF_MONINORBUF does not correspond with offset of pc_monitorbuf.");
+
+struct savefpu *
+get_pcb_user_save_td(struct thread *td)
+{
+ vm_offset_t p;
+
+ p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
+ roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
+ KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
+ return ((struct savefpu *)p);
+}
+
+struct savefpu *
+get_pcb_user_save_pcb(struct pcb *pcb)
+{
+ vm_offset_t p;
+
+ p = (vm_offset_t)(pcb + 1);
+ return ((struct savefpu *)p);
+}
+
+struct pcb *
+get_pcb_td(struct thread *td)
+{
+ vm_offset_t p;
+
+ p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
+ roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
+ sizeof(struct pcb);
+ return ((struct pcb *)p);
+}
+
+void *
+alloc_fpusave(int flags)
+{
+ void *res;
+ struct savefpu_ymm *sf;
+
+ res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
+ if (use_xsave) {
+ sf = (struct savefpu_ymm *)res;
+ bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
+ sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
+ }
+ return (res);
+}
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the pcb, set up the stack so that the child
+ * ready to run and return to user mode.
+ */
+void
+cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
+{
+ struct proc *p1;
+ struct pcb *pcb2;
+ struct mdproc *mdp1, *mdp2;
+ struct proc_ldt *pldt;
+
+ p1 = td1->td_proc;
+ if ((flags & RFPROC) == 0) {
+ if ((flags & RFMEM) == 0) {
+ /* unshare user LDT */
+ mdp1 = &p1->p_md;
+ mtx_lock(&dt_lock);
+ if ((pldt = mdp1->md_ldt) != NULL &&
+ pldt->ldt_refcnt > 1 &&
+ user_ldt_alloc(p1, 1) == NULL)
+ panic("could not copy LDT");
+ mtx_unlock(&dt_lock);
+ }
+ return;
+ }
+
+ /* Ensure that td1's pcb is up to date. */
+ fpuexit(td1);
+ update_pcb_bases(td1->td_pcb);
+
+ /* Point the pcb to the top of the stack */
+ pcb2 = get_pcb_td(td2);
+ td2->td_pcb = pcb2;
+
+ /* Copy td1's pcb */
+ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
+
+ /* Properly initialize pcb_save */
+ pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
+ bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
+ cpu_max_ext_state_size);
+
+ /* Point mdproc and then copy over td1's contents */
+ mdp2 = &p2->p_md;
+ bcopy(&p1->p_md, mdp2, sizeof(*mdp2));
+
+ /*
+ * Create a new fresh stack for the new process.
+ * Copy the trap frame for the return to user mode as if from a
+ * syscall. This copies most of the user mode register values.
+ */
+ td2->td_frame = (struct trapframe *)td2->td_pcb - 1;
+ bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
+
+ td2->td_frame->tf_rax = 0; /* Child returns zero */
+ td2->td_frame->tf_rflags &= ~PSL_C; /* success */
+ td2->td_frame->tf_rdx = 1;
+
+ /*
+ * If the parent process has the trap bit set (i.e. a debugger had
+ * single stepped the process to the system call), we need to clear
+ * the trap flag from the new frame unless the debugger had set PF_FORK
+ * on the parent. Otherwise, the child will receive a (likely
+ * unexpected) SIGTRAP when it executes the first instruction after
+ * returning to userland.
+ */
+ if ((p1->p_pfsflags & PF_FORK) == 0)
+ td2->td_frame->tf_rflags &= ~PSL_T;
+
+ /*
+ * Set registers for trampoline to user mode. Leave space for the
+ * return address on stack. These are the kernel mode register values.
+ */
+ pcb2->pcb_r12 = (register_t)fork_return; /* fork_trampoline argument */
+ pcb2->pcb_rbp = 0;
+ pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *);
+ pcb2->pcb_rbx = (register_t)td2; /* fork_trampoline argument */
+ pcb2->pcb_rip = (register_t)fork_trampoline;
+ /*-
+ * pcb2->pcb_dr*: cloned above.
+ * pcb2->pcb_savefpu: cloned above.
+ * pcb2->pcb_flags: cloned above.
+ * pcb2->pcb_onfault: cloned above (always NULL here?).
+ * pcb2->pcb_[fg]sbase: cloned above
+ */
+
+ /* Setup to release spin count in fork_exit(). */
+ td2->td_md.md_spinlock_count = 1;
+ td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
+ td2->td_md.md_invl_gen.gen = 0;
+
+ /* As an i386, do not copy io permission bitmap. */
+ pcb2->pcb_tssp = NULL;
+
+ /* New segment registers. */
+ set_pcb_flags_raw(pcb2, PCB_FULL_IRET);
+
+ /* Copy the LDT, if necessary. */
+ mdp1 = &td1->td_proc->p_md;
+ mdp2 = &p2->p_md;
+ if (mdp1->md_ldt == NULL) {
+ mdp2->md_ldt = NULL;
+ return;
+ }
+ mtx_lock(&dt_lock);
+ if (mdp1->md_ldt != NULL) {
+ if (flags & RFMEM) {
+ mdp1->md_ldt->ldt_refcnt++;
+ mdp2->md_ldt = mdp1->md_ldt;
+ bcopy(&mdp1->md_ldt_sd, &mdp2->md_ldt_sd, sizeof(struct
+ system_segment_descriptor));
+ } else {
+ mdp2->md_ldt = NULL;
+ mdp2->md_ldt = user_ldt_alloc(p2, 0);
+ if (mdp2->md_ldt == NULL)
+ panic("could not copy LDT");
+ amd64_set_ldt_data(td2, 0, max_ldt_segment,
+ (struct user_segment_descriptor *)
+ mdp1->md_ldt->ldt_base);
+ }
+ } else
+ mdp2->md_ldt = NULL;
+ mtx_unlock(&dt_lock);
+
+ /*
+ * Now, cpu_switch() can schedule the new process.
+ * pcb_rsp is loaded pointing to the cpu_switch() stack frame
+ * containing the return address when exiting cpu_switch.
+ * This will normally be to fork_trampoline(), which will have
+ * %ebx loaded with the new proc's pointer. fork_trampoline()
+ * will set up a stack to call fork_return(p, frame); to complete
+ * the return to user-mode.
+ */
+}
+
+/*
+ * Intercept the return address from a freshly forked process that has NOT
+ * been scheduled yet.
+ *
+ * This is needed to make kernel threads stay in kernel mode.
+ */
+void
+cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
+{
+ /*
+ * Note that the trap frame follows the args, so the function
+ * is really called like this: func(arg, frame);
+ */
+ td->td_pcb->pcb_r12 = (long) func; /* function */
+ td->td_pcb->pcb_rbx = (long) arg; /* first arg */
+}
+
+void
+cpu_exit(struct thread *td)
+{
+
+ /*
+ * If this process has a custom LDT, release it.
+ */
+ if (td->td_proc->p_md.md_ldt != NULL)
+ user_ldt_free(td);
+}
+
+void
+cpu_thread_exit(struct thread *td)
+{
+ struct pcb *pcb;
+
+ critical_enter();
+ if (td == PCPU_GET(fpcurthread))
+ fpudrop();
+ critical_exit();
+
+ pcb = td->td_pcb;
+
+ /* Disable any hardware breakpoints. */
+ if (pcb->pcb_flags & PCB_DBREGS) {
+ reset_dbregs();
+ clear_pcb_flags(pcb, PCB_DBREGS);
+ }
+}
+
+void
+cpu_thread_clean(struct thread *td)
+{
+ struct pcb *pcb;
+
+ pcb = td->td_pcb;
+
+ /*
+ * Clean TSS/iomap
+ */
+ if (pcb->pcb_tssp != NULL) {
+ kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_tssp,
+ ctob(IOPAGES + 1));
+ pcb->pcb_tssp = NULL;
+ }
+}
+
+void
+cpu_thread_swapin(struct thread *td)
+{
+}
+
+void
+cpu_thread_swapout(struct thread *td)
+{
+}
+
+void
+cpu_thread_alloc(struct thread *td)
+{
+ struct pcb *pcb;
+ struct xstate_hdr *xhdr;
+
+ td->td_pcb = pcb = get_pcb_td(td);
+ td->td_frame = (struct trapframe *)pcb - 1;
+ pcb->pcb_save = get_pcb_user_save_pcb(pcb);
+ if (use_xsave) {
+ xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
+ bzero(xhdr, sizeof(*xhdr));
+ xhdr->xstate_bv = xsave_mask;
+ }
+}
+
+void
+cpu_thread_free(struct thread *td)
+{
+
+ cpu_thread_clean(td);
+}
+
+void
+cpu_set_syscall_retval(struct thread *td, int error)
+{
+
+ switch (error) {
+ case 0:
+ td->td_frame->tf_rax = td->td_retval[0];
+ td->td_frame->tf_rdx = td->td_retval[1];
+ td->td_frame->tf_rflags &= ~PSL_C;
+ break;
+
+ case ERESTART:
+ /*
+ * Reconstruct pc, we know that 'syscall' is 2 bytes,
+ * lcall $X,y is 7 bytes, int 0x80 is 2 bytes.
+ * We saved this in tf_err.
+ * %r10 (which was holding the value of %rcx) is restored
+ * for the next iteration.
+ * %r10 restore is only required for freebsd/amd64 processes,
+ * but shall be innocent for any ia32 ABI.
+ *
+ * Require full context restore to get the arguments
+ * in the registers reloaded at return to usermode.
+ */
+ td->td_frame->tf_rip -= td->td_frame->tf_err;
+ td->td_frame->tf_r10 = td->td_frame->tf_rcx;
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
+ break;
+
+ case EJUSTRETURN:
+ break;
+
+ default:
+ td->td_frame->tf_rax = SV_ABI_ERRNO(td->td_proc, error);
+ td->td_frame->tf_rflags |= PSL_C;
+ break;
+ }
+}
+
+/*
+ * Initialize machine state, mostly pcb and trap frame for a new
+ * thread, about to return to userspace. Put enough state in the new
+ * thread's PCB to get it to go back to the fork_return(), which
+ * finalizes the thread state and handles peculiarities of the first
+ * return to userspace for the new thread.
+ */
+void
+cpu_copy_thread(struct thread *td, struct thread *td0)
+{
+ struct pcb *pcb2;
+
+ /* Point the pcb to the top of the stack. */
+ pcb2 = td->td_pcb;
+
+ /*
+ * Copy the upcall pcb. This loads kernel regs.
+ * Those not loaded individually below get their default
+ * values here.
+ */
+ update_pcb_bases(td0->td_pcb);
+ bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
+ clear_pcb_flags(pcb2, PCB_FPUINITDONE | PCB_USERFPUINITDONE |
+ PCB_KERNFPU);
+ pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
+ bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
+ cpu_max_ext_state_size);
+ set_pcb_flags_raw(pcb2, PCB_FULL_IRET);
+
+ /*
+ * Create a new fresh stack for the new thread.
+ */
+ bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
+
+ /* If the current thread has the trap bit set (i.e. a debugger had
+ * single stepped the process to the system call), we need to clear
+ * the trap flag from the new frame. Otherwise, the new thread will
+ * receive a (likely unexpected) SIGTRAP when it executes the first
+ * instruction after returning to userland.
+ */
+ td->td_frame->tf_rflags &= ~PSL_T;
+
+ /*
+ * Set registers for trampoline to user mode. Leave space for the
+ * return address on stack. These are the kernel mode register values.
+ */
+ pcb2->pcb_r12 = (register_t)fork_return; /* trampoline arg */
+ pcb2->pcb_rbp = 0;
+ pcb2->pcb_rsp = (register_t)td->td_frame - sizeof(void *); /* trampoline arg */
+ pcb2->pcb_rbx = (register_t)td; /* trampoline arg */
+ pcb2->pcb_rip = (register_t)fork_trampoline;
+ /*
+ * If we didn't copy the pcb, we'd need to do the following registers:
+ * pcb2->pcb_dr*: cloned above.
+ * pcb2->pcb_savefpu: cloned above.
+ * pcb2->pcb_onfault: cloned above (always NULL here?).
+ * pcb2->pcb_[fg]sbase: cloned above
+ */
+
+ /* Setup to release spin count in fork_exit(). */
+ td->td_md.md_spinlock_count = 1;
+ td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
+}
+
+/*
+ * Set that machine state for performing an upcall that starts
+ * the entry function with the given argument.
+ */
+void
+cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
+ stack_t *stack)
+{
+
+ /*
+ * Do any extra cleaning that needs to be done.
+ * The thread may have optional components
+ * that are not present in a fresh thread.
+ * This may be a recycled thread so make it look
+ * as though it's newly allocated.
+ */
+ cpu_thread_clean(td);
+
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ /*
+ * Set the trap frame to point at the beginning of the entry
+ * function.
+ */
+ td->td_frame->tf_rbp = 0;
+ td->td_frame->tf_rsp =
+ (((uintptr_t)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
+ td->td_frame->tf_rip = (uintptr_t)entry;
+
+ /* Return address sentinel value to stop stack unwinding. */
+ suword32((void *)td->td_frame->tf_rsp, 0);
+
+ /* Pass the argument to the entry point. */
+ suword32((void *)(td->td_frame->tf_rsp + sizeof(int32_t)),
+ (uint32_t)(uintptr_t)arg);
+
+ return;
+ }
+#endif
+
+ /*
+ * Set the trap frame to point at the beginning of the uts
+ * function.
+ */
+ td->td_frame->tf_rbp = 0;
+ td->td_frame->tf_rsp =
+ ((register_t)stack->ss_sp + stack->ss_size) & ~0x0f;
+ td->td_frame->tf_rsp -= 8;
+ td->td_frame->tf_rip = (register_t)entry;
+ td->td_frame->tf_ds = _udatasel;
+ td->td_frame->tf_es = _udatasel;
+ td->td_frame->tf_fs = _ufssel;
+ td->td_frame->tf_gs = _ugssel;
+ td->td_frame->tf_flags = TF_HASSEGS;
+
+ /* Return address sentinel value to stop stack unwinding. */
+ suword((void *)td->td_frame->tf_rsp, 0);
+
+ /* Pass the argument to the entry point. */
+ td->td_frame->tf_rdi = (register_t)arg;
+}
+
+int
+cpu_set_user_tls(struct thread *td, void *tls_base)
+{
+ struct pcb *pcb;
+
+ if ((u_int64_t)tls_base >= VM_MAXUSER_ADDRESS)
+ return (EINVAL);
+
+ pcb = td->td_pcb;
+ set_pcb_flags(pcb, PCB_FULL_IRET);
+#ifdef COMPAT_FREEBSD32
+ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
+ pcb->pcb_gsbase = (register_t)tls_base;
+ return (0);
+ }
+#endif
+ pcb->pcb_fsbase = (register_t)tls_base;
+ return (0);
+}
+
+#ifdef SMP
+static void
+cpu_reset_proxy()
+{
+ cpuset_t tcrp;
+
+ cpu_reset_proxy_active = 1;
+ while (cpu_reset_proxy_active == 1)
+ ia32_pause(); /* Wait for other cpu to see that we've started */
+
+ CPU_SETOF(cpu_reset_proxyid, &tcrp);
+ stop_cpus(tcrp);
+ printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
+ DELAY(1000000);
+ cpu_reset_real();
+}
+#endif
+
+void
+cpu_reset()
+{
+#ifdef SMP
+ cpuset_t map;
+ u_int cnt;
+
+ if (smp_started) {
+ map = all_cpus;
+ CPU_CLR(PCPU_GET(cpuid), &map);
+ CPU_NAND(&map, &stopped_cpus);
+ if (!CPU_EMPTY(&map)) {
+ printf("cpu_reset: Stopping other CPUs\n");
+ stop_cpus(map);
+ }
+
+ if (PCPU_GET(cpuid) != 0) {
+ cpu_reset_proxyid = PCPU_GET(cpuid);
+ cpustop_restartfunc = cpu_reset_proxy;
+ cpu_reset_proxy_active = 0;
+ printf("cpu_reset: Restarting BSP\n");
+
+ /* Restart CPU #0. */
+ CPU_SETOF(0, &started_cpus);
+ wmb();
+
+ cnt = 0;
+ while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
+ ia32_pause();
+ cnt++; /* Wait for BSP to announce restart */
+ }
+ if (cpu_reset_proxy_active == 0)
+ printf("cpu_reset: Failed to restart BSP\n");
+ enable_intr();
+ cpu_reset_proxy_active = 2;
+
+ while (1)
+ ia32_pause();
+ /* NOTREACHED */
+ }
+
+ DELAY(1000000);
+ }
+#endif
+ cpu_reset_real();
+ /* NOTREACHED */
+}
+
+static void
+cpu_reset_real()
+{
+ struct region_descriptor null_idt;
+ int b;
+
+ disable_intr();
+
+ /*
+ * Attempt to do a CPU reset via the keyboard controller,
+ * do not turn off GateA20, as any machine that fails
+ * to do the reset here would then end up in no man's land.
+ */
+ outb(IO_KBD + 4, 0xFE);
+ DELAY(500000); /* wait 0.5 sec to see if that did it */
+
+ /*
+ * Attempt to force a reset via the Reset Control register at
+ * I/O port 0xcf9. Bit 2 forces a system reset when it
+ * transitions from 0 to 1. Bit 1 selects the type of reset
+ * to attempt: 0 selects a "soft" reset, and 1 selects a
+ * "hard" reset. We try a "hard" reset. The first write sets
+ * bit 1 to select a "hard" reset and clears bit 2. The
+ * second write forces a 0 -> 1 transition in bit 2 to trigger
+ * a reset.
+ */
+ outb(0xcf9, 0x2);
+ outb(0xcf9, 0x6);
+ DELAY(500000); /* wait 0.5 sec to see if that did it */
+
+ /*
+ * Attempt to force a reset via the Fast A20 and Init register
+ * at I/O port 0x92. Bit 1 serves as an alternate A20 gate.
+ * Bit 0 asserts INIT# when set to 1. We are careful to only
+ * preserve bit 1 while setting bit 0. We also must clear bit
+ * 0 before setting it if it isn't already clear.
+ */
+ b = inb(0x92);
+ if (b != 0xff) {
+ if ((b & 0x1) != 0)
+ outb(0x92, b & 0xfe);
+ outb(0x92, b | 0x1);
+ DELAY(500000); /* wait 0.5 sec to see if that did it */
+ }
+
+ printf("No known reset method worked, attempting CPU shutdown\n");
+ DELAY(1000000); /* wait 1 sec for printf to complete */
+
+ /* Wipe the IDT. */
+ null_idt.rd_limit = 0;
+ null_idt.rd_base = 0;
+ lidt(&null_idt);
+
+ /* "good night, sweet prince .... <THUNK!>" */
+ breakpoint();
+
+ /* NOTREACHED */
+ while(1);
+}
+
+/*
+ * Software interrupt handler for queued VM system processing.
+ */
+void
+swi_vm(void *dummy)
+{
+ if (busdma_swi_pending != 0)
+ busdma_swi();
+}
+
+/*
+ * Tell whether this address is in some physical memory region.
+ * Currently used by the kernel coredump code in order to avoid
+ * dumping the ``ISA memory hole'' which could cause indefinite hangs,
+ * or other unpredictable behaviour.
+ */
+
+int
+is_physical_memory(vm_paddr_t addr)
+{
+
+#ifdef DEV_ISA
+ /* The ISA ``memory hole''. */
+ if (addr >= 0xa0000 && addr < 0x100000)
+ return 0;
+#endif
+
+ /*
+ * stuff other tests for known memory-mapped devices (PCI?)
+ * here
+ */
+
+ return 1;
+}
Index: sys/amd64/amd64/xen-locore.S
===================================================================
--- sys/amd64/amd64/xen-locore.S
+++ sys/amd64/amd64/xen-locore.S
@@ -40,7 +40,7 @@
#define __ASSEMBLY__
#include <xen/interface/elfnote.h>
-#include "assym.s"
+#include "assym.S"
.section __xen_guest
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "FreeBSD")
Index: sys/amd64/ia32/ia32_exception.S
===================================================================
--- sys/amd64/ia32/ia32_exception.S
+++ sys/amd64/ia32/ia32_exception.S
@@ -28,7 +28,7 @@
#include <machine/asmacros.h>
-#include "assym.s"
+#include "assym.S"
.text
/*
Index: sys/amd64/linux/linux_support.s
===================================================================
--- sys/amd64/linux/linux_support.s
+++ sys/amd64/linux/linux_support.s
@@ -31,7 +31,7 @@
#include "linux_assym.h" /* system definitions */
#include <machine/asmacros.h> /* miscellaneous asm macros */
-#include "assym.s"
+#include "assym.S"
futex_fault:
movq $0,PCB_ONFAULT(%r8)
Index: sys/amd64/linux32/linux32_support.s
===================================================================
--- sys/amd64/linux32/linux32_support.s
+++ sys/amd64/linux32/linux32_support.s
@@ -31,7 +31,7 @@
#include "linux32_assym.h" /* system definitions */
#include <machine/asmacros.h> /* miscellaneous asm macros */
-#include "assym.s"
+#include "assym.S"
futex_fault:
movq $0,PCB_ONFAULT(%r8)
Index: sys/amd64/sgx/sgx_support.S
===================================================================
--- sys/amd64/sgx/sgx_support.S
+++ sys/amd64/sgx/sgx_support.S
@@ -34,7 +34,7 @@
#include <machine/asmacros.h>
#include <amd64/sgx/sgxvar.h>
-#include "assym.s"
+#include "assym.S"
.text
Index: sys/arm/arm/bcopy_page.S
===================================================================
--- sys/arm/arm/bcopy_page.S
+++ sys/arm/arm/bcopy_page.S
@@ -42,7 +42,7 @@
__FBSDID("$FreeBSD$");
-#include "assym.s"
+#include "assym.S"
#ifndef _ARM_ARCH_5E
Index: sys/arm/arm/bcopyinout.S
===================================================================
--- sys/arm/arm/bcopyinout.S
+++ sys/arm/arm/bcopyinout.S
@@ -36,7 +36,7 @@
*/
-#include "assym.s"
+#include "assym.S"
#include <machine/asm.h>
#include <sys/errno.h>
Index: sys/arm/arm/copystr.S
===================================================================
--- sys/arm/arm/copystr.S
+++ sys/arm/arm/copystr.S
@@ -38,7 +38,7 @@
* Created : 16/05/95
*/
-#include "assym.s"
+#include "assym.S"
#include <machine/asm.h>
#include <machine/armreg.h>
__FBSDID("$FreeBSD$");
Index: sys/arm/arm/cpu_asm-v6.S
===================================================================
--- sys/arm/arm/cpu_asm-v6.S
+++ sys/arm/arm/cpu_asm-v6.S
@@ -26,7 +26,7 @@
*
* $FreeBSD$
*/
-#include "assym.s"
+#include "assym.S"
#include <machine/asm.h>
#include <machine/asmacros.h>
Index: sys/arm/arm/exception.S
===================================================================
--- sys/arm/arm/exception.S
+++ sys/arm/arm/exception.S
@@ -46,7 +46,7 @@
*
*/
-#include "assym.s"
+#include "assym.S"
#include <machine/asm.h>
#include <machine/armreg.h>
Index: sys/arm/arm/fusu.S
===================================================================
--- sys/arm/arm/fusu.S
+++ sys/arm/arm/fusu.S
@@ -35,7 +35,7 @@
#include <machine/asm.h>
#include <machine/armreg.h>
-#include "assym.s"
+#include "assym.S"
__FBSDID("$FreeBSD$");
.syntax unified
Index: sys/arm/arm/hypervisor-stub.S
===================================================================
--- sys/arm/arm/hypervisor-stub.S
+++ sys/arm/arm/hypervisor-stub.S
@@ -24,7 +24,7 @@
* SUCH DAMAGE.
*/
-#include "assym.s"
+#include "assym.S"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/asmacros.h>
Index: sys/arm/arm/in_cksum_arm.S
===================================================================
--- sys/arm/arm/in_cksum_arm.S
+++ sys/arm/arm/in_cksum_arm.S
@@ -43,7 +43,7 @@
#include "opt_inet.h"
#include <machine/asm.h>
-#include "assym.s"
+#include "assym.S"
__FBSDID("$FreeBSD$");
.syntax unified
Index: sys/arm/arm/locore-v4.S
===================================================================
--- sys/arm/arm/locore-v4.S
+++ sys/arm/arm/locore-v4.S
@@ -33,7 +33,7 @@
*
*/
-#include "assym.s"
+#include "assym.S"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
Index: sys/arm/arm/locore-v6.S
===================================================================
--- sys/arm/arm/locore-v6.S
+++ sys/arm/arm/locore-v6.S
@@ -28,7 +28,7 @@
* SUCH DAMAGE.
*/
-#include "assym.s"
+#include "assym.S"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/asmacros.h>
@@ -38,7 +38,7 @@
__FBSDID("$FreeBSD$");
-/* We map 64MB of kernel unless overridden in assym.s by the kernel option. */
+/* We map 64MB of kernel unless overridden in assym.S by the kernel option. */
#ifndef LOCORE_MAP_MB
#define LOCORE_MAP_MB 64
#endif
Index: sys/arm/arm/support.S
===================================================================
--- sys/arm/arm/support.S
+++ sys/arm/arm/support.S
@@ -89,7 +89,7 @@
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
-#include "assym.s"
+#include "assym.S"
.syntax unified
Index: sys/arm/arm/swtch-v4.S
===================================================================
--- sys/arm/arm/swtch-v4.S
+++ sys/arm/arm/swtch-v4.S
@@ -78,7 +78,7 @@
*
*/
-#include "assym.s"
+#include "assym.S"
#include "opt_sched.h"
#include <machine/asm.h>
Index: sys/arm/arm/swtch-v6.S
===================================================================
--- sys/arm/arm/swtch-v6.S
+++ sys/arm/arm/swtch-v6.S
@@ -78,7 +78,7 @@
*
*/
-#include "assym.s"
+#include "assym.S"
#include "opt_sched.h"
#include <machine/asm.h>
Index: sys/arm/arm/swtch.S
===================================================================
--- sys/arm/arm/swtch.S
+++ sys/arm/arm/swtch.S
@@ -78,7 +78,7 @@
*
*/
-#include "assym.s"
+#include "assym.S"
#include <machine/asm.h>
#include <machine/asmacros.h>
Index: sys/arm/conf/LINT
===================================================================
--- /dev/null
+++ sys/arm/conf/LINT
@@ -0,0 +1,916 @@
+ident LINT
+maxusers 10
+makeoptions CONF_CFLAGS=-fno-builtin
+makeoptions DESTDIR=/tmp
+options MAXDSIZ=(1024UL*1024*1024)
+options MAXSSIZ=(128UL*1024*1024)
+options DFLDSIZ=(1024UL*1024*1024)
+options BLKDEV_IOSIZE=8192
+options DFLTPHYS=(64*1024)
+options MAXPHYS=(128*1024)
+options INCLUDE_CONFIG_FILE
+options BOOTVERBOSE=1
+options BOOTHOWTO=RB_MULTIPLE
+options GEOM_AES
+options GEOM_BDE
+options GEOM_BSD
+options GEOM_CACHE
+options GEOM_CONCAT
+options GEOM_ELI
+options GEOM_FOX
+options GEOM_GATE
+options GEOM_JOURNAL
+options GEOM_LABEL
+options GEOM_LINUX_LVM
+options GEOM_MAP
+options GEOM_MBR
+options GEOM_MIRROR
+options GEOM_MULTIPATH
+options GEOM_NOP
+options GEOM_PART_APM
+options GEOM_PART_BSD
+options GEOM_PART_BSD64
+options GEOM_PART_EBR
+options GEOM_PART_EBR_COMPAT
+options GEOM_PART_GPT
+options GEOM_PART_LDM
+options GEOM_PART_MBR
+options GEOM_PART_VTOC8
+options GEOM_RAID
+options GEOM_RAID3
+options GEOM_SHSEC
+options GEOM_STRIPE
+options GEOM_SUNLABEL
+options GEOM_UZIP
+options GEOM_VINUM
+options GEOM_VIRSTOR
+options GEOM_VOL
+options GEOM_ZERO
+options ROOTDEVNAME=\"ufs:da0s2e\"
+options SCHED_4BSD
+options SCHED_STATS
+options SMP
+options EARLY_AP_STARTUP
+options MAXCPU=32
+options MAXMEMDOM=2
+options VM_NUMA_ALLOC
+options DEVICE_NUMA
+options NO_ADAPTIVE_MUTEXES
+options NO_ADAPTIVE_RWLOCKS
+options NO_ADAPTIVE_SX
+options MUTEX_NOINLINE
+options RWLOCK_NOINLINE
+options SX_NOINLINE
+options PREEMPTION
+options FULL_PREEMPTION
+options WITNESS
+options WITNESS_KDB
+options WITNESS_SKIPSPIN
+options LOCK_PROFILING
+options MPROF_BUFFERS="1536"
+options MPROF_HASH_SIZE="1543"
+options CALLOUT_PROFILING
+options SLEEPQUEUE_PROFILING
+options TURNSTILE_PROFILING
+options UMTX_PROFILING
+options COMPAT_43
+options COMPAT_43TTY
+options COMPAT_FREEBSD4
+options COMPAT_FREEBSD5
+options COMPAT_FREEBSD6
+options COMPAT_FREEBSD7
+options COMPAT_FREEBSD9
+options COMPAT_FREEBSD10
+options COMPAT_FREEBSD11
+options COMPAT_LINUXKPI
+options SYSVSHM
+options SYSVSEM
+options SYSVMSG
+options KDB
+options KDB_TRACE
+options KDB_UNATTENDED
+options DDB
+options DDB_NUMSYM
+options GDB
+options SYSCTL_DEBUG
+options TEXTDUMP_PREFERRED
+options TEXTDUMP_VERBOSE
+options NO_SYSCTL_DESCR
+options MALLOC_DEBUG_MAXZONES=8
+options DEBUG_MEMGUARD
+options DEBUG_REDZONE
+options KTRACE
+options KTRACE_REQUEST_POOL=101
+options KTR
+options KTR_BOOT_ENTRIES=1024
+options KTR_ENTRIES=(128*1024)
+options KTR_COMPILE=(KTR_ALL)
+options KTR_MASK=KTR_INTR
+options KTR_CPUMASK=0x3
+options KTR_VERBOSE
+options ALQ
+options KTR_ALQ
+options INVARIANTS
+options INVARIANT_SUPPORT
+options DIAGNOSTIC
+options REGRESSION
+options COMPILING_LINT
+options STACK
+options NUM_CORE_FILES=5
+device hwpmc
+options HWPMC_DEBUG
+options HWPMC_HOOKS
+options INET
+options INET6
+options RATELIMIT
+options ROUTETABLES=2
+options TCP_OFFLOAD
+options IPSEC
+options IPSEC_SUPPORT
+options NETSMB
+options LIBMCHAIN
+options LIBALIAS
+options SCTP
+options SCTP_DEBUG
+options SCTP_WITH_NO_CSUM
+options SCTP_LOCK_LOGGING
+options SCTP_MBUF_LOGGING
+options SCTP_MBCNT_LOGGING
+options SCTP_PACKET_LOGGING
+options SCTP_LTRACE_CHUNKS
+options SCTP_LTRACE_ERRORS
+options ALTQ
+options ALTQ_CBQ
+options ALTQ_RED
+options ALTQ_RIO
+options ALTQ_CODEL
+options ALTQ_HFSC
+options ALTQ_FAIRQ
+options ALTQ_CDNR
+options ALTQ_PRIQ
+options ALTQ_NOPCC
+options ALTQ_DEBUG
+options NETGRAPH
+options NETGRAPH_DEBUG
+options NETGRAPH_ASYNC
+options NETGRAPH_ATMLLC
+options NETGRAPH_ATM_ATMPIF
+options NETGRAPH_BLUETOOTH
+options NETGRAPH_BLUETOOTH_BT3C
+options NETGRAPH_BLUETOOTH_HCI
+options NETGRAPH_BLUETOOTH_L2CAP
+options NETGRAPH_BLUETOOTH_SOCKET
+options NETGRAPH_BLUETOOTH_UBT
+options NETGRAPH_BLUETOOTH_UBTBCMFW
+options NETGRAPH_BPF
+options NETGRAPH_BRIDGE
+options NETGRAPH_CAR
+options NETGRAPH_CISCO
+options NETGRAPH_DEFLATE
+options NETGRAPH_DEVICE
+options NETGRAPH_ECHO
+options NETGRAPH_EIFACE
+options NETGRAPH_ETHER
+options NETGRAPH_FRAME_RELAY
+options NETGRAPH_GIF
+options NETGRAPH_GIF_DEMUX
+options NETGRAPH_HOLE
+options NETGRAPH_IFACE
+options NETGRAPH_IP_INPUT
+options NETGRAPH_IPFW
+options NETGRAPH_KSOCKET
+options NETGRAPH_L2TP
+options NETGRAPH_LMI
+options NETGRAPH_MPPC_COMPRESSION
+options NETGRAPH_MPPC_ENCRYPTION
+options NETGRAPH_NETFLOW
+options NETGRAPH_NAT
+options NETGRAPH_ONE2MANY
+options NETGRAPH_PATCH
+options NETGRAPH_PIPE
+options NETGRAPH_PPP
+options NETGRAPH_PPPOE
+options NETGRAPH_PPTPGRE
+options NETGRAPH_PRED1
+options NETGRAPH_RFC1490
+options NETGRAPH_SOCKET
+options NETGRAPH_SPLIT
+options NETGRAPH_SPPP
+options NETGRAPH_TAG
+options NETGRAPH_TCPMSS
+options NETGRAPH_TEE
+options NETGRAPH_UI
+options NETGRAPH_VJC
+options NETGRAPH_VLAN
+options NGATM_ATM
+options NGATM_ATMBASE
+options NGATM_SSCOP
+options NGATM_SSCFU
+options NGATM_UNI
+options NGATM_CCATM
+device mn
+device loop
+device ether
+device vlan
+device vxlan
+device wlan
+options IEEE80211_DEBUG
+options IEEE80211_AMPDU_AGE
+options IEEE80211_SUPPORT_MESH
+options IEEE80211_SUPPORT_TDMA
+device wlan_wep
+device wlan_ccmp
+device wlan_tkip
+device wlan_xauth
+device wlan_acl
+device wlan_amrr
+device token
+device fddi
+device arcnet
+device sppp
+device bpf
+device netmap
+device disc
+device epair
+device edsc
+device tap
+device tun
+device gif
+device gre
+device me
+options XBONEHACK
+device stf
+device pf
+device pflog
+device pfsync
+device if_bridge
+device carp
+device enc
+device lagg
+options MROUTING
+options IPFIREWALL
+options IPFIREWALL_VERBOSE
+options IPFIREWALL_VERBOSE_LIMIT=100
+options IPFIREWALL_DEFAULT_TO_ACCEPT
+options IPFIREWALL_NAT
+options IPFIREWALL_NAT64
+options IPFIREWALL_NPTV6
+options IPDIVERT
+options IPFILTER
+options IPFILTER_LOG
+options IPFILTER_LOOKUP
+options IPFILTER_DEFAULT_BLOCK
+options IPSTEALTH
+options PF_DEFAULT_TO_DROP
+options TCPDEBUG
+options TCPPCAP
+options TCP_HHOOK
+options RADIX_MPATH
+options MBUF_STRESS_TEST
+options MBUF_PROFILING
+options ACCEPT_FILTER_DATA
+options ACCEPT_FILTER_DNS
+options ACCEPT_FILTER_HTTP
+options TCP_SIGNATURE
+options DUMMYNET
+options FFS
+options NFSCL
+options AUTOFS
+options CD9660
+options FDESCFS
+options FUSE
+options MSDOSFS
+options NFSLOCKD
+options NFSD
+options KGSSAPI
+options NULLFS
+options PROCFS
+options PSEUDOFS
+options PSEUDOFS_TRACE
+options SMBFS
+options TMPFS
+options UDF
+options UNIONFS
+options NFS_ROOT
+options SOFTUPDATES
+options UFS_EXTATTR
+options UFS_EXTATTR_AUTOSTART
+options UFS_ACL
+options UFS_DIRHASH
+options UFS_GJOURNAL
+options MD_ROOT_SIZE=10
+options MD_ROOT
+options QUOTA
+options SUIDDIR
+options NFS_MINATTRTIMO=3
+options NFS_MAXATTRTIMO=60
+options NFS_MINDIRATTRTIMO=30
+options NFS_MAXDIRATTRTIMO=60
+options NFS_DEBUG
+options EXT2FS
+device random
+device mem
+device ksyms
+options CD9660_ICONV
+options MSDOSFS_ICONV
+options UDF_ICONV
+options _KPOSIX_PRIORITY_SCHEDULING
+options P1003_1B_SEMAPHORES
+options P1003_1B_MQUEUE
+options AUDIT
+options MAC
+options MAC_BIBA
+options MAC_BSDEXTENDED
+options MAC_IFOFF
+options MAC_LOMAC
+options MAC_MLS
+options MAC_NONE
+options MAC_PARTITION
+options MAC_PORTACL
+options MAC_SEEOTHERUIDS
+options MAC_STUB
+options MAC_TEST
+options CAPABILITIES
+options CAPABILITY_MODE
+options HZ=100
+options PPS_SYNC
+options FFCLOCK
+device scbus
+device ch
+device da
+device sa
+device cd
+device ses
+device pt
+device targ
+device targbh
+device pass
+device sg
+device ctl
+options CAMDEBUG
+options CAM_DEBUG_COMPILE=-1
+options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_PROBE|CAM_DEBUG_PERIPH)
+options CAM_DEBUG_BUS=-1
+options CAM_DEBUG_TARGET=-1
+options CAM_DEBUG_LUN=-1
+options CAM_DEBUG_DELAY=1
+options CAM_MAX_HIGHPOWER=4
+options SCSI_NO_SENSE_STRINGS
+options SCSI_NO_OP_STRINGS
+options SCSI_DELAY=5000
+options CAM_IOSCHED_DYNAMIC
+options CHANGER_MIN_BUSY_SECONDS=2
+options CHANGER_MAX_BUSY_SECONDS=10
+options SA_IO_TIMEOUT=4
+options SA_SPACE_TIMEOUT=60
+options SA_REWIND_TIMEOUT=(2*60)
+options SA_ERASE_TIMEOUT=(4*60)
+options SA_1FM_AT_EOD
+options SCSI_PT_DEFAULT_TIMEOUT=60
+options SES_ENABLE_PASSTHROUGH
+device pty
+device nmdm
+device md
+device snp
+device ccd
+device firmware
+options LIBICONV
+options MSGBUF_SIZE=40960
+device pci
+options PCI_HP
+options PCI_IOV
+options KBD_DISABLE_KEYMAP_LOAD
+options KBD_INSTALL_CDEV
+device kbdmux
+options KBDMUX_DFLT_KEYMAP
+makeoptions KBDMUX_DFLT_KEYMAP=it.iso
+options FB_DEBUG
+device splash
+device blank_saver
+device daemon_saver
+device dragon_saver
+device fade_saver
+device fire_saver
+device green_saver
+device logo_saver
+device rain_saver
+device snake_saver
+device star_saver
+device warp_saver
+device sc
+options MAXCONS=16
+options SC_ALT_MOUSE_IMAGE
+options SC_DFLT_FONT
+makeoptions SC_DFLT_FONT=cp850
+options SC_DISABLE_KDBKEY
+options SC_DISABLE_REBOOT
+options SC_HISTORY_SIZE=200
+options SC_MOUSE_CHAR=0x3
+options SC_PIXEL_MODE
+options SC_NORM_ATTR=(FG_GREEN|BG_BLACK)
+options SC_NORM_REV_ATTR=(FG_YELLOW|BG_GREEN)
+options SC_KERNEL_CONS_ATTR=(FG_RED|BG_BLACK)
+options SC_KERNEL_CONS_REV_ATTR=(FG_BLACK|BG_RED)
+options SC_CUT_SPACES2TABS
+options SC_CUT_SEPCHARS=\"x09\"
+options SC_TWOBUTTON_MOUSE
+options SC_NO_CUTPASTE
+options SC_NO_FONT_LOADING
+options SC_NO_HISTORY
+options SC_NO_MODE_CHANGE
+options SC_NO_SYSMOUSE
+options SC_NO_SUSPEND_VTYSWITCH
+options TEKEN_CONS25
+options TEKEN_UTF8
+device vt
+options VT_ALT_TO_ESC_HACK=1
+options VT_MAXWINDOWS=16
+options VT_TWOBUTTON_MOUSE
+options VT_FB_DEFAULT_HEIGHT=480
+options VT_FB_DEFAULT_WIDTH=640
+options TERMINAL_NORM_ATTR=(FG_GREEN|BG_BLACK)
+options TERMINAL_KERN_ATTR=(FG_LIGHTRED|BG_BLACK)
+device bt
+device adv
+device adw
+device aha
+device aic
+device ahc
+device ahd
+device esp
+device iscsi_initiator
+device isp
+device ispfw
+device mpt
+device ncr
+device sym
+device trm
+options AHC_ALLOW_MEMIO
+options AHC_DUMP_EEPROM
+options AHC_TMODE_ENABLE
+options AHC_DEBUG
+options AHC_DEBUG_OPTS
+options AHC_REG_PRETTY_PRINT
+options AHD_DEBUG
+options AHD_DEBUG_OPTS=0xFFFFFFFF
+options AHD_REG_PRETTY_PRINT
+options AHD_TMODE_ENABLE
+options ADW_ALLOW_MEMIO
+options ISCSI_INITIATOR_DEBUG=9
+options ISP_TARGET_MODE=1
+options ISP_DEFAULT_ROLES=0
+device dpt
+options DPT_RESET_HBA
+device ciss
+device iir
+device mly
+device ida
+device mlx
+device amr
+device amrp
+device mfi
+device mfip
+options MFI_DEBUG
+device mrsas
+device twe
+device ahci
+device mvs
+device siis
+device ata
+device fdc
+options FDC_DEBUG
+device uart
+options UART_PPS_ON_CTS
+options UART_POLL_FREQ
+options BREAK_TO_DEBUGGER
+options ALT_BREAK_TO_DEBUGGER
+device scc
+device puc
+device mii
+device mii_bitbang
+device miibus
+device acphy
+device amphy
+device atphy
+device axphy
+device bmtphy
+device bnxt
+device brgphy
+device ciphy
+device e1000phy
+device gentbi
+device icsphy
+device ip1000phy
+device jmphy
+device lxtphy
+device mlphy
+device nsgphy
+device nsphy
+device nsphyter
+device pnaphy
+device qsphy
+device rdcphy
+device rgephy
+device rlphy
+device rlswitch
+device smcphy
+device tdkphy
+device tlphy
+device truephy
+device xmphy
+device cm
+device ep
+device ex
+device fe
+device sn
+device an
+device wi
+device xe
+device ae
+device age
+device alc
+device ale
+device bce
+device bfe
+device bge
+device cas
+device dc
+device et
+device fxp
+device gem
+device hme
+device jme
+device lge
+device mlx5
+device mlx5en
+device msk
+device my
+device nge
+device re
+device rl
+device pcn
+device sf
+device sge
+device sis
+device sk
+device ste
+device stge
+device tl
+device tx
+device vr
+device vte
+device wb
+device xl
+device cxgb
+device cxgb_t3fw
+device cxgbe
+device cxgbev
+device de
+device em
+device ixgb
+device ix
+device ixv
+device le
+device mxge
+device nxge
+device oce
+device ti
+device txp
+device vx
+device vxge
+device fpa
+device lmc
+device ath
+device ath_hal
+options AH_SUPPORT_AR5416
+options AH_RXCFG_SDMAMW_4BYTES
+device ath_rate_sample
+device bwi
+device bwn
+device malo
+device mwl
+device mwlfw
+device ral
+device rtwn
+device rtwnfw
+options MCLSHIFT=12
+options MSIZE=512
+options LIBMBPOOL
+device sound
+device snd_ad1816
+device snd_als4000
+device snd_atiixp
+device snd_cmi
+device snd_cs4281
+device snd_csa
+device snd_ds1
+device snd_emu10k1
+device snd_emu10kx
+device snd_envy24
+device snd_envy24ht
+device snd_es137x
+device snd_ess
+device snd_fm801
+device snd_gusc
+device snd_hda
+device snd_hdspe
+device snd_ich
+device snd_maestro
+device snd_maestro3
+device snd_mss
+device snd_neomagic
+device snd_sb16
+device snd_sb8
+device snd_sbc
+device snd_solo
+device snd_spicds
+device snd_t4dwave
+device snd_uaudio
+device snd_via8233
+device snd_via82c686
+device snd_vibes
+options SND_DEBUG
+options SND_DIAGNOSTIC
+options SND_FEEDER_MULTIFORMAT
+options SND_FEEDER_FULL_MULTIFORMAT
+options SND_FEEDER_RATE_HP
+options SND_PCM_64
+options SND_OLDSTEREO
+device joy
+device cmx
+device bktr
+device cbb
+device pccard
+device cardbus
+device mmc
+device mmcsd
+device sdhci
+device smbus
+device intpm
+device alpm
+device ichsmb
+device viapm
+device amdpm
+device amdsmb
+device nfpm
+device nfsmb
+device ismt
+device smb
+device jedec_ts
+device iicbus
+device iicbb
+device ic
+device iic
+device iicsmb
+device iicoc
+device ds1307
+device ds133x
+device ds1374
+device ds1672
+device ds3231
+device icee
+device lm75
+device nxprtc
+device s35390a
+options PPC_PROBE_CHIPSET
+options DEBUG_1284
+options PERIPH_1284
+options DONTPROBE_1284
+options VP0_DEBUG
+options LPT_DEBUG
+options PPC_DEBUG
+options PLIP_DEBUG
+options PCFCLOCK_VERBOSE
+options PCFCLOCK_MAX_RETRIES=5
+device ppc
+device ppbus
+device vpo
+device lpt
+device plip
+device ppi
+device pps
+device lpbb
+device pcfclock
+device etherswitch
+device miiproxy
+device arswitch
+device ip17x
+device rtl8366rb
+device ukswitch
+options BOOTP
+options BOOTP_NFSROOT
+options BOOTP_NFSV3
+options BOOTP_COMPAT
+options BOOTP_WIRED_TO=fxp0
+options BOOTP_BLOCKSIZE=8192
+options SW_WATCHDOG
+options DEADLKRES
+options NSFBUFS=1024
+options DEBUG_LOCKS
+device uhci
+device ohci
+device ehci
+device xhci
+device usb
+device udbp
+device ufm
+device ugold
+device uled
+device uhid
+device ukbd
+device ulpt
+device umass
+device usfs
+device umct
+device umodem
+device ums
+device atp
+device wsp
+device uep
+device urio
+device ucom
+device u3g
+device uark
+device ubsa
+device uftdi
+device uipaq
+device uplcom
+device uslcom
+device uvisor
+device uvscom
+device uether
+device aue
+device axe
+device axge
+device cdce
+device cue
+device kue
+device rue
+device udav
+device ure
+device mos
+device uhso
+device rsu
+device rum
+device run
+device uath
+device upgt
+device ural
+device urndis
+device urtw
+device zyd
+device usie
+options USB_DEBUG
+options U3G_DEBUG
+options UKBD_DFLT_KEYMAP
+makeoptions UKBD_DFLT_KEYMAP=jp
+options UPLCOM_INTR_INTERVAL=100
+options UVSCOM_DEFAULT_OPKTSIZE=8
+options UVSCOM_INTR_INTERVAL=100
+device firewire
+device sbp
+device sbp_targ
+device fwe
+device fwip
+device dcons
+device dcons_crom
+options DCONS_BUF_SIZE=16384
+options DCONS_POLL_HZ=100
+options DCONS_FORCE_CONSOLE=0
+options DCONS_FORCE_GDB=1
+device crypto
+device cryptodev
+device rndtest
+device ccr
+device hifn
+options HIFN_DEBUG
+options HIFN_RNDTEST
+device ubsec
+options UBSEC_DEBUG
+options UBSEC_RNDTEST
+options INIT_PATH=/sbin/init:/rescue/init
+options BUS_DEBUG
+options DEBUG_VFS_LOCKS
+options SOCKBUF_DEBUG
+options IFMEDIA_DEBUG
+options VERBOSE_SYSINIT
+options SEMMNI=11
+options SEMMNS=61
+options SEMMNU=31
+options SEMMSL=61
+options SEMOPM=101
+options SEMUME=11
+options SHMALL=1025
+options SHMMAX=(SHMMAXPGS*PAGE_SIZE+1)
+options SHMMAXPGS=1025
+options SHMMIN=2
+options SHMMNI=33
+options SHMSEG=9
+options PANIC_REBOOT_WAIT_TIME=16
+options DIRECTIO
+options NSWBUF_MIN=120
+options CAM_DEBUG_DELAY
+options CLUSTERDEBUG
+options DEBUG
+options LOCKF_DEBUG
+options MSGMNB=2049
+options MSGMNI=41
+options MSGSEG=2049
+options MSGSSZ=16
+options MSGTQL=41
+options NBUF=512
+options SCSI_NCR_DEBUG
+options SCSI_NCR_MAX_SYNC=10000
+options SCSI_NCR_MAX_WIDE=1
+options SCSI_NCR_MYADDR=7
+options SC_DEBUG_LEVEL=5
+options SC_RENDER_DEBUG
+options VFS_BIO_DEBUG
+options KSTACK_MAX_PAGES=32
+options KSTACK_USAGE_PROF
+options AAC_DEBUG
+options RACCT
+options RCTL
+options BROOKTREE_ALLOC_PAGES=(217*4+1)
+options MAXFILES=999
+options RANDOM_ENABLE_UMA
+options IMAGACT_BINMISC
+options GZIO
+options BHND_LOGLEVEL
+device evdev
+options EVDEV_SUPPORT
+options EVDEV_DEBUG
+device uinput
+options UINPUT_DEBUG
+options EKCD
+machine arm
+cpu CPU_ARM9
+cpu CPU_ARM9E
+cpu CPU_FA526
+cpu CPU_XSCALE_81342
+cpu CPU_XSCALE_IXP425
+cpu CPU_XSCALE_IXP435
+cpu CPU_XSCALE_PXA2X0
+files "../at91/files.at91"
+files "../cavium/cns11xx/files.econa"
+files "../mv/files.mv"
+files "../mv/discovery/files.db78xxx"
+files "../mv/kirkwood/files.kirkwood"
+files "../mv/orion/files.db88f5xxx"
+files "../mv/orion/files.ts7800"
+files "../xscale/i8134x/files.crb"
+files "../xscale/i8134x/files.i81342"
+files "../xscale/ixp425/files.avila"
+files "../xscale/ixp425/files.ixp425"
+files "../xscale/pxa/files.pxa"
+options PHYSADDR=0x00000000
+options KERNVIRTADDR=0xc0000000
+makeoptions LDFLAGS="-zmuldefs"
+makeoptions KERNPHYSADDR=0x00000000
+makeoptions KERNVIRTADDR=0xc0000000
+options FDT
+options SOC_MV_DISCOVERY
+options SOC_MV_KIRKWOOD
+options SOC_MV_ORION
+options ARM_MANY_BOARD
+device at91_board_bwct
+device at91_board_ethernut5
+device at91_board_hl200
+device at91_board_hl201
+device at91_board_kb920x
+device at91_board_qila9g20
+device at91_board_sam9260ek
+device at91_board_sam9g20ek
+device at91_board_sam9x25ek
+device at91_board_tsc4370
+device at91rm9200
+device nand
+device twsi
+nooptions SMP
+nooptions MAXCPU
+nooptions COMPAT_FREEBSD4
+nooptions COMPAT_FREEBSD5
+nooptions COMPAT_FREEBSD6
+nooptions COMPAT_FREEBSD7
+nooptions COMPAT_FREEBSD9
+nooption PPC_PROBE_CHIPSET
+nodevice fdc
+nodevice sym
+nodevice ukbd
+nodevice sc
+nodevice blank_saver
+nodevice daemon_saver
+nodevice dragon_saver
+nodevice fade_saver
+nodevice fire_saver
+nodevice green_saver
+nodevice logo_saver
+nodevice rain_saver
+nodevice snake_saver
+nodevice star_saver
+nodevice warp_saver
+nodevice ccr
+nodevice cxgbe
+nodevice cxgbev
+nodevice snd_cmi
+options KDTRACE_HOOKS
Index: sys/arm64/arm64/bzero.S
===================================================================
--- sys/arm64/arm64/bzero.S
+++ sys/arm64/arm64/bzero.S
@@ -30,7 +30,7 @@
__FBSDID("$FreeBSD$");
-#include "assym.s"
+#include "assym.S"
/*
* void bzero(void *p, size_t size)
Index: sys/arm64/arm64/copyinout.S
===================================================================
--- sys/arm64/arm64/copyinout.S
+++ sys/arm64/arm64/copyinout.S
@@ -35,7 +35,7 @@
#include <machine/vmparam.h>
-#include "assym.s"
+#include "assym.S"
/*
* Fault handler for the copy{in,out} functions below.
Index: sys/arm64/arm64/exception.S
===================================================================
--- sys/arm64/arm64/exception.S
+++ sys/arm64/arm64/exception.S
@@ -28,7 +28,7 @@
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
-#include "assym.s"
+#include "assym.S"
.text
Index: sys/arm64/arm64/locore.S
===================================================================
--- sys/arm64/arm64/locore.S
+++ sys/arm64/arm64/locore.S
@@ -26,7 +26,7 @@
* $FreeBSD$
*/
-#include "assym.s"
+#include "assym.S"
#include "opt_kstack_pages.h"
#include <sys/syscall.h>
#include <machine/asm.h>
Index: sys/arm64/arm64/support.S
===================================================================
--- sys/arm64/arm64/support.S
+++ sys/arm64/arm64/support.S
@@ -36,7 +36,7 @@
#include <machine/param.h>
#include <machine/vmparam.h>
-#include "assym.s"
+#include "assym.S"
/*
* One of the fu* or su* functions failed, return -1.
Index: sys/arm64/arm64/swtch.S
===================================================================
--- sys/arm64/arm64/swtch.S
+++ sys/arm64/arm64/swtch.S
@@ -29,7 +29,7 @@
*
*/
-#include "assym.s"
+#include "assym.S"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
Index: sys/arm64/conf/GENERIC.orig
===================================================================
--- /dev/null
+++ sys/arm64/conf/GENERIC.orig
@@ -0,0 +1,249 @@
+#
+# GENERIC -- Generic kernel configuration file for FreeBSD/arm64
+#
+# For more information on this file, please read the config(5) manual page,
+# and/or the handbook section on Kernel Configuration Files:
+#
+# https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
+#
+# The handbook is also available locally in /usr/share/doc/handbook
+# if you've installed the doc distribution, otherwise always see the
+# FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the
+# latest information.
+#
+# An exhaustive list of options and more detailed explanations of the
+# device lines is also present in the ../../conf/NOTES and NOTES files.
+# If you are in doubt as to the purpose or necessity of a line, check first
+# in NOTES.
+#
+# $FreeBSD$
+
+cpu ARM64
+ident GENERIC
+
+makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols
+makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support
+
+options SCHED_ULE # ULE scheduler
+options PREEMPTION # Enable kernel thread preemption
+options VIMAGE # Subsystem virtualization, e.g. VNET
+options INET # InterNETworking
+options INET6 # IPv6 communications protocols
+options IPSEC # IP (v4/v6) security
+options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5
+options TCP_HHOOK # hhook(9) framework for TCP
+options TCP_OFFLOAD # TCP offload
+options SCTP # Stream Control Transmission Protocol
+options FFS # Berkeley Fast Filesystem
+options SOFTUPDATES # Enable FFS soft updates support
+options UFS_ACL # Support for access control lists
+options UFS_DIRHASH # Improve performance on big directories
+options UFS_GJOURNAL # Enable gjournal-based UFS journaling
+options QUOTA # Enable disk quotas for UFS
+options MD_ROOT # MD is a potential root device
+options NFSCL # Network Filesystem Client
+options NFSD # Network Filesystem Server
+options NFSLOCKD # Network Lock Manager
+options NFS_ROOT # NFS usable as /, requires NFSCL
+options MSDOSFS # MSDOS Filesystem
+options CD9660 # ISO 9660 Filesystem
+options PROCFS # Process filesystem (requires PSEUDOFS)
+options PSEUDOFS # Pseudo-filesystem framework
+options GEOM_PART_GPT # GUID Partition Tables.
+options GEOM_RAID # Soft RAID functionality.
+options GEOM_LABEL # Provides labelization
+options COMPAT_FREEBSD11 # Compatible with FreeBSD11
+options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI
+options KTRACE # ktrace(1) support
+options STACK # stack(9) support
+options SYSVSHM # SYSV-style shared memory
+options SYSVMSG # SYSV-style message queues
+options SYSVSEM # SYSV-style semaphores
+options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions
+options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed.
+options KBD_INSTALL_CDEV # install a CDEV entry in /dev
+options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4)
+options AUDIT # Security event auditing
+options CAPABILITY_MODE # Capsicum capability mode
+options CAPABILITIES # Capsicum capabilities
+options MAC # TrustedBSD MAC Framework
+options KDTRACE_FRAME # Ensure frames are compiled in
+options KDTRACE_HOOKS # Kernel DTrace hooks
+options VFP # Floating-point support
+options RACCT # Resource accounting framework
+options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default
+options RCTL # Resource limits
+options SMP
+options INTRNG
+
+# Debugging support. Always need this:
+options KDB # Enable kernel debugger support.
+options KDB_TRACE # Print a stack trace for a panic.
+# For full debugger support use (turn off in stable branch):
+options DDB # Support DDB.
+#options GDB # Support remote GDB.
+options DEADLKRES # Enable the deadlock resolver
+options INVARIANTS # Enable calls of extra sanity checking
+options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS
+options WITNESS # Enable checks to detect deadlocks and cycles
+options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed
+options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones
+
+# SoC support
+options SOC_ALLWINNER_A64
+options SOC_ALLWINNER_H5
+options SOC_CAVM_THUNDERX
+options SOC_HISI_HI6220
+options SOC_BRCM_BCM2837
+
+# Annapurna Alpine drivers
+device al_ccu # Alpine Cache Coherency Unit
+device al_nb_service # Alpine North Bridge Service
+device al_iofic # I/O Fabric Interrupt Controller
+device al_serdes # Serializer/Deserializer
+device al_udma # Universal DMA
+
+# VirtIO support
+device virtio
+device virtio_pci
+device virtio_mmio
+device virtio_blk
+device vtnet
+
+# CPU frequency control
+device cpufreq
+
+# Bus drivers
+device pci
+device al_pci # Annapurna Alpine PCI-E
+options PCI_HP # PCI-Express native HotPlug
+options PCI_IOV # PCI SR-IOV support
+
+# Ethernet NICs
+device mdio
+device mii
+device miibus # MII bus support
+device awg # Allwinner EMAC Gigabit Ethernet
+device axgbe # AMD Opteron A1100 integrated NIC
+device em # Intel PRO/1000 Gigabit Ethernet Family
+device ix # Intel 10Gb Ethernet Family
+device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet
+device neta # Marvell Armada 370/38x/XP/3700 NIC
+device smc # SMSC LAN91C111
+device vnic # Cavium ThunderX NIC
+device al_eth # Annapurna Alpine Ethernet NIC
+
+# Block devices
+device ahci
+device scbus
+device da
+
+# ATA/SCSI peripherals
+device pass # Passthrough device (direct ATA/SCSI access)
+
+# MMC/SD/SDIO Card slot support
+device sdhci
+device aw_mmc # Allwinner SD/MMC controller
+device mmc # mmc/sd bus
+device mmcsd # mmc/sd flash cards
+device dwmmc
+
+# Serial (COM) ports
+device uart # Generic UART driver
+device uart_mvebu # Armada 3700 UART driver
+device uart_ns8250 # ns8250-type UART driver
+device uart_snps
+device pl011
+
+# USB support
+options USB_DEBUG # enable debug msgs
+device aw_ehci # Allwinner EHCI USB interface (USB 2.0)
+device aw_usbphy # Allwinner USB PHY
+device dwcotg # DWC OTG controller
+device ohci # OHCI USB interface
+device ehci # EHCI USB interface (USB 2.0)
+device ehci_mv # Marvell EHCI USB interface
+device xhci # XHCI PCI->USB interface (USB 3.0)
+device xhci_mv # Marvell XHCI USB interface
+device usb # USB Bus (required)
+device ukbd # Keyboard
+device umass # Disks/Mass storage - Requires scbus and da
+
+# USB ethernet support
+device smcphy
+device smsc
+
+# GPIO
+device aw_gpio # Allwinner GPIO controller
+device gpio
+device gpioled
+device fdt_pinctrl
+
+# I2C
+device aw_rsb # Allwinner Reduced Serial Bus
+device bcm2835_bsc # Broadcom BCM283x I2C bus
+device iicbus
+device iic
+device twsi # Allwinner I2C controller
+
+# Clock and reset controllers
+device aw_ccu # Allwinner clock controller
+
+# Interrupt controllers
+device aw_nmi # Allwinner NMI support
+
+# Real-time clock support
+device aw_rtc # Allwinner Real-time Clock
+device mv_rtc # Marvell Real-time Clock
+
+# Watchdog controllers
+device aw_wdog # Allwinner Watchdog
+
+# Power management controllers
+device axp81x # X-Powers AXP81x PMIC
+
+# EFUSE
+device aw_sid # Allwinner Secure ID EFUSE
+
+# Thermal sensors
+device aw_thermal # Allwinner Thermal Sensor Controller
+
+# SPI
+device spibus
+device bcm2835_spi # Broadcom BCM283x SPI bus
+
+# Console
+device vt
+device kbdmux
+
+# Pseudo devices.
+device loop # Network loopback
+device random # Entropy device
+device ether # Ethernet support
+device vlan # 802.1Q VLAN support
+device tun # Packet tunnel.
+device md # Memory "disks"
+device gif # IPv6 and IPv4 tunneling
+device firmware # firmware assist module
+device psci # Support for ARM PSCI
+
+# EXT_RESOURCES pseudo devices
+options EXT_RESOURCES
+device clk
+device phy
+device hwreset
+device regulator
+
+# The `bpf' device enables the Berkeley Packet Filter.
+# Be aware of the administrative consequences of enabling this!
+# Note that 'bpf' is required for DHCP.
+device bpf # Berkeley packet filter
+
+# Chip-specific errata
+options THUNDERX_PASS_1_1_ERRATA
+
+options FDT
+device acpi
+
+# The crypto framework is required by IPSEC
+device crypto # Required by IPSEC
Index: sys/cddl/dev/dtrace/aarch64/dtrace_asm.S
===================================================================
--- sys/cddl/dev/dtrace/aarch64/dtrace_asm.S
+++ sys/cddl/dev/dtrace/aarch64/dtrace_asm.S
@@ -35,7 +35,7 @@
#include <machine/armreg.h>
#include <machine/asm.h>
-#include "assym.s"
+#include "assym.S"
/*
void dtrace_membar_producer(void)
Index: sys/cddl/dev/dtrace/amd64/dtrace_asm.S
===================================================================
--- sys/cddl/dev/dtrace/amd64/dtrace_asm.S
+++ sys/cddl/dev/dtrace/amd64/dtrace_asm.S
@@ -34,7 +34,7 @@
#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>
-#include "assym.s"
+#include "assym.S"
#define INTR_POP \
MEXITCOUNT; \
Index: sys/cddl/dev/dtrace/arm/dtrace_asm.S
===================================================================
--- sys/cddl/dev/dtrace/arm/dtrace_asm.S
+++ sys/cddl/dev/dtrace/arm/dtrace_asm.S
@@ -35,7 +35,7 @@
#include <machine/asm.h>
#include <machine/armreg.h>
-#include "assym.s"
+#include "assym.S"
/*
void dtrace_membar_producer(void)
Index: sys/cddl/dev/dtrace/i386/dtrace_asm.S
===================================================================
--- sys/cddl/dev/dtrace/i386/dtrace_asm.S
+++ sys/cddl/dev/dtrace/i386/dtrace_asm.S
@@ -32,7 +32,7 @@
#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>
-#include "assym.s"
+#include "assym.S"
ENTRY(dtrace_invop_start)
Index: sys/cddl/dev/dtrace/powerpc/dtrace_asm.S
===================================================================
--- sys/cddl/dev/dtrace/powerpc/dtrace_asm.S
+++ sys/cddl/dev/dtrace/powerpc/dtrace_asm.S
@@ -28,7 +28,7 @@
* Use is subject to license terms.
*/
-#include "assym.s"
+#include "assym.S"
#define _ASM
Index: sys/cddl/dev/dtrace/riscv/dtrace_asm.S
===================================================================
--- sys/cddl/dev/dtrace/riscv/dtrace_asm.S
+++ sys/cddl/dev/dtrace/riscv/dtrace_asm.S
@@ -37,7 +37,7 @@
#include <machine/riscvreg.h>
#include <machine/asm.h>
-#include "assym.s"
+#include "assym.S"
/*
void dtrace_membar_producer(void)
Index: sys/conf/files.amd64
===================================================================
--- sys/conf/files.amd64
+++ sys/conf/files.amd64
@@ -105,7 +105,7 @@
#
amd64/acpica/acpi_machdep.c optional acpi
acpi_wakecode.o optional acpi \
- dependency "$S/amd64/acpica/acpi_wakecode.S assym.s" \
+ dependency "$S/amd64/acpica/acpi_wakecode.S assym.S" \
compile-with "${NORMAL_S}" \
no-obj no-implicit-rule before-depend \
clean "acpi_wakecode.o"
Index: sys/conf/files.i386
===================================================================
--- sys/conf/files.i386
+++ sys/conf/files.i386
@@ -439,7 +439,7 @@
dev/isci/scil/scif_sas_timer.c optional isci
i386/acpica/acpi_machdep.c optional acpi
acpi_wakecode.o optional acpi \
- dependency "$S/i386/acpica/acpi_wakecode.S assym.s" \
+ dependency "$S/i386/acpica/acpi_wakecode.S assym.S" \
compile-with "${NORMAL_S}" \
no-obj no-implicit-rule before-depend \
clean "acpi_wakecode.o"
Index: sys/conf/kern.post.mk
===================================================================
--- sys/conf/kern.post.mk
+++ sys/conf/kern.post.mk
@@ -154,7 +154,7 @@
.endif
${SYSTEM_LD_TAIL}
-OBJS_DEPEND_GUESS+= assym.s vnode_if.h ${BEFORE_DEPEND:M*.h} \
+OBJS_DEPEND_GUESS+= assym.S vnode_if.h ${BEFORE_DEPEND:M*.h} \
${MFILES:T:S/.m$/.h/}
.for mfile in ${MFILES}
@@ -183,7 +183,7 @@
${CC} ${HACK_EXTRA_FLAGS} -nostdlib hack.c -o hack.pico
rm -f hack.c
-assym.s: $S/kern/genassym.sh genassym.o
+assym.S: $S/kern/genassym.sh genassym.o
NM='${NM}' NMFLAGS='${NMFLAGS}' sh $S/kern/genassym.sh genassym.o > ${.TARGET}
genassym.o: $S/$M/$M/genassym.c
@@ -209,7 +209,7 @@
.endif
kernel-depend: .depend
-SRCS= assym.s vnode_if.h ${BEFORE_DEPEND} ${CFILES} \
+SRCS= assym.S vnode_if.h ${BEFORE_DEPEND} ${CFILES} \
${SYSTEM_CFILES} ${GEN_CFILES} ${SFILES} \
${MFILES:T:S/.m$/.h/}
DEPENDOBJS+= ${SYSTEM_OBJS} genassym.o
Index: sys/conf/kmod.mk
===================================================================
--- sys/conf/kmod.mk
+++ sys/conf/kmod.mk
@@ -454,14 +454,14 @@
${AWK} -f ${SYSDIR}/tools/acpi_quirks2h.awk ${SYSDIR}/dev/acpica/acpi_quirks
.endif
-.if !empty(SRCS:Massym.s) || !empty(DPSRCS:Massym.s)
-CLEANFILES+= assym.s genassym.o
+.if !empty(SRCS:Massym.S) || !empty(DPSRCS:Massym.S)
+CLEANFILES+= assym.S genassym.o
DEPENDOBJS+= genassym.o
-assym.s: genassym.o
+assym.S: genassym.o
.if defined(KERNBUILDDIR)
genassym.o: opt_global.h
.endif
-assym.s: ${SYSDIR}/kern/genassym.sh
+assym.S: ${SYSDIR}/kern/genassym.sh
sh ${SYSDIR}/kern/genassym.sh genassym.o > ${.TARGET}
genassym.o: ${SYSDIR}/${MACHINE}/${MACHINE}/genassym.c
genassym.o: ${SRCS:Mopt_*.h}
Index: sys/dev/hyperv/vmbus/amd64/vmbus_vector.S
===================================================================
--- sys/dev/hyperv/vmbus/amd64/vmbus_vector.S
+++ sys/dev/hyperv/vmbus/amd64/vmbus_vector.S
@@ -26,7 +26,7 @@
* $FreeBSD$
*/
-#include "assym.s"
+#include "assym.S"
#include <machine/asmacros.h>
#include <machine/specialreg.h>
Index: sys/dev/hyperv/vmbus/i386/vmbus_vector.S
===================================================================
--- sys/dev/hyperv/vmbus/i386/vmbus_vector.S
+++ sys/dev/hyperv/vmbus/i386/vmbus_vector.S
@@ -29,7 +29,7 @@
#include <machine/asmacros.h>
#include <machine/specialreg.h>
-#include "assym.s"
+#include "assym.S"
/*
* This is the Hyper-V vmbus channel direct callback interrupt.
Index: sys/i386/acpica/acpi_wakecode.S
===================================================================
--- sys/i386/acpica/acpi_wakecode.S
+++ sys/i386/acpica/acpi_wakecode.S
@@ -34,7 +34,7 @@
#include <machine/specialreg.h>
#include <machine/timerreg.h>
-#include "assym.s"
+#include "assym.S"
/*
* Resume entry point. The BIOS enters here in real mode after POST with
Index: sys/i386/i386/apic_vector.s
===================================================================
--- sys/i386/i386/apic_vector.s
+++ sys/i386/i386/apic_vector.s
@@ -42,7 +42,7 @@
#include <machine/specialreg.h>
#include <x86/apicreg.h>
-#include "assym.s"
+#include "assym.S"
.text
SUPERALIGN_TEXT
Index: sys/i386/i386/apic_vector.s.orig
===================================================================
--- /dev/null
+++ sys/i386/i386/apic_vector.s.orig
@@ -0,0 +1,314 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: vector.s, 386BSD 0.1 unknown origin
+ * $FreeBSD$
+ */
+
+/*
+ * Interrupt entry points for external interrupts triggered by I/O APICs
+ * as well as IPI handlers.
+ */
+
+#include "opt_smp.h"
+
+#include <machine/asmacros.h>
+#include <machine/specialreg.h>
+#include <x86/apicreg.h>
+
+#include "assym.S"
+
+ .text
+ SUPERALIGN_TEXT
+ /* End Of Interrupt to APIC */
+as_lapic_eoi:
+ cmpl $0,x2apic_mode
+ jne 1f
+ movl lapic_map,%eax
+ movl $0,LA_EOI(%eax)
+ ret
+1:
+ movl $MSR_APIC_EOI,%ecx
+ xorl %eax,%eax
+ xorl %edx,%edx
+ wrmsr
+ ret
+
+/*
+ * I/O Interrupt Entry Point. Rather than having one entry point for
+ * each interrupt source, we use one entry point for each 32-bit word
+ * in the ISR. The handler determines the highest bit set in the ISR,
+ * translates that into a vector, and passes the vector to the
+ * lapic_handle_intr() function.
+ */
+#define ISR_VEC(index, vec_name) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ SET_KERNEL_SREGS ; \
+ cld ; \
+ FAKE_MCOUNT(TF_EIP(%esp)) ; \
+ cmpl $0,x2apic_mode ; \
+ je 1f ; \
+ movl $(MSR_APIC_ISR0 + index),%ecx ; \
+ rdmsr ; \
+ jmp 2f ; \
+1: ; \
+ movl lapic_map, %edx ;/* pointer to local APIC */ \
+ movl LA_ISR + 16 * (index)(%edx), %eax ; /* load ISR */ \
+2: ; \
+ bsrl %eax, %eax ; /* index of highest set bit in ISR */ \
+ jz 3f ; \
+ addl $(32 * index),%eax ; \
+ pushl %esp ; \
+ pushl %eax ; /* pass the IRQ */ \
+ call lapic_handle_intr ; \
+ addl $8, %esp ; /* discard parameter */ \
+3: ; \
+ MEXITCOUNT ; \
+ jmp doreti
+
+/*
+ * Handle "spurious INTerrupts".
+ * Notes:
+ * This is different than the "spurious INTerrupt" generated by an
+ * 8259 PIC for missing INTs. See the APIC documentation for details.
+ * This routine should NOT do an 'EOI' cycle.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(spuriousint)
+
+ /* No EOI cycle used here */
+
+ iret
+
+ ISR_VEC(1, apic_isr1)
+ ISR_VEC(2, apic_isr2)
+ ISR_VEC(3, apic_isr3)
+ ISR_VEC(4, apic_isr4)
+ ISR_VEC(5, apic_isr5)
+ ISR_VEC(6, apic_isr6)
+ ISR_VEC(7, apic_isr7)
+
+/*
+ * Local APIC periodic timer handler.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(timerint)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+ FAKE_MCOUNT(TF_EIP(%esp))
+ pushl %esp
+ call lapic_handle_timer
+ add $4, %esp
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Local APIC CMCI handler.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(cmcint)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+ FAKE_MCOUNT(TF_EIP(%esp))
+ call lapic_handle_cmc
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Local APIC error interrupt handler.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(errorint)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+ FAKE_MCOUNT(TF_EIP(%esp))
+ call lapic_handle_error
+ MEXITCOUNT
+ jmp doreti
+
+#ifdef XENHVM
+/*
+ * Xen event channel upcall interrupt handler.
+ * Only used when the hypervisor supports direct vector callbacks.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(xen_intr_upcall)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+ FAKE_MCOUNT(TF_EIP(%esp))
+ pushl %esp
+ call xen_intr_handle_upcall
+ add $4, %esp
+ MEXITCOUNT
+ jmp doreti
+#endif
+
+#ifdef SMP
+/*
+ * Global address space TLB shootdown.
+ */
+ .text
+ SUPERALIGN_TEXT
+invltlb_ret:
+ call as_lapic_eoi
+ jmp doreti
+
+ SUPERALIGN_TEXT
+IDTVEC(invltlb)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+
+ call invltlb_handler
+
+ jmp invltlb_ret
+
+/*
+ * Single page TLB shootdown
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(invlpg)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+
+ call invlpg_handler
+
+ jmp invltlb_ret
+
+/*
+ * Page range TLB shootdown.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(invlrng)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+
+ call invlrng_handler
+
+ jmp invltlb_ret
+
+/*
+ * Invalidate cache.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(invlcache)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+
+ call invlcache_handler
+
+ jmp invltlb_ret
+
+/*
+ * Handler for IPIs sent via the per-cpu IPI bitmap.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(ipi_intr_bitmap_handler)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+
+ call as_lapic_eoi
+
+ FAKE_MCOUNT(TF_EIP(%esp))
+
+ call ipi_bitmap_handler
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Executed by a CPU when it receives an IPI_STOP from another CPU.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(cpustop)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+
+ call as_lapic_eoi
+ call cpustop_handler
+ jmp doreti
+
+/*
+ * Executed by a CPU when it receives an IPI_SUSPEND from another CPU.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(cpususpend)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+
+ call as_lapic_eoi
+ call cpususpend_handler
+ jmp doreti
+
+/*
+ * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
+ *
+ * - Calls the generic rendezvous action function.
+ */
+ .text
+ SUPERALIGN_TEXT
+IDTVEC(rendezvous)
+ PUSH_FRAME
+ SET_KERNEL_SREGS
+ cld
+
+#ifdef COUNT_IPIS
+ movl PCPU(CPUID), %eax
+ movl ipi_rendezvous_counts(,%eax,4), %eax
+ incl (%eax)
+#endif
+ call smp_rendezvous_action
+
+ call as_lapic_eoi
+ jmp doreti
+
+#endif /* SMP */
Index: sys/i386/i386/atpic_vector.s
===================================================================
--- sys/i386/i386/atpic_vector.s
+++ sys/i386/i386/atpic_vector.s
@@ -38,7 +38,7 @@
#include <machine/asmacros.h>
-#include "assym.s"
+#include "assym.S"
/*
* Macros for interrupt entry, call to handler, and exit.
Index: sys/i386/i386/atpic_vector.s.orig
===================================================================
--- /dev/null
+++ sys/i386/i386/atpic_vector.s.orig
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: vector.s, 386BSD 0.1 unknown origin
+ * $FreeBSD$
+ */
+
+/*
+ * Interrupt entry points for external interrupts triggered by the 8259A
+ * master and slave interrupt controllers.
+ */
+
+#include <machine/asmacros.h>
+
+#include "assym.S"
+
+/*
+ * Macros for interrupt entry, call to handler, and exit.
+ */
+#define INTR(irq_num, vec_name) \
+ .text ; \
+ SUPERALIGN_TEXT ; \
+IDTVEC(vec_name) ; \
+ PUSH_FRAME ; \
+ SET_KERNEL_SREGS ; \
+ cld ; \
+; \
+ FAKE_MCOUNT(TF_EIP(%esp)) ; \
+ pushl %esp ; \
+ pushl $irq_num; /* pass the IRQ */ \
+ call atpic_handle_intr ; \
+ addl $8, %esp ; /* discard the parameters */ \
+; \
+ MEXITCOUNT ; \
+ jmp doreti
+
+ INTR(0, atpic_intr0)
+ INTR(1, atpic_intr1)
+ INTR(2, atpic_intr2)
+ INTR(3, atpic_intr3)
+ INTR(4, atpic_intr4)
+ INTR(5, atpic_intr5)
+ INTR(6, atpic_intr6)
+ INTR(7, atpic_intr7)
+ INTR(8, atpic_intr8)
+ INTR(9, atpic_intr9)
+ INTR(10, atpic_intr10)
+ INTR(11, atpic_intr11)
+ INTR(12, atpic_intr12)
+ INTR(13, atpic_intr13)
+ INTR(14, atpic_intr14)
+ INTR(15, atpic_intr15)
Index: sys/i386/i386/bioscall.s
===================================================================
--- sys/i386/i386/bioscall.s
+++ sys/i386/i386/bioscall.s
@@ -32,7 +32,7 @@
#include <machine/asmacros.h>
-#include "assym.s"
+#include "assym.S"
.data
ALIGN_DATA
Index: sys/i386/i386/exception.s
===================================================================
--- sys/i386/i386/exception.s
+++ sys/i386/i386/exception.s
@@ -42,7 +42,7 @@
#include <machine/psl.h>
#include <machine/trap.h>
-#include "assym.s"
+#include "assym.S"
#define SEL_RPL_MASK 0x0003
#define GSEL_KPL 0x0020 /* GSEL(GCODE_SEL, SEL_KPL) */
Index: sys/i386/i386/exception.s.orig
===================================================================
--- /dev/null
+++ sys/i386/i386/exception.s.orig
@@ -0,0 +1,514 @@
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz.
+ * Copyright (c) 1990 The Regents of the University of California.
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_apic.h"
+#include "opt_atpic.h"
+#include "opt_hwpmc_hooks.h"
+
+#include <machine/asmacros.h>
+#include <machine/psl.h>
+#include <machine/trap.h>
+
+#include "assym.S"
+
+#define SEL_RPL_MASK 0x0003
+#define GSEL_KPL 0x0020 /* GSEL(GCODE_SEL, SEL_KPL) */
+
+#ifdef KDTRACE_HOOKS
+ .bss
+ .globl dtrace_invop_jump_addr
+ .align 4
+ .type dtrace_invop_jump_addr, @object
+ .size dtrace_invop_jump_addr, 4
+dtrace_invop_jump_addr:
+ .zero 4
+ .globl dtrace_invop_calltrap_addr
+ .align 4
+ .type dtrace_invop_calltrap_addr, @object
+ .size dtrace_invop_calltrap_addr, 4
+dtrace_invop_calltrap_addr:
+ .zero 8
+#endif
+ .text
+#ifdef HWPMC_HOOKS
+ ENTRY(start_exceptions)
+#endif
+/*****************************************************************************/
+/* Trap handling */
+/*****************************************************************************/
+/*
+ * Trap and fault vector routines.
+ *
+ * Most traps are 'trap gates', SDT_SYS386TGT. A trap gate pushes state on
+ * the stack that mostly looks like an interrupt, but does not disable
+ * interrupts. A few of the traps we are use are interrupt gates,
+ * SDT_SYS386IGT, which are nearly the same thing except interrupts are
+ * disabled on entry.
+ *
+ * The cpu will push a certain amount of state onto the kernel stack for
+ * the current process. The amount of state depends on the type of trap
+ * and whether the trap crossed rings or not. See i386/include/frame.h.
+ * At the very least the current EFLAGS (status register, which includes
+ * the interrupt disable state prior to the trap), the code segment register,
+ * and the return instruction pointer are pushed by the cpu. The cpu
+ * will also push an 'error' code for certain traps. We push a dummy
+ * error code for those traps where the cpu doesn't in order to maintain
+ * a consistent frame. We also push a contrived 'trap number'.
+ *
+ * The cpu does not push the general registers, we must do that, and we
+ * must restore them prior to calling 'iret'. The cpu adjusts the %cs and
+ * %ss segment registers, but does not mess with %ds, %es, or %fs. Thus we
+ * must load them with appropriate values for supervisor mode operation.
+ */
+
+MCOUNT_LABEL(user)
+MCOUNT_LABEL(btrap)
+
+#define TRAP(a) pushl $(a) ; jmp alltraps
+
+IDTVEC(div)
+ pushl $0; TRAP(T_DIVIDE)
+IDTVEC(dbg)
+ pushl $0; TRAP(T_TRCTRAP)
+IDTVEC(nmi)
+ pushl $0; TRAP(T_NMI)
+IDTVEC(bpt)
+ pushl $0; TRAP(T_BPTFLT)
+IDTVEC(dtrace_ret)
+ pushl $0; TRAP(T_DTRACE_RET)
+IDTVEC(ofl)
+ pushl $0; TRAP(T_OFLOW)
+IDTVEC(bnd)
+ pushl $0; TRAP(T_BOUND)
+#ifndef KDTRACE_HOOKS
+IDTVEC(ill)
+ pushl $0; TRAP(T_PRIVINFLT)
+#endif
+IDTVEC(dna)
+ pushl $0; TRAP(T_DNA)
+IDTVEC(fpusegm)
+ pushl $0; TRAP(T_FPOPFLT)
+IDTVEC(tss)
+ TRAP(T_TSSFLT)
+IDTVEC(missing)
+ TRAP(T_SEGNPFLT)
+IDTVEC(stk)
+ TRAP(T_STKFLT)
+IDTVEC(prot)
+ TRAP(T_PROTFLT)
+IDTVEC(page)
+ TRAP(T_PAGEFLT)
+IDTVEC(mchk)
+ pushl $0; TRAP(T_MCHK)
+IDTVEC(rsvd)
+ pushl $0; TRAP(T_RESERVED)
+IDTVEC(fpu)
+ pushl $0; TRAP(T_ARITHTRAP)
+IDTVEC(align)
+ TRAP(T_ALIGNFLT)
+IDTVEC(xmm)
+ pushl $0; TRAP(T_XMMFLT)
+
+ /*
+ * All traps except ones for syscalls jump to alltraps. If
+ * interrupts were enabled when the trap occurred, then interrupts
+ * are enabled now if the trap was through a trap gate, else
+ * disabled if the trap was through an interrupt gate. Note that
+ * int0x80_syscall is a trap gate. Interrupt gates are used by
+ * page faults, non-maskable interrupts, debug and breakpoint
+ * exceptions.
+ */
+ SUPERALIGN_TEXT
+ .globl alltraps
+ .type alltraps,@function
+alltraps:
+ pushal
+ pushl $0
+ movw %ds,(%esp)
+ pushl $0
+ movw %es,(%esp)
+ pushl $0
+ movw %fs,(%esp)
+alltraps_with_regs_pushed:
+ SET_KERNEL_SREGS
+ cld
+ FAKE_MCOUNT(TF_EIP(%esp))
+calltrap:
+ pushl %esp
+ call trap
+ add $4, %esp
+
+ /*
+ * Return via doreti to handle ASTs.
+ */
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Privileged instruction fault.
+ */
+#ifdef KDTRACE_HOOKS
+ SUPERALIGN_TEXT
+IDTVEC(ill)
+ /*
+ * Check if a DTrace hook is registered. The default (data) segment
+ * cannot be used for this since %ds is not known good until we
+ * verify that the entry was from kernel mode.
+ */
+ cmpl $0,%ss:dtrace_invop_jump_addr
+ je norm_ill
+
+ /*
+ * Check if this is a user fault. If so, just handle it as a normal
+ * trap.
+ */
+ cmpl $GSEL_KPL, 4(%esp) /* Check the code segment */
+ jne norm_ill
+ testl $PSL_VM, 8(%esp) /* and vm86 mode. */
+ jnz norm_ill
+
+ /*
+ * This is a kernel instruction fault that might have been caused
+ * by a DTrace provider.
+ */
+ pushal
+ cld
+
+ /*
+ * Set our jump address for the jump back in the event that
+ * the exception wasn't caused by DTrace at all.
+ */
+ movl $norm_ill, dtrace_invop_calltrap_addr
+
+ /* Jump to the code hooked in by DTrace. */
+ jmpl *dtrace_invop_jump_addr
+
+ /*
+ * Process the instruction fault in the normal way.
+ */
+norm_ill:
+ pushl $0
+ TRAP(T_PRIVINFLT)
+#endif
+
+/*
+ * Call gate entry for syscalls (lcall 7,0).
+ * This is used by FreeBSD 1.x a.out executables and "old" NetBSD executables.
+ *
+ * The intersegment call has been set up to specify one dummy parameter.
+ * This leaves a place to put eflags so that the call frame can be
+ * converted to a trap frame. Note that the eflags is (semi-)bogusly
+ * pushed into (what will be) tf_err and then copied later into the
+ * final spot. It has to be done this way because esp can't be just
+ * temporarily altered for the pushfl - an interrupt might come in
+ * and clobber the saved cs/eip.
+ */
+ SUPERALIGN_TEXT
+IDTVEC(lcall_syscall)
+ pushfl /* save eflags */
+ popl 8(%esp) /* shuffle into tf_eflags */
+ pushl $7 /* sizeof "lcall 7,0" */
+ pushl $0 /* tf_trapno */
+ pushal
+ pushl $0
+ movw %ds,(%esp)
+ pushl $0
+ movw %es,(%esp)
+ pushl $0
+ movw %fs,(%esp)
+ SET_KERNEL_SREGS
+ cld
+ FAKE_MCOUNT(TF_EIP(%esp))
+ pushl %esp
+ call syscall
+ add $4, %esp
+ MEXITCOUNT
+ jmp doreti
+
+/*
+ * Trap gate entry for syscalls (int 0x80).
+ * This is used by FreeBSD ELF executables, "new" NetBSD executables, and all
+ * Linux executables.
+ *
+ * Even though the name says 'int0x80', this is actually a trap gate, not an
+ * interrupt gate. Thus interrupts are enabled on entry just as they are for
+ * a normal syscall.
+ */
+ SUPERALIGN_TEXT
+IDTVEC(int0x80_syscall)
+ pushl $2 /* sizeof "int 0x80" */
+ pushl $0 /* tf_trapno */
+ pushal
+ pushl $0
+ movw %ds,(%esp)
+ pushl $0
+ movw %es,(%esp)
+ pushl $0
+ movw %fs,(%esp)
+ SET_KERNEL_SREGS
+ cld
+ FAKE_MCOUNT(TF_EIP(%esp))
+ pushl %esp
+ call syscall
+ add $4, %esp
+ MEXITCOUNT
+ jmp doreti
+
+ENTRY(fork_trampoline)
+ pushl %esp /* trapframe pointer */
+ pushl %ebx /* arg1 */
+ pushl %esi /* function */
+ call fork_exit
+ addl $12,%esp
+ /* cut from syscall */
+
+ /*
+ * Return via doreti to handle ASTs.
+ */
+ MEXITCOUNT
+ jmp doreti
+
+
+/*
+ * To efficiently implement classification of trap and interrupt handlers
+ * for profiling, there must be only trap handlers between the labels btrap
+ * and bintr, and only interrupt handlers between the labels bintr and
+ * eintr. This is implemented (partly) by including files that contain
+ * some of the handlers. Before including the files, set up a normal asm
+ * environment so that the included files don't need to know that they are
+ * included.
+ */
+
+ .data
+ .p2align 4
+ .text
+ SUPERALIGN_TEXT
+MCOUNT_LABEL(bintr)
+
+#ifdef DEV_ATPIC
+#include <i386/i386/atpic_vector.s>
+#endif
+
+#if defined(DEV_APIC) && defined(DEV_ATPIC)
+ .data
+ .p2align 4
+ .text
+ SUPERALIGN_TEXT
+#endif
+
+#ifdef DEV_APIC
+#include <i386/i386/apic_vector.s>
+#endif
+
+ .data
+ .p2align 4
+ .text
+ SUPERALIGN_TEXT
+#include <i386/i386/vm86bios.s>
+
+ .text
+MCOUNT_LABEL(eintr)
+
+/*
+ * void doreti(struct trapframe)
+ *
+ * Handle return from interrupts, traps and syscalls.
+ */
+ .text
+ SUPERALIGN_TEXT
+ .type doreti,@function
+ .globl doreti
+doreti:
+ FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */
+doreti_next:
+ /*
+ * Check if ASTs can be handled now. ASTs cannot be safely
+ * processed when returning from an NMI.
+ */
+ cmpb $T_NMI,TF_TRAPNO(%esp)
+#ifdef HWPMC_HOOKS
+ je doreti_nmi
+#else
+ je doreti_exit
+#endif
+ /*
+ * PSL_VM must be checked first since segment registers only
+ * have an RPL in non-VM86 mode.
+ * ASTs can not be handled now if we are in a vm86 call.
+ */
+ testl $PSL_VM,TF_EFLAGS(%esp)
+ jz doreti_notvm86
+ movl PCPU(CURPCB),%ecx
+ testl $PCB_VM86CALL,PCB_FLAGS(%ecx)
+ jz doreti_ast
+ jmp doreti_exit
+
+doreti_notvm86:
+ testb $SEL_RPL_MASK,TF_CS(%esp) /* are we returning to user mode? */
+ jz doreti_exit /* can't handle ASTs now if not */
+
+doreti_ast:
+ /*
+ * Check for ASTs atomically with returning. Disabling CPU
+ * interrupts provides sufficient locking even in the SMP case,
+ * since we will be informed of any new ASTs by an IPI.
+ */
+ cli
+ movl PCPU(CURTHREAD),%eax
+ testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax)
+ je doreti_exit
+ sti
+ pushl %esp /* pass a pointer to the trapframe */
+ call ast
+ add $4,%esp
+ jmp doreti_ast
+
+ /*
+ * doreti_exit: pop registers, iret.
+ *
+ * The segment register pop is a special case, since it may
+ * fault if (for example) a sigreturn specifies bad segment
+ * registers. The fault is handled in trap.c.
+ */
+doreti_exit:
+ MEXITCOUNT
+
+ .globl doreti_popl_fs
+doreti_popl_fs:
+ popl %fs
+ .globl doreti_popl_es
+doreti_popl_es:
+ popl %es
+ .globl doreti_popl_ds
+doreti_popl_ds:
+ popl %ds
+ popal
+ addl $8,%esp
+ .globl doreti_iret
+doreti_iret:
+ iret
+
+ /*
+ * doreti_iret_fault and friends. Alternative return code for
+ * the case where we get a fault in the doreti_exit code
+ * above. trap() (i386/i386/trap.c) catches this specific
+ * case, and continues in the corresponding place in the code
+ * below.
+ *
+ * If the fault occurred during return to usermode, we recreate
+ * the trap frame and call trap() to send a signal. Otherwise
+ * the kernel was tricked into fault by attempt to restore invalid
+ * usermode segment selectors on return from nested fault or
+ * interrupt, where interrupted kernel entry code not yet loaded
+ * kernel selectors. In the latter case, emulate iret and zero
+ * the invalid selector.
+ */
+ ALIGN_TEXT
+ .globl doreti_iret_fault
+doreti_iret_fault:
+ subl $8,%esp
+ pushal
+ pushl $0
+ movw %ds,(%esp)
+ .globl doreti_popl_ds_fault
+doreti_popl_ds_fault:
+ testb $SEL_RPL_MASK,TF_CS-TF_DS(%esp)
+ jz doreti_popl_ds_kfault
+ pushl $0
+ movw %es,(%esp)
+ .globl doreti_popl_es_fault
+doreti_popl_es_fault:
+ testb $SEL_RPL_MASK,TF_CS-TF_ES(%esp)
+ jz doreti_popl_es_kfault
+ pushl $0
+ movw %fs,(%esp)
+ .globl doreti_popl_fs_fault
+doreti_popl_fs_fault:
+ testb $SEL_RPL_MASK,TF_CS-TF_FS(%esp)
+ jz doreti_popl_fs_kfault
+ sti
+ movl $0,TF_ERR(%esp) /* XXX should be the error code */
+ movl $T_PROTFLT,TF_TRAPNO(%esp)
+ jmp alltraps_with_regs_pushed
+
+doreti_popl_ds_kfault:
+ movl $0,(%esp)
+ jmp doreti_popl_ds
+doreti_popl_es_kfault:
+ movl $0,(%esp)
+ jmp doreti_popl_es
+doreti_popl_fs_kfault:
+ movl $0,(%esp)
+ jmp doreti_popl_fs
+
+#ifdef HWPMC_HOOKS
+doreti_nmi:
+ /*
+ * Since we are returning from an NMI, check if the current trap
+ * was from user mode and if so whether the current thread
+ * needs a user call chain capture.
+ */
+ testb $SEL_RPL_MASK,TF_CS(%esp)
+ jz doreti_exit
+ movl PCPU(CURTHREAD),%eax /* curthread present? */
+ orl %eax,%eax
+ jz doreti_exit
+ testl $TDP_CALLCHAIN,TD_PFLAGS(%eax) /* flagged for capture? */
+ jz doreti_exit
+ /*
+ * Take the processor out of NMI mode by executing a fake "iret".
+ */
+ pushfl
+ pushl %cs
+ pushl $outofnmi
+ iret
+outofnmi:
+ /*
+ * Call the callchain capture hook after turning interrupts back on.
+ */
+ movl pmc_hook,%ecx
+ orl %ecx,%ecx
+ jz doreti_exit
+ pushl %esp /* frame pointer */
+ pushl $PMC_FN_USER_CALLCHAIN /* command */
+ movl PCPU(CURTHREAD),%eax
+ pushl %eax /* curthread */
+ sti
+ call *%ecx
+ addl $12,%esp
+ jmp doreti_ast
+ ENTRY(end_exceptions)
+#endif
Index: sys/i386/i386/locore.s
===================================================================
--- sys/i386/i386/locore.s
+++ sys/i386/i386/locore.s
@@ -53,7 +53,7 @@
#include <machine/pmap.h>
#include <machine/specialreg.h>
-#include "assym.s"
+#include "assym.S"
/*
* XXX
Index: sys/i386/i386/mpboot.s
===================================================================
--- sys/i386/i386/mpboot.s
+++ sys/i386/i386/mpboot.s
@@ -35,7 +35,7 @@
#include <x86/apicreg.h>
#include <machine/specialreg.h>
-#include "assym.s"
+#include "assym.S"
#define R(x) ((x)-KERNBASE)
Index: sys/i386/i386/pmap.c.orig
===================================================================
--- /dev/null
+++ sys/i386/i386/pmap.c.orig
@@ -0,0 +1,5684 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ */
+/*-
+ * Copyright (c) 2003 Networks Associates Technology, Inc.
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Jake Burkholder,
+ * Safeport Network Services, and Network Associates Laboratories, the
+ * Security Research Division of Network Associates, Inc. under
+ * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
+ * CHATS research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Manages physical address maps.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include "opt_apic.h"
+#include "opt_cpu.h"
+#include "opt_pmap.h"
+#include "opt_smp.h"
+#include "opt_vm.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/msgbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/sf_buf.h>
+#include <sys/sx.h>
+#include <sys/vmmeter.h>
+#include <sys/sched.h>
+#include <sys/sysctl.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_radix.h>
+#include <vm/vm_reserv.h>
+#include <vm/uma.h>
+
+#ifdef DEV_APIC
+#include <sys/bus.h>
+#include <machine/intr_machdep.h>
+#include <x86/apicvar.h>
+#endif
+#include <machine/cpu.h>
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/specialreg.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+
+#ifndef PMAP_SHPGPERPROC
+#define PMAP_SHPGPERPROC 200
+#endif
+
+#if !defined(DIAGNOSTIC)
+#ifdef __GNUC_GNU_INLINE__
+#define PMAP_INLINE __attribute__((__gnu_inline__)) inline
+#else
+#define PMAP_INLINE extern inline
+#endif
+#else
+#define PMAP_INLINE
+#endif
+
+#ifdef PV_STATS
+#define PV_STAT(x) do { x ; } while (0)
+#else
+#define PV_STAT(x) do { } while (0)
+#endif
+
+#define pa_index(pa) ((pa) >> PDRSHIFT)
+#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+
+/*
+ * Get PDEs and PTEs for user/kernel address space
+ */
+#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
+#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
+
+#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
+#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
+#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
+#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0)
+#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
+
+#define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
+ atomic_clear_int((u_int *)(pte), PG_W))
+#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
+
+struct pmap kernel_pmap_store;
+LIST_HEAD(pmaplist, pmap);
+static struct pmaplist allpmaps;
+static struct mtx allpmaps_lock;
+
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+int pgeflag = 0; /* PG_G or-in */
+int pseflag = 0; /* PG_PS or-in */
+
+static int nkpt = NKPT;
+vm_offset_t kernel_vm_end = KERNBASE + NKPT * NBPDR;
+extern u_int32_t KERNend;
+extern u_int32_t KPTphys;
+
+#if defined(PAE) || defined(PAE_TABLES)
+pt_entry_t pg_nx;
+static uma_zone_t pdptzone;
+#endif
+
+static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
+
+static int pat_works = 1;
+SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
+ "Is page attribute table fully functional?");
+
+static int pg_ps_enabled = 1;
+SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
+ &pg_ps_enabled, 0, "Are large page mappings enabled?");
+
+#define PAT_INDEX_SIZE 8
+static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
+
+/*
+ * pmap_mapdev support pre initialization (i.e. console)
+ */
+#define PMAP_PREINIT_MAPPING_COUNT 8
+static struct pmap_preinit_mapping {
+ vm_paddr_t pa;
+ vm_offset_t va;
+ vm_size_t sz;
+ int mode;
+} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
+static int pmap_initialized;
+
+static struct rwlock_padalign pvh_global_lock;
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
+static struct md_page *pv_table;
+static int shpgperproc = PMAP_SHPGPERPROC;
+
+struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */
+int pv_maxchunks; /* How many chunks we have KVA for */
+vm_offset_t pv_vafree; /* freelist stored in the PTE */
+
+/*
+ * All those kernel PT submaps that BSD is so fond of
+ */
+pt_entry_t *CMAP3;
+static pd_entry_t *KPTD;
+caddr_t ptvmmap = 0;
+caddr_t CADDR3;
+
+/*
+ * Crashdump maps.
+ */
+static caddr_t crashdumpmap;
+
+static pt_entry_t *PMAP1 = NULL, *PMAP2;
+static pt_entry_t *PADDR1 = NULL, *PADDR2;
+#ifdef SMP
+static int PMAP1cpu;
+static int PMAP1changedcpu;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
+ &PMAP1changedcpu, 0,
+ "Number of times pmap_pte_quick changed CPU with same PMAP1");
+#endif
+static int PMAP1changed;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
+ &PMAP1changed, 0,
+ "Number of times pmap_pte_quick changed PMAP1");
+static int PMAP1unchanged;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
+ &PMAP1unchanged, 0,
+ "Number of times pmap_pte_quick didn't change PMAP1");
+static struct mtx PMAP2mutex;
+
+static void free_pv_chunk(struct pv_chunk *pc);
+static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
+static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
+static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
+static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
+#if VM_NRESERVLEVEL > 0
+static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
+#endif
+static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
+static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
+ vm_offset_t va);
+static int pmap_pvh_wired_mappings(struct md_page *pvh, int count);
+
+static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
+static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot);
+static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, vm_prot_t prot, vm_page_t mpte);
+static void pmap_flush_page(vm_page_t m);
+static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
+ pd_entry_t pde);
+static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
+static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
+static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
+static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
+static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
+static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
+#if VM_NRESERVLEVEL > 0
+static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
+#endif
+static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
+ vm_prot_t prot);
+static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
+static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
+ struct spglist *free);
+static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
+ struct spglist *free);
+static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
+static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
+ struct spglist *free);
+static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
+ vm_offset_t va);
+static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
+static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
+ vm_page_t m);
+static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
+ pd_entry_t newpde);
+static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
+
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
+
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
+static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
+static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
+static void pmap_pte_release(pt_entry_t *pte);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
+#if defined(PAE) || defined(PAE_TABLES)
+static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain,
+ uint8_t *flags, int wait);
+#endif
+static void pmap_set_pg(void);
+
+static __inline void pagezero(void *page);
+
+CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
+CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
+
+/*
+ * If you get an error here, then you set KVA_PAGES wrong! See the
+ * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
+ * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE.
+ */
+CTASSERT(KERNBASE % (1 << 24) == 0);
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ *
+ * On the i386 this is called after mapping has already been enabled
+ * and just syncs the pmap module with what has already been done.
+ * [We can't call it easily with mapping off since the kernel is not
+ * mapped with PA == VA, hence we would have to relocate every address
+ * from the linked base (virtual) address "KERNBASE" to the actual
+ * (physical) address starting relative to 0]
+ */
+void
+pmap_bootstrap(vm_paddr_t firstaddr)
+{
+ vm_offset_t va;
+ pt_entry_t *pte, *unused;
+ struct pcpu *pc;
+ int i;
+
+ /*
+ * Add a physical memory segment (vm_phys_seg) corresponding to the
+ * preallocated kernel page table pages so that vm_page structures
+ * representing these pages will be created. The vm_page structures
+ * are required for promotion of the corresponding kernel virtual
+ * addresses to superpage mappings.
+ */
+ vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
+
+ /*
+ * Initialize the first available kernel virtual address. However,
+ * using "firstaddr" may waste a few pages of the kernel virtual
+ * address space, because locore may not have mapped every physical
+ * page that it allocated. Preferably, locore would provide a first
+ * unused virtual address in addition to "firstaddr".
+ */
+ virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
+
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+ /*
+ * Initialize the kernel pmap (which is statically allocated).
+ */
+ PMAP_LOCK_INIT(kernel_pmap);
+ kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
+#if defined(PAE) || defined(PAE_TABLES)
+ kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
+#endif
+ CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
+ TAILQ_INIT(&kernel_pmap->pm_pvchunk);
+
+ /*
+ * Initialize the global pv list lock.
+ */
+ rw_init(&pvh_global_lock, "pmap pv global");
+
+ LIST_INIT(&allpmaps);
+
+ /*
+ * Request a spin mutex so that changes to allpmaps cannot be
+ * preempted by smp_rendezvous_cpus(). Otherwise,
+ * pmap_update_pde_kernel() could access allpmaps while it is
+ * being changed.
+ */
+ mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
+ mtx_lock_spin(&allpmaps_lock);
+ LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
+ mtx_unlock_spin(&allpmaps_lock);
+
+ /*
+ * Reserve some special page table entries/VA space for temporary
+ * mapping of pages.
+ */
+#define SYSMAP(c, p, v, n) \
+ v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
+
+ va = virtual_avail;
+ pte = vtopte(va);
+
+
+ /*
+ * Initialize temporary map objects on the current CPU for use
+ * during early boot.
+ * CMAP1/CMAP2 are used for zeroing and copying pages.
+ * CMAP3 is used for the boot-time memory test.
+ */
+ pc = get_pcpu();
+ mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
+ SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1)
+ SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1)
+ SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1)
+
+ SYSMAP(caddr_t, CMAP3, CADDR3, 1);
+
+ /*
+ * Crashdump maps.
+ */
+ SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
+
+ /*
+ * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
+ */
+ SYSMAP(caddr_t, unused, ptvmmap, 1)
+
+ /*
+ * msgbufp is used to map the system message buffer.
+ */
+ SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))
+
+ /*
+ * KPTmap is used by pmap_kextract().
+ *
+ * KPTmap is first initialized by locore. However, that initial
+ * KPTmap can only support NKPT page table pages. Here, a larger
+ * KPTmap is created that can support KVA_PAGES page table pages.
+ */
+ SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES)
+
+ for (i = 0; i < NKPT; i++)
+ KPTD[i] = (KPTphys + (i << PAGE_SHIFT)) | pgeflag | PG_RW | PG_V;
+
+ /*
+ * Adjust the start of the KPTD and KPTmap so that the implementation
+ * of pmap_kextract() and pmap_growkernel() can be made simpler.
+ */
+ KPTD -= KPTDI;
+ KPTmap -= i386_btop(KPTDI << PDRSHIFT);
+
+ /*
+ * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(),
+ * respectively.
+ */
+ SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1)
+ SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1)
+
+ mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
+
+ virtual_avail = va;
+
+ /*
+ * Finish removing the identity mapping (virt == phys) of low memory.
+ * It was only used for 2 instructions in locore. locore then
+ * unmapped the first PTD to get some null pointer checks. ACPI
+ * wakeup will map the first PTD transiently to use it for 1
+ * instruction. The double mapping for low memory is not usable in
+ * normal operation since it breaks trapping of null pointers and
+ * causes inconsistencies in page tables when combined with PG_G.
+ */
+ for (i = 1; i < NKPT; i++)
+ PTD[i] = 0;
+
+ /*
+ * Initialize the PAT MSR if present.
+ * pmap_init_pat() clears and sets CR4_PGE, which, as a
+ * side-effect, invalidates stale PG_G TLB entries that might
+ * have been created in our pre-boot environment. We assume
+ * that PAT support implies PGE and in reverse, PGE presence
+ * comes with PAT. Both features were added for Pentium Pro.
+ */
+ pmap_init_pat();
+
+ /* Turn on PG_G on kernel page(s) */
+ pmap_set_pg();
+}
+
+static void
+pmap_init_reserved_pages(void)
+{
+ struct pcpu *pc;
+ vm_offset_t pages;
+ int i;
+
+ CPU_FOREACH(i) {
+ pc = pcpu_find(i);
+ /*
+ * Skip if the mapping has already been initialized,
+ * i.e. this is the BSP.
+ */
+ if (pc->pc_cmap_addr1 != 0)
+ continue;
+ mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
+ pages = kva_alloc(PAGE_SIZE * 3);
+ if (pages == 0)
+ panic("%s: unable to allocate KVA", __func__);
+ pc->pc_cmap_pte1 = vtopte(pages);
+ pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE);
+ pc->pc_cmap_addr1 = (caddr_t)pages;
+ pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE);
+ pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
+ }
+}
+
+SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);
+
+/*
+ * Setup the PAT MSR.
+ */
+void
+pmap_init_pat(void)
+{
+ int pat_table[PAT_INDEX_SIZE];
+ uint64_t pat_msr;
+ u_long cr0, cr4;
+ int i;
+
+ /* Set default PAT index table. */
+ for (i = 0; i < PAT_INDEX_SIZE; i++)
+ pat_table[i] = -1;
+ pat_table[PAT_WRITE_BACK] = 0;
+ pat_table[PAT_WRITE_THROUGH] = 1;
+ pat_table[PAT_UNCACHEABLE] = 3;
+ pat_table[PAT_WRITE_COMBINING] = 3;
+ pat_table[PAT_WRITE_PROTECTED] = 3;
+ pat_table[PAT_UNCACHED] = 3;
+
+ /*
+ * Bail if this CPU doesn't implement PAT.
+ * We assume that PAT support implies PGE.
+ */
+ if ((cpu_feature & CPUID_PAT) == 0) {
+ for (i = 0; i < PAT_INDEX_SIZE; i++)
+ pat_index[i] = pat_table[i];
+ pat_works = 0;
+ return;
+ }
+
+ /*
+ * Due to some Intel errata, we can only safely use the lower 4
+ * PAT entries.
+ *
+ * Intel Pentium III Processor Specification Update
+ * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
+ * or Mode C Paging)
+ *
+ * Intel Pentium IV Processor Specification Update
+ * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
+ */
+ if (cpu_vendor_id == CPU_VENDOR_INTEL &&
+ !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe))
+ pat_works = 0;
+
+ /* Initialize default PAT entries. */
+ pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
+ PAT_VALUE(1, PAT_WRITE_THROUGH) |
+ PAT_VALUE(2, PAT_UNCACHED) |
+ PAT_VALUE(3, PAT_UNCACHEABLE) |
+ PAT_VALUE(4, PAT_WRITE_BACK) |
+ PAT_VALUE(5, PAT_WRITE_THROUGH) |
+ PAT_VALUE(6, PAT_UNCACHED) |
+ PAT_VALUE(7, PAT_UNCACHEABLE);
+
+ if (pat_works) {
+ /*
+ * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
+ * Program 5 and 6 as WP and WC.
+ * Leave 4 and 7 as WB and UC.
+ */
+ pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
+ pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
+ PAT_VALUE(6, PAT_WRITE_COMBINING);
+ pat_table[PAT_UNCACHED] = 2;
+ pat_table[PAT_WRITE_PROTECTED] = 5;
+ pat_table[PAT_WRITE_COMBINING] = 6;
+ } else {
+ /*
+ * Just replace PAT Index 2 with WC instead of UC-.
+ */
+ pat_msr &= ~PAT_MASK(2);
+ pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
+ pat_table[PAT_WRITE_COMBINING] = 2;
+ }
+
+ /* Disable PGE. */
+ cr4 = rcr4();
+ load_cr4(cr4 & ~CR4_PGE);
+
+ /* Disable caches (CD = 1, NW = 0). */
+ cr0 = rcr0();
+ load_cr0((cr0 & ~CR0_NW) | CR0_CD);
+
+ /* Flushes caches and TLBs. */
+ wbinvd();
+ invltlb();
+
+ /* Update PAT and index table. */
+ wrmsr(MSR_PAT, pat_msr);
+ for (i = 0; i < PAT_INDEX_SIZE; i++)
+ pat_index[i] = pat_table[i];
+
+ /* Flush caches and TLBs again. */
+ wbinvd();
+ invltlb();
+
+ /* Restore caches and PGE. */
+ load_cr0(cr0);
+ load_cr4(cr4);
+}
+
+/*
+ * Set PG_G on kernel pages. Only the BSP calls this when SMP is turned on.
+ */
+static void
+pmap_set_pg(void)
+{
+ pt_entry_t *pte;
+ vm_offset_t va, endva;
+
+ if (pgeflag == 0)
+ return;
+
+ endva = KERNBASE + KERNend;
+
+ if (pseflag) {
+ va = KERNBASE + roundup2(KERNLOAD, NBPDR);
+ while (va < endva) {
+ pdir_pde(PTD, va) |= pgeflag;
+ invltlb(); /* Flush non-PG_G entries. */
+ va += NBPDR;
+ }
+ } else {
+ va = (vm_offset_t)btext;
+ while (va < endva) {
+ pte = vtopte(va);
+ if (*pte)
+ *pte |= pgeflag;
+ invltlb(); /* Flush non-PG_G entries. */
+ va += PAGE_SIZE;
+ }
+ }
+}
+
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pat_mode = PAT_WRITE_BACK;
+}
+
+#if defined(PAE) || defined(PAE_TABLES)
+static void *
+pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
+ int wait)
+{
+
+ /* Inform UMA that this allocator uses kernel_map/object. */
+ *flags = UMA_SLAB_KERNEL;
+ return ((void *)kmem_alloc_contig_domain(domain, bytes, wait, 0x0ULL,
+ 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
+}
+#endif
+
+/*
+ * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
+ * Requirements:
+ * - Must deal with pages in order to ensure that none of the PG_* bits
+ * are ever set, PG_V in particular.
+ * - Assumes we can write to ptes without pte_store() atomic ops, even
+ * on PAE systems. This should be ok.
+ * - Assumes nothing will ever test these addresses for 0 to indicate
+ * no mapping instead of correctly checking PG_V.
+ * - Assumes a vm_offset_t will fit in a pte (true for i386).
+ * Because PG_V is never set, there can be no mappings to invalidate.
+ */
+static vm_offset_t
+pmap_ptelist_alloc(vm_offset_t *head)
+{
+ pt_entry_t *pte;
+ vm_offset_t va;
+
+ va = *head;
+ if (va == 0)
+ panic("pmap_ptelist_alloc: exhausted ptelist KVA");
+ pte = vtopte(va);
+ *head = *pte;
+ if (*head & PG_V)
+ panic("pmap_ptelist_alloc: va with PG_V set!");
+ *pte = 0;
+ return (va);
+}
+
+static void
+pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
+{
+ pt_entry_t *pte;
+
+ if (va & PG_V)
+ panic("pmap_ptelist_free: freeing va with PG_V set!");
+ pte = vtopte(va);
+ *pte = *head; /* virtual! PG_V is 0 though */
+ *head = va;
+}
+
+static void
+pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
+{
+ int i;
+ vm_offset_t va;
+
+ *head = 0;
+ for (i = npages - 1; i >= 0; i--) {
+ va = (vm_offset_t)base + i * PAGE_SIZE;
+ pmap_ptelist_free(head, va);
+ }
+}
+
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init(void)
+{
+ struct pmap_preinit_mapping *ppim;
+ vm_page_t mpte;
+ vm_size_t s;
+ int i, pv_npg;
+
+ /*
+ * Initialize the vm page array entries for the kernel pmap's
+ * page table pages.
+ */
+ for (i = 0; i < NKPT; i++) {
+ mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
+ KASSERT(mpte >= vm_page_array &&
+ mpte < &vm_page_array[vm_page_array_size],
+ ("pmap_init: page table page is out of range"));
+ mpte->pindex = i + KPTDI;
+ mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
+ }
+
+ /*
+ * Initialize the address space (zone) for the pv entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
+ */
+ TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
+ pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
+ TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
+ pv_entry_max = roundup(pv_entry_max, _NPCPV);
+ pv_entry_high_water = 9 * (pv_entry_max / 10);
+
+ /*
+ * If the kernel is running on a virtual machine, then it must assume
+ * that MCA is enabled by the hypervisor. Moreover, the kernel must
+ * be prepared for the hypervisor changing the vendor and family that
+ * are reported by CPUID. Consequently, the workaround for AMD Family
+ * 10h Erratum 383 is enabled if the processor's feature set does not
+ * include at least one feature that is only supported by older Intel
+ * or newer AMD processors.
+ */
+ if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
+ (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
+ CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
+ AMDID2_FMA4)) == 0)
+ workaround_erratum383 = 1;
+
+ /*
+ * Are large page mappings supported and enabled?
+ */
+ TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
+ if (pseflag == 0)
+ pg_ps_enabled = 0;
+ else if (pg_ps_enabled) {
+ KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
+ ("pmap_init: can't assign to pagesizes[1]"));
+ pagesizes[1] = NBPDR;
+ }
+
+ /*
+ * Calculate the size of the pv head table for superpages.
+ * Handle the possibility that "vm_phys_segs[...].end" is zero.
+ */
+ pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end -
+ PAGE_SIZE) / NBPDR + 1;
+
+ /*
+ * Allocate memory for the pv head table for superpages.
+ */
+ s = (vm_size_t)(pv_npg * sizeof(struct md_page));
+ s = round_page(s);
+ pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+ M_WAITOK | M_ZERO);
+ for (i = 0; i < pv_npg; i++)
+ TAILQ_INIT(&pv_table[i].pv_list);
+
+ pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
+ pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
+ if (pv_chunkbase == NULL)
+ panic("pmap_init: not enough kvm for pv chunks");
+ pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
+#if defined(PAE) || defined(PAE_TABLES)
+ pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
+ NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
+ UMA_ZONE_VM | UMA_ZONE_NOFREE);
+ uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
+#endif
+
+ pmap_initialized = 1;
+ if (!bootverbose)
+ return;
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == 0)
+ continue;
+ printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i,
+ (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode);
+ }
+}
+
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
+ "Max number of PV entries");
+SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
+ "Page share factor per proc");
+
+static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
+ "2/4MB page mapping counters");
+
+static u_long pmap_pde_demotions;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
+ &pmap_pde_demotions, 0, "2/4MB page demotions");
+
+static u_long pmap_pde_mappings;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
+ &pmap_pde_mappings, 0, "2/4MB page mappings");
+
+static u_long pmap_pde_p_failures;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
+ &pmap_pde_p_failures, 0, "2/4MB page promotion failures");
+
+static u_long pmap_pde_promotions;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
+ &pmap_pde_promotions, 0, "2/4MB page promotions");
+
+/***************************************************
+ * Low level helper routines.....
+ ***************************************************/
+
+/*
+ * Determine the appropriate bits to set in a PTE or PDE for a specified
+ * caching mode.
+ */
+int
+pmap_cache_bits(int mode, boolean_t is_pde)
+{
+ int cache_bits, pat_flag, pat_idx;
+
+ if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
+ panic("Unknown caching mode %d\n", mode);
+
+ /* The PAT bit is different for PTE's and PDE's. */
+ pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
+
+ /* Map the caching mode to a PAT index. */
+ pat_idx = pat_index[mode];
+
+ /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
+ cache_bits = 0;
+ if (pat_idx & 0x4)
+ cache_bits |= pat_flag;
+ if (pat_idx & 0x2)
+ cache_bits |= PG_NC_PCD;
+ if (pat_idx & 0x1)
+ cache_bits |= PG_NC_PWT;
+ return (cache_bits);
+}
+
+/*
+ * The caller is responsible for maintaining TLB consistency.
+ */
+static void
+pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde)
+{
+ pd_entry_t *pde;
+ pmap_t pmap;
+ boolean_t PTD_updated;
+
+ PTD_updated = FALSE;
+ mtx_lock_spin(&allpmaps_lock);
+ LIST_FOREACH(pmap, &allpmaps, pm_list) {
+ if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
+ PG_FRAME))
+ PTD_updated = TRUE;
+ pde = pmap_pde(pmap, va);
+ pde_store(pde, newpde);
+ }
+ mtx_unlock_spin(&allpmaps_lock);
+ KASSERT(PTD_updated,
+ ("pmap_kenter_pde: current page table is not in allpmaps"));
+}
+
+/*
+ * After changing the page size for the specified virtual address in the page
+ * table, flush the corresponding entries from the processor's TLB. Only the
+ * calling processor's TLB is affected.
+ *
+ * The calling thread must be pinned to a processor.
+ */
+static void
+pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
+{
+ u_long cr4;
+
+ if ((newpde & PG_PS) == 0)
+ /* Demotion: flush a specific 2MB page mapping. */
+ invlpg(va);
+ else if ((newpde & PG_G) == 0)
+ /*
+ * Promotion: flush every 4KB page mapping from the TLB
+ * because there are too many to flush individually.
+ */
+ invltlb();
+ else {
+ /*
+ * Promotion: flush every 4KB page mapping from the TLB,
+ * including any global (PG_G) mappings.
+ */
+ cr4 = rcr4();
+ load_cr4(cr4 & ~CR4_PGE);
+ /*
+ * Although preemption at this point could be detrimental to
+ * performance, it would not lead to an error. PG_G is simply
+ * ignored if CR4.PGE is clear. Moreover, in case this block
+ * is re-entered, the load_cr4() either above or below will
+ * modify CR4.PGE flushing the TLB.
+ */
+ load_cr4(cr4 | CR4_PGE);
+ }
+}
+
+void
+invltlb_glob(void)
+{
+ uint64_t cr4;
+
+ if (pgeflag == 0) {
+ invltlb();
+ } else {
+ cr4 = rcr4();
+ load_cr4(cr4 & ~CR4_PGE);
+ load_cr4(cr4 | CR4_PGE);
+ }
+}
+
+
+#ifdef SMP
+/*
+ * For SMP, these functions have to use the IPI mechanism for coherence.
+ *
+ * N.B.: Before calling any of the following TLB invalidation functions,
+ * the calling processor must ensure that all stores updating a non-
+ * kernel page table are globally performed. Otherwise, another
+ * processor could cache an old, pre-update entry without being
+ * invalidated. This can happen one of two ways: (1) The pmap becomes
+ * active on another processor after its pm_active field is checked by
+ * one of the following functions but before a store updating the page
+ * table is globally performed. (2) The pmap becomes active on another
+ * processor before its pm_active field is checked but due to
+ * speculative loads one of the following functions still reads the
+ * pmap as inactive on the other processor.
+ *
+ * The kernel page table is exempt because its pm_active field is
+ * immutable. The kernel page table is always active on every
+ * processor.
+ */
+void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+ cpuset_t *mask, other_cpus;
+ u_int cpuid;
+
+ sched_pin();
+ if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
+ invlpg(va);
+ mask = &all_cpus;
+ } else {
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
+ invlpg(va);
+ CPU_AND(&other_cpus, &pmap->pm_active);
+ mask = &other_cpus;
+ }
+ smp_masked_invlpg(*mask, va);
+ sched_unpin();
+}
+
+/* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
+#define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE)
+
+void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ cpuset_t *mask, other_cpus;
+ vm_offset_t addr;
+ u_int cpuid;
+
+ if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
+ pmap_invalidate_all(pmap);
+ return;
+ }
+
+ sched_pin();
+ if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
+ for (addr = sva; addr < eva; addr += PAGE_SIZE)
+ invlpg(addr);
+ mask = &all_cpus;
+ } else {
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
+ for (addr = sva; addr < eva; addr += PAGE_SIZE)
+ invlpg(addr);
+ CPU_AND(&other_cpus, &pmap->pm_active);
+ mask = &other_cpus;
+ }
+ smp_masked_invlpg_range(*mask, sva, eva);
+ sched_unpin();
+}
+
+void
+pmap_invalidate_all(pmap_t pmap)
+{
+ cpuset_t *mask, other_cpus;
+ u_int cpuid;
+
+ sched_pin();
+ if (pmap == kernel_pmap) {
+ invltlb_glob();
+ mask = &all_cpus;
+ } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
+ invltlb();
+ mask = &all_cpus;
+ } else {
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (CPU_ISSET(cpuid, &pmap->pm_active))
+ invltlb();
+ CPU_AND(&other_cpus, &pmap->pm_active);
+ mask = &other_cpus;
+ }
+ smp_masked_invltlb(*mask, pmap);
+ sched_unpin();
+}
+
+void
+pmap_invalidate_cache(void)
+{
+
+ sched_pin();
+ wbinvd();
+ smp_cache_flush();
+ sched_unpin();
+}
+
+struct pde_action {
+ cpuset_t invalidate; /* processors that invalidate their TLB */
+ vm_offset_t va;
+ pd_entry_t *pde;
+ pd_entry_t newpde;
+ u_int store; /* processor that updates the PDE */
+};
+
+static void
+pmap_update_pde_kernel(void *arg)
+{
+ struct pde_action *act = arg;
+ pd_entry_t *pde;
+ pmap_t pmap;
+
+ if (act->store == PCPU_GET(cpuid)) {
+
+ /*
+ * Elsewhere, this operation requires allpmaps_lock for
+ * synchronization. Here, it does not because it is being
+ * performed in the context of an all_cpus rendezvous.
+ */
+ LIST_FOREACH(pmap, &allpmaps, pm_list) {
+ pde = pmap_pde(pmap, act->va);
+ pde_store(pde, act->newpde);
+ }
+ }
+}
+
+static void
+pmap_update_pde_user(void *arg)
+{
+ struct pde_action *act = arg;
+
+ if (act->store == PCPU_GET(cpuid))
+ pde_store(act->pde, act->newpde);
+}
+
+static void
+pmap_update_pde_teardown(void *arg)
+{
+ struct pde_action *act = arg;
+
+ if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
+ pmap_update_pde_invalidate(act->va, act->newpde);
+}
+
+/*
+ * Change the page size for the specified virtual address in a way that
+ * prevents any possibility of the TLB ever having two entries that map the
+ * same virtual address using different page sizes. This is the recommended
+ * workaround for Erratum 383 on AMD Family 10h processors. It prevents a
+ * machine check exception for a TLB state that is improperly diagnosed as a
+ * hardware error.
+ */
+static void
+pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
+{
+ struct pde_action act;
+ cpuset_t active, other_cpus;
+ u_int cpuid;
+
+ sched_pin();
+ cpuid = PCPU_GET(cpuid);
+ other_cpus = all_cpus;
+ CPU_CLR(cpuid, &other_cpus);
+ if (pmap == kernel_pmap)
+ active = all_cpus;
+ else
+ active = pmap->pm_active;
+ if (CPU_OVERLAP(&active, &other_cpus)) {
+ act.store = cpuid;
+ act.invalidate = active;
+ act.va = va;
+ act.pde = pde;
+ act.newpde = newpde;
+ CPU_SET(cpuid, &active);
+ smp_rendezvous_cpus(active,
+ smp_no_rendezvous_barrier, pmap == kernel_pmap ?
+ pmap_update_pde_kernel : pmap_update_pde_user,
+ pmap_update_pde_teardown, &act);
+ } else {
+ if (pmap == kernel_pmap)
+ pmap_kenter_pde(va, newpde);
+ else
+ pde_store(pde, newpde);
+ if (CPU_ISSET(cpuid, &active))
+ pmap_update_pde_invalidate(va, newpde);
+ }
+ sched_unpin();
+}
+#else /* !SMP */
+/*
+ * Normal, non-SMP, 486+ invalidation functions.
+ * We inline these within pmap.c for speed.
+ */
+PMAP_INLINE void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+
+ if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
+ invlpg(va);
+}
+
+PMAP_INLINE void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t addr;
+
+ if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
+ for (addr = sva; addr < eva; addr += PAGE_SIZE)
+ invlpg(addr);
+}
+
+PMAP_INLINE void
+pmap_invalidate_all(pmap_t pmap)
+{
+
+ if (pmap == kernel_pmap)
+ invltlb_glob();
+ else if (!CPU_EMPTY(&pmap->pm_active))
+ invltlb();
+}
+
+PMAP_INLINE void
+pmap_invalidate_cache(void)
+{
+
+ wbinvd();
+}
+
+static void
+pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
+{
+
+ if (pmap == kernel_pmap)
+ pmap_kenter_pde(va, newpde);
+ else
+ pde_store(pde, newpde);
+ if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
+ pmap_update_pde_invalidate(va, newpde);
+}
+#endif /* !SMP */
+
+static void
+pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
+{
+
+ /*
+ * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was
+ * created by a promotion that did not invalidate the 512 or 1024 4KB
+ * page mappings that might exist in the TLB. Consequently, at this
+ * point, the TLB may hold both 4KB and 2- or 4MB page mappings for
+ * the address range [va, va + NBPDR). Therefore, the entire range
+ * must be invalidated here. In contrast, when PG_PROMOTED is clear,
+ * the TLB will not hold any 4KB page mappings for the address range
+ * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the
+ * 2- or 4MB page mapping from the TLB.
+ */
+ if ((pde & PG_PROMOTED) != 0)
+ pmap_invalidate_range(pmap, va, va + NBPDR - 1);
+ else
+ pmap_invalidate_page(pmap, va);
+}
+
+#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
+
+void
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
+{
+
+ if (force) {
+ sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+ } else {
+ KASSERT((sva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: sva not page-aligned"));
+ KASSERT((eva & PAGE_MASK) == 0,
+ ("pmap_invalidate_cache_range: eva not page-aligned"));
+ }
+
+ if ((cpu_feature & CPUID_SS) != 0 && !force)
+ ; /* If "Self Snoop" is supported and allowed, do nothing. */
+ else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
+ eva - sva < PMAP_CLFLUSH_THRESHOLD) {
+#ifdef DEV_APIC
+ /*
+ * XXX: Some CPUs fault, hang, or trash the local APIC
+ * registers if we use CLFLUSH on the local APIC
+ * range. The local APIC is always uncached, so we
+ * don't need to flush for that range anyway.
+ */
+ if (pmap_kextract(sva) == lapic_paddr)
+ return;
+#endif
+ /*
+ * Otherwise, do per-cache line flush. Use the sfence
+ * instruction to ensure that previous stores are
+ * included in the write-back. The processor
+ * propagates flush to other processors in the cache
+ * coherence domain.
+ */
+ sfence();
+ for (; sva < eva; sva += cpu_clflush_line_size)
+ clflushopt(sva);
+ sfence();
+ } else if ((cpu_feature & CPUID_CLFSH) != 0 &&
+ eva - sva < PMAP_CLFLUSH_THRESHOLD) {
+#ifdef DEV_APIC
+ if (pmap_kextract(sva) == lapic_paddr)
+ return;
+#endif
+ /*
+ * Writes are ordered by CLFLUSH on Intel CPUs.
+ */
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ for (; sva < eva; sva += cpu_clflush_line_size)
+ clflush(sva);
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ } else {
+
+ /*
+ * No targeted cache flush methods are supported by CPU,
+ * or the supplied range is bigger than 2MB.
+ * Globally invalidate cache.
+ */
+ pmap_invalidate_cache();
+ }
+}
+
+void
+pmap_invalidate_cache_pages(vm_page_t *pages, int count)
+{
+ int i;
+
+ if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
+ (cpu_feature & CPUID_CLFSH) == 0) {
+ pmap_invalidate_cache();
+ } else {
+ for (i = 0; i < count; i++)
+ pmap_flush_page(pages[i]);
+ }
+}
+
+/*
+ * Are we current address space or kernel?
+ */
+static __inline int
+pmap_is_current(pmap_t pmap)
+{
+
+ return (pmap == kernel_pmap || pmap ==
+ vmspace_pmap(curthread->td_proc->p_vmspace));
+}
+
+/*
+ * If the given pmap is not the current or kernel pmap, the returned pte must
+ * be released by passing it to pmap_pte_release().
+ */
+pt_entry_t *
+pmap_pte(pmap_t pmap, vm_offset_t va)
+{
+ pd_entry_t newpf;
+ pd_entry_t *pde;
+
+ pde = pmap_pde(pmap, va);
+ if (*pde & PG_PS)
+ return (pde);
+ if (*pde != 0) {
+ /* are we current address space or kernel? */
+ if (pmap_is_current(pmap))
+ return (vtopte(va));
+ mtx_lock(&PMAP2mutex);
+ newpf = *pde & PG_FRAME;
+ if ((*PMAP2 & PG_FRAME) != newpf) {
+ *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
+ pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
+ }
+ return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
+ }
+ return (NULL);
+}
+
+/*
+ * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte
+ * being NULL.
+ */
+static __inline void
+pmap_pte_release(pt_entry_t *pte)
+{
+
+ if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
+ mtx_unlock(&PMAP2mutex);
+}
+
+/*
+ * NB: The sequence of updating a page table followed by accesses to the
+ * corresponding pages is subject to the situation described in the "AMD64
+ * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23,
+ * "7.3.1 Special Coherency Considerations". Therefore, issuing the INVLPG
+ * right after modifying the PTE bits is crucial.
+ */
+static __inline void
+invlcaddr(void *caddr)
+{
+
+ invlpg((u_int)caddr);
+}
+
+/*
+ * Super fast pmap_pte routine best used when scanning
+ * the pv lists. This eliminates many coarse-grained
+ * invltlb calls. Note that many of the pv list
+ * scans are across different pmaps. It is very wasteful
+ * to do an entire invltlb for checking a single mapping.
+ *
+ * If the given pmap is not the current pmap, pvh_global_lock
+ * must be held and curthread pinned to a CPU.
+ */
+static pt_entry_t *
+pmap_pte_quick(pmap_t pmap, vm_offset_t va)
+{
+ pd_entry_t newpf;
+ pd_entry_t *pde;
+
+ pde = pmap_pde(pmap, va);
+ if (*pde & PG_PS)
+ return (pde);
+ if (*pde != 0) {
+ /* are we current address space or kernel? */
+ if (pmap_is_current(pmap))
+ return (vtopte(va));
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
+ newpf = *pde & PG_FRAME;
+ if ((*PMAP1 & PG_FRAME) != newpf) {
+ *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
+#ifdef SMP
+ PMAP1cpu = PCPU_GET(cpuid);
+#endif
+ invlcaddr(PADDR1);
+ PMAP1changed++;
+ } else
+#ifdef SMP
+ if (PMAP1cpu != PCPU_GET(cpuid)) {
+ PMAP1cpu = PCPU_GET(cpuid);
+ invlcaddr(PADDR1);
+ PMAP1changedcpu++;
+ } else
+#endif
+ PMAP1unchanged++;
+ return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
+ }
+ return (0);
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+vm_paddr_t
+pmap_extract(pmap_t pmap, vm_offset_t va)
+{
+ vm_paddr_t rtval;
+ pt_entry_t *pte;
+ pd_entry_t pde;
+
+ rtval = 0;
+ PMAP_LOCK(pmap);
+ pde = pmap->pm_pdir[va >> PDRSHIFT];
+ if (pde != 0) {
+ if ((pde & PG_PS) != 0)
+ rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
+ else {
+ pte = pmap_pte(pmap, va);
+ rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
+ pmap_pte_release(pte);
+ }
+ }
+ PMAP_UNLOCK(pmap);
+ return (rtval);
+}
+
+/*
+ * Routine: pmap_extract_and_hold
+ * Function:
+ * Atomically extract and hold the physical page
+ * with the given pmap and virtual address pair
+ * if that mapping permits the given protection.
+ */
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+ pd_entry_t pde;
+ pt_entry_t pte, *ptep;
+ vm_page_t m;
+ vm_paddr_t pa;
+
+ pa = 0;
+ m = NULL;
+ PMAP_LOCK(pmap);
+retry:
+ pde = *pmap_pde(pmap, va);
+ if (pde != 0) {
+ if (pde & PG_PS) {
+ if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
+ if (vm_page_pa_tryrelock(pmap, (pde &
+ PG_PS_FRAME) | (va & PDRMASK), &pa))
+ goto retry;
+ m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
+ (va & PDRMASK));
+ vm_page_hold(m);
+ }
+ } else {
+ ptep = pmap_pte(pmap, va);
+ pte = *ptep;
+ pmap_pte_release(ptep);
+ if (pte != 0 &&
+ ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
+ if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
+ &pa))
+ goto retry;
+ m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
+ vm_page_hold(m);
+ }
+ }
+ }
+ PA_UNLOCK_COND(pa);
+ PMAP_UNLOCK(pmap);
+ return (m);
+}
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+/*
+ * Add a wired page to the kva.
+ * Note: not SMP coherent.
+ *
+ * This function may be used before pmap_bootstrap() is called.
+ */
+PMAP_INLINE void
+pmap_kenter(vm_offset_t va, vm_paddr_t pa)
+{
+ pt_entry_t *pte;
+
+ pte = vtopte(va);
+ pte_store(pte, pa | PG_RW | PG_V | pgeflag);
+}
+
+static __inline void
+pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
+{
+ pt_entry_t *pte;
+
+ pte = vtopte(va);
+ pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
+}
+
+/*
+ * Remove a page from the kernel pagetables.
+ * Note: not SMP coherent.
+ *
+ * This function may be used before pmap_bootstrap() is called.
+ */
+PMAP_INLINE void
+pmap_kremove(vm_offset_t va)
+{
+ pt_entry_t *pte;
+
+ pte = vtopte(va);
+ pte_clear(pte);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
+{
+ vm_offset_t va, sva;
+ vm_paddr_t superpage_offset;
+ pd_entry_t newpde;
+
+ va = *virt;
+ /*
+ * Does the physical address range's size and alignment permit at
+ * least one superpage mapping to be created?
+ */
+ superpage_offset = start & PDRMASK;
+ if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) {
+ /*
+ * Increase the starting virtual address so that its alignment
+ * does not preclude the use of superpage mappings.
+ */
+ if ((va & PDRMASK) < superpage_offset)
+ va = (va & ~PDRMASK) + superpage_offset;
+ else if ((va & PDRMASK) > superpage_offset)
+ va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset;
+ }
+ sva = va;
+ while (start < end) {
+ if ((start & PDRMASK) == 0 && end - start >= NBPDR &&
+ pseflag) {
+ KASSERT((va & PDRMASK) == 0,
+ ("pmap_map: misaligned va %#x", va));
+ newpde = start | PG_PS | pgeflag | PG_RW | PG_V;
+ pmap_kenter_pde(va, newpde);
+ va += NBPDR;
+ start += NBPDR;
+ } else {
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ }
+ pmap_invalidate_range(kernel_pmap, sva, va);
+ *virt = va;
+ return (sva);
+}
+
+
+/*
+ * Add a list of wired pages to the kva
+ * this routine is only used for temporary
+ * kernel mappings that do not need to have
+ * page modification or references recorded.
+ * Note that old mappings are simply written
+ * over. The page *must* be wired.
+ * Note: SMP coherent. Uses a ranged shootdown IPI.
+ */
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+ pt_entry_t *endpte, oldpte, pa, *pte;
+ vm_page_t m;
+
+ oldpte = 0;
+ pte = vtopte(sva);
+ endpte = pte + count;
+ while (pte < endpte) {
+ m = *ma++;
+ pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+ if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
+ oldpte |= *pte;
+ pte_store(pte, pa | pgeflag | PG_RW | PG_V);
+ }
+ pte++;
+ }
+ if (__predict_false((oldpte & PG_V) != 0))
+ pmap_invalidate_range(kernel_pmap, sva, sva + count *
+ PAGE_SIZE);
+}
+
+/*
+ * This routine tears out page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ * Note: SMP coherent. Uses a ranged shootdown IPI.
+ */
+void
+pmap_qremove(vm_offset_t sva, int count)
+{
+ vm_offset_t va;
+
+ va = sva;
+ while (count-- > 0) {
+ pmap_kremove(va);
+ va += PAGE_SIZE;
+ }
+ pmap_invalidate_range(kernel_pmap, sva, va);
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+static __inline void
+pmap_free_zero_pages(struct spglist *free)
+{
+ vm_page_t m;
+ int count;
+
+ for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
+ SLIST_REMOVE_HEAD(free, plinks.s.ss);
+ /* Preserve the page's PG_ZERO setting. */
+ vm_page_free_toq(m);
+ }
+ atomic_subtract_int(&vm_cnt.v_wire_count, count);
+}
+
+/*
+ * Schedule the specified unused page table page to be freed. Specifically,
+ * add the page to the specified list of pages that will be released to the
+ * physical memory manager after the TLB has been updated.
+ */
+static __inline void
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+ boolean_t set_PG_ZERO)
+{
+
+ if (set_PG_ZERO)
+ m->flags |= PG_ZERO;
+ else
+ m->flags &= ~PG_ZERO;
+ SLIST_INSERT_HEAD(free, m, plinks.s.ss);
+}
+
+/*
+ * Inserts the specified page table page into the specified pmap's collection
+ * of idle page table pages. Each of a pmap's page table pages is responsible
+ * for mapping a distinct range of virtual addresses. The pmap's collection is
+ * ordered by this virtual address range.
+ */
+static __inline int
+pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ return (vm_radix_insert(&pmap->pm_root, mpte));
+}
+
+/*
+ * Removes the page table page mapping the specified virtual address from the
+ * specified pmap's collection of idle page table pages, and returns it.
+ * Otherwise, returns NULL if there is no page table page corresponding to the
+ * specified virtual address.
+ */
+static __inline vm_page_t
+pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT));
+}
+
+/*
+ * Decrements a page table page's wire count, which is used to record the
+ * number of valid page table entries within the page. If the wire count
+ * drops to zero, then the page table page is unmapped. Returns TRUE if the
+ * page table page was unmapped and FALSE otherwise.
+ */
+static inline boolean_t
+pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
+{
+
+ --m->wire_count;
+ if (m->wire_count == 0) {
+ _pmap_unwire_ptp(pmap, m, free);
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+static void
+_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
+{
+ vm_offset_t pteva;
+
+ /*
+ * unmap the page table page
+ */
+ pmap->pm_pdir[m->pindex] = 0;
+ --pmap->pm_stats.resident_count;
+
+ /*
+ * Do an invltlb to make the invalidated mapping
+ * take effect immediately.
+ */
+ pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
+ pmap_invalidate_page(pmap, pteva);
+
+ /*
+ * Put page on a list so that it is released after
+ * *ALL* TLB shootdown is done
+ */
+ pmap_add_delayed_free_list(m, free, TRUE);
+}
+
+/*
+ * After removing a page table entry, this routine is used to
+ * conditionally free the page, and manage the hold/wire counts.
+ */
+static int
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
+{
+ pd_entry_t ptepde;
+ vm_page_t mpte;
+
+ if (va >= VM_MAXUSER_ADDRESS)
+ return (0);
+ ptepde = *pmap_pde(pmap, va);
+ mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
+ return (pmap_unwire_ptp(pmap, mpte, free));
+}
+
+/*
+ * Initialize the pmap for the swapper process.
+ */
+void
+pmap_pinit0(pmap_t pmap)
+{
+
+ PMAP_LOCK_INIT(pmap);
+ /*
+ * Since the page table directory is shared with the kernel pmap,
+ * which is already included in the list "allpmaps", this pmap does
+ * not need to be inserted into that list.
+ */
+ pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
+#if defined(PAE) || defined(PAE_TABLES)
+ pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
+#endif
+ pmap->pm_root.rt_root = 0;
+ CPU_ZERO(&pmap->pm_active);
+ PCPU_SET(curpmap, pmap);
+ TAILQ_INIT(&pmap->pm_pvchunk);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+}
+
+/*
+ * Initialize a preallocated and zeroed pmap structure,
+ * such as one in a vmspace structure.
+ */
+int
+pmap_pinit(pmap_t pmap)
+{
+ vm_page_t m, ptdpg[NPGPTD];
+ vm_paddr_t pa;
+ int i;
+
+ /*
+ * No need to allocate page table space yet but we do need a valid
+ * page directory table.
+ */
+ if (pmap->pm_pdir == NULL) {
+ pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
+ if (pmap->pm_pdir == NULL)
+ return (0);
+#if defined(PAE) || defined(PAE_TABLES)
+ pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
+ KASSERT(((vm_offset_t)pmap->pm_pdpt &
+ ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
+ ("pmap_pinit: pdpt misaligned"));
+ KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
+ ("pmap_pinit: pdpt above 4g"));
+#endif
+ pmap->pm_root.rt_root = 0;
+ }
+ KASSERT(vm_radix_is_empty(&pmap->pm_root),
+ ("pmap_pinit: pmap has reserved page table page(s)"));
+
+ /*
+ * allocate the page directory page(s)
+ */
+ for (i = 0; i < NPGPTD;) {
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if (m == NULL)
+ VM_WAIT;
+ else {
+ ptdpg[i++] = m;
+ }
+ }
+
+ pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
+
+ for (i = 0; i < NPGPTD; i++)
+ if ((ptdpg[i]->flags & PG_ZERO) == 0)
+ pagezero(pmap->pm_pdir + (i * NPDEPG));
+
+ mtx_lock_spin(&allpmaps_lock);
+ LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
+ /* Copy the kernel page table directory entries. */
+ bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
+ mtx_unlock_spin(&allpmaps_lock);
+
+ /* install self-referential address mapping entry(s) */
+ for (i = 0; i < NPGPTD; i++) {
+ pa = VM_PAGE_TO_PHYS(ptdpg[i]);
+ pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
+#if defined(PAE) || defined(PAE_TABLES)
+ pmap->pm_pdpt[i] = pa | PG_V;
+#endif
+ }
+
+ CPU_ZERO(&pmap->pm_active);
+ TAILQ_INIT(&pmap->pm_pvchunk);
+ bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+
+ return (1);
+}
+
+/*
+ * this routine is called if the page table page is not
+ * mapped correctly.
+ */
+static vm_page_t
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
+{
+ vm_paddr_t ptepa;
+ vm_page_t m;
+
+ /*
+ * Allocate a page table page.
+ */
+ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+ if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
+ PMAP_UNLOCK(pmap);
+ rw_wunlock(&pvh_global_lock);
+ VM_WAIT;
+ rw_wlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ }
+
+ /*
+ * Indicate the need to retry. While waiting, the page table
+ * page may have been allocated.
+ */
+ return (NULL);
+ }
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+
+ /*
+ * Map the pagetable page into the process address space, if
+ * it isn't already there.
+ */
+
+ pmap->pm_stats.resident_count++;
+
+ ptepa = VM_PAGE_TO_PHYS(m);
+ pmap->pm_pdir[ptepindex] =
+ (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
+
+ return (m);
+}
+
+static vm_page_t
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
+{
+ u_int ptepindex;
+ pd_entry_t ptepa;
+ vm_page_t m;
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = va >> PDRSHIFT;
+retry:
+ /*
+ * Get the page directory entry
+ */
+ ptepa = pmap->pm_pdir[ptepindex];
+
+ /*
+ * This supports switching from a 4MB page to a
+ * normal 4K page.
+ */
+ if (ptepa & PG_PS) {
+ (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
+ ptepa = pmap->pm_pdir[ptepindex];
+ }
+
+ /*
+ * If the page table page is mapped, we just increment the
+ * hold count, and activate it.
+ */
+ if (ptepa) {
+ m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
+ m->wire_count++;
+ } else {
+ /*
+ * Here if the pte page isn't mapped, or if it has
+ * been deallocated.
+ */
+ m = _pmap_allocpte(pmap, ptepindex, flags);
+ if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
+ goto retry;
+ }
+ return (m);
+}
+
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+ vm_page_t m, ptdpg[NPGPTD];
+ int i;
+
+ KASSERT(pmap->pm_stats.resident_count == 0,
+ ("pmap_release: pmap resident count %ld != 0",
+ pmap->pm_stats.resident_count));
+ KASSERT(vm_radix_is_empty(&pmap->pm_root),
+ ("pmap_release: pmap has reserved page table page(s)"));
+ KASSERT(CPU_EMPTY(&pmap->pm_active),
+ ("releasing active pmap %p", pmap));
+
+ mtx_lock_spin(&allpmaps_lock);
+ LIST_REMOVE(pmap, pm_list);
+ mtx_unlock_spin(&allpmaps_lock);
+
+ for (i = 0; i < NPGPTD; i++)
+ ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
+ PG_FRAME);
+
+ bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
+ sizeof(*pmap->pm_pdir));
+
+ pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
+
+ for (i = 0; i < NPGPTD; i++) {
+ m = ptdpg[i];
+#if defined(PAE) || defined(PAE_TABLES)
+ KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
+ ("pmap_release: got wrong ptd page"));
+#endif
+ m->wire_count--;
+ vm_page_free_zero(m);
+ }
+ atomic_subtract_int(&vm_cnt.v_wire_count, NPGPTD);
+}
+
+static int
+kvm_size(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
+
+ return (sysctl_handle_long(oidp, &ksize, 0, req));
+}
+SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
+ 0, 0, kvm_size, "IU", "Size of KVM");
+
+static int
+kvm_free(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
+
+ return (sysctl_handle_long(oidp, &kfree, 0, req));
+}
+SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
+ 0, 0, kvm_free, "IU", "Amount of KVM free");
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+ vm_paddr_t ptppaddr;
+ vm_page_t nkpg;
+ pd_entry_t newpdir;
+
+ mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+ addr = roundup2(addr, NBPDR);
+ if (addr - 1 >= kernel_map->max_offset)
+ addr = kernel_map->max_offset;
+ while (kernel_vm_end < addr) {
+ if (pdir_pde(PTD, kernel_vm_end)) {
+ kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ continue;
+ }
+
+ nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
+ VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+ if (nkpg == NULL)
+ panic("pmap_growkernel: no memory to grow kernel");
+
+ nkpt++;
+
+ if ((nkpg->flags & PG_ZERO) == 0)
+ pmap_zero_page(nkpg);
+ ptppaddr = VM_PAGE_TO_PHYS(nkpg);
+ newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
+ pdir_pde(KPTD, kernel_vm_end) = pgeflag | newpdir;
+
+ pmap_kenter_pde(kernel_vm_end, newpdir);
+ kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ }
+}
+
+
+/***************************************************
+ * page management routines.
+ ***************************************************/
+
+CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
+CTASSERT(_NPCM == 11);
+CTASSERT(_NPCPV == 336);
+
+static __inline struct pv_chunk *
+pv_to_chunk(pv_entry_t pv)
+{
+
+ return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
+}
+
+#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
+
+#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */
+#define PC_FREE10 0x0000fffful /* Free values for index 10 */
+
+static const uint32_t pc_freemask[_NPCM] = {
+ PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
+ PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
+ PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
+ PC_FREE0_9, PC_FREE10
+};
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
+ "Current number of pv entries");
+
+#ifdef PV_STATS
+static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
+ "Current number of pv entry chunks");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
+ "Current number of pv entry chunks allocated");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
+ "Current number of pv entry chunks frees");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
+ "Number of times tried to get a chunk page but failed.");
+
+static long pv_entry_frees, pv_entry_allocs;
+static int pv_entry_spare;
+
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
+ "Current number of pv entry frees");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
+ "Current number of pv entry allocs");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
+ "Current number of spare pv entries");
+#endif
+
+/*
+ * We are in a serious low memory condition. Resort to
+ * drastic measures to free some pages so we can allocate
+ * another pv entry chunk.
+ *
+ * Walks the global pv chunk list in LRU order, destroying every
+ * non-wired 4 KB page mapping in each chunk it visits.  Returns a
+ * page that the caller can reuse for a new pv chunk: either the page
+ * backing a fully emptied chunk, or a page table page that became
+ * free as a side effect; NULL if nothing could be reclaimed.
+ */
+static vm_page_t
+pmap_pv_reclaim(pmap_t locked_pmap)
+{
+ struct pch newtail;
+ struct pv_chunk *pc;
+ struct md_page *pvh;
+ pd_entry_t *pde;
+ pmap_t pmap;
+ pt_entry_t *pte, tpte;
+ pv_entry_t pv;
+ vm_offset_t va;
+ vm_page_t m, m_pc;
+ struct spglist free;
+ uint32_t inuse;
+ int bit, field, freed;
+
+ PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
+ pmap = NULL;
+ m_pc = NULL;
+ SLIST_INIT(&free);
+ TAILQ_INIT(&newtail);
+ while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
+ SLIST_EMPTY(&free))) {
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ if (pmap != pc->pc_pmap) {
+ /* Switching pmaps: flush and drop the previous one first. */
+ if (pmap != NULL) {
+ pmap_invalidate_all(pmap);
+ if (pmap != locked_pmap)
+ PMAP_UNLOCK(pmap);
+ }
+ pmap = pc->pc_pmap;
+ /* Avoid deadlock and lock recursion. */
+ if (pmap > locked_pmap)
+ PMAP_LOCK(pmap);
+ else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
+ pmap = NULL;
+ TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
+ continue;
+ }
+ }
+
+ /*
+ * Destroy every non-wired, 4 KB page mapping in the chunk.
+ */
+ freed = 0;
+ for (field = 0; field < _NPCM; field++) {
+ for (inuse = ~pc->pc_map[field] & pc_freemask[field];
+ inuse != 0; inuse &= ~(1UL << bit)) {
+ bit = bsfl(inuse);
+ pv = &pc->pc_pventry[field * 32 + bit];
+ va = pv->pv_va;
+ pde = pmap_pde(pmap, va);
+ if ((*pde & PG_PS) != 0)
+ continue;
+ pte = pmap_pte(pmap, va);
+ tpte = *pte;
+ if ((tpte & PG_W) == 0)
+ tpte = pte_load_clear(pte);
+ pmap_pte_release(pte);
+ if ((tpte & PG_W) != 0)
+ continue;
+ KASSERT(tpte != 0,
+ ("pmap_pv_reclaim: pmap %p va %x zero pte",
+ pmap, va));
+ if ((tpte & PG_G) != 0)
+ pmap_invalidate_page(pmap, va);
+ m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
+ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ if ((tpte & PG_A) != 0)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list)) {
+ vm_page_aflag_clear(m,
+ PGA_WRITEABLE);
+ }
+ }
+ pc->pc_map[field] |= 1UL << bit;
+ pmap_unuse_pt(pmap, va, &free);
+ freed++;
+ }
+ }
+ if (freed == 0) {
+ TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
+ continue;
+ }
+ /* Every freed mapping is for a 4 KB page. */
+ pmap->pm_stats.resident_count -= freed;
+ PV_STAT(pv_entry_frees += freed);
+ PV_STAT(pv_entry_spare += freed);
+ pv_entry_count -= freed;
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ for (field = 0; field < _NPCM; field++)
+ if (pc->pc_map[field] != pc_freemask[field]) {
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
+ pc_list);
+ TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
+
+ /*
+ * One freed pv entry in locked_pmap is
+ * sufficient.
+ */
+ if (pmap == locked_pmap)
+ goto out;
+ break;
+ }
+ if (field == _NPCM) {
+ PV_STAT(pv_entry_spare -= _NPCPV);
+ PV_STAT(pc_chunk_count--);
+ PV_STAT(pc_chunk_frees++);
+ /* Entire chunk is free; return it. */
+ m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
+ pmap_qremove((vm_offset_t)pc, 1);
+ pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
+ break;
+ }
+ }
+out:
+ TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
+ if (pmap != NULL) {
+ pmap_invalidate_all(pmap);
+ if (pmap != locked_pmap)
+ PMAP_UNLOCK(pmap);
+ }
+ /*
+ * NOTE(review): SLIST_FIRST() is taken from a list that the guard
+ * just tested with SLIST_EMPTY() -- verify this condition (a "!"
+ * may have been intended/lost) against the committed pmap.c.
+ */
+ if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) {
+ m_pc = SLIST_FIRST(&free);
+ SLIST_REMOVE_HEAD(&free, plinks.s.ss);
+ /* Recycle a freed page table page. */
+ m_pc->wire_count = 1;
+ }
+ pmap_free_zero_pages(&free);
+ return (m_pc);
+}
+
+/*
+ * free the pv_entry back to the free list
+ *
+ * Marks the entry's bit free in its containing chunk.  If the chunk
+ * becomes entirely free it is released via free_pv_chunk(); otherwise
+ * the chunk is kept at the head of the pmap's chunk list so the next
+ * allocation finds it quickly.  The pvh global lock (write) and the
+ * pmap lock must be held (asserted below).
+ */
+static void
+free_pv_entry(pmap_t pmap, pv_entry_t pv)
+{
+ struct pv_chunk *pc;
+ int idx, field, bit;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PV_STAT(pv_entry_frees++);
+ PV_STAT(pv_entry_spare++);
+ pv_entry_count--;
+ pc = pv_to_chunk(pv);
+ idx = pv - &pc->pc_pventry[0];
+ field = idx / 32;
+ bit = idx % 32;
+ pc->pc_map[field] |= 1ul << bit;
+ for (idx = 0; idx < _NPCM; idx++)
+ if (pc->pc_map[idx] != pc_freemask[idx]) {
+ /*
+ * 98% of the time, pc is already at the head of the
+ * list. If it isn't already, move it to the head.
+ */
+ if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
+ pc)) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
+ pc_list);
+ }
+ return;
+ }
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ free_pv_chunk(pc);
+}
+
+/*
+ * Release a completely free pv chunk: drop it from the global LRU
+ * list, unmap and free its backing physical page, and return the
+ * chunk's KVA to the ptelist free list.
+ */
+static void
+free_pv_chunk(struct pv_chunk *pc)
+{
+ vm_page_t m;
+
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ PV_STAT(pv_entry_spare -= _NPCPV);
+ PV_STAT(pc_chunk_count--);
+ PV_STAT(pc_chunk_frees++);
+ /* entire chunk is free, return it */
+ m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
+ pmap_qremove((vm_offset_t)pc, 1);
+ vm_page_unwire(m, PQ_NONE);
+ vm_page_free(m);
+ pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
+}
+
+/*
+ * get a new pv_entry, allocating a block from the system
+ * when needed.
+ *
+ * If "try" is TRUE and neither a spare entry nor a fresh page is
+ * available, returns NULL instead of reclaiming; otherwise it falls
+ * back to pmap_pv_reclaim() and retries.  A rate-limited console
+ * warning is printed when pv_entry_count exceeds pv_entry_high_water.
+ * The pvh global lock (write) and the pmap lock must be held.
+ */
+static pv_entry_t
+get_pv_entry(pmap_t pmap, boolean_t try)
+{
+ static const struct timeval printinterval = { 60, 0 };
+ static struct timeval lastprint;
+ int bit, field;
+ pv_entry_t pv;
+ struct pv_chunk *pc;
+ vm_page_t m;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PV_STAT(pv_entry_allocs++);
+ pv_entry_count++;
+ if (pv_entry_count > pv_entry_high_water)
+ if (ratecheck(&lastprint, &printinterval))
+ printf("Approaching the limit on PV entries, consider "
+ "increasing either the vm.pmap.shpgperproc or the "
+ "vm.pmap.pv_entry_max tunable.\n")
+retry:
+ pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+ if (pc != NULL) {
+ for (field = 0; field < _NPCM; field++) {
+ if (pc->pc_map[field]) {
+ bit = bsfl(pc->pc_map[field]);
+ break;
+ }
+ }
+ if (field < _NPCM) {
+ pv = &pc->pc_pventry[field * 32 + bit];
+ pc->pc_map[field] &= ~(1ul << bit);
+ /* If this was the last item, move it to tail */
+ for (field = 0; field < _NPCM; field++)
+ if (pc->pc_map[field] != 0) {
+ PV_STAT(pv_entry_spare--);
+ return (pv); /* not full, return */
+ }
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
+ PV_STAT(pv_entry_spare--);
+ return (pv);
+ }
+ }
+ /*
+ * Access to the ptelist "pv_vafree" is synchronized by the pvh
+ * global lock. If "pv_vafree" is currently non-empty, it will
+ * remain non-empty until pmap_ptelist_alloc() completes.
+ */
+ if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+ if (try) {
+ pv_entry_count--;
+ PV_STAT(pc_chunk_tryfail++);
+ return (NULL);
+ }
+ m = pmap_pv_reclaim(pmap);
+ if (m == NULL)
+ goto retry;
+ }
+ PV_STAT(pc_chunk_count++);
+ PV_STAT(pc_chunk_allocs++);
+ pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
+ pmap_qenter((vm_offset_t)pc, &m, 1);
+ pc->pc_pmap = pmap;
+ pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */
+ for (field = 1; field < _NPCM; field++)
+ pc->pc_map[field] = pc_freemask[field];
+ TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+ pv = &pc->pc_pventry[0];
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ PV_STAT(pv_entry_spare += _NPCPV - 1);
+ return (pv);
+}
+
+/*
+ * Unlink and return the pv entry for (pmap, va) from pvh's list, or
+ * NULL if no such entry exists.  The pvh global lock must be held.
+ */
+static __inline pv_entry_t
+pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ break;
+ }
+ }
+ return (pv);
+}
+
+/*
+ * After a 4mpage mapping has been demoted, convert its single pv
+ * entry into per-4KB-page pv entries: the superpage's entry is reused
+ * for the first small page and new entries are allocated for the
+ * remaining NPTEPG - 1 pages.  "pa" must be 4mpage aligned (asserted).
+ */
+static void
+pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ vm_offset_t va_last;
+ vm_page_t m;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ KASSERT((pa & PDRMASK) == 0,
+ ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
+
+ /*
+ * Transfer the 4mpage's pv entry for this mapping to the first
+ * page's pv list.
+ */
+ pvh = pa_to_pvh(pa);
+ va = trunc_4mpage(va);
+ pv = pmap_pvh_remove(pvh, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
+ m = PHYS_TO_VM_PAGE(pa);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ /* Instantiate the remaining NPTEPG - 1 pv entries. */
+ va_last = va + NBPDR - PAGE_SIZE;
+ do {
+ m++;
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_pv_demote_pde: page %p is not managed", m));
+ va += PAGE_SIZE;
+ pmap_insert_entry(pmap, va, m);
+ } while (va < va_last);
+}
+
+#if VM_NRESERVLEVEL > 0
+/*
+ * The inverse of pmap_pv_demote_pde(): after NPTEPG 4KB mappings have
+ * been promoted to one 4mpage mapping, move the first page's pv entry
+ * to the 4mpage's pv list and free the pv entries of the remaining
+ * NPTEPG - 1 pages.  "pa" must be 4mpage aligned (asserted).
+ */
+static void
+pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ vm_offset_t va_last;
+ vm_page_t m;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ KASSERT((pa & PDRMASK) == 0,
+ ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
+
+ /*
+ * Transfer the first page's pv entry for this mapping to the
+ * 4mpage's pv list. Aside from avoiding the cost of a call
+ * to get_pv_entry(), a transfer avoids the possibility that
+ * get_pv_entry() calls pmap_collect() and that pmap_collect()
+ * removes one of the mappings that is being promoted.
+ */
+ m = PHYS_TO_VM_PAGE(pa);
+ va = trunc_4mpage(va);
+ pv = pmap_pvh_remove(&m->md, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
+ pvh = pa_to_pvh(pa);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ /* Free the remaining NPTEPG - 1 pv entries. */
+ va_last = va + NBPDR - PAGE_SIZE;
+ do {
+ m++;
+ va += PAGE_SIZE;
+ pmap_pvh_free(&m->md, pmap, va);
+ } while (va < va_last);
+}
+#endif /* VM_NRESERVLEVEL > 0 */
+
+/*
+ * Remove and free the pv entry for (pmap, va) from the given pv list
+ * head.  The entry must exist (KASSERT).
+ */
+static void
+pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ pv = pmap_pvh_remove(pvh, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
+ free_pv_entry(pmap, pv);
+}
+
+/*
+ * Free the pv entry for (pmap, va) on page m.  If that leaves m with
+ * no 4KB mappings and (for non-fictitious pages) no 4mpage mappings
+ * either, clear PGA_WRITEABLE on the page.
+ */
+static void
+pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
+{
+ struct md_page *pvh;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ pmap_pvh_free(&m->md, pmap, va);
+ if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+}
+
+/*
+ * Create a pv entry for page at pa for
+ * (pmap, va).
+ *
+ * Unconditional variant: get_pv_entry() is called with try == FALSE,
+ * so allocation falls back to reclamation rather than failing.  The
+ * pvh global lock (write) and the pmap lock must be held.
+ */
+static void
+pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+ pv_entry_t pv;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ pv = get_pv_entry(pmap, FALSE);
+ pv->pv_va = va;
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+}
+
+/*
+ * Conditionally create a pv entry.
+ *
+ * Returns TRUE on success; returns FALSE without side effects when
+ * the pv entry count is at the high-water mark or no entry can be
+ * allocated without reclamation (get_pv_entry() with try == TRUE).
+ */
+static boolean_t
+pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+ pv_entry_t pv;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ if (pv_entry_count < pv_entry_high_water &&
+ (pv = get_pv_entry(pmap, TRUE)) != NULL) {
+ pv->pv_va = va;
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+/*
+ * Create the pv entries for each of the pages within a superpage.
+ *
+ * A single pv entry on the superpage's pv list represents all of the
+ * constituent 4KB pages.  Returns TRUE on success, FALSE when the pv
+ * entry could not be allocated (same policy as
+ * pmap_try_insert_pv_entry()).
+ */
+static boolean_t
+pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ if (pv_entry_count < pv_entry_high_water &&
+ (pv = get_pv_entry(pmap, TRUE)) != NULL) {
+ pv->pv_va = va;
+ pvh = pa_to_pvh(pa);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+/*
+ * Fills a page table page with mappings to consecutive physical pages.
+ *
+ * Writes NPTEPG entries starting at firstpte; newpte supplies the
+ * attribute bits and the starting frame, advanced by PAGE_SIZE per
+ * entry.
+ */
+static void
+pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
+{
+ pt_entry_t *pte;
+
+ for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
+ *pte = newpte;
+ newpte += PAGE_SIZE;
+ }
+}
+
+/*
+ * Tries to demote a 2- or 4MB page mapping. If demotion fails, the
+ * 2- or 4MB page mapping is invalidated.
+ *
+ * Returns TRUE on success.  Returns FALSE only when the mapping was
+ * never accessed (PG_A clear) or a new page table page could not be
+ * allocated; in that case the superpage mapping has been removed
+ * entirely.
+ */
+static boolean_t
+pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
+{
+ pd_entry_t newpde, oldpde;
+ pt_entry_t *firstpte, newpte;
+ vm_paddr_t mptepa;
+ vm_page_t mpte;
+ struct spglist free;
+ vm_offset_t sva;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ oldpde = *pde;
+ KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
+ ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
+ if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
+ NULL) {
+ KASSERT((oldpde & PG_W) == 0,
+ ("pmap_demote_pde: page table page for a wired mapping"
+ " is missing"));
+
+ /*
+ * Invalidate the 2- or 4MB page mapping and return
+ * "failure" if the mapping was never accessed or the
+ * allocation of the new page table page fails.
+ */
+ if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
+ va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
+ VM_ALLOC_WIRED)) == NULL) {
+ SLIST_INIT(&free);
+ sva = trunc_4mpage(va);
+ pmap_remove_pde(pmap, pde, sva, &free);
+ if ((oldpde & PG_G) == 0)
+ pmap_invalidate_pde_page(pmap, sva, oldpde);
+ pmap_free_zero_pages(&free);
+ CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
+ " in pmap %p", va, pmap);
+ return (FALSE);
+ }
+ if (va < VM_MAXUSER_ADDRESS)
+ pmap->pm_stats.resident_count++;
+ }
+ mptepa = VM_PAGE_TO_PHYS(mpte);
+
+ /*
+ * If the page mapping is in the kernel's address space, then the
+ * KPTmap can provide access to the page table page. Otherwise,
+ * temporarily map the page table page (mpte) into the kernel's
+ * address space at either PADDR1 or PADDR2.
+ */
+ if (va >= KERNBASE)
+ firstpte = &KPTmap[i386_btop(trunc_4mpage(va))];
+ else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) {
+ if ((*PMAP1 & PG_FRAME) != mptepa) {
+ *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
+#ifdef SMP
+ PMAP1cpu = PCPU_GET(cpuid);
+#endif
+ invlcaddr(PADDR1);
+ PMAP1changed++;
+ } else
+#ifdef SMP
+ if (PMAP1cpu != PCPU_GET(cpuid)) {
+ PMAP1cpu = PCPU_GET(cpuid);
+ invlcaddr(PADDR1);
+ PMAP1changedcpu++;
+ } else
+#endif
+ PMAP1unchanged++;
+ firstpte = PADDR1;
+ } else {
+ mtx_lock(&PMAP2mutex);
+ if ((*PMAP2 & PG_FRAME) != mptepa) {
+ *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
+ pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
+ }
+ firstpte = PADDR2;
+ }
+ newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
+ KASSERT((oldpde & PG_A) != 0,
+ ("pmap_demote_pde: oldpde is missing PG_A"));
+ KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
+ ("pmap_demote_pde: oldpde is missing PG_M"));
+ newpte = oldpde & ~PG_PS;
+ if ((newpte & PG_PDE_PAT) != 0)
+ newpte ^= PG_PDE_PAT | PG_PTE_PAT;
+
+ /*
+ * If the page table page is new, initialize it.
+ */
+ if (mpte->wire_count == 1) {
+ mpte->wire_count = NPTEPG;
+ pmap_fill_ptp(firstpte, newpte);
+ }
+ KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
+ ("pmap_demote_pde: firstpte and newpte map different physical"
+ " addresses"));
+
+ /*
+ * If the mapping has changed attributes, update the page table
+ * entries.
+ */
+ if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
+ pmap_fill_ptp(firstpte, newpte);
+
+ /*
+ * Demote the mapping. This pmap is locked. The old PDE has
+ * PG_A set. If the old PDE has PG_RW set, it also has PG_M
+ * set. Thus, there is no danger of a race with another
+ * processor changing the setting of PG_A and/or PG_M between
+ * the read above and the store below.
+ */
+ if (workaround_erratum383)
+ pmap_update_pde(pmap, va, pde, newpde);
+ else if (pmap == kernel_pmap)
+ pmap_kenter_pde(va, newpde);
+ else
+ pde_store(pde, newpde);
+ if (firstpte == PADDR2)
+ mtx_unlock(&PMAP2mutex);
+
+ /*
+ * Invalidate the recursive mapping of the page table page.
+ */
+ pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
+
+ /*
+ * Demote the pv entry. This depends on the earlier demotion
+ * of the mapping. Specifically, the (re)creation of a per-
+ * page pv entry might trigger the execution of pmap_collect(),
+ * which might reclaim a newly (re)created per-page pv entry
+ * and destroy the associated mapping. In order to destroy
+ * the mapping, the PDE must have already changed from mapping
+ * the 2mpage to referencing the page table page.
+ */
+ if ((oldpde & PG_MANAGED) != 0)
+ pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
+
+ pmap_pde_demotions++;
+ CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
+ " in pmap %p", va, pmap);
+ return (TRUE);
+}
+
+/*
+ * Removes a 2- or 4MB page mapping from the kernel pmap.
+ *
+ * Rather than leaving a hole, the superpage PDE is replaced with a
+ * mapping of its (zeroed) page table page, so the kernel address
+ * range stays backed by a valid, empty page table.
+ */
+static void
+pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
+{
+ pd_entry_t newpde;
+ vm_paddr_t mptepa;
+ vm_page_t mpte;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ mpte = pmap_remove_pt_page(pmap, va);
+ if (mpte == NULL)
+ panic("pmap_remove_kernel_pde: Missing pt page.");
+
+ mptepa = VM_PAGE_TO_PHYS(mpte);
+ newpde = mptepa | PG_M | PG_A | PG_RW | PG_V;
+
+ /*
+ * Initialize the page table page.
+ */
+ pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]);
+
+ /*
+ * Remove the mapping.
+ */
+ if (workaround_erratum383)
+ pmap_update_pde(pmap, va, pde, newpde);
+ else
+ pmap_kenter_pde(va, newpde);
+
+ /*
+ * Invalidate the recursive mapping of the page table page.
+ */
+ pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
+}
+
+/*
+ * pmap_remove_pde: do the things to unmap a superpage in a process
+ *
+ * Clears the PDE, fixes up wired/resident counts and the superpage's
+ * pv state, and (for user pmaps) queues the freed page table page on
+ * "free".  The pmap lock must be held and sva must be 4mpage aligned
+ * (both asserted).
+ */
+static void
+pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
+ struct spglist *free)
+{
+ struct md_page *pvh;
+ pd_entry_t oldpde;
+ vm_offset_t eva, va;
+ vm_page_t m, mpte;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((sva & PDRMASK) == 0,
+ ("pmap_remove_pde: sva is not 4mpage aligned"));
+ oldpde = pte_load_clear(pdq);
+ if (oldpde & PG_W)
+ pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
+
+ /*
+ * Machines that don't support invlpg, also don't support
+ * PG_G.
+ */
+ if ((oldpde & PG_G) != 0)
+ pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
+
+ pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
+ if (oldpde & PG_MANAGED) {
+ pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
+ pmap_pvh_free(pvh, pmap, sva);
+ eva = sva + NBPDR;
+ for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
+ va < eva; va += PAGE_SIZE, m++) {
+ if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ if (oldpde & PG_A)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+ }
+ if (pmap == kernel_pmap) {
+ pmap_remove_kernel_pde(pmap, pdq, sva);
+ } else {
+ mpte = pmap_remove_pt_page(pmap, sva);
+ if (mpte != NULL) {
+ pmap->pm_stats.resident_count--;
+ KASSERT(mpte->wire_count == NPTEPG,
+ ("pmap_remove_pde: pte page wire count error"));
+ mpte->wire_count = 0;
+ pmap_add_delayed_free_list(mpte, free, FALSE);
+ }
+ }
+}
+
+/*
+ * pmap_remove_pte: do the things to unmap a page in a process
+ *
+ * Clears the PTE, updates wired/resident counts, propagates dirty and
+ * referenced bits to the vm_page, and frees the pv entry for managed
+ * mappings.  Returns the result of pmap_unuse_pt() (presumably
+ * nonzero when the page table page was freed -- confirm against
+ * pmap_unuse_pt()).
+ */
+static int
+pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
+ struct spglist *free)
+{
+ pt_entry_t oldpte;
+ vm_page_t m;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ oldpte = pte_load_clear(ptq);
+ KASSERT(oldpte != 0,
+ ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va));
+ if (oldpte & PG_W)
+ pmap->pm_stats.wired_count -= 1;
+ /*
+ * Machines that don't support invlpg, also don't support
+ * PG_G.
+ */
+ if (oldpte & PG_G)
+ pmap_invalidate_page(kernel_pmap, va);
+ pmap->pm_stats.resident_count -= 1;
+ if (oldpte & PG_MANAGED) {
+ m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
+ if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ if (oldpte & PG_A)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ pmap_remove_entry(pmap, m, va);
+ }
+ return (pmap_unuse_pt(pmap, va, free));
+}
+
+/*
+ * Remove a single page from a process address space
+ *
+ * Fast path used by pmap_remove() for one-page ranges.  The caller
+ * must have pinned the thread (asserted) because pmap_pte_quick()
+ * relies on a per-CPU mapping.
+ */
+static void
+pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
+{
+ pt_entry_t *pte;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
+ return;
+ pmap_remove_pte(pmap, pte, va, free);
+ pmap_invalidate_page(pmap, va);
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ *
+ * The thread is pinned for the duration (pmap_pte_quick() uses a
+ * per-CPU mapping).  Superpage mappings that are only partially
+ * covered by the range are demoted first.  "anyvalid" defers the
+ * global TLB flush to the end; PG_G mappings are invalidated
+ * individually by the helpers.
+ */
+void
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t pdnxt;
+ pd_entry_t ptpaddr;
+ pt_entry_t *pte;
+ struct spglist free;
+ int anyvalid;
+
+ /*
+ * Perform an unsynchronized read. This is, however, safe.
+ */
+ if (pmap->pm_stats.resident_count == 0)
+ return;
+
+ anyvalid = 0;
+ SLIST_INIT(&free);
+
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ PMAP_LOCK(pmap);
+
+ /*
+ * special handling of removing one page. a very
+ * common operation and easy to short circuit some
+ * code.
+ */
+ if ((sva + PAGE_SIZE == eva) &&
+ ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
+ pmap_remove_page(pmap, sva, &free);
+ goto out;
+ }
+
+ for (; sva < eva; sva = pdnxt) {
+ u_int pdirindex;
+
+ /*
+ * Calculate index for next page table.
+ */
+ pdnxt = (sva + NBPDR) & ~PDRMASK;
+ if (pdnxt < sva)
+ pdnxt = eva;
+ if (pmap->pm_stats.resident_count == 0)
+ break;
+
+ pdirindex = sva >> PDRSHIFT;
+ ptpaddr = pmap->pm_pdir[pdirindex];
+
+ /*
+ * Weed out invalid mappings. Note: we assume that the page
+ * directory table is always allocated, and in kernel virtual.
+ */
+ if (ptpaddr == 0)
+ continue;
+
+ /*
+ * Check for large page.
+ */
+ if ((ptpaddr & PG_PS) != 0) {
+ /*
+ * Are we removing the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == pdnxt && eva >= pdnxt) {
+ /*
+ * The TLB entry for a PG_G mapping is
+ * invalidated by pmap_remove_pde().
+ */
+ if ((ptpaddr & PG_G) == 0)
+ anyvalid = 1;
+ pmap_remove_pde(pmap,
+ &pmap->pm_pdir[pdirindex], sva, &free);
+ continue;
+ } else if (!pmap_demote_pde(pmap,
+ &pmap->pm_pdir[pdirindex], sva)) {
+ /* The large page mapping was destroyed. */
+ continue;
+ }
+ }
+
+ /*
+ * Limit our scan to either the end of the va represented
+ * by the current page table page, or to the end of the
+ * range being removed.
+ */
+ if (pdnxt > eva)
+ pdnxt = eva;
+
+ for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
+ sva += PAGE_SIZE) {
+ if (*pte == 0)
+ continue;
+
+ /*
+ * The TLB entry for a PG_G mapping is invalidated
+ * by pmap_remove_pte().
+ */
+ if ((*pte & PG_G) == 0)
+ anyvalid = 1;
+ if (pmap_remove_pte(pmap, pte, sva, &free))
+ break;
+ }
+ }
+out:
+ sched_unpin();
+ if (anyvalid)
+ pmap_invalidate_all(pmap);
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ pmap_free_zero_pages(&free);
+}
+
+/*
+ * Routine: pmap_remove_all
+ * Function:
+ * Removes this physical page from
+ * all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ *
+ * Notes:
+ * Original versions of this routine were very
+ * inefficient because they iteratively called
+ * pmap_remove (slow...)
+ *
+ * Any 2/4MB mappings of the page are first demoted so that only 4KB
+ * pv entries remain to be torn down; PGA_WRITEABLE is cleared once
+ * the page has no mappings left.
+ */
+
+void
+pmap_remove_all(vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ pmap_t pmap;
+ pt_entry_t *pte, tpte;
+ pd_entry_t *pde;
+ vm_offset_t va;
+ struct spglist free;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_remove_all: page %p is not managed", m));
+ SLIST_INIT(&free);
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ goto small_mappings;
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
+ va = pv->pv_va;
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, va);
+ (void)pmap_demote_pde(pmap, pde, va);
+ PMAP_UNLOCK(pmap);
+ }
+small_mappings:
+ while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pmap->pm_stats.resident_count--;
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
+ " a 4mpage in page %p's pv list", m));
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ tpte = pte_load_clear(pte);
+ KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte",
+ pmap, pv->pv_va));
+ if (tpte & PG_W)
+ pmap->pm_stats.wired_count--;
+ if (tpte & PG_A)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+
+ /*
+ * Update the vm_page_t clean and reference bits.
+ */
+ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ pmap_unuse_pt(pmap, pv->pv_va, &free);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ free_pv_entry(pmap, pv);
+ PMAP_UNLOCK(pmap);
+ }
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ pmap_free_zero_pages(&free);
+}
+
+/*
+ * pmap_protect_pde: do the things to protect a 4mpage in a process
+ *
+ * Returns TRUE when a non-global mapping was changed, in which case
+ * the caller is responsible for the TLB invalidation; changed PG_G
+ * mappings are invalidated here.  The PDE update is retried on
+ * pde_cmpset() failure.
+ */
+static boolean_t
+pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
+{
+ pd_entry_t newpde, oldpde;
+ vm_offset_t eva, va;
+ vm_page_t m;
+ boolean_t anychanged;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT((sva & PDRMASK) == 0,
+ ("pmap_protect_pde: sva is not 4mpage aligned"));
+ anychanged = FALSE;
+retry:
+ oldpde = newpde = *pde;
+ if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
+ (PG_MANAGED | PG_M | PG_RW)) {
+ eva = sva + NBPDR;
+ for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
+ va < eva; va += PAGE_SIZE, m++)
+ vm_page_dirty(m);
+ }
+ if ((prot & VM_PROT_WRITE) == 0)
+ newpde &= ~(PG_RW | PG_M);
+#if defined(PAE) || defined(PAE_TABLES)
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ newpde |= pg_nx;
+#endif
+ if (newpde != oldpde) {
+ /*
+ * As an optimization to future operations on this PDE, clear
+ * PG_PROMOTED. The impending invalidation will remove any
+ * lingering 4KB page mappings from the TLB.
+ */
+ if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED))
+ goto retry;
+ if ((oldpde & PG_G) != 0)
+ pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
+ else
+ anychanged = TRUE;
+ }
+ return (anychanged);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ *
+ * VM_PROT_NONE degenerates to pmap_remove().  The pv lists are only
+ * locked lazily: if the lock cannot be taken while demoting a
+ * superpage, the scan restarts at "resume" with the lock held.
+ */
+void
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ vm_offset_t pdnxt;
+ pd_entry_t ptpaddr;
+ pt_entry_t *pte;
+ boolean_t anychanged, pv_lists_locked;
+
+ KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
+ if (prot == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+
+#if defined(PAE) || defined(PAE_TABLES)
+ if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
+ (VM_PROT_WRITE|VM_PROT_EXECUTE))
+ return;
+#else
+ if (prot & VM_PROT_WRITE)
+ return;
+#endif
+
+ if (pmap_is_current(pmap))
+ pv_lists_locked = FALSE;
+ else {
+ pv_lists_locked = TRUE;
+resume:
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ }
+ anychanged = FALSE;
+
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = pdnxt) {
+ pt_entry_t obits, pbits;
+ u_int pdirindex;
+
+ pdnxt = (sva + NBPDR) & ~PDRMASK;
+ if (pdnxt < sva)
+ pdnxt = eva;
+
+ pdirindex = sva >> PDRSHIFT;
+ ptpaddr = pmap->pm_pdir[pdirindex];
+
+ /*
+ * Weed out invalid mappings. Note: we assume that the page
+ * directory table is always allocated, and in kernel virtual.
+ */
+ if (ptpaddr == 0)
+ continue;
+
+ /*
+ * Check for large page.
+ */
+ if ((ptpaddr & PG_PS) != 0) {
+ /*
+ * Are we protecting the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == pdnxt && eva >= pdnxt) {
+ /*
+ * The TLB entry for a PG_G mapping is
+ * invalidated by pmap_protect_pde().
+ */
+ if (pmap_protect_pde(pmap,
+ &pmap->pm_pdir[pdirindex], sva, prot))
+ anychanged = TRUE;
+ continue;
+ } else {
+ if (!pv_lists_locked) {
+ pv_lists_locked = TRUE;
+ if (!rw_try_wlock(&pvh_global_lock)) {
+ if (anychanged)
+ pmap_invalidate_all(
+ pmap);
+ PMAP_UNLOCK(pmap);
+ goto resume;
+ }
+ sched_pin();
+ }
+ if (!pmap_demote_pde(pmap,
+ &pmap->pm_pdir[pdirindex], sva)) {
+ /*
+ * The large page mapping was
+ * destroyed.
+ */
+ continue;
+ }
+ }
+ }
+
+ if (pdnxt > eva)
+ pdnxt = eva;
+
+ for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
+ sva += PAGE_SIZE) {
+ vm_page_t m;
+
+retry:
+ /*
+ * Regardless of whether a pte is 32 or 64 bits in
+ * size, PG_RW, PG_A, and PG_M are among the least
+ * significant 32 bits.
+ */
+ obits = pbits = *pte;
+ if ((pbits & PG_V) == 0)
+ continue;
+
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
+ (PG_MANAGED | PG_M | PG_RW)) {
+ m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
+ vm_page_dirty(m);
+ }
+ pbits &= ~(PG_RW | PG_M);
+ }
+#if defined(PAE) || defined(PAE_TABLES)
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ pbits |= pg_nx;
+#endif
+
+ if (pbits != obits) {
+#if defined(PAE) || defined(PAE_TABLES)
+ if (!atomic_cmpset_64(pte, obits, pbits))
+ goto retry;
+#else
+ if (!atomic_cmpset_int((u_int *)pte, obits,
+ pbits))
+ goto retry;
+#endif
+ if (obits & PG_G)
+ pmap_invalidate_page(pmap, sva);
+ else
+ anychanged = TRUE;
+ }
+ }
+ }
+ if (anychanged)
+ pmap_invalidate_all(pmap);
+ if (pv_lists_locked) {
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ }
+ PMAP_UNLOCK(pmap);
+}
+
+#if VM_NRESERVLEVEL > 0
+/*
+ * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are
+ * within a single page table page (PTP) to a single 2- or 4MB page mapping.
+ * For promotion to occur, two conditions must be met: (1) the 4KB page
+ * mappings must map aligned, contiguous physical memory and (2) the 4KB page
+ * mappings must have identical characteristics.
+ *
+ * Managed (PG_MANAGED) mappings within the kernel address space are not
+ * promoted. The reason is that kernel PDEs are replicated in each pmap but
+ * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel
+ * pmap.
+ *
+ * On any failure pmap_pde_p_failures is incremented and the 4KB
+ * mappings are left in place; on success the PTP is saved via
+ * pmap_insert_pt_page() for a later demotion and pmap_pde_promotions
+ * is incremented.
+ */
+static void
+pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
+{
+ pd_entry_t newpde;
+ pt_entry_t *firstpte, oldpte, pa, *pte;
+ vm_offset_t oldpteva;
+ vm_page_t mpte;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * Examine the first PTE in the specified PTP. Abort if this PTE is
+ * either invalid, unused, or does not map the first 4KB physical page
+ * within a 2- or 4MB page.
+ */
+ firstpte = pmap_pte_quick(pmap, trunc_4mpage(va));
+setpde:
+ newpde = *firstpte;
+ if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
+ pmap_pde_p_failures++;
+ CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
+ " in pmap %p", va, pmap);
+ return;
+ }
+ if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
+ pmap_pde_p_failures++;
+ CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
+ " in pmap %p", va, pmap);
+ return;
+ }
+ if ((newpde & (PG_M | PG_RW)) == PG_RW) {
+ /*
+ * When PG_M is already clear, PG_RW can be cleared without
+ * a TLB invalidation.
+ */
+ if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
+ ~PG_RW))
+ goto setpde;
+ newpde &= ~PG_RW;
+ }
+
+ /*
+ * Examine each of the other PTEs in the specified PTP. Abort if this
+ * PTE maps an unexpected 4KB physical page or does not have identical
+ * characteristics to the first PTE.
+ */
+ pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
+ for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
+setpte:
+ oldpte = *pte;
+ if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
+ pmap_pde_p_failures++;
+ CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
+ " in pmap %p", va, pmap);
+ return;
+ }
+ if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
+ /*
+ * When PG_M is already clear, PG_RW can be cleared
+ * without a TLB invalidation.
+ */
+ if (!atomic_cmpset_int((u_int *)pte, oldpte,
+ oldpte & ~PG_RW))
+ goto setpte;
+ oldpte &= ~PG_RW;
+ oldpteva = (oldpte & PG_FRAME & PDRMASK) |
+ (va & ~PDRMASK);
+ CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
+ " in pmap %p", oldpteva, pmap);
+ }
+ if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
+ pmap_pde_p_failures++;
+ CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
+ " in pmap %p", va, pmap);
+ return;
+ }
+ pa -= PAGE_SIZE;
+ }
+
+ /*
+ * Save the page table page in its current state until the PDE
+ * mapping the superpage is demoted by pmap_demote_pde() or
+ * destroyed by pmap_remove_pde().
+ */
+ mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+ KASSERT(mpte >= vm_page_array &&
+ mpte < &vm_page_array[vm_page_array_size],
+ ("pmap_promote_pde: page table page is out of range"));
+ KASSERT(mpte->pindex == va >> PDRSHIFT,
+ ("pmap_promote_pde: page table page's pindex is wrong"));
+ if (pmap_insert_pt_page(pmap, mpte)) {
+ pmap_pde_p_failures++;
+ CTR2(KTR_PMAP,
+ "pmap_promote_pde: failure for va %#x in pmap %p", va,
+ pmap);
+ return;
+ }
+
+ /*
+ * Promote the pv entries.
+ */
+ if ((newpde & PG_MANAGED) != 0)
+ pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
+
+ /*
+ * Propagate the PAT index to its proper position.
+ */
+ if ((newpde & PG_PTE_PAT) != 0)
+ newpde ^= PG_PDE_PAT | PG_PTE_PAT;
+
+ /*
+ * Map the superpage.
+ */
+ if (workaround_erratum383)
+ pmap_update_pde(pmap, va, pde, PG_PS | newpde);
+ else if (pmap == kernel_pmap)
+ pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde);
+ else
+ pde_store(pde, PG_PROMOTED | PG_PS | newpde);
+
+ pmap_pde_promotions++;
+ CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
+ " in pmap %p", va, pmap);
+}
+#endif /* VM_NRESERVLEVEL > 0 */
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind)
+{
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ pt_entry_t newpte, origpte;
+ pv_entry_t pv;
+ vm_paddr_t opa, pa;
+ vm_page_t mpte, om;
+ boolean_t invlva, wired;
+
+ va = trunc_page(va);
+ mpte = NULL;
+ wired = (flags & PMAP_ENTER_WIRED) != 0;
+
+ KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+ KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
+ ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
+ va));
+ if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+ VM_OBJECT_ASSERT_LOCKED(m->object);
+
+ rw_wlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ sched_pin();
+
+ pde = pmap_pde(pmap, va);
+ if (va < VM_MAXUSER_ADDRESS) {
+ /*
+ * va is for UVA.
+ * In the case that a page table page is not resident,
+ * we are creating it here. pmap_allocpte() handles
+ * demotion.
+ */
+ mpte = pmap_allocpte(pmap, va, flags);
+ if (mpte == NULL) {
+ KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+ ("pmap_allocpte failed with sleep allowed"));
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ } else {
+ /*
+ * va is for KVA, so pmap_demote_pde() will never fail
+ * to install a page table page. PG_V is also
+ * asserted by pmap_demote_pde().
+ */
+ KASSERT(pde != NULL && (*pde & PG_V) != 0,
+ ("KVA %#x invalid pde pdir %#jx", va,
+ (uintmax_t)pmap->pm_pdir[PTDPTDI]));
+ if ((*pde & PG_PS) != 0)
+ pmap_demote_pde(pmap, pde, va);
+ }
+ pte = pmap_pte_quick(pmap, va);
+
+ /*
+ * Page Directory table entry is not valid, which should not
+ * happen. We should have either allocated the page table
+ * page or demoted the existing mapping above.
+ */
+ if (pte == NULL) {
+ panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
+ (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
+ }
+
+ pa = VM_PAGE_TO_PHYS(m);
+ om = NULL;
+ origpte = *pte;
+ opa = origpte & PG_FRAME;
+
+ /*
+ * Mapping has not changed, must be protection or wiring change.
+ */
+ if (origpte && (opa == pa)) {
+ /*
+ * Wiring change, just update stats. We don't worry about
+ * wiring PT pages as they remain resident as long as there
+ * are valid mappings in them. Hence, if a user page is wired,
+ * the PT page will be also.
+ */
+ if (wired && ((origpte & PG_W) == 0))
+ pmap->pm_stats.wired_count++;
+ else if (!wired && (origpte & PG_W))
+ pmap->pm_stats.wired_count--;
+
+ /*
+ * Remove extra pte reference
+ */
+ if (mpte)
+ mpte->wire_count--;
+
+ if (origpte & PG_MANAGED) {
+ om = m;
+ pa |= PG_MANAGED;
+ }
+ goto validate;
+ }
+
+ pv = NULL;
+
+ /*
+ * Mapping has changed, invalidate old range and fall through to
+ * handle validating new mapping.
+ */
+ if (opa) {
+ if (origpte & PG_W)
+ pmap->pm_stats.wired_count--;
+ if (origpte & PG_MANAGED) {
+ om = PHYS_TO_VM_PAGE(opa);
+ pv = pmap_pvh_remove(&om->md, pmap, va);
+ }
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ KASSERT(mpte->wire_count > 0,
+ ("pmap_enter: missing reference to page table page,"
+ " va: 0x%x", va));
+ }
+ } else
+ pmap->pm_stats.resident_count++;
+
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+ ("pmap_enter: managed mapping within the clean submap"));
+ if (pv == NULL)
+ pv = get_pv_entry(pmap, FALSE);
+ pv->pv_va = va;
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ pa |= PG_MANAGED;
+ } else if (pv != NULL)
+ free_pv_entry(pmap, pv);
+
+ /*
+ * Increment counters
+ */
+ if (wired)
+ pmap->pm_stats.wired_count++;
+
+validate:
+ /*
+ * Now validate mapping with desired protection/wiring.
+ */
+ newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
+ if ((prot & VM_PROT_WRITE) != 0) {
+ newpte |= PG_RW;
+ if ((newpte & PG_MANAGED) != 0)
+ vm_page_aflag_set(m, PGA_WRITEABLE);
+ }
+#if defined(PAE) || defined(PAE_TABLES)
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ newpte |= pg_nx;
+#endif
+ if (wired)
+ newpte |= PG_W;
+ if (va < VM_MAXUSER_ADDRESS)
+ newpte |= PG_U;
+ if (pmap == kernel_pmap)
+ newpte |= pgeflag;
+
+ /*
+ * if the mapping or permission bits are different, we need
+ * to update the pte.
+ */
+ if ((origpte & ~(PG_M|PG_A)) != newpte) {
+ newpte |= PG_A;
+ if ((flags & VM_PROT_WRITE) != 0)
+ newpte |= PG_M;
+ if (origpte & PG_V) {
+ invlva = FALSE;
+ origpte = pte_load_store(pte, newpte);
+ if (origpte & PG_A) {
+ if (origpte & PG_MANAGED)
+ vm_page_aflag_set(om, PGA_REFERENCED);
+ if (opa != VM_PAGE_TO_PHYS(m))
+ invlva = TRUE;
+#if defined(PAE) || defined(PAE_TABLES)
+ if ((origpte & PG_NX) == 0 &&
+ (newpte & PG_NX) != 0)
+ invlva = TRUE;
+#endif
+ }
+ if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ if ((origpte & PG_MANAGED) != 0)
+ vm_page_dirty(om);
+ if ((prot & VM_PROT_WRITE) == 0)
+ invlva = TRUE;
+ }
+ if ((origpte & PG_MANAGED) != 0 &&
+ TAILQ_EMPTY(&om->md.pv_list) &&
+ ((om->flags & PG_FICTITIOUS) != 0 ||
+ TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
+ if (invlva)
+ pmap_invalidate_page(pmap, va);
+ } else
+ pte_store(pte, newpte);
+ }
+
+#if VM_NRESERVLEVEL > 0
+ /*
+ * If both the page table page and the reservation are fully
+ * populated, then attempt promotion.
+ */
+ if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
+ pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+ vm_reserv_level_iffullpop(m) == 0)
+ pmap_promote_pde(pmap, pde, va);
+#endif
+
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Tries to create a 2- or 4MB page mapping. Returns TRUE if successful and
+ * FALSE otherwise. Fails if (1) a page table page cannot be allocated without
+ * blocking, (2) a mapping already exists at the specified virtual address, or
+ * (3) a pv entry cannot be allocated without reclaiming another pv entry.
+ */
+static boolean_t
+pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+ pd_entry_t *pde, newpde;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ pde = pmap_pde(pmap, va);
+ if (*pde != 0) {
+ CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (FALSE);
+ }
+ newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+ PG_PS | PG_V;
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ newpde |= PG_MANAGED;
+
+ /*
+ * Abort this mapping if its PV entry could not be created.
+ */
+ if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
+ CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
+ " in pmap %p", va, pmap);
+ return (FALSE);
+ }
+ }
+#if defined(PAE) || defined(PAE_TABLES)
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ newpde |= pg_nx;
+#endif
+ if (va < VM_MAXUSER_ADDRESS)
+ newpde |= PG_U;
+
+ /*
+ * Increment counters.
+ */
+ pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
+
+ /*
+ * Map the superpage. (This is not a promoted mapping; there will not
+ * be any lingering 4KB page mappings in the TLB.)
+ */
+ pde_store(pde, newpde);
+
+ pmap_pde_mappings++;
+ CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
+ " in pmap %p", va, pmap);
+ return (TRUE);
+}
+
+/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start. This page is
+ * mapped at the given virtual address start. Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object. The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end. Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
+void
+pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
+ vm_page_t m_start, vm_prot_t prot)
+{
+ vm_offset_t va;
+ vm_page_t m, mpte;
+ vm_pindex_t diff, psize;
+
+ VM_OBJECT_ASSERT_LOCKED(m_start->object);
+
+ psize = atop(end - start);
+ mpte = NULL;
+ m = m_start;
+ rw_wlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
+ va = start + ptoa(diff);
+ if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
+ m->psind == 1 && pg_ps_enabled &&
+ pmap_enter_pde(pmap, va, m, prot))
+ m = &m[NBPDR / PAGE_SIZE - 1];
+ else
+ mpte = pmap_enter_quick_locked(pmap, va, m, prot,
+ mpte);
+ m = TAILQ_NEXT(m, listq);
+ }
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. Current pmap & pmap exists.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * but is *MUCH* faster than pmap_enter...
+ */
+
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+
+ rw_wlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+}
+
+static vm_page_t
+pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, vm_page_t mpte)
+{
+ pt_entry_t *pte;
+ vm_paddr_t pa;
+ struct spglist free;
+
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+ (m->oflags & VPO_UNMANAGED) != 0,
+ ("pmap_enter_quick_locked: managed mapping within the clean submap"));
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * In the case that a page table page is not
+ * resident, we are creating it here.
+ */
+ if (va < VM_MAXUSER_ADDRESS) {
+ u_int ptepindex;
+ pd_entry_t ptepa;
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = va >> PDRSHIFT;
+ if (mpte && (mpte->pindex == ptepindex)) {
+ mpte->wire_count++;
+ } else {
+ /*
+ * Get the page directory entry
+ */
+ ptepa = pmap->pm_pdir[ptepindex];
+
+ /*
+ * If the page table page is mapped, we just increment
+ * the hold count, and activate it.
+ */
+ if (ptepa) {
+ if (ptepa & PG_PS)
+ return (NULL);
+ mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
+ mpte->wire_count++;
+ } else {
+ mpte = _pmap_allocpte(pmap, ptepindex,
+ PMAP_ENTER_NOSLEEP);
+ if (mpte == NULL)
+ return (mpte);
+ }
+ }
+ } else {
+ mpte = NULL;
+ }
+
+ /*
+ * This call to vtopte makes the assumption that we are
+ * entering the page into the current pmap. In order to support
+ * quick entry into any pmap, one would likely use pmap_pte_quick.
+ * But that isn't as quick as vtopte.
+ */
+ pte = vtopte(va);
+ if (*pte) {
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ mpte = NULL;
+ }
+ return (mpte);
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0 &&
+ !pmap_try_insert_pv_entry(pmap, va, m)) {
+ if (mpte != NULL) {
+ SLIST_INIT(&free);
+ if (pmap_unwire_ptp(pmap, mpte, &free)) {
+ pmap_invalidate_page(pmap, va);
+ pmap_free_zero_pages(&free);
+ }
+
+ mpte = NULL;
+ }
+ return (mpte);
+ }
+
+ /*
+ * Increment counters
+ */
+ pmap->pm_stats.resident_count++;
+
+ pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+#if defined(PAE) || defined(PAE_TABLES)
+ if ((prot & VM_PROT_EXECUTE) == 0)
+ pa |= pg_nx;
+#endif
+
+ /*
+ * Now validate mapping with RO protection
+ */
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ pte_store(pte, pa | PG_V | PG_U);
+ else
+ pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+ return (mpte);
+}
+
+/*
+ * Make a temporary mapping for a physical address. This is only intended
+ * to be used for panic dumps.
+ */
+void *
+pmap_kenter_temporary(vm_paddr_t pa, int i)
+{
+ vm_offset_t va;
+
+ va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
+ pmap_kenter(va, pa);
+ invlpg(va);
+ return ((void *)crashdumpmap);
+}
+
+/*
+ * This code maps large physical mmap regions into the
+ * processor address space. Note that some shortcuts
+ * are taken, but the code works.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size)
+{
+ pd_entry_t *pde;
+ vm_paddr_t pa, ptepa;
+ vm_page_t p;
+ int pat_mode;
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
+ ("pmap_object_init_pt: non-device object"));
+ if (pseflag &&
+ (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
+ if (!vm_object_populate(object, pindex, pindex + atop(size)))
+ return;
+ p = vm_page_lookup(object, pindex);
+ KASSERT(p->valid == VM_PAGE_BITS_ALL,
+ ("pmap_object_init_pt: invalid page %p", p));
+ pat_mode = p->md.pat_mode;
+
+ /*
+ * Abort the mapping if the first page is not physically
+ * aligned to a 2/4MB page boundary.
+ */
+ ptepa = VM_PAGE_TO_PHYS(p);
+ if (ptepa & (NBPDR - 1))
+ return;
+
+ /*
+ * Skip the first page. Abort the mapping if the rest of
+ * the pages are not physically contiguous or have differing
+ * memory attributes.
+ */
+ p = TAILQ_NEXT(p, listq);
+ for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
+ pa += PAGE_SIZE) {
+ KASSERT(p->valid == VM_PAGE_BITS_ALL,
+ ("pmap_object_init_pt: invalid page %p", p));
+ if (pa != VM_PAGE_TO_PHYS(p) ||
+ pat_mode != p->md.pat_mode)
+ return;
+ p = TAILQ_NEXT(p, listq);
+ }
+
+ /*
+ * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and
+ * "size" is a multiple of 2/4M, adding the PAT setting to
+ * "pa" will not affect the termination of this loop.
+ */
+ PMAP_LOCK(pmap);
+ for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
+ size; pa += NBPDR) {
+ pde = pmap_pde(pmap, addr);
+ if (*pde == 0) {
+ pde_store(pde, pa | PG_PS | PG_M | PG_A |
+ PG_U | PG_RW | PG_V);
+ pmap->pm_stats.resident_count += NBPDR /
+ PAGE_SIZE;
+ pmap_pde_mappings++;
+ }
+ /* Else continue on if the PDE is already valid. */
+ addr += NBPDR;
+ }
+ PMAP_UNLOCK(pmap);
+ }
+}
+
+/*
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
+ */
+void
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t pdnxt;
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ boolean_t pv_lists_locked;
+
+ if (pmap_is_current(pmap))
+ pv_lists_locked = FALSE;
+ else {
+ pv_lists_locked = TRUE;
+resume:
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ }
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = pdnxt) {
+ pdnxt = (sva + NBPDR) & ~PDRMASK;
+ if (pdnxt < sva)
+ pdnxt = eva;
+ pde = pmap_pde(pmap, sva);
+ if ((*pde & PG_V) == 0)
+ continue;
+ if ((*pde & PG_PS) != 0) {
+ if ((*pde & PG_W) == 0)
+ panic("pmap_unwire: pde %#jx is missing PG_W",
+ (uintmax_t)*pde);
+
+ /*
+ * Are we unwiring the entire large page? If not,
+ * demote the mapping and fall through.
+ */
+ if (sva + NBPDR == pdnxt && eva >= pdnxt) {
+ /*
+ * Regardless of whether a pde (or pte) is 32
+ * or 64 bits in size, PG_W is among the least
+ * significant 32 bits.
+ */
+ atomic_clear_int((u_int *)pde, PG_W);
+ pmap->pm_stats.wired_count -= NBPDR /
+ PAGE_SIZE;
+ continue;
+ } else {
+ if (!pv_lists_locked) {
+ pv_lists_locked = TRUE;
+ if (!rw_try_wlock(&pvh_global_lock)) {
+ PMAP_UNLOCK(pmap);
+ /* Repeat sva. */
+ goto resume;
+ }
+ sched_pin();
+ }
+ if (!pmap_demote_pde(pmap, pde, sva))
+ panic("pmap_unwire: demotion failed");
+ }
+ }
+ if (pdnxt > eva)
+ pdnxt = eva;
+ for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & PG_V) == 0)
+ continue;
+ if ((*pte & PG_W) == 0)
+ panic("pmap_unwire: pte %#jx is missing PG_W",
+ (uintmax_t)*pte);
+
+ /*
+ * PG_W must be cleared atomically. Although the pmap
+ * lock synchronizes access to PG_W, another processor
+ * could be setting PG_M and/or PG_A concurrently.
+ *
+ * PG_W is among the least significant 32 bits.
+ */
+ atomic_clear_int((u_int *)pte, PG_W);
+ pmap->pm_stats.wired_count--;
+ }
+ }
+ if (pv_lists_locked) {
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ }
+ PMAP_UNLOCK(pmap);
+}
+
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
+ vm_offset_t src_addr)
+{
+ struct spglist free;
+ vm_offset_t addr;
+ vm_offset_t end_addr = src_addr + len;
+ vm_offset_t pdnxt;
+
+ if (dst_addr != src_addr)
+ return;
+
+ if (!pmap_is_current(src_pmap))
+ return;
+
+ rw_wlock(&pvh_global_lock);
+ if (dst_pmap < src_pmap) {
+ PMAP_LOCK(dst_pmap);
+ PMAP_LOCK(src_pmap);
+ } else {
+ PMAP_LOCK(src_pmap);
+ PMAP_LOCK(dst_pmap);
+ }
+ sched_pin();
+ for (addr = src_addr; addr < end_addr; addr = pdnxt) {
+ pt_entry_t *src_pte, *dst_pte;
+ vm_page_t dstmpte, srcmpte;
+ pd_entry_t srcptepaddr;
+ u_int ptepindex;
+
+ KASSERT(addr < UPT_MIN_ADDRESS,
+ ("pmap_copy: invalid to pmap_copy page tables"));
+
+ pdnxt = (addr + NBPDR) & ~PDRMASK;
+ if (pdnxt < addr)
+ pdnxt = end_addr;
+ ptepindex = addr >> PDRSHIFT;
+
+ srcptepaddr = src_pmap->pm_pdir[ptepindex];
+ if (srcptepaddr == 0)
+ continue;
+
+ if (srcptepaddr & PG_PS) {
+ if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
+ continue;
+ if (dst_pmap->pm_pdir[ptepindex] == 0 &&
+ ((srcptepaddr & PG_MANAGED) == 0 ||
+ pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
+ PG_PS_FRAME))) {
+ dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
+ ~PG_W;
+ dst_pmap->pm_stats.resident_count +=
+ NBPDR / PAGE_SIZE;
+ pmap_pde_mappings++;
+ }
+ continue;
+ }
+
+ srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
+ KASSERT(srcmpte->wire_count > 0,
+ ("pmap_copy: source page table page is unused"));
+
+ if (pdnxt > end_addr)
+ pdnxt = end_addr;
+
+ src_pte = vtopte(addr);
+ while (addr < pdnxt) {
+ pt_entry_t ptetemp;
+ ptetemp = *src_pte;
+ /*
+ * we only virtual copy managed pages
+ */
+ if ((ptetemp & PG_MANAGED) != 0) {
+ dstmpte = pmap_allocpte(dst_pmap, addr,
+ PMAP_ENTER_NOSLEEP);
+ if (dstmpte == NULL)
+ goto out;
+ dst_pte = pmap_pte_quick(dst_pmap, addr);
+ if (*dst_pte == 0 &&
+ pmap_try_insert_pv_entry(dst_pmap, addr,
+ PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
+ /*
+ * Clear the wired, modified, and
+ * accessed (referenced) bits
+ * during the copy.
+ */
+ *dst_pte = ptetemp & ~(PG_W | PG_M |
+ PG_A);
+ dst_pmap->pm_stats.resident_count++;
+ } else {
+ SLIST_INIT(&free);
+ if (pmap_unwire_ptp(dst_pmap, dstmpte,
+ &free)) {
+ pmap_invalidate_page(dst_pmap,
+ addr);
+ pmap_free_zero_pages(&free);
+ }
+ goto out;
+ }
+ if (dstmpte->wire_count >= srcmpte->wire_count)
+ break;
+ }
+ addr += PAGE_SIZE;
+ src_pte++;
+ }
+ }
+out:
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(src_pmap);
+ PMAP_UNLOCK(dst_pmap);
+}
+
+/*
+ * Zero 1 page of virtual memory mapped from a hardware page by the caller.
+ */
+static __inline void
+pagezero(void *page)
+{
+#if defined(I686_CPU)
+ if (cpu_class == CPUCLASS_686) {
+ if (cpu_feature & CPUID_SSE2)
+ sse2_pagezero(page);
+ else
+ i686_pagezero(page);
+ } else
+#endif
+ bzero(page, PAGE_SIZE);
+}
+
+/*
+ * Zero the specified hardware page.
+ */
+void
+pmap_zero_page(vm_page_t m)
+{
+ pt_entry_t *cmap_pte2;
+ struct pcpu *pc;
+
+ sched_pin();
+ pc = get_pcpu();
+ cmap_pte2 = pc->pc_cmap_pte2;
+ mtx_lock(&pc->pc_cmap_lock);
+ if (*cmap_pte2)
+ panic("pmap_zero_page: CMAP2 busy");
+ *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+ pmap_cache_bits(m->md.pat_mode, 0);
+ invlcaddr(pc->pc_cmap_addr2);
+ pagezero(pc->pc_cmap_addr2);
+ *cmap_pte2 = 0;
+
+ /*
+ * Unpin the thread before releasing the lock. Otherwise the thread
+ * could be rescheduled while still bound to the current CPU, only
+ * to unpin itself immediately upon resuming execution.
+ */
+ sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
+}
+
+/*
+ * Zero an area within a single hardware page. off and size must not
+ * cover an area beyond a single hardware page.
+ */
+void
+pmap_zero_page_area(vm_page_t m, int off, int size)
+{
+ pt_entry_t *cmap_pte2;
+ struct pcpu *pc;
+
+ sched_pin();
+ pc = get_pcpu();
+ cmap_pte2 = pc->pc_cmap_pte2;
+ mtx_lock(&pc->pc_cmap_lock);
+ if (*cmap_pte2)
+ panic("pmap_zero_page_area: CMAP2 busy");
+ *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+ pmap_cache_bits(m->md.pat_mode, 0);
+ invlcaddr(pc->pc_cmap_addr2);
+ if (off == 0 && size == PAGE_SIZE)
+ pagezero(pc->pc_cmap_addr2);
+ else
+ bzero(pc->pc_cmap_addr2 + off, size);
+ *cmap_pte2 = 0;
+ sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
+}
+
+/*
+ * Copy 1 specified hardware page to another.
+ */
+void
+pmap_copy_page(vm_page_t src, vm_page_t dst)
+{
+ pt_entry_t *cmap_pte1, *cmap_pte2;
+ struct pcpu *pc;
+
+ sched_pin();
+ pc = get_pcpu();
+ cmap_pte1 = pc->pc_cmap_pte1;
+ cmap_pte2 = pc->pc_cmap_pte2;
+ mtx_lock(&pc->pc_cmap_lock);
+ if (*cmap_pte1)
+ panic("pmap_copy_page: CMAP1 busy");
+ if (*cmap_pte2)
+ panic("pmap_copy_page: CMAP2 busy");
+ *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
+ pmap_cache_bits(src->md.pat_mode, 0);
+ invlcaddr(pc->pc_cmap_addr1);
+ *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
+ pmap_cache_bits(dst->md.pat_mode, 0);
+ invlcaddr(pc->pc_cmap_addr2);
+ bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE);
+ *cmap_pte1 = 0;
+ *cmap_pte2 = 0;
+ sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
+}
+
+int unmapped_buf_allowed = 1;
+
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ vm_page_t a_pg, b_pg;
+ char *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ pt_entry_t *cmap_pte1, *cmap_pte2;
+ struct pcpu *pc;
+ int cnt;
+
+ sched_pin();
+ pc = get_pcpu();
+ cmap_pte1 = pc->pc_cmap_pte1;
+ cmap_pte2 = pc->pc_cmap_pte2;
+ mtx_lock(&pc->pc_cmap_lock);
+ if (*cmap_pte1 != 0)
+ panic("pmap_copy_pages: CMAP1 busy");
+ if (*cmap_pte2 != 0)
+ panic("pmap_copy_pages: CMAP2 busy");
+ while (xfersize > 0) {
+ a_pg = ma[a_offset >> PAGE_SHIFT];
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ b_pg = mb[b_offset >> PAGE_SHIFT];
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
+ pmap_cache_bits(a_pg->md.pat_mode, 0);
+ invlcaddr(pc->pc_cmap_addr1);
+ *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
+ PG_M | pmap_cache_bits(b_pg->md.pat_mode, 0);
+ invlcaddr(pc->pc_cmap_addr2);
+ a_cp = pc->pc_cmap_addr1 + a_pg_offset;
+ b_cp = pc->pc_cmap_addr2 + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ *cmap_pte1 = 0;
+ *cmap_pte2 = 0;
+ sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
+}
+
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t pv;
+ int loops = 0;
+ boolean_t rv;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_page_exists_quick: page %p is not managed", m));
+ rv = FALSE;
+ rw_wlock(&pvh_global_lock);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ if (PV_PMAP(pv) == pmap) {
+ rv = TRUE;
+ break;
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ if (PV_PMAP(pv) == pmap) {
+ rv = TRUE;
+ break;
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ }
+ rw_wunlock(&pvh_global_lock);
+ return (rv);
+}
+
+/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ int count;
+
+ count = 0;
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ return (count);
+ rw_wlock(&pvh_global_lock);
+ count = pmap_pvh_wired_mappings(&m->md, count);
+ if ((m->flags & PG_FICTITIOUS) == 0) {
+ count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
+ count);
+ }
+ rw_wunlock(&pvh_global_lock);
+ return (count);
+}
+
+/*
+ * pmap_pvh_wired_mappings:
+ *
+ * Return the updated number "count" of managed mappings that are wired.
+ */
+static int
+pmap_pvh_wired_mappings(struct md_page *pvh, int count)
+{
+ pmap_t pmap;
+ pt_entry_t *pte;
+ pv_entry_t pv;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ sched_pin();
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ if ((*pte & PG_W) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ sched_unpin();
+ return (count);
+}
+
+/*
+ * Returns TRUE if the given page is mapped individually or as part of
+ * a 4mpage. Otherwise, returns FALSE.
+ */
+boolean_t
+pmap_page_is_mapped(vm_page_t m)
+{
+ boolean_t rv;
+
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ return (FALSE);
+ rw_wlock(&pvh_global_lock);
+ rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+ ((m->flags & PG_FICTITIOUS) == 0 &&
+ !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
+ rw_wunlock(&pvh_global_lock);
+ return (rv);
+}
+
+/*
+ * Remove all pages from specified address space
+ * this aids process exit speeds. Also, this code
+ * is special cased for current process only, but
+ * can have the more generic (and slightly slower)
+ * mode enabled. This is much faster than pmap_remove
+ * in the case of running down an entire address space.
+ */
+void
+pmap_remove_pages(pmap_t pmap)
+{
+ pt_entry_t *pte, tpte;
+ vm_page_t m, mpte, mt;
+ pv_entry_t pv;
+ struct md_page *pvh;
+ struct pv_chunk *pc, *npc;
+ struct spglist free;
+ int field, idx;
+ int32_t bit;
+ uint32_t inuse, bitmask;
+ int allfree;
+
+ if (pmap != PCPU_GET(curpmap)) {
+ printf("warning: pmap_remove_pages called with non-current pmap\n");
+ return;
+ }
+ SLIST_INIT(&free);
+ rw_wlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ sched_pin();
+ TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
+ KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap,
+ pc->pc_pmap));
+ allfree = 1;
+ for (field = 0; field < _NPCM; field++) {
+ inuse = ~pc->pc_map[field] & pc_freemask[field];
+ while (inuse != 0) {
+ bit = bsfl(inuse);
+ bitmask = 1UL << bit;
+ idx = field * 32 + bit;
+ pv = &pc->pc_pventry[idx];
+ inuse &= ~bitmask;
+
+ pte = pmap_pde(pmap, pv->pv_va);
+ tpte = *pte;
+ if ((tpte & PG_PS) == 0) {
+ pte = vtopte(pv->pv_va);
+ tpte = *pte & ~PG_PTE_PAT;
+ }
+
+ if (tpte == 0) {
+ printf(
+ "TPTE at %p IS ZERO @ VA %08x\n",
+ pte, pv->pv_va);
+ panic("bad pte");
+ }
+
+/*
+ * We cannot remove wired pages from a process' mapping at this time
+ */
+ if (tpte & PG_W) {
+ allfree = 0;
+ continue;
+ }
+
+ m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
+ KASSERT(m->phys_addr == (tpte & PG_FRAME),
+ ("vm_page_t %p phys_addr mismatch %016jx %016jx",
+ m, (uintmax_t)m->phys_addr,
+ (uintmax_t)tpte));
+
+ KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+ m < &vm_page_array[vm_page_array_size],
+ ("pmap_remove_pages: bad tpte %#jx",
+ (uintmax_t)tpte));
+
+ pte_clear(pte);
+
+ /*
+ * Update the vm_page_t clean/reference bits.
+ */
+ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ if ((tpte & PG_PS) != 0) {
+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+ vm_page_dirty(mt);
+ } else
+ vm_page_dirty(m);
+ }
+
+ /* Mark free */
+ PV_STAT(pv_entry_frees++);
+ PV_STAT(pv_entry_spare++);
+ pv_entry_count--;
+ pc->pc_map[field] |= bitmask;
+ if ((tpte & PG_PS) != 0) {
+ pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
+ pvh = pa_to_pvh(tpte & PG_PS_FRAME);
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ if (TAILQ_EMPTY(&pvh->pv_list)) {
+ for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+ if (TAILQ_EMPTY(&mt->md.pv_list))
+ vm_page_aflag_clear(mt, PGA_WRITEABLE);
+ }
+ mpte = pmap_remove_pt_page(pmap, pv->pv_va);
+ if (mpte != NULL) {
+ pmap->pm_stats.resident_count--;
+ KASSERT(mpte->wire_count == NPTEPG,
+ ("pmap_remove_pages: pte page wire count error"));
+ mpte->wire_count = 0;
+ pmap_add_delayed_free_list(mpte, &free, FALSE);
+ }
+ } else {
+ pmap->pm_stats.resident_count--;
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ if (TAILQ_EMPTY(&m->md.pv_list) &&
+ (m->flags & PG_FICTITIOUS) == 0) {
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ if (TAILQ_EMPTY(&pvh->pv_list))
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ }
+ pmap_unuse_pt(pmap, pv->pv_va, &free);
+ }
+ }
+ }
+ if (allfree) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ free_pv_chunk(pc);
+ }
+ }
+ sched_unpin();
+ pmap_invalidate_all(pmap);
+ rw_wunlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ pmap_free_zero_pages(&free);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page was modified
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+ boolean_t rv;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_is_modified: page %p is not managed", m));
+
+ /*
+ * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
+ * is clear, no PTEs can have PG_M set.
+ */
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+ return (FALSE);
+ rw_wlock(&pvh_global_lock);
+ rv = pmap_is_modified_pvh(&m->md) ||
+ ((m->flags & PG_FICTITIOUS) == 0 &&
+ pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+ rw_wunlock(&pvh_global_lock);
+ return (rv);
+}
+
+/*
+ * Returns TRUE if any of the given mappings were used to modify
+ * physical memory. Otherwise, returns FALSE. Both page and 2mpage
+ * mappings are supported.
+ */
+static boolean_t
+pmap_is_modified_pvh(struct md_page *pvh)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ boolean_t rv;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ rv = FALSE;
+ sched_pin();
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ break;
+ }
+ sched_unpin();
+ return (rv);
+}
+
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ boolean_t rv;
+
+ rv = FALSE;
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, addr);
+ if (*pde != 0 && (*pde & PG_PS) == 0) {
+ pte = vtopte(addr);
+ rv = *pte == 0;
+ }
+ PMAP_UNLOCK(pmap);
+ return (rv);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+ boolean_t rv;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_is_referenced: page %p is not managed", m));
+ rw_wlock(&pvh_global_lock);
+ rv = pmap_is_referenced_pvh(&m->md) ||
+ ((m->flags & PG_FICTITIOUS) == 0 &&
+ pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+ rw_wunlock(&pvh_global_lock);
+ return (rv);
+}
+
+/*
+ * Returns TRUE if any of the given mappings were referenced and FALSE
+ * otherwise. Both page and 4mpage mappings are supported.
+ */
+static boolean_t
+pmap_is_referenced_pvh(struct md_page *pvh)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ boolean_t rv;
+
+ rw_assert(&pvh_global_lock, RA_WLOCKED);
+ rv = FALSE;
+ sched_pin();
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ break;
+ }
+ sched_unpin();
+ return (rv);
+}
+
+/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+pmap_remove_write(vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t next_pv, pv;
+ pmap_t pmap;
+ pd_entry_t *pde;
+ pt_entry_t oldpte, *pte;
+ vm_offset_t va;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+ * set by another thread while the object is locked. Thus,
+ * if PGA_WRITEABLE is clear, no page table entries need updating.
+ */
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+ return;
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ goto small_mappings;
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
+ va = pv->pv_va;
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, va);
+ if ((*pde & PG_RW) != 0)
+ (void)pmap_demote_pde(pmap, pde, va);
+ PMAP_UNLOCK(pmap);
+ }
+small_mappings:
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found"
+ " a 4mpage in page %p's pv list", m));
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+retry:
+ oldpte = *pte;
+ if ((oldpte & PG_RW) != 0) {
+ /*
+ * Regardless of whether a pte is 32 or 64 bits
+ * in size, PG_RW and PG_M are among the least
+ * significant 32 bits.
+ */
+ if (!atomic_cmpset_int((u_int *)pte, oldpte,
+ oldpte & ~(PG_RW | PG_M)))
+ goto retry;
+ if ((oldpte & PG_M) != 0)
+ vm_page_dirty(m);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+}
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return a count of reference bits for a page, clearing those bits.
+ * It is not necessary for every reference bit to be cleared, but it
+ * is necessary that 0 only be returned when there are truly no
+ * reference bits set.
+ *
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits. This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified(). However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages. Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t pv, pvf;
+ pmap_t pmap;
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ vm_paddr_t pa;
+ int rtval = 0;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_ts_referenced: page %p is not managed", m));
+ pa = VM_PAGE_TO_PHYS(m);
+ pvh = pa_to_pvh(pa);
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0 ||
+ (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
+ goto small_mappings;
+ pv = pvf;
+ do {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, pv->pv_va);
+ if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ /*
+ * Although "*pde" is mapping a 2/4MB page, because
+ * this function is called at a 4KB page granularity,
+ * we only update the 4KB page under test.
+ */
+ vm_page_dirty(m);
+ }
+ if ((*pde & PG_A) != 0) {
+ /*
+ * Since this reference bit is shared by either 1024
+ * or 512 4KB pages, it should not be cleared every
+ * time it is tested. Apply a simple "hash" function
+ * on the physical page number, the virtual superpage
+ * number, and the pmap address to select one 4KB page
+ * out of the 1024 or 512 on which testing the
+ * reference bit will result in clearing that bit.
+ * This function is designed to avoid the selection of
+ * the same 4KB page for every 2- or 4MB page mapping.
+ *
+ * On demotion, a mapping that hasn't been referenced
+ * is simply destroyed. To avoid the possibility of a
+ * subsequent page fault on a demoted wired mapping,
+ * always leave its reference bit set. Moreover,
+ * since the superpage is wired, the current state of
+ * its reference bit won't affect page replacement.
+ */
+ if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
+ (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
+ (*pde & PG_W) == 0) {
+ atomic_clear_int((u_int *)pde, PG_A);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ }
+ rtval++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ }
+ if (rtval >= PMAP_TS_REFERENCED_MAX)
+ goto out;
+ } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
+small_mappings:
+ if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
+ goto out;
+ pv = pvf;
+ do {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0,
+ ("pmap_ts_referenced: found a 4mpage in page %p's pv list",
+ m));
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
+ if ((*pte & PG_A) != 0) {
+ atomic_clear_int((u_int *)pte, PG_A);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ rtval++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ }
+ } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval <
+ PMAP_TS_REFERENCED_MAX);
+out:
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ return (rtval);
+}
+
+/*
+ * Apply the given advice to the specified range of addresses within the
+ * given pmap. Depending on the advice, clear the referenced and/or
+ * modified flags in each mapping and set the mapped page's dirty field.
+ */
+void
+pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
+{
+ pd_entry_t oldpde, *pde;
+ pt_entry_t *pte;
+ vm_offset_t va, pdnxt;
+ vm_page_t m;
+ boolean_t anychanged, pv_lists_locked;
+
+ if (advice != MADV_DONTNEED && advice != MADV_FREE)
+ return;
+ if (pmap_is_current(pmap))
+ pv_lists_locked = FALSE;
+ else {
+ pv_lists_locked = TRUE;
+resume:
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ }
+ anychanged = FALSE;
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = pdnxt) {
+ pdnxt = (sva + NBPDR) & ~PDRMASK;
+ if (pdnxt < sva)
+ pdnxt = eva;
+ pde = pmap_pde(pmap, sva);
+ oldpde = *pde;
+ if ((oldpde & PG_V) == 0)
+ continue;
+ else if ((oldpde & PG_PS) != 0) {
+ if ((oldpde & PG_MANAGED) == 0)
+ continue;
+ if (!pv_lists_locked) {
+ pv_lists_locked = TRUE;
+ if (!rw_try_wlock(&pvh_global_lock)) {
+ if (anychanged)
+ pmap_invalidate_all(pmap);
+ PMAP_UNLOCK(pmap);
+ goto resume;
+ }
+ sched_pin();
+ }
+ if (!pmap_demote_pde(pmap, pde, sva)) {
+ /*
+ * The large page mapping was destroyed.
+ */
+ continue;
+ }
+
+ /*
+ * Unless the page mappings are wired, remove the
+ * mapping to a single page so that a subsequent
+ * access may repromote. Since the underlying page
+ * table page is fully populated, this removal never
+ * frees a page table page.
+ */
+ if ((oldpde & PG_W) == 0) {
+ pte = pmap_pte_quick(pmap, sva);
+ KASSERT((*pte & PG_V) != 0,
+ ("pmap_advise: invalid PTE"));
+ pmap_remove_pte(pmap, pte, sva, NULL);
+ anychanged = TRUE;
+ }
+ }
+ if (pdnxt > eva)
+ pdnxt = eva;
+ va = pdnxt;
+ for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
+ sva += PAGE_SIZE) {
+ if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
+ goto maybe_invlrng;
+ else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ if (advice == MADV_DONTNEED) {
+ /*
+ * Future calls to pmap_is_modified()
+ * can be avoided by making the page
+ * dirty now.
+ */
+ m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
+ vm_page_dirty(m);
+ }
+ atomic_clear_int((u_int *)pte, PG_M | PG_A);
+ } else if ((*pte & PG_A) != 0)
+ atomic_clear_int((u_int *)pte, PG_A);
+ else
+ goto maybe_invlrng;
+ if ((*pte & PG_G) != 0) {
+ if (va == pdnxt)
+ va = sva;
+ } else
+ anychanged = TRUE;
+ continue;
+maybe_invlrng:
+ if (va != pdnxt) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = pdnxt;
+ }
+ }
+ if (va != pdnxt)
+ pmap_invalidate_range(pmap, va, sva);
+ }
+ if (anychanged)
+ pmap_invalidate_all(pmap);
+ if (pv_lists_locked) {
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+ }
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+ struct md_page *pvh;
+ pv_entry_t next_pv, pv;
+ pmap_t pmap;
+ pd_entry_t oldpde, *pde;
+ pt_entry_t oldpte, *pte;
+ vm_offset_t va;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_clear_modify: page %p is not managed", m));
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ KASSERT(!vm_page_xbusied(m),
+ ("pmap_clear_modify: page %p is exclusive busied", m));
+
+ /*
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
+ * If the object containing the page is locked and the page is not
+ * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
+ */
+ if ((m->aflags & PGA_WRITEABLE) == 0)
+ return;
+ rw_wlock(&pvh_global_lock);
+ sched_pin();
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ goto small_mappings;
+ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
+ va = pv->pv_va;
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, va);
+ oldpde = *pde;
+ if ((oldpde & PG_RW) != 0) {
+ if (pmap_demote_pde(pmap, pde, va)) {
+ if ((oldpde & PG_W) == 0) {
+ /*
+ * Write protect the mapping to a
+ * single page so that a subsequent
+ * write access may repromote.
+ */
+ va += VM_PAGE_TO_PHYS(m) - (oldpde &
+ PG_PS_FRAME);
+ pte = pmap_pte_quick(pmap, va);
+ oldpte = *pte;
+ if ((oldpte & PG_V) != 0) {
+ /*
+ * Regardless of whether a pte is 32 or 64 bits
+ * in size, PG_RW and PG_M are among the least
+ * significant 32 bits.
+ */
+ while (!atomic_cmpset_int((u_int *)pte,
+ oldpte,
+ oldpte & ~(PG_M | PG_RW)))
+ oldpte = *pte;
+ vm_page_dirty(m);
+ pmap_invalidate_page(pmap, va);
+ }
+ }
+ }
+ }
+ PMAP_UNLOCK(pmap);
+ }
+small_mappings:
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
+ " a 4mpage in page %p's pv list", m));
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ /*
+ * Regardless of whether a pte is 32 or 64 bits
+ * in size, PG_M is among the least significant
+ * 32 bits.
+ */
+ atomic_clear_int((u_int *)pte, PG_M);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ sched_unpin();
+ rw_wunlock(&pvh_global_lock);
+}
+
+/*
+ * Miscellaneous support routines follow
+ */
+
+/* Adjust the cache mode for a 4KB page mapped via a PTE. */
+static __inline void
+pmap_pte_attr(pt_entry_t *pte, int cache_bits)
+{
+ u_int opte, npte;
+
+ /*
+ * The cache mode bits are all in the low 32-bits of the
+ * PTE, so we can just spin on updating the low 32-bits.
+ */
+ do {
+ opte = *(u_int *)pte;
+ npte = opte & ~PG_PTE_CACHE;
+ npte |= cache_bits;
+ } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
+}
+
+/* Adjust the cache mode for a 2/4MB page mapped via a PDE. */
+static __inline void
+pmap_pde_attr(pd_entry_t *pde, int cache_bits)
+{
+ u_int opde, npde;
+
+ /*
+ * The cache mode bits are all in the low 32-bits of the
+ * PDE, so we can just spin on updating the low 32-bits.
+ */
+ do {
+ opde = *(u_int *)pde;
+ npde = opde & ~PG_PDE_CACHE;
+ npde |= cache_bits;
+ } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
+}
+
+/*
+ * Map a set of physical memory pages into the kernel virtual
+ * address space. Return a pointer to where it is mapped. This
+ * routine is intended to be used for mapping device memory,
+ * NOT real memory.
+ */
+void *
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
+{
+ struct pmap_preinit_mapping *ppim;
+ vm_offset_t va, offset;
+ vm_size_t tmpsize;
+ int i;
+
+ offset = pa & PAGE_MASK;
+ size = round_page(offset + size);
+ pa = pa & PG_FRAME;
+
+ if (pa < KERNLOAD && pa + size <= KERNLOAD)
+ va = KERNBASE + pa;
+ else if (!pmap_initialized) {
+ va = 0;
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == 0) {
+ ppim->pa = pa;
+ ppim->sz = size;
+ ppim->mode = mode;
+ ppim->va = virtual_avail;
+ virtual_avail += size;
+ va = ppim->va;
+ break;
+ }
+ }
+ if (va == 0)
+ panic("%s: too many preinit mappings", __func__);
+ } else {
+ /*
+ * If we have a preinit mapping, re-use it.
+ */
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->pa == pa && ppim->sz == size &&
+ ppim->mode == mode)
+ return ((void *)(ppim->va + offset));
+ }
+ va = kva_alloc(size);
+ if (va == 0)
+ panic("%s: Couldn't allocate KVA", __func__);
+ }
+ for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
+ pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
+ pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
+ pmap_invalidate_cache_range(va, va + size, FALSE);
+ return ((void *)(va + offset));
+}
+
+void *
+pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
+}
+
+void *
+pmap_mapbios(vm_paddr_t pa, vm_size_t size)
+{
+
+ return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
+}
+
+void
+pmap_unmapdev(vm_offset_t va, vm_size_t size)
+{
+ struct pmap_preinit_mapping *ppim;
+ vm_offset_t offset;
+ int i;
+
+ if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
+ return;
+ offset = va & PAGE_MASK;
+ size = round_page(offset + size);
+ va = trunc_page(va);
+ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
+ ppim = pmap_preinit_mapping + i;
+ if (ppim->va == va && ppim->sz == size) {
+ if (pmap_initialized)
+ return;
+ ppim->pa = 0;
+ ppim->va = 0;
+ ppim->sz = 0;
+ ppim->mode = 0;
+ if (va + size == virtual_avail)
+ virtual_avail = va;
+ return;
+ }
+ }
+ if (pmap_initialized)
+ kva_free(va, size);
+}
+
+/*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+ m->md.pat_mode = ma;
+ if ((m->flags & PG_FICTITIOUS) != 0)
+ return;
+
+ /*
+ * If "m" is a normal page, flush it from the cache.
+ * See pmap_invalidate_cache_range().
+ *
+ * First, try to find an existing mapping of the page by sf
+ * buffer. sf_buf_invalidate_cache() modifies mapping and
+ * flushes the cache.
+ */
+ if (sf_buf_invalidate_cache(m))
+ return;
+
+ /*
+ * If page is not mapped by sf buffer, but CPU does not
+ * support self snoop, map the page transiently and do
+ * invalidation. In the worst case, whole cache is flushed by
+ * pmap_invalidate_cache_range().
+ */
+ if ((cpu_feature & CPUID_SS) == 0)
+ pmap_flush_page(m);
+}
+
+static void
+pmap_flush_page(vm_page_t m)
+{
+ pt_entry_t *cmap_pte2;
+ struct pcpu *pc;
+ vm_offset_t sva, eva;
+ bool useclflushopt;
+
+ useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
+ if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) {
+ sched_pin();
+ pc = get_pcpu();
+ cmap_pte2 = pc->pc_cmap_pte2;
+ mtx_lock(&pc->pc_cmap_lock);
+ if (*cmap_pte2)
+ panic("pmap_flush_page: CMAP2 busy");
+ *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
+ PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
+ invlcaddr(pc->pc_cmap_addr2);
+ sva = (vm_offset_t)pc->pc_cmap_addr2;
+ eva = sva + PAGE_SIZE;
+
+ /*
+ * Use mfence or sfence despite the ordering implied by
+ * mtx_{un,}lock() because clflush on non-Intel CPUs
+ * and clflushopt are not guaranteed to be ordered by
+ * any other instruction.
+ */
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ for (; sva < eva; sva += cpu_clflush_line_size) {
+ if (useclflushopt)
+ clflushopt(sva);
+ else
+ clflush(sva);
+ }
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ mfence();
+ *cmap_pte2 = 0;
+ sched_unpin();
+ mtx_unlock(&pc->pc_cmap_lock);
+ } else
+ pmap_invalidate_cache();
+}
+
+/*
+ * Changes the specified virtual address range's memory type to that given by
+ * the parameter "mode". The specified virtual address range must be
+ * completely contained within the kernel map.
+ *
+ * Returns zero if the change completed successfully, and either EINVAL or
+ * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
+ * of the virtual address range was not mapped, and ENOMEM is returned if
+ * there was insufficient memory available to complete the change.
+ */
+int
+pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
+{
+ vm_offset_t base, offset, tmpva;
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ int cache_bits_pte, cache_bits_pde;
+ boolean_t changed;
+
+ base = trunc_page(va);
+ offset = va & PAGE_MASK;
+ size = round_page(offset + size);
+
+ /*
+ * Only supported on kernel virtual addresses above the recursive map.
+ */
+ if (base < VM_MIN_KERNEL_ADDRESS)
+ return (EINVAL);
+
+ cache_bits_pde = pmap_cache_bits(mode, 1);
+ cache_bits_pte = pmap_cache_bits(mode, 0);
+ changed = FALSE;
+
+ /*
+ * Pages that aren't mapped aren't supported. Also break down
+ * 2/4MB pages into 4KB pages if required.
+ */
+ PMAP_LOCK(kernel_pmap);
+ for (tmpva = base; tmpva < base + size; ) {
+ pde = pmap_pde(kernel_pmap, tmpva);
+ if (*pde == 0) {
+ PMAP_UNLOCK(kernel_pmap);
+ return (EINVAL);
+ }
+ if (*pde & PG_PS) {
+ /*
+ * If the current 2/4MB page already has
+ * the required memory type, then we need not
+ * demote this page. Just increment tmpva to
+ * the next 2/4MB page frame.
+ */
+ if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
+ tmpva = trunc_4mpage(tmpva) + NBPDR;
+ continue;
+ }
+
+ /*
+ * If the current offset aligns with a 2/4MB
+ * page frame and there is at least 2/4MB left
+ * within the range, then we need not break
+ * down this page into 4KB pages.
+ */
+ if ((tmpva & PDRMASK) == 0 &&
+ tmpva + PDRMASK < base + size) {
+ tmpva += NBPDR;
+ continue;
+ }
+ if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
+ PMAP_UNLOCK(kernel_pmap);
+ return (ENOMEM);
+ }
+ }
+ pte = vtopte(tmpva);
+ if (*pte == 0) {
+ PMAP_UNLOCK(kernel_pmap);
+ return (EINVAL);
+ }
+ tmpva += PAGE_SIZE;
+ }
+ PMAP_UNLOCK(kernel_pmap);
+
+ /*
+ * Ok, all the pages exist, so run through them updating their
+ * cache mode if required.
+ */
+ for (tmpva = base; tmpva < base + size; ) {
+ pde = pmap_pde(kernel_pmap, tmpva);
+ if (*pde & PG_PS) {
+ if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
+ pmap_pde_attr(pde, cache_bits_pde);
+ changed = TRUE;
+ }
+ tmpva = trunc_4mpage(tmpva) + NBPDR;
+ } else {
+ pte = vtopte(tmpva);
+ if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
+ pmap_pte_attr(pte, cache_bits_pte);
+ changed = TRUE;
+ }
+ tmpva += PAGE_SIZE;
+ }
+ }
+
+ /*
+ * Flush CPU caches to make sure any data isn't cached that
+ * shouldn't be, etc.
+ */
+ if (changed) {
+ pmap_invalidate_range(kernel_pmap, base, tmpva);
+ pmap_invalidate_cache_range(base, tmpva, FALSE);
+ }
+ return (0);
+}
+
+/*
+ * perform the pmap work for mincore
+ */
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+{
+ pd_entry_t *pdep;
+ pt_entry_t *ptep, pte;
+ vm_paddr_t pa;
+ int val;
+
+ PMAP_LOCK(pmap);
+retry:
+ pdep = pmap_pde(pmap, addr);
+ if (*pdep != 0) {
+ if (*pdep & PG_PS) {
+ pte = *pdep;
+ /* Compute the physical address of the 4KB page. */
+ pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
+ PG_FRAME;
+ val = MINCORE_SUPER;
+ } else {
+ ptep = pmap_pte(pmap, addr);
+ pte = *ptep;
+ pmap_pte_release(ptep);
+ pa = pte & PG_FRAME;
+ val = 0;
+ }
+ } else {
+ pte = 0;
+ pa = 0;
+ val = 0;
+ }
+ if ((pte & PG_V) != 0) {
+ val |= MINCORE_INCORE;
+ if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+ if ((pte & PG_A) != 0)
+ val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+ }
+ if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+ (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+ (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
+ /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+ if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+ goto retry;
+ } else
+ PA_UNLOCK_COND(*locked_pa);
+ PMAP_UNLOCK(pmap);
+ return (val);
+}
+
+void
+pmap_activate(struct thread *td)
+{
+ pmap_t pmap, oldpmap;
+ u_int cpuid;
+ u_int32_t cr3;
+
+ critical_enter();
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ oldpmap = PCPU_GET(curpmap);
+ cpuid = PCPU_GET(cpuid);
+#if defined(SMP)
+ CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
+ CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
+#else
+ CPU_CLR(cpuid, &oldpmap->pm_active);
+ CPU_SET(cpuid, &pmap->pm_active);
+#endif
+#if defined(PAE) || defined(PAE_TABLES)
+ cr3 = vtophys(pmap->pm_pdpt);
+#else
+ cr3 = vtophys(pmap->pm_pdir);
+#endif
+ /*
+ * pmap_activate is for the current thread on the current cpu
+ */
+ td->td_pcb->pcb_cr3 = cr3;
+ load_cr3(cr3);
+ PCPU_SET(curpmap, pmap);
+ critical_exit();
+}
+
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+}
+
+/*
+ * Increase the starting virtual address of the given mapping if a
+ * different alignment might result in more superpage mappings.
+ */
+void
+pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
+ vm_offset_t *addr, vm_size_t size)
+{
+ vm_offset_t superpage_offset;
+
+ if (size < NBPDR)
+ return;
+ if (object != NULL && (object->flags & OBJ_COLORED) != 0)
+ offset += ptoa(object->pg_color);
+ superpage_offset = offset & PDRMASK;
+ if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
+ (*addr & PDRMASK) == superpage_offset)
+ return;
+ if ((*addr & PDRMASK) < superpage_offset)
+ *addr = (*addr & ~PDRMASK) + superpage_offset;
+ else
+ *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
+}
+
+vm_offset_t
+pmap_quick_enter_page(vm_page_t m)
+{
+ vm_offset_t qaddr;
+ pt_entry_t *pte;
+
+ critical_enter();
+ qaddr = PCPU_GET(qmap_addr);
+ pte = vtopte(qaddr);
+
+ KASSERT(*pte == 0, ("pmap_quick_enter_page: PTE busy"));
+ *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
+ pmap_cache_bits(pmap_page_get_memattr(m), 0);
+ invlpg(qaddr);
+
+ return (qaddr);
+}
+
+void
+pmap_quick_remove_page(vm_offset_t addr)
+{
+ vm_offset_t qaddr;
+ pt_entry_t *pte;
+
+ qaddr = PCPU_GET(qmap_addr);
+ pte = vtopte(qaddr);
+
+ KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
+ KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address"));
+
+ *pte = 0;
+ critical_exit();
+}
+
+#if defined(PMAP_DEBUG)
+pmap_pid_dump(int pid)
+{
+ pmap_t pmap;
+ struct proc *p;
+ int npte = 0;
+ int index;
+
+ sx_slock(&allproc_lock);
+ FOREACH_PROC_IN_SYSTEM(p) {
+ if (p->p_pid != pid)
+ continue;
+
+ if (p->p_vmspace) {
+ int i,j;
+ index = 0;
+ pmap = vmspace_pmap(p->p_vmspace);
+ for (i = 0; i < NPDEPTD; i++) {
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ vm_offset_t base = i << PDRSHIFT;
+
+ pde = &pmap->pm_pdir[i];
+ if (pde && pmap_pde_v(pde)) {
+ for (j = 0; j < NPTEPG; j++) {
+ vm_offset_t va = base + (j << PAGE_SHIFT);
+ if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
+ if (index) {
+ index = 0;
+ printf("\n");
+ }
+ sx_sunlock(&allproc_lock);
+ return (npte);
+ }
+ pte = pmap_pte(pmap, va);
+ if (pte && pmap_pte_v(pte)) {
+ pt_entry_t pa;
+ vm_page_t m;
+ pa = *pte;
+ m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
+ printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
+ va, pa, m->hold_count, m->wire_count, m->flags);
+ npte++;
+ index++;
+ if (index >= 2) {
+ index = 0;
+ printf("\n");
+ } else {
+ printf(" ");
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ sx_sunlock(&allproc_lock);
+ return (npte);
+}
+#endif
Index: sys/i386/i386/support.s
===================================================================
--- sys/i386/i386/support.s
+++ sys/i386/i386/support.s
@@ -34,7 +34,7 @@
#include <machine/pmap.h>
#include <machine/specialreg.h>
-#include "assym.s"
+#include "assym.S"
#define IDXSHIFT 10
Index: sys/i386/i386/swtch.s
===================================================================
--- sys/i386/i386/swtch.s
+++ sys/i386/i386/swtch.s
@@ -36,7 +36,7 @@
#include <machine/asmacros.h>
-#include "assym.s"
+#include "assym.S"
#if defined(SMP) && defined(SCHED_ULE)
#define SETOP xchgl
Index: sys/i386/i386/vm86bios.s
===================================================================
--- sys/i386/i386/vm86bios.s
+++ sys/i386/i386/vm86bios.s
@@ -29,7 +29,7 @@
#include <machine/asmacros.h> /* miscellaneous asm macros */
#include <machine/trap.h>
-#include "assym.s"
+#include "assym.S"
#define SCR_NEWPTD PCB_ESI /* readability macros */
#define SCR_VMFRAME PCB_EBP /* see vm86.c for explanation */
Index: sys/i386/linux/linux_locore.s
===================================================================
--- sys/i386/linux/linux_locore.s
+++ sys/i386/linux/linux_locore.s
@@ -5,7 +5,7 @@
#include <i386/linux/linux_syscall.h> /* system call numbers */
-#include "assym.s"
+#include "assym.S"
/*
* To avoid excess stack frame the signal trampoline code emulates
Index: sys/i386/linux/linux_support.s
===================================================================
--- sys/i386/linux/linux_support.s
+++ sys/i386/linux/linux_support.s
@@ -31,7 +31,7 @@
#include "linux_assym.h" /* system definitions */
#include <machine/asmacros.h> /* miscellaneous asm macros */
-#include "assym.s"
+#include "assym.S"
futex_fault_decx:
movl PCPU(CURPCB),%ecx
Index: sys/mips/cavium/octeon_cop2.S
===================================================================
--- sys/mips/cavium/octeon_cop2.S
+++ sys/mips/cavium/octeon_cop2.S
@@ -29,7 +29,7 @@
#include <machine/asm.h>
#include <mips/cavium/octeon_cop2.h>
-#include "assym.s"
+#include "assym.S"
.set noreorder
Index: sys/mips/ingenic/jz4780_mpboot.S
===================================================================
--- sys/mips/ingenic/jz4780_mpboot.S
+++ sys/mips/ingenic/jz4780_mpboot.S
@@ -27,7 +27,7 @@
*/
#include <machine/asm.h>
-#include "assym.s"
+#include "assym.S"
.text
.set noat
Index: sys/mips/mips/exception.S
===================================================================
--- sys/mips/mips/exception.S
+++ sys/mips/mips/exception.S
@@ -63,7 +63,7 @@
#include <machine/pte.h>
#include <machine/pcb.h>
-#include "assym.s"
+#include "assym.S"
.set noreorder # Noreorder is default style!
Index: sys/mips/mips/fp.S
===================================================================
--- sys/mips/mips/fp.S
+++ sys/mips/mips/fp.S
@@ -43,7 +43,7 @@
#include <machine/regnum.h>
#include <machine/cpuregs.h>
-#include "assym.s"
+#include "assym.S"
#define SEXP_INF 0xff
#define DEXP_INF 0x7ff
Index: sys/mips/mips/locore.S
===================================================================
--- sys/mips/mips/locore.S
+++ sys/mips/mips/locore.S
@@ -70,7 +70,7 @@
#include <machine/cpuregs.h>
#include <machine/regnum.h>
-#include "assym.s"
+#include "assym.S"
.data
#ifdef YAMON
Index: sys/mips/mips/mpboot.S
===================================================================
--- sys/mips/mips/mpboot.S
+++ sys/mips/mips/mpboot.S
@@ -30,7 +30,7 @@
#include <machine/cpu.h>
#include <machine/cpuregs.h>
-#include "assym.s"
+#include "assym.S"
.text
.set noat
Index: sys/mips/mips/octeon_cop2_swtch.S
===================================================================
--- sys/mips/mips/octeon_cop2_swtch.S
+++ sys/mips/mips/octeon_cop2_swtch.S
@@ -30,7 +30,7 @@
#include <machine/cpuregs.h>
#include <machine/octeon_cop2.h>
-#include "assym.s"
+#include "assym.S"
.set noreorder
Index: sys/mips/mips/support.S
===================================================================
--- sys/mips/mips/support.S
+++ sys/mips/mips/support.S
@@ -94,7 +94,7 @@
#include <machine/cpuregs.h>
#include <machine/pcb.h>
-#include "assym.s"
+#include "assym.S"
.set noreorder # Noreorder is default style!
Index: sys/mips/mips/swtch.S
===================================================================
--- sys/mips/mips/swtch.S
+++ sys/mips/mips/swtch.S
@@ -64,7 +64,7 @@
#include <machine/pte.h>
#include <machine/pcb.h>
-#include "assym.s"
+#include "assym.S"
.set noreorder # Noreorder is default style!
Index: sys/mips/nlm/mpreset.S
===================================================================
--- sys/mips/nlm/mpreset.S
+++ sys/mips/nlm/mpreset.S
@@ -38,7 +38,7 @@
#define SYS_REG_KSEG1(node, reg) (0xa0000000 + XLP_DEFAULT_IO_BASE + \
XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + (reg) * 4)
-#include "assym.s"
+#include "assym.S"
.text
.set noat
Index: sys/modules/dtrace/dtrace/Makefile
===================================================================
--- sys/modules/dtrace/dtrace/Makefile
+++ sys/modules/dtrace/dtrace/Makefile
@@ -27,9 +27,9 @@
SRCS+= bus_if.h device_if.h vnode_if.h
# Needed for dtrace_asm.S
-DPSRCS+= assym.s
+DPSRCS+= assym.S
-# These are needed for assym.s
+# These are needed for assym.S
SRCS+= opt_compat.h opt_kstack_pages.h opt_nfs.h opt_hwpmc_hooks.h
#This is needed for dtrace.c
@@ -53,7 +53,7 @@
dtrace_unregister \
dtrace_probe_lookup
-dtrace_asm.o: assym.s
+dtrace_asm.o: assym.S
.include <bsd.kmod.mk>
Index: sys/modules/hyperv/vmbus/Makefile
===================================================================
--- sys/modules/hyperv/vmbus/Makefile
+++ sys/modules/hyperv/vmbus/Makefile
@@ -16,13 +16,13 @@
vmbus_xact.c
SRCS+= acpi_if.h bus_if.h device_if.h opt_acpi.h pci_if.h pcib_if.h vmbus_if.h
-# XXX: for assym.s
+# XXX: for assym.S
SRCS+= opt_kstack_pages.h opt_nfs.h opt_hwpmc_hooks.h opt_compat.h
.if ${MACHINE_CPUARCH} == "i386"
SRCS+= opt_apic.h
.endif
-SRCS+= assym.s \
+SRCS+= assym.S \
vmbus_vector.S
vmbus_vector.o:
Index: sys/modules/linux/Makefile
===================================================================
--- sys/modules/linux/Makefile
+++ sys/modules/linux/Makefile
@@ -17,11 +17,11 @@
linux${SFX}_sysvec.c linux_uid16.c linux_time.c \
linux_timer.c linux_vdso.c \
opt_inet6.h opt_compat.h opt_posix.h opt_usb.h vnode_if.h \
- device_if.h bus_if.h assym.s \
+ device_if.h bus_if.h assym.S \
linux${SFX}_support.s
DPSRCS= linux${SFX}_genassym.c
-# XXX: for assym.s
+# XXX: for assym.S
SRCS+= opt_kstack_pages.h opt_nfs.h opt_compat.h opt_hwpmc_hooks.h
.if ${MACHINE_CPUARCH} == "i386"
SRCS+= opt_apic.h
@@ -48,7 +48,7 @@
linux${SFX}_assym.h: linux${SFX}_genassym.o
sh ${SYSDIR}/kern/genassym.sh linux${SFX}_genassym.o > ${.TARGET}
-linux${SFX}_locore.o: linux${SFX}_assym.h assym.s
+linux${SFX}_locore.o: linux${SFX}_assym.h assym.S
${CC} -x assembler-with-cpp -DLOCORE -m32 -shared -s \
-pipe -I. -I${SYSDIR} -Werror -Wall -fno-common -nostdinc -nostdlib \
-fno-omit-frame-pointer -fPIC \
@@ -56,7 +56,7 @@
-Wl,-soname=${VDSO}.so.1,--eh-frame-hdr,-warn-common \
${.IMPSRC} -o ${.TARGET}
-linux${SFX}_support.o: linux${SFX}_assym.h assym.s
+linux${SFX}_support.o: linux${SFX}_assym.h assym.S
${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \
${.IMPSRC} -o ${.TARGET}
Index: sys/modules/linux64/Makefile
===================================================================
--- sys/modules/linux64/Makefile
+++ sys/modules/linux64/Makefile
@@ -11,11 +11,11 @@
linux_socket.c linux_stats.c linux_sysctl.c linux_sysent.c \
linux_sysvec.c linux_time.c linux_vdso.c linux_timer.c \
opt_inet6.h opt_compat.h opt_posix.h opt_usb.h \
- vnode_if.h device_if.h bus_if.h assym.s \
+ vnode_if.h device_if.h bus_if.h assym.S \
linux_support.s
DPSRCS= linux_genassym.c
-# XXX: for assym.s
+# XXX: for assym.S
SRCS+= opt_kstack_pages.h opt_nfs.h opt_hwpmc_hooks.h
.if ${MACHINE_CPUARCH} == "i386"
SRCS+= opt_apic.h
@@ -40,7 +40,7 @@
-S -g --binary-architecture i386:x86-64 linux_locore.o ${.TARGET}
strip -N _binary_linux_locore_o_size ${.TARGET}
-linux_support.o: assym.s linux_assym.h
+linux_support.o: assym.S linux_assym.h
${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \
${.IMPSRC} -o ${.TARGET}
Index: sys/modules/sgx/Makefile
===================================================================
--- sys/modules/sgx/Makefile
+++ sys/modules/sgx/Makefile
@@ -3,10 +3,10 @@
.PATH: ${SRCTOP}/sys/amd64/sgx
KMOD= sgx
-SRCS= sgx.c sgxvar.h assym.s sgx_support.S
+SRCS= sgx.c sgxvar.h assym.S sgx_support.S
SRCS+= opt_compat.h opt_hwpmc_hooks.h opt_kstack_pages.h
-sgx_support.o: assym.s
+sgx_support.o: assym.S
${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \
${.IMPSRC} -o ${.TARGET}
Index: sys/powerpc/aim/locore32.S
===================================================================
--- sys/powerpc/aim/locore32.S
+++ sys/powerpc/aim/locore32.S
@@ -25,7 +25,7 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "assym.s"
+#include "assym.S"
#include <sys/syscall.h>
Index: sys/powerpc/aim/locore64.S
===================================================================
--- sys/powerpc/aim/locore64.S
+++ sys/powerpc/aim/locore64.S
@@ -27,7 +27,7 @@
* $FreeBSD$
*/
-#include "assym.s"
+#include "assym.S"
#include <sys/syscall.h>
Index: sys/powerpc/booke/locore.S
===================================================================
--- sys/powerpc/booke/locore.S
+++ sys/powerpc/booke/locore.S
@@ -26,7 +26,7 @@
* $FreeBSD$
*/
-#include "assym.s"
+#include "assym.S"
#include "opt_hwpmc_hooks.h"
Index: sys/powerpc/powerpc/sigcode32.S
===================================================================
--- sys/powerpc/powerpc/sigcode32.S
+++ sys/powerpc/powerpc/sigcode32.S
@@ -34,7 +34,7 @@
#include <machine/asm.h>
#include <sys/syscall.h>
-#include "assym.s"
+#include "assym.S"
/*
* The following code gets copied to the top of the user stack on process
Index: sys/powerpc/powerpc/sigcode64.S
===================================================================
--- sys/powerpc/powerpc/sigcode64.S
+++ sys/powerpc/powerpc/sigcode64.S
@@ -34,7 +34,7 @@
#include <machine/asm.h>
#include <sys/syscall.h>
-#include "assym.s"
+#include "assym.S"
/*
* The following code gets copied to the top of the user stack on process
Index: sys/powerpc/powerpc/swtch32.S
===================================================================
--- sys/powerpc/powerpc/swtch32.S
+++ sys/powerpc/powerpc/swtch32.S
@@ -56,7 +56,7 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "assym.s"
+#include "assym.S"
#include "opt_sched.h"
#include <sys/syscall.h>
Index: sys/powerpc/powerpc/swtch64.S
===================================================================
--- sys/powerpc/powerpc/swtch64.S
+++ sys/powerpc/powerpc/swtch64.S
@@ -56,7 +56,7 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "assym.s"
+#include "assym.S"
#include "opt_sched.h"
#include <sys/syscall.h>
Index: sys/riscv/riscv/copyinout.S
===================================================================
--- sys/riscv/riscv/copyinout.S
+++ sys/riscv/riscv/copyinout.S
@@ -37,7 +37,7 @@
#include <sys/errno.h>
-#include "assym.s"
+#include "assym.S"
/*
* Fault handler for the copy{in,out} functions below.
Index: sys/riscv/riscv/exception.S
===================================================================
--- sys/riscv/riscv/exception.S
+++ sys/riscv/riscv/exception.S
@@ -35,7 +35,7 @@
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
-#include "assym.s"
+#include "assym.S"
#include <machine/trap.h>
#include <machine/riscvreg.h>
Index: sys/riscv/riscv/locore.S
===================================================================
--- sys/riscv/riscv/locore.S
+++ sys/riscv/riscv/locore.S
@@ -34,7 +34,7 @@
* $FreeBSD$
*/
-#include "assym.s"
+#include "assym.S"
#include <sys/syscall.h>
#include <machine/asm.h>
Index: sys/riscv/riscv/support.S
===================================================================
--- sys/riscv/riscv/support.S
+++ sys/riscv/riscv/support.S
@@ -37,7 +37,7 @@
#include <machine/setjmp.h>
-#include "assym.s"
+#include "assym.S"
/*
* One of the fu* or su* functions failed, return -1.
Index: sys/riscv/riscv/swtch.S
===================================================================
--- sys/riscv/riscv/swtch.S
+++ sys/riscv/riscv/swtch.S
@@ -32,7 +32,7 @@
* SUCH DAMAGE.
*/
-#include "assym.s"
+#include "assym.S"
#include "opt_sched.h"
#include <machine/param.h>
Index: sys/sparc64/sparc64/exception.S
===================================================================
--- sys/sparc64/sparc64/exception.S
+++ sys/sparc64/sparc64/exception.S
@@ -74,7 +74,7 @@
#include <machine/utrap.h>
#include <machine/wstate.h>
-#include "assym.s"
+#include "assym.S"
#define TSB_ASI 0x0
#define TSB_KERNEL 0x0
Index: sys/sparc64/sparc64/interrupt.S
===================================================================
--- sys/sparc64/sparc64/interrupt.S
+++ sys/sparc64/sparc64/interrupt.S
@@ -33,7 +33,7 @@
#include <machine/pstate.h>
#include <machine/ver.h>
-#include "assym.s"
+#include "assym.S"
/*
* Handle a vectored interrupt.
Index: sys/sparc64/sparc64/locore.S
===================================================================
--- sys/sparc64/sparc64/locore.S
+++ sys/sparc64/sparc64/locore.S
@@ -33,7 +33,7 @@
#include <machine/pstate.h>
#include <machine/wstate.h>
-#include "assym.s"
+#include "assym.S"
.register %g2,#ignore
Index: sys/sparc64/sparc64/mp_exception.S
===================================================================
--- sys/sparc64/sparc64/mp_exception.S
+++ sys/sparc64/sparc64/mp_exception.S
@@ -33,7 +33,7 @@
#include <machine/ktr.h>
#include <machine/pstate.h>
-#include "assym.s"
+#include "assym.S"
.register %g2, #ignore
.register %g3, #ignore
Index: sys/sparc64/sparc64/mp_locore.S
===================================================================
--- sys/sparc64/sparc64/mp_locore.S
+++ sys/sparc64/sparc64/mp_locore.S
@@ -36,7 +36,7 @@
#include <machine/smp.h>
#include <machine/ver.h>
-#include "assym.s"
+#include "assym.S"
.register %g2, #ignore
.register %g3, #ignore
Index: sys/sparc64/sparc64/support.S
===================================================================
--- sys/sparc64/sparc64/support.S
+++ sys/sparc64/sparc64/support.S
@@ -39,7 +39,7 @@
#include <machine/pstate.h>
#include <machine/wstate.h>
-#include "assym.s"
+#include "assym.S"
.register %g2, #ignore
.register %g3, #ignore
Index: sys/sparc64/sparc64/swtch.S
===================================================================
--- sys/sparc64/sparc64/swtch.S
+++ sys/sparc64/sparc64/swtch.S
@@ -35,7 +35,7 @@
#include <machine/pcb.h>
#include <machine/tstate.h>
-#include "assym.s"
+#include "assym.S"
#include "opt_sched.h"
.register %g2, #ignore

File Metadata

Mime Type
text/plain
Expires
Fri, Feb 27, 8:07 AM (1 h, 24 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
29022145
Default Alt Text
D14180.id38885.diff (694 KB)

Event Timeline