diff --git a/sys/amd64/acpica/acpi_wakeup.c b/sys/amd64/acpica/acpi_wakeup.c index 9601a88e11a4..1afbdc3a3bec 100644 --- a/sys/amd64/acpica/acpi_wakeup.c +++ b/sys/amd64/acpica/acpi_wakeup.c @@ -1,449 +1,463 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Takanori Watanabe * Copyright (c) 2001-2012 Mitsuru IWASAKI * Copyright (c) 2003 Peter Wemm * Copyright (c) 2008-2012 Jung-uk Kim * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #include #endif #include #include #include "acpi_wakecode.h" #include "acpi_wakedata.h" /* Make sure the code is less than a page and leave room for the stack. */ CTASSERT(sizeof(wakecode) < PAGE_SIZE - 1024); extern int acpi_resume_beep; extern int acpi_reset_video; extern int acpi_susp_bounce; #ifdef SMP extern struct susppcb **susppcbs; static cpuset_t suspcpus; #else static struct susppcb **susppcbs; #endif static void acpi_stop_beep(void *); #ifdef SMP static int acpi_wakeup_ap(struct acpi_softc *, int); static void acpi_wakeup_cpus(struct acpi_softc *); #endif #define ACPI_WAKEPT_PAGES 7 #define WAKECODE_FIXUP(offset, type, val) do { \ type *addr; \ addr = (type *)(sc->acpi_wakeaddr + (offset)); \ *addr = val; \ } while (0) static void acpi_stop_beep(void *arg) { if (acpi_resume_beep != 0) timer_spkr_release(); } #ifdef SMP static int acpi_wakeup_ap(struct acpi_softc *sc, int cpu) { struct pcb *pcb; int vector = (sc->acpi_wakephys >> 12) & 0xff; int apic_id = cpu_apic_ids[cpu]; int ms; pcb = &susppcbs[cpu]->sp_pcb; WAKECODE_FIXUP(wakeup_pcb, struct pcb *, pcb); WAKECODE_FIXUP(wakeup_gdt, uint16_t, pcb->pcb_gdt.rd_limit); WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t, pcb->pcb_gdt.rd_base); ipi_startup(apic_id, vector); /* Wait up to 5 seconds for it to resume. 
*/ for (ms = 0; ms < 5000; ms++) { if (!CPU_ISSET(cpu, &suspended_cpus)) return (1); /* return SUCCESS */ DELAY(1000); } return (0); /* return FAILURE */ } #define WARMBOOT_TARGET 0 #define WARMBOOT_OFF (KERNBASE + 0x0467) #define WARMBOOT_SEG (KERNBASE + 0x0469) #define CMOS_REG (0x70) #define CMOS_DATA (0x71) #define BIOS_RESET (0x0f) #define BIOS_WARM (0x0a) static void acpi_wakeup_cpus(struct acpi_softc *sc) { uint32_t mpbioswarmvec; int cpu; u_char mpbiosreason; if (!efi_boot) { /* save the current value of the warm-start vector */ mpbioswarmvec = *((uint32_t *)WARMBOOT_OFF); outb(CMOS_REG, BIOS_RESET); mpbiosreason = inb(CMOS_DATA); /* setup a vector to our boot code */ *((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET; *((volatile u_short *)WARMBOOT_SEG) = sc->acpi_wakephys >> 4; outb(CMOS_REG, BIOS_RESET); outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ } /* Wake up each AP. */ for (cpu = 1; cpu < mp_ncpus; cpu++) { if (!CPU_ISSET(cpu, &suspcpus)) continue; if (acpi_wakeup_ap(sc, cpu) == 0) { /* restore the warmstart vector */ *(uint32_t *)WARMBOOT_OFF = mpbioswarmvec; panic("acpi_wakeup: failed to resume AP #%d (PHY #%d)", cpu, cpu_apic_ids[cpu]); } } if (!efi_boot) { /* restore the warmstart vector */ *(uint32_t *)WARMBOOT_OFF = mpbioswarmvec; outb(CMOS_REG, BIOS_RESET); outb(CMOS_DATA, mpbiosreason); } } #endif int acpi_sleep_machdep(struct acpi_softc *sc, int state) { ACPI_STATUS status; struct pcb *pcb; struct pcpu *pc; int i; if (sc->acpi_wakeaddr == 0ul) return (-1); /* couldn't alloc wake memory */ #ifdef SMP suspcpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &suspcpus); #endif if (acpi_resume_beep != 0) timer_spkr_acquire(); AcpiSetFirmwareWakingVector(sc->acpi_wakephys, 0); intr_suspend(); pcb = &susppcbs[0]->sp_pcb; if (savectx(pcb)) { fpususpend(susppcbs[0]->sp_fpususpend); #ifdef SMP if (!CPU_EMPTY(&suspcpus) && suspend_cpus(suspcpus) == 0) { device_printf(sc->acpi_dev, "Failed to suspend APs\n"); return (0); /* couldn't sleep */ } #endif hw_ibrs_ibpb_active = 0; hw_ssb_active = 0; cpu_stdext_feature3 = 0; CPU_FOREACH(i) { pc = pcpu_find(i); pc->pc_ibpb_set = 0; } WAKECODE_FIXUP(resume_beep, uint8_t, (acpi_resume_beep != 0)); WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0)); WAKECODE_FIXUP(wakeup_efer, uint64_t, rdmsr(MSR_EFER) & ~(EFER_LMA)); WAKECODE_FIXUP(wakeup_pcb, struct pcb *, pcb); WAKECODE_FIXUP(wakeup_gdt, uint16_t, pcb->pcb_gdt.rd_limit); WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t, pcb->pcb_gdt.rd_base); /* Call ACPICA to enter the desired sleep state */ if (state == ACPI_STATE_S4 && sc->acpi_s4bios) status = AcpiEnterSleepStateS4bios(); else status = AcpiEnterSleepState(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); return (0); /* couldn't sleep */ } if (acpi_susp_bounce) resumectx(pcb); for (;;) ia32_pause(); } else { /* * Re-initialize console hardware as soon as possible. * No console output (e.g. printf) is allowed before * this point. 
		 */
		cnresume();
		fpuresume(susppcbs[0]->sp_fpususpend);
	}

	return (1);	/* wakeup successfully */
}

int
acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result,
    int intr_enabled)
{
	if (sleep_result == -1)
		return (sleep_result);

	if (!intr_enabled) {
		/* Wakeup MD procedures in interrupt disabled context */
		if (sleep_result == 1) {
			ucode_reload();
			pmap_init_pat();
			initializecpu();
			PCPU_SET(switchtime, 0);
			PCPU_SET(switchticks, ticks);
			lapic_xapic_mode();
#ifdef SMP
			if (!CPU_EMPTY(&suspcpus))
				acpi_wakeup_cpus(sc);
#endif
		}

#ifdef SMP
		if (!CPU_EMPTY(&suspcpus))
			resume_cpus(suspcpus);
#endif
+
+		/*
+		 * Re-read cpu_stdext_feature3, which was zeroed-out
+		 * in acpi_sleep_machdep, after the microcode was
+		 * reloaded. Then recalculate the active mitigations
+		 * knobs that depend on the microcode and
+		 * cpu_stdext_feature3.
+		 */
+		identify_cpu_ext_features();
+		hw_ibrs_recalculate(true);
+		hw_ssb_recalculate(true);
+		amd64_syscall_ret_flush_l1d_recalc();
+		x86_rngds_mitg_recalculate(true);
+
		mca_resume();
		if (vmm_resume_p != NULL)
			vmm_resume_p();
		intr_resume(/*suspend_cancelled*/false);

		AcpiSetFirmwareWakingVector(0, 0);
	} else {
		/* Wakeup MD procedures in interrupt enabled context */
		if (sleep_result == 1 && mem_range_softc.mr_op != NULL &&
		    mem_range_softc.mr_op->reinit != NULL)
			mem_range_softc.mr_op->reinit(&mem_range_softc);
	}

	return (sleep_result);
}

static void
acpi_alloc_wakeup_handler(void **wakeaddr,
    void *wakept_pages[ACPI_WAKEPT_PAGES])
{
	vm_page_t wakept_m[ACPI_WAKEPT_PAGES];
	int i;

	*wakeaddr = NULL;
	memset(wakept_pages, 0, ACPI_WAKEPT_PAGES * sizeof(*wakept_pages));
	memset(wakept_m, 0, ACPI_WAKEPT_PAGES * sizeof(*wakept_m));

	/*
	 * Specify the region for our wakeup code. We want it in the
	 * low 1 MB region, excluding real mode IVT (0-0x3ff), BDA
	 * (0x400-0x4ff), EBDA (less than 128KB, below 0xa0000, must
	 * be excluded by SMAP and DSDT), and ROM area (0xa0000 and
	 * above).
	 */
	*wakeaddr = contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT, 0x500, 0xa0000,
	    PAGE_SIZE, 0ul);
	if (*wakeaddr == NULL) {
		printf("%s: can't alloc wake memory\n", __func__);
		goto freepages;
	}

	for (i = 0; i < ACPI_WAKEPT_PAGES - (la57 ?
0 : 1); i++) { wakept_m[i] = pmap_page_alloc_below_4g(true); wakept_pages[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS( wakept_m[i])); } if (EVENTHANDLER_REGISTER(power_resume, acpi_stop_beep, NULL, EVENTHANDLER_PRI_LAST) == NULL) { printf("%s: can't register event handler\n", __func__); goto freepages; } susppcbs = malloc(mp_ncpus * sizeof(*susppcbs), M_DEVBUF, M_WAITOK); for (i = 0; i < mp_ncpus; i++) { susppcbs[i] = malloc(sizeof(**susppcbs), M_DEVBUF, M_WAITOK); susppcbs[i]->sp_fpususpend = alloc_fpusave(M_WAITOK); } return; freepages: if (*wakeaddr != NULL) contigfree(*wakeaddr, PAGE_SIZE, M_DEVBUF); for (i = 0; i < ACPI_WAKEPT_PAGES; i++) { if (wakept_m[i] != NULL) vm_page_free(wakept_m[i]); } *wakeaddr = NULL; } void acpi_install_wakeup_handler(struct acpi_softc *sc) { static void *wakeaddr; void *wakept_pages[ACPI_WAKEPT_PAGES]; uint64_t *pt5, *pt4, *pt3, *pt2_0, *pt2_1, *pt2_2, *pt2_3; vm_paddr_t pt5pa, pt4pa, pt3pa, pt2_0pa, pt2_1pa, pt2_2pa, pt2_3pa; int i; if (wakeaddr != NULL) return; acpi_alloc_wakeup_handler(&wakeaddr, wakept_pages); if (wakeaddr == NULL) return; sc->acpi_wakeaddr = (vm_offset_t)wakeaddr; sc->acpi_wakephys = vtophys(wakeaddr); if (la57) { pt5 = wakept_pages[6]; pt5pa = vtophys(pt5); } pt4 = wakept_pages[0]; pt3 = wakept_pages[1]; pt2_0 = wakept_pages[2]; pt2_1 = wakept_pages[3]; pt2_2 = wakept_pages[4]; pt2_3 = wakept_pages[5]; pt4pa = vtophys(pt4); pt3pa = vtophys(pt3); pt2_0pa = vtophys(pt2_0); pt2_1pa = vtophys(pt2_1); pt2_2pa = vtophys(pt2_2); pt2_3pa = vtophys(pt2_3); bcopy(wakecode, (void *)sc->acpi_wakeaddr, sizeof(wakecode)); /* Patch GDT base address, ljmp targets. */ WAKECODE_FIXUP((bootgdtdesc + 2), uint32_t, sc->acpi_wakephys + bootgdt); WAKECODE_FIXUP((wakeup_sw32 + 2), uint32_t, sc->acpi_wakephys + wakeup_32); WAKECODE_FIXUP((wakeup_sw64 + 1), uint32_t, sc->acpi_wakephys + wakeup_64); WAKECODE_FIXUP(wakeup_pagetables, uint32_t, la57 ? (pt5pa | 0x1) : pt4pa); /* Save pointers to some global data. */ WAKECODE_FIXUP(wakeup_ret, void *, resumectx); /* Create 1:1 mapping for the low 4G */ if (la57) { bcopy(kernel_pmap->pm_pmltop, pt5, PAGE_SIZE); pt5[0] = (uint64_t)pt4pa; pt5[0] |= PG_V | PG_RW | PG_U; } else { bcopy(kernel_pmap->pm_pmltop, pt4, PAGE_SIZE); } pt4[0] = (uint64_t)pt3pa; pt4[0] |= PG_V | PG_RW | PG_U; pt3[0] = (uint64_t)pt2_0pa; pt3[0] |= PG_V | PG_RW | PG_U; pt3[1] = (uint64_t)pt2_1pa; pt3[1] |= PG_V | PG_RW | PG_U; pt3[2] = (uint64_t)pt2_2pa; pt3[2] |= PG_V | PG_RW | PG_U; pt3[3] = (uint64_t)pt2_3pa; pt3[3] |= PG_V | PG_RW | PG_U; for (i = 0; i < NPDEPG; i++) { pt2_0[i] = (pd_entry_t)i * NBPDR; pt2_0[i] |= PG_V | PG_RW | PG_PS | PG_U; } for (i = 0; i < NPDEPG; i++) { pt2_1[i] = (pd_entry_t)NBPDP + i * NBPDR; pt2_1[i] |= PG_V | PG_RW | PG_PS | PG_U; } for (i = 0; i < NPDEPG; i++) { pt2_2[i] = (pd_entry_t)2 * NBPDP + i * NBPDR; pt2_2[i] |= PG_V | PG_RW | PG_PS | PG_U; } for (i = 0; i < NPDEPG; i++) { pt2_3[i] = (pd_entry_t)3 * NBPDP + i * NBPDR; pt2_3[i] |= PG_V | PG_RW | PG_PS | PG_U; } if (bootverbose) device_printf(sc->acpi_dev, "wakeup code va %#jx pa %#jx\n", (uintmax_t)sc->acpi_wakeaddr, (uintmax_t)sc->acpi_wakephys); } diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c index 16780a9e069b..8d4c9d9cbe64 100644 --- a/sys/amd64/amd64/initcpu.c +++ b/sys/amd64/amd64/initcpu.c @@ -1,376 +1,376 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) KATO Takenori, 1997, 1998. * * All rights reserved. Unpublished rights reserved under the copyright * laws of Japan. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include static int hw_instruction_sse; SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD, &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU"); static int lower_sharedpage_init; int hw_lower_amd64_sharedpage; SYSCTL_INT(_hw, OID_AUTO, lower_amd64_sharedpage, CTLFLAG_RDTUN, &hw_lower_amd64_sharedpage, 0, "Lower sharedpage to work around Ryzen issue with executing code near the top of user memory"); /* * -1: automatic (default) * 0: keep enable CLFLUSH * 1: force disable CLFLUSH */ static int hw_clflush_disable = -1; static void init_amd(void) { uint64_t msr; /* * C1E renders the local APIC timer dead, so we disable it by * reading the Interrupt Pending Message register and clearing * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27). * * Reference: * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors" * #32559 revision 3.00+ * * Detect the presence of C1E capability mostly on latest * dual-cores (or future) k8 family. Affected models range is * taken from Linux sources. */ if ((CPUID_TO_FAMILY(cpu_id) == 0xf || CPUID_TO_FAMILY(cpu_id) == 0x10) && (cpu_feature2 & CPUID2_HV) == 0) cpu_amdc1e_bug = 1; /* * Work around Erratum 721 for Family 10h and 12h processors. * These processors may incorrectly update the stack pointer * after a long series of push and/or near-call instructions, * or a long series of pop and/or near-return instructions. * * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf * * Hypervisors do not provide access to the errata MSR, * causing #GP exception on attempt to apply the errata. The * MSR write shall be done on host and persist globally * anyway, so do not try to do it when under virtualization. */ switch (CPUID_TO_FAMILY(cpu_id)) { case 0x10: case 0x12: if ((cpu_feature2 & CPUID2_HV) == 0) wrmsr(MSR_DE_CFG, rdmsr(MSR_DE_CFG) | 1); break; } /* * BIOS may fail to set InitApicIdCpuIdLo to 1 as it should per BKDG. * So, do it here or otherwise some tools could be confused by * Initial Local APIC ID reported with CPUID Function 1 in EBX. 
*/ if (CPUID_TO_FAMILY(cpu_id) == 0x10) { if ((cpu_feature2 & CPUID2_HV) == 0) { msr = rdmsr(MSR_NB_CFG1); msr |= (uint64_t)1 << 54; wrmsr(MSR_NB_CFG1, msr); } } /* * BIOS may configure Family 10h processors to convert WC+ cache type * to CD. That can hurt performance of guest VMs using nested paging. * The relevant MSR bit is not documented in the BKDG, * the fix is borrowed from Linux. */ if (CPUID_TO_FAMILY(cpu_id) == 0x10) { if ((cpu_feature2 & CPUID2_HV) == 0) { msr = rdmsr(0xc001102a); msr &= ~((uint64_t)1 << 24); wrmsr(0xc001102a, msr); } } /* * Work around Erratum 793: Specific Combination of Writes to Write * Combined Memory Types and Locked Instructions May Cause Core Hang. * See Revision Guide for AMD Family 16h Models 00h-0Fh Processors, * revision 3.04 or later, publication 51810. */ if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) { if ((cpu_feature2 & CPUID2_HV) == 0) { msr = rdmsr(MSR_LS_CFG); msr |= (uint64_t)1 << 15; wrmsr(MSR_LS_CFG, msr); } } /* Ryzen erratas. */ if (CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1 && (cpu_feature2 & CPUID2_HV) == 0) { /* 1021 */ msr = rdmsr(MSR_DE_CFG); msr |= 0x2000; wrmsr(MSR_DE_CFG, msr); /* 1033 */ msr = rdmsr(MSR_LS_CFG); msr |= 0x10; wrmsr(MSR_LS_CFG, msr); /* 1049 */ msr = rdmsr(0xc0011028); msr |= 0x10; wrmsr(0xc0011028, msr); /* 1095 */ msr = rdmsr(MSR_LS_CFG); msr |= 0x200000000000000; wrmsr(MSR_LS_CFG, msr); } /* * Work around a problem on Ryzen that is triggered by executing * code near the top of user memory, in our case the signal * trampoline code in the shared page on amd64. * * This function is executed once for the BSP before tunables take * effect so the value determined here can be overridden by the * tunable. This function is then executed again for each AP and * also on resume. Set a flag the first time so that value set by * the tunable is not overwritten. * * The stepping and/or microcode versions should be checked after * this issue is fixed by AMD so that we don't use this mode if not * needed. */ if (lower_sharedpage_init == 0) { lower_sharedpage_init = 1; if (CPUID_TO_FAMILY(cpu_id) == 0x17 || CPUID_TO_FAMILY(cpu_id) == 0x18) { hw_lower_amd64_sharedpage = 1; } } } /* * Initialize special VIA features */ static void init_via(void) { u_int regs[4], val; /* * Check extended CPUID for PadLock features. * * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf */ do_cpuid(0xc0000000, regs); if (regs[0] >= 0xc0000001) { do_cpuid(0xc0000001, regs); val = regs[3]; } else return; /* Enable RNG if present. */ if ((val & VIA_CPUID_HAS_RNG) != 0) { via_feature_rng = VIA_HAS_RNG; wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG); } /* Enable PadLock if present. */ if ((val & VIA_CPUID_HAS_ACE) != 0) via_feature_xcrypt |= VIA_HAS_AES; if ((val & VIA_CPUID_HAS_ACE2) != 0) via_feature_xcrypt |= VIA_HAS_AESCTR; if ((val & VIA_CPUID_HAS_PHE) != 0) via_feature_xcrypt |= VIA_HAS_SHA; if ((val & VIA_CPUID_HAS_PMM) != 0) via_feature_xcrypt |= VIA_HAS_MM; if (via_feature_xcrypt != 0) wrmsr(0x1107, rdmsr(0x1107) | (1 << 28)); } /* * The value for the TSC_AUX MSR and rdtscp/rdpid on the invoking CPU. * * Caller should prevent CPU migration. 
 */
u_int
cpu_auxmsr(void)
{
	KASSERT((read_rflags() & PSL_I) == 0, ("context switch possible"));
	return (PCPU_GET(cpuid));
}

void
cpu_init_small_core(void)
{
	u_int r[4];

	if (cpu_high < 0x1a)
		return;

	cpuid_count(0x1a, 0, r);
	if ((r[0] & CPUID_HYBRID_CORE_MASK) != CPUID_HYBRID_SMALL_CORE)
		return;

	PCPU_SET(small_core, 1);
	if (pmap_pcid_enabled && invpcid_works &&
	    pmap_pcid_invlpg_workaround_uena) {
		PCPU_SET(pcid_invlpg_workaround, 1);
		pmap_pcid_invlpg_workaround = 1;
	}
}

/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;
	uint32_t cr4;

	cr4 = rcr4();
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		cr4 |= CR4_FXSR | CR4_XMM;
		hw_instruction_sse = 1;
	}
	if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
		cr4 |= CR4_FSGSBASE;

	if (cpu_stdext_feature2 & CPUID_STDEXT2_PKU)
		cr4 |= CR4_PKE;

	/*
	 * If SMEP is present, we only need to flush RSB (by default)
	 * on context switches, to prevent cross-process ret2spec
	 * attacks. Do it automatically if ibrs_disable is set, to
	 * complete the mitigation.
	 *
	 * Postpone enabling the SMEP on the boot CPU until the page
	 * tables are switched from the boot loader identity mapping
	 * to the kernel tables. The boot loader enables the U bit in
	 * its tables.
	 */
	if (IS_BSP()) {
		if (cpu_stdext_feature & CPUID_STDEXT_SMEP &&
		    !TUNABLE_INT_FETCH(
		    "machdep.mitigations.cpu_flush_rsb_ctxsw",
		    &cpu_flush_rsb_ctxsw) &&
		    hw_ibrs_disable)
			cpu_flush_rsb_ctxsw = 1;
	} else {
		if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
			cr4 |= CR4_SMEP;
		if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
			cr4 |= CR4_SMAP;
	}
	load_cr4(cr4);
	/* Reload cpu ext features to reflect cr4 changes */
-	if (IS_BSP())
+	if (IS_BSP() && cold)
		identify_cpu_ext_features();
	if (IS_BSP() && (amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	hw_ibrs_recalculate(false);
	hw_ssb_recalculate(false);
	amd64_syscall_ret_flush_l1d_recalc();
	x86_rngds_mitg_recalculate(false);
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		init_amd();
		break;
	case CPU_VENDOR_CENTAUR:
		init_via();
		break;
	}

	if ((amd_feature & AMDID_RDTSCP) != 0 ||
	    (cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0)
		wrmsr(MSR_TSC_AUX, cpu_auxmsr());

	if (!IS_BSP())
		cpu_init_small_core();
}

void
initializecpucache(void)
{
	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * (Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments. These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}

	/*
	 * The kernel's use of CLFLUSH{,OPT} can be disabled manually
	 * by setting the hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}
}
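Editor's note (not part of the patch): for readers outside the kernel tree, the standalone C sketch below models the ordering the new acpi_wakeup_machdep hunk enforces on the resume path: the CPUID-derived feature word is deliberately forgotten before sleep, microcode is reloaded first on wake, the feature word is re-read, and only then are the mitigation knobs recalculated. All names here (cpu_stdext_feature3, identify_cpu_ext_features, hw_ibrs_recalculate, ucode_reload) are simplified stand-ins for the kernel symbols referenced in the patch, not the real implementations, and the single "IBRS" bit is an assumed placeholder for the microcode-dependent capabilities.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel state touched by the patch. */
#define STDEXT3_IBRS	0x1	/* pretend "IBRS supported" capability bit */

static uint32_t cpu_stdext_feature3;	/* zeroed before sleep, re-read on wake */
static bool hw_ibrs_ibpb_active;	/* mitigation knob derived from the bit */
static bool microcode_loaded;		/* whether updated microcode is applied */

/* Model of CPUID: the capability only shows up once microcode is loaded. */
static uint32_t
cpuid_stdext3(void)
{
	return (microcode_loaded ? STDEXT3_IBRS : 0);
}

static void
identify_cpu_ext_features(void)
{
	cpu_stdext_feature3 = cpuid_stdext3();
}

static void
hw_ibrs_recalculate(bool all_cpus)
{
	(void)all_cpus;
	hw_ibrs_ibpb_active = (cpu_stdext_feature3 & STDEXT3_IBRS) != 0;
}

static void
ucode_reload(void)
{
	microcode_loaded = true;
}

int
main(void)
{
	/* acpi_sleep_machdep(): forget microcode-dependent state before sleep. */
	hw_ibrs_ibpb_active = false;
	cpu_stdext_feature3 = 0;
	microcode_loaded = false;

	/* acpi_wakeup_machdep(): reload microcode, re-read CPUID, then recalc. */
	ucode_reload();
	identify_cpu_ext_features();
	hw_ibrs_recalculate(true);

	printf("IBRS mitigation active after resume: %s\n",
	    hw_ibrs_ibpb_active ? "yes" : "no");
	return (0);
}

Swapping the last two calls (recalculating before re-reading CPUID) would derive the knob from the zeroed feature word and leave the mitigation off after resume, which is exactly the situation the added hunk avoids; the companion initcpu.c change makes initializecpu() skip its own identify_cpu_ext_features() call after boot so the resume path controls that ordering.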