Changeset View
Changeset View
Standalone View
Standalone View
head/sys/x86/x86/cpu_machdep.c
Show First 20 Lines • Show All 136 Lines • ▼ Show 20 Lines | |||||
* interrupts. | * interrupts. | ||||
*/ | */ | ||||
/*
 * Idle the CPU via MONITOR/MWAIT with the given ACPI C-state hint.
 * Any store to the per-CPU monitor buffer (or an interrupt, since
 * MWAIT_INTRBREAK is requested) wakes the CPU.
 */
void
acpi_cpu_idle_mwait(uint32_t mwait_hint)
{
	int *state;

	/*
	 * A comment in Linux patch claims that 'CPUs run faster with
	 * speculation protection disabled. All CPU threads in a core
	 * must disable speculation protection for it to be
	 * disabled. Disable it while we are idle so the other
	 * hyperthread can run fast.'
	 *
	 * XXXKIB. Software coordination mode should be supported,
	 * but all Intel CPUs provide hardware coordination.
	 */

	state = (int *)PCPU_PTR(monitorbuf);
	KASSERT(*state == STATE_SLEEPING,
	    ("cpu_mwait_cx: wrong monitorbuf state"));
	/*
	 * Publish STATE_MWAIT *before* arming the monitor, then re-check
	 * it after cpu_monitor(): a remote CPU that wants to wake us
	 * writes the buffer, and this ordering closes the race where the
	 * wakeup store lands between the check and the MWAIT.
	 */
	*state = STATE_MWAIT;
	/* Adjust SPEC_CTRL/IBRS state around the wait (see comment above). */
	handle_ibrs_entry();
	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
	handle_ibrs_exit();

	/*
	 * We should exit on any event that interrupts mwait, because
	 * that event might be a wanted interrupt.
	 */
	*state = STATE_RUNNING;
}
▲ Show 20 Lines • Show All 400 Lines • ▼ Show 20 Lines | #ifdef SMP | ||||
if (nmi_is_broadcast) { | if (nmi_is_broadcast) { | ||||
nmi_call_kdb_smp(type, frame); | nmi_call_kdb_smp(type, frame); | ||||
return; | return; | ||||
} | } | ||||
#endif | #endif | ||||
nmi_call_kdb(PCPU_GET(cpuid), type, frame); | nmi_call_kdb(PCPU_GET(cpuid), type, frame); | ||||
#endif | #endif | ||||
} | } | ||||
/*
 * IBRS (Indirect Branch Restricted Speculation) runtime state.
 *
 * hw_ibrs_active: whether IBRS mitigation is currently in effect
 * (consulted by the kernel entry/exit IBRS handling; exported read-only
 * as hw.ibrs_active).
 * hw_ibrs_disable: administrative knob, default 1 (IBRS disabled);
 * settable via the hw.ibrs_disable sysctl/tunable handler below.
 */
int hw_ibrs_active;
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "IBRS active");
void | |||||
hw_ibrs_recalculate(void) | |||||
{ | |||||
uint64_t v; | |||||
if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) { | |||||
if (hw_ibrs_disable) { | |||||
v= rdmsr(MSR_IA32_SPEC_CTRL); | |||||
v &= ~IA32_SPEC_CTRL_IBRS; | |||||
wrmsr(MSR_IA32_SPEC_CTRL, v); | |||||
} else { | |||||
v= rdmsr(MSR_IA32_SPEC_CTRL); | |||||
v |= IA32_SPEC_CTRL_IBRS; | |||||
wrmsr(MSR_IA32_SPEC_CTRL, v); | |||||
} | |||||
return; | |||||
} | |||||
hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 && | |||||
!hw_ibrs_disable; | |||||
} | |||||
static int | |||||
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
int error, val; | |||||
val = hw_ibrs_disable; | |||||
error = sysctl_handle_int(oidp, &val, 0, req); | |||||
if (error != 0 || req->newptr == NULL) | |||||
return (error); | |||||
hw_ibrs_disable = val != 0; | |||||
hw_ibrs_recalculate(); | |||||
return (0); | |||||
} | |||||
/*
 * hw.ibrs_disable: read-write sysctl and loader tunable (RWTUN) backed
 * by hw_ibrs_disable_handler; NOFETCH defers applying the tunable
 * until the handler runs explicitly during boot.
 */
SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable IBRS");