Changeset View
Changeset View
Standalone View
Standalone View
sys/x86/x86/tsc.c
Show First 20 Lines • Show All 234 Lines • ▼ Show 20 Lines | #undef C2D | ||||
*res = freq; | *res = freq; | ||||
return (true); | return (true); | ||||
} | } | ||||
} | } | ||||
return (false); | return (false); | ||||
} | } | ||||
/*
 * Estimate the TSC frequency by timing a fixed DELAY() interval.  DELAY()
 * is backed by whatever early clock is available (a registered timecounter
 * or the 8254 PIT), hence the generic name.  The result is written through
 * the 'res' out-parameter.
 */
static void
tsc_freq_tc(uint64_t *res)
{
	uint64_t tsc1, tsc2;
	int64_t overhead;
	int count, i;

	/*
	 * Measure the fixed overhead of a DELAY() call by timing DELAY(0).
	 * Discard the first iteration to avoid cache/TLB warm-up effects.
	 */
	overhead = 0;
	for (i = 0, count = 8; i < count; i++) {
		tsc1 = rdtsc_ordered();
		DELAY(0);
		tsc2 = rdtsc_ordered();
		if (i > 0)
			overhead += tsc2 - tsc1;
	}
	/* Only count - 1 samples were accumulated; average over those. */
	overhead /= count - 1;

	/* Count TSC cycles across a 100ms delay and scale to cycles/sec. */
	tsc1 = rdtsc_ordered();
	DELAY(100000);
	tsc2 = rdtsc_ordered();
	*res = (tsc2 - tsc1 - overhead) * 10;
}
/* | |||||
* Try to determine the TSC frequency using CPUID or hypercalls. If successful, | |||||
* this lets us use the TSC for early DELAY() calls instead of the 8254 timer, | * this lets us use the TSC for early DELAY() calls instead of the 8254 timer, | ||||
* which may be unreliable or entirely absent on contemporary systems. However, | |||||
* avoid calibrating using the 8254 here so as to give hypervisors a chance to | |||||
rpokala: "a chance to a"
I think you're missing a verb...? | |||||
* register a timecounter that can be used instead. | |||||
*/ | |||||
static void | static void | ||||
probe_tsc_freq(void) | probe_tsc_freq_early(void) | ||||
{ | { | ||||
#ifdef __i386__ | #ifdef __i386__ | ||||
/* The TSC is known to be broken on certain CPUs. */ | /* The TSC is known to be broken on certain CPUs. */ | ||||
switch (cpu_vendor_id) { | switch (cpu_vendor_id) { | ||||
case CPU_VENDOR_AMD: | case CPU_VENDOR_AMD: | ||||
switch (cpu_id & 0xFF0) { | switch (cpu_id & 0xFF0) { | ||||
case 0x500: | case 0x500: | ||||
/* K5 Model 0 */ | /* K5 Model 0 */ | ||||
▲ Show 20 Lines • Show All 84 Lines • ▼ Show 20 Lines | if (tsc_freq_cpuid_vm()) { | ||||
* newer platforms anyway, so don't delay our boot for what | * newer platforms anyway, so don't delay our boot for what | ||||
* might be a garbage result. Late calibration is required if | * might be a garbage result. Late calibration is required if | ||||
* the initial frequency was obtained from CPUID.16H, as the | * the initial frequency was obtained from CPUID.16H, as the | ||||
* derived value may be off by as much as 1%. | * derived value may be off by as much as 1%. | ||||
*/ | */ | ||||
if (bootverbose) | if (bootverbose) | ||||
printf("Early TSC frequency %juHz derived from CPUID\n", | printf("Early TSC frequency %juHz derived from CPUID\n", | ||||
(uintmax_t)tsc_freq); | (uintmax_t)tsc_freq); | ||||
} else if (tsc_skip_calibration) { | } | ||||
} | |||||
/* | /* | ||||
* If we were unable to determine the TSC frequency via CPU registers, try | |||||
* to calibrate against a known clock. | |||||
*/ | |||||
static void | |||||
probe_tsc_freq_late(void) | |||||
{ | |||||
if (tsc_freq != 0) | |||||
return; | |||||
if (tsc_skip_calibration) { | |||||
/* | |||||
* Try to parse the brand string to obtain the nominal TSC | * Try to parse the brand string to obtain the nominal TSC | ||||
* frequency. | * frequency. | ||||
*/ | */ | ||||
if (cpu_vendor_id == CPU_VENDOR_INTEL && | if (cpu_vendor_id == CPU_VENDOR_INTEL && | ||||
tsc_freq_intel_brand(&tsc_freq)) { | tsc_freq_intel_brand(&tsc_freq)) { | ||||
if (bootverbose) | if (bootverbose) | ||||
printf( | printf( | ||||
"Early TSC frequency %juHz derived from brand string\n", | "Early TSC frequency %juHz derived from brand string\n", | ||||
(uintmax_t)tsc_freq); | (uintmax_t)tsc_freq); | ||||
} else { | } else { | ||||
tsc_disabled = 1; | tsc_disabled = 1; | ||||
} | } | ||||
} else { | } else { | ||||
/* | /* | ||||
* Calibrate against the 8254 PIT. This estimate will be | * Calibrate against a timecounter or the 8254 PIT. This | ||||
* refined later in tsc_calib(). | * estimate will be refined later in tsc_calib(). | ||||
*/ | */ | ||||
tsc_freq_8254(&tsc_freq); | tsc_freq_tc(&tsc_freq); | ||||
if (bootverbose) | if (bootverbose) | ||||
printf( | printf( | ||||
"Early TSC frequency %juHz calibrated from 8254 PIT\n", | "Early TSC frequency %juHz calibrated from 8254 PIT\n", | ||||
(uintmax_t)tsc_freq); | (uintmax_t)tsc_freq); | ||||
} | } | ||||
} | |||||
void | |||||
start_TSC(void) | |||||
{ | |||||
if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) | |||||
return; | |||||
probe_tsc_freq_late(); | |||||
if (cpu_power_ecx & CPUID_PERF_STAT) { | if (cpu_power_ecx & CPUID_PERF_STAT) { | ||||
/* | /* | ||||
* XXX Some emulators expose host CPUID without actual support | * XXX Some emulators expose host CPUID without actual support | ||||
* for these MSRs. We must test whether they really work. | * for these MSRs. We must test whether they really work. | ||||
*/ | */ | ||||
wrmsr(MSR_MPERF, 0); | wrmsr(MSR_MPERF, 0); | ||||
wrmsr(MSR_APERF, 0); | wrmsr(MSR_APERF, 0); | ||||
DELAY(10); | DELAY(10); | ||||
if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0) | if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0) | ||||
tsc_perf_stat = 1; | tsc_perf_stat = 1; | ||||
} | } | ||||
} | |||||
void | |||||
start_TSC(void) | |||||
{ | |||||
if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) | |||||
return; | |||||
/* | /* | ||||
* Inform CPU accounting about our boot-time clock rate. This will | * Inform CPU accounting about our boot-time clock rate. This will | ||||
* be updated if someone loads a cpufreq driver after boot that | * be updated if someone loads a cpufreq driver after boot that | ||||
* discovers a new max frequency. | * discovers a new max frequency. | ||||
* | * | ||||
* The frequency may also be updated after late calibration is complete; | * The frequency may also be updated after late calibration is complete; | ||||
* however, we register the TSC as the ticker now to avoid switching | * however, we register the TSC as the ticker now to avoid switching | ||||
* counters after much of the kernel has already booted and potentially | * counters after much of the kernel has already booted and potentially | ||||
▲ Show 20 Lines • Show All 293 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
void | void | ||||
tsc_init(void) | tsc_init(void) | ||||
{ | { | ||||
if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) | if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) | ||||
return; | return; | ||||
probe_tsc_freq(); | probe_tsc_freq_early(); | ||||
} | } | ||||
/* | /* | ||||
* Perform late calibration of the TSC frequency once ACPI-based timecounters | * Perform late calibration of the TSC frequency once ACPI-based timecounters | ||||
* are available. At this point timehands are not set up, so we read the | * are available. At this point timehands are not set up, so we read the | ||||
* highest-quality timecounter directly rather than using (s)binuptime(). | * highest-quality timecounter directly rather than using (s)binuptime(). | ||||
*/ | */ | ||||
void | void | ||||
▲ Show 20 Lines • Show All 226 Lines • Show Last 20 Lines |
"a chance to a"
I think you're missing a verb...?