Authored By: crest_bultmann.eu, Jul 22 2019, 3:17 PM
Size: 41 KB

hwpstate.patch

diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 184f02b2f62..e9c50cd5994 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -699,7 +699,8 @@ x86/bios/smbios.c optional smbios
x86/bios/vpd.c optional vpd
x86/cpufreq/powernow.c optional cpufreq
x86/cpufreq/est.c optional cpufreq
-x86/cpufreq/hwpstate.c optional cpufreq
+x86/cpufreq/hwpstate_amd.c optional cpufreq
+x86/cpufreq/hwpstate_intel.c optional cpufreq
x86/cpufreq/p4tcc.c optional cpufreq
x86/iommu/busdma_dmar.c optional acpi acpi_dmar pci
x86/iommu/intel_ctx.c optional acpi acpi_dmar pci
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index f7a86cf5ee4..d9d9152e33b 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -576,7 +576,8 @@ x86/acpica/srat.c optional acpi
x86/bios/smbios.c optional smbios
x86/bios/vpd.c optional vpd
x86/cpufreq/est.c optional cpufreq
-x86/cpufreq/hwpstate.c optional cpufreq
+x86/cpufreq/hwpstate_amd.c optional cpufreq
+x86/cpufreq/hwpstate_intel.c optional cpufreq
x86/cpufreq/p4tcc.c optional cpufreq
x86/cpufreq/powernow.c optional cpufreq
x86/cpufreq/smist.c optional cpufreq
diff --git a/sys/dev/acpica/acpi_perf.c b/sys/dev/acpica/acpi_perf.c
index df0fa9a29b6..fcbd7aa1ec8 100644
--- a/sys/dev/acpica/acpi_perf.c
+++ b/sys/dev/acpica/acpi_perf.c
@@ -50,6 +50,8 @@ __FBSDID("$FreeBSD$");
#include "cpufreq_if.h"
+extern bool intel_speed_shift;
+
/*
* Support for ACPI processor performance states (Px) according to
* section 8.3.3 of the ACPI 2.0c specification.
@@ -144,6 +146,9 @@ acpi_perf_identify(driver_t *driver, device_t parent)
ACPI_HANDLE handle;
device_t dev;
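+ /* Nothing to do if the Intel hwpstate driver is handling this CPU. */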
+ if (intel_speed_shift)
+ return;
+
/* Make sure we're not being doubly invoked. */
if (device_find_child(parent, "acpi_perf", -1) != NULL)
return;
diff --git a/sys/kern/kern_cpu.c b/sys/kern/kern_cpu.c
index 724131d713a..5493027f27e 100644
--- a/sys/kern/kern_cpu.c
+++ b/sys/kern/kern_cpu.c
@@ -75,7 +75,8 @@ struct cpufreq_softc {
struct cf_level_lst all_levels;
int all_count;
int max_mhz;
- device_t dev;
+ device_t cf_dev;
+ device_t cf_drv_dev;
struct sysctl_ctx_list sysctl_ctx;
struct task startup_task;
struct cf_level *levels_buf;
@@ -99,13 +100,13 @@ TAILQ_HEAD(cf_setting_lst, cf_setting_array);
printf("cpufreq: " msg); \
} while (0)
-static int cpufreq_attach(device_t dev);
+static int cpufreq_attach(device_t cf_dev);
static void cpufreq_startup_task(void *ctx, int pending);
-static int cpufreq_detach(device_t dev);
-static int cf_set_method(device_t dev, const struct cf_level *level,
+static int cpufreq_detach(device_t cf_dev);
+static int cf_set_method(device_t cf_dev, const struct cf_level *level,
int priority);
-static int cf_get_method(device_t dev, struct cf_level *level);
-static int cf_levels_method(device_t dev, struct cf_level *levels,
+static int cf_get_method(device_t cf_dev, struct cf_level *level);
+static int cf_levels_method(device_t cf_dev, struct cf_level *levels,
int *count);
static int cpufreq_insert_abs(struct cpufreq_softc *sc,
struct cf_setting *sets, int count);
@@ -142,46 +143,42 @@ SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RWTUN, &cf_lowest_freq, 1,
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RWTUN, &cf_verbose, 1,
"Print verbose debugging messages");
+/*
+ * This is called as a result of a hardware-specific frequency control driver
+ * calling cpufreq_register. It provides a general interface for system-wide
+ * frequency control and operates on a per-CPU basis.
+ */
static int
-cpufreq_attach(device_t dev)
+cpufreq_attach(device_t cf_dev)
{
struct cpufreq_softc *sc;
struct pcpu *pc;
device_t parent;
uint64_t rate;
- int numdevs;
- CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
- sc = device_get_softc(dev);
- parent = device_get_parent(dev);
- sc->dev = dev;
+ CF_DEBUG("initializing %s\n", device_get_nameunit(cf_dev));
+ sc = device_get_softc(cf_dev);
+ parent = device_get_parent(cf_dev);
+ sc->cf_dev = cf_dev;
sysctl_ctx_init(&sc->sysctl_ctx);
TAILQ_INIT(&sc->all_levels);
CF_MTX_INIT(&sc->lock);
sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
SLIST_INIT(&sc->saved_freq);
/* Try to get nominal CPU freq to use it as maximum later if needed */
- sc->max_mhz = cpu_get_nominal_mhz(dev);
+ sc->max_mhz = cpu_get_nominal_mhz(cf_dev);
/* If that fails, try to measure the current rate */
if (sc->max_mhz <= 0) {
- pc = cpu_get_pcpu(dev);
+ CF_DEBUG("Unable to obtain nominal frequency.\n");
+ pc = cpu_get_pcpu(cf_dev);
if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
sc->max_mhz = rate / 1000000;
else
sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
}
- /*
- * Only initialize one set of sysctls for all CPUs. In the future,
- * if multiple CPUs can have different settings, we can move these
- * sysctls to be under every CPU instead of just the first one.
- */
- numdevs = devclass_get_count(cpufreq_dc);
- if (numdevs > 1)
- return (0);
-
CF_DEBUG("initializing one-time data for %s\n",
- device_get_nameunit(dev));
+ device_get_nameunit(cf_dev));
sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
M_DEVBUF, M_WAITOK);
SYSCTL_ADD_PROC(&sc->sysctl_ctx,
@@ -197,7 +194,7 @@ cpufreq_attach(device_t dev)
* Queue a one-shot broadcast that levels have changed.
* It will run once the system has completed booting.
*/
- TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
+ TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, cf_dev);
taskqueue_enqueue(taskqueue_thread, &sc->startup_task);
return (0);
@@ -212,14 +209,13 @@ cpufreq_startup_task(void *ctx, int pending)
}
static int
-cpufreq_detach(device_t dev)
+cpufreq_detach(device_t cf_dev)
{
struct cpufreq_softc *sc;
struct cf_saved_freq *saved_freq;
- int numdevs;
- CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
- sc = device_get_softc(dev);
+ CF_DEBUG("shutdown %s\n", device_get_nameunit(cf_dev));
+ sc = device_get_softc(cf_dev);
sysctl_ctx_free(&sc->sysctl_ctx);
while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
@@ -227,18 +223,13 @@ cpufreq_detach(device_t dev)
free(saved_freq, M_TEMP);
}
- /* Only clean up these resources when the last device is detaching. */
- numdevs = devclass_get_count(cpufreq_dc);
- if (numdevs == 1) {
- CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
- free(sc->levels_buf, M_DEVBUF);
- }
+ free(sc->levels_buf, M_DEVBUF);
return (0);
}
static int
-cf_set_method(device_t dev, const struct cf_level *level, int priority)
+cf_set_method(device_t cf_dev, const struct cf_level *level, int priority)
{
struct cpufreq_softc *sc;
const struct cf_setting *set;
@@ -247,7 +238,7 @@ cf_set_method(device_t dev, const struct cf_level *level, int priority)
int error, i;
u_char pri;
- sc = device_get_softc(dev);
+ sc = device_get_softc(cf_dev);
error = 0;
set = NULL;
saved_freq = NULL;
@@ -273,7 +264,8 @@ cf_set_method(device_t dev, const struct cf_level *level, int priority)
* handle having different CPUs at different frequencies.
*/
if (mp_ncpus > 1 && !smp_started) {
- device_printf(dev, "rejecting change, SMP not started yet\n");
+ device_printf(cf_dev,
+ "rejecting change, SMP not started yet\n");
error = ENXIO;
goto out;
}
@@ -422,27 +414,79 @@ cf_set_method(device_t dev, const struct cf_level *level, int priority)
}
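+/*
+ * Ask the driver for its currently-programmed frequency, in MHz. Returns
+ * -1 if the driver cannot report it.
+ */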
static int
-cf_get_method(device_t dev, struct cf_level *level)
+cpufreq_get_frequency(device_t cf_drv_dev)
+{
+ struct cf_setting set;
+
+ if (CPUFREQ_DRV_GET(cf_drv_dev, &set) != 0)
+ return (-1);
+
+ return (set.freq);
+}
+
+/* Returns the index into *levels with the match */
+static int
+cpufreq_get_level(device_t cf_drv_dev, struct cf_level *levels, int count)
+{
+ int i, freq;
+
+ if ((freq = cpufreq_get_frequency(cf_drv_dev)) < 0)
+ return (-1);
+ for (i = 0; i < count; i++)
+ if (freq == levels[i].total_set.freq)
+ return (i);
+
+ return (-1);
+}
+
+/*
+ * Used by the cpufreq core, this function populates *level with the current
+ * frequency, normally taken from the cached value in sc->curr_level. If the
+ * lower-level driver has set the CPUFREQ_FLAG_UNCACHED flag, the frequency
+ * is obtained from the driver itself instead.
+ */
+static int
+cf_get_method(device_t cf_dev, struct cf_level *level)
{
struct cpufreq_softc *sc;
struct cf_level *levels;
- struct cf_setting *curr_set, set;
+ struct cf_setting *curr_set;
struct pcpu *pc;
device_t *devs;
- int bdiff, count, diff, error, i, n, numdevs;
+ int bdiff, count, diff, error, i, numdevs, type;
uint64_t rate;
- sc = device_get_softc(dev);
+ sc = device_get_softc(cf_dev);
error = 0;
levels = NULL;
- /* If we already know the current frequency, we're done. */
+ /*
+ * If we already know the current frequency, and the driver didn't ask
+ * for uncached usage, we're done.
+ */
CF_MTX_LOCK(&sc->lock);
curr_set = &sc->curr_level.total_set;
- if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
+ error = CPUFREQ_DRV_TYPE(sc->cf_drv_dev, &type);
+ if (error == 0 && (type & CPUFREQ_FLAG_UNCACHED)) {
+ struct cf_setting set;
+
+ /*
+ * If the driver wants to always report back the real frequency,
+ * first try the driver and if that fails, fall back to
+ * estimating.
+ */
+ if (CPUFREQ_DRV_GET(sc->cf_drv_dev, &set) != 0)
+ goto estimate;
+ sc->curr_level.total_set = set;
+ CF_DEBUG("get returning immediate freq %d\n", curr_set->freq);
+ goto out;
+ } else if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
CF_DEBUG("get returning known freq %d\n", curr_set->freq);
+ error = 0;
goto out;
}
+
+ MPASS(error != 0 || (type & CPUFREQ_FLAG_UNCACHED) == 0);
CF_MTX_UNLOCK(&sc->lock);
/*
@@ -454,14 +498,14 @@ cf_get_method(device_t dev, struct cf_level *level)
levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
if (levels == NULL)
return (ENOMEM);
- error = CPUFREQ_LEVELS(sc->dev, levels, &count);
+ error = CPUFREQ_LEVELS(cf_dev, levels, &count);
if (error) {
if (error == E2BIG)
printf("cpufreq: need to increase CF_MAX_LEVELS\n");
free(levels, M_TEMP);
return (error);
}
- error = device_get_children(device_get_parent(dev), &devs, &numdevs);
+ error = device_get_children(device_get_parent(cf_dev), &devs, &numdevs);
if (error) {
free(levels, M_TEMP);
return (error);
@@ -476,29 +520,26 @@ cf_get_method(device_t dev, struct cf_level *level)
* The estimation code below catches this case though.
*/
CF_MTX_LOCK(&sc->lock);
- for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
- if (!device_is_attached(devs[n]))
- continue;
- if (CPUFREQ_DRV_GET(devs[n], &set) != 0)
- continue;
- for (i = 0; i < count; i++) {
- if (set.freq == levels[i].total_set.freq) {
- sc->curr_level = levels[i];
- break;
- }
- }
- }
- free(devs, M_TEMP);
+ i = cpufreq_get_level(sc->cf_drv_dev, levels, count);
+ if (i >= 0)
+ sc->curr_level = levels[i];
+ else
+ CF_DEBUG("Couldn't find supported level for %s\n",
+ device_get_nameunit(sc->cf_drv_dev));
+
if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
goto out;
}
+estimate:
+ CF_MTX_ASSERT(&sc->lock);
+
/*
* We couldn't find an exact match, so attempt to estimate and then
* match against a level.
*/
- pc = cpu_get_pcpu(dev);
+ pc = cpu_get_pcpu(cf_dev);
if (pc == NULL) {
error = ENXIO;
goto out;
@@ -525,102 +566,133 @@ cf_get_method(device_t dev, struct cf_level *level)
return (error);
}
+/*
+ * Either directly obtain settings from the cpufreq driver, or build a list of
+ * relative settings to be integrated later against an absolute max.
+ */
static int
-cf_levels_method(device_t dev, struct cf_level *levels, int *count)
+cpufreq_add_levels(device_t cf_dev, struct cpufreq_softc *sc,
+    struct cf_setting_lst *rel_sets)
+{
+ struct cf_setting_array *set_arr;
+ struct cf_setting *sets;
+ int type, set_count, error;
+
+ /* Skip devices that aren't ready. */
+ if (!device_is_attached(cf_dev))
+ return (0);
+
+ /*
+ * Get settings, skipping drivers that offer no settings or
+ * provide settings for informational purposes only.
+ */
+ error = CPUFREQ_DRV_TYPE(cf_dev, &type);
+ if (error != 0 || (type & CPUFREQ_FLAG_INFO_ONLY)) {
+ if (error == 0) {
+ CF_DEBUG("skipping info-only driver %s\n",
+ device_get_nameunit(cf_dev));
+ }
+ return (error);
+ }
+
+ sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
+ if (sets == NULL)
+ return (ENOMEM);
+
+ set_count = MAX_SETTINGS;
+ error = CPUFREQ_DRV_SETTINGS(cf_dev, sets, &set_count);
+ if (error || set_count == 0)
+ goto out;
+
+ /* Add the settings to our absolute/relative lists. */
+ switch (type & CPUFREQ_TYPE_MASK) {
+ case CPUFREQ_TYPE_ABSOLUTE:
+ error = cpufreq_insert_abs(sc, sets, set_count);
+ break;
+ case CPUFREQ_TYPE_RELATIVE:
+ CF_DEBUG("adding %d relative settings\n", set_count);
+ set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
+ if (set_arr == NULL) {
+ error = ENOMEM;
+ goto out;
+ }
+ bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
+ set_arr->count = set_count;
+ TAILQ_INSERT_TAIL(rel_sets, set_arr, link);
+ break;
+ default:
+ error = EINVAL;
+ }
+
+out:
+ free(sets, M_TEMP);
+ return (error);
+}
+
+static int
+cf_levels_method(device_t cf_dev, struct cf_level *levels, int *count)
{
struct cf_setting_array *set_arr;
struct cf_setting_lst rel_sets;
struct cpufreq_softc *sc;
struct cf_level *lev;
- struct cf_setting *sets;
struct pcpu *pc;
device_t *devs;
- int error, i, numdevs, set_count, type;
+ int error, i, numdevs;
uint64_t rate;
if (levels == NULL || count == NULL)
return (EINVAL);
TAILQ_INIT(&rel_sets);
- sc = device_get_softc(dev);
- error = device_get_children(device_get_parent(dev), &devs, &numdevs);
+ sc = device_get_softc(cf_dev);
+ error = device_get_children(device_get_parent(cf_dev), &devs, &numdevs);
if (error)
return (error);
- sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
- if (sets == NULL) {
- free(devs, M_TEMP);
- return (ENOMEM);
- }
/* Get settings from all cpufreq drivers. */
CF_MTX_LOCK(&sc->lock);
- for (i = 0; i < numdevs; i++) {
- /* Skip devices that aren't ready. */
- if (!device_is_attached(devs[i]))
- continue;
-
- /*
- * Get settings, skipping drivers that offer no settings or
- * provide settings for informational purposes only.
- */
- error = CPUFREQ_DRV_TYPE(devs[i], &type);
- if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
- if (error == 0) {
- CF_DEBUG("skipping info-only driver %s\n",
- device_get_nameunit(devs[i]));
- }
- continue;
- }
- set_count = MAX_SETTINGS;
- error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
- if (error || set_count == 0)
- continue;
-
- /* Add the settings to our absolute/relative lists. */
- switch (type & CPUFREQ_TYPE_MASK) {
- case CPUFREQ_TYPE_ABSOLUTE:
- error = cpufreq_insert_abs(sc, sets, set_count);
- break;
- case CPUFREQ_TYPE_RELATIVE:
- CF_DEBUG("adding %d relative settings\n", set_count);
- set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
- if (set_arr == NULL) {
- error = ENOMEM;
+ error = cpufreq_add_levels(sc->cf_drv_dev, sc, &rel_sets);
+ /*
+ * Fall back to legacy-style enumeration if the frequency driver doesn't
+ * implement the needed interface. It should be safe to remove this
+ * fallback once all frequency drivers have been converted.
+ */
+ if (error == ENXIO) {
+ for (i = 0; i < numdevs; i++) {
+ error = cpufreq_add_levels(devs[i], sc, &rel_sets);
+ if (error)
goto out;
- }
- bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
- set_arr->count = set_count;
- TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
- break;
- default:
- error = EINVAL;
}
- if (error)
- goto out;
- }
+ } else if (error)
+ goto out;
/*
* If there are no absolute levels, create a fake one at 100%. We
* then cache the clockrate for later use as our base frequency.
*/
if (TAILQ_EMPTY(&sc->all_levels)) {
+ struct cf_setting set;
+
+ CF_DEBUG("No absolute levels returned by driver\n");
+
if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
- sc->max_mhz = cpu_get_nominal_mhz(dev);
+ sc->max_mhz = cpu_get_nominal_mhz(cf_dev);
/*
* If the CPU can't report a rate for 100%, hope
* the CPU is running at its nominal rate right now,
* and use that instead.
*/
if (sc->max_mhz <= 0) {
- pc = cpu_get_pcpu(dev);
+ pc = cpu_get_pcpu(cf_dev);
cpu_est_clockrate(pc->pc_cpuid, &rate);
sc->max_mhz = rate / 1000000;
}
}
- memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
- sets[0].freq = sc->max_mhz;
- sets[0].dev = NULL;
- error = cpufreq_insert_abs(sc, sets, 1);
+ memset(&set, CPUFREQ_VAL_UNKNOWN, sizeof(set));
+ set.freq = sc->max_mhz;
+ set.dev = NULL;
+ error = cpufreq_insert_abs(sc, &set, 1);
if (error)
goto out;
}
@@ -666,7 +738,6 @@ cf_levels_method(device_t dev, struct cf_level *levels, int *count)
free(set_arr, M_TEMP);
}
free(devs, M_TEMP);
- free(sets, M_TEMP);
return (error);
}
@@ -891,7 +962,7 @@ cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
sc = oidp->oid_arg1;
levels = sc->levels_buf;
- error = CPUFREQ_GET(sc->dev, &levels[0]);
+ error = CPUFREQ_GET(sc->cf_dev, &levels[0]);
if (error)
goto out;
freq = levels[0].total_set.freq;
@@ -953,7 +1024,7 @@ cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
sbuf_delete(&sb);
return (ENOMEM);
}
- error = CPUFREQ_LEVELS(sc->dev, levels, &count);
+ error = CPUFREQ_LEVELS(sc->cf_dev, levels, &count);
if (error) {
if (error == E2BIG)
printf("cpufreq: need to increase CF_MAX_LEVELS\n");
@@ -978,12 +1049,12 @@ cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
- device_t dev;
+ device_t cf_drv_dev;
struct cf_setting *sets;
struct sbuf sb;
int error, i, set_count;
- dev = oidp->oid_arg1;
+ cf_drv_dev = oidp->oid_arg1;
sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
/* Get settings from the device and generate the output string. */
@@ -993,7 +1064,7 @@ cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
sbuf_delete(&sb);
return (ENOMEM);
}
- error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
+ error = CPUFREQ_DRV_SETTINGS(cf_drv_dev, sets, &set_count);
if (error)
goto out;
if (set_count) {
@@ -1011,26 +1082,42 @@ cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
return (error);
}
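+/*
+ * Expose a read-only sysctl, e.g. dev.cpufreq.0.freq_driver, naming the
+ * hardware driver (est, hwpstate_intel, ...) serving this CPU.
+ */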
+static void
+cpufreq_add_freq_driver_sysctl(device_t cf_dev)
+{
+ struct cpufreq_softc *sc;
+
+ sc = device_get_softc(cf_dev);
+ SYSCTL_ADD_STRING(&sc->sysctl_ctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(cf_dev)),
+ OID_AUTO, "freq_driver", CTLFLAG_RD,
+ (char *)(uintptr_t)device_get_name(sc->cf_drv_dev), 0,
+ "cpufreq driver used by this cpu");
+}
+
int
-cpufreq_register(device_t dev)
+cpufreq_register(device_t cf_drv_dev)
{
struct cpufreq_softc *sc;
device_t cf_dev, cpu_dev;
+ int error;
/* Add a sysctl to get each driver's settings separately. */
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(cf_drv_dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(cf_drv_dev)),
+ OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, cf_drv_dev, 0,
cpufreq_settings_sysctl, "A", "CPU frequency driver settings");
/*
* Add only one cpufreq device to each CPU. Currently, all CPUs
* must offer the same levels and be switched at the same time.
*/
- cpu_dev = device_get_parent(dev);
+ cpu_dev = device_get_parent(cf_drv_dev);
if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
sc = device_get_softc(cf_dev);
sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
+ sc->cf_drv_dev = cf_drv_dev;
+ cpufreq_add_freq_driver_sysctl(cf_dev);
return (0);
}
@@ -1040,49 +1127,62 @@ cpufreq_register(device_t dev)
return (ENOMEM);
device_quiet(cf_dev);
- return (device_probe_and_attach(cf_dev));
+ error = device_probe_and_attach(cf_dev);
+ if (error)
+ return (error);
+
+ sc = device_get_softc(cf_dev);
+ sc->cf_drv_dev = cf_drv_dev;
+ cpufreq_add_freq_driver_sysctl(cf_dev);
+ return (error);
}
int
-cpufreq_unregister(device_t dev)
+cpufreq_unregister(device_t cf_drv_dev)
{
device_t cf_dev, *devs;
int cfcount, devcount, error, i, type;
+ struct cpufreq_softc *sc;
/*
* If this is the last cpufreq child device, remove the control
* device as well. We identify cpufreq children by calling a method
* they support.
*/
- error = device_get_children(device_get_parent(dev), &devs, &devcount);
+ error = device_get_children(device_get_parent(cf_drv_dev), &devs, &devcount);
if (error)
return (error);
- cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
+ cf_dev = device_find_child(device_get_parent(cf_drv_dev), "cpufreq", -1);
if (cf_dev == NULL) {
- device_printf(dev,
+ device_printf(cf_drv_dev,
"warning: cpufreq_unregister called with no cpufreq device active\n");
free(devs, M_TEMP);
return (0);
}
- cfcount = 0;
- for (i = 0; i < devcount; i++) {
- if (!device_is_attached(devs[i]))
- continue;
- if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
- cfcount++;
- }
- if (cfcount <= 1)
+
+ sc = device_get_softc(cf_dev);
+ if (sc->cf_drv_dev == cf_drv_dev) {
device_delete_child(device_get_parent(cf_dev), cf_dev);
+ } else {
+ cfcount = 0;
+ for (i = 0; i < devcount; i++) {
+ if (!device_is_attached(devs[i]))
+ continue;
+ if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
+ cfcount++;
+ }
+ if (cfcount <= 1)
+ device_delete_child(device_get_parent(cf_dev), cf_dev);
+ }
free(devs, M_TEMP);
return (0);
}
int
-cpufreq_settings_changed(device_t dev)
+cpufreq_settings_changed(device_t cf_dev)
{
EVENTHANDLER_INVOKE(cpufreq_levels_changed,
- device_get_unit(device_get_parent(dev)));
+ device_get_unit(device_get_parent(cf_dev)));
return (0);
}
diff --git a/sys/modules/cpufreq/Makefile b/sys/modules/cpufreq/Makefile
index e9fae049ebe..4198d7d66a7 100644
--- a/sys/modules/cpufreq/Makefile
+++ b/sys/modules/cpufreq/Makefile
@@ -11,7 +11,7 @@ SRCS+= bus_if.h cpufreq_if.h device_if.h pci_if.h
.PATH: ${SRCTOP}/sys/x86/cpufreq
SRCS+= acpi_if.h opt_acpi.h
-SRCS+= est.c hwpstate.c p4tcc.c powernow.c
+SRCS+= est.c hwpstate_amd.c hwpstate_intel.c p4tcc.c powernow.c
.endif
.if ${MACHINE} == "i386"
diff --git a/sys/sys/cpu.h b/sys/sys/cpu.h
index 8a74e470efc..d117345b6bf 100644
--- a/sys/sys/cpu.h
+++ b/sys/sys/cpu.h
@@ -120,11 +120,16 @@ TAILQ_HEAD(cf_level_lst, cf_level);
* information about settings but rely on another machine-dependent driver
* for actually performing the frequency transition (e.g., ACPI performance
* states of type "functional fixed hardware.")
+ *
+ * The "uncached" flag tells CPUFREQ_DRV_GET to try obtaining the real
+ * instantaneous frequency from the underlying hardware regardless of cached
+ * state. It is probably a bug to not combine this with "info only"
*/
#define CPUFREQ_TYPE_MASK 0xffff
#define CPUFREQ_TYPE_RELATIVE (1<<0)
#define CPUFREQ_TYPE_ABSOLUTE (1<<1)
#define CPUFREQ_FLAG_INFO_ONLY (1<<16)
+#define CPUFREQ_FLAG_UNCACHED (1<<17)
/*
* When setting a level, the caller indicates the priority of this request.
diff --git a/sys/x86/cpufreq/est.c b/sys/x86/cpufreq/est.c
index 0cd0358e161..18637c563b9 100644
--- a/sys/x86/cpufreq/est.c
+++ b/sys/x86/cpufreq/est.c
@@ -50,6 +50,8 @@ __FBSDID("$FreeBSD$");
#include <dev/acpica/acpivar.h>
#include "acpi_if.h"
+extern bool intel_speed_shift;
+
/* Status/control registers (from the IA-32 System Programming Guide). */
#define MSR_PERF_STATUS 0x198
#define MSR_PERF_CTL 0x199
@@ -916,6 +918,10 @@ est_identify(driver_t *driver, device_t parent)
{
device_t child;
+ /* Skip if the Intel hwpstate driver is handling this CPU. */
+ if (intel_speed_shift)
+ return;
+
/* Make sure we're not being doubly invoked. */
if (device_find_child(parent, "est", -1) != NULL)
return;
diff --git a/sys/x86/cpufreq/hwpstate.c b/sys/x86/cpufreq/hwpstate_amd.c
similarity index 96%
rename from sys/x86/cpufreq/hwpstate.c
rename to sys/x86/cpufreq/hwpstate_amd.c
index ab90ac2b327..4a23115736b 100644
--- a/sys/x86/cpufreq/hwpstate.c
+++ b/sys/x86/cpufreq/hwpstate_amd.c
@@ -124,11 +124,11 @@ static int hwpstate_get_info_from_msr(device_t dev);
static int hwpstate_goto_pstate(device_t dev, int pstate_id);
static int hwpstate_verbose;
-SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN,
+SYSCTL_INT(_debug, OID_AUTO, hwpstate_amd_verbose, CTLFLAG_RWTUN,
&hwpstate_verbose, 0, "Debug hwpstate");
static int hwpstate_verify;
-SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN,
+SYSCTL_INT(_debug, OID_AUTO, hwpstate_amd_verify, CTLFLAG_RWTUN,
&hwpstate_verify, 0, "Verify P-state after setting");
static device_method_t hwpstate_methods[] = {
@@ -151,14 +151,14 @@ static device_method_t hwpstate_methods[] = {
{0, 0}
};
-static devclass_t hwpstate_devclass;
-static driver_t hwpstate_driver = {
- "hwpstate",
+static devclass_t hwpstate_amd_devclass;
+static driver_t hwpstate_amd_driver = {
+ "hwpstate_amd",
hwpstate_methods,
sizeof(struct hwpstate_softc),
};
-DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);
+DRIVER_MODULE(hwpstate_amd, cpu, hwpstate_amd_driver, hwpstate_amd_devclass,
+    0, 0);
/*
* Go to Px-state on all cpus considering the limit.
@@ -312,7 +312,7 @@ static void
hwpstate_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "hwpstate", -1) != NULL)
+ if (device_find_child(parent, "hwpstate_amd", -1) != NULL)
return;
if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10)
@@ -326,10 +326,10 @@ hwpstate_identify(driver_t *driver, device_t parent)
return;
}
- if (resource_disabled("hwpstate", 0))
+ if (resource_disabled("hwpstate_amd", 0))
return;
- if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
+ if (BUS_ADD_CHILD(parent, 10, "hwpstate_amd", -1) == NULL)
device_printf(parent, "hwpstate: add child failed\n");
}
diff --git a/sys/x86/cpufreq/hwpstate_intel.c b/sys/x86/cpufreq/hwpstate_intel.c
new file mode 100644
index 00000000000..368d7647d89
--- /dev/null
+++ b/sys/x86/cpufreq/hwpstate_intel.c
@@ -0,0 +1,508 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/sbuf.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/smp.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/cputypes.h>
+#include <machine/specialreg.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+#include "acpi_if.h"
+#include "cpufreq_if.h"
+
+extern uint64_t tsc_freq;
+
+bool intel_speed_shift = true;
+SYSCTL_BOOL(_machdep, OID_AUTO, intel_speed_shift, CTLFLAG_RDTUN, &intel_speed_shift,
+ 0, "Enable Intel Speed Shift (HWP)");
+
+static void intel_hwpstate_identify(driver_t *driver, device_t parent);
+static int intel_hwpstate_probe(device_t dev);
+static int intel_hwpstate_attach(device_t dev);
+static int intel_hwpstate_detach(device_t dev);
+
+static int intel_hwpstate_get(device_t dev, struct cf_setting *cf);
+static int intel_hwpstate_type(device_t dev, int *type);
+
+static device_method_t intel_hwpstate_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, intel_hwpstate_identify),
+ DEVMETHOD(device_probe, intel_hwpstate_probe),
+ DEVMETHOD(device_attach, intel_hwpstate_attach),
+ DEVMETHOD(device_detach, intel_hwpstate_detach),
+
+ /* cpufreq interface */
+ DEVMETHOD(cpufreq_drv_get, intel_hwpstate_get),
+ DEVMETHOD(cpufreq_drv_type, intel_hwpstate_type),
+
+ DEVMETHOD_END
+};
+
+struct hwp_softc {
+ device_t dev;
+ bool hwp_notifications;
+ bool hwp_activity_window;
+ bool hwp_pref_ctrl;
+ bool hwp_pkg_ctrl;
+
+ uint64_t req; /* Cached copy of last request */
+
+ uint8_t high;
+ uint8_t guaranteed;
+ uint8_t efficient;
+ uint8_t low;
+};
+
+static devclass_t hwpstate_intel_devclass;
+static driver_t hwpstate_intel_driver = {
+ "hwpstate_intel",
+ intel_hwpstate_methods,
+ sizeof(struct hwp_softc),
+};
+
+/*
+ * NB: This must run before the est and acpi_perf modules!
+ *
+ * If a user opts in to HWP but the CPU doesn't support it, we need to find
+ * that out before est loads, or else we won't be able to use est as a
+ * backup.
+ */
+DRIVER_MODULE_ORDERED(hwpstate_intel, cpu, hwpstate_intel_driver,
+ hwpstate_intel_devclass, 0, 0, SI_ORDER_FIRST);
+
+static int
+intel_hwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev;
+ struct pcpu *pc;
+ struct sbuf *sb;
+ struct hwp_softc *sc;
+ uint64_t data, data2;
+ int ret;
+
+ sc = (struct hwp_softc *)arg1;
+ dev = sc->dev;
+
+ pc = cpu_get_pcpu(dev);
+ if (pc == NULL)
+ return (ENXIO);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, 1024, req);
+ sbuf_putc(sb, '\n');
+ thread_lock(curthread);
+ sched_bind(curthread, pc->pc_cpuid);
+ thread_unlock(curthread);
+
+ rdmsr_safe(MSR_IA32_PM_ENABLE, &data);
+ sbuf_printf(sb, "CPU%d: HWP %sabled\n", pc->pc_cpuid,
+ ((data & 1) ? "En" : "Dis"));
+
+ if (data == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &data);
+ sbuf_printf(sb, "\tHighest Performance: %03lu\n", data & 0xff);
+ sbuf_printf(sb, "\tGuaranteed Performance: %03lu\n", (data >> 8) & 0xff);
+ sbuf_printf(sb, "\tEfficient Performance: %03lu\n", (data >> 16) & 0xff);
+ sbuf_printf(sb, "\tLowest Performance: %03lu\n", (data >> 24) & 0xff);
+
+ rdmsr_safe(MSR_IA32_HWP_REQUEST, &data);
+ if (sc->hwp_pkg_ctrl && (data & IA32_HWP_REQUEST_PACKAGE_CONTROL)) {
+ rdmsr_safe(MSR_IA32_HWP_REQUEST_PKG, &data2);
+ }
+
+ sbuf_putc(sb, '\n');
+
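+/*
+ * Print the named field from the per-thread request MSR (data) unless
+ * package control is supported and the field's per-thread valid bit is
+ * clear, in which case the package-level request (data2) applies.
+ */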
+#define pkg_print(x, name, offset) do { \
+ if (!sc->hwp_pkg_ctrl || (data & x) != 0) \
+ sbuf_printf(sb, "\t%s: %03lu\n", name, (data >> offset) & 0xff);\
+ else \
+ sbuf_printf(sb, "\t%s: %03lu\n", name, (data2 >> offset) & 0xff);\
+} while (0)
+
+ pkg_print(IA32_HWP_REQUEST_EPP_VALID,
+ "Requested Efficiency Performance Preference", 24);
+ pkg_print(IA32_HWP_REQUEST_DESIRED_VALID,
+ "Requested Desired Performance", 16);
+ pkg_print(IA32_HWP_REQUEST_MAXIMUM_VALID,
+ "Requested Maximum Performance", 8);
+ pkg_print(IA32_HWP_REQUEST_MINIMUM_VALID,
+ "Requested Minimum Performance", 0);
+#undef pkg_print
+
+ sbuf_putc(sb, '\n');
+
+out:
+ thread_lock(curthread);
+ sched_unbind(curthread);
+ thread_unlock(curthread);
+
+ ret = sbuf_finish(sb);
+ sbuf_delete(sb);
+
+ return (ret);
+}
+
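+/*
+ * The EPP field is 8 bits wide in the MSR; these helpers convert between
+ * the raw 0-255 hardware range and the 0-100 scale exposed via sysctl.
+ */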
+static inline int
+__percent_to_raw(int x)
+{
+
+ MPASS(x <= 100 && x >= 0);
+ return (0xff * x / 100);
+}
+
+static inline int
+__raw_to_percent(int x)
+{
+
+ MPASS(x <= 0xff && x >= 0);
+ return (x * 100 / 0xff);
+}
+
+static int
+sysctl_epp_select(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev;
+ struct pcpu *pc;
+ uint64_t requested;
+ uint32_t val;
+ int ret;
+
+ dev = oidp->oid_arg1;
+ pc = cpu_get_pcpu(dev);
+ if (pc == NULL)
+ return (ENXIO);
+
+ thread_lock(curthread);
+ sched_bind(curthread, pc->pc_cpuid);
+ thread_unlock(curthread);
+
+ rdmsr_safe(MSR_IA32_HWP_REQUEST, &requested);
+ val = (requested & IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE) >> 24;
+ val = __raw_to_percent(val);
+
+ MPASS(val >= 0 && val <= 100);
+
+ ret = sysctl_handle_int(oidp, &val, 0, req);
+ if (ret || req->newptr == NULL)
+ goto out;
+
+ if (val < 0)
+ val = 0;
+ if (val > 100)
+ val = 100;
+
+ val = __percent_to_raw(val);
+
+ requested &= ~IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE;
+ requested |= val << 24;
+
+ wrmsr_safe(MSR_IA32_HWP_REQUEST, requested);
+
+out:
+ thread_lock(curthread);
+ sched_unbind(curthread);
+ thread_unlock(curthread);
+
+ return (ret);
+}
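+
+/*
+ * Usage sketch (assuming unit 0): a value of 0 biases the hardware fully
+ * toward performance, 100 fully toward energy efficiency:
+ *
+ *	sysctl dev.hwpstate_intel.0.epp=100
+ */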
+
+static void
+intel_hwpstate_identify(driver_t *driver, device_t parent)
+{
+ uint32_t regs[4];
+
+ if (!intel_speed_shift)
+ return;
+
+ if (device_find_child(parent, "hwpstate_intel", -1) != NULL) {
+ intel_speed_shift = false;
+ return;
+ }
+
+ if (cpu_vendor_id != CPU_VENDOR_INTEL) {
+ intel_speed_shift = false;
+ return;
+ }
+
+ if (resource_disabled("hwpstate_intel", 0)) {
+ intel_speed_shift = false;
+ return;
+ }
+
+ /*
+ * Intel SDM 14.4.1 (HWP Programming Interfaces):
+ * The CPUID instruction allows software to discover the presence of
+ * HWP support in an Intel processor. Specifically, executing CPUID
+ * with EAX=06H as input returns 5 feature flags covering the
+ * following aspects in bits 7 through 11 of CPUID.06H:EAX.
+ */
+
+ if (cpu_high < 6)
+ goto out;
+
+ /*
+ * Intel SDM 14.4.1 (HWP Programming Interfaces):
+ * Availability of HWP baseline resource and capability,
+ * CPUID.06H:EAX[bit 7]: If this bit is set, HWP provides several new
+ * architectural MSRs: IA32_PM_ENABLE, IA32_HWP_CAPABILITIES,
+ * IA32_HWP_REQUEST, IA32_HWP_STATUS.
+ */
+
+ do_cpuid(6, regs);
+ if ((regs[0] & CPUTPM1_HWP) == 0)
+ goto out;
+
+ if (BUS_ADD_CHILD(parent, 10, "hwpstate_intel", -1) == NULL)
+ goto out;
+
+ device_printf(parent, "hwpstate registered");
+ return;
+
+out:
+ device_printf(parent, "Speed Shift unavailable. Falling back to est\n");
+ intel_speed_shift = false;
+}
+
+static int
+intel_hwpstate_probe(device_t dev)
+{
+ device_t perf_dev;
+ int ret, type;
+
+ /*
+ * It is currently impossible for a conflicting cpufreq driver to be loaded
+ * at this point, since loading is gated by the intel_speed_shift boolean.
+ * However, if the knobs controlling cpufreq are ever made more flexible,
+ * or INFO_ONLY drivers are permitted, this check makes sure things keep
+ * working properly.
+ *
+ * IOW: This is a no-op for now.
+ */
+ perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
+ if (perf_dev && device_is_attached(perf_dev)) {
+ ret = CPUFREQ_DRV_TYPE(perf_dev, &type);
+ if (ret == 0) {
+ if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
+ device_printf(dev, "Avoiding acpi_perf\n");
+ return (ENXIO);
+ }
+ }
+ }
+
+ perf_dev = device_find_child(device_get_parent(dev), "est", -1);
+ if (perf_dev && device_is_attached(perf_dev)) {
+ ret = CPUFREQ_DRV_TYPE(perf_dev, &type);
+ if (ret == 0) {
+ if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
+ device_printf(dev, "Avoiding EST\n");
+ return (ENXIO);
+ }
+ }
+ }
+
+ device_set_desc(dev, "Intel Speed Shift");
+ return (BUS_PROBE_DEFAULT);
+}
+
+/* FIXME: Need to support PKG variant */
+static int
+set_autonomous_hwp(struct hwp_softc *sc)
+{
+ struct pcpu *pc;
+ device_t dev;
+ uint64_t caps;
+ int ret;
+
+ dev = sc->dev;
+
+ pc = cpu_get_pcpu(dev);
+ if (pc == NULL)
+ return (ENXIO);
+
+ thread_lock(curthread);
+ sched_bind(curthread, pc->pc_cpuid);
+ thread_unlock(curthread);
+
+ /* XXX: Many MSRs aren't readable until feature is enabled */
+ ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
+ if (ret) {
+ device_printf(dev, "Failed to enable HWP for cpu%d (%d)\n",
+ pc->pc_cpuid, ret);
+ goto out;
+ }
+
+ ret = rdmsr_safe(MSR_IA32_HWP_REQUEST, &sc->req);
+ if (ret)
+ goto out;
+
+ ret = rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &caps);
+ if (ret)
+ goto out;
+
+ sc->high = IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE(caps);
+ sc->guaranteed = IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE(caps);
+ sc->efficient = IA32_HWP_CAPABILITIES_EFFICIENT_PERFORMANCE(caps);
+ sc->low = IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE(caps);
+
+ /* hardware autonomous selection determines the performance target */
+ sc->req &= ~IA32_HWP_DESIRED_PERFORMANCE;
+
+ /* enable HW dynamic selection of window size */
+ sc->req &= ~IA32_HWP_ACTIVITY_WINDOW;
+
+ /* IA32_HWP_REQUEST.Minimum_Performance = IA32_HWP_CAPABILITIES.Lowest_Performance */
+ sc->req &= ~IA32_HWP_MINIMUM_PERFORMANCE;
+ sc->req |= sc->low;
+
+ /* IA32_HWP_REQUEST.Maximum_Performance = IA32_HWP_CAPABILITIES.Highest_Performance. */
+ sc->req &= ~IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE;
+ sc->req |= sc->high << 8;
+
+ ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
+ if (ret) {
+ device_printf(dev,
+ "Failed to setup autonomous HWP for cpu%d (file a bug)\n",
+ pc->pc_cpuid);
+ }
+
+out:
+ thread_lock(curthread);
+ sched_unbind(curthread);
+ thread_unlock(curthread);
+
+ return (ret);
+}
+
+static int
+intel_hwpstate_attach(device_t dev)
+{
+ struct hwp_softc *sc;
+ uint32_t regs[4];
+ int ret;
+
+ KASSERT(device_find_child(device_get_parent(dev), "est", -1) == NULL,
+ ("EST driver already loaded"));
+
+ KASSERT(device_find_child(device_get_parent(dev), "acpi_perf", -1) == NULL,
+ ("ACPI driver already loaded"));
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ do_cpuid(6, regs);
+ if (regs[0] & CPUTPM1_HWP_NOTIFICATION)
+ sc->hwp_notifications = true;
+ if (regs[0] & CPUTPM1_HWP_ACTIVITY_WINDOW)
+ sc->hwp_activity_window = true;
+ if (regs[0] & CPUTPM1_HWP_PERF_PREF)
+ sc->hwp_pref_ctrl = true;
+ if (regs[0] & CPUTPM1_HWP_PKG)
+ sc->hwp_pkg_ctrl = true;
+
+ ret = set_autonomous_hwp(sc);
+ if (ret)
+ return (ret);
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO, device_get_nameunit(dev),
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP,
+ sc, 0, intel_hwp_dump_sysctl_handler, "A", "");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "epp", CTLTYPE_INT | CTLFLAG_RWTUN, dev, sizeof(dev),
+ sysctl_epp_select, "I",
+ "Efficiency/Performance Preference (0-100)");
+
+ return (cpufreq_register(dev));
+}
+
+static int
+intel_hwpstate_detach(device_t dev)
+{
+
+ return (cpufreq_unregister(dev));
+}
+
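+/*
+ * With autonomous HWP the hardware picks the operating point itself, so
+ * there is no driver-programmed frequency to report; estimate the current
+ * clock rate instead and report everything else as unknown.
+ */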
+static int
+intel_hwpstate_get(device_t dev, struct cf_setting *set)
+{
+ struct pcpu *pc;
+ uint64_t rate;
+ int ret;
+
+ if (set == NULL)
+ return (EINVAL);
+
+ pc = cpu_get_pcpu(dev);
+ if (pc == NULL)
+ return (ENXIO);
+
+ memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
+ set->dev = dev;
+
+ ret = cpu_est_clockrate(pc->pc_cpuid, &rate);
+ if (ret == 0)
+ set->freq = rate / 1000000;
+
+ set->volts = CPUFREQ_VAL_UNKNOWN;
+ set->power = CPUFREQ_VAL_UNKNOWN;
+ set->lat = CPUFREQ_VAL_UNKNOWN;
+
+ return (0);
+}
+
+static int
+intel_hwpstate_type(device_t dev, int *type)
+{
+ if (type == NULL)
+ return (EINVAL);
+ *type = CPUFREQ_TYPE_ABSOLUTE | CPUFREQ_FLAG_INFO_ONLY |
+     CPUFREQ_FLAG_UNCACHED;
+
+ return (0);
+}
diff --git a/sys/x86/include/specialreg.h b/sys/x86/include/specialreg.h
index ec88deaf68c..91fb264fb85 100644
--- a/sys/x86/include/specialreg.h
+++ b/sys/x86/include/specialreg.h
@@ -189,6 +189,12 @@
#define CPUTPM1_SENSOR 0x00000001
#define CPUTPM1_TURBO 0x00000002
#define CPUTPM1_ARAT 0x00000004
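+/* HWP (Hardware P-states) feature flags, CPUID.06H:EAX bits 7-11 and 17. */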
+#define CPUTPM1_HWP 0x00000080
+#define CPUTPM1_HWP_NOTIFICATION 0x00000100
+#define CPUTPM1_HWP_ACTIVITY_WINDOW 0x00000200
+#define CPUTPM1_HWP_PERF_PREF 0x00000400
+#define CPUTPM1_HWP_PKG 0x00000800
+#define CPUTPM1_HWP_FLEXIBLE 0x00020000
#define CPUTPM2_EFFREQ 0x00000001
/* Intel Processor Trace CPUID. */
@@ -543,7 +549,14 @@
#define MSR_DRAM_ENERGY_STATUS 0x619
#define MSR_PP0_ENERGY_STATUS 0x639
#define MSR_PP1_ENERGY_STATUS 0x641
+#define MSR_PPERF 0x64e
#define MSR_TSC_DEADLINE 0x6e0 /* Writes are not serializing */
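+/* Hardware P-state (HWP) interface MSRs. */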
+#define MSR_IA32_PM_ENABLE 0x770
+#define MSR_IA32_HWP_CAPABILITIES 0x771
+#define MSR_IA32_HWP_REQUEST_PKG 0x772
+#define MSR_IA32_HWP_INTERRUPT 0x773
+#define MSR_IA32_HWP_REQUEST 0x774
+#define MSR_IA32_HWP_STATUS 0x777
/*
* VMX MSRs
@@ -720,6 +733,25 @@
/* MSR IA32_FLUSH_CMD */
#define IA32_FLUSH_CMD_L1D 0x00000001
+/* MSR IA32_HWP_CAPABILITIES */
+#define IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE(x) (((x) >> 0) & 0xff)
+#define IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE(x) (((x) >> 8) & 0xff)
+#define IA32_HWP_CAPABILITIES_EFFICIENT_PERFORMANCE(x) (((x) >> 16) & 0xff)
+#define IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE(x) (((x) >> 24) & 0xff)
+
+/* MSR IA32_HWP_REQUEST */
+#define IA32_HWP_REQUEST_MINIMUM_VALID (1ULL << 63)
+#define IA32_HWP_REQUEST_MAXIMUM_VALID (1ULL << 62)
+#define IA32_HWP_REQUEST_DESIRED_VALID (1ULL << 61)
+#define IA32_HWP_REQUEST_EPP_VALID (1ULL << 60)
+#define IA32_HWP_REQUEST_ACTIVITY_WINDOW_VALID (1ULL << 59)
+#define IA32_HWP_REQUEST_PACKAGE_CONTROL (1ULL << 42)
+#define IA32_HWP_ACTIVITY_WINDOW (0x3ffULL << 32)
+#define IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE (0xffULL << 24)
+#define IA32_HWP_DESIRED_PERFORMANCE (0xffULL << 16)
+#define IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE (0xffULL << 8)
+#define IA32_HWP_MINIMUM_PERFORMANCE (0xffULL << 0)
+
/*
* PAT modes.
*/
