diff --git a/sys/dev/acpica/acpi_perf.c b/sys/dev/acpica/acpi_perf.c index 23070006c0f0..ab17a561caef 100644 --- a/sys/dev/acpica/acpi_perf.c +++ b/sys/dev/acpica/acpi_perf.c @@ -1,540 +1,545 @@ /*- * Copyright (c) 2003-2005 Nate Lawson (SDG) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi.h" #include #include "cpufreq_if.h" /* * Support for ACPI processor performance states (Px) according to * section 8.3.3 of the ACPI 2.0c specification. 
 */

/*
 * One ACPI _PSS performance state (Px) entry, fields in the order the
 * _PSS package supplies them (see acpi_perf_evaluate).
 */
struct acpi_px {
	uint32_t	 core_freq;	/* Core frequency, MHz. */
	uint32_t	 power;		/* Typical power draw for this state. */
	uint32_t	 trans_lat;	/* Transition latency (used as set->lat). */
	uint32_t	 bm_lat;	/* Bus master latency. */
	uint32_t	 ctrl_val;	/* Value written to PERF_CTL to select. */
	uint32_t	 sts_val;	/* Value expected in PERF_STATUS after. */
};

/* Offsets in struct cf_setting array for storing driver-specific values. */
#define PX_SPEC_CONTROL	0
#define PX_SPEC_STATUS	1

#define MAX_PX_STATES	16

/* Per-device softc for the acpi_perf driver. */
struct acpi_perf_softc {
	device_t	 dev;
	ACPI_HANDLE	 handle;
	struct resource	*perf_ctrl;	/* Set new performance state. */
	int		 perf_ctrl_type; /* Resource type for perf_ctrl. */
	struct resource	*perf_status;	/* Check that transition succeeded. */
	int		 perf_sts_type;	/* Resource type for perf_status. */
	struct acpi_px	*px_states;	/* ACPI perf states. */
	uint32_t	 px_count;	/* Total number of perf states. */
	uint32_t	 px_max_avail;	/* Lowest index state available. */
	int		 px_curr_state;	/* Active state index. */
	int		 px_rid;	/* Next rid for perf register alloc. */
	int		 info_only;	/* Can we set new states? */
};

/* 32-bit access to the control/status registers via bus_space. */
#define PX_GET_REG(reg) 				\
	(bus_space_read_4(rman_get_bustag((reg)), 	\
	    rman_get_bushandle((reg)), 0))
#define PX_SET_REG(reg, val)				\
	(bus_space_write_4(rman_get_bustag((reg)), 	\
	    rman_get_bushandle((reg)), 0, (val)))

#define ACPI_NOTIFY_PERF_STATES		0x80	/* _PSS changed.
 */

static void	acpi_perf_identify(driver_t *driver, device_t parent);
static int	acpi_perf_probe(device_t dev);
static int	acpi_perf_attach(device_t dev);
static int	acpi_perf_detach(device_t dev);
static int	acpi_perf_evaluate(device_t dev);
static int	acpi_px_to_set(device_t dev, struct acpi_px *px,
		    struct cf_setting *set);
static void	acpi_px_available(struct acpi_perf_softc *sc);
static void	acpi_px_startup(void *arg);
static void	acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int	acpi_px_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	acpi_px_set(device_t dev, const struct cf_setting *set);
static int	acpi_px_get(device_t dev, struct cf_setting *set);
static int	acpi_px_type(device_t dev, int *type);

static device_method_t acpi_perf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	acpi_perf_identify),
	DEVMETHOD(device_probe,		acpi_perf_probe),
	DEVMETHOD(device_attach,	acpi_perf_attach),
	DEVMETHOD(device_detach,	acpi_perf_detach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	acpi_px_set),
	DEVMETHOD(cpufreq_drv_get,	acpi_px_get),
	DEVMETHOD(cpufreq_drv_type,	acpi_px_type),
	DEVMETHOD(cpufreq_drv_settings,	acpi_px_settings),
	{0, 0}
};

static driver_t acpi_perf_driver = {
	"acpi_perf",
	acpi_perf_methods,
	sizeof(struct acpi_perf_softc),
};

static devclass_t acpi_perf_devclass;
DRIVER_MODULE(acpi_perf, cpu, acpi_perf_driver, acpi_perf_devclass, 0, 0);
MODULE_DEPEND(acpi_perf, acpi, 1, 1, 1);

MALLOC_DEFINE(M_ACPIPERF, "acpi_perf", "ACPI Performance states");

/*
 * Identify routine: add an acpi_perf child under each cpu device whose
 * ACPI Processor object implements _PSS.
 */
static void
acpi_perf_identify(driver_t *driver, device_t parent)
{
	ACPI_HANDLE handle;

	/* Make sure we're not being doubly invoked. */
	if (device_find_child(parent, "acpi_perf", -1) != NULL)
		return;

	/*
	 * Get the handle for the Processor object and check for perf states.
	 */
	handle = acpi_get_handle(parent);
	if (handle == NULL)
		return;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PSS", NULL, NULL)))
		return;

	/*
	 * Add a child to every CPU that has the right methods.  In future
	 * versions of the ACPI spec, CPUs can have different settings.
	 */
	if (BUS_ADD_CHILD(parent, 0, "acpi_perf", -1) == NULL)
		device_printf(parent, "add acpi_perf child failed\n");
}

/*
 * Probe: verify the _PCT control register package.  If the registers are
 * "functional fixed hardware" we attach quietly as an info-only provider.
 */
static int
acpi_perf_probe(device_t dev)
{
	ACPI_HANDLE handle;
	ACPI_OBJECT *pkg;
	struct resource *res;
	ACPI_BUFFER buf;
	int error, rid, type;

	if (resource_disabled("acpi_perf", 0))
		return (ENXIO);

	/*
	 * Check the performance state registers.  If they are of type
	 * "functional fixed hardware", we attach quietly since we will
	 * only be providing information on settings to other drivers.
	 */
	error = ENXIO;
	handle = acpi_get_handle(dev);
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, "_PCT", NULL, &buf)))
		return (error);
	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (ACPI_PKG_VALID(pkg, 2)) {
		rid = 0;
		error = acpi_PkgGas(dev, pkg, 0, &type, &rid, &res);
		switch (error) {
		case 0:
			/* Probe only: release the register immediately. */
			bus_release_resource(dev, type, rid, res);
			device_set_desc(dev, "ACPI CPU Frequency Control");
			break;
		case EOPNOTSUPP:
			/* FFixedHW register: info-only, attach quietly. */
			device_quiet(dev);
			error = 0;
			break;
		}
	}
	AcpiOsFree(buf.Pointer);

	return (error);
}

/*
 * Attach: parse _PSS/_PCT and register with the cpufreq framework unless
 * we can only provide info (FFixedHW registers).
 */
static int
acpi_perf_attach(device_t dev)
{
	struct acpi_perf_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);
	sc->px_max_avail = 0;
	sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
	if (acpi_perf_evaluate(dev) != 0)
		return (ENXIO);
	/* Defer the SMI handshake until interrupts/ACPI are fully up. */
	AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_px_startup, NULL);
	if (!sc->info_only)
		cpufreq_register(dev);

	return (0);
}

static int
acpi_perf_detach(device_t dev)
{
	/* TODO: teardown registers, remove notify handler. */
	return (ENXIO);
}

/* Probe and setup any valid performance states (Px).
 */
static int
acpi_perf_evaluate(device_t dev)
{
	struct acpi_perf_softc *sc;
	ACPI_BUFFER buf;
	ACPI_OBJECT *pkg, *res;
	ACPI_STATUS status;
	int error, i, j;
	uint32_t *p;

	/* Get the control values and parameters for each state. */
	error = ENXIO;
	sc = device_get_softc(dev);
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->handle, "_PSS", NULL, &buf);
	if (ACPI_FAILURE(status))
		return (ENXIO);
	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (!ACPI_PKG_VALID(pkg, 1)) {
		device_printf(dev, "invalid top level _PSS package\n");
		goto out;
	}
	sc->px_count = pkg->Package.Count;
	sc->px_states = malloc(sc->px_count * sizeof(struct acpi_px),
	    M_ACPIPERF, M_WAITOK | M_ZERO);
	/* NOTE(review): M_WAITOK malloc never returns NULL; check is dead. */
	if (sc->px_states == NULL)
		goto out;

	/*
	 * Each state is a package of {CoreFreq, Power, TransitionLatency,
	 * BusMasterLatency, ControlVal, StatusVal}, sorted from highest
	 * performance to lowest.
	 */
	for (i = 0; i < sc->px_count; i++) {
		res = &pkg->Package.Elements[i];
		if (!ACPI_PKG_VALID(res, 6)) {
			device_printf(dev, "invalid _PSS package\n");
			continue;
		}
		/* Fill the six uint32_t fields of acpi_px in order. */
		p = &sc->px_states[i].core_freq;
		for (j = 0; j < 6; j++, p++)
			acpi_PkgInt32(res, j, p);
	}
	AcpiOsFree(buf.Pointer);

	/* Get the control and status registers (one of each). */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->handle, "_PCT", NULL, &buf);
	if (ACPI_FAILURE(status))
		goto out;

	/* Check the package of two registers, each a Buffer in GAS format. */
	pkg = (ACPI_OBJECT *)buf.Pointer;
	if (!ACPI_PKG_VALID(pkg, 2)) {
		device_printf(dev, "invalid perf register package\n");
		goto out;
	}

	error = acpi_PkgGas(sc->dev, pkg, 0, &sc->perf_ctrl_type, &sc->px_rid,
	    &sc->perf_ctrl);
	if (error) {
		/*
		 * If the register is of type FFixedHW, we can only return
		 * info, we can't get or set new settings.
		 */
		if (error == EOPNOTSUPP) {
			sc->info_only = TRUE;
			error = 0;
		} else
			device_printf(dev, "failed in PERF_CTL attach\n");
		goto out;
	}
	sc->px_rid++;

	error = acpi_PkgGas(sc->dev, pkg, 1, &sc->perf_sts_type, &sc->px_rid,
	    &sc->perf_status);
	if (error) {
		if (error == EOPNOTSUPP) {
			sc->info_only = TRUE;
			error = 0;
		} else
			device_printf(dev, "failed in PERF_STATUS attach\n");
		goto out;
	}
	sc->px_rid++;

	/* Get our current limit and register for notifies. */
	acpi_px_available(sc);
	AcpiInstallNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY,
	    acpi_px_notify, sc);
	error = 0;

out:
	if (error) {
		if (sc->px_states)
			free(sc->px_states, M_ACPIPERF);
		sc->px_count = 0;
	}
	if (buf.Pointer)
		AcpiOsFree(buf.Pointer);
	return (error);
}

/* Deferred startup: tell the platform the OS now owns P-state control. */
static void
acpi_px_startup(void *arg)
{

	/* Signal to the platform that we are taking over CPU control. */
	if (AcpiGbl_FADT->PstateCnt == 0)
		return;
	ACPI_LOCK(acpi);
	AcpiOsWritePort(AcpiGbl_FADT->SmiCmd, AcpiGbl_FADT->PstateCnt, 8);
	ACPI_UNLOCK(acpi);
}

/* Notify handler: re-query _PPC when the platform changes the Px limit. */
static void
acpi_px_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
	struct acpi_perf_softc *sc;

	sc = context;
	if (notify != ACPI_NOTIFY_PERF_STATES)
		return;

	acpi_px_available(sc);

	/* TODO: Implement notification when frequency changes. */
}

/*
 * Find the highest currently-supported performance state.
 * This can be called at runtime (e.g., due to a docking event) at
 * the request of a Notify on the processor object.
 */
static void
acpi_px_available(struct acpi_perf_softc *sc)
{
	ACPI_STATUS status;
	struct cf_setting set;

	status = acpi_GetInteger(sc->handle, "_PPC", &sc->px_max_avail);

	/* If the old state is too high, set current state to the new max.
 */
	if (ACPI_SUCCESS(status)) {
		if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN &&
		    sc->px_curr_state > sc->px_max_avail) {
			acpi_px_to_set(sc->dev,
			    &sc->px_states[sc->px_max_avail], &set);
			acpi_px_set(sc->dev, &set);
		}
	} else
		/* No _PPC limit: all states are available. */
		sc->px_max_avail = 0;
}

/* Convert an internal acpi_px state to the generic cf_setting format. */
static int
acpi_px_to_set(device_t dev, struct acpi_px *px, struct cf_setting *set)
{

	if (px == NULL || set == NULL)
		return (EINVAL);

	set->freq = px->core_freq;
	set->power = px->power;
	/* XXX Include BM latency too? */
	set->lat = px->trans_lat;
	set->volts = CPUFREQ_VAL_UNKNOWN;
	set->dev = dev;
	set->spec[PX_SPEC_CONTROL] = px->ctrl_val;
	set->spec[PX_SPEC_STATUS] = px->sts_val;

	return (0);
}

/* cpufreq_drv_settings: export all states at or below the _PPC limit. */
static int
acpi_px_settings(device_t dev, struct cf_setting *sets, int *count)
{
	struct acpi_perf_softc *sc;
	int x, y;

	sc = device_get_softc(dev);
	if (sets == NULL || count == NULL)
		return (EINVAL);
	if (*count < sc->px_count - sc->px_max_avail)
		return (E2BIG);

	/* Return a list of settings that are currently valid. */
	y = 0;
	for (x = sc->px_max_avail; x < sc->px_count; x++, y++)
		acpi_px_to_set(dev, &sc->px_states[x], &sets[y]);
	*count = sc->px_count - sc->px_max_avail;

	return (0);
}

/* cpufreq_drv_set: select the Px state matching set->freq. */
static int
acpi_px_set(device_t dev, const struct cf_setting *set)
{
	struct acpi_perf_softc *sc;
	int i, status, sts_val, tries;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* If we can't set new states, return immediately. */
	if (sc->info_only)
		return (ENXIO);

	/* Look up appropriate state, based on frequency. */
	for (i = sc->px_max_avail; i < sc->px_count; i++) {
		if (CPUFREQ_CMP(set->freq, sc->px_states[i].core_freq))
			break;
	}
	if (i == sc->px_count)
		return (EINVAL);

	/* Write the appropriate value to the register. */
	PX_SET_REG(sc->perf_ctrl, sc->px_states[i].ctrl_val);

	/*
	 * Try for up to 10 ms to verify the desired state was selected.
	 * This is longer than the standard says (1 ms) but in some modes,
	 * systems may take longer to respond.
	 */
	sts_val = sc->px_states[i].sts_val;
	for (tries = 0; tries < 1000; tries++) {
		status = PX_GET_REG(sc->perf_status);

		/*
		 * If we match the status or the desired status is 8 bits
		 * and matches the relevant bits, assume we succeeded.  It
		 * appears some systems (IBM R32) expect byte-wide access
		 * even though the standard says the register is 32-bit.
		 */
		if (status == sts_val ||
		    ((sts_val & ~0xff) == 0 && (status & 0xff) == sts_val))
			break;
		DELAY(10);
	}
	if (tries == 1000) {
		device_printf(dev, "Px transition to %d failed\n",
		    sc->px_states[i].core_freq);
		return (ENXIO);
	}
	sc->px_curr_state = i;

	return (0);
}

/*
 * cpufreq_drv_get: report the current setting.  Uses the cached state
 * index when known, otherwise estimates the clock rate and matches it
 * against the Px table.
 */
static int
acpi_px_get(device_t dev, struct cf_setting *set)
{
	struct acpi_perf_softc *sc;
	uint64_t rate;
	int i;
	struct pcpu *pc;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* If we can't get new states, return immediately. */
	if (sc->info_only)
		return (ENXIO);

	/* If we've set the rate before, use the cached value. */
	if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN) {
		acpi_px_to_set(dev, &sc->px_states[sc->px_curr_state], set);
		return (0);
	}

	/* Otherwise, estimate and try to match against our settings. */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL)
		return (ENXIO);
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;	/* Hz -> MHz to compare with core_freq. */
	for (i = 0; i < sc->px_count; i++) {
		if (CPUFREQ_CMP(sc->px_states[i].core_freq, rate)) {
			sc->px_curr_state = i;
			acpi_px_to_set(dev, &sc->px_states[i], set);
			break;
		}
	}

	/* No match, give up. */
	if (i == sc->px_count) {
		sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
		set->freq = CPUFREQ_VAL_UNKNOWN;
	}

	return (0);
}

/* cpufreq_drv_type: absolute frequencies, possibly info-only. */
static int
acpi_px_type(device_t dev, int *type)
{
	struct acpi_perf_softc *sc;

	if (type == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	if (sc->info_only)
		*type |= CPUFREQ_FLAG_INFO_ONLY;
	return (0);
}
diff --git a/sys/dev/cpufreq/ichss.c b/sys/dev/cpufreq/ichss.c
index b8d35421583e..d38ed523482d 100644
--- a/sys/dev/cpufreq/ichss.c
+++ b/sys/dev/cpufreq/ichss.c
@@ -1,400 +1,404 @@
/*-
 * Copyright (c) 2004-2005 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* NOTE(review): header names inside <> were lost in extraction — restore
 * from the original file before building. */
#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "cpufreq_if.h"

/*
 * The SpeedStep ICH feature is a chipset-initiated voltage and frequency
 * transition available on the ICH2M, 3M, and 4M.  It is different from
 * the newer Pentium-M SpeedStep feature.  It offers only two levels of
 * frequency/voltage.  Often, the BIOS will select one of the levels via
 * SMM code during the power-on process (i.e., choose a lower level if the
 * system is off AC power.)
 */
struct ichss_softc {
	device_t	 dev;
	int		 bm_rid;	/* Bus-mastering control (PM2REG). */
	struct resource	*bm_reg;
	int		 ctrl_rid;	/* Control/status register. */
	struct resource	*ctrl_reg;
	struct cf_setting sets[2];	/* Only two settings. */
};

/* Supported PCI IDs. */
#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEV_82801BA		0x244c	/* ICH2M */
#define PCI_DEV_82801CA		0x248c	/* ICH3M */
#define PCI_DEV_82801DB		0x24cc	/* ICH4M */
#define PCI_DEV_82815BA		0x1130	/* Unsupported/buggy part */

/* PCI config registers for finding PMBASE and enabling SpeedStep. */
#define ICHSS_PMBASE_OFFSET	0x40
#define ICHSS_PMCFG_OFFSET	0xa0

/* Values and masks. */
#define ICHSS_ENABLE		(1<<3)	/* Enable SpeedStep control. */
#define ICHSS_IO_REG		0x1	/* Access register via I/O space. */
#define ICHSS_PMBASE_MASK	0xff80	/* PMBASE address bits. */
#define ICHSS_CTRL_BIT		0x1	/* 0 is high speed, 1 is low. */
#define ICHSS_BM_DISABLE	0x1

/* Offsets from PMBASE for various registers. */
#define ICHSS_BM_OFFSET		0x20
#define ICHSS_CTRL_OFFSET	0x50

/* Byte-wide access to the BM arbitration and control registers. */
#define ICH_GET_REG(reg) 				\
	(bus_space_read_1(rman_get_bustag((reg)), 	\
	    rman_get_bushandle((reg)), 0))
#define ICH_SET_REG(reg, val)				\
	(bus_space_write_1(rman_get_bustag((reg)), 	\
	    rman_get_bushandle((reg)), 0, (val)))

static int	ichss_pci_probe(device_t dev);
static int	ichss_probe(device_t dev);
static int	ichss_attach(device_t dev);
static int	ichss_detach(device_t dev);
static int	ichss_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	ichss_set(device_t dev, const struct cf_setting *set);
static int	ichss_get(device_t dev, struct cf_setting *set);
static int	ichss_type(device_t dev, int *type);

static device_method_t ichss_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ichss_probe),
	DEVMETHOD(device_attach,	ichss_attach),
	DEVMETHOD(device_detach,	ichss_detach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	ichss_set),
	DEVMETHOD(cpufreq_drv_get,	ichss_get),
	DEVMETHOD(cpufreq_drv_type,	ichss_type),
	DEVMETHOD(cpufreq_drv_settings,	ichss_settings),
	{0, 0}
};

static driver_t ichss_driver = {
	"ichss", ichss_methods, sizeof(struct ichss_softc)
};

static devclass_t ichss_devclass;
DRIVER_MODULE(ichss, cpu, ichss_driver, ichss_devclass, 0, 0);

static device_method_t ichss_pci_methods[] = {
	DEVMETHOD(device_probe,		ichss_pci_probe),
	{0, 0}
};

static driver_t ichss_pci_driver = {
	"ichss_pci", ichss_pci_methods, 0
};

static devclass_t ichss_pci_devclass;
DRIVER_MODULE(ichss_pci, pci, ichss_pci_driver, ichss_pci_devclass, 0, 0);

#if 0
#define DPRINT(x...)	printf(x)
#else
#define DPRINT(x...)
#endif

/*
 * We detect the chipset by looking for its LPC bus ID during the PCI
 * scan and reading its config registers during the probe.  However,
 * we add the ichss child under the cpu device since even though the
 * chipset provides the control, it really affects the cpu only.
 *
 * XXX This approach does not work if the module is loaded after boot.
 */
static int
ichss_pci_probe(device_t dev)
{
	device_t child, parent;
	uint32_t pmbase;
	uint16_t ss_en;

	/*
	 * TODO: add a quirk to disable if we see the 82815_MC along
	 * with the 82801BA and revision < 5.
	 */
	if (pci_get_vendor(dev) != PCI_VENDOR_INTEL ||
	    (pci_get_device(dev) != PCI_DEV_82801BA &&
	    pci_get_device(dev) != PCI_DEV_82801CA &&
	    pci_get_device(dev) != PCI_DEV_82801DB))
		return (ENXIO);

	/* Only one CPU is supported for this hardware. */
	if (devclass_get_device(ichss_devclass, 0))
		return (ENXIO);

	/*
	 * Add a child under the CPU parent.  It appears that ICH SpeedStep
	 * only requires a single CPU to set the value (since the chipset
	 * is shared by all CPUs.)  Thus, we only add a child to cpu 0.
	 */
	parent = devclass_get_device(devclass_find("cpu"), 0);
	KASSERT(parent != NULL, ("cpu parent is NULL"));
	child = BUS_ADD_CHILD(parent, 0, "ichss", 0);
	if (child == NULL) {
		device_printf(parent, "add SpeedStep child failed\n");
		return (ENXIO);
	}

	/* Find the PMBASE register from our PCI config header. */
	pmbase = pci_read_config(dev, ICHSS_PMBASE_OFFSET, sizeof(pmbase));
	if ((pmbase & ICHSS_IO_REG) == 0) {
		printf("ichss: invalid PMBASE memory type\n");
		return (ENXIO);
	}
	pmbase &= ICHSS_PMBASE_MASK;
	if (pmbase == 0) {
		printf("ichss: invalid zero PMBASE address\n");
		return (ENXIO);
	}
	DPRINT("ichss: PMBASE is %#x\n", pmbase);

	/* Add the bus master arbitration and control registers. */
	bus_set_resource(child, SYS_RES_IOPORT, 0, pmbase + ICHSS_BM_OFFSET,
	    1);
	bus_set_resource(child, SYS_RES_IOPORT, 1, pmbase + ICHSS_CTRL_OFFSET,
	    1);

	/* Activate SpeedStep control if not already enabled. */
	ss_en = pci_read_config(dev, ICHSS_PMCFG_OFFSET, sizeof(ss_en));
	if ((ss_en & ICHSS_ENABLE) == 0) {
		printf("ichss: enabling SpeedStep support\n");
		pci_write_config(dev, ICHSS_PMCFG_OFFSET,
		    ss_en | ICHSS_ENABLE, sizeof(ss_en));
	}

	/* Attach the new CPU child now.
 */
	device_probe_and_attach(child);

	/* This probe always "fails" so the PCI driver itself never attaches. */
	return (ENXIO);
}

/*
 * Probe for the cpu-attached ichss child: defer to acpi_perf (unless it
 * is info-only) and to Enhanced SpeedStep (est) if either is present.
 */
static int
ichss_probe(device_t dev)
{
	device_t est_dev, perf_dev;
	int error, type;

	if (resource_disabled("ichss", 0))
		return (ENXIO);

	/*
	 * If the ACPI perf driver has attached and is not just offering
	 * info, let it manage things.  Also, if Enhanced SpeedStep is
	 * available, don't attach.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
			return (ENXIO);
	}
	est_dev = device_find_child(device_get_parent(dev), "est", -1);
	if (est_dev && device_is_attached(est_dev))
		return (ENXIO);

	device_set_desc(dev, "SpeedStep ICH");
	/* Low priority so other cpufreq drivers win if they also probe. */
	return (-1000);
}

/* Allocate the BM arbitration and control I/O registers and register. */
static int
ichss_attach(device_t dev)
{
	struct ichss_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->bm_rid = 0;
	sc->bm_reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->bm_rid,
	    RF_ACTIVE);
	if (sc->bm_reg == NULL) {
		device_printf(dev, "failed to alloc BM arb register\n");
		return (ENXIO);
	}
	sc->ctrl_rid = 1;
	sc->ctrl_reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &sc->ctrl_rid, RF_ACTIVE);
	if (sc->ctrl_reg == NULL) {
		device_printf(dev, "failed to alloc control register\n");
		bus_release_resource(dev, SYS_RES_IOPORT, sc->bm_rid,
		    sc->bm_reg);
		return (ENXIO);
	}

	/* Setup some defaults for our exported settings. */
	sc->sets[0].freq = CPUFREQ_VAL_UNKNOWN;
	sc->sets[0].volts = CPUFREQ_VAL_UNKNOWN;
	sc->sets[0].power = CPUFREQ_VAL_UNKNOWN;
	sc->sets[0].lat = 1000;
	sc->sets[0].dev = dev;
	sc->sets[1] = sc->sets[0];

	cpufreq_register(dev);

	return (0);
}

static int
ichss_detach(device_t dev)
{
	/* TODO: teardown BM and CTRL registers. */
	return (ENXIO);
}

/* cpufreq_drv_settings: export both levels, calibrating if needed. */
static int
ichss_settings(device_t dev, struct cf_setting *sets, int *count)
{
	struct ichss_softc *sc;
	struct cf_setting set;
	int first, i;

	if (sets == NULL || count == NULL)
		return (EINVAL);
	if (*count < 2) {
		*count = 2;
		return (E2BIG);
	}
	sc = device_get_softc(dev);

	/*
	 * Estimate frequencies for both levels, temporarily switching to
	 * the other one if we haven't calibrated it yet.
	 */
	ichss_get(dev, &set);
	for (i = 0; i < 2; i++) {
		if (sc->sets[i].freq == CPUFREQ_VAL_UNKNOWN) {
			first = (i == 0) ? 1 : 0;
			ichss_set(dev, &sc->sets[i]);
			ichss_set(dev, &sc->sets[first]);
		}
	}

	bcopy(sc->sets, sets, sizeof(sc->sets));
	*count = 2;

	return (0);
}

/*
 * cpufreq_drv_set: write the requested level to the control register with
 * bus mastering disabled and interrupts off, then verify the transition.
 */
static int
ichss_set(device_t dev, const struct cf_setting *set)
{
	struct ichss_softc *sc;
	uint8_t bmval, new_val, old_val, req_val;
	uint64_t rate;
	register_t regs;

	/* Look up appropriate bit value based on frequency. */
	sc = device_get_softc(dev);
	if (CPUFREQ_CMP(set->freq, sc->sets[0].freq))
		req_val = 0;
	else if (CPUFREQ_CMP(set->freq, sc->sets[1].freq))
		req_val = ICHSS_CTRL_BIT;
	else
		return (EINVAL);
	DPRINT("ichss: requested setting %d\n", req_val);

	/* Disable interrupts and get the other register contents. */
	regs = intr_disable();
	old_val = ICH_GET_REG(sc->ctrl_reg) & ~ICHSS_CTRL_BIT;

	/*
	 * Disable bus master arbitration, write the new value to the control
	 * register, and then re-enable bus master arbitration.
	 */
	bmval = ICH_GET_REG(sc->bm_reg) | ICHSS_BM_DISABLE;
	ICH_SET_REG(sc->bm_reg, bmval);
	ICH_SET_REG(sc->ctrl_reg, old_val | req_val);
	ICH_SET_REG(sc->bm_reg, bmval & ~ICHSS_BM_DISABLE);

	/* Get the new value and re-enable interrupts. */
	new_val = ICH_GET_REG(sc->ctrl_reg);
	intr_restore(regs);

	/* Check if the desired state was indeed selected. */
	if (req_val != (new_val & ICHSS_CTRL_BIT)) {
		device_printf(sc->dev, "transition to %d failed\n", req_val);
		return (ENXIO);
	}

	/* Re-initialize our cycle counter if we don't know this new state. */
	if (sc->sets[req_val].freq == CPUFREQ_VAL_UNKNOWN) {
		cpu_est_clockrate(0, &rate);
		sc->sets[req_val].freq = rate / 1000000;
		DPRINT("ichss: set calibrated new rate of %d\n",
		    sc->sets[req_val].freq);
	}

	return (0);
}

/* cpufreq_drv_get: read the control bit and report the matching level. */
static int
ichss_get(device_t dev, struct cf_setting *set)
{
	struct ichss_softc *sc;
	uint64_t rate;
	uint8_t state;

	sc = device_get_softc(dev);
	state = ICH_GET_REG(sc->ctrl_reg) & ICHSS_CTRL_BIT;

	/* If we haven't changed settings yet, estimate the current value. */
	if (sc->sets[state].freq == CPUFREQ_VAL_UNKNOWN) {
		cpu_est_clockrate(0, &rate);
		sc->sets[state].freq = rate / 1000000;
		DPRINT("ichss: get calibrated new rate of %d\n",
		    sc->sets[state].freq);
	}
	*set = sc->sets[state];

	return (0);
}

static int
ichss_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	return (0);
}
diff --git a/sys/i386/cpufreq/est.c b/sys/i386/cpufreq/est.c
index 53b1cd73d2f0..6239332b1d4c 100644
--- a/sys/i386/cpufreq/est.c
+++ b/sys/i386/cpufreq/est.c
@@ -1,787 +1,791 @@
/*-
 * Copyright (c) 2004 Colin Percival
 * Copyright (c) 2005 Nate Lawson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted providing that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include "cpufreq_if.h" #include /* Status/control registers (from the IA-32 System Programming Guide). */ #define MSR_PERF_STATUS 0x198 #define MSR_PERF_CTL 0x199 /* Register and bit for enabling SpeedStep. */ #define MSR_MISC_ENABLE 0x1a0 #define MSR_SS_ENABLE (1<<16) /* Frequency and MSR control values. */ typedef struct { uint16_t freq; uint16_t volts; uint16_t id16; } freq_info; /* Identifying characteristics of a processor and supported frequencies. */ typedef struct { const char *vendor; uint32_t id32; uint32_t bus_clk; const freq_info *freqtab; } cpu_info; struct est_softc { device_t dev; const freq_info *freq_list; }; /* Convert MHz and mV into IDs for passing to the MSR. */ #define ID16(MHz, mV, bus_clk) \ (((MHz / bus_clk) << 8) | ((mV ? mV - 700 : 0) >> 4)) #define ID32(MHz_hi, mV_hi, MHz_lo, mV_lo, bus_clk) \ ((ID16(MHz_lo, mV_lo, bus_clk) << 16) | (ID16(MHz_hi, mV_hi, bus_clk))) /* Format for storing IDs in our table. */ #define FREQ_INFO(MHz, mV, bus_clk) \ { MHz, mV, ID16(MHz, mV, bus_clk) } #define INTEL(tab, zhi, vhi, zlo, vlo, bus_clk) \ { GenuineIntel, ID32(zhi, vhi, zlo, vlo, bus_clk), bus_clk, tab } const char GenuineIntel[] = "GenuineIntel"; /* Default bus clock value for Centrino processors. */ #define INTEL_BUS_CLK 100 /* XXX Update this if new CPUs have more settings. 
*/ #define EST_MAX_SETTINGS 10 CTASSERT(EST_MAX_SETTINGS <= MAX_SETTINGS); /* Estimate in microseconds of latency for performing a transition. */ #define EST_TRANS_LAT 10 /* * Frequency (MHz) and voltage (mV) settings. Data from the * Intel Pentium M Processor Datasheet (Order Number 252612), Table 5. * * XXX New Dothan processors have multiple VID# with different * settings for each VID#. Since we can't uniquely identify this info * without undisclosed methods from Intel, we can't support newer * processors with this table method. If ACPI Px states are supported, * we can get info from them. */ const freq_info PM17_130[] = { /* 130nm 1.70GHz Pentium M */ FREQ_INFO(1700, 1484, INTEL_BUS_CLK), FREQ_INFO(1400, 1308, INTEL_BUS_CLK), FREQ_INFO(1200, 1228, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1004, INTEL_BUS_CLK), FREQ_INFO( 600, 956, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM16_130[] = { /* 130nm 1.60GHz Pentium M */ FREQ_INFO(1600, 1484, INTEL_BUS_CLK), FREQ_INFO(1400, 1420, INTEL_BUS_CLK), FREQ_INFO(1200, 1276, INTEL_BUS_CLK), FREQ_INFO(1000, 1164, INTEL_BUS_CLK), FREQ_INFO( 800, 1036, INTEL_BUS_CLK), FREQ_INFO( 600, 956, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM15_130[] = { /* 130nm 1.50GHz Pentium M */ FREQ_INFO(1500, 1484, INTEL_BUS_CLK), FREQ_INFO(1400, 1452, INTEL_BUS_CLK), FREQ_INFO(1200, 1356, INTEL_BUS_CLK), FREQ_INFO(1000, 1228, INTEL_BUS_CLK), FREQ_INFO( 800, 1116, INTEL_BUS_CLK), FREQ_INFO( 600, 956, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM14_130[] = { /* 130nm 1.40GHz Pentium M */ FREQ_INFO(1400, 1484, INTEL_BUS_CLK), FREQ_INFO(1200, 1436, INTEL_BUS_CLK), FREQ_INFO(1000, 1308, INTEL_BUS_CLK), FREQ_INFO( 800, 1180, INTEL_BUS_CLK), FREQ_INFO( 600, 956, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM13_130[] = { /* 130nm 1.30GHz Pentium M */ FREQ_INFO(1300, 1388, INTEL_BUS_CLK), FREQ_INFO(1200, 1356, INTEL_BUS_CLK), FREQ_INFO(1000, 1292, INTEL_BUS_CLK), FREQ_INFO( 
800, 1260, INTEL_BUS_CLK), FREQ_INFO( 600, 956, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM13_LV_130[] = { /* 130nm 1.30GHz Low Voltage Pentium M */ FREQ_INFO(1300, 1180, INTEL_BUS_CLK), FREQ_INFO(1200, 1164, INTEL_BUS_CLK), FREQ_INFO(1100, 1100, INTEL_BUS_CLK), FREQ_INFO(1000, 1020, INTEL_BUS_CLK), FREQ_INFO( 900, 1004, INTEL_BUS_CLK), FREQ_INFO( 800, 988, INTEL_BUS_CLK), FREQ_INFO( 600, 956, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM12_LV_130[] = { /* 130 nm 1.20GHz Low Voltage Pentium M */ FREQ_INFO(1200, 1180, INTEL_BUS_CLK), FREQ_INFO(1100, 1164, INTEL_BUS_CLK), FREQ_INFO(1000, 1100, INTEL_BUS_CLK), FREQ_INFO( 900, 1020, INTEL_BUS_CLK), FREQ_INFO( 800, 1004, INTEL_BUS_CLK), FREQ_INFO( 600, 956, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM11_LV_130[] = { /* 130 nm 1.10GHz Low Voltage Pentium M */ FREQ_INFO(1100, 1180, INTEL_BUS_CLK), FREQ_INFO(1000, 1164, INTEL_BUS_CLK), FREQ_INFO( 900, 1100, INTEL_BUS_CLK), FREQ_INFO( 800, 1020, INTEL_BUS_CLK), FREQ_INFO( 600, 956, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM11_ULV_130[] = { /* 130 nm 1.10GHz Ultra Low Voltage Pentium M */ FREQ_INFO(1100, 1004, INTEL_BUS_CLK), FREQ_INFO(1000, 988, INTEL_BUS_CLK), FREQ_INFO( 900, 972, INTEL_BUS_CLK), FREQ_INFO( 800, 956, INTEL_BUS_CLK), FREQ_INFO( 600, 844, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM10_ULV_130[] = { /* 130 nm 1.00GHz Ultra Low Voltage Pentium M */ FREQ_INFO(1000, 1004, INTEL_BUS_CLK), FREQ_INFO( 900, 988, INTEL_BUS_CLK), FREQ_INFO( 800, 972, INTEL_BUS_CLK), FREQ_INFO( 600, 844, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; /* * Data from "Intel Pentium M Processor on 90nm Process with * 2-MB L2 Cache Datasheet", Order Number 302189, Table 5. 
*/ const freq_info PM_765A_90[] = { /* 90 nm 2.10GHz Pentium M, VID #A */ FREQ_INFO(2100, 1340, INTEL_BUS_CLK), FREQ_INFO(1800, 1276, INTEL_BUS_CLK), FREQ_INFO(1600, 1228, INTEL_BUS_CLK), FREQ_INFO(1400, 1180, INTEL_BUS_CLK), FREQ_INFO(1200, 1132, INTEL_BUS_CLK), FREQ_INFO(1000, 1084, INTEL_BUS_CLK), FREQ_INFO( 800, 1036, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_765B_90[] = { /* 90 nm 2.10GHz Pentium M, VID #B */ FREQ_INFO(2100, 1324, INTEL_BUS_CLK), FREQ_INFO(1800, 1260, INTEL_BUS_CLK), FREQ_INFO(1600, 1212, INTEL_BUS_CLK), FREQ_INFO(1400, 1180, INTEL_BUS_CLK), FREQ_INFO(1200, 1132, INTEL_BUS_CLK), FREQ_INFO(1000, 1084, INTEL_BUS_CLK), FREQ_INFO( 800, 1036, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_765C_90[] = { /* 90 nm 2.10GHz Pentium M, VID #C */ FREQ_INFO(2100, 1308, INTEL_BUS_CLK), FREQ_INFO(1800, 1244, INTEL_BUS_CLK), FREQ_INFO(1600, 1212, INTEL_BUS_CLK), FREQ_INFO(1400, 1164, INTEL_BUS_CLK), FREQ_INFO(1200, 1116, INTEL_BUS_CLK), FREQ_INFO(1000, 1084, INTEL_BUS_CLK), FREQ_INFO( 800, 1036, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_765E_90[] = { /* 90 nm 2.10GHz Pentium M, VID #E */ FREQ_INFO(2100, 1356, INTEL_BUS_CLK), FREQ_INFO(1800, 1292, INTEL_BUS_CLK), FREQ_INFO(1600, 1244, INTEL_BUS_CLK), FREQ_INFO(1400, 1196, INTEL_BUS_CLK), FREQ_INFO(1200, 1148, INTEL_BUS_CLK), FREQ_INFO(1000, 1100, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_755A_90[] = { /* 90 nm 2.00GHz Pentium M, VID #A */ FREQ_INFO(2000, 1340, INTEL_BUS_CLK), FREQ_INFO(1800, 1292, INTEL_BUS_CLK), FREQ_INFO(1600, 1244, INTEL_BUS_CLK), FREQ_INFO(1400, 1196, INTEL_BUS_CLK), FREQ_INFO(1200, 1148, INTEL_BUS_CLK), FREQ_INFO(1000, 1100, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const 
freq_info PM_755B_90[] = { /* 90 nm 2.00GHz Pentium M, VID #B */ FREQ_INFO(2000, 1324, INTEL_BUS_CLK), FREQ_INFO(1800, 1276, INTEL_BUS_CLK), FREQ_INFO(1600, 1228, INTEL_BUS_CLK), FREQ_INFO(1400, 1180, INTEL_BUS_CLK), FREQ_INFO(1200, 1132, INTEL_BUS_CLK), FREQ_INFO(1000, 1084, INTEL_BUS_CLK), FREQ_INFO( 800, 1036, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_755C_90[] = { /* 90 nm 2.00GHz Pentium M, VID #C */ FREQ_INFO(2000, 1308, INTEL_BUS_CLK), FREQ_INFO(1800, 1276, INTEL_BUS_CLK), FREQ_INFO(1600, 1228, INTEL_BUS_CLK), FREQ_INFO(1400, 1180, INTEL_BUS_CLK), FREQ_INFO(1200, 1132, INTEL_BUS_CLK), FREQ_INFO(1000, 1084, INTEL_BUS_CLK), FREQ_INFO( 800, 1036, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_755D_90[] = { /* 90 nm 2.00GHz Pentium M, VID #D */ FREQ_INFO(2000, 1276, INTEL_BUS_CLK), FREQ_INFO(1800, 1244, INTEL_BUS_CLK), FREQ_INFO(1600, 1196, INTEL_BUS_CLK), FREQ_INFO(1400, 1164, INTEL_BUS_CLK), FREQ_INFO(1200, 1116, INTEL_BUS_CLK), FREQ_INFO(1000, 1084, INTEL_BUS_CLK), FREQ_INFO( 800, 1036, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_745A_90[] = { /* 90 nm 1.80GHz Pentium M, VID #A */ FREQ_INFO(1800, 1340, INTEL_BUS_CLK), FREQ_INFO(1600, 1292, INTEL_BUS_CLK), FREQ_INFO(1400, 1228, INTEL_BUS_CLK), FREQ_INFO(1200, 1164, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_745B_90[] = { /* 90 nm 1.80GHz Pentium M, VID #B */ FREQ_INFO(1800, 1324, INTEL_BUS_CLK), FREQ_INFO(1600, 1276, INTEL_BUS_CLK), FREQ_INFO(1400, 1212, INTEL_BUS_CLK), FREQ_INFO(1200, 1164, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_745C_90[] = { /* 90 nm 1.80GHz Pentium M, VID #C */ FREQ_INFO(1800, 
1308, INTEL_BUS_CLK), FREQ_INFO(1600, 1260, INTEL_BUS_CLK), FREQ_INFO(1400, 1212, INTEL_BUS_CLK), FREQ_INFO(1200, 1148, INTEL_BUS_CLK), FREQ_INFO(1000, 1100, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_745D_90[] = { /* 90 nm 1.80GHz Pentium M, VID #D */ FREQ_INFO(1800, 1276, INTEL_BUS_CLK), FREQ_INFO(1600, 1228, INTEL_BUS_CLK), FREQ_INFO(1400, 1180, INTEL_BUS_CLK), FREQ_INFO(1200, 1132, INTEL_BUS_CLK), FREQ_INFO(1000, 1084, INTEL_BUS_CLK), FREQ_INFO( 800, 1036, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_735A_90[] = { /* 90 nm 1.70GHz Pentium M, VID #A */ FREQ_INFO(1700, 1340, INTEL_BUS_CLK), FREQ_INFO(1400, 1244, INTEL_BUS_CLK), FREQ_INFO(1200, 1180, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_735B_90[] = { /* 90 nm 1.70GHz Pentium M, VID #B */ FREQ_INFO(1700, 1324, INTEL_BUS_CLK), FREQ_INFO(1400, 1244, INTEL_BUS_CLK), FREQ_INFO(1200, 1180, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_735C_90[] = { /* 90 nm 1.70GHz Pentium M, VID #C */ FREQ_INFO(1700, 1308, INTEL_BUS_CLK), FREQ_INFO(1400, 1228, INTEL_BUS_CLK), FREQ_INFO(1200, 1164, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_735D_90[] = { /* 90 nm 1.70GHz Pentium M, VID #D */ FREQ_INFO(1700, 1276, INTEL_BUS_CLK), FREQ_INFO(1400, 1212, INTEL_BUS_CLK), FREQ_INFO(1200, 1148, INTEL_BUS_CLK), FREQ_INFO(1000, 1100, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_725A_90[] = { /* 90 nm 1.60GHz Pentium M, VID #A */ 
FREQ_INFO(1600, 1340, INTEL_BUS_CLK), FREQ_INFO(1400, 1276, INTEL_BUS_CLK), FREQ_INFO(1200, 1212, INTEL_BUS_CLK), FREQ_INFO(1000, 1132, INTEL_BUS_CLK), FREQ_INFO( 800, 1068, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_725B_90[] = { /* 90 nm 1.60GHz Pentium M, VID #B */ FREQ_INFO(1600, 1324, INTEL_BUS_CLK), FREQ_INFO(1400, 1260, INTEL_BUS_CLK), FREQ_INFO(1200, 1196, INTEL_BUS_CLK), FREQ_INFO(1000, 1132, INTEL_BUS_CLK), FREQ_INFO( 800, 1068, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_725C_90[] = { /* 90 nm 1.60GHz Pentium M, VID #C */ FREQ_INFO(1600, 1308, INTEL_BUS_CLK), FREQ_INFO(1400, 1244, INTEL_BUS_CLK), FREQ_INFO(1200, 1180, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_725D_90[] = { /* 90 nm 1.60GHz Pentium M, VID #D */ FREQ_INFO(1600, 1276, INTEL_BUS_CLK), FREQ_INFO(1400, 1228, INTEL_BUS_CLK), FREQ_INFO(1200, 1164, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_715A_90[] = { /* 90 nm 1.50GHz Pentium M, VID #A */ FREQ_INFO(1500, 1340, INTEL_BUS_CLK), FREQ_INFO(1200, 1228, INTEL_BUS_CLK), FREQ_INFO(1000, 1148, INTEL_BUS_CLK), FREQ_INFO( 800, 1068, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_715B_90[] = { /* 90 nm 1.50GHz Pentium M, VID #B */ FREQ_INFO(1500, 1324, INTEL_BUS_CLK), FREQ_INFO(1200, 1212, INTEL_BUS_CLK), FREQ_INFO(1000, 1148, INTEL_BUS_CLK), FREQ_INFO( 800, 1068, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_715C_90[] = { /* 90 nm 1.50GHz Pentium M, VID #C */ FREQ_INFO(1500, 1308, INTEL_BUS_CLK), FREQ_INFO(1200, 1212, INTEL_BUS_CLK), FREQ_INFO(1000, 1132, INTEL_BUS_CLK), FREQ_INFO( 800, 1068, 
INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_715D_90[] = { /* 90 nm 1.50GHz Pentium M, VID #D */ FREQ_INFO(1500, 1276, INTEL_BUS_CLK), FREQ_INFO(1200, 1180, INTEL_BUS_CLK), FREQ_INFO(1000, 1116, INTEL_BUS_CLK), FREQ_INFO( 800, 1052, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_738_90[] = { /* 90 nm 1.40GHz Low Voltage Pentium M */ FREQ_INFO(1400, 1116, INTEL_BUS_CLK), FREQ_INFO(1300, 1116, INTEL_BUS_CLK), FREQ_INFO(1200, 1100, INTEL_BUS_CLK), FREQ_INFO(1100, 1068, INTEL_BUS_CLK), FREQ_INFO(1000, 1052, INTEL_BUS_CLK), FREQ_INFO( 900, 1036, INTEL_BUS_CLK), FREQ_INFO( 800, 1020, INTEL_BUS_CLK), FREQ_INFO( 600, 988, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_733_90[] = { /* 90 nm 1.10GHz Ultra Low Voltage Pentium M */ FREQ_INFO(1100, 940, INTEL_BUS_CLK), FREQ_INFO(1000, 924, INTEL_BUS_CLK), FREQ_INFO( 900, 892, INTEL_BUS_CLK), FREQ_INFO( 800, 876, INTEL_BUS_CLK), FREQ_INFO( 600, 812, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const freq_info PM_723_90[] = { /* 90 nm 1.00GHz Ultra Low Voltage Pentium M */ FREQ_INFO(1000, 940, INTEL_BUS_CLK), FREQ_INFO( 900, 908, INTEL_BUS_CLK), FREQ_INFO( 800, 876, INTEL_BUS_CLK), FREQ_INFO( 600, 812, INTEL_BUS_CLK), FREQ_INFO( 0, 0, 1), }; const cpu_info ESTprocs[] = { INTEL(PM17_130, 1700, 1484, 600, 956, INTEL_BUS_CLK), INTEL(PM16_130, 1600, 1484, 600, 956, INTEL_BUS_CLK), INTEL(PM15_130, 1500, 1484, 600, 956, INTEL_BUS_CLK), INTEL(PM14_130, 1400, 1484, 600, 956, INTEL_BUS_CLK), INTEL(PM13_130, 1300, 1388, 600, 956, INTEL_BUS_CLK), INTEL(PM13_LV_130, 1300, 1180, 600, 956, INTEL_BUS_CLK), INTEL(PM12_LV_130, 1200, 1180, 600, 956, INTEL_BUS_CLK), INTEL(PM11_LV_130, 1100, 1180, 600, 956, INTEL_BUS_CLK), INTEL(PM11_ULV_130, 1100, 1004, 600, 844, INTEL_BUS_CLK), INTEL(PM10_ULV_130, 1000, 1004, 600, 844, INTEL_BUS_CLK), INTEL(PM_765A_90, 2100, 1340, 600, 988, INTEL_BUS_CLK), INTEL(PM_765B_90, 2100, 1324, 600, 988, 
INTEL_BUS_CLK), INTEL(PM_765C_90, 2100, 1308, 600, 988, INTEL_BUS_CLK), INTEL(PM_765E_90, 2100, 1356, 600, 988, INTEL_BUS_CLK), INTEL(PM_755A_90, 2000, 1340, 600, 988, INTEL_BUS_CLK), INTEL(PM_755B_90, 2000, 1324, 600, 988, INTEL_BUS_CLK), INTEL(PM_755C_90, 2000, 1308, 600, 988, INTEL_BUS_CLK), INTEL(PM_755D_90, 2000, 1276, 600, 988, INTEL_BUS_CLK), INTEL(PM_745A_90, 1800, 1340, 600, 988, INTEL_BUS_CLK), INTEL(PM_745B_90, 1800, 1324, 600, 988, INTEL_BUS_CLK), INTEL(PM_745C_90, 1800, 1308, 600, 988, INTEL_BUS_CLK), INTEL(PM_745D_90, 1800, 1276, 600, 988, INTEL_BUS_CLK), INTEL(PM_735A_90, 1700, 1340, 600, 988, INTEL_BUS_CLK), INTEL(PM_735B_90, 1700, 1324, 600, 988, INTEL_BUS_CLK), INTEL(PM_735C_90, 1700, 1308, 600, 988, INTEL_BUS_CLK), INTEL(PM_735D_90, 1700, 1276, 600, 988, INTEL_BUS_CLK), INTEL(PM_725A_90, 1600, 1340, 600, 988, INTEL_BUS_CLK), INTEL(PM_725B_90, 1600, 1324, 600, 988, INTEL_BUS_CLK), INTEL(PM_725C_90, 1600, 1308, 600, 988, INTEL_BUS_CLK), INTEL(PM_725D_90, 1600, 1276, 600, 988, INTEL_BUS_CLK), INTEL(PM_715A_90, 1500, 1340, 600, 988, INTEL_BUS_CLK), INTEL(PM_715B_90, 1500, 1324, 600, 988, INTEL_BUS_CLK), INTEL(PM_715C_90, 1500, 1308, 600, 988, INTEL_BUS_CLK), INTEL(PM_715D_90, 1500, 1276, 600, 988, INTEL_BUS_CLK), INTEL(PM_738_90, 1400, 1116, 600, 988, INTEL_BUS_CLK), INTEL(PM_733_90, 1100, 940, 600, 812, INTEL_BUS_CLK), INTEL(PM_723_90, 1000, 940, 600, 812, INTEL_BUS_CLK), { NULL, 0, 0, NULL }, }; static void est_identify(driver_t *driver, device_t parent); static int est_probe(device_t parent); static int est_attach(device_t parent); static int est_detach(device_t parent); static int est_find_cpu(const char *vendor, uint64_t msr, uint32_t bus_clk, const freq_info **freqs); static const freq_info *est_get_current(const freq_info *freq_list); static int est_settings(device_t dev, struct cf_setting *sets, int *count); static int est_set(device_t dev, const struct cf_setting *set); static int est_get(device_t dev, struct cf_setting *set); static int 
est_type(device_t dev, int *type); static device_method_t est_methods[] = { /* Device interface */ DEVMETHOD(device_identify, est_identify), DEVMETHOD(device_probe, est_probe), DEVMETHOD(device_attach, est_attach), DEVMETHOD(device_detach, est_detach), /* cpufreq interface */ DEVMETHOD(cpufreq_drv_set, est_set), DEVMETHOD(cpufreq_drv_get, est_get), DEVMETHOD(cpufreq_drv_type, est_type), DEVMETHOD(cpufreq_drv_settings, est_settings), {0, 0} }; static driver_t est_driver = { "est", est_methods, sizeof(struct est_softc), }; static devclass_t est_devclass; DRIVER_MODULE(est, cpu, est_driver, est_devclass, 0, 0); static void est_identify(driver_t *driver, device_t parent) { u_int p[4]; /* Make sure we're not being doubly invoked. */ if (device_find_child(parent, "est", -1) != NULL) return; /* Check that CPUID is supported and the vendor is Intel.*/ if (cpu_high == 0 || strcmp(cpu_vendor, GenuineIntel) != 0) return; /* Read capability bits and check if the CPU supports EST. */ do_cpuid(1, p); if ((p[2] & 0x80) == 0) return; + /* + * We add a child for each CPU since settings must be performed + * on each CPU in the SMP case. + */ if (BUS_ADD_CHILD(parent, 0, "est", -1) == NULL) device_printf(parent, "add est child failed\n"); } static int est_probe(device_t dev) { const freq_info *f; device_t perf_dev; uint64_t msr; int error, type; if (resource_disabled("est", 0)) return (ENXIO); /* * If the ACPI perf driver has attached and is not just offering * info, let it manage things. */ perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); if (perf_dev && device_is_attached(perf_dev)) { error = CPUFREQ_DRV_TYPE(perf_dev, &type); if (error == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0) return (ENXIO); } /* Attempt to enable SpeedStep if not currently enabled. */ msr = rdmsr(MSR_MISC_ENABLE); if ((msr & MSR_SS_ENABLE) == 0) { wrmsr(MSR_MISC_ENABLE, msr | MSR_SS_ENABLE); /* Check if the enable failed. 
*/ msr = rdmsr(MSR_MISC_ENABLE); if ((msr & MSR_SS_ENABLE) == 0) { device_printf(dev, "failed to enable SpeedStep\n"); return (ENXIO); } } /* Identify the exact CPU model */ msr = rdmsr(MSR_PERF_STATUS); if (est_find_cpu(cpu_vendor, msr, INTEL_BUS_CLK, &f) != 0) { printf( "CPU claims to support Enhanced Speedstep, but is not recognized.\n" "Please update driver or contact the maintainer.\n" "cpu_vendor = %s msr = %0jx, bus_clk = %x\n", cpu_vendor, msr, INTEL_BUS_CLK); return (ENXIO); } device_set_desc(dev, "Enhanced SpeedStep Frequency Control"); return (0); } static int est_attach(device_t dev) { struct est_softc *sc; uint64_t msr; sc = device_get_softc(dev); sc->dev = dev; msr = rdmsr(MSR_PERF_STATUS); est_find_cpu(cpu_vendor, msr, INTEL_BUS_CLK, &sc->freq_list); cpufreq_register(dev); return (0); } static int est_detach(device_t dev) { return (ENXIO); } static int est_find_cpu(const char *vendor, uint64_t msr, uint32_t bus_clk, const freq_info **freqs) { const cpu_info *p; uint32_t id; /* Find a table which matches (vendor, id, bus_clk). */ id = msr >> 32; for (p = ESTprocs; p->id32 != 0; p++) { if (strcmp(p->vendor, vendor) == 0 && p->id32 == id && p->bus_clk == bus_clk) break; } if (p->id32 == 0) return (EOPNOTSUPP); /* Make sure the current setpoint is valid. */ if (est_get_current(p->freqtab) == NULL) return (EOPNOTSUPP); *freqs = p->freqtab; return (0); } static const freq_info * est_get_current(const freq_info *freq_list) { const freq_info *f; int i; uint16_t id16; /* * Try a few times to get a valid value. Sometimes, if the CPU * is in the middle of an asynchronous transition (i.e., P4TCC), * we get a temporary invalid result. 
*/ for (i = 0; i < 5; i++) { id16 = rdmsr(MSR_PERF_STATUS) & 0xffff; for (f = freq_list; f->id16 != 0; f++) { if (f->id16 == id16) return (f); } DELAY(100); } return (NULL); } static int est_settings(device_t dev, struct cf_setting *sets, int *count) { struct est_softc *sc; const freq_info *f; int i; sc = device_get_softc(dev); if (*count < EST_MAX_SETTINGS) return (E2BIG); i = 0; for (f = sc->freq_list; f->freq != 0; f++, i++) { sets[i].freq = f->freq; sets[i].volts = f->volts; sets[i].power = CPUFREQ_VAL_UNKNOWN; sets[i].lat = EST_TRANS_LAT; sets[i].dev = dev; } *count = i; return (0); } static int est_set(device_t dev, const struct cf_setting *set) { struct est_softc *sc; const freq_info *f; uint64_t msr; /* Find the setting matching the requested one. */ sc = device_get_softc(dev); for (f = sc->freq_list; f->freq != 0; f++) { if (f->freq == set->freq) break; } if (f->freq == 0) return (EINVAL); /* Read the current register, mask out the old, set the new id. */ msr = rdmsr(MSR_PERF_CTL); msr = (msr & ~0xffff) | f->id16; wrmsr(MSR_PERF_CTL, msr); /* Wait a short while for the new setting. XXX Is this necessary? */ DELAY(EST_TRANS_LAT); return (0); } static int est_get(device_t dev, struct cf_setting *set) { struct est_softc *sc; const freq_info *f; sc = device_get_softc(dev); f = est_get_current(sc->freq_list); if (f == NULL) return (ENXIO); set->freq = f->freq; set->volts = f->volts; set->power = CPUFREQ_VAL_UNKNOWN; set->lat = EST_TRANS_LAT; set->dev = dev; return (0); } static int est_type(device_t dev, int *type) { if (type == NULL) return (EINVAL); *type = CPUFREQ_TYPE_ABSOLUTE; return (0); } diff --git a/sys/i386/cpufreq/p4tcc.c b/sys/i386/cpufreq/p4tcc.c index cff04fe61770..b8cf66e75d89 100644 --- a/sys/i386/cpufreq/p4tcc.c +++ b/sys/i386/cpufreq/p4tcc.c @@ -1,278 +1,289 @@ /*- * Copyright (c) 2005 Nate Lawson * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Throttle clock frequency by using the thermal control circuit. This * operates independently of SpeedStep and ACPI throttling and is supported * on Pentium 4 and later models (feature TM). * * Reference: Intel Developer's manual v.3 #245472-012 * * The original version of this driver was written by Ted Unangst for * OpenBSD and imported by Maxim Sobolev. It was rewritten by Nate Lawson * for use with the cpufreq framework. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "cpufreq_if.h" struct p4tcc_softc { device_t dev; int set_count; int lowest_val; int auto_mode; }; #define TCC_NUM_SETTINGS 8 #define TCC_ENABLE_ONDEMAND (1<<4) #define TCC_REG_OFFSET 1 #define TCC_SPEED_PERCENT(x) ((10000 * (x)) / TCC_NUM_SETTINGS) static void p4tcc_identify(driver_t *driver, device_t parent); static int p4tcc_probe(device_t dev); static int p4tcc_attach(device_t dev); static int p4tcc_settings(device_t dev, struct cf_setting *sets, int *count); static int p4tcc_set(device_t dev, const struct cf_setting *set); static int p4tcc_get(device_t dev, struct cf_setting *set); static int p4tcc_type(device_t dev, int *type); static device_method_t p4tcc_methods[] = { /* Device interface */ DEVMETHOD(device_identify, p4tcc_identify), DEVMETHOD(device_probe, p4tcc_probe), DEVMETHOD(device_attach, p4tcc_attach), /* cpufreq interface */ DEVMETHOD(cpufreq_drv_set, p4tcc_set), DEVMETHOD(cpufreq_drv_get, p4tcc_get), DEVMETHOD(cpufreq_drv_type, p4tcc_type), DEVMETHOD(cpufreq_drv_settings, p4tcc_settings), {0, 0} }; static driver_t p4tcc_driver = { "p4tcc", p4tcc_methods, sizeof(struct p4tcc_softc), }; static devclass_t p4tcc_devclass; DRIVER_MODULE(p4tcc, cpu, p4tcc_driver, p4tcc_devclass, 0, 0); static void p4tcc_identify(driver_t *driver, device_t parent) { if ((cpu_feature & (CPUID_ACPI | CPUID_TM)) != (CPUID_ACPI | CPUID_TM)) return; + + /* Make sure we're not being doubly invoked. */ + if (device_find_child(parent, "p4tcc", -1) != NULL) + return; + + /* + * We attach a p4tcc child for every CPU since settings need to + * be performed on every CPU in the SMP case. See section 13.15.3 + * of the IA32 Intel Architecture Software Developer's Manual, + * Volume 3, for more info. 
+ */ if (BUS_ADD_CHILD(parent, 0, "p4tcc", -1) == NULL) device_printf(parent, "add p4tcc child failed\n"); } static int p4tcc_probe(device_t dev) { if (resource_disabled("p4tcc", 0)) return (ENXIO); device_set_desc(dev, "CPU Frequency Thermal Control"); return (0); } static int p4tcc_attach(device_t dev) { struct p4tcc_softc *sc; sc = device_get_softc(dev); sc->dev = dev; sc->set_count = TCC_NUM_SETTINGS; /* * On boot, the TCC is usually in Automatic mode where reading the * current performance level is likely to produce bogus results. * We record that state here and don't trust the contents of the * status MSR until we've set it ourselves. */ sc->auto_mode = TRUE; switch (cpu_id & 0xf) { case 0x22: case 0x24: case 0x25: case 0x27: case 0x29: /* * These CPU models hang when set to 12.5%. * See Errata O50, P44, and Z21. */ sc->set_count -= 1; break; case 0x07: /* errata N44 and P18 */ case 0x0a: case 0x12: case 0x13: /* * These CPU models hang when set to 12.5% or 25%. * See Errata N44 and P18l. */ sc->set_count -= 2; break; } sc->lowest_val = TCC_NUM_SETTINGS - sc->set_count + 1; cpufreq_register(dev); return (0); } static int p4tcc_settings(device_t dev, struct cf_setting *sets, int *count) { struct p4tcc_softc *sc; int i, val; sc = device_get_softc(dev); if (sets == NULL || count == NULL) return (EINVAL); if (*count < sc->set_count) return (E2BIG); /* Return a list of valid settings for this driver. */ memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * sc->set_count); val = TCC_NUM_SETTINGS; for (i = 0; i < sc->set_count; i++, val--) { sets[i].freq = TCC_SPEED_PERCENT(val); sets[i].dev = dev; } *count = sc->set_count; return (0); } static int p4tcc_set(device_t dev, const struct cf_setting *set) { struct p4tcc_softc *sc; uint64_t mask, msr; int val; if (set == NULL) return (EINVAL); sc = device_get_softc(dev); /* * Validate requested state converts to a setting that is an integer * from [sc->lowest_val .. TCC_NUM_SETTINGS]. 
*/ val = set->freq * TCC_NUM_SETTINGS / 10000; if (val * 10000 != set->freq * TCC_NUM_SETTINGS || val < sc->lowest_val || val > TCC_NUM_SETTINGS) return (EINVAL); /* * Read the current register and mask off the old setting and * On-Demand bit. If the new val is < 100%, set it and the On-Demand * bit, otherwise just return to Automatic mode. */ msr = rdmsr(MSR_THERM_CONTROL); mask = (TCC_NUM_SETTINGS - 1) << TCC_REG_OFFSET; msr &= ~(mask | TCC_ENABLE_ONDEMAND); if (val < TCC_NUM_SETTINGS) msr |= (val << TCC_REG_OFFSET) | TCC_ENABLE_ONDEMAND; wrmsr(MSR_THERM_CONTROL, msr); /* * Record whether we're now in Automatic or On-Demand mode. We have * to cache this since there is no reliable way to check if TCC is in * Automatic mode (i.e., at 100% or possibly 50%). Reading bit 4 of * the ACPI Thermal Monitor Control Register produces 0 no matter * what the current mode. */ if (msr & TCC_ENABLE_ONDEMAND) sc->auto_mode = TRUE; else sc->auto_mode = FALSE; return (0); } static int p4tcc_get(device_t dev, struct cf_setting *set) { struct p4tcc_softc *sc; uint64_t msr; int val; if (set == NULL) return (EINVAL); sc = device_get_softc(dev); /* * Read the current register and extract the current setting. If * in automatic mode, assume we're at TCC_NUM_SETTINGS (100%). * * XXX This is not completely reliable since at high temperatures * the CPU may be automatically throttling to 50% but it's the best * we can do. */ if (!sc->auto_mode) { msr = rdmsr(MSR_THERM_CONTROL); val = (msr >> TCC_REG_OFFSET) & (TCC_NUM_SETTINGS - 1); } else val = TCC_NUM_SETTINGS; memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set)); set->freq = TCC_SPEED_PERCENT(val); set->dev = dev; return (0); } static int p4tcc_type(device_t dev, int *type) { if (type == NULL) return (EINVAL); *type = CPUFREQ_TYPE_RELATIVE; return (0); }