Index: head/sys/dev/acpica/acpi.c =================================================================== --- head/sys/dev/acpica/acpi.c (revision 82371) +++ head/sys/dev/acpica/acpi.c (revision 82372) @@ -1,1755 +1,1753 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi.h" #include #include MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS MODULE_NAME("ACPI") /* * Character device */ static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; #define CDEV_MAJOR 152 static struct cdevsw acpi_cdevsw = { acpiopen, acpiclose, noread, nowrite, acpiioctl, nopoll, nommap, nostrategy, "acpi", CDEV_MAJOR, nodump, nopsize, 0 }; static const char* sleep_state_names[] = { "S0", "S1", "S2", "S3", "S4", "S5", "S4B" }; /* this has to be static, as the softc is gone when we need it */ static int acpi_off_state = ACPI_STATE_S5; struct mtx acpi_mutex; static int acpi_modevent(struct module *mod, int event, void *junk); static void acpi_identify(driver_t *driver, device_t parent); static int acpi_probe(device_t dev); static int acpi_attach(device_t dev); static device_t acpi_add_child(device_t bus, int order, const char *name, int unit); static int acpi_print_resources(struct resource_list *rl, const char *name, int type, const char *format); static int acpi_print_child(device_t bus, device_t child); static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value); static int acpi_set_resource(device_t dev, device_t child, int type, int rid, u_long start, u_long count); static int acpi_get_resource(device_t dev, device_t child, int type, int rid, u_long *startp, u_long *countp); static struct resource *acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags); static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); static void 
acpi_probe_children(device_t bus); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_shutdown_pre_sync(void *arg, int howto); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_identify, acpi_identify), DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, acpi_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_driver_added, bus_generic_driver_added), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), {0, 0} }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; devclass_t acpi_devclass; DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0); SYSCTL_INT(_debug, OID_AUTO, acpi_debug_layer, CTLFLAG_RW, &AcpiDbgLayer, 0, ""); SYSCTL_INT(_debug, OID_AUTO, acpi_debug_level, CTLFLAG_RW, &AcpiDbgLevel, 0, ""); /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and 
can be fatal to the system. * It also cannot be unloaded, since the entire system bus hierarchy hangs off it. */ static int acpi_modevent(struct module *mod, int event, void *junk) { switch(event) { case MOD_LOAD: if (!cold) return(EPERM); break; case MOD_UNLOAD: return(EBUSY); default: break; } return(0); } /* * Detect ACPI, perform early initialisation */ static void acpi_identify(driver_t *driver, device_t parent) { device_t child; int error; #ifdef ENABLE_DEBUGGER char *debugpoint = getenv("debug.acpi.debugger"); #endif FUNCTION_TRACE(__func__); if(!cold){ printf("Don't load this driver from userland!!\n"); return ; } /* * Make sure we're not being doubly invoked. */ if (device_find_child(parent, "acpi", 0) != NULL) return_VOID; /* initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", MTX_DEF); /* * Start up the ACPI CA subsystem. */ #ifdef ENABLE_DEBUGGER if (debugpoint && !strcmp(debugpoint, "init")) acpi_EnterDebugger(); #endif if ((error = AcpiInitializeSubsystem()) != AE_OK) { printf("ACPI: initialisation failed: %s\n", AcpiFormatException(error)); return_VOID; } #ifdef ENABLE_DEBUGGER if (debugpoint && !strcmp(debugpoint, "tables")) acpi_EnterDebugger(); #endif if ((error = AcpiLoadTables()) != AE_OK) { printf("ACPI: table load failed: %s\n", AcpiFormatException(error)); return_VOID; } /* * Attach the actual ACPI device. 
*/ if ((child = BUS_ADD_CHILD(parent, 0, "acpi", 0)) == NULL) { device_printf(parent, "ACPI: could not attach\n"); return_VOID; } } /* * Fetch some descriptive data from ACPI to put in our attach message */ static int acpi_probe(device_t dev) { ACPI_TABLE_HEADER th; char buf[20]; ACPI_STATUS status; int error; FUNCTION_TRACE(__func__); ACPI_LOCK; if ((status = AcpiGetTableHeader(ACPI_TABLE_XSDT, 1, &th)) != AE_OK) { device_printf(dev, "couldn't get XSDT header: %s\n", AcpiFormatException(status)); error = ENXIO; } else { sprintf(buf, "%.6s %.8s", th.OemId, th.OemTableId); device_set_desc_copy(dev, buf); error = 0; } ACPI_UNLOCK; return_VALUE(error); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error; #ifdef ENABLE_DEBUGGER char *debugpoint = getenv("debug.acpi.debugger"); #endif FUNCTION_TRACE(__func__); ACPI_LOCK; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->acpi_dev = dev; acpi_install_wakeup_handler(sc); #ifdef ENABLE_DEBUGGER if (debugpoint && !strcmp(debugpoint, "spaces")) acpi_EnterDebugger(); #endif /* * Install the default address space handlers. */ error = ENXIO; if ((status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_SYSTEM_MEMORY, ACPI_DEFAULT_HANDLER, NULL, NULL)) != AE_OK) { device_printf(dev, "could not initialise SystemMemory handler: %s\n", AcpiFormatException(status)); goto out; } if ((status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_SYSTEM_IO, ACPI_DEFAULT_HANDLER, NULL, NULL)) != AE_OK) { device_printf(dev, "could not initialise SystemIO handler: %s\n", AcpiFormatException(status)); goto out; } if ((status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL)) != AE_OK) { device_printf(dev, "could not initialise PciConfig handler: %s\n", AcpiFormatException(status)); goto out; } /* * Bring ACPI fully online. 
* * Note that we request that device _STA and _INI methods not be run (ACPI_NO_DEVICE_INIT) * and the final object initialisation pass be skipped (ACPI_NO_OBJECT_INIT). * * XXX We need to arrange for the object init pass after we have attached all our * child devices. */ #ifdef ENABLE_DEBUGGER if (debugpoint && !strcmp(debugpoint, "enable")) acpi_EnterDebugger(); #endif if ((status = AcpiEnableSubsystem(ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT)) != AE_OK) { device_printf(dev, "could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Setup our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", ""); /* * Dispatch the default sleep state to devices. * TBD: should be configured from userland policy manager. */ sc->acpi_power_button_sx = ACPI_POWER_BUTTON_DEFAULT_SX; sc->acpi_sleep_button_sx = ACPI_SLEEP_BUTTON_DEFAULT_SX; sc->acpi_lid_switch_sx = ACPI_LID_SWITCH_DEFAULT_SX; acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children. 
*/ #ifdef ENABLE_DEBUGGER if (debugpoint && !strcmp(debugpoint, "probe")) acpi_EnterDebugger(); #endif if (!acpi_disabled("bus")) acpi_probe_children(dev); /* * Register our shutdown handlers */ EVENTHANDLER_REGISTER(shutdown_pre_sync, acpi_shutdown_pre_sync, sc, SHUTDOWN_PRI_LAST); EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST); /* * Register our acpi event handlers. * XXX should be configurable eg. via userland policy manager. */ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* * Flag our initial states. */ sc->acpi_enabled = 1; sc->acpi_sstate = ACPI_STATE_S0; /* * Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, 0, 5, 0660, "acpi"); sc->acpi_dev_t->si_drv1 = sc; #ifdef ENABLE_DEBUGGER if (debugpoint && !strcmp(debugpoint, "running")) acpi_EnterDebugger(); #endif error = 0; out: ACPI_UNLOCK; return_VALUE(error); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT)) == NULL) return(NULL); bzero(ad, sizeof(*ad)); resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); return(child); } /* * Print child device resource usage */ static int acpi_print_resources(struct resource_list *rl, const char *name, int type, const char *format) { struct resource_list_entry *rle; int printed, retval; printed = 0; retval = 0; if (!SLIST_FIRST(rl)) return(0); /* Yes, this is kinda cheating */ SLIST_FOREACH(rle, rl, link) { if (rle->type == type) { if (printed == 0) retval += printf(" %s ", name); else if (printed > 0) retval += printf(","); printed++; retval += printf(format, rle->start); if (rle->count > 1) { retval += printf("-"); 
retval += printf(format, rle->start + rle->count - 1); } } } return(retval); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += acpi_print_resources(rl, "port", SYS_RES_IOPORT, "%#lx"); retval += acpi_print_resources(rl, "iomem", SYS_RES_MEMORY, "%#lx"); retval += acpi_print_resources(rl, "irq", SYS_RES_IRQ, "%ld"); retval += bus_print_child_footer(bus, child); return(retval); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { printf("device has no ivars\n"); return(ENOENT); } switch(index) { /* ACPI ivars */ case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_MAGIC: *(int *)result = ad->ad_magic; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; default: panic("bad ivar read request (%d)\n", index); return(ENOENT); } return(0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { printf("device has no ivars\n"); return(ENOENT); } switch(index) { /* ACPI ivars */ case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_MAGIC: ad->ad_magic = (int )value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; default: panic("bad ivar write request (%d)\n", index); return(ENOENT); } return(0); } /* * Handle child resource allocation/removal */ static int acpi_set_resource(device_t dev, device_t child, int type, int rid, u_long start, u_long count) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; resource_list_add(rl, type, rid, start, start + count -1, count); return(0); } static int acpi_get_resource(device_t dev, 
device_t child, int type, int rid, u_long *startp, u_long *countp) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) return(ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return(0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; return(resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags)); } static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; return(resource_list_release(rl, bus, child, type, rid, r)); } /* * Scan relevant portions of the ACPI namespace and attach child devices. * * Note that we only expect to find devices in the \_PR_, \_TZ_, \_SI_ and \_SB_ scopes, * and \_PR_ and \_TZ_ become obsolete in the ACPI 2.0 spec. */ static void acpi_probe_children(device_t bus) { ACPI_HANDLE parent; static char *scopes[] = {"\\_PR_", "\\_TZ_", "\\_SI", "\\_SB_", NULL}; int i; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; /* * Create any static children by calling device identify methods. */ - DEBUG_PRINT(TRACE_OBJECTS, ("device identify routines\n")); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_generic_probe(bus); /* * Scan the namespace and insert placeholders for all the devices that * we find. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are currently * present. (This assumes that we don't want to create/remove devices as they * appear, which might be smarter.) 
*/ - DEBUG_PRINT(TRACE_OBJECTS, ("namespace scan\n")); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); for (i = 0; scopes[i] != NULL; i++) if ((AcpiGetHandle(ACPI_ROOT_OBJECT, scopes[i], &parent)) == AE_OK) AcpiWalkNamespace(ACPI_TYPE_ANY, parent, 100, acpi_probe_child, bus, NULL); /* * Scan all of the child devices we have created and let them probe/attach. */ - DEBUG_PRINT(TRACE_OBJECTS, ("first bus_generic_attach\n")); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "first bus_generic_attach\n")); bus_generic_attach(bus); /* * Some of these children may have attached others as part of their attach * process (eg. the root PCI bus driver), so rescan. */ - DEBUG_PRINT(TRACE_OBJECTS, ("second bus_generic_attach\n")); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "second bus_generic_attach\n")); bus_generic_attach(bus); return_VOID; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_OBJECT_TYPE type; device_t child, bus = (device_t)context; FUNCTION_TRACE(__func__); /* * Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS(AE_OK); if (AcpiGetType(handle, &type) == AE_OK) { switch(type) { case ACPI_TYPE_DEVICE: case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: if (acpi_disabled("children")) break; /* * Create a placeholder device for this node. Sort the placeholder * so that the probe/attach passes will run breadth-first. 
*/ - DEBUG_PRINT(TRACE_OBJECTS, ("scanning '%s'\n", acpi_name(handle))) + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", acpi_name(handle))); child = BUS_ADD_CHILD(bus, level * 10, NULL, -1); acpi_set_handle(child, handle); DEBUG_EXEC(device_probe_and_attach(child)); break; } } return_ACPI_STATUS(AE_OK); } static void acpi_shutdown_pre_sync(void *arg, int howto) { ACPI_ASSERTLOCK; /* * Disable all ACPI events before soft off, otherwise the system * will be turned on again on some laptops. * * XXX this should probably be restricted to masking some events just * before powering down, since we may still need ACPI during the * shutdown process. */ acpi_Disable((struct acpi_softc *)arg); } static void acpi_shutdown_final(void *arg, int howto) { ACPI_STATUS status; ACPI_ASSERTLOCK; if (howto & RB_POWEROFF) { printf("Power system off using ACPI...\n"); if ((status = AcpiEnterSleepState(acpi_off_state)) != AE_OK) { printf("ACPI power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); printf("ACPI power-off failed - timeout\n"); } } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; #define MSGFORMAT "%s button is handled as a fixed feature programming model.\n" ACPI_ASSERTLOCK; /* Enable and clear fixed events and install handlers. 
*/ if ((AcpiGbl_FADT != NULL) && (AcpiGbl_FADT->PwrButton == 0)) { AcpiEnableEvent(ACPI_EVENT_POWER_BUTTON, ACPI_EVENT_FIXED); AcpiClearEvent(ACPI_EVENT_POWER_BUTTON, ACPI_EVENT_FIXED); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_eventhandler_power_button_for_sleep, sc); if (first_time) { device_printf(sc->acpi_dev, MSGFORMAT, "power"); } } if ((AcpiGbl_FADT != NULL) && (AcpiGbl_FADT->SleepButton == 0)) { AcpiEnableEvent(ACPI_EVENT_SLEEP_BUTTON, ACPI_EVENT_FIXED); AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON, ACPI_EVENT_FIXED); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_eventhandler_sleep_button_for_sleep, sc); if (first_time) { device_printf(sc->acpi_dev, MSGFORMAT, "sleep"); } } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. */ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_HANDLE h; ACPI_DEVICE_INFO devinfo; ACPI_STATUS error; ACPI_ASSERTLOCK; if ((h = acpi_get_handle(dev)) == NULL) return(FALSE); if ((error = AcpiGetObjectInfo(h, &devinfo)) != AE_OK) return(FALSE); /* XXX 0xf is probably not appropriate */ if ((devinfo.Valid & ACPI_VALID_HID) && (devinfo.CurrentStatus & 0xf)) return(TRUE); return(FALSE); } /* * Match a HID string against a device */ BOOLEAN acpi_MatchHid(device_t dev, char *hid) { ACPI_HANDLE h; ACPI_DEVICE_INFO devinfo; ACPI_STATUS error; ACPI_ASSERTLOCK; if (hid == NULL) return(FALSE); if ((h = acpi_get_handle(dev)) == NULL) return(FALSE); if ((error = AcpiGetObjectInfo(h, &devinfo)) != AE_OK) return(FALSE); if ((devinfo.Valid & ACPI_VALID_HID) && !strcmp(hid, devinfo.HardwareId)) return(TRUE); return(FALSE); } /* * Return the handle of a named object within our scope, ie. that of (parent) * or one of its parents. 
*/ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; ACPI_ASSERTLOCK; /* walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (status == AE_OK) { *result = r; return(AE_OK); } if (status != AE_NOT_FOUND) return(AE_OK); if (AcpiGetParent(parent, &r) != AE_OK) return(AE_NOT_FOUND); parent = r; } } /* * Allocate a buffer with a preset data size. */ ACPI_BUFFER * acpi_AllocBuffer(int size) { ACPI_BUFFER *buf; if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) return(NULL); buf->Length = size; buf->Pointer = (void *)(buf + 1); return(buf); } /* * Perform the tedious double-get procedure required for fetching something into * an ACPI_BUFFER that has not been initialised. */ ACPI_STATUS acpi_GetIntoBuffer(ACPI_HANDLE handle, ACPI_STATUS (*func)(ACPI_HANDLE, ACPI_BUFFER *), ACPI_BUFFER *buf) { ACPI_STATUS status; ACPI_ASSERTLOCK; buf->Length = 0; buf->Pointer = NULL; if ((status = func(handle, buf)) != AE_BUFFER_OVERFLOW) return(status); if ((buf->Pointer = AcpiOsCallocate(buf->Length)) == NULL) return(AE_NO_MEMORY); return(func(handle, buf)); } /* * Perform the tedious double-get procedure required for fetching a table into * an ACPI_BUFFER that has not been initialised. */ ACPI_STATUS acpi_GetTableIntoBuffer(ACPI_TABLE_TYPE table, UINT32 instance, ACPI_BUFFER *buf) { ACPI_STATUS status; ACPI_ASSERTLOCK; buf->Length = 0; buf->Pointer = NULL; if ((status = AcpiGetTable(table, instance, buf)) != AE_BUFFER_OVERFLOW) return(status); if ((buf->Pointer = AcpiOsCallocate(buf->Length)) == NULL) return(AE_NO_MEMORY); return(AcpiGetTable(table, instance, buf)); } /* * Perform the tedious double-evaluate procedure for evaluating something into * an ACPI_BUFFER that has not been initialised. Note that this evaluates * twice, so avoid applying this to things that may have side-effects. * * This is like AcpiEvaluateObject with automatic buffer allocation. 
*/ ACPI_STATUS acpi_EvaluateIntoBuffer(ACPI_HANDLE object, ACPI_STRING pathname, ACPI_OBJECT_LIST *params, ACPI_BUFFER *buf) { ACPI_STATUS status; ACPI_ASSERTLOCK; buf->Length = 0; buf->Pointer = NULL; if ((status = AcpiEvaluateObject(object, pathname, params, buf)) != AE_BUFFER_OVERFLOW) return(status); if ((buf->Pointer = AcpiOsCallocate(buf->Length)) == NULL) return(AE_NO_MEMORY); return(AcpiEvaluateObject(object, pathname, params, buf)); } /* * Evaluate a path that should return an integer. */ ACPI_STATUS acpi_EvaluateInteger(ACPI_HANDLE handle, char *path, int *number) { ACPI_STATUS error; ACPI_BUFFER buf; ACPI_OBJECT param; ACPI_ASSERTLOCK; if (handle == NULL) handle = ACPI_ROOT_OBJECT; buf.Pointer = &param; buf.Length = sizeof(param); if ((error = AcpiEvaluateObject(handle, path, NULL, &buf)) == AE_OK) { if (param.Type == ACPI_TYPE_INTEGER) { *number = param.Integer.Value; } else { error = AE_TYPE; } } return(error); } /* * Iterate over the elements of a package object, calling the supplied * function for each element. * * XXX possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (* func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if ((pkg == NULL) || (pkg->Type != ACPI_TYPE_PACKAGE)) return(AE_BAD_PARAMETER); /* iterate over components */ for (i = 0, comp = pkg->Package.Elements; i < pkg->Package.Count; i++, comp++) func(comp, arg); return(AE_OK); } /* * Find the (index)th resource object in a set. 
*/ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return(AE_BAD_PARAMETER); /* check for terminator */ if ((rp->Id == ACPI_RSTYPE_END_TAG) || (rp->Length == 0)) return(AE_NOT_FOUND); rp = ACPI_RESOURCE_NEXT(rp); } if (resp != NULL) *resp = rp; return(AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* * Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return(AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Id = ACPI_RSTYPE_END_TAG; rp->Length = 0; } if (res == NULL) return(AE_OK); /* * Scan the current buffer looking for the terminator. * This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return(AE_BAD_PARAMETER); if ((rp->Id == ACPI_RSTYPE_END_TAG) || (rp->Length == 0)) { break; } rp = ACPI_RESOURCE_NEXT(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. 
*/ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RESOURCE_LENGTH_NO_DATA + ACPI_RESOURCE_LENGTH) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return(AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* * Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RESOURCE_LENGTH_NO_DATA); /* * And add the terminator. */ rp = ACPI_RESOURCE_NEXT(rp); rp->Id = ACPI_RSTYPE_END_TAG; rp->Length = 0; return(AE_OK); } static ACPI_STATUS __inline acpi_wakeup(UINT8 state) { ACPI_STATUS Status; ACPI_OBJECT_LIST Arg_list; ACPI_OBJECT Arg; ACPI_OBJECT Objects[3]; /* package plus 2 number objects */ ACPI_BUFFER ReturnBuffer; FUNCTION_TRACE_U32(__func__, state); ACPI_ASSERTLOCK; /* * Evaluate the _WAK method */ bzero(&Arg_list, sizeof(Arg_list)); Arg_list.Count = 1; Arg_list.Pointer = &Arg; bzero(&Arg, sizeof(Arg)); Arg.Type = ACPI_TYPE_INTEGER; Arg.Integer.Value = state; /* * Set up _WAK result code buffer. * * XXX should use acpi_EvaluateIntoBuffer */ bzero(Objects, sizeof(Objects)); ReturnBuffer.Length = sizeof(Objects); ReturnBuffer.Pointer = Objects; AcpiEvaluateObject (NULL, "\\_WAK", &Arg_list, &ReturnBuffer); Status = AE_OK; /* Check result code for _WAK */ if (Objects[0].Type != ACPI_TYPE_PACKAGE || Objects[1].Type != ACPI_TYPE_INTEGER || Objects[2].Type != ACPI_TYPE_INTEGER) { /* * In many BIOSes, _WAK doesn't return a result code. * We don't need to worry about it too much :-). 
*/ - DEBUG_PRINT(ACPI_INFO, - ("acpi_wakeup: _WAK result code is corrupted, " - "but should be OK.\n")); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "acpi_wakeup: _WAK result code is corrupted, " + "but should be OK.\n")); } else { /* evaluate status code */ switch (Objects[1].Integer.Value) { case 0x00000001: - DEBUG_PRINT(ACPI_ERROR, - ("acpi_wakeup: Wake was signaled " - "but failed due to lack of power.\n")); + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "acpi_wakeup: Wake was signaled " + "but failed due to lack of power.\n")); Status = AE_ERROR; break; case 0x00000002: - DEBUG_PRINT(ACPI_ERROR, - ("acpi_wakeup: Wake was signaled " - "but failed due to thermal condition.\n")); + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "acpi_wakeup: Wake was signaled " + "but failed due to thermal condition.\n")); Status = AE_ERROR; break; } /* evaluate PSS code */ if (Objects[2].Integer.Value == 0) { - DEBUG_PRINT(ACPI_ERROR, - ("acpi_wakeup: The targeted S-state " - "was not entered because of too much current " - "being drawn from the power supply.\n")); + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "acpi_wakeup: The targeted S-state " + "was not entered because of too much current " + "being drawn from the power supply.\n")); Status = AE_ERROR; } } return_ACPI_STATUS(Status); } /* * Set the system sleep state * * Currently we only support S1 and S5 */ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { ACPI_STATUS status = AE_OK; UINT16 Count; UINT8 TypeA; UINT8 TypeB; FUNCTION_TRACE_U32(__func__, state); ACPI_ASSERTLOCK; switch (state) { case ACPI_STATE_S0: /* XXX only for testing */ status = AcpiEnterSleepState((UINT8)state); if (status != AE_OK) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); } break; case ACPI_STATE_S1: case ACPI_STATE_S2: case ACPI_STATE_S3: case ACPI_STATE_S4: status = AcpiHwObtainSleepTypeRegisterData((UINT8)state, &TypeA, &TypeB); if (status != AE_OK) { device_printf(sc->acpi_dev, "AcpiHwObtainSleepTypeRegisterData 
failed - %s\n", AcpiFormatException(status)); break; } /* * Inform all devices that we are going to sleep. */ if (DEVICE_SUSPEND(root_bus) != 0) { /* * Re-wake the system. * * XXX note that a better two-pass approach with a 'veto' pass * followed by a "real thing" pass would be better, but the * current bus interface does not provide for this. */ DEVICE_RESUME(root_bus); return_ACPI_STATUS(AE_ERROR); } sc->acpi_sstate = state; if (state != ACPI_STATE_S1) { acpi_sleep_machdep(sc, state); /* AcpiEnterSleepState() maybe incompleted, unlock here. */ AcpiUtReleaseMutex(ACPI_MTX_HARDWARE); /* Re-enable ACPI hardware on wakeup from sleep state 4. */ if (state >= ACPI_STATE_S4) { acpi_Disable(sc); acpi_Enable(sc); } } else { status = AcpiEnterSleepState((UINT8)state); if (status != AE_OK) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); break; } /* wait for the WAK_STS bit */ Count = 0; while (!(AcpiHwRegisterBitAccess(ACPI_READ, ACPI_MTX_LOCK, WAK_STS))) { AcpiOsSleep(0, 1); /* * Some BIOSes don't set WAK_STS at all, * give up waiting for wakeup if we time out. */ if (Count > 1000) { break; /* giving up */ } Count++; } } acpi_wakeup((UINT8)state); DEVICE_RESUME(root_bus); sc->acpi_sstate = ACPI_STATE_S0; acpi_enable_fixed_events(sc); break; case ACPI_STATE_S5: /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. 
*/ shutdown_nice(RB_POWEROFF); break; default: status = AE_BAD_PARAMETER; break; } return_ACPI_STATUS(status); } /* * Enable/Disable ACPI */ ACPI_STATUS acpi_Enable(struct acpi_softc *sc) { ACPI_STATUS status; u_int32_t flags; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; flags = ACPI_NO_ADDRESS_SPACE_INIT | ACPI_NO_HARDWARE_INIT | ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; if (!sc->acpi_enabled) { status = AcpiEnableSubsystem(flags); } else { status = AE_OK; } if (status == AE_OK) sc->acpi_enabled = 1; return_ACPI_STATUS(status); } ACPI_STATUS acpi_Disable(struct acpi_softc *sc) { ACPI_STATUS status; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; if (sc->acpi_enabled) { status = AcpiDisable(); } else { status = AE_OK; } if (status == AE_OK) sc->acpi_enabled = 0; return_ACPI_STATUS(status); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { FUNCTION_TRACE_U32(__func__, state); ACPI_LOCK; if (state >= ACPI_STATE_S0 && state <= ACPI_S_STATES_MAX) acpi_SetSleepState((struct acpi_softc *)arg, state); ACPI_UNLOCK; return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { FUNCTION_TRACE_U32(__func__, state); /* Well, what to do? 
:-) */ ACPI_LOCK; ACPI_UNLOCK; return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ UINT32 acpi_eventhandler_power_button_for_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; FUNCTION_TRACE(__func__); EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_power_button_sx); return_VALUE(INTERRUPT_HANDLED); } UINT32 acpi_eventhandler_power_button_for_wakeup(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; FUNCTION_TRACE(__func__); EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_power_button_sx); return_VALUE(INTERRUPT_HANDLED); } UINT32 acpi_eventhandler_sleep_button_for_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; FUNCTION_TRACE(__func__); EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_sleep_button_sx); return_VALUE(INTERRUPT_HANDLED); } UINT32 acpi_eventhandler_sleep_button_for_wakeup(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; FUNCTION_TRACE(__func__); EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_sleep_button_sx); return_VALUE(INTERRUPT_HANDLED); } /* * XXX This is kinda ugly, and should not be here. */ struct acpi_staticbuf { ACPI_BUFFER buffer; char data[512]; }; char * acpi_name(ACPI_HANDLE handle) { static struct acpi_staticbuf buf; ACPI_ASSERTLOCK; buf.buffer.Length = 512; buf.buffer.Pointer = &buf.data[0]; if (AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf.buffer) == AE_OK) return(buf.buffer.Pointer); return("(unknown path)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. 
*/ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((cp = getenv("debug.acpi.avoid")) == NULL) return(0); /* scan the avoid list checking for a match */ for (;;) { while ((*cp != 0) && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while ((cp[len] != 0) && !isspace(cp[len])) len++; - if (!strncmp(cp, np, len)) { - DEBUG_PRINT(TRACE_OBJECTS, ("avoiding '%s'\n", np)); + if (!strncmp(cp, np, len)) return(1); - } cp += len; } return(0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp; int len; if ((cp = getenv("debug.acpi.disable")) == NULL) return(0); if (!strcmp(cp, "all")) return(1); /* scan the disable list checking for a match */ for (;;) { while ((*cp != 0) && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while ((cp[len] != 0) && !isspace(cp[len])) len++; - if (!strncmp(cp, subsys, len)) { - DEBUG_PRINT(TRACE_OBJECTS, ("disabled '%s'\n", subsys)); + if (!strncmp(cp, subsys, len)) return(1); - } cp += len; } return(0); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the register/deregister * interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; int (* fn)(u_long cmd, caddr_t addr, void *arg); void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; /* * Register an ioctl handler. 
*/ int acpi_register_ioctl(u_long cmd, int (* fn)(u_long cmd, caddr_t addr, void *arg), void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return(ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); return(0); } /* * Deregister an ioctl handler. */ void acpi_deregister_ioctl(u_long cmd, int (* fn)(u_long cmd, caddr_t addr, void *arg)) { struct acpi_ioctl_hook *hp; TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if ((hp->cmd == cmd) && (hp->fn == fn)) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } } static int acpiopen(dev_t dev, int flag, int fmt, struct proc *p) { return(0); } static int acpiclose(dev_t dev, int flag, int fmt, struct proc *p) { return(0); } static int acpiioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, xerror, state; ACPI_LOCK; error = state = 0; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ if (acpi_ioctl_hooks_initted) { TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) { xerror = hp->fn(cmd, addr, hp->arg); if (xerror != 0) error = xerror; goto out; } } } /* * Core system ioctls. 
*/ switch (cmd) { case ACPIIO_ENABLE: if (ACPI_FAILURE(acpi_Enable(sc))) error = ENXIO; break; case ACPIIO_DISABLE: if (ACPI_FAILURE(acpi_Disable(sc))) error = ENXIO; break; case ACPIIO_SETSLPSTATE: if (!sc->acpi_enabled) { error = ENXIO; break; } state = *(int *)addr; if (state >= ACPI_STATE_S0 && state <= ACPI_S_STATES_MAX) { acpi_SetSleepState(sc, state); } else { error = EINVAL; } break; default: if (error == 0) error = EINVAL; break; } out: ACPI_UNLOCK; return(error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error; u_int new_state, old_state; old_state = *(u_int *)oidp->oid_arg1; if (old_state > ACPI_S_STATES_MAX) { strcpy(sleep_state, "unknown"); } else { strncpy(sleep_state, sleep_state_names[old_state], sizeof(sleep_state_names[old_state])); } error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { for (new_state = ACPI_STATE_S0; new_state <= ACPI_S_STATES_MAX; new_state++) { if (strncmp(sleep_state, sleep_state_names[new_state], sizeof(sleep_state)) == 0) break; } if ((new_state != old_state) && (new_state <= ACPI_S_STATES_MAX)) { *(u_int *)oidp->oid_arg1 = new_state; } else { error = EINVAL; } } return(error); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. 
*/ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, - {"ACPI_POWER", ACPI_POWER}, + {"ACPI_DEBUGGER", ACPI_DEBUGGER}, + {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, + {"ACPI_BUS", ACPI_BUS}, + {"ACPI_SYSTEM", ACPI_SYSTEM}, {"ACPI_POWER", ACPI_POWER}, {"ACPI_EC", ACPI_EC}, - {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUTTON", ACPI_BUTTON}, - {"ACPI_SYSTEM", ACPI_SYSTEM}, + {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, - {"ACPI_DEBUGGER", ACPI_DEBUGGER}, - {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, + {"ACPI_FAN", ACPI_FAN}, + {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {NULL, 0} }; static struct debugtag dbg_level[] = { - {"ACPI_OK", ACPI_OK}, - {"ACPI_INFO", ACPI_INFO}, - {"ACPI_WARN", ACPI_WARN}, - {"ACPI_ERROR", ACPI_ERROR}, - {"ACPI_FATAL", ACPI_FATAL}, - {"ACPI_DEBUG_OBJECT", ACPI_DEBUG_OBJECT}, - {"ACPI_ALL", ACPI_ALL}, - {"TRACE_THREADS", TRACE_THREADS}, - {"TRACE_PARSE", TRACE_PARSE}, - {"TRACE_DISPATCH", TRACE_DISPATCH}, - {"TRACE_LOAD", TRACE_LOAD}, - {"TRACE_EXEC", TRACE_EXEC}, - {"TRACE_NAMES", TRACE_NAMES}, - {"TRACE_OPREGION", TRACE_OPREGION}, - {"TRACE_BFIELD", TRACE_BFIELD}, - {"TRACE_TRASH", TRACE_TRASH}, - {"TRACE_TABLES", TRACE_TABLES}, - {"TRACE_FUNCTIONS", TRACE_FUNCTIONS}, - {"TRACE_VALUES", TRACE_VALUES}, - {"TRACE_OBJECTS", TRACE_OBJECTS}, - {"TRACE_ALLOCATIONS", TRACE_ALLOCATIONS}, - {"TRACE_RESOURCES", TRACE_RESOURCES}, - {"TRACE_IO", TRACE_IO}, - {"TRACE_INTERRUPTS", TRACE_INTERRUPTS}, - {"TRACE_USER_REQUESTS", TRACE_USER_REQUESTS}, - {"TRACE_PACKAGE", TRACE_PACKAGE}, - {"TRACE_MUTEX", TRACE_MUTEX}, - {"TRACE_INIT", 
TRACE_INIT}, - {"TRACE_ALL", TRACE_ALL}, - {"VERBOSE_AML_DISASSEMBLE", VERBOSE_AML_DISASSEMBLE}, - {"VERBOSE_INFO", VERBOSE_INFO}, - {"VERBOSE_TABLES", VERBOSE_TABLES}, - {"VERBOSE_EVENTS", VERBOSE_EVENTS}, - {"VERBOSE_ALL", VERBOSE_ALL}, + {"ACPI_LV_OK", ACPI_LV_OK}, + {"ACPI_LV_INFO", ACPI_LV_INFO}, + {"ACPI_LV_WARN", ACPI_LV_WARN}, + {"ACPI_LV_ERROR", ACPI_LV_ERROR}, + {"ACPI_LV_FATAL", ACPI_LV_FATAL}, + {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, + {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, + {"ACPI_LV_THREADS", ACPI_LV_THREADS}, + {"ACPI_LV_PARSE", ACPI_LV_PARSE}, + {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, + {"ACPI_LV_LOAD", ACPI_LV_LOAD}, + {"ACPI_LV_EXEC", ACPI_LV_EXEC}, + {"ACPI_LV_NAMES", ACPI_LV_NAMES}, + {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, + {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, + {"ACPI_LV_TRASH", ACPI_LV_TRASH}, + {"ACPI_LV_TABLES", ACPI_LV_TABLES}, + {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, + {"ACPI_LV_VALUES", ACPI_LV_VALUES}, + {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, + {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, + {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, + {"ACPI_LV_IO", ACPI_LV_IO}, + {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, + {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, + {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, + {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, + {"ACPI_LV_INIT", ACPI_LV_INIT}, + {"ACPI_LV_ALL", ACPI_LV_ALL}, + {"ACPI_DB_AML_DISASSEMBLE", ACPI_DB_AML_DISASSEMBLE}, + {"ACPI_DB_VERBOSE_INFO", ACPI_DB_VERBOSE_INFO}, + {"ACPI_DB_FULL_TABLES", ACPI_DB_FULL_TABLES}, + {"ACPI_DB_EVENTS", ACPI_DB_EVENTS}, + {"ACPI_DB_VERBOSE", ACPI_DB_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) { 
*flag |= tag[i].value; } else { *flag &= ~tag[i].value; } printf("ACPI_DEBUG: set '%s'\n", tag[i].name); } } cp = ep; } } static void acpi_set_debugging(void *junk) { char *cp; AcpiDbgLayer = 0; AcpiDbgLevel = 0; if ((cp = getenv("debug.acpi.layer")) != NULL) acpi_parse_debug(cp, &dbg_layer[0], &AcpiDbgLayer); if ((cp = getenv("debug.acpi.level")) != NULL) acpi_parse_debug(cp, &dbg_level[0], &AcpiDbgLevel); printf("ACPI debug layer 0x%x debug level 0x%x\n", AcpiDbgLayer, AcpiDbgLevel); } SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, NULL); #endif Index: head/sys/dev/acpica/acpi_battery.c =================================================================== --- head/sys/dev/acpica/acpi_battery.c (revision 82371) +++ head/sys/dev/acpica/acpi_battery.c (revision 82372) @@ -1,251 +1,250 @@ /*- * Copyright (c) 2000 Mitsuru IWASAKI * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_acpi.h" /* XXX trim includes */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi.h" #include #include -/* XXX should use our own malloc class */ -MALLOC_DECLARE(M_ACPIDEV); +MALLOC_DEFINE(M_ACPIBATT, "acpibatt", "ACPI generic battery data"); /* * ACPI Battery Abstruction Layer. */ struct acpi_batteries { TAILQ_ENTRY(acpi_batteries) link; struct acpi_battdesc battdesc; }; static TAILQ_HEAD(,acpi_batteries) acpi_batteries; static int acpi_batteries_initted = 0; static int acpi_batteries_units = 0; static struct acpi_battinfo acpi_battery_battinfo; static int acpi_battery_get_units(void) { return (acpi_batteries_units); } static int acpi_battery_get_battdesc(int logical_unit, struct acpi_battdesc *battdesc) { int i; struct acpi_batteries *bp; if (logical_unit < 0 || logical_unit >= acpi_batteries_units) { return (ENXIO); } i = 0; TAILQ_FOREACH(bp, &acpi_batteries, link) { if (logical_unit == i) { battdesc->type = bp->battdesc.type; battdesc->phys_unit = bp->battdesc.phys_unit; return (0); } i++; } return (ENXIO); } static int acpi_battery_get_battinfo(int unit, struct acpi_battinfo *battinfo) { int error; struct acpi_battdesc battdesc; error = 0; if (unit == -1) { error = acpi_cmbat_get_battinfo(-1, battinfo); goto out; } else { if ((error = acpi_battery_get_battdesc(unit, &battdesc)) != 0) { goto out; } switch (battdesc.type) { case 
ACPI_BATT_TYPE_CMBAT: error = acpi_cmbat_get_battinfo(battdesc.phys_unit, battinfo); break; default: error = ENXIO; break; } } out: return (error); } static int acpi_battery_ioctl(u_long cmd, caddr_t addr, void *arg) { int error; int logical_unit; union acpi_battery_ioctl_arg *ioctl_arg; ioctl_arg = (union acpi_battery_ioctl_arg *)addr; error = 0; switch (cmd) { case ACPIIO_BATT_GET_UNITS: *(int *)addr = acpi_battery_get_units(); break; case ACPIIO_BATT_GET_BATTDESC: logical_unit = ioctl_arg->unit; error = acpi_battery_get_battdesc(logical_unit, &ioctl_arg->battdesc); break; case ACPIIO_BATT_GET_BATTINFO: logical_unit = ioctl_arg->unit; error = acpi_battery_get_battinfo(logical_unit, &ioctl_arg->battinfo); break; default: error = EINVAL; break; } return (error); } static int acpi_battery_sysctl(SYSCTL_HANDLER_ARGS) { int val; int error; acpi_battery_get_battinfo(-1, &acpi_battery_battinfo); val = *(u_int *)oidp->oid_arg1; error = sysctl_handle_int(oidp, &val, 0, req); return (error); } static int acpi_battery_init(void) { device_t dev; struct acpi_softc *sc; int error; if ((dev = devclass_get_device(acpi_devclass, 0)) == NULL) { return (ENXIO); } if ((sc = device_get_softc(dev)) == NULL) { return (ENXIO); } error = 0; TAILQ_INIT(&acpi_batteries); acpi_batteries_initted = 1; if ((error = acpi_register_ioctl(ACPIIO_BATT_GET_UNITS, acpi_battery_ioctl, NULL)) != 0) { return (error); } if ((error = acpi_register_ioctl(ACPIIO_BATT_GET_BATTDESC, acpi_battery_ioctl, NULL)) != 0) { return (error); } if ((error = acpi_register_ioctl(ACPIIO_BATT_GET_BATTINFO, acpi_battery_ioctl, NULL)) != 0) { return (error); } sysctl_ctx_init(&sc->acpi_battery_sysctl_ctx); sc->acpi_battery_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_battery_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "battery", CTLFLAG_RD, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_battery_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_battery_sysctl_tree), OID_AUTO, "life", CTLTYPE_INT | CTLFLAG_RD, &acpi_battery_battinfo.cap, 0, 
acpi_battery_sysctl, "I", ""); SYSCTL_ADD_PROC(&sc->acpi_battery_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_battery_sysctl_tree), OID_AUTO, "time", CTLTYPE_INT | CTLFLAG_RD, &acpi_battery_battinfo.min, 0, acpi_battery_sysctl, "I", ""); SYSCTL_ADD_PROC(&sc->acpi_battery_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_battery_sysctl_tree), OID_AUTO, "state", CTLTYPE_INT | CTLFLAG_RD, &acpi_battery_battinfo.state, 0, acpi_battery_sysctl, "I", ""); SYSCTL_ADD_INT(&sc->acpi_battery_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_battery_sysctl_tree), OID_AUTO, "units", CTLFLAG_RD, &acpi_batteries_units, 0, ""); return (error); } int acpi_battery_register(int type, int phys_unit) { int error; struct acpi_batteries *bp; error = 0; - if ((bp = malloc(sizeof(*bp), M_ACPIDEV, M_NOWAIT)) == NULL) { + if ((bp = malloc(sizeof(*bp), M_ACPIBATT, M_NOWAIT)) == NULL) { return(ENOMEM); } bp->battdesc.type = type; bp->battdesc.phys_unit = phys_unit; if (acpi_batteries_initted == 0) { if ((error = acpi_battery_init()) != 0) { - free(bp, M_ACPIDEV); + free(bp, M_ACPIBATT); return(error); } } TAILQ_INSERT_TAIL(&acpi_batteries, bp, link); acpi_batteries_units++; return(0); } Index: head/sys/dev/acpica/acpi_cpu.c =================================================================== --- head/sys/dev/acpica/acpi_cpu.c (revision 82371) +++ head/sys/dev/acpica/acpi_cpu.c (revision 82372) @@ -1,380 +1,380 @@ /*- * Copyright (c) 2001 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include #include #include #include #include "acpi.h" #include /* * Support for ACPI Processor devices. * * Note that this only provides ACPI 1.0 support (with the exception of the * PSTATE_CNT field). 2.0 support will involve implementing _PTC, _PCT, * _PSS and _PPC. */ /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_PROCESSOR MODULE_NAME("PROCESSOR") struct acpi_cpu_softc { device_t cpu_dev; ACPI_HANDLE cpu_handle; u_int32_t cpu_id; /* CPU throttling control register */ struct resource *cpu_p_blk; #define CPU_GET_P_CNT(sc) (bus_space_read_4(rman_get_bustag((sc)->cpu_p_blk), \ rman_get_bushandle((sc)->cpu_p_blk), \ 0)) #define CPU_SET_P_CNT(sc, val) (bus_space_write_4(rman_get_bustag((sc)->cpu_p_blk), \ rman_get_bushandle((sc)->cpu_p_blk), \ 0, (val))) #define CPU_P_CNT_THT_EN (1<<4) }; /* * Speeds are stored in counts, from 1 - CPU_MAX_SPEED, and * reported to the user in tenths of a percent. 
*/ static u_int32_t cpu_duty_offset; static u_int32_t cpu_duty_width; #define CPU_MAX_SPEED (1 << cpu_duty_width) #define CPU_SPEED_PERCENT(x) ((1000 * (x)) / CPU_MAX_SPEED) #define CPU_SPEED_PRINTABLE(x) (CPU_SPEED_PERCENT(x) / 10),(CPU_SPEED_PERCENT(x) % 10) static u_int32_t cpu_smi_cmd; /* should be a generic way to do this */ static u_int8_t cpu_pstate_cnt; static u_int32_t cpu_current_state; static u_int32_t cpu_performance_state; static u_int32_t cpu_economy_state; static u_int32_t cpu_max_state; static device_t *cpu_devices; static int cpu_ndevices; static struct sysctl_ctx_list acpi_cpu_sysctl_ctx; static struct sysctl_oid *acpi_cpu_sysctl_tree; static int acpi_cpu_probe(device_t dev); static int acpi_cpu_attach(device_t dev); static void acpi_cpu_init_throttling(void *arg); static void acpi_cpu_set_speed(u_int32_t speed); static void acpi_cpu_powerprofile(void *arg); static int acpi_cpu_speed_sysctl(SYSCTL_HANDLER_ARGS); static device_method_t acpi_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_cpu_probe), DEVMETHOD(device_attach, acpi_cpu_attach), {0, 0} }; static driver_t acpi_cpu_driver = { "acpi_cpu", acpi_cpu_methods, sizeof(struct acpi_cpu_softc), }; devclass_t acpi_cpu_devclass; DRIVER_MODULE(acpi_cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0); static int acpi_cpu_probe(device_t dev) { if (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) { device_set_desc(dev, "CPU"); /* XXX get more verbose description? */ return(0); } return(ENXIO); } static int acpi_cpu_attach(device_t dev) { struct acpi_cpu_softc *sc; struct acpi_softc *acpi_sc; ACPI_OBJECT processor; ACPI_BUFFER buf; ACPI_STATUS status; u_int32_t p_blk; u_int32_t p_blk_length; u_int32_t duty_end; int rid; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; sc = device_get_softc(dev); sc->cpu_dev = dev; sc->cpu_handle = acpi_get_handle(dev); /* * Get global parameters from the FADT. 
*/ if (device_get_unit(sc->cpu_dev) == 0) { cpu_duty_offset = AcpiGbl_FADT->DutyOffset; cpu_duty_width = AcpiGbl_FADT->DutyWidth; cpu_smi_cmd = AcpiGbl_FADT->SmiCmd; cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt; /* validate the offset/width */ duty_end = cpu_duty_offset + cpu_duty_width - 1; /* check that it fits */ if (duty_end > 31) { printf("acpi_cpu: CLK_VAL field overflows P_CNT register\n"); cpu_duty_width = 0; } /* check for overlap with the THT_EN bit */ if ((cpu_duty_offset <= 4) && (duty_end >= 4)) { printf("acpi_cpu: CLK_VAL field overlaps THT_EN bit\n"); cpu_duty_width = 0; } /* * Start the throttling process once the probe phase completes, if we think that * it's going to be useful. If the duty width value is zero, there are no significant * bits in the register and thus no throttled states. */ if (cpu_duty_width > 0) { AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_init_throttling, NULL); acpi_sc = acpi_device_get_parent_softc(dev); sysctl_ctx_init(&acpi_cpu_sysctl_ctx); acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu", CTLFLAG_RD, 0, ""); SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "max_speed", CTLFLAG_RD, &cpu_max_state, 0, "maximum CPU speed"); SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "current_speed", CTLFLAG_RD, &cpu_current_state, 0, "current CPU speed"); SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "performance_speed", CTLTYPE_INT | CTLFLAG_RW, &cpu_performance_state, 0, acpi_cpu_speed_sysctl, "I", ""); SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "economy_speed", CTLTYPE_INT | CTLFLAG_RW, &cpu_economy_state, 0, acpi_cpu_speed_sysctl, "I", ""); } } /* * Get the processor object. 
*/ buf.Pointer = &processor; buf.Length = sizeof(processor); if (ACPI_FAILURE(status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf))) { device_printf(sc->cpu_dev, "couldn't get Processor object - %s\n", AcpiFormatException(status)); return_VALUE(ENXIO); } if (processor.Type != ACPI_TYPE_PROCESSOR) { device_printf(sc->cpu_dev, "Processor object has bad type %d\n", processor.Type); return_VALUE(ENXIO); } sc->cpu_id = processor.Processor.ProcId; /* * If it looks like we support throttling, find this CPU's P_BLK. * * Note that some systems seem to duplicate the P_BLK pointer across * multiple CPUs, so not getting the resource is not fatal. * * XXX should support _PTC here as well, once we work out how to parse it. * * XXX is it valid to assume that the P_BLK must be 6 bytes long? */ if (cpu_duty_width > 0) { p_blk = processor.Processor.PblkAddress; p_blk_length = processor.Processor.PblkLength; /* allocate bus space if possible */ if ((p_blk > 0) && (p_blk_length == 6)) { rid = 0; bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT, rid, p_blk, p_blk_length); sc->cpu_p_blk = bus_alloc_resource(sc->cpu_dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, RF_ACTIVE); - DEBUG_PRINT(TRACE_IO, ("acpi_cpu%d: throttling with P_BLK at 0x%x/%d%s\n", - device_get_unit(sc->cpu_dev), p_blk, p_blk_length, - sc->cpu_p_blk ? "" : " (shadowed)")); + ACPI_DEBUG_PRINT((ACPI_DB_IO, "acpi_cpu%d: throttling with P_BLK at 0x%x/%d%s\n", + device_get_unit(sc->cpu_dev), p_blk, p_blk_length, + sc->cpu_p_blk ? "" : " (shadowed)")); } } return_VALUE(0); } /* * Call this *after* all CPUs have been attached. * * Takes the ACPI lock to avoid fighting anyone over the SMI command * port. Could probably lock less code. 
*/ static void acpi_cpu_init_throttling(void *arg) { ACPI_LOCK; /* get set of CPU devices */ devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices); /* initialise throttling states */ cpu_max_state = CPU_MAX_SPEED; cpu_performance_state = cpu_max_state; cpu_economy_state = cpu_performance_state / 2; if (cpu_economy_state == 0) /* 0 is 'reserved' */ cpu_economy_state++; /* register performance profile change handler */ EVENTHANDLER_REGISTER(powerprofile_change, acpi_cpu_powerprofile, NULL, 0); /* if ACPI 2.0+, signal platform that we are taking over throttling */ if (cpu_pstate_cnt != 0) { /* XXX should be a generic interface for this */ AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8); } ACPI_UNLOCK; /* set initial speed */ acpi_cpu_powerprofile(NULL); printf("acpi_cpu: CPU throttling enabled, %d steps from 100%% to %d.%d%%\n", CPU_MAX_SPEED, CPU_SPEED_PRINTABLE(1)); } /* * Set CPUs to the new state. * * Must be called with the ACPI lock held. */ static void acpi_cpu_set_speed(u_int32_t speed) { struct acpi_cpu_softc *sc; int i; u_int32_t p_cnt, clk_val; ACPI_ASSERTLOCK; /* iterate over processors */ for (i = 0; i < cpu_ndevices; i++) { sc = device_get_softc(cpu_devices[i]); if (sc->cpu_p_blk == NULL) continue; /* get the current P_CNT value and disable throttling */ p_cnt = CPU_GET_P_CNT(sc); p_cnt &= ~CPU_P_CNT_THT_EN; CPU_SET_P_CNT(sc, p_cnt); /* if we're at maximum speed, that's all */ if (speed < CPU_MAX_SPEED) { /* mask the old CLK_VAL off and or-in the new value */ clk_val = CPU_MAX_SPEED << cpu_duty_offset; p_cnt &= ~clk_val; p_cnt |= (speed << cpu_duty_offset); /* write the new P_CNT value and then enable throttling */ CPU_SET_P_CNT(sc, p_cnt); p_cnt |= CPU_P_CNT_THT_EN; CPU_SET_P_CNT(sc, p_cnt); } device_printf(sc->cpu_dev, "set speed to %d.%d%%\n", CPU_SPEED_PRINTABLE(speed)); } cpu_current_state = speed; } /* * Power profile change hook. * * Uses the ACPI lock to avoid reentrancy. 
*/ static void acpi_cpu_powerprofile(void *arg) { u_int32_t new; ACPI_LOCK; new = (powerprofile_get_state() == POWERPROFILE_PERFORMANCE) ? cpu_performance_state : cpu_economy_state; if (cpu_current_state != new) acpi_cpu_set_speed(new); ACPI_UNLOCK; } /* * Handle changes in the performance/ecomony CPU settings. * * Does not need the ACPI lock (although setting *argp should * probably be atomic). */ static int acpi_cpu_speed_sysctl(SYSCTL_HANDLER_ARGS) { u_int32_t *argp; u_int32_t arg; int error; argp = (u_int32_t *)oidp->oid_arg1; arg = *argp; error = sysctl_handle_int(oidp, &arg, 0, req); /* error or no new value */ if ((error != 0) || (req->newptr == NULL)) return(error); /* range check */ if ((arg < 1) || (arg > cpu_max_state)) return(EINVAL); /* set new value and possibly switch */ *argp = arg; acpi_cpu_powerprofile(NULL); return(0); } Index: head/sys/dev/acpica/acpi_ec.c =================================================================== --- head/sys/dev/acpica/acpi_ec.c (revision 82371) +++ head/sys/dev/acpica/acpi_ec.c (revision 82372) @@ -1,817 +1,817 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /****************************************************************************** * * 1. Copyright Notice * * Some or all of this work - Copyright (c) 1999, Intel Corp. All rights * reserved. * * 2. License * * 2.1. This is your license from Intel Corp. under its intellectual property * rights. You may have additional license terms from the party that provided * you this software, covering your right to use that party's intellectual * property rights. * * 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a * copy of the source code appearing in this file ("Covered Code") an * irrevocable, perpetual, worldwide license under Intel's copyrights in the * base code distributed originally by Intel ("Original Intel Code") to copy, * make derivatives, distribute, use and display any portion of the Covered * Code in any form, with the right to sublicense such rights; and * * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent * license (with the right to sublicense), under only those claims of Intel * patents that are infringed by the Original Intel Code, to make, use, sell, * offer to sell, and import the Covered Code and derivative works thereof * solely to the minimum extent necessary to exercise the above copyright * license, and in no event shall the patent license extend to any additions * to or modifications of the Original Intel Code. 
No other license or right * is granted directly or by implication, estoppel or otherwise; * * The above copyright and patent license is granted only if the following * conditions are met: * * 3. Conditions * * 3.1. Redistribution of Source with Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification with rights to further distribute source must include * the above Copyright Notice, the above License, this list of Conditions, * and the following Disclaimer and Export Compliance provision. In addition, * Licensee must cause all Covered Code to which Licensee contributes to * contain a file documenting the changes Licensee made to create that Covered * Code and the date of any change. Licensee must include in that file the * documentation of any changes made by any predecessor Licensee. Licensee * must include a prominent statement that the modification is derived, * directly or indirectly, from Original Intel Code. * * 3.2. Redistribution of Source with no Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification without rights to further distribute source must * include the following Disclaimer and Export Compliance provision in the * documentation and/or other materials provided with distribution. In * addition, Licensee may not authorize further sublicense of source of any * portion of the Covered Code, and must include terms to the effect that the * license from Licensee to its licensee is limited to the intellectual * property embodied in the software Licensee provides to its licensee, and * not to intellectual property embodied in modifications its licensee may * make. * * 3.3. Redistribution of Executable. 
Redistribution in executable form of any * substantial portion of the Covered Code or modification must reproduce the * above Copyright Notice, and the following Disclaimer and Export Compliance * provision in the documentation and/or other materials provided with the * distribution. * * 3.4. Intel retains all right, title, and interest in and to the Original * Intel Code. * * 3.5. Neither the name Intel nor any other trademark owned or controlled by * Intel shall be used in advertising or otherwise to promote the sale, use or * other dealings in products derived from or relating to the Covered Code * without prior written authorization from Intel. * * 4. Disclaimer and Export Compliance * * 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE, * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY * UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A * PARTICULAR PURPOSE. * * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT, * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY * LIMITED REMEDY. * * 4.3. Licensee shall not export, either directly or indirectly, any of this * software or system incorporating such software without first obtaining any * required license or other approval from the U. S. 
Department of Commerce or * any other agency or department of the United States Government. In the * event Licensee exports any such software from the United States or * re-exports any such software from a foreign destination, Licensee shall * ensure that the distribution and export/re-export of the software is in * compliance with all laws, regulations, orders, or other restrictions of the * U.S. Export Administration Regulations. Licensee agrees that neither it nor * any of its subsidiaries will export/re-export any technical data, process, * software, or service, directly or indirectly, to any country for which the * United States government or any agency thereof requires an export license, * other governmental approval, or letter of assurance, without first obtaining * such license, approval or letter. * *****************************************************************************/ #include "opt_acpi.h" #include #include #include #include #include #include #include "acpi.h" #include /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_EC MODULE_NAME("EC") /* * EC_COMMAND: * ----------- */ typedef UINT8 EC_COMMAND; #define EC_COMMAND_UNKNOWN ((EC_COMMAND) 0x00) #define EC_COMMAND_READ ((EC_COMMAND) 0x80) #define EC_COMMAND_WRITE ((EC_COMMAND) 0x81) #define EC_COMMAND_BURST_ENABLE ((EC_COMMAND) 0x82) #define EC_COMMAND_BURST_DISABLE ((EC_COMMAND) 0x83) #define EC_COMMAND_QUERY ((EC_COMMAND) 0x84) /* * EC_STATUS: * ---------- * The encoding of the EC status register is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the output buffer is full). * +-+-+-+-+-+-+-+-+ * |7|6|5|4|3|2|1|0| * +-+-+-+-+-+-+-+-+ * | | | | | | | | * | | | | | | | +- Output Buffer Full? * | | | | | | +--- Input Buffer Full? * | | | | | +----- * | | | | +------- Data Register is Command Byte? * | | | +--------- Burst Mode Enabled? * | | +----------- SCI Event? * | +------------- SMI Event? 
* +--------------- * */ typedef UINT8 EC_STATUS; #define EC_FLAG_OUTPUT_BUFFER ((EC_STATUS) 0x01) #define EC_FLAG_INPUT_BUFFER ((EC_STATUS) 0x02) #define EC_FLAG_BURST_MODE ((EC_STATUS) 0x10) #define EC_FLAG_SCI ((EC_STATUS) 0x20) /* * EC_EVENT: * --------- */ typedef UINT8 EC_EVENT; #define EC_EVENT_UNKNOWN ((EC_EVENT) 0x00) #define EC_EVENT_OUTPUT_BUFFER_FULL ((EC_EVENT) 0x01) #define EC_EVENT_INPUT_BUFFER_EMPTY ((EC_EVENT) 0x02) #define EC_EVENT_SCI ((EC_EVENT) 0x20) /* * Register access primitives */ #define EC_GET_DATA(sc) \ bus_space_read_1((sc)->ec_data_tag, (sc)->ec_data_handle, 0) #define EC_SET_DATA(sc, v) \ bus_space_write_1((sc)->ec_data_tag, (sc)->ec_data_handle, 0, (v)) #define EC_GET_CSR(sc) \ bus_space_read_1((sc)->ec_csr_tag, (sc)->ec_csr_handle, 0) #define EC_SET_CSR(sc, v) \ bus_space_write_1((sc)->ec_csr_tag, (sc)->ec_csr_handle, 0, (v)) /* * Driver softc. */ struct acpi_ec_softc { device_t ec_dev; ACPI_HANDLE ec_handle; UINT32 ec_gpebit; int ec_data_rid; struct resource *ec_data_res; bus_space_tag_t ec_data_tag; bus_space_handle_t ec_data_handle; int ec_csr_rid; struct resource *ec_csr_res; bus_space_tag_t ec_csr_tag; bus_space_handle_t ec_csr_handle; int ec_locked; int ec_pendquery; int ec_csrvalue; }; #define EC_LOCK_TIMEOUT 1000 /* 1ms */ static __inline ACPI_STATUS EcLock(struct acpi_ec_softc *sc) { ACPI_STATUS status; status = AcpiAcquireGlobalLock(); (sc)->ec_locked = 1; return(status); } static __inline void EcUnlock(struct acpi_ec_softc *sc) { (sc)->ec_locked = 0; AcpiReleaseGlobalLock(); } static __inline int EcIsLocked(struct acpi_ec_softc *sc) { return((sc)->ec_locked != 0); } typedef struct { EC_COMMAND Command; UINT8 Address; UINT8 Data; } EC_REQUEST; static void EcGpeHandler(void *Context); static ACPI_STATUS EcSpaceSetup(ACPI_HANDLE Region, UINT32 Function, void *Context, void **return_Context); static ACPI_STATUS EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 width, UINT32 *Value, void *Context, void 
*RegionContext); static ACPI_STATUS EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event); static ACPI_STATUS EcQuery(struct acpi_ec_softc *sc, UINT8 *Data); static ACPI_STATUS EcTransaction(struct acpi_ec_softc *sc, EC_REQUEST *EcRequest); static ACPI_STATUS EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data); static ACPI_STATUS EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data); static void acpi_ec_identify(driver_t driver, device_t bus); static int acpi_ec_probe(device_t dev); static int acpi_ec_attach(device_t dev); static device_method_t acpi_ec_methods[] = { /* Device interface */ DEVMETHOD(device_identify, acpi_ec_identify), DEVMETHOD(device_probe, acpi_ec_probe), DEVMETHOD(device_attach, acpi_ec_attach), {0, 0} }; static driver_t acpi_ec_driver = { "acpi_ec", acpi_ec_methods, sizeof(struct acpi_ec_softc), }; devclass_t acpi_ec_devclass; DRIVER_MODULE(acpi_ec, acpi, acpi_ec_driver, acpi_ec_devclass, 0, 0); /* * Look for an ECDT table and if we find one, set up a default EC * space handler to catch possible attempts to access EC space before * we have a real driver instance in place. * We're not really an identify routine, but because we get called * before most other things, this works out OK. */ static void acpi_ec_identify(driver_t driver, device_t bus) { FUNCTION_TRACE(__func__); /* XXX implement - need an ACPI 2.0 system to test this */ return_VOID; } /* * We could setup resources in the probe routine in order to have them printed * when the device is attached. 
*/ static int acpi_ec_probe(device_t dev) { if ((acpi_get_type(dev) == ACPI_TYPE_DEVICE) && !acpi_disabled("ec") && acpi_MatchHid(dev, "PNP0C09")) { /* * Set device description */ device_set_desc(dev, "embedded controller"); return(0); } return(ENXIO); } static int acpi_ec_attach(device_t dev) { struct acpi_ec_softc *sc; ACPI_STATUS Status; FUNCTION_TRACE(__func__); /* * Fetch/initialise softc */ sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->ec_dev = dev; sc->ec_handle = acpi_get_handle(dev); /* * Evaluate resources */ - DEBUG_PRINT(TRACE_RESOURCES, ("parsing EC resources\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "parsing EC resources\n")); acpi_parse_resources(sc->ec_dev, sc->ec_handle, &acpi_res_parse_set); /* * Attach bus resources */ sc->ec_data_rid = 0; if ((sc->ec_data_res = bus_alloc_resource(sc->ec_dev, SYS_RES_IOPORT, &sc->ec_data_rid, 0, ~0, 1, RF_ACTIVE)) == NULL) { device_printf(dev, "can't allocate data port\n"); return_VALUE(ENXIO); } sc->ec_data_tag = rman_get_bustag(sc->ec_data_res); sc->ec_data_handle = rman_get_bushandle(sc->ec_data_res); sc->ec_csr_rid = 1; if ((sc->ec_csr_res = bus_alloc_resource(sc->ec_dev, SYS_RES_IOPORT, &sc->ec_csr_rid, 0, ~0, 1, RF_ACTIVE)) == NULL) { device_printf(dev, "can't allocate command/status port\n"); return_VALUE(ENXIO); } sc->ec_csr_tag = rman_get_bustag(sc->ec_csr_res); sc->ec_csr_handle = rman_get_bushandle(sc->ec_csr_res); /* * Install GPE handler * * Evaluate the _GPE method to find the GPE bit used by the EC to signal * status (SCI). */ - DEBUG_PRINT(TRACE_RESOURCES, ("attaching GPE\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "attaching GPE\n")); if ((Status = acpi_EvaluateInteger(sc->ec_handle, "_GPE", &sc->ec_gpebit)) != AE_OK) { device_printf(dev, "can't evaluate _GPE - %s\n", AcpiFormatException(Status)); return_VALUE(ENXIO); } /* * Install a handler for this EC's GPE bit. 
Note that EC SCIs are * treated as both edge- and level-triggered interrupts; in other words * we clear the status bit immediately after getting an EC-SCI, then * again after we're done processing the event. This guarantees that * events we cause while performing a transaction (e.g. IBE/OBF) get * cleared before re-enabling the GPE. */ if ((Status = AcpiInstallGpeHandler(sc->ec_gpebit, ACPI_EVENT_LEVEL_TRIGGERED | ACPI_EVENT_EDGE_TRIGGERED, EcGpeHandler, sc)) != AE_OK) { device_printf(dev, "can't install GPE handler for %s - %s\n", acpi_name(sc->ec_handle), AcpiFormatException(Status)); return_VALUE(ENXIO); } /* * Install address space handler */ - DEBUG_PRINT(TRACE_RESOURCES, ("attaching address space handler\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "attaching address space handler\n")); if ((Status = AcpiInstallAddressSpaceHandler(sc->ec_handle, ACPI_ADR_SPACE_EC, EcSpaceHandler, EcSpaceSetup, sc)) != AE_OK) { device_printf(dev, "can't install address space handler for %s - %s\n", acpi_name(sc->ec_handle), AcpiFormatException(Status)); panic("very suck"); return_VALUE(ENXIO); } - DEBUG_PRINT(TRACE_RESOURCES, ("attach complete\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "attach complete\n")); return_VALUE(0); } static void EcGpeQueryHandler(void *Context) { struct acpi_ec_softc *sc = (struct acpi_ec_softc *)Context; UINT8 Data; ACPI_STATUS Status; char qxx[5]; FUNCTION_TRACE(__func__); for (;;) { /* * Check EC_SCI. * * Bail out if the EC_SCI bit of the status register is not set. * Note that this function should only be called when * this bit is set (polling is used to detect IBE/OBF events). * * It is safe to do this without locking the controller, as it's * OK to call EcQuery when there's no data ready; in the worst * case we should just find nothing waiting for us and bail. 
*/ if (!(EC_GET_CSR(sc) & EC_EVENT_SCI)) break; /* * Find out why the EC is signalling us */ Status = EcQuery(sc, &Data); /* * If we failed to get anything from the EC, give up */ if (Status != AE_OK) { device_printf(sc->ec_dev, "GPE query failed - %s\n", AcpiFormatException(Status)); break; } /* * Evaluate _Qxx to respond to the controller. */ sprintf(qxx, "_Q%02x", Data); strupr(qxx); Status = AcpiEvaluateObject(sc->ec_handle, qxx, NULL, NULL); /* * Ignore spurious query requests. */ if (Status != AE_OK && (Data != 0 || Status != AE_NOT_FOUND)) { device_printf(sc->ec_dev, "evaluation of GPE query method %s failed - %s\n", qxx, AcpiFormatException(Status)); } } /* I know I request Level trigger cleanup */ if(AcpiClearEvent(sc->ec_gpebit,ACPI_EVENT_GPE) != AE_OK) printf("EcGpeQueryHandler:ClearEvent Failed\n"); if(AcpiEnableEvent(sc->ec_gpebit,ACPI_EVENT_GPE) != AE_OK) printf("EcGpeQueryHandler:EnableEvent Failed\n"); return_VOID; } /* * Handle a GPE sent to us. */ static void EcGpeHandler(void *Context) { struct acpi_ec_softc *sc = Context; int csrvalue; /* * If EC is locked, the intr must process EcRead/Write wait only. * Query request must be pending. */ if (EcIsLocked(sc)){ csrvalue = EC_GET_CSR(sc); if (csrvalue & EC_EVENT_SCI) sc->ec_pendquery = 1; if ((csrvalue & EC_FLAG_OUTPUT_BUFFER) || !(csrvalue & EC_FLAG_INPUT_BUFFER)) { sc->ec_csrvalue = csrvalue; wakeup((void *)&sc->ec_csrvalue); } }else{ /* Queue GpeQuery Handler */ if (AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, EcGpeQueryHandler,Context) != AE_OK){ printf("QueryHandler Queuing Failed\n"); } } return; } static ACPI_STATUS EcSpaceSetup(ACPI_HANDLE Region, UINT32 Function, void *Context, void **RegionContext) { FUNCTION_TRACE(__func__); /* * Just pass the context through, there's nothing to do here. 
*/ *RegionContext = Context; return_ACPI_STATUS(AE_OK); } static ACPI_STATUS EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 width, UINT32 *Value, void *Context, void *RegionContext) { struct acpi_ec_softc *sc = (struct acpi_ec_softc *)Context; ACPI_STATUS Status = AE_OK; EC_REQUEST EcRequest; int i; FUNCTION_TRACE_U32(__func__, (UINT32)Address); if ((Address > 0xFF) || (width % 8 != 0) || (Value == NULL) || (Context == NULL)) return_ACPI_STATUS(AE_BAD_PARAMETER); switch (Function) { case ACPI_READ_ADR_SPACE: EcRequest.Command = EC_COMMAND_READ; EcRequest.Address = Address; (*Value) = 0; break; case ACPI_WRITE_ADR_SPACE: EcRequest.Command = EC_COMMAND_WRITE; EcRequest.Address = Address; break; default: device_printf(sc->ec_dev, "invalid Address Space function %d\n", Function); return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Perform the transaction. */ for (i = 0; i < width; i += 8) { if (Function == ACPI_READ_ADR_SPACE) EcRequest.Data = 0; else EcRequest.Data = (UINT8)((*Value) >> i); if ((Status = EcTransaction(sc, &EcRequest)) != AE_OK) break; (*Value) |= (UINT32)EcRequest.Data << i; if (++EcRequest.Address == 0) return_ACPI_STATUS(AE_BAD_PARAMETER); } return_ACPI_STATUS(Status); } /* * Wait for an event interrupt for a specific condition. */ static ACPI_STATUS EcWaitEventIntr(struct acpi_ec_softc *sc, EC_EVENT Event) { EC_STATUS EcStatus; int i; FUNCTION_TRACE_U32(__func__, (UINT32)Event); /* XXX this should test whether interrupts are available some other way */ if(cold) return_ACPI_STATUS(EcWaitEvent(sc, Event)); if (!EcIsLocked(sc)) device_printf(sc->ec_dev, "EcWaitEventIntr called without EC lock!\n"); EcStatus = EC_GET_CSR(sc); /* XXX waiting too long? */ for(i = 0; i < 10; i++){ /* * Check EC status against the desired event. 
*/ if ((Event == EC_EVENT_OUTPUT_BUFFER_FULL) && (EcStatus & EC_FLAG_OUTPUT_BUFFER)) return_ACPI_STATUS(AE_OK); if ((Event == EC_EVENT_INPUT_BUFFER_EMPTY) && !(EcStatus & EC_FLAG_INPUT_BUFFER)) return_ACPI_STATUS(AE_OK); sc->ec_csrvalue = 0; if (ACPI_MSLEEP(&sc->ec_csrvalue, &acpi_mutex, PZERO, "EcWait", 1) != EWOULDBLOCK){ EcStatus = sc->ec_csrvalue; }else{ EcStatus = EC_GET_CSR(sc); } } return_ACPI_STATUS(AE_ERROR); } static ACPI_STATUS EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event) { EC_STATUS EcStatus; UINT32 i = 0; if (!EcIsLocked(sc)) device_printf(sc->ec_dev, "EcWaitEvent called without EC lock!\n"); /* * Stall 1us: * ---------- * Stall for 1 microsecond before reading the status register * for the first time. This allows the EC to set the IBF/OBF * bit to its proper state. * * XXX it is not clear why we read the CSR twice. */ AcpiOsStall(1); EcStatus = EC_GET_CSR(sc); /* * Wait For Event: * --------------- * Poll the EC status register to detect completion of the last * command. Wait up to 10ms (in 10us chunks) for this to occur. */ for (i = 0; i < 1000; i++) { EcStatus = EC_GET_CSR(sc); if ((Event == EC_EVENT_OUTPUT_BUFFER_FULL) && (EcStatus & EC_FLAG_OUTPUT_BUFFER)) return(AE_OK); if ((Event == EC_EVENT_INPUT_BUFFER_EMPTY) && !(EcStatus & EC_FLAG_INPUT_BUFFER)) return(AE_OK); AcpiOsStall(10); } return(AE_ERROR); } static ACPI_STATUS EcQuery(struct acpi_ec_softc *sc, UINT8 *Data) { ACPI_STATUS Status; if ((Status = EcLock(sc)) != AE_OK) return(Status); EC_SET_CSR(sc, EC_COMMAND_QUERY); Status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL); if (Status == AE_OK) *Data = EC_GET_DATA(sc); EcUnlock(sc); if (Status != AE_OK) device_printf(sc->ec_dev, "timeout waiting for EC to respond to EC_COMMAND_QUERY\n"); return(Status); } static ACPI_STATUS EcTransaction(struct acpi_ec_softc *sc, EC_REQUEST *EcRequest) { ACPI_STATUS Status; /* * Lock the EC */ if ((Status = EcLock(sc)) != AE_OK) return(Status); /* * Perform the transaction. 
*/ switch (EcRequest->Command) { case EC_COMMAND_READ: Status = EcRead(sc, EcRequest->Address, &(EcRequest->Data)); break; case EC_COMMAND_WRITE: Status = EcWrite(sc, EcRequest->Address, &(EcRequest->Data)); break; default: Status = AE_SUPPORT; break; } /* * Unlock the EC */ EcUnlock(sc); /* * Clear & Re-Enable the EC GPE: * ----------------------------- * 'Consume' any EC GPE events that we generated while performing * the transaction (e.g. IBF/OBF). Clearing the GPE here shouldn't * have an adverse affect on outstanding EC-SCI's, as the source * (EC-SCI) will still be high and thus should trigger the GPE * immediately after we re-enabling it. */ if (sc->ec_pendquery){ if(AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, EcGpeQueryHandler, sc) != AE_OK) printf("Pend Query Queuing Failed\n"); sc->ec_pendquery = 0; } if (AcpiClearEvent(sc->ec_gpebit, ACPI_EVENT_GPE) != AE_OK) device_printf(sc->ec_dev, "EcRequest: Unable to clear the EC GPE.\n"); if (AcpiEnableEvent(sc->ec_gpebit, ACPI_EVENT_GPE) != AE_OK) device_printf(sc->ec_dev, "EcRequest: Unable to re-enable the EC GPE.\n"); return(Status); } static ACPI_STATUS EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data) { ACPI_STATUS Status; if (!EcIsLocked(sc)) device_printf(sc->ec_dev, "EcRead called without EC lock!\n"); /*EcBurstEnable(EmbeddedController);*/ EC_SET_CSR(sc, EC_COMMAND_READ); if ((Status = EcWaitEventIntr(sc, EC_EVENT_INPUT_BUFFER_EMPTY)) != AE_OK) { device_printf(sc->ec_dev, "EcRead: Failed waiting for EC to process read command.\n"); return(Status); } EC_SET_DATA(sc, Address); if ((Status = EcWaitEventIntr(sc, EC_EVENT_OUTPUT_BUFFER_FULL)) != AE_OK) { device_printf(sc->ec_dev, "EcRead: Failed waiting for EC to send data.\n"); return(Status); } (*Data) = EC_GET_DATA(sc); /*EcBurstDisable(EmbeddedController);*/ return(AE_OK); } static ACPI_STATUS EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data) { ACPI_STATUS Status; if (!EcIsLocked(sc)) device_printf(sc->ec_dev, "EcWrite called without 
EC lock!\n"); /*EcBurstEnable(EmbeddedController);*/ EC_SET_CSR(sc, EC_COMMAND_WRITE); if ((Status = EcWaitEventIntr(sc, EC_EVENT_INPUT_BUFFER_EMPTY)) != AE_OK) { device_printf(sc->ec_dev, "EcWrite: Failed waiting for EC to process write command.\n"); return(Status); } EC_SET_DATA(sc, Address); if ((Status = EcWaitEventIntr(sc, EC_EVENT_INPUT_BUFFER_EMPTY)) != AE_OK) { device_printf(sc->ec_dev, "EcRead: Failed waiting for EC to process address.\n"); return(Status); } EC_SET_DATA(sc, *Data); if ((Status = EcWaitEventIntr(sc, EC_EVENT_INPUT_BUFFER_EMPTY)) != AE_OK) { device_printf(sc->ec_dev, "EcWrite: Failed waiting for EC to process data.\n"); return(Status); } /*EcBurstDisable(EmbeddedController);*/ return(AE_OK); } Index: head/sys/dev/acpica/acpi_isa.c =================================================================== --- head/sys/dev/acpica/acpi_isa.c (revision 82371) +++ head/sys/dev/acpica/acpi_isa.c (revision 82372) @@ -1,479 +1,479 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * ISA bus enumerator using PnP HIDs from ACPI space. */ #include "opt_acpi.h" #include #include #include #include #include #include "acpi.h" #include /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS MODULE_NAME("ISA") #define PNP_HEXTONUM(c) ((c) >= 'a' \ ? (c) - 'a' + 10 \ : ((c) >= 'A' \ ? (c) - 'A' + 10 \ : (c) - '0')) #define PNP_EISAID(s) \ ((((s[0] - '@') & 0x1f) << 2) \ | (((s[1] - '@') & 0x18) >> 3) \ | (((s[1] - '@') & 0x07) << 13) \ | (((s[2] - '@') & 0x1f) << 8) \ | (PNP_HEXTONUM(s[4]) << 16) \ | (PNP_HEXTONUM(s[3]) << 20) \ | (PNP_HEXTONUM(s[6]) << 24) \ | (PNP_HEXTONUM(s[5]) << 28)) static void acpi_isa_set_init(device_t dev, void **context); static void acpi_isa_set_done(device_t dev, void *context); static void acpi_isa_set_ioport(device_t dev, void *context, u_int32_t base, u_int32_t length); static void acpi_isa_set_iorange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align); static void acpi_isa_set_memory(device_t dev, void *context, u_int32_t base, u_int32_t length); static void acpi_isa_set_memoryrange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align); static void acpi_isa_set_irq(device_t dev, void *context, u_int32_t irq); static void acpi_isa_set_drq(device_t dev, void *context, u_int32_t drq); static void acpi_isa_set_start_dependant(device_t dev, void *context, int preference); static void 
acpi_isa_set_end_dependant(device_t dev, void *context); static struct acpi_parse_resource_set acpi_isa_parse_set = { acpi_isa_set_init, acpi_isa_set_done, acpi_isa_set_ioport, acpi_isa_set_iorange, acpi_isa_set_memory, acpi_isa_set_memoryrange, acpi_isa_set_irq, acpi_isa_set_drq, acpi_isa_set_start_dependant, acpi_isa_set_end_dependant }; #define MAXDEP 8 struct acpi_isa_context { int ai_config; int ai_nconfigs; struct isa_config ai_configs[MAXDEP + 1]; int ai_priorities[MAXDEP + 1]; }; static void acpi_isa_set_config(void *arg, struct isa_config *config, int enable); static void acpi_isa_identify(driver_t *driver, device_t bus); static ACPI_STATUS acpi_isa_identify_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static device_method_t acpi_isa_methods[] = { DEVMETHOD(device_identify, acpi_isa_identify), {0, 0} }; static driver_t acpi_isa_driver = { "acpi_isa", acpi_isa_methods, 1, }; static devclass_t acpi_isa_devclass; DRIVER_MODULE(acpi_isa, isa, acpi_isa_driver, acpi_isa_devclass, 0, 0); /* * This function is called to make the selected configuration * active. */ static void acpi_isa_set_config(void *arg, struct isa_config *config, int enable) { } /* * Interrogate ACPI for devices which might be attatched to an ISA * bus. * * Note that it is difficult to determine whether a device in the ACPI * namespace is or is not visible to the ISA bus, and thus we are a * little too generous here and just export everything with _HID * and _CRS. */ static void acpi_isa_identify(driver_t *driver, device_t bus) { ACPI_HANDLE parent; ACPI_STATUS status; FUNCTION_TRACE(__func__); if (acpi_disabled("isa")) return_VOID; /* * If this driver is loaded from userland, we can assume that * the ISA bus has already been detected, and we should not * interfere. */ if (!cold) return_VOID; /* * Look for the _SB_ scope, which will contain all the devices * we are likely to support. 
*/ if ((status = AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &parent)) != AE_OK) { device_printf(bus, "no ACPI _SB_ scope - %s\n", AcpiFormatException(status)); return_VOID; } if ((status = AcpiWalkNamespace(ACPI_TYPE_DEVICE, parent, 100, acpi_isa_identify_child, bus, NULL)) != AE_OK) device_printf(bus, "AcpiWalkNamespace on _SB_ failed - %s\n", AcpiFormatException(status)); return_VOID; } /* * Check a device to see whether it makes sense to try attaching it to an * ISA bus, and if so, do so. * * Note that we *must* always return AE_OK, or the namespace walk will terminate. * * XXX Note also that this is picking up a *lot* of things that are not ISA devices. * Should we consider lazy-binding this so that only the ID is saved and resources * are not parsed until the device is claimed by a driver? */ static ACPI_STATUS acpi_isa_identify_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO devinfo; ACPI_BUFFER buf; device_t child, bus = (device_t)context; u_int32_t devid; FUNCTION_TRACE(__func__); /* * Skip this node if it's on the 'avoid' list. */ if (acpi_avoid(handle)) return_ACPI_STATUS(AE_OK); /* * Try to get information about the device. */ if (AcpiGetObjectInfo(handle, &devinfo) != AE_OK) return_ACPI_STATUS(AE_OK); /* * Reformat the _HID value into 32 bits. */ if (!(devinfo.Valid & ACPI_VALID_HID)) return_ACPI_STATUS(AE_OK); /* * XXX Try to avoid passing stuff to ISA that it just isn't interested * in. This is the *wrong* solution, and what needs to be done * involves just sending ISA the PnP ID and a handle, and then * lazy-parsing the resources if and only if a driver attaches. * With the way that ISA currently works (using bus_probe_and_attach) * this is very difficult. Maybe we need a device_configure method? */ if (!(strncmp(devinfo.HardwareId, "PNP0C", 5))) return_ACPI_STATUS(AE_OK); devid = PNP_EISAID(devinfo.HardwareId); /* XXX check _STA here? */ if (devinfo.Valid & ACPI_VALID_STA) { } /* * Fetch our current settings. 
* * XXX Note that we may want to support alternate settings at some * point as well. */ if (acpi_GetIntoBuffer(handle, AcpiGetCurrentResources, &buf) != AE_OK) return_ACPI_STATUS(AE_OK); /* * Add the device and parse our resources */ child = BUS_ADD_CHILD(bus, ISA_ORDER_PNP, NULL, -1); isa_set_vendorid(child, devid); isa_set_logicalid(child, devid); ISA_SET_CONFIG_CALLBACK(bus, child, acpi_isa_set_config, 0); acpi_parse_resources(child, handle, &acpi_isa_parse_set); AcpiOsFree(buf.Pointer); if (!device_get_desc(child)) device_set_desc_copy(child, devinfo.HardwareId); - DEBUG_PRINT(TRACE_OBJECTS, ("added ISA PnP info for %s\n", acpi_name(handle))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "added ISA PnP info for %s\n", acpi_name(handle))); /* * XXX Parse configuration data and _CID list to find compatible IDs */ return_ACPI_STATUS(AE_OK); } static void acpi_isa_set_init(device_t dev, void **context) { struct acpi_isa_context *cp; FUNCTION_TRACE(__func__); cp = malloc(sizeof(*cp), M_DEVBUF, M_NOWAIT); bzero(cp, sizeof(*cp)); cp->ai_nconfigs = 1; *context = cp; return_VOID; } static void acpi_isa_set_done(device_t dev, void *context) { struct acpi_isa_context *cp = (struct acpi_isa_context *)context; struct isa_config *config, *configs; device_t parent; int i, j; FUNCTION_TRACE(__func__); if (cp == NULL) return_VOID; parent = device_get_parent(dev); /* simple config without dependants */ if (cp->ai_nconfigs == 1) { ISA_ADD_CONFIG(parent, dev, cp->ai_priorities[0], &cp->ai_configs[0]); goto done; } /* Cycle through dependant configs merging primary details */ configs = &cp->ai_configs[0]; for(i = 1; i < cp->ai_nconfigs; i++) { config = &configs[i]; for(j = 0; j < configs[0].ic_nmem; j++) { if (config->ic_nmem == ISA_NMEM) { device_printf(parent, "too many memory ranges\n"); free(configs, M_DEVBUF); return_VOID; } config->ic_mem[config->ic_nmem] = configs[0].ic_mem[j]; config->ic_nmem++; } for(j = 0; j < configs[0].ic_nport; j++) { if (config->ic_nport == ISA_NPORT) { 
device_printf(parent, "too many port ranges\n"); free(configs, M_DEVBUF); return_VOID; } config->ic_port[config->ic_nport] = configs[0].ic_port[j]; config->ic_nport++; } for(j = 0; j < configs[0].ic_nirq; j++) { if (config->ic_nirq == ISA_NIRQ) { device_printf(parent, "too many irq ranges\n"); free(configs, M_DEVBUF); return_VOID; } config->ic_irqmask[config->ic_nirq] = configs[0].ic_irqmask[j]; config->ic_nirq++; } for(j = 0; j < configs[0].ic_ndrq; j++) { if (config->ic_ndrq == ISA_NDRQ) { device_printf(parent, "too many drq ranges\n"); free(configs, M_DEVBUF); return_VOID; } config->ic_drqmask[config->ic_ndrq] = configs[0].ic_drqmask[j]; config->ic_ndrq++; } (void)ISA_ADD_CONFIG(parent, dev, cp->ai_priorities[i], &configs[i]); } done: free(cp, M_DEVBUF); return_VOID; } static void acpi_isa_set_ioport(device_t dev, void *context, u_int32_t base, u_int32_t length) { struct acpi_isa_context *cp = (struct acpi_isa_context *)context; struct isa_config *ic = &cp->ai_configs[cp->ai_config]; if (cp == NULL) return; if (ic->ic_nport == ISA_NPORT) { printf("too many ports\n"); return; } ic->ic_port[ic->ic_nport].ir_start = base; ic->ic_port[ic->ic_nport].ir_end = base + length - 1; ic->ic_port[ic->ic_nport].ir_size = length; ic->ic_port[ic->ic_nport].ir_align = 1; ic->ic_nport++; } static void acpi_isa_set_iorange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align) { struct acpi_isa_context *cp = (struct acpi_isa_context *)context; struct isa_config *ic = &cp->ai_configs[cp->ai_config]; if (cp == NULL) return; if (ic->ic_nport == ISA_NPORT) { printf("too many ports\n"); return; } ic->ic_port[ic->ic_nport].ir_start = low; ic->ic_port[ic->ic_nport].ir_end = high + length - 1; ic->ic_port[ic->ic_nport].ir_size = length; ic->ic_port[ic->ic_nport].ir_align = imin(1, align); ic->ic_nport++; } static void acpi_isa_set_memory(device_t dev, void *context, u_int32_t base, u_int32_t length) { struct acpi_isa_context *cp = (struct 
acpi_isa_context *)context; struct isa_config *ic = &cp->ai_configs[cp->ai_config]; if (cp == NULL) return; if (ic->ic_nmem == ISA_NMEM) { printf("too many memory ranges\n"); return; } ic->ic_mem[ic->ic_nmem].ir_start = base; ic->ic_mem[ic->ic_nmem].ir_end = base + length - 1; ic->ic_mem[ic->ic_nmem].ir_size = length; ic->ic_mem[ic->ic_nmem].ir_align = 1; ic->ic_nmem++; } static void acpi_isa_set_memoryrange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align) { struct acpi_isa_context *cp = (struct acpi_isa_context *)context; struct isa_config *ic = &cp->ai_configs[cp->ai_config]; if (cp == NULL) return; if (ic->ic_nmem == ISA_NMEM) { printf("too many memory ranges\n"); return; } ic->ic_mem[ic->ic_nmem].ir_start = low; ic->ic_mem[ic->ic_nmem].ir_end = high + length - 1; ic->ic_mem[ic->ic_nmem].ir_size = length; ic->ic_mem[ic->ic_nmem].ir_align = imin(1, align); ic->ic_nmem++; } static void acpi_isa_set_irq(device_t dev, void *context, u_int32_t irq) { struct acpi_isa_context *cp = (struct acpi_isa_context *)context; struct isa_config *ic = &cp->ai_configs[cp->ai_config]; if (cp == NULL) return; if (ic->ic_nirq == ISA_NIRQ) { printf("too many IRQs\n"); return; } ic->ic_irqmask[ic->ic_nirq] = 1 << irq; ic->ic_nirq++; } static void acpi_isa_set_drq(device_t dev, void *context, u_int32_t drq) { struct acpi_isa_context *cp = (struct acpi_isa_context *)context; struct isa_config *ic = &cp->ai_configs[cp->ai_config]; if (cp == NULL) return; if (ic->ic_nirq == ISA_NDRQ) { printf("too many DRQs\n"); return; } ic->ic_drqmask[ic->ic_ndrq] = drq; ic->ic_ndrq++; } /* * XXX the "too many dependant configs" logic here is wrong, and * will spam the last dependant config. 
*/ static void acpi_isa_set_start_dependant(device_t dev, void *context, int preference) { struct acpi_isa_context *cp = (struct acpi_isa_context *)context; if (cp == NULL) return; if (cp->ai_nconfigs > MAXDEP) { printf("too many dependant configs\n"); return; } cp->ai_config = cp->ai_nconfigs; cp->ai_priorities[cp->ai_config] = preference; cp->ai_nconfigs++; } static void acpi_isa_set_end_dependant(device_t dev, void *context) { struct acpi_isa_context *cp = (struct acpi_isa_context *)context; if (cp == NULL) return; cp->ai_config = 0; } Index: head/sys/dev/acpica/acpi_pcib.c =================================================================== --- head/sys/dev/acpica/acpi_pcib.c (revision 82371) +++ head/sys/dev/acpica/acpi_pcib.c (revision 82372) @@ -1,513 +1,515 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include #include "acpi.h" #include #include #include #include "pcib_if.h" /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS MODULE_NAME("PCI") struct acpi_pcib_softc { device_t ap_dev; ACPI_HANDLE ap_handle; int ap_segment; /* analagous to Alpha 'hose' */ int ap_bus; /* bios-assigned bus number */ ACPI_BUFFER ap_prt; /* interrupt routing table */ }; static int acpi_pcib_probe(device_t bus); static int acpi_pcib_attach(device_t bus); static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static int acpi_pcib_maxslots(device_t dev); static u_int32_t acpi_pcib_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes); static void acpi_pcib_write_config(device_t dev, int bus, int slot, int func, int reg, u_int32_t data, int bytes); static int acpi_pcib_route_interrupt(device_t pcib, device_t dev, int pin); static device_method_t acpi_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pcib_probe), DEVMETHOD(device_attach, acpi_pcib_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar), 
DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, acpi_pcib_maxslots), DEVMETHOD(pcib_read_config, acpi_pcib_read_config), DEVMETHOD(pcib_write_config, acpi_pcib_write_config), DEVMETHOD(pcib_route_interrupt, acpi_pcib_route_interrupt), {0, 0} }; static driver_t acpi_pcib_driver = { "acpi_pcib", acpi_pcib_methods, sizeof(struct acpi_pcib_softc), }; devclass_t acpi_pcib_devclass; DRIVER_MODULE(acpi_pcib, acpi, acpi_pcib_driver, acpi_pcib_devclass, 0, 0); static int acpi_pcib_probe(device_t dev) { if ((acpi_get_type(dev) == ACPI_TYPE_DEVICE) && !acpi_disabled("pci") && acpi_MatchHid(dev, "PNP0A03")) { /* * Set device description */ device_set_desc(dev, "Host-PCI bridge"); return(0); } return(ENXIO); } static int acpi_pcib_attach(device_t dev) { struct acpi_pcib_softc *sc; device_t child; ACPI_STATUS status; int result; FUNCTION_TRACE(__func__); sc = device_get_softc(dev); sc->ap_dev = dev; sc->ap_handle = acpi_get_handle(dev); /* * Don't attach if we're not really there. * * XXX this isn't entirely correct, since we may be a PCI bus * on a hot-plug docking station, etc. */ if (!acpi_DeviceIsPresent(dev)) return_VALUE(ENXIO); /* * Get our segment number by evaluating _SEG * It's OK for this to not exist. 
*/ if ((status = acpi_EvaluateInteger(sc->ap_handle, "_SEG", &sc->ap_segment)) != AE_OK) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _SEG - %s\n", AcpiFormatException(status)); return_VALUE(ENXIO); } /* if it's not found, assume 0 */ sc->ap_segment = 0; } /* * Get our base bus number by evaluating _BBN * If this doesn't exist, we assume we're bus number 0. * * XXX note that it may also not exist in the case where we are * meant to use a private configuration space mechanism for this bus, * so we should dig out our resources and check to see if we have * anything like that. How do we do this? * XXX If we have the requisite information, and if we don't think the * default PCI configuration space handlers can deal with this bus, * we should attach our own handler. * XXX invoke _REG on this for the PCI config space address space? */ if ((status = acpi_EvaluateInteger(sc->ap_handle, "_BBN", &sc->ap_bus)) != AE_OK) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _BBN - %s\n", AcpiFormatException(status)); return_VALUE(ENXIO); } /* if it's not found, assume 0 */ sc->ap_bus = 0; } /* * Make sure that this bus hasn't already been found. If it has, return silently * (should we complain here?). */ if (devclass_get_device(devclass_find("pci"), sc->ap_bus) != NULL) return_VALUE(0); /* * Get the PCI interrupt routing table. */ if ((status = acpi_GetIntoBuffer(sc->ap_handle, AcpiGetIrqRoutingTable, &sc->ap_prt)) != AE_OK) { device_printf(dev, "could not get PCI interrupt routing table - %s\n", AcpiFormatException(status)); /* this is not an error, but it may reduce functionality */ } /* * Attach the PCI bus proper. */ if ((child = device_add_child(dev, "pci", sc->ap_bus)) == NULL) { device_printf(device_get_parent(dev), "couldn't attach pci bus"); return_VALUE(ENXIO); } /* * Now go scan the bus. * * XXX It would be nice to defer this and count on the nexus getting it * after the first pass, but this does not seem to be reliable. 
*/ result = bus_generic_attach(dev); /* * XXX cross-reference our children to attached devices on the child bus * via _ADR, so we can provide power management. */ /* XXX implement */ return_VALUE(result); } /* * ACPI doesn't tell us how many slots there are, so use the standard * maximum. */ static int acpi_pcib_maxslots(device_t dev) { return(PCI_SLOTMAX); } /* * Support for standard PCI bridge ivars. */ static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->ap_bus; return(0); } return(ENOENT); } static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->ap_bus = value; return(0); } return(ENOENT); } static u_int32_t acpi_pcib_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes) { return(pci_cfgregread(bus, slot, func, reg, bytes)); } static void acpi_pcib_write_config(device_t dev, int bus, int slot, int func, int reg, u_int32_t data, int bytes) { pci_cfgregwrite(bus, slot, func, reg, data, bytes); } /* * Route an interrupt for a child of the bridge. 
* * XXX clean up error messages * * XXX this function is somewhat bulky */ static int acpi_pcib_route_interrupt(device_t pcib, device_t dev, int pin) { struct acpi_pcib_softc *sc; PCI_ROUTING_TABLE *prt; ACPI_HANDLE lnkdev; ACPI_BUFFER crsbuf, prsbuf; ACPI_RESOURCE *crsres, *prsres, resbuf; ACPI_DEVICE_INFO devinfo; ACPI_STATUS status; u_int8_t *prtp; device_t *devlist; int devcount; int bus; int interrupt; int i; + + FUNCTION_TRACE(__func__); crsbuf.Pointer = NULL; prsbuf.Pointer = NULL; devlist = NULL; interrupt = 255; /* ACPI numbers pins 0-3, not 1-4 like the BIOS */ pin--; /* find the bridge softc */ if (devclass_get_devices(acpi_pcib_devclass, &devlist, &devcount)) goto out; BUS_READ_IVAR(pcib, pcib, PCIB_IVAR_BUS, (uintptr_t *)&bus); sc = NULL; for (i = 0; i < devcount; i++) { sc = device_get_softc(*(devlist + i)); if (sc->ap_bus == bus) break; sc = NULL; } if (sc == NULL) /* not one of ours */ goto out; prtp = sc->ap_prt.Pointer; if (prtp == NULL) /* didn't get routing table */ goto out; /* scan the table looking for this device */ for (;;) { prt = (PCI_ROUTING_TABLE *)prtp; if (prt->Length == 0) /* end of table */ goto out; /* * Compare the slot number (high word of Address) and pin number * (note that ACPI uses 0 for INTA) to check for a match. * * Note that the low word of the Address field (function number) * is required by the specification to be 0xffff. We don't risk * checking it here. */ if ((((prt->Address & 0xffff0000) >> 16) == pci_get_slot(dev)) && (prt->Pin == pin)) { device_printf(sc->ap_dev, "matched entry for %d.%d.INT%c (source %s)\n", pci_get_bus(dev), pci_get_slot(dev), 'A' + pin, prt->Source); break; } /* skip to next entry */ prtp += prt->Length; } /* * If source is empty/NULL, the source index is the global IRQ number. 
*/ if ((prt->Source == NULL) || (prt->Source[0] == '\0')) { device_printf(sc->ap_dev, "device is hardwired to IRQ %d\n", prt->SourceIndex); interrupt = prt->SourceIndex; goto out; } /* * We have to find the source device (PCI interrupt link device) */ if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, prt->Source, &lnkdev))) { device_printf(sc->ap_dev, "couldn't find PCI interrupt link device %s\n", prt->Source); goto out; } /* * Verify that this is a PCI link device, and that it's present. */ if (ACPI_FAILURE(AcpiGetObjectInfo(lnkdev, &devinfo))) { device_printf(sc->ap_dev, "couldn't validate PCI interrupt link device %s\n", prt->Source); goto out; } if (!(devinfo.Valid & ACPI_VALID_HID) || strcmp("PNP0C0F", devinfo.HardwareId)) { device_printf(sc->ap_dev, "PCI interrupt link device %s has wrong _HID (%s)\n", prt->Source, devinfo.HardwareId); goto out; } /* should be 'present' and 'functioning' */ if ((devinfo.CurrentStatus & 0x09) != 0x09) { device_printf(sc->ap_dev, "PCI interrupt link device %s unavailable (CurrentStatus 0x%x)\n", prt->Source, devinfo.CurrentStatus); goto out; } /* * Get the current and possible resources for the interrupt link device. 
*/ if (ACPI_FAILURE(status = acpi_GetIntoBuffer(lnkdev, AcpiGetCurrentResources, &crsbuf))) { device_printf(sc->ap_dev, "couldn't get PCI interrupt link device _CRS data - %s\n", AcpiFormatException(status)); goto out; /* this is fatal */ } if ((status = acpi_GetIntoBuffer(lnkdev, AcpiGetPossibleResources, &prsbuf)) != AE_OK) { device_printf(sc->ap_dev, "couldn't get PCI interrupt link device _PRS data - %s\n", AcpiFormatException(status)); /* this is not fatal, since it may be hardwired */ } - DEBUG_PRINT(TRACE_RESOURCES, ("got %d bytes for %s._CRS\n", crsbuf.Length, acpi_name(lnkdev))); - DEBUG_PRINT(TRACE_RESOURCES, ("got %d bytes for %s._PRS\n", prsbuf.Length, acpi_name(lnkdev))); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "got %d bytes for %s._CRS\n", crsbuf.Length, acpi_name(lnkdev))); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "got %d bytes for %s._PRS\n", prsbuf.Length, acpi_name(lnkdev))); /* * The interrupt may already be routed, so check _CRS first. We don't check the * 'decoding' bit in the _STA result, since there's nothing in the spec that * mandates it be set, however some BIOS' will set it if the decode is active. * * The Source Index points to the particular resource entry we're interested in. */ if (ACPI_FAILURE(acpi_FindIndexedResource(&crsbuf, prt->SourceIndex, &crsres))) { device_printf(sc->ap_dev, "_CRS buffer corrupt, cannot route interrupt\n"); goto out; } /* type-check the resource we've got */ if (crsres->Id != ACPI_RSTYPE_IRQ) { /* XXX ACPI_RSTYPE_EXT_IRQ */ device_printf(sc->ap_dev, "_CRS resource entry has unsupported type %d\n", crsres->Id); goto out; } /* if there's more than one interrupt, we are confused */ if (crsres->Data.Irq.NumberOfInterrupts > 1) { device_printf(sc->ap_dev, "device has too many interrupts (%d)\n", crsres->Data.Irq.NumberOfInterrupts); goto out; } /* * If there's only one interrupt, and it's not zero, then we're already routed. 
* * Note that we could also check the 'decoding' bit in _STA, but can't depend on * it since it's not part of the spec. * * XXX check ASL examples to see if this is an acceptable set of tests */ if ((crsres->Data.Irq.NumberOfInterrupts == 1) && (crsres->Data.Irq.Interrupts[0] != 0)) { device_printf(sc->ap_dev, "device is routed to IRQ %d\n", crsres->Data.Irq.Interrupts[0]); interrupt = crsres->Data.Irq.Interrupts[0]; goto out; } /* * There isn't an interrupt, so we have to look at _PRS to get one. * Get the set of allowed interrupts from the _PRS resource indexed by SourceIndex. */ if (prsbuf.Pointer == NULL) { device_printf(sc->ap_dev, "device has no routed interrupt and no _PRS on PCI interrupt link device\n"); goto out; } if (ACPI_FAILURE(acpi_FindIndexedResource(&prsbuf, prt->SourceIndex, &prsres))) { device_printf(sc->ap_dev, "_PRS buffer corrupt, cannot route interrupt\n"); goto out; } /* type-check the resource we've got */ if (prsres->Id != ACPI_RSTYPE_IRQ) { /* XXX ACPI_RSTYPE_EXT_IRQ */ device_printf(sc->ap_dev, "_PRS resource entry has unsupported type %d\n", prsres->Id); goto out; } /* there has to be at least one interrupt available */ if (prsres->Data.Irq.NumberOfInterrupts < 1) { device_printf(sc->ap_dev, "device has no interrupts\n"); goto out; } /* * Pick an interrupt to use. Note that a more scientific approach than just * taking the first one available would be desirable. * * The PCI BIOS $PIR table offers "preferred PCI interrupts", but ACPI doesn't * seem to offer a similar mechanism, so picking a "good" interrupt here is a * difficult task. * * Build a resource buffer and pass it to AcpiSetCurrentResources to route the * new interrupt. 
*/ device_printf(sc->ap_dev, "possible interrupts:"); for (i = 0; i < prsres->Data.Irq.NumberOfInterrupts; i++) printf(" %d", prsres->Data.Irq.Interrupts[i]); printf("\n"); if (crsbuf.Pointer != NULL) /* should never happen */ AcpiOsFree(crsbuf.Pointer); crsbuf.Pointer = NULL; resbuf.Id = ACPI_RSTYPE_IRQ; resbuf.Length = SIZEOF_RESOURCE(ACPI_RESOURCE_IRQ); resbuf.Data.Irq = prsres->Data.Irq; /* structure copy other fields */ resbuf.Data.Irq.NumberOfInterrupts = 1; resbuf.Data.Irq.Interrupts[0] = prsres->Data.Irq.Interrupts[0]; /* just take first... */ if (ACPI_FAILURE(status = acpi_AppendBufferResource(&crsbuf, &resbuf))) { device_printf(sc->ap_dev, "couldn't route interrupt %d via %s, interupt resource build failed - %s\n", prsres->Data.Irq.Interrupts[0], acpi_name(lnkdev), AcpiFormatException(status)); goto out; } if (ACPI_FAILURE(status = AcpiSetCurrentResources(lnkdev, &crsbuf))) { device_printf(sc->ap_dev, "couldn't route interrupt %d via %s - %s\n", prsres->Data.Irq.Interrupts[0], acpi_name(lnkdev), AcpiFormatException(status)); goto out; } /* successful, return the interrupt we just routed */ device_printf(sc->ap_dev, "routed interrupt %d via %s\n", prsres->Data.Irq.Interrupts[0], acpi_name(lnkdev)); interrupt = prsres->Data.Irq.Interrupts[0]; out: if (devlist != NULL) free(devlist, M_TEMP); if (crsbuf.Pointer != NULL) AcpiOsFree(crsbuf.Pointer); if (prsbuf.Pointer != NULL) AcpiOsFree(prsbuf.Pointer); /* XXX APIC_IO interrupt mapping? */ - return(interrupt); + return_VALUE(interrupt); } Index: head/sys/dev/acpica/acpi_pcib_acpi.c =================================================================== --- head/sys/dev/acpica/acpi_pcib_acpi.c (revision 82371) +++ head/sys/dev/acpica/acpi_pcib_acpi.c (revision 82372) @@ -1,513 +1,515 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include #include "acpi.h" #include #include #include #include "pcib_if.h" /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS MODULE_NAME("PCI") struct acpi_pcib_softc { device_t ap_dev; ACPI_HANDLE ap_handle; int ap_segment; /* analagous to Alpha 'hose' */ int ap_bus; /* bios-assigned bus number */ ACPI_BUFFER ap_prt; /* interrupt routing table */ }; static int acpi_pcib_probe(device_t bus); static int acpi_pcib_attach(device_t bus); static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static int acpi_pcib_maxslots(device_t dev); static u_int32_t acpi_pcib_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes); static void acpi_pcib_write_config(device_t dev, int bus, int slot, int func, int reg, u_int32_t data, int bytes); static int acpi_pcib_route_interrupt(device_t pcib, device_t dev, int pin); static device_method_t acpi_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pcib_probe), DEVMETHOD(device_attach, acpi_pcib_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, acpi_pcib_maxslots), DEVMETHOD(pcib_read_config, 
acpi_pcib_read_config), DEVMETHOD(pcib_write_config, acpi_pcib_write_config), DEVMETHOD(pcib_route_interrupt, acpi_pcib_route_interrupt), {0, 0} }; static driver_t acpi_pcib_driver = { "acpi_pcib", acpi_pcib_methods, sizeof(struct acpi_pcib_softc), }; devclass_t acpi_pcib_devclass; DRIVER_MODULE(acpi_pcib, acpi, acpi_pcib_driver, acpi_pcib_devclass, 0, 0); static int acpi_pcib_probe(device_t dev) { if ((acpi_get_type(dev) == ACPI_TYPE_DEVICE) && !acpi_disabled("pci") && acpi_MatchHid(dev, "PNP0A03")) { /* * Set device description */ device_set_desc(dev, "Host-PCI bridge"); return(0); } return(ENXIO); } static int acpi_pcib_attach(device_t dev) { struct acpi_pcib_softc *sc; device_t child; ACPI_STATUS status; int result; FUNCTION_TRACE(__func__); sc = device_get_softc(dev); sc->ap_dev = dev; sc->ap_handle = acpi_get_handle(dev); /* * Don't attach if we're not really there. * * XXX this isn't entirely correct, since we may be a PCI bus * on a hot-plug docking station, etc. */ if (!acpi_DeviceIsPresent(dev)) return_VALUE(ENXIO); /* * Get our segment number by evaluating _SEG * It's OK for this to not exist. */ if ((status = acpi_EvaluateInteger(sc->ap_handle, "_SEG", &sc->ap_segment)) != AE_OK) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _SEG - %s\n", AcpiFormatException(status)); return_VALUE(ENXIO); } /* if it's not found, assume 0 */ sc->ap_segment = 0; } /* * Get our base bus number by evaluating _BBN * If this doesn't exist, we assume we're bus number 0. * * XXX note that it may also not exist in the case where we are * meant to use a private configuration space mechanism for this bus, * so we should dig out our resources and check to see if we have * anything like that. How do we do this? * XXX If we have the requisite information, and if we don't think the * default PCI configuration space handlers can deal with this bus, * we should attach our own handler. * XXX invoke _REG on this for the PCI config space address space? 
*/ if ((status = acpi_EvaluateInteger(sc->ap_handle, "_BBN", &sc->ap_bus)) != AE_OK) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _BBN - %s\n", AcpiFormatException(status)); return_VALUE(ENXIO); } /* if it's not found, assume 0 */ sc->ap_bus = 0; } /* * Make sure that this bus hasn't already been found. If it has, return silently * (should we complain here?). */ if (devclass_get_device(devclass_find("pci"), sc->ap_bus) != NULL) return_VALUE(0); /* * Get the PCI interrupt routing table. */ if ((status = acpi_GetIntoBuffer(sc->ap_handle, AcpiGetIrqRoutingTable, &sc->ap_prt)) != AE_OK) { device_printf(dev, "could not get PCI interrupt routing table - %s\n", AcpiFormatException(status)); /* this is not an error, but it may reduce functionality */ } /* * Attach the PCI bus proper. */ if ((child = device_add_child(dev, "pci", sc->ap_bus)) == NULL) { device_printf(device_get_parent(dev), "couldn't attach pci bus"); return_VALUE(ENXIO); } /* * Now go scan the bus. * * XXX It would be nice to defer this and count on the nexus getting it * after the first pass, but this does not seem to be reliable. */ result = bus_generic_attach(dev); /* * XXX cross-reference our children to attached devices on the child bus * via _ADR, so we can provide power management. */ /* XXX implement */ return_VALUE(result); } /* * ACPI doesn't tell us how many slots there are, so use the standard * maximum. */ static int acpi_pcib_maxslots(device_t dev) { return(PCI_SLOTMAX); } /* * Support for standard PCI bridge ivars. 
*/ static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->ap_bus; return(0); } return(ENOENT); } static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->ap_bus = value; return(0); } return(ENOENT); } static u_int32_t acpi_pcib_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes) { return(pci_cfgregread(bus, slot, func, reg, bytes)); } static void acpi_pcib_write_config(device_t dev, int bus, int slot, int func, int reg, u_int32_t data, int bytes) { pci_cfgregwrite(bus, slot, func, reg, data, bytes); } /* * Route an interrupt for a child of the bridge. * * XXX clean up error messages * * XXX this function is somewhat bulky */ static int acpi_pcib_route_interrupt(device_t pcib, device_t dev, int pin) { struct acpi_pcib_softc *sc; PCI_ROUTING_TABLE *prt; ACPI_HANDLE lnkdev; ACPI_BUFFER crsbuf, prsbuf; ACPI_RESOURCE *crsres, *prsres, resbuf; ACPI_DEVICE_INFO devinfo; ACPI_STATUS status; u_int8_t *prtp; device_t *devlist; int devcount; int bus; int interrupt; int i; + + FUNCTION_TRACE(__func__); crsbuf.Pointer = NULL; prsbuf.Pointer = NULL; devlist = NULL; interrupt = 255; /* ACPI numbers pins 0-3, not 1-4 like the BIOS */ pin--; /* find the bridge softc */ if (devclass_get_devices(acpi_pcib_devclass, &devlist, &devcount)) goto out; BUS_READ_IVAR(pcib, pcib, PCIB_IVAR_BUS, (uintptr_t *)&bus); sc = NULL; for (i = 0; i < devcount; i++) { sc = device_get_softc(*(devlist + i)); if (sc->ap_bus == bus) break; sc = NULL; } if (sc == NULL) /* not one of ours */ goto out; prtp = sc->ap_prt.Pointer; if (prtp == NULL) /* didn't get routing table */ goto out; /* scan the table looking for this device */ for (;;) { prt = (PCI_ROUTING_TABLE *)prtp; if (prt->Length == 0) /* end of table */ goto 
out; /* * Compare the slot number (high word of Address) and pin number * (note that ACPI uses 0 for INTA) to check for a match. * * Note that the low word of the Address field (function number) * is required by the specification to be 0xffff. We don't risk * checking it here. */ if ((((prt->Address & 0xffff0000) >> 16) == pci_get_slot(dev)) && (prt->Pin == pin)) { device_printf(sc->ap_dev, "matched entry for %d.%d.INT%c (source %s)\n", pci_get_bus(dev), pci_get_slot(dev), 'A' + pin, prt->Source); break; } /* skip to next entry */ prtp += prt->Length; } /* * If source is empty/NULL, the source index is the global IRQ number. */ if ((prt->Source == NULL) || (prt->Source[0] == '\0')) { device_printf(sc->ap_dev, "device is hardwired to IRQ %d\n", prt->SourceIndex); interrupt = prt->SourceIndex; goto out; } /* * We have to find the source device (PCI interrupt link device) */ if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, prt->Source, &lnkdev))) { device_printf(sc->ap_dev, "couldn't find PCI interrupt link device %s\n", prt->Source); goto out; } /* * Verify that this is a PCI link device, and that it's present. */ if (ACPI_FAILURE(AcpiGetObjectInfo(lnkdev, &devinfo))) { device_printf(sc->ap_dev, "couldn't validate PCI interrupt link device %s\n", prt->Source); goto out; } if (!(devinfo.Valid & ACPI_VALID_HID) || strcmp("PNP0C0F", devinfo.HardwareId)) { device_printf(sc->ap_dev, "PCI interrupt link device %s has wrong _HID (%s)\n", prt->Source, devinfo.HardwareId); goto out; } /* should be 'present' and 'functioning' */ if ((devinfo.CurrentStatus & 0x09) != 0x09) { device_printf(sc->ap_dev, "PCI interrupt link device %s unavailable (CurrentStatus 0x%x)\n", prt->Source, devinfo.CurrentStatus); goto out; } /* * Get the current and possible resources for the interrupt link device. 
*/ if (ACPI_FAILURE(status = acpi_GetIntoBuffer(lnkdev, AcpiGetCurrentResources, &crsbuf))) { device_printf(sc->ap_dev, "couldn't get PCI interrupt link device _CRS data - %s\n", AcpiFormatException(status)); goto out; /* this is fatal */ } if ((status = acpi_GetIntoBuffer(lnkdev, AcpiGetPossibleResources, &prsbuf)) != AE_OK) { device_printf(sc->ap_dev, "couldn't get PCI interrupt link device _PRS data - %s\n", AcpiFormatException(status)); /* this is not fatal, since it may be hardwired */ } - DEBUG_PRINT(TRACE_RESOURCES, ("got %d bytes for %s._CRS\n", crsbuf.Length, acpi_name(lnkdev))); - DEBUG_PRINT(TRACE_RESOURCES, ("got %d bytes for %s._PRS\n", prsbuf.Length, acpi_name(lnkdev))); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "got %d bytes for %s._CRS\n", crsbuf.Length, acpi_name(lnkdev))); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "got %d bytes for %s._PRS\n", prsbuf.Length, acpi_name(lnkdev))); /* * The interrupt may already be routed, so check _CRS first. We don't check the * 'decoding' bit in the _STA result, since there's nothing in the spec that * mandates it be set, however some BIOS' will set it if the decode is active. * * The Source Index points to the particular resource entry we're interested in. */ if (ACPI_FAILURE(acpi_FindIndexedResource(&crsbuf, prt->SourceIndex, &crsres))) { device_printf(sc->ap_dev, "_CRS buffer corrupt, cannot route interrupt\n"); goto out; } /* type-check the resource we've got */ if (crsres->Id != ACPI_RSTYPE_IRQ) { /* XXX ACPI_RSTYPE_EXT_IRQ */ device_printf(sc->ap_dev, "_CRS resource entry has unsupported type %d\n", crsres->Id); goto out; } /* if there's more than one interrupt, we are confused */ if (crsres->Data.Irq.NumberOfInterrupts > 1) { device_printf(sc->ap_dev, "device has too many interrupts (%d)\n", crsres->Data.Irq.NumberOfInterrupts); goto out; } /* * If there's only one interrupt, and it's not zero, then we're already routed. 
* * Note that we could also check the 'decoding' bit in _STA, but can't depend on * it since it's not part of the spec. * * XXX check ASL examples to see if this is an acceptable set of tests */ if ((crsres->Data.Irq.NumberOfInterrupts == 1) && (crsres->Data.Irq.Interrupts[0] != 0)) { device_printf(sc->ap_dev, "device is routed to IRQ %d\n", crsres->Data.Irq.Interrupts[0]); interrupt = crsres->Data.Irq.Interrupts[0]; goto out; } /* * There isn't an interrupt, so we have to look at _PRS to get one. * Get the set of allowed interrupts from the _PRS resource indexed by SourceIndex. */ if (prsbuf.Pointer == NULL) { device_printf(sc->ap_dev, "device has no routed interrupt and no _PRS on PCI interrupt link device\n"); goto out; } if (ACPI_FAILURE(acpi_FindIndexedResource(&prsbuf, prt->SourceIndex, &prsres))) { device_printf(sc->ap_dev, "_PRS buffer corrupt, cannot route interrupt\n"); goto out; } /* type-check the resource we've got */ if (prsres->Id != ACPI_RSTYPE_IRQ) { /* XXX ACPI_RSTYPE_EXT_IRQ */ device_printf(sc->ap_dev, "_PRS resource entry has unsupported type %d\n", prsres->Id); goto out; } /* there has to be at least one interrupt available */ if (prsres->Data.Irq.NumberOfInterrupts < 1) { device_printf(sc->ap_dev, "device has no interrupts\n"); goto out; } /* * Pick an interrupt to use. Note that a more scientific approach than just * taking the first one available would be desirable. * * The PCI BIOS $PIR table offers "preferred PCI interrupts", but ACPI doesn't * seem to offer a similar mechanism, so picking a "good" interrupt here is a * difficult task. * * Build a resource buffer and pass it to AcpiSetCurrentResources to route the * new interrupt. 
*/ device_printf(sc->ap_dev, "possible interrupts:"); for (i = 0; i < prsres->Data.Irq.NumberOfInterrupts; i++) printf(" %d", prsres->Data.Irq.Interrupts[i]); printf("\n"); if (crsbuf.Pointer != NULL) /* should never happen */ AcpiOsFree(crsbuf.Pointer); crsbuf.Pointer = NULL; resbuf.Id = ACPI_RSTYPE_IRQ; resbuf.Length = SIZEOF_RESOURCE(ACPI_RESOURCE_IRQ); resbuf.Data.Irq = prsres->Data.Irq; /* structure copy other fields */ resbuf.Data.Irq.NumberOfInterrupts = 1; resbuf.Data.Irq.Interrupts[0] = prsres->Data.Irq.Interrupts[0]; /* just take first... */ if (ACPI_FAILURE(status = acpi_AppendBufferResource(&crsbuf, &resbuf))) { device_printf(sc->ap_dev, "couldn't route interrupt %d via %s, interupt resource build failed - %s\n", prsres->Data.Irq.Interrupts[0], acpi_name(lnkdev), AcpiFormatException(status)); goto out; } if (ACPI_FAILURE(status = AcpiSetCurrentResources(lnkdev, &crsbuf))) { device_printf(sc->ap_dev, "couldn't route interrupt %d via %s - %s\n", prsres->Data.Irq.Interrupts[0], acpi_name(lnkdev), AcpiFormatException(status)); goto out; } /* successful, return the interrupt we just routed */ device_printf(sc->ap_dev, "routed interrupt %d via %s\n", prsres->Data.Irq.Interrupts[0], acpi_name(lnkdev)); interrupt = prsres->Data.Irq.Interrupts[0]; out: if (devlist != NULL) free(devlist, M_TEMP); if (crsbuf.Pointer != NULL) AcpiOsFree(crsbuf.Pointer); if (prsbuf.Pointer != NULL) AcpiOsFree(prsbuf.Pointer); /* XXX APIC_IO interrupt mapping? */ - return(interrupt); + return_VALUE(interrupt); } Index: head/sys/dev/acpica/acpi_powerres.c =================================================================== --- head/sys/dev/acpica/acpi_powerres.c (revision 82371) +++ head/sys/dev/acpica/acpi_powerres.c (revision 82372) @@ -1,636 +1,636 @@ /*- * Copyright (c) 2001 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_acpi.h" /* XXX trim includes */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi.h" #include #include /* * ACPI power resource management. * * Power resource behaviour is slightly complicated by the fact that * a single power resource may provide power for more than one device. * Thus, we must track the device(s) being powered by a given power * resource, and only deactivate it when there are no powered devices. * * Note that this only manages resources for known devices. There is an * ugly case where we may turn off power to a device which is in use because * we don't know that it depends on a given resource. 
We should perhaps * try to be smarter about this, but a more complete solution would involve * scanning all of the ACPI namespace to find devices we're not currently * aware of, and this raises questions about whether they should be left * on, turned off, etc. * * XXX locking */ MALLOC_DEFINE(M_ACPIPWR, "acpipwr", "ACPI power resources"); /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_POWER MODULE_NAME("POWERRES") /* return values from _STA on a power resource */ #define ACPI_PWR_OFF 0 #define ACPI_PWR_ON 1 /* * A relationship between a power resource and a consumer. */ struct acpi_powerreference { struct acpi_powerconsumer *ar_consumer; struct acpi_powerresource *ar_resource; TAILQ_ENTRY(acpi_powerreference) ar_rlink; /* link on resource list */ TAILQ_ENTRY(acpi_powerreference) ar_clink; /* link on consumer */ }; /* * A power-managed device. */ struct acpi_powerconsumer { ACPI_HANDLE ac_consumer; /* device which is powered */ int ac_state; TAILQ_ENTRY(acpi_powerconsumer) ac_link; TAILQ_HEAD(,acpi_powerreference) ac_references; }; /* * A power resource. 
*/ struct acpi_powerresource { TAILQ_ENTRY(acpi_powerresource) ap_link; TAILQ_HEAD(,acpi_powerreference) ap_references; ACPI_HANDLE ap_resource; /* the resource's handle */ ACPI_INTEGER ap_systemlevel; ACPI_INTEGER ap_order; }; TAILQ_HEAD(acpi_powerresource_list, acpi_powerresource) acpi_powerresources; TAILQ_HEAD(acpi_powerconsumer_list, acpi_powerconsumer) acpi_powerconsumers; static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer); static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer); static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res); static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res); static void acpi_pwr_reference_resource(ACPI_OBJECT *obj, void *arg); static ACPI_STATUS acpi_pwr_switch_power(void); static struct acpi_powerresource *acpi_pwr_find_resource(ACPI_HANDLE res); static struct acpi_powerconsumer *acpi_pwr_find_consumer(ACPI_HANDLE consumer); /* * Initialise our lists. */ static void acpi_pwr_init(void *junk) { TAILQ_INIT(&acpi_powerresources); TAILQ_INIT(&acpi_powerconsumers); } SYSINIT(acpi_powerresource, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_pwr_init, NULL); /* * Register a power resource. * * It's OK to call this if we already know about the resource. 
*/ static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT *obj; struct acpi_powerresource *rp, *srp; FUNCTION_TRACE(__func__); rp = NULL; obj = NULL; /* look to see if we know about this resource */ if (acpi_pwr_find_resource(res) != NULL) return_ACPI_STATUS(AE_OK); /* already know about it */ /* allocate a new resource */ if ((rp = malloc(sizeof(*rp), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL) { status = AE_NO_MEMORY; goto out; } TAILQ_INIT(&rp->ap_references); rp->ap_resource = res; /* get the Power Resource object */ if ((status = acpi_EvaluateIntoBuffer(res, NULL, NULL, &buf)) != AE_OK) { - DEBUG_PRINT(TRACE_OBJECTS, ("no power resource object\n")); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "no power resource object\n")); goto out; } obj = buf.Pointer; if (obj->Type != ACPI_TYPE_POWER) { - DEBUG_PRINT(TRACE_OBJECTS, ("questionable power resource object %s\n", acpi_name(res))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "questionable power resource object %s\n", acpi_name(res))); status = AE_TYPE; goto out; } rp->ap_systemlevel = obj->PowerResource.SystemLevel; rp->ap_order = obj->PowerResource.ResourceOrder; /* sort the resource into the list */ status = AE_OK; srp = TAILQ_FIRST(&acpi_powerresources); if ((srp == NULL) || (rp->ap_order < srp->ap_order)) { TAILQ_INSERT_HEAD(&acpi_powerresources, rp, ap_link); goto done; } TAILQ_FOREACH(srp, &acpi_powerresources, ap_link) if (rp->ap_order < srp->ap_order) { TAILQ_INSERT_BEFORE(srp, rp, ap_link); goto done; } TAILQ_INSERT_TAIL(&acpi_powerresources, rp, ap_link); done: - DEBUG_PRINT(TRACE_OBJECTS, ("registered power resource %s\n", acpi_name(res))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power resource %s\n", acpi_name(res))); out: if (obj != NULL) AcpiOsFree(obj); if ((status != AE_OK) && (rp != NULL)) free(rp, M_ACPIPWR); return_ACPI_STATUS(status); } /* * Deregister a power resource. 
*/ static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res) { struct acpi_powerresource *rp; FUNCTION_TRACE(__func__); rp = NULL; /* find the resource */ if ((rp = acpi_pwr_find_resource(res)) == NULL) return_ACPI_STATUS(AE_BAD_PARAMETER); /* check that there are no consumers referencing this resource */ if (TAILQ_FIRST(&rp->ap_references) != NULL) return_ACPI_STATUS(AE_BAD_PARAMETER); /* pull it off the list and free it */ TAILQ_REMOVE(&acpi_powerresources, rp, ap_link); free(rp, M_ACPIPWR); - DEBUG_PRINT(TRACE_OBJECTS, ("deregistered power resource %s\n", acpi_name(res))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power resource %s\n", acpi_name(res))); return_ACPI_STATUS(AE_OK); } /* * Register a power consumer. * * It's OK to call this if we already know about the consumer. */ static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer) { struct acpi_powerconsumer *pc; FUNCTION_TRACE(__func__); /* check to see whether we know about this consumer already */ if ((pc = acpi_pwr_find_consumer(consumer)) != NULL) return_ACPI_STATUS(AE_OK); /* allocate a new power consumer */ if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT)) == NULL) return_ACPI_STATUS(AE_NO_MEMORY); TAILQ_INSERT_HEAD(&acpi_powerconsumers, pc, ac_link); TAILQ_INIT(&pc->ac_references); pc->ac_consumer = consumer; pc->ac_state = ACPI_STATE_UNKNOWN; /* XXX we should try to find its current state */ - DEBUG_PRINT(TRACE_OBJECTS, ("registered power consumer %s\n", acpi_name(consumer))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power consumer %s\n", acpi_name(consumer))); return_ACPI_STATUS(AE_OK); } /* * Deregister a power consumer. * * This should only be done once the consumer has been powered off. * (XXX is this correct? 
Check once implemented) */ static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer) { struct acpi_powerconsumer *pc; FUNCTION_TRACE(__func__); /* find the consumer */ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) return_ACPI_STATUS(AE_BAD_PARAMETER); /* make sure the consumer's not referencing anything right now */ if (TAILQ_FIRST(&pc->ac_references) != NULL) return_ACPI_STATUS(AE_BAD_PARAMETER); /* pull the consumer off the list and free it */ TAILQ_REMOVE(&acpi_powerconsumers, pc, ac_link); - DEBUG_PRINT(TRACE_OBJECTS, ("deregistered power consumer %s\n", acpi_name(consumer))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power consumer %s\n", acpi_name(consumer))); return_ACPI_STATUS(AE_OK); } /* * Set a power consumer to a particular power state. */ ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state) { struct acpi_powerconsumer *pc; struct acpi_powerreference *pr; ACPI_HANDLE method_handle, reslist_handle, pr0_handle; ACPI_BUFFER reslist_buffer; ACPI_OBJECT *reslist_object; ACPI_STATUS status; char *method_name, *reslist_name; int res_changed; FUNCTION_TRACE(__func__); /* find the consumer */ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) { if ((status = acpi_pwr_register_consumer(consumer)) != AE_OK) return_ACPI_STATUS(status); if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) { return_ACPI_STATUS(AE_ERROR); /* something very wrong */ } } /* check for valid transitions */ if ((pc->ac_state == ACPI_STATE_D3) && (state != ACPI_STATE_D0)) return_ACPI_STATUS(AE_BAD_PARAMETER); /* can only go to D0 from D3 */ /* find transition mechanism(s) */ switch(state) { case ACPI_STATE_D0: method_name = "_PS0"; reslist_name = "_PR0"; break; case ACPI_STATE_D1: method_name = "_PS1"; reslist_name = "_PR1"; break; case ACPI_STATE_D2: method_name = "_PS2"; reslist_name = "_PR2"; break; case ACPI_STATE_D3: method_name = "_PS3"; reslist_name = "_PR3"; break; default: return_ACPI_STATUS(AE_BAD_PARAMETER); } - 
DEBUG_PRINT(TRACE_OBJECTS, ("setup to switch %s D%d -> D%d\n", - acpi_name(consumer), pc->ac_state, state)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "setup to switch %s D%d -> D%d\n", + acpi_name(consumer), pc->ac_state, state)); /* * Verify that this state is supported, ie. one of method or * reslist must be present. We need to do this before we go * dereferencing resources (since we might be trying to go to * a state we don't support). * * Note that if any states are supported, the device has to * support D0 and D3. It's never an error to try to go to * D0. */ reslist_object = NULL; if (AcpiGetHandle(consumer, method_name, &method_handle) != AE_OK) method_handle = NULL; if (AcpiGetHandle(consumer, reslist_name, &reslist_handle) != AE_OK) reslist_handle = NULL; if ((reslist_handle == NULL) && (method_handle == NULL)) { if (state == ACPI_STATE_D0) { pc->ac_state = ACPI_STATE_D0; return_ACPI_STATUS(AE_OK); } if (state != ACPI_STATE_D3) { goto bad; } /* turn off the resources listed in _PR0 to go to D3. 
*/ if (AcpiGetHandle(consumer, "_PR0", &pr0_handle) != AE_OK) { goto bad; } status = acpi_EvaluateIntoBuffer(pr0_handle, NULL, NULL, &reslist_buffer); if (status != AE_OK) { goto bad; } reslist_object = (ACPI_OBJECT *)reslist_buffer.Pointer; if ((reslist_object->Type != ACPI_TYPE_PACKAGE) || (reslist_object->Package.Count == 0)) { goto bad; } AcpiOsFree(reslist_object); } /* * Check that we can actually fetch the list of power resources */ if (reslist_handle != NULL) { if ((status = acpi_EvaluateIntoBuffer(reslist_handle, NULL, NULL, &reslist_buffer)) != AE_OK) { - DEBUG_PRINT(TRACE_OBJECTS, ("can't evaluate resource list %s\n", - acpi_name(reslist_handle))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't evaluate resource list %s\n", + acpi_name(reslist_handle))); return_ACPI_STATUS(status); } reslist_object = (ACPI_OBJECT *)reslist_buffer.Pointer; if (reslist_object->Type != ACPI_TYPE_PACKAGE) { - DEBUG_PRINT(TRACE_OBJECTS, ("resource list is not ACPI_TYPE_PACKAGE (%d)\n", - reslist_object->Type)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "resource list is not ACPI_TYPE_PACKAGE (%d)\n", + reslist_object->Type)); return_ACPI_STATUS(AE_TYPE); } } else { reslist_object = NULL; } /* * Now we are ready to switch, so kill off any current power resource references. */ res_changed = 0; while((pr = TAILQ_FIRST(&pc->ac_references)) != NULL) { res_changed = 1; - DEBUG_PRINT(TRACE_OBJECTS, ("removing reference to %s\n", acpi_name(pr->ar_resource->ap_resource))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "removing reference to %s\n", acpi_name(pr->ar_resource->ap_resource))); TAILQ_REMOVE(&pr->ar_resource->ap_references, pr, ar_rlink); TAILQ_REMOVE(&pc->ac_references, pr, ar_clink); free(pr, M_ACPIPWR); } /* * Add new power resource references, if we have any. Traverse the * package that we got from evaluating reslist_handle, and look up each * of the resources that are referenced. 
*/ if (reslist_object != NULL) { - DEBUG_PRINT(TRACE_OBJECTS, ("referencing %d new resources\n", - reslist_object->Package.Count)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "referencing %d new resources\n", + reslist_object->Package.Count)); acpi_ForeachPackageObject(reslist_object, acpi_pwr_reference_resource, pc); res_changed = 1; } /* * If we changed anything in the resource list, we need to run a switch * pass now. */ if ((status = acpi_pwr_switch_power()) != AE_OK) { - DEBUG_PRINT(TRACE_OBJECTS, ("failed to correctly switch resources to move %s to D%d\n", - acpi_name(consumer), state)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to correctly switch resources to move %s to D%d\n", + acpi_name(consumer), state)); return_ACPI_STATUS(status); /* XXX is this appropriate? Should we return to previous state? */ } /* invoke power state switch method (if present) */ if (method_handle != NULL) { - DEBUG_PRINT(TRACE_OBJECTS, ("invoking state transition method %s\n", - acpi_name(method_handle))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "invoking state transition method %s\n", + acpi_name(method_handle))); if ((status = AcpiEvaluateObject(method_handle, NULL, NULL, NULL)) != AE_OK) pc->ac_state = ACPI_STATE_UNKNOWN; return_ACPI_STATUS(status); /* XXX is this appropriate? Should we return to previous state? */ } /* transition was successful */ pc->ac_state = state; return_ACPI_STATUS(AE_OK); bad: - DEBUG_PRINT(TRACE_OBJECTS, ("attempt to set unsupported state D%d\n", - state)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "attempt to set unsupported state D%d\n", + state)); if (reslist_object) AcpiOsFree(reslist_object); return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Called to create a reference between a power consumer and a power resource * identified in the object. 
*/ static void acpi_pwr_reference_resource(ACPI_OBJECT *obj, void *arg) { struct acpi_powerconsumer *pc = (struct acpi_powerconsumer *)arg; struct acpi_powerreference *pr; struct acpi_powerresource *rp; ACPI_HANDLE res; ACPI_STATUS status; FUNCTION_TRACE(__func__); /* check the object type */ if (obj->Type != ACPI_TYPE_STRING) { - DEBUG_PRINT(TRACE_OBJECTS, ("don't know how to create a power reference to object type %d\n", - obj->Type)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "don't know how to create a power reference to object type %d\n", + obj->Type)); return_VOID; } - DEBUG_PRINT(TRACE_OBJECTS, ("building reference from %s to %s\n", - acpi_name(pc->ac_consumer), obj->String.Pointer)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "building reference from %s to %s\n", + acpi_name(pc->ac_consumer), obj->String.Pointer)); /* get the handle of the resource */ if (ACPI_FAILURE(status = AcpiGetHandle(NULL, obj->String.Pointer, &res))) { - DEBUG_PRINT(TRACE_OBJECTS, ("couldn't find power resource %s\n", - obj->String.Pointer)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't find power resource %s\n", + obj->String.Pointer)); return_VOID; } /* create/look up the resource */ if (ACPI_FAILURE(status = acpi_pwr_register_resource(res))) { - DEBUG_PRINT(TRACE_OBJECTS, ("couldn't register power resource %s - %s\n", - obj->String.Pointer, AcpiFormatException(status))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't register power resource %s - %s\n", + obj->String.Pointer, AcpiFormatException(status))); return_VOID; } if ((rp = acpi_pwr_find_resource(res)) == NULL) { - DEBUG_PRINT(TRACE_OBJECTS, ("power resource list corrupted\n")); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "power resource list corrupted\n")); return_VOID; } - DEBUG_PRINT(TRACE_OBJECTS, ("found power resource %s\n", acpi_name(rp->ap_resource))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "found power resource %s\n", acpi_name(rp->ap_resource))); /* create a reference between the consumer and resource */ if ((pr = 
malloc(sizeof(*pr), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL) { - DEBUG_PRINT(TRACE_OBJECTS, ("couldn't allocate memory for a power consumer reference\n")); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't allocate memory for a power consumer reference\n")); return_VOID; } pr->ar_consumer = pc; pr->ar_resource = rp; TAILQ_INSERT_TAIL(&pc->ac_references, pr, ar_clink); TAILQ_INSERT_TAIL(&rp->ap_references, pr, ar_rlink); return_VOID; } /* * Switch power resources to conform to the desired state. * * Consumers may have modified the power resource list in an arbitrary * fashion; we sweep it in sequence order. */ static ACPI_STATUS acpi_pwr_switch_power(void) { struct acpi_powerresource *rp; ACPI_STATUS status; int cur; FUNCTION_TRACE(__func__); /* * Sweep the list forwards turning things on. */ TAILQ_FOREACH(rp, &acpi_powerresources, ap_link) { if (TAILQ_FIRST(&rp->ap_references) == NULL) { - DEBUG_PRINT(TRACE_OBJECTS, ("%s has no references, not turning on\n", - acpi_name(rp->ap_resource))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s has no references, not turning on\n", + acpi_name(rp->ap_resource))); continue; } /* we could cache this if we trusted it not to change under us */ if ((status = acpi_EvaluateInteger(rp->ap_resource, "_STA", &cur)) != AE_OK) { - DEBUG_PRINT(TRACE_OBJECTS, ("can't get status of %s - %d\n", - acpi_name(rp->ap_resource), status)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get status of %s - %d\n", + acpi_name(rp->ap_resource), status)); continue; /* XXX is this correct? Always switch if in doubt? */ } /* * Switch if required. Note that we ignore the result of the switch * effort; we don't know what to do if it fails, so checking wouldn't * help much. 
*/ if (cur != ACPI_PWR_ON) { if (ACPI_FAILURE(status = AcpiEvaluateObject(rp->ap_resource, "_ON", NULL, NULL))) { - DEBUG_PRINT(TRACE_OBJECTS, ("failed to switch %s on - %s\n", - acpi_name(rp->ap_resource), AcpiFormatException(status))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to switch %s on - %s\n", + acpi_name(rp->ap_resource), AcpiFormatException(status))); } else { - DEBUG_PRINT(TRACE_OBJECTS, ("switched %s on\n", acpi_name(rp->ap_resource))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "switched %s on\n", acpi_name(rp->ap_resource))); } } else { - DEBUG_PRINT(TRACE_OBJECTS, ("%s is already on\n", acpi_name(rp->ap_resource))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s is already on\n", acpi_name(rp->ap_resource))); } } /* * Sweep the list backwards turning things off. */ TAILQ_FOREACH_REVERSE(rp, &acpi_powerresources, acpi_powerresource_list, ap_link) { if (TAILQ_FIRST(&rp->ap_references) != NULL) { - DEBUG_PRINT(TRACE_OBJECTS, ("%s has references, not turning off\n", - acpi_name(rp->ap_resource))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s has references, not turning off\n", + acpi_name(rp->ap_resource))); continue; } /* we could cache this if we trusted it not to change under us */ if ((status = acpi_EvaluateInteger(rp->ap_resource, "_STA", &cur)) != AE_OK) { - DEBUG_PRINT(TRACE_OBJECTS, ("can't get status of %s - %d\n", - acpi_name(rp->ap_resource), status)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get status of %s - %d\n", + acpi_name(rp->ap_resource), status)); continue; /* XXX is this correct? Always switch if in doubt? */ } /* * Switch if required. Note that we ignore the result of the switch * effort; we don't know what to do if it fails, so checking wouldn't * help much. 
*/ if (cur != ACPI_PWR_OFF) { if (ACPI_FAILURE(status = AcpiEvaluateObject(rp->ap_resource, "_OFF", NULL, NULL))) { - DEBUG_PRINT(TRACE_OBJECTS, ("failed to switch %s off - %s\n", - acpi_name(rp->ap_resource), AcpiFormatException(status))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to switch %s off - %s\n", + acpi_name(rp->ap_resource), AcpiFormatException(status))); } else { - DEBUG_PRINT(TRACE_OBJECTS, ("switched %s off\n", acpi_name(rp->ap_resource))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "switched %s off\n", acpi_name(rp->ap_resource))); } } else { - DEBUG_PRINT(TRACE_OBJECTS, ("%s is already off\n", acpi_name(rp->ap_resource))); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "%s is already off\n", acpi_name(rp->ap_resource))); } } return_ACPI_STATUS(AE_OK); } /* * Find a power resource's control structure. */ static struct acpi_powerresource * acpi_pwr_find_resource(ACPI_HANDLE res) { struct acpi_powerresource *rp; FUNCTION_TRACE(__func__); TAILQ_FOREACH(rp, &acpi_powerresources, ap_link) if (rp->ap_resource == res) break; return_VALUE(rp); } /* * Find a power consumer's control structure. */ static struct acpi_powerconsumer * acpi_pwr_find_consumer(ACPI_HANDLE consumer) { struct acpi_powerconsumer *pc; FUNCTION_TRACE(__func__); TAILQ_FOREACH(pc, &acpi_powerconsumers, ac_link) if (pc->ac_consumer == consumer) break; return_VALUE(pc); } Index: head/sys/dev/acpica/acpi_resource.c =================================================================== --- head/sys/dev/acpica/acpi_resource.c (revision 82371) +++ head/sys/dev/acpica/acpi_resource.c (revision 82372) @@ -1,322 +1,322 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include #include #include "acpi.h" #include /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS MODULE_NAME("RESOURCE") /* * Fetch a device's resources and associate them with the device. * * Note that it might be nice to also locate ACPI-specific resource items, such * as GPE bits. 
*/ ACPI_STATUS acpi_parse_resources(device_t dev, ACPI_HANDLE handle, struct acpi_parse_resource_set *set) { ACPI_BUFFER buf; ACPI_RESOURCE *res; char *curr, *last; ACPI_STATUS status; int i; void *context; FUNCTION_TRACE(__func__); /* * Fetch the device resources */ if (((status = acpi_GetIntoBuffer(handle, AcpiGetPossibleResources, &buf)) != AE_OK) && ((status = acpi_GetIntoBuffer(handle, AcpiGetCurrentResources, &buf)) != AE_OK)) { device_printf(dev, "can't fetch ACPI resources - %s\n", AcpiFormatException(status)); return_ACPI_STATUS(status); } - DEBUG_PRINT(TRACE_RESOURCES, ("got %d bytes of resources\n", buf.Length)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "got %d bytes of resources\n", buf.Length)); set->set_init(dev, &context); /* * Iterate through the resources */ curr = buf.Pointer; last = (char *)buf.Pointer + buf.Length; while (curr < last) { res = (ACPI_RESOURCE *)curr; curr += res->Length; /* * Handle the individual resource types */ switch(res->Id) { case ACPI_RSTYPE_END_TAG: - DEBUG_PRINT(TRACE_RESOURCES, ("EndTag\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "EndTag\n")); curr = last; break; case ACPI_RSTYPE_FIXED_IO: - DEBUG_PRINT(TRACE_RESOURCES, ("FixedIo 0x%x/%d\n", res->Data.FixedIo.BaseAddress, res->Data.FixedIo.RangeLength)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedIo 0x%x/%d\n", res->Data.FixedIo.BaseAddress, res->Data.FixedIo.RangeLength)); set->set_ioport(dev, context, res->Data.FixedIo.BaseAddress, res->Data.FixedIo.RangeLength); break; case ACPI_RSTYPE_IO: if (res->Data.Io.MinBaseAddress == res->Data.Io.MaxBaseAddress) { - DEBUG_PRINT(TRACE_RESOURCES, ("Io 0x%x/%d\n", res->Data.Io.MinBaseAddress, res->Data.Io.RangeLength)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x/%d\n", res->Data.Io.MinBaseAddress, res->Data.Io.RangeLength)); set->set_ioport(dev, context, res->Data.Io.MinBaseAddress, res->Data.Io.RangeLength); } else { - DEBUG_PRINT(TRACE_RESOURCES, ("Io 0x%x-0x%x/%d\n", res->Data.Io.MinBaseAddress, 
res->Data.Io.MaxBaseAddress, - res->Data.Io.RangeLength)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x-0x%x/%d\n", res->Data.Io.MinBaseAddress, res->Data.Io.MaxBaseAddress, + res->Data.Io.RangeLength)); set->set_iorange(dev, context, res->Data.Io.MinBaseAddress, res->Data.Io.MaxBaseAddress, res->Data.Io.RangeLength, res->Data.Io.Alignment); } break; case ACPI_RSTYPE_FIXED_MEM32: - DEBUG_PRINT(TRACE_RESOURCES, ("FixedMemory32 0x%x/%d\n", res->Data.FixedMemory32.RangeBaseAddress, - res->Data.FixedMemory32.RangeLength)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedMemory32 0x%x/%d\n", res->Data.FixedMemory32.RangeBaseAddress, + res->Data.FixedMemory32.RangeLength)); set->set_memory(dev, context, res->Data.FixedMemory32.RangeBaseAddress, res->Data.FixedMemory32.RangeLength); break; case ACPI_RSTYPE_MEM32: if (res->Data.Memory32.MinBaseAddress == res->Data.Memory32.MaxBaseAddress) { - DEBUG_PRINT(TRACE_RESOURCES, ("Memory32 0x%x/%d\n", res->Data.Memory32.MinBaseAddress, - res->Data.Memory32.RangeLength)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x/%d\n", res->Data.Memory32.MinBaseAddress, + res->Data.Memory32.RangeLength)); set->set_memory(dev, context, res->Data.Memory32.MinBaseAddress, res->Data.Memory32.RangeLength); } else { - DEBUG_PRINT(TRACE_RESOURCES, ("Memory32 0x%x-0x%x/%d\n", res->Data.Memory32.MinBaseAddress, - res->Data.Memory32.MaxBaseAddress, res->Data.Memory32.RangeLength)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x-0x%x/%d\n", res->Data.Memory32.MinBaseAddress, + res->Data.Memory32.MaxBaseAddress, res->Data.Memory32.RangeLength)); set->set_memoryrange(dev, context, res->Data.Memory32.MinBaseAddress, res->Data.Memory32.MaxBaseAddress, res->Data.Memory32.RangeLength, res->Data.Memory32.Alignment); } break; case ACPI_RSTYPE_MEM24: if (res->Data.Memory24.MinBaseAddress == res->Data.Memory24.MaxBaseAddress) { - DEBUG_PRINT(TRACE_RESOURCES, ("Memory24 0x%x/%d\n", res->Data.Memory24.MinBaseAddress, - 
res->Data.Memory24.RangeLength)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x/%d\n", res->Data.Memory24.MinBaseAddress, + res->Data.Memory24.RangeLength)); set->set_memory(dev, context, res->Data.Memory24.MinBaseAddress, res->Data.Memory24.RangeLength); } else { - DEBUG_PRINT(TRACE_RESOURCES, ("Memory24 0x%x-0x%x/%d\n", res->Data.Memory24.MinBaseAddress, - res->Data.Memory24.MaxBaseAddress, res->Data.Memory24.RangeLength)); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x-0x%x/%d\n", res->Data.Memory24.MinBaseAddress, + res->Data.Memory24.MaxBaseAddress, res->Data.Memory24.RangeLength)); set->set_memoryrange(dev, context, res->Data.Memory24.MinBaseAddress, res->Data.Memory24.MaxBaseAddress, res->Data.Memory24.RangeLength, res->Data.Memory24.Alignment); } break; case ACPI_RSTYPE_IRQ: for (i = 0; i < res->Data.Irq.NumberOfInterrupts; i++) { - DEBUG_PRINT(TRACE_RESOURCES, ("Irq %d\n", res->Data.Irq.Interrupts[i])); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Irq %d\n", res->Data.Irq.Interrupts[i])); set->set_irq(dev, context, res->Data.Irq.Interrupts[i]); } break; case ACPI_RSTYPE_DMA: for (i = 0; i < res->Data.Dma.NumberOfChannels; i++) { - DEBUG_PRINT(TRACE_RESOURCES, ("Drq %d\n", res->Data.Dma.Channels[i])); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Drq %d\n", res->Data.Dma.Channels[i])); set->set_drq(dev, context, res->Data.Dma.Channels[i]); } break; case ACPI_RSTYPE_START_DPF: - DEBUG_PRINT(TRACE_RESOURCES, ("start dependant functions")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "start dependant functions")); set->set_start_dependant(dev, context, res->Data.StartDpf.CompatibilityPriority); break; case ACPI_RSTYPE_END_DPF: - DEBUG_PRINT(TRACE_RESOURCES, ("end dependant functions")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "end dependant functions")); set->set_end_dependant(dev, context); break; case ACPI_RSTYPE_ADDRESS32: - DEBUG_PRINT(TRACE_RESOURCES, ("unimplemented Address32 resource\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "unimplemented Address32 
resource\n")); break; case ACPI_RSTYPE_ADDRESS16: - DEBUG_PRINT(TRACE_RESOURCES, ("unimplemented Address16 resource\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "unimplemented Address16 resource\n")); break; case ACPI_RSTYPE_EXT_IRQ: - DEBUG_PRINT(TRACE_RESOURCES, ("unimplemented ExtendedIrq resource\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "unimplemented ExtendedIrq resource\n")); break; case ACPI_RSTYPE_VENDOR: - DEBUG_PRINT(TRACE_RESOURCES, ("unimplemented VendorSpecific resource\n")); + ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "unimplemented VendorSpecific resource\n")); break; default: break; } } AcpiOsFree(buf.Pointer); set->set_done(dev, context); return_ACPI_STATUS(AE_OK); } static void acpi_res_set_init(device_t dev, void **context); static void acpi_res_set_done(device_t dev, void *context); static void acpi_res_set_ioport(device_t dev, void *context, u_int32_t base, u_int32_t length); static void acpi_res_set_iorange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align); static void acpi_res_set_memory(device_t dev, void *context, u_int32_t base, u_int32_t length); static void acpi_res_set_memoryrange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align); static void acpi_res_set_irq(device_t dev, void *context, u_int32_t irq); static void acpi_res_set_drq(device_t dev, void *context, u_int32_t drq); static void acpi_res_set_start_dependant(device_t dev, void *context, int preference); static void acpi_res_set_end_dependant(device_t dev, void *context); struct acpi_parse_resource_set acpi_res_parse_set = { acpi_res_set_init, acpi_res_set_done, acpi_res_set_ioport, acpi_res_set_iorange, acpi_res_set_memory, acpi_res_set_memoryrange, acpi_res_set_irq, acpi_res_set_drq, acpi_res_set_start_dependant, acpi_res_set_end_dependant }; struct acpi_res_context { int ar_nio; int ar_nmem; int ar_nirq; }; static void acpi_res_set_init(device_t dev, void **context) { struct 
acpi_res_context *cp; if ((cp = AcpiOsAllocate(sizeof(*cp))) != NULL) { bzero(cp, sizeof(*cp)); *context = cp; } } static void acpi_res_set_done(device_t dev, void *context) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; AcpiOsFree(cp); } static void acpi_res_set_ioport(device_t dev, void *context, u_int32_t base, u_int32_t length) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, base, length); } static void acpi_res_set_iorange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; device_printf(dev, "I/O range not supported\n"); } static void acpi_res_set_memory(device_t dev, void *context, u_int32_t base, u_int32_t length) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; bus_set_resource(dev, SYS_RES_MEMORY, cp->ar_nmem++, base, length); } static void acpi_res_set_memoryrange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; device_printf(dev, "memory range not supported\n"); } static void acpi_res_set_irq(device_t dev, void *context, u_int32_t irq) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, irq, 1); } static void acpi_res_set_drq(device_t dev, void *context, u_int32_t drq) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; device_printf(dev, "DRQ not supported\n"); } static void acpi_res_set_start_dependant(device_t dev, void *context, int preference) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; device_printf(dev, "dependant functions 
not supported"); } static void acpi_res_set_end_dependant(device_t dev, void *context) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; } Index: head/sys/dev/acpica/acpi_thermal.c =================================================================== --- head/sys/dev/acpica/acpi_thermal.c (revision 82371) +++ head/sys/dev/acpica/acpi_thermal.c (revision 82372) @@ -1,693 +1,693 @@ /*- * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include #include #include "acpi.h" #include /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_THERMAL MODULE_NAME("THERMAL") #define TZ_ZEROC 2732 #define TZ_KELVTOC(x) (((x) - TZ_ZEROC) / 10), (((x) - TZ_ZEROC) % 10) #define TZ_NOTIFY_TEMPERATURE 0x80 #define TZ_NOTIFY_DEVICES 0x81 #define TZ_NOTIFY_LEVELS 0x82 #define TZ_POLLRATE (hz * 10) /* every ten seconds */ #define TZ_NUMLEVELS 10 /* defined by ACPI spec */ struct acpi_tz_zone { int ac[TZ_NUMLEVELS]; ACPI_BUFFER al[TZ_NUMLEVELS]; int crt; int hot; ACPI_BUFFER psl; int psv; int tc1; int tc2; int tsp; int tzp; }; struct acpi_tz_softc { device_t tz_dev; /* device handle */ ACPI_HANDLE tz_handle; /* thermal zone handle */ struct callout_handle tz_timeout; /* poll routine handle */ int tz_temperature; /* current temperature */ int tz_active; /* current active cooling */ #define TZ_ACTIVE_NONE -1 int tz_requested; /* user-requested minimum active cooling */ int tz_thflags; /* current temperature-related flags */ #define TZ_THFLAG_NONE 0 #define TZ_THFLAG_PSV (1<<0) #define TZ_THFLAG_HOT (1<<2) #define TZ_THFLAG_CRT (1<<3) int tz_flags; #define TZ_FLAG_NO_SCP (1<<0) /* no _SCP method */ #define TZ_FLAG_GETPROFILE (1<<1) /* fetch powerprofile in timeout */ struct sysctl_ctx_list tz_sysctl_ctx; /* sysctl tree */ struct sysctl_oid *tz_sysctl_tree; struct acpi_tz_zone tz_zone; /* thermal zone parameters */ }; static int acpi_tz_probe(device_t dev); static int acpi_tz_attach(device_t dev); static int acpi_tz_establish(struct acpi_tz_softc *sc); static void acpi_tz_monitor(struct acpi_tz_softc *sc); static void acpi_tz_all_off(struct acpi_tz_softc *sc); static void acpi_tz_switch_cooler_off(ACPI_OBJECT *obj, void *arg); static void acpi_tz_switch_cooler_on(ACPI_OBJECT *obj, void *arg); static void acpi_tz_getparam(struct acpi_tz_softc *sc, char *node, int *data); static void acpi_tz_sanity(struct acpi_tz_softc *sc, int *val, 
char *what); static int acpi_tz_active_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_tz_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context); static void acpi_tz_timeout(void *arg); static void acpi_tz_powerprofile(void *arg); static device_method_t acpi_tz_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_tz_probe), DEVMETHOD(device_attach, acpi_tz_attach), {0, 0} }; static driver_t acpi_tz_driver = { "acpi_tz", acpi_tz_methods, sizeof(struct acpi_tz_softc), }; devclass_t acpi_tz_devclass; DRIVER_MODULE(acpi_tz, acpi, acpi_tz_driver, acpi_tz_devclass, 0, 0); static struct sysctl_ctx_list acpi_tz_sysctl_ctx; static struct sysctl_oid *acpi_tz_sysctl_tree; /* * Match an ACPI thermal zone. */ static int acpi_tz_probe(device_t dev) { int result; ACPI_LOCK; /* no FUNCTION_TRACE - too noisy */ if ((acpi_get_type(dev) == ACPI_TYPE_THERMAL) && !acpi_disabled("thermal")) { device_set_desc(dev, "thermal zone"); result = -10; } else { result = ENXIO; } ACPI_UNLOCK; return(result); } /* * Attach to an ACPI thermal zone. */ static int acpi_tz_attach(device_t dev) { struct acpi_tz_softc *sc; struct acpi_softc *acpi_sc; int error; char oidname[8]; int i; FUNCTION_TRACE(__func__); ACPI_LOCK; sc = device_get_softc(dev); sc->tz_dev = dev; sc->tz_handle = acpi_get_handle(dev); sc->tz_requested = TZ_ACTIVE_NONE; /* * Parse the current state of the thermal zone and build control * structures. */ if ((error = acpi_tz_establish(sc)) != 0) goto out; /* * Register for any Notify events sent to this zone. */ AcpiInstallNotifyHandler(sc->tz_handle, ACPI_DEVICE_NOTIFY, acpi_tz_notify_handler, sc); /* * Create our sysctl nodes. * * XXX we need a mechanism for adding nodes under ACPI. 
*/ if (device_get_unit(dev) == 0) { acpi_sc = acpi_device_get_parent_softc(dev); sysctl_ctx_init(&acpi_tz_sysctl_ctx); acpi_tz_sysctl_tree = SYSCTL_ADD_NODE(&acpi_tz_sysctl_ctx, SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "thermal", CTLFLAG_RD, 0, ""); } sysctl_ctx_init(&sc->tz_sysctl_ctx); sprintf(oidname, "tz%d", device_get_unit(dev)); sc->tz_sysctl_tree = SYSCTL_ADD_NODE(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO, oidname, CTLFLAG_RD, 0, ""); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "temperature", CTLFLAG_RD, &sc->tz_temperature, 0, "current thermal zone temperature"); SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "active", CTLTYPE_INT | CTLFLAG_RW, sc, 0, acpi_tz_active_sysctl, "I", ""); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "thermal_flags", CTLFLAG_RD, &sc->tz_thflags, 0, "thermal zone flags"); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "_PSV", CTLFLAG_RD, &sc->tz_zone.psv, 0, ""); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "_HOT", CTLFLAG_RD, &sc->tz_zone.hot, 0, ""); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "_CRT", CTLFLAG_RD, &sc->tz_zone.crt, 0, ""); for (i = 0; i < TZ_NUMLEVELS; i++) { sprintf(oidname, "_AC%d", i); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, oidname, CTLFLAG_RD, &sc->tz_zone.ac[i], 0, ""); } /* * Register our power profile event handler, and flag it for a manual * invocation by our timeout. We defer it like this so that the rest * of the subsystem has time to come up. */ EVENTHANDLER_REGISTER(powerprofile_change, acpi_tz_powerprofile, sc, 0); sc->tz_flags |= TZ_FLAG_GETPROFILE; /* * Don't bother evaluating/printing the temperature at this point; * on many systems it'll be bogus until the EC is running. 
*/ out: ACPI_UNLOCK; /* * Start the timeout routine, with enough delay for the rest of the * subsystem to come up. */ sc->tz_timeout = timeout(acpi_tz_timeout, sc, TZ_POLLRATE); return_VALUE(error); } /* * Parse the current state of this thermal zone and set up to use it. * * Note that we may have previous state, which will have to be discarded. */ static int acpi_tz_establish(struct acpi_tz_softc *sc) { ACPI_OBJECT *obj; int i; char nbuf[8]; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; /* * Power everything off and erase any existing state. */ acpi_tz_all_off(sc); for (i = 0; i < TZ_NUMLEVELS; i++) if (sc->tz_zone.al[i].Pointer != NULL) AcpiOsFree(sc->tz_zone.al[i].Pointer); if (sc->tz_zone.psl.Pointer != NULL) AcpiOsFree(sc->tz_zone.psl.Pointer); bzero(&sc->tz_zone, sizeof(sc->tz_zone)); /* * Evaluate thermal zone parameters. */ for (i = 0; i < TZ_NUMLEVELS; i++) { sprintf(nbuf, "_AC%d", i); acpi_tz_getparam(sc, nbuf, &sc->tz_zone.ac[i]); sprintf(nbuf, "_AL%d", i); acpi_EvaluateIntoBuffer(sc->tz_handle, nbuf, NULL, &sc->tz_zone.al[i]); obj = (ACPI_OBJECT *)sc->tz_zone.al[i].Pointer; if (obj != NULL) { /* should be a package containing a list of power objects */ if (obj->Type != ACPI_TYPE_PACKAGE) { device_printf(sc->tz_dev, "%s has unknown object type %d, rejecting\n", nbuf, obj->Type); return_VALUE(ENXIO); } } } acpi_tz_getparam(sc, "_CRT", &sc->tz_zone.crt); acpi_tz_getparam(sc, "_HOT", &sc->tz_zone.hot); acpi_EvaluateIntoBuffer(sc->tz_handle, "_PSL", NULL, &sc->tz_zone.psl); acpi_tz_getparam(sc, "_PSV", &sc->tz_zone.psv); acpi_tz_getparam(sc, "_TC1", &sc->tz_zone.tc1); acpi_tz_getparam(sc, "_TC2", &sc->tz_zone.tc2); acpi_tz_getparam(sc, "_TSP", &sc->tz_zone.tsp); acpi_tz_getparam(sc, "_TZP", &sc->tz_zone.tzp); /* * Sanity-check the values we've been given. * * XXX what do we do about systems that give us the same value for * more than one of these setpoints? 
*/ acpi_tz_sanity(sc, &sc->tz_zone.crt, "_CRT"); acpi_tz_sanity(sc, &sc->tz_zone.hot, "_HOT"); acpi_tz_sanity(sc, &sc->tz_zone.psv, "_PSV"); for (i = 0; i < TZ_NUMLEVELS; i++) acpi_tz_sanity(sc, &sc->tz_zone.ac[i], "_ACx"); /* * Power off everything that we've just been given. */ acpi_tz_all_off(sc); return_VALUE(0); } /* * Evaluate the condition of a thermal zone, take appropriate actions. */ static void acpi_tz_monitor(struct acpi_tz_softc *sc) { int temp; int i; int newactive, newflags; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; /* * Get the current temperature. */ if ((acpi_EvaluateInteger(sc->tz_handle, "_TMP", &temp)) != AE_OK) { device_printf(sc->tz_dev, "error fetching current temperature\n"); /* XXX disable zone? go to max cooling? */ return_VOID; } - DEBUG_PRINT(TRACE_VALUES, ("got %d.%dC\n", TZ_KELVTOC(temp))); + ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "got %d.%dC\n", TZ_KELVTOC(temp))); sc->tz_temperature = temp; /* * Work out what we ought to be doing right now. * * Note that the _ACx levels sort from hot to cold. */ newactive = TZ_ACTIVE_NONE; for (i = TZ_NUMLEVELS - 1; i >= 0; i--) { if ((sc->tz_zone.ac[i] != -1) && (temp >= sc->tz_zone.ac[i])) { device_printf(sc->tz_dev, "_AC%d: temperature %d > setpoint %d\n", i, temp, sc->tz_zone.ac[i]); newactive = i; } } /* handle user override of active mode */ if (sc->tz_requested > newactive) newactive = sc->tz_requested; /* update temperature-related flags */ newflags = TZ_THFLAG_NONE; if ((sc->tz_zone.psv != -1) && (temp >= sc->tz_zone.psv)) newflags |= TZ_THFLAG_PSV; if ((sc->tz_zone.hot != -1) && (temp >= sc->tz_zone.hot)) newflags |= TZ_THFLAG_HOT; if ((sc->tz_zone.crt != -1) && (temp >= sc->tz_zone.crt)) newflags |= TZ_THFLAG_CRT; /* * If the active cooling state has changed, we have to switch things. 
*/ if (newactive != sc->tz_active) { /* turn off the cooling devices that are on, if any are */ if (sc->tz_active != TZ_ACTIVE_NONE) acpi_ForeachPackageObject((ACPI_OBJECT *)sc->tz_zone.al[sc->tz_active].Pointer, acpi_tz_switch_cooler_off, sc); /* turn on cooling devices that are required, if any are */ if (newactive != TZ_ACTIVE_NONE) acpi_ForeachPackageObject((ACPI_OBJECT *)sc->tz_zone.al[newactive].Pointer, acpi_tz_switch_cooler_on, sc); device_printf(sc->tz_dev, "switched from _AC%d to _AC%d\n", sc->tz_active, newactive); sc->tz_active = newactive; } /* * XXX (de)activate any passive cooling that may be required. */ /* * If we have just become _HOT or _CRT, warn the user. * * We should actually shut down at this point, but it's not clear * that some systems don't actually map _CRT to the same value as _AC0. */ if ((newflags & (TZ_THFLAG_HOT | TZ_THFLAG_CRT)) && !(sc->tz_thflags & (TZ_THFLAG_HOT | TZ_THFLAG_CRT))) { device_printf(sc->tz_dev, "WARNING - current temperature (%d.%dC) exceeds system limits\n", TZ_KELVTOC(sc->tz_temperature), sc->tz_temperature); /* shutdown_nice(RB_POWEROFF);*/ } sc->tz_thflags = newflags; return_VOID; } /* * Turn off all the cooling devices. */ static void acpi_tz_all_off(struct acpi_tz_softc *sc) { int i; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; /* * Scan all the _ALx objects, and turn them all off. */ for (i = 0; i < TZ_NUMLEVELS; i++) { if (sc->tz_zone.al[i].Pointer == NULL) continue; acpi_ForeachPackageObject((ACPI_OBJECT *)sc->tz_zone.al[i].Pointer, acpi_tz_switch_cooler_off, sc); } /* * XXX revert any passive-cooling options. */ sc->tz_active = TZ_ACTIVE_NONE; sc->tz_thflags = TZ_THFLAG_NONE; return_VOID; } /* * Given an object, verify that it's a reference to a device of some sort, * and try to switch it off. 
*/ static void acpi_tz_switch_cooler_off(ACPI_OBJECT *obj, void *arg) { ACPI_HANDLE cooler; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; switch(obj->Type) { case ACPI_TYPE_STRING: - DEBUG_PRINT(TRACE_OBJECTS, ("called to turn %s off\n", obj->String.Pointer)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "called to turn %s off\n", obj->String.Pointer)); /* * Find the handle for the device and turn it off. * The String object here seems to contain a fully-qualified path, so we * don't have to search for it in our parents. * * XXX This may not always be the case. */ if (AcpiGetHandle(NULL, obj->String.Pointer, &cooler) == AE_OK) acpi_pwr_switch_consumer(cooler, ACPI_STATE_D3); break; default: - DEBUG_PRINT(TRACE_OBJECTS, ("called to handle unsupported object type %d\n", - obj->Type)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "called to handle unsupported object type %d\n", + obj->Type)); break; } return_VOID; } /* * Given an object, verify that it's a reference to a device of some sort, * and try to switch it on. * * XXX replication of off/on function code is bad, mmmkay? */ static void acpi_tz_switch_cooler_on(ACPI_OBJECT *obj, void *arg) { struct acpi_tz_softc *sc = (struct acpi_tz_softc *)arg; ACPI_HANDLE cooler; ACPI_STATUS status; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; switch(obj->Type) { case ACPI_TYPE_STRING: - DEBUG_PRINT(TRACE_OBJECTS, ("called to turn %s on\n", obj->String.Pointer)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "called to turn %s on\n", obj->String.Pointer)); /* * Find the handle for the device and turn it off. * The String object here seems to contain a fully-qualified path, so we * don't have to search for it in our parents. * * XXX This may not always be the case. 
*/ if (AcpiGetHandle(NULL, obj->String.Pointer, &cooler) == AE_OK) { if (ACPI_FAILURE(status = acpi_pwr_switch_consumer(cooler, ACPI_STATE_D0))) { device_printf(sc->tz_dev, "failed to activate %s - %s\n", obj->String.Pointer, AcpiFormatException(status)); } } else { device_printf(sc->tz_dev, "couldn't find %s\n", obj->String.Pointer); } break; default: - DEBUG_PRINT(TRACE_OBJECTS, ("called to handle unsupported object type %d\n", - obj->Type)); + ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "called to handle unsupported object type %d\n", + obj->Type)); break; } return_VOID; } /* * Read/debug-print a parameter, default it to -1. */ static void acpi_tz_getparam(struct acpi_tz_softc *sc, char *node, int *data) { FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; if (acpi_EvaluateInteger(sc->tz_handle, node, data) != AE_OK) { *data = -1; } else { - DEBUG_PRINT(TRACE_VALUES, ("%s.%s = %d\n", acpi_name(sc->tz_handle), - node, *data)); + ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "%s.%s = %d\n", acpi_name(sc->tz_handle), + node, *data)); } return_VOID; } /* * Sanity-check a temperature value. Assume that setpoints * should be between 0C and 150C. */ static void acpi_tz_sanity(struct acpi_tz_softc *sc, int *val, char *what) { if ((*val != -1) && ((*val < TZ_ZEROC) || (*val > (TZ_ZEROC + 1500)))) { device_printf(sc->tz_dev, "%s value is absurd, ignored (%d.%dC)\n", what, TZ_KELVTOC(*val)); *val = -1; } } /* * Respond to a sysctl on the active state node. 
*/ static int acpi_tz_active_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_tz_softc *sc; int active; int error; ACPI_LOCK; sc = (struct acpi_tz_softc *)oidp->oid_arg1; active = sc->tz_active; error = sysctl_handle_int(oidp, &active, 0, req); /* error or no new value */ if ((error != 0) || (req->newptr == NULL)) goto out; /* range check */ if ((active < -1) || (active >= TZ_NUMLEVELS)) { error = EINVAL; goto out; } /* set new preferred level and re-switch */ sc->tz_requested = active; acpi_tz_monitor(sc); out: ACPI_UNLOCK; return(error); } /* * Respond to a Notify event sent to the zone. */ static void acpi_tz_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { struct acpi_tz_softc *sc = (struct acpi_tz_softc *)context; FUNCTION_TRACE(__func__); ACPI_ASSERTLOCK; switch(notify) { case TZ_NOTIFY_TEMPERATURE: /* temperature change occurred */ AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, (OSD_EXECUTION_CALLBACK)acpi_tz_monitor, sc); break; case TZ_NOTIFY_DEVICES: case TZ_NOTIFY_LEVELS: /* zone devices/setpoints changed */ AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, (OSD_EXECUTION_CALLBACK)acpi_tz_establish, sc); break; default: device_printf(sc->tz_dev, "unknown Notify event 0x%x\n", notify); break; } return_VOID; } /* * Poll the thermal zone. */ static void acpi_tz_timeout(void *arg) { struct acpi_tz_softc *sc = (struct acpi_tz_softc *)arg; /* do we need to get the power profile settings? */ if (sc->tz_flags & TZ_FLAG_GETPROFILE) { acpi_tz_powerprofile(arg); sc->tz_flags &= ~TZ_FLAG_GETPROFILE; } ACPI_LOCK; /* check temperature, take action */ AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, (OSD_EXECUTION_CALLBACK)acpi_tz_monitor, sc); /* XXX passive cooling actions? */ /* re-register ourself */ sc->tz_timeout = timeout(acpi_tz_timeout, sc, TZ_POLLRATE); ACPI_UNLOCK; } /* * System power profile may have changed; fetch and notify the * thermal zone accordingly. * * Since this can be called from an arbitrary eventhandler, it needs * to get the ACPI lock itself. 
*/ static void acpi_tz_powerprofile(void *arg) { ACPI_OBJECT_LIST args; ACPI_OBJECT obj; ACPI_STATUS status; struct acpi_tz_softc *sc = (struct acpi_tz_softc *)arg; ACPI_LOCK; /* check that we haven't decided there's no _SCP method */ if (!(sc->tz_flags & TZ_FLAG_NO_SCP)) { /* call _SCP to set the new profile */ obj.Type = ACPI_TYPE_INTEGER; obj.Integer.Value = (powerprofile_get_state() == POWERPROFILE_PERFORMANCE) ? 0 : 1; args.Count = 1; args.Pointer = &obj; if (ACPI_FAILURE(status = AcpiEvaluateObject(sc->tz_handle, "_SCP", &args, NULL))) { if (status != AE_NOT_FOUND) device_printf(sc->tz_dev, "can't evaluate %s._SCP - %s\n", acpi_name(sc->tz_handle), AcpiFormatException(status)); sc->tz_flags |= TZ_FLAG_NO_SCP; } else { /* we have to re-evaluate the entire zone now */ AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, (OSD_EXECUTION_CALLBACK)acpi_tz_establish, sc); } } ACPI_UNLOCK; }