Index: head/sys/dev/bhnd/bhnd.c =================================================================== --- head/sys/dev/bhnd/bhnd.c (revision 296297) +++ head/sys/dev/bhnd/bhnd.c (revision 296298) @@ -1,910 +1,910 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Broadcom Home Networking Division (HND) Bus Driver. * * The Broadcom HND family of devices consists of both SoCs and host-connected * networking chipsets containing a common family of Broadcom IP cores, * including an integrated MIPS and/or ARM cores. * * HND devices expose a nearly identical interface whether accessible over a * native SoC interconnect, or when connected via a host interface such as * PCIe. As a result, the majority of hardware support code should be re-usable * across host drivers for HND networking chipsets, as well as FreeBSD support * for Broadcom MIPS/ARM HND SoCs. * * Earlier HND models used the siba(4) on-chip interconnect, while later models * use bcma(4); the programming model is almost entirely independent * of the actual underlying interconect. */ #include #include #include #include #include #include #include #include #include "bhnd.h" #include "bhndvar.h" #include "bhnd_nvram_if.h" MALLOC_DEFINE(M_BHND, "bhnd", "bhnd bus data structures"); /** * bhnd_generic_probe_nomatch() reporting configuration. */ static const struct bhnd_nomatch { uint16_t vendor; /**< core designer */ uint16_t device; /**< core id */ bool if_verbose; /**< print when bootverbose is set. */ } bhnd_nomatch_table[] = { { BHND_MFGID_ARM, BHND_COREID_OOB_ROUTER, true }, { BHND_MFGID_ARM, BHND_COREID_EROM, true }, { BHND_MFGID_ARM, BHND_COREID_PL301, true }, { BHND_MFGID_ARM, BHND_COREID_APB_BRIDGE, true }, { BHND_MFGID_ARM, BHND_COREID_AXI_UNMAPPED, false }, { BHND_MFGID_INVALID, BHND_COREID_INVALID, false } }; static device_t find_nvram_child(device_t dev); static int compare_ascending_probe_order(const void *lhs, const void *rhs); static int compare_descending_probe_order(const void *lhs, const void *rhs); /** * Helper function for implementing DEVICE_ATTACH(). 
* * This function can be used to implement DEVICE_ATTACH() for bhnd(4) * bus implementations. It calls device_probe_and_attach() for each * of the device's children, in order. */ int bhnd_generic_attach(device_t dev) { device_t *devs; int ndevs; int error; if (device_is_attached(dev)) return (EBUSY); if ((error = device_get_children(dev, &devs, &ndevs))) return (error); qsort(devs, ndevs, sizeof(*devs), compare_ascending_probe_order); for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; device_probe_and_attach(child); } free(devs, M_TEMP); return (0); } /** * Helper function for implementing DEVICE_DETACH(). * * This function can be used to implement DEVICE_DETACH() for bhnd(4) * bus implementations. It calls device_detach() for each * of the device's children, in reverse order, terminating if * any call to device_detach() fails. */ int bhnd_generic_detach(device_t dev) { device_t *devs; int ndevs; int error; if (!device_is_attached(dev)) return (EBUSY); if ((error = device_get_children(dev, &devs, &ndevs))) return (error); /* Detach in the reverse of attach order */ qsort(devs, ndevs, sizeof(*devs), compare_descending_probe_order); for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; /* Terminate on first error */ if ((error = device_detach(child))) goto cleanup; } cleanup: free(devs, M_TEMP); return (error); } /** * Helper function for implementing DEVICE_SHUTDOWN(). * * This function can be used to implement DEVICE_SHUTDOWN() for bhnd(4) * bus implementations. It calls device_shutdown() for each * of the device's children, in reverse order, terminating if * any call to device_shutdown() fails. */ int bhnd_generic_shutdown(device_t dev) { device_t *devs; int ndevs; int error; if (!device_is_attached(dev)) return (EBUSY); if ((error = device_get_children(dev, &devs, &ndevs))) return (error); /* Shutdown in the reverse of attach order */ qsort(devs, ndevs, sizeof(*devs), compare_descending_probe_order); for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; /* Terminate on first error */ if ((error = device_shutdown(child))) goto cleanup; } cleanup: free(devs, M_TEMP); return (error); } /** * Helper function for implementing DEVICE_RESUME(). * * This function can be used to implement DEVICE_RESUME() for bhnd(4) * bus implementations. It calls BUS_RESUME_CHILD() for each * of the device's children, in order, terminating if * any call to BUS_RESUME_CHILD() fails. */ int bhnd_generic_resume(device_t dev) { device_t *devs; int ndevs; int error; if (!device_is_attached(dev)) return (EBUSY); if ((error = device_get_children(dev, &devs, &ndevs))) return (error); qsort(devs, ndevs, sizeof(*devs), compare_ascending_probe_order); for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; /* Terminate on first error */ if ((error = BUS_RESUME_CHILD(device_get_parent(child), child))) goto cleanup; } cleanup: free(devs, M_TEMP); return (error); } /** * Helper function for implementing DEVICE_SUSPEND(). * * This function can be used to implement DEVICE_SUSPEND() for bhnd(4) * bus implementations. It calls BUS_SUSPEND_CHILD() for each * of the device's children, in reverse order. If any call to * BUS_SUSPEND_CHILD() fails, the suspend operation is terminated and * any devices that were suspended are resumed immediately by calling * their BUS_RESUME_CHILD() methods. 
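 *
 * Children are suspended in the reverse of their probe order, mirroring
 * bhnd_generic_detach().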
*/ int bhnd_generic_suspend(device_t dev) { device_t *devs; int ndevs; int error; if (!device_is_attached(dev)) return (EBUSY); if ((error = device_get_children(dev, &devs, &ndevs))) return (error); /* Suspend in the reverse of attach order */ qsort(devs, ndevs, sizeof(*devs), compare_descending_probe_order); for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; error = BUS_SUSPEND_CHILD(device_get_parent(child), child); /* On error, resume suspended devices and then terminate */ if (error) { for (int j = 0; j < i; j++) { BUS_RESUME_CHILD(device_get_parent(devs[j]), devs[j]); } goto cleanup; } } cleanup: free(devs, M_TEMP); return (error); } /* * Ascending comparison of bhnd device's probe order. */ static int compare_ascending_probe_order(const void *lhs, const void *rhs) { device_t ldev, rdev; int lorder, rorder; ldev = (*(const device_t *) lhs); rdev = (*(const device_t *) rhs); lorder = BHND_BUS_GET_PROBE_ORDER(device_get_parent(ldev), ldev); rorder = BHND_BUS_GET_PROBE_ORDER(device_get_parent(rdev), rdev); if (lorder < rorder) { return (-1); } else if (lorder > rorder) { return (1); } else { return (0); } } /* * Descending comparison of bhnd device's probe order. */ static int compare_descending_probe_order(const void *lhs, const void *rhs) { return (compare_ascending_probe_order(rhs, lhs)); } /** * Helper function for implementing BHND_BUS_GET_PROBE_ORDER(). * * This implementation determines probe ordering based on the device's class * and other properties, including whether the device is serving as a host * bridge. */ int bhnd_generic_get_probe_order(device_t dev, device_t child) { switch (bhnd_get_class(child)) { case BHND_DEVCLASS_CC: return (BHND_PROBE_BUS + BHND_PROBE_ORDER_FIRST); case BHND_DEVCLASS_CC_B: /* fall through */ case BHND_DEVCLASS_PMU: return (BHND_PROBE_BUS + BHND_PROBE_ORDER_EARLY); case BHND_DEVCLASS_SOC_ROUTER: return (BHND_PROBE_BUS + BHND_PROBE_ORDER_LATE); case BHND_DEVCLASS_SOC_BRIDGE: return (BHND_PROBE_BUS + BHND_PROBE_ORDER_LAST); case BHND_DEVCLASS_CPU: return (BHND_PROBE_CPU + BHND_PROBE_ORDER_FIRST); case BHND_DEVCLASS_RAM: /* fall through */ case BHND_DEVCLASS_MEMC: return (BHND_PROBE_CPU + BHND_PROBE_ORDER_EARLY); case BHND_DEVCLASS_NVRAM: return (BHND_PROBE_RESOURCE + BHND_PROBE_ORDER_EARLY); case BHND_DEVCLASS_PCI: case BHND_DEVCLASS_PCIE: case BHND_DEVCLASS_PCCARD: case BHND_DEVCLASS_ENET: case BHND_DEVCLASS_ENET_MAC: case BHND_DEVCLASS_ENET_PHY: case BHND_DEVCLASS_WLAN: case BHND_DEVCLASS_WLAN_MAC: case BHND_DEVCLASS_WLAN_PHY: case BHND_DEVCLASS_EROM: case BHND_DEVCLASS_OTHER: case BHND_DEVCLASS_INVALID: if (bhnd_is_hostb_device(child)) return (BHND_PROBE_ROOT + BHND_PROBE_ORDER_EARLY); return (BHND_PROBE_DEFAULT); } } /** * Helper function for implementing BHND_BUS_IS_REGION_VALID(). * * This implementation assumes that port and region numbers are 0-indexed and * are allocated non-sparsely, using BHND_BUS_GET_PORT_COUNT() and * BHND_BUS_GET_REGION_COUNT() to determine if @p port and @p region fall * within the defined range. */ bool bhnd_generic_is_region_valid(device_t dev, device_t child, bhnd_port_type type, u_int port, u_int region) { if (port >= bhnd_get_port_count(child, type)) return (false); if (region >= bhnd_get_region_count(child, type, port)) return (false); return (true); } /** * Find an NVRAM child device on @p dev, if any. * * @retval device_t An NVRAM device. * @retval NULL If no NVRAM device is found. 
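 *
 * If @p dev is a bhnd(4) bus, any ChipCommon child is searched recursively
 * for a ChipCommon-attached NVRAM/OTP device.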
*/ static device_t find_nvram_child(device_t dev) { device_t chipc, nvram; /* Look for a directly-attached NVRAM child */ nvram = device_find_child(dev, devclass_get_name(bhnd_nvram_devclass), -1); if (nvram == NULL) return (NULL); /* Further checks require a bhnd(4) bus */ if (device_get_devclass(dev) != bhnd_devclass) return (NULL); /* Look for a ChipCommon-attached OTP device */ if ((chipc = bhnd_find_child(dev, BHND_DEVCLASS_CC, -1)) != NULL) { /* Recursively search the ChipCommon device */ if ((nvram = find_nvram_child(chipc)) != NULL) return (nvram); } /* Not found */ return (NULL); } /** * Helper function for implementing BHND_BUS_READ_NVRAM_VAR(). * * This implementation searches @p dev for a valid NVRAM device. If no NVRAM * child device is found on @p dev, the request is delegated to the * BHND_BUS_READ_NVRAM_VAR() method on the parent * of @p dev. */ int bhnd_generic_read_nvram_var(device_t dev, device_t child, const char *name, void *buf, size_t *size) { device_t nvram; /* Try to find an NVRAM device applicable to @p child */ if ((nvram = find_nvram_child(dev)) == NULL) return (BHND_BUS_READ_NVRAM_VAR(device_get_parent(dev), child, name, buf, size)); return BHND_NVRAM_GETVAR(nvram, name, buf, size); } /** * Helper function for implementing BUS_PRINT_CHILD(). * * This implementation requests the device's struct resource_list via * BUS_GET_RESOURCE_LIST. */ int bhnd_generic_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx"); } retval += printf(" at core %u", bhnd_get_core_index(child)); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } /** * Helper function for implementing BUS_PRINT_CHILD(). * * This implementation requests the device's struct resource_list via * BUS_GET_RESOURCE_LIST. */ void bhnd_generic_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const struct bhnd_nomatch *nm; bool report; /* Fetch reporting configuration for this device */ report = true; for (nm = bhnd_nomatch_table; nm->device != BHND_COREID_INVALID; nm++) { if (nm->vendor != bhnd_get_vendor(child)) continue; if (nm->device != bhnd_get_device(child)) continue; report = false; if (bootverbose && nm->if_verbose) report = true; break; } if (!report) return; /* Print the non-matched device info */ device_printf(dev, "<%s %s>", bhnd_get_vendor_name(child), bhnd_get_device_name(child)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx"); printf(" at core %u (no driver attached)\n", bhnd_get_core_index(child)); } /** * Default implementation of BUS_CHILD_PNPINFO_STR(). */ static int bhnd_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen) { if (device_get_parent(child) != dev) { return (BUS_CHILD_PNPINFO_STR(device_get_parent(dev), child, buf, buflen)); } snprintf(buf, buflen, "vendor=0x%hx device=0x%hx rev=0x%hhx", bhnd_get_vendor(child), bhnd_get_device(child), bhnd_get_hwrev(child)); return (0); } /** * Default implementation of implementing BUS_PRINT_CHILD(). 
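 *
 * The location string names the base address of the device's first
 * device port/region, e.g. "port0.0=0x18000000" for a core mapped at
 * that address.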
*/ static int bhnd_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { bhnd_addr_t addr; bhnd_size_t size; if (device_get_parent(child) != dev) { return (BUS_CHILD_LOCATION_STR(device_get_parent(dev), child, buf, buflen)); } if (bhnd_get_region_addr(child, BHND_PORT_DEVICE, 0, 0, &addr, &size)) { /* No device default port/region */ if (buflen > 0) *buf = '\0'; return (0); } snprintf(buf, buflen, "port0.0=0x%llx", (unsigned long long) addr); return (0); } /** * Helper function for implementing BUS_SUSPEND_CHILD(). * * TODO: Power management * * If @p child is not a direct child of @p dev, suspension is delegated to * the @p dev parent. */ int bhnd_generic_suspend_child(device_t dev, device_t child) { if (device_get_parent(child) != dev) BUS_SUSPEND_CHILD(device_get_parent(dev), child); return bus_generic_suspend_child(dev, child); } /** * Helper function for implementing BUS_RESUME_CHILD(). * * TODO: Power management * * If @p child is not a direct child of @p dev, suspension is delegated to * the @p dev parent. */ int bhnd_generic_resume_child(device_t dev, device_t child) { if (device_get_parent(child) != dev) BUS_RESUME_CHILD(device_get_parent(dev), child); return bus_generic_resume_child(dev, child); } /** * Helper function for implementing BHND_BUS_IS_HOSTB_DEVICE(). * * If a parent device is available, this implementation delegates the * request to the BHND_BUS_IS_HOSTB_DEVICE() method on the parent of @p dev. * * If no parent device is available (i.e. on a the bus root), false * is returned. */ bool bhnd_generic_is_hostb_device(device_t dev, device_t child) { if (device_get_parent(dev) != NULL) return (BHND_BUS_IS_HOSTB_DEVICE(device_get_parent(dev), child)); return (false); } /** * Helper function for implementing BHND_BUS_IS_HW_DISABLED(). * * If a parent device is available, this implementation delegates the * request to the BHND_BUS_IS_HW_DISABLED() method on the parent of @p dev. * * If no parent device is available (i.e. on a the bus root), the hardware * is assumed to be usable and false is returned. */ bool bhnd_generic_is_hw_disabled(device_t dev, device_t child) { if (device_get_parent(dev) != NULL) return (BHND_BUS_IS_HW_DISABLED(device_get_parent(dev), child)); return (false); } /** * Helper function for implementing BHND_BUS_GET_CHIPID(). * * This implementation delegates the request to the BHND_BUS_GET_CHIPID() * method on the parent of @p dev. */ const struct bhnd_chipid * bhnd_generic_get_chipid(device_t dev, device_t child) { return (BHND_BUS_GET_CHIPID(device_get_parent(dev), child)); } /** * Helper function for implementing BHND_BUS_ALLOC_RESOURCE(). * * This simple implementation of BHND_BUS_ALLOC_RESOURCE() determines * any default values via BUS_GET_RESOURCE_LIST(), and calls * BHND_BUS_ALLOC_RESOURCE() method of the parent of @p dev. * * If no parent device is available, the request is instead delegated to * BUS_ALLOC_RESOURCE(). */ struct bhnd_resource * bhnd_generic_alloc_bhnd_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhnd_resource *r; struct resource_list *rl; struct resource_list_entry *rle; bool isdefault; bool passthrough; passthrough = (device_get_parent(child) != dev); - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* the default RID must always be the first device port/region. 
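 * A "default" allocation is one requested over the wildcard range, i.e.
 * RMAN_IS_DEFAULT_RANGE(start, end) above; assuming the sys/rman.h
 * definition, that macro tests (start == 0 && end == RM_MAX_END).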
*/ if (!passthrough && *rid == 0) { int rid0 = bhnd_get_port_rid(child, BHND_PORT_DEVICE, 0, 0); KASSERT(*rid == rid0, ("rid 0 does not map to the first device port (%d)", rid0)); } /* Determine locally-known defaults before delegating the request. */ if (!passthrough && isdefault) { /* fetch resource list from child's bus */ rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl == NULL) return (NULL); /* no resource list */ /* look for matching type/rid pair */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) return (NULL); /* set default values */ start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Try to delegate to our parent. */ if (device_get_parent(dev) != NULL) { return (BHND_BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); } /* If this is the bus root, use a real bus-allocated resource */ r = malloc(sizeof(struct bhnd_resource), M_BHND, M_NOWAIT); if (r == NULL) return NULL; /* Allocate the bus resource, marking it as 'direct' (not requiring * any bus window remapping to perform I/O) */ r->direct = true; r->res = BUS_ALLOC_RESOURCE(dev, child, type, rid, start, end, count, flags); if (r->res == NULL) { free(r, M_BHND); return NULL; } return (r); } /** * Helper function for implementing BHND_BUS_RELEASE_RESOURCE(). * * This simple implementation of BHND_BUS_RELEASE_RESOURCE() simply calls the * BHND_BUS_RELEASE_RESOURCE() method of the parent of @p dev. * * If no parent device is available, the request is delegated to * BUS_RELEASE_RESOURCE(). */ int bhnd_generic_release_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Try to delegate to the parent. */ if (device_get_parent(dev) != NULL) return (BHND_BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); /* Release the resource directly */ if (!r->direct) { panic("bhnd indirect resource released without " "bhnd parent bus"); } error = BUS_RELEASE_RESOURCE(dev, child, type, rid, r->res); if (error) return (error); free(r, M_BHND); return (0); } /** * Helper function for implementing BHND_BUS_ACTIVATE_RESOURCE(). * * This simple implementation of BHND_BUS_ACTIVATE_RESOURCE() simply calls the * BHND_BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. * * If no parent device is available, the request is delegated to * BUS_ACTIVATE_RESOURCE(). */ int bhnd_generic_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { /* Try to delegate to the parent */ if (device_get_parent(dev) != NULL) return (BHND_BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r)); /* Activate the resource directly */ if (!r->direct) { panic("bhnd indirect resource released without " "bhnd parent bus"); } return (BUS_ACTIVATE_RESOURCE(dev, child, type, rid, r->res)); }; /** * Helper function for implementing BHND_BUS_DEACTIVATE_RESOURCE(). * * This simple implementation of BHND_BUS_ACTIVATE_RESOURCE() simply calls the * BHND_BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. * * If no parent device is available, the request is delegated to * BUS_DEACTIVATE_RESOURCE(). 
*/ int bhnd_generic_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { if (device_get_parent(dev) != NULL) return (BHND_BUS_DEACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r)); /* De-activate the resource directly */ if (!r->direct) { panic("bhnd indirect resource released without " "bhnd parent bus"); } return (BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r->res)); }; /* * Delegate all indirect I/O to the parent device. When inherited by * non-bridged bus implementations, resources will never be marked as * indirect, and these methods should never be called. */ static uint8_t bhnd_read_1(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset) { return (BHND_BUS_READ_1(device_get_parent(dev), child, r, offset)); } static uint16_t bhnd_read_2(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset) { return (BHND_BUS_READ_2(device_get_parent(dev), child, r, offset)); } static uint32_t bhnd_read_4(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset) { return (BHND_BUS_READ_4(device_get_parent(dev), child, r, offset)); } static void bhnd_write_1(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, uint8_t value) { BHND_BUS_WRITE_1(device_get_parent(dev), child, r, offset, value); } static void bhnd_write_2(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, uint16_t value) { BHND_BUS_WRITE_2(device_get_parent(dev), child, r, offset, value); } static void bhnd_write_4(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, uint32_t value) { BHND_BUS_WRITE_4(device_get_parent(dev), child, r, offset, value); } static void bhnd_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHND_BUS_BARRIER(device_get_parent(dev), child, r, offset, length, flags); } static device_method_t bhnd_methods[] = { /* Device interface */ \ DEVMETHOD(device_attach, bhnd_generic_attach), DEVMETHOD(device_detach, bhnd_generic_detach), DEVMETHOD(device_shutdown, bhnd_generic_shutdown), DEVMETHOD(device_suspend, bhnd_generic_suspend), DEVMETHOD(device_resume, bhnd_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhnd_generic_probe_nomatch), DEVMETHOD(bus_print_child, bhnd_generic_print_child), DEVMETHOD(bus_child_pnpinfo_str, bhnd_child_pnpinfo_str), DEVMETHOD(bus_child_location_str, bhnd_child_location_str), DEVMETHOD(bus_suspend_child, bhnd_generic_suspend_child), DEVMETHOD(bus_resume_child, bhnd_generic_resume_child), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_bind_intr, bus_generic_bind_intr), DEVMETHOD(bus_describe_intr, bus_generic_describe_intr), DEVMETHOD(bus_get_dma_tag, bus_generic_get_dma_tag), /* BHND interface */ DEVMETHOD(bhnd_bus_alloc_resource, bhnd_generic_alloc_bhnd_resource), DEVMETHOD(bhnd_bus_release_resource, 
bhnd_generic_release_bhnd_resource), DEVMETHOD(bhnd_bus_activate_resource, bhnd_generic_activate_bhnd_resource), DEVMETHOD(bhnd_bus_activate_resource, bhnd_generic_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_chipid, bhnd_generic_get_chipid), DEVMETHOD(bhnd_bus_get_probe_order, bhnd_generic_get_probe_order), DEVMETHOD(bhnd_bus_read_1, bhnd_read_1), DEVMETHOD(bhnd_bus_read_2, bhnd_read_2), DEVMETHOD(bhnd_bus_read_4, bhnd_read_4), DEVMETHOD(bhnd_bus_write_1, bhnd_write_1), DEVMETHOD(bhnd_bus_write_2, bhnd_write_2), DEVMETHOD(bhnd_bus_write_4, bhnd_write_4), DEVMETHOD(bhnd_bus_barrier, bhnd_barrier), DEVMETHOD_END }; devclass_t bhnd_devclass; /**< bhnd bus. */ devclass_t bhnd_hostb_devclass; /**< bhnd bus host bridge. */ devclass_t bhnd_nvram_devclass; /**< bhnd NVRAM device */ DEFINE_CLASS_0(bhnd, bhnd_driver, bhnd_methods, sizeof(struct bhnd_softc)); MODULE_VERSION(bhnd, 1); Index: head/sys/dev/pci/pci_host_generic.c =================================================================== --- head/sys/dev/pci/pci_host_generic.c (revision 296297) +++ head/sys/dev/pci/pci_host_generic.c (revision 296298) @@ -1,815 +1,815 @@ /*- * Copyright (c) 2015 Ruslan Bukin * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* Generic ECAM PCIe driver */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" /* Assembling ECAM Configuration Address */ #define PCIE_BUS_SHIFT 20 #define PCIE_SLOT_SHIFT 15 #define PCIE_FUNC_SHIFT 12 #define PCIE_BUS_MASK 0xFF #define PCIE_SLOT_MASK 0x1F #define PCIE_FUNC_MASK 0x07 #define PCIE_REG_MASK 0xFFF #define PCIE_ADDR_OFFSET(bus, slot, func, reg) \ ((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT) | \ (((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT) | \ (((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT) | \ ((reg) & PCIE_REG_MASK)) #define PCI_IO_WINDOW_OFFSET 0x1000 #define SPACE_CODE_SHIFT 24 #define SPACE_CODE_MASK 0x3 #define SPACE_CODE_IO_SPACE 0x1 #define PROPS_CELL_SIZE 1 #define PCI_ADDR_CELL_SIZE 2 /* OFW bus interface */ struct generic_pcie_ofw_devinfo { struct ofw_bus_devinfo di_dinfo; struct resource_list di_rl; }; /* Forward prototypes */ static int generic_pcie_probe(device_t dev); static int parse_pci_mem_ranges(struct generic_pcie_softc *sc); static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes); static int generic_pcie_maxslots(device_t dev); static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value); static struct resource *generic_pcie_alloc_resource_ofw(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static struct resource *generic_pcie_alloc_resource_pcie(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int generic_pcie_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res); static int generic_pcie_release_resource_ofw(device_t, device_t, int, int, struct resource *); static int generic_pcie_release_resource_pcie(device_t, device_t, int, int, struct resource *); static int generic_pcie_ofw_bus_attach(device_t); static const struct ofw_bus_devinfo *generic_pcie_ofw_get_devinfo(device_t, device_t); static __inline void get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells) { *addr_cells = 2; /* Find address cells if present */ OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells)); *size_cells = 2; /* Find size cells if present */ OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells)); } static int generic_pcie_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic")) { device_set_desc(dev, "Generic PCI host controller"); return (BUS_PROBE_GENERIC); } return (ENXIO); } int pci_host_generic_attach(device_t dev) { struct generic_pcie_softc *sc; uint64_t phys_base; uint64_t pci_base; uint64_t size; int error; int tuple; int rid; sc = device_get_softc(dev); sc->dev = dev; /* Retrieve 'ranges' property from FDT */ if (bootverbose) device_printf(dev, "parsing FDT for ECAM%d:\n", sc->ecam); if (parse_pci_mem_ranges(sc)) return (ENXIO); /* Attach OFW bus */ if (generic_pcie_ofw_bus_attach(dev) != 0) return (ENXIO); rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); 
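	/*
	 * The resource allocated above maps the ECAM configuration window;
	 * config cycles are bus_space accesses at PCIE_ADDR_OFFSET(bus, slot,
	 * func, reg). For example, bus 1, slot 2, func 3, reg 0x10 yields
	 * offset (1 << 20) | (2 << 15) | (3 << 12) | 0x10 = 0x113010.
	 */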
if (sc->res == NULL) { device_printf(dev, "could not map memory.\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->res); sc->bsh = rman_get_bushandle(sc->res); sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "PCIe Memory"; sc->io_rman.rm_type = RMAN_ARRAY; sc->io_rman.rm_descr = "PCIe IO window"; /* Initialize rman and allocate memory regions */ error = rman_init(&sc->mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } error = rman_init(&sc->io_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { phys_base = sc->ranges[tuple].phys_base; pci_base = sc->ranges[tuple].pci_base; size = sc->ranges[tuple].size; if (phys_base == 0 || size == 0) continue; /* empty range element */ if (sc->ranges[tuple].flags & FLAG_MEM) { error = rman_manage_region(&sc->mem_rman, phys_base, phys_base + size); } else if (sc->ranges[tuple].flags & FLAG_IO) { error = rman_manage_region(&sc->io_rman, pci_base + PCI_IO_WINDOW_OFFSET, pci_base + PCI_IO_WINDOW_OFFSET + size); } else continue; if (error) { device_printf(dev, "rman_manage_region() failed." "error = %d\n", error); rman_fini(&sc->mem_rman); return (error); } } ofw_bus_setup_iinfo(ofw_bus_get_node(dev), &sc->pci_iinfo, sizeof(cell_t)); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int parse_pci_mem_ranges(struct generic_pcie_softc *sc) { pcell_t pci_addr_cells, parent_addr_cells; pcell_t attributes, size_cells; cell_t *base_ranges; int nbase_ranges; phandle_t node; int i, j, k; int tuple; node = ofw_bus_get_node(sc->dev); OF_getencprop(node, "#address-cells", &pci_addr_cells, sizeof(pci_addr_cells)); OF_getencprop(node, "#size-cells", &size_cells, sizeof(size_cells)); OF_getencprop(OF_parent(node), "#address-cells", &parent_addr_cells, sizeof(parent_addr_cells)); if (parent_addr_cells != 2 || pci_addr_cells != 3 || size_cells != 2) { device_printf(sc->dev, "Unexpected number of address or size cells in FDT\n"); return (ENXIO); } nbase_ranges = OF_getproplen(node, "ranges"); sc->nranges = nbase_ranges / sizeof(cell_t) / (parent_addr_cells + pci_addr_cells + size_cells); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { attributes = (base_ranges[j++] >> SPACE_CODE_SHIFT) & \ SPACE_CODE_MASK; if (attributes == SPACE_CODE_IO_SPACE) { sc->ranges[i].flags |= FLAG_IO; } else { sc->ranges[i].flags |= FLAG_MEM; } sc->ranges[i].pci_base = 0; for (k = 0; k < (pci_addr_cells - 1); k++) { sc->ranges[i].pci_base <<= 32; sc->ranges[i].pci_base |= base_ranges[j++]; } sc->ranges[i].phys_base = 0; for (k = 0; k < parent_addr_cells; k++) { sc->ranges[i].phys_base <<= 32; sc->ranges[i].phys_base |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < size_cells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } for (; i < MAX_RANGES_TUPLES; i++) { /* zero-fill remaining tuples to mark empty elements in array */ sc->ranges[i].pci_base = 0; sc->ranges[i].phys_base = 0; sc->ranges[i].size = 0; } if (bootverbose) { for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { device_printf(sc->dev, "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx\n", sc->ranges[tuple].pci_base, sc->ranges[tuple].phys_base, sc->ranges[tuple].size); } } free(base_ranges, M_DEVBUF); return (0); } static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 
u_int func, u_int reg, int bytes) { struct generic_pcie_softc *sc; bus_space_handle_t h; bus_space_tag_t t; uint64_t offset; uint32_t data; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return (~0U); sc = device_get_softc(dev); offset = PCIE_ADDR_OFFSET(bus, slot, func, reg); t = sc->bst; h = sc->bsh; switch (bytes) { case 1: data = bus_space_read_1(t, h, offset); break; case 2: data = le16toh(bus_space_read_2(t, h, offset)); break; case 4: data = le32toh(bus_space_read_4(t, h, offset)); break; default: return (~0U); } return (data); } static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct generic_pcie_softc *sc; bus_space_handle_t h; bus_space_tag_t t; uint64_t offset; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return; sc = device_get_softc(dev); offset = PCIE_ADDR_OFFSET(bus, slot, func, reg); t = sc->bst; h = sc->bsh; switch (bytes) { case 1: bus_space_write_1(t, h, offset, val); break; case 2: bus_space_write_2(t, h, offset, htole16(val)); break; case 4: bus_space_write_4(t, h, offset, htole32(val)); break; default: return; } } static int generic_pcie_maxslots(device_t dev) { return (31); /* max slots per bus acc. to standard */ } static int generic_pcie_route_interrupt(device_t bus, device_t dev, int pin) { struct generic_pcie_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[2]; phandle_t iparent; int intrcells; sc = device_get_softc(bus); pintr = pin; bzero(®, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (intrcells) { pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); return (pintr); } device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct generic_pcie_softc *sc; int secondary_bus; sc = device_get_softc(dev); if (index == PCIB_IVAR_BUS) { /* this pcib adds only pci bus 0 as child */ secondary_bus = 0; *result = secondary_bus; return (0); } if (index == PCIB_IVAR_DOMAIN) { *result = sc->ecam; return (0); } if (bootverbose) device_printf(dev, "ERROR: Unknown index %d.\n", index); return (ENOENT); } static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } static struct rman * generic_pcie_rman(struct generic_pcie_softc *sc, int type) { switch (type) { case SYS_RES_IOPORT: return (&sc->io_rman); case SYS_RES_MEMORY: return (&sc->mem_rman); default: break; } return (NULL); } static int generic_pcie_release_resource_pcie(device_t dev, device_t child, int type, int rid, struct resource *res) { struct generic_pcie_softc *sc; struct rman *rm; sc = device_get_softc(dev); rm = generic_pcie_rman(sc, type); if (rm != NULL) { KASSERT(rman_is_region_manager(res, rm), ("rman mismatch")); rman_release_resource(res); } return (bus_generic_release_resource(dev, child, type, rid, res)); } static int generic_pcie_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { /* For PCIe devices that do not have FDT nodes, use PCIB method */ if 
((int)ofw_bus_get_node(child) <= 0) { return (generic_pcie_release_resource_pcie(dev, child, type, rid, res)); } /* For other devices use OFW method */ return (generic_pcie_release_resource_ofw(dev, child, type, rid, res)); } struct resource * pci_host_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* For PCIe devices that do not have FDT nodes, use PCIB method */ if ((int)ofw_bus_get_node(child) <= 0) return (generic_pcie_alloc_resource_pcie(dev, child, type, rid, start, end, count, flags)); /* For other devices use OFW method */ return (generic_pcie_alloc_resource_ofw(dev, child, type, rid, start, end, count, flags)); } static struct resource * generic_pcie_alloc_resource_pcie(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct generic_pcie_softc *sc; struct resource *res; struct rman *rm; sc = device_get_softc(dev); rm = generic_pcie_rman(sc, type); if (rm == NULL) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags)); if (bootverbose) { device_printf(dev, "rman_reserve_resource: start=%#lx, end=%#lx, count=%#lx\n", start, end, count); } res = rman_reserve_resource(rm, start, end, count, flags, child); if (res == NULL) goto fail; rman_set_rid(res, *rid); if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res)) { rman_release_resource(res); goto fail; } return (res); fail: device_printf(dev, "%s FAIL: type=%d, rid=%d, " "start=%016lx, end=%016lx, count=%016lx, flags=%x\n", __func__, type, *rid, start, end, count, flags); return (NULL); } static int generic_pcie_adjust_resource(device_t dev, device_t child, int type, struct resource *res, rman_res_t start, rman_res_t end) { struct generic_pcie_softc *sc; struct rman *rm; sc = device_get_softc(dev); rm = generic_pcie_rman(sc, type); if (rm != NULL) return (rman_adjust_resource(res, start, end)); return (bus_generic_adjust_resource(dev, child, type, res, start, end)); } static int generic_pcie_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct generic_pcie_softc *sc; uint64_t phys_base; uint64_t pci_base; uint64_t size; int found; int res; int i; sc = device_get_softc(dev); if ((res = rman_activate_resource(r)) != 0) return (res); switch(type) { case SYS_RES_IOPORT: found = 0; for (i = 0; i < MAX_RANGES_TUPLES; i++) { pci_base = sc->ranges[i].pci_base; phys_base = sc->ranges[i].phys_base; size = sc->ranges[i].size; if ((rid > pci_base) && (rid < (pci_base + size))) { found = 1; break; } } if (found) { rman_set_start(r, rman_get_start(r) + phys_base); BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r); } else { device_printf(dev, "Failed to activate IOPORT resource\n"); res = 0; } break; case SYS_RES_MEMORY: BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r); break; default: break; } return (res); } static int generic_pcie_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct generic_pcie_softc *sc; vm_offset_t vaddr; int res; sc = device_get_softc(dev); if ((res = rman_deactivate_resource(r)) != 0) return (res); switch(type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: vaddr = (vm_offset_t)rman_get_virtual(r); pmap_unmapdev(vaddr, rman_get_size(r)); break; default: break; } return (res); } static device_method_t generic_pcie_methods[] = { DEVMETHOD(device_probe, generic_pcie_probe), 
DEVMETHOD(device_attach, pci_host_generic_attach), DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), DEVMETHOD(bus_alloc_resource, pci_host_generic_alloc_resource), DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), DEVMETHOD(bus_release_resource, generic_pcie_release_resource), DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), DEVMETHOD(pcib_route_interrupt, generic_pcie_route_interrupt), DEVMETHOD(pcib_read_config, generic_pcie_read_config), DEVMETHOD(pcib_write_config, generic_pcie_write_config), #if defined(__aarch64__) DEVMETHOD(pcib_alloc_msi, arm_alloc_msi), DEVMETHOD(pcib_release_msi, arm_release_msi), DEVMETHOD(pcib_alloc_msix, arm_alloc_msix), DEVMETHOD(pcib_release_msix, arm_release_msix), DEVMETHOD(pcib_map_msi, arm_map_msi), #endif /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, generic_pcie_ofw_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static const struct ofw_bus_devinfo * generic_pcie_ofw_get_devinfo(device_t bus __unused, device_t child) { struct generic_pcie_ofw_devinfo *di; di = device_get_ivars(child); return (&di->di_dinfo); } static struct resource * generic_pcie_alloc_resource_ofw(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct generic_pcie_softc *sc; struct generic_pcie_ofw_devinfo *di; struct resource_list_entry *rle; int i; sc = device_get_softc(bus); - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; /* Find defaults for this rid */ rle = resource_list_find(&di->di_rl, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (i = 0; i < MAX_RANGES_TUPLES; i++) { if (start >= sc->ranges[i].phys_base && end < sc->ranges[i].pci_base + sc->ranges[i].size) { start -= sc->ranges[i].phys_base; start += sc->ranges[i].pci_base; end -= sc->ranges[i].phys_base; end += sc->ranges[i].pci_base; break; } } if (i == MAX_RANGES_TUPLES) { device_printf(bus, "Could not map resource " "%#lx-%#lx\n", start, end); return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int generic_pcie_release_resource_ofw(device_t bus, device_t child, int type, int rid, struct resource *res) { return (bus_generic_release_resource(bus, child, type, rid, res)); } /* Helper functions */ static int generic_pcie_ofw_bus_attach(device_t dev) { struct generic_pcie_ofw_devinfo *di; device_t child; phandle_t parent, node; pcell_t addr_cells, size_cells; parent = ofw_bus_get_node(dev); if (parent > 0) { get_addr_size_cells(parent, &addr_cells, &size_cells); /* Iterate through all bus subordinates */ for (node = OF_child(parent); node > 0; node = OF_peer(node)) { /* Allocate and populate devinfo. 
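	 * Each FDT child gets an ofw_bus devinfo and a resource list built
	 * from its "reg" and interrupt properties; the devinfo is attached
	 * to a new newbus child as its ivars.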
*/ di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) { free(di, M_DEVBUF); continue; } /* Initialize and populate resource list. */ resource_list_init(&di->di_rl); ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells, &di->di_rl); ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL); /* Add newbus device for this FDT node */ child = device_add_child(dev, NULL, -1); if (child == NULL) { resource_list_free(&di->di_rl); ofw_bus_gen_destroy_devinfo(&di->di_dinfo); free(di, M_DEVBUF); continue; } device_set_ivars(child, di); } } return (0); } DEFINE_CLASS_0(pcib, generic_pcie_driver, generic_pcie_methods, sizeof(struct generic_pcie_softc)); devclass_t generic_pcie_devclass; DRIVER_MODULE(pcib, simplebus, generic_pcie_driver, generic_pcie_devclass, 0, 0); DRIVER_MODULE(pcib, ofwbus, generic_pcie_driver, generic_pcie_devclass, 0, 0); Index: head/sys/dev/puc/puc.c =================================================================== --- head/sys/dev/puc/puc.c (revision 296297) +++ head/sys/dev/puc/puc.c (revision 296298) @@ -1,768 +1,768 @@ /*- * Copyright (c) 2006 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PUC_ISRCCNT 5 struct puc_port { struct puc_bar *p_bar; struct resource *p_rres; struct resource *p_ires; device_t p_dev; int p_nr; int p_type; int p_rclk; int p_hasintr:1; serdev_intr_t *p_ihsrc[PUC_ISRCCNT]; void *p_iharg; int p_ipend; }; devclass_t puc_devclass; const char puc_driver_name[] = "puc"; static MALLOC_DEFINE(M_PUC, "PUC", "PUC driver"); SYSCTL_NODE(_hw, OID_AUTO, puc, CTLFLAG_RD, 0, "puc(9) driver configuration"); struct puc_bar * puc_get_bar(struct puc_softc *sc, int rid) { struct puc_bar *bar; struct rman *rm; rman_res_t end, start; int error, i; /* Find the BAR entry with the given RID. */ i = 0; while (i < PUC_PCI_BARS && sc->sc_bar[i].b_rid != rid) i++; if (i < PUC_PCI_BARS) return (&sc->sc_bar[i]); /* Not found. If we're looking for an unused entry, return NULL. */ if (rid == -1) return (NULL); /* Get an unused entry for us to fill. 
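	 * (The recursive puc_get_bar(sc, -1) call returns the first BAR
	 * entry whose b_rid is still -1, i.e. one not yet claimed.)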
*/ bar = puc_get_bar(sc, -1); if (bar == NULL) return (NULL); bar->b_rid = rid; bar->b_type = SYS_RES_IOPORT; bar->b_res = bus_alloc_resource_any(sc->sc_dev, bar->b_type, &bar->b_rid, RF_ACTIVE); if (bar->b_res == NULL) { bar->b_rid = rid; bar->b_type = SYS_RES_MEMORY; bar->b_res = bus_alloc_resource_any(sc->sc_dev, bar->b_type, &bar->b_rid, RF_ACTIVE); if (bar->b_res == NULL) { bar->b_rid = -1; return (NULL); } } /* Update our managed space. */ rm = (bar->b_type == SYS_RES_IOPORT) ? &sc->sc_ioport : &sc->sc_iomem; start = rman_get_start(bar->b_res); end = rman_get_end(bar->b_res); error = rman_manage_region(rm, start, end); if (error) { bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); bar->b_res = NULL; bar->b_rid = -1; bar = NULL; } return (bar); } static int puc_intr(void *arg) { struct puc_port *port; struct puc_softc *sc = arg; u_long ds, dev, devs; int i, idx, ipend, isrc, nints; uint8_t ilr; nints = 0; while (1) { /* * Obtain the set of devices with pending interrupts. */ devs = sc->sc_serdevs; if (sc->sc_ilr == PUC_ILR_DIGI) { idx = 0; while (devs & (0xfful << idx)) { ilr = ~bus_read_1(sc->sc_port[idx].p_rres, 7); devs &= ~0ul ^ ((u_long)ilr << idx); idx += 8; } } else if (sc->sc_ilr == PUC_ILR_QUATECH) { /* * Don't trust the value if it's the same as the option * register. It may mean that the ILR is not active and * we're reading the option register instead. This may * lead to false positives on 8-port boards. */ ilr = bus_read_1(sc->sc_port[0].p_rres, 7); if (ilr != (sc->sc_cfg_data & 0xff)) devs &= (u_long)ilr; } if (devs == 0UL) break; /* * Obtain the set of interrupt sources from those devices * that have pending interrupts. */ ipend = 0; idx = 0, dev = 1UL; ds = devs; while (ds != 0UL) { while ((ds & dev) == 0UL) idx++, dev <<= 1; ds &= ~dev; port = &sc->sc_port[idx]; port->p_ipend = SERDEV_IPEND(port->p_dev); ipend |= port->p_ipend; } if (ipend == 0) break; i = 0, isrc = SER_INT_OVERRUN; while (ipend) { while (i < PUC_ISRCCNT && !(ipend & isrc)) i++, isrc <<= 1; KASSERT(i < PUC_ISRCCNT, ("%s", __func__)); ipend &= ~isrc; idx = 0, dev = 1UL; ds = devs; while (ds != 0UL) { while ((ds & dev) == 0UL) idx++, dev <<= 1; ds &= ~dev; port = &sc->sc_port[idx]; if (!(port->p_ipend & isrc)) continue; if (port->p_ihsrc[i] != NULL) (*port->p_ihsrc[i])(port->p_iharg); nints++; } } } return ((nints > 0) ? 
FILTER_HANDLED : FILTER_STRAY); } int puc_bfe_attach(device_t dev) { char buffer[64]; struct puc_bar *bar; struct puc_port *port; struct puc_softc *sc; struct rman *rm; intptr_t res; bus_addr_t ofs, start; bus_size_t size; bus_space_handle_t bsh; bus_space_tag_t bst; int error, idx; sc = device_get_softc(dev); for (idx = 0; idx < PUC_PCI_BARS; idx++) sc->sc_bar[idx].b_rid = -1; do { sc->sc_ioport.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_ioport); if (!error) { sc->sc_iomem.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_iomem); if (!error) { sc->sc_irq.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_irq); if (!error) break; rman_fini(&sc->sc_iomem); } rman_fini(&sc->sc_ioport); } return (error); } while (0); snprintf(buffer, sizeof(buffer), "%s I/O port mapping", device_get_nameunit(dev)); sc->sc_ioport.rm_descr = strdup(buffer, M_PUC); snprintf(buffer, sizeof(buffer), "%s I/O memory mapping", device_get_nameunit(dev)); sc->sc_iomem.rm_descr = strdup(buffer, M_PUC); snprintf(buffer, sizeof(buffer), "%s port numbers", device_get_nameunit(dev)); sc->sc_irq.rm_descr = strdup(buffer, M_PUC); error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res); KASSERT(error == 0, ("%s %d", __func__, __LINE__)); sc->sc_nports = (int)res; sc->sc_port = malloc(sc->sc_nports * sizeof(struct puc_port), M_PUC, M_WAITOK|M_ZERO); error = rman_manage_region(&sc->sc_irq, 1, sc->sc_nports); if (error) goto fail; error = puc_config(sc, PUC_CFG_SETUP, 0, &res); if (error) goto fail; for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; port->p_nr = idx + 1; error = puc_config(sc, PUC_CFG_GET_TYPE, idx, &res); if (error) goto fail; port->p_type = res; error = puc_config(sc, PUC_CFG_GET_RID, idx, &res); if (error) goto fail; bar = puc_get_bar(sc, res); if (bar == NULL) { error = ENXIO; goto fail; } port->p_bar = bar; start = rman_get_start(bar->b_res); error = puc_config(sc, PUC_CFG_GET_OFS, idx, &res); if (error) goto fail; ofs = res; error = puc_config(sc, PUC_CFG_GET_LEN, idx, &res); if (error) goto fail; size = res; rm = (bar->b_type == SYS_RES_IOPORT) ? 
&sc->sc_ioport: &sc->sc_iomem; port->p_rres = rman_reserve_resource(rm, start + ofs, start + ofs + size - 1, size, 0, NULL); if (port->p_rres != NULL) { bsh = rman_get_bushandle(bar->b_res); bst = rman_get_bustag(bar->b_res); bus_space_subregion(bst, bsh, ofs, size, &bsh); rman_set_bushandle(port->p_rres, bsh); rman_set_bustag(port->p_rres, bst); } port->p_ires = rman_reserve_resource(&sc->sc_irq, port->p_nr, port->p_nr, 1, 0, NULL); if (port->p_ires == NULL) { error = ENXIO; goto fail; } error = puc_config(sc, PUC_CFG_GET_CLOCK, idx, &res); if (error) goto fail; port->p_rclk = res; port->p_dev = device_add_child(dev, NULL, -1); if (port->p_dev != NULL) device_set_ivars(port->p_dev, (void *)port); } error = puc_config(sc, PUC_CFG_GET_ILR, 0, &res); if (error) goto fail; sc->sc_ilr = res; if (bootverbose && sc->sc_ilr != 0) device_printf(dev, "using interrupt latch register\n"); sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE|RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY, puc_intr, NULL, sc, &sc->sc_icookie); if (error) error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)puc_intr, sc, &sc->sc_icookie); else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } if (sc->sc_ires == NULL) { /* XXX no interrupt resource. Force polled mode. */ sc->sc_polled = 1; } /* Probe and attach our children. */ for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev == NULL) continue; error = device_probe_and_attach(port->p_dev); if (error) { device_delete_child(dev, port->p_dev); port->p_dev = NULL; } } /* * If there are no serdev devices, then our interrupt handler * will do nothing. Tear it down. */ if (sc->sc_serdevs == 0UL) bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); return (0); fail: for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev != NULL) device_delete_child(dev, port->p_dev); if (port->p_rres != NULL) rman_release_resource(port->p_rres); if (port->p_ires != NULL) rman_release_resource(port->p_ires); } for (idx = 0; idx < PUC_PCI_BARS; idx++) { bar = &sc->sc_bar[idx]; if (bar->b_res != NULL) bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); } rman_fini(&sc->sc_irq); free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC); rman_fini(&sc->sc_iomem); free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC); rman_fini(&sc->sc_ioport); free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC); free(sc->sc_port, M_PUC); return (error); } int puc_bfe_detach(device_t dev) { struct puc_bar *bar; struct puc_port *port; struct puc_softc *sc; int error, idx; sc = device_get_softc(dev); /* Detach our children. 
*/ error = 0; for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev == NULL) continue; if (device_detach(port->p_dev) == 0) { device_delete_child(dev, port->p_dev); if (port->p_rres != NULL) rman_release_resource(port->p_rres); if (port->p_ires != NULL) rman_release_resource(port->p_ires); } else error = ENXIO; } if (error) return (error); if (sc->sc_serdevs != 0UL) bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); for (idx = 0; idx < PUC_PCI_BARS; idx++) { bar = &sc->sc_bar[idx]; if (bar->b_res != NULL) bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); } rman_fini(&sc->sc_irq); free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC); rman_fini(&sc->sc_iomem); free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC); rman_fini(&sc->sc_ioport); free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC); free(sc->sc_port, M_PUC); return (0); } int puc_bfe_probe(device_t dev, const struct puc_cfg *cfg) { struct puc_softc *sc; intptr_t res; int error; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_cfg = cfg; /* We don't attach to single-port serial cards. */ if (cfg->ports == PUC_PORT_1S || cfg->ports == PUC_PORT_1P) return (EDOOFUS); error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res); if (error) return (error); error = puc_config(sc, PUC_CFG_GET_DESC, 0, &res); if (error) return (error); if (res != 0) device_set_desc(dev, (const char *)res); return (BUS_PROBE_DEFAULT); } struct resource * puc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct puc_port *port; struct resource *res; device_t assigned, originator; int error; /* Get our immediate child. */ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (NULL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (rid == NULL || *rid != 0) return (NULL); /* We only support default allocations. */ - if (start != 0UL || end != ~0UL) + if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); if (type == port->p_bar->b_type) res = port->p_rres; else if (type == SYS_RES_IRQ) res = port->p_ires; else return (NULL); if (res == NULL) return (NULL); assigned = rman_get_device(res); if (assigned == NULL) /* Not allocated */ rman_set_device(res, originator); else if (assigned != originator) return (NULL); if (flags & RF_ACTIVE) { error = rman_activate_resource(res); if (error) { if (assigned == NULL) rman_set_device(res, NULL); return (NULL); } } return (res); } int puc_bus_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { struct puc_port *port; device_t originator; /* Get our immediate child. 
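	 * The caller may be a descendant of the port device rather than the
	 * port itself; walk up until we reach our own child, keeping the
	 * original requester for the rman_get_device() ownership check.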
*/ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (rid != 0 || res == NULL) return (EINVAL); if (type == port->p_bar->b_type) { if (res != port->p_rres) return (EINVAL); } else if (type == SYS_RES_IRQ) { if (res != port->p_ires) return (EINVAL); if (port->p_hasintr) return (EBUSY); } else return (EINVAL); if (rman_get_device(res) != originator) return (ENXIO); if (rman_get_flags(res) & RF_ACTIVE) rman_deactivate_resource(res); rman_set_device(res, NULL); return (0); } int puc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct puc_port *port; struct resource *res; rman_res_t start; /* Get our immediate child. */ while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (type == port->p_bar->b_type) res = port->p_rres; else if (type == SYS_RES_IRQ) res = port->p_ires; else return (ENXIO); if (rid != 0 || res == NULL) return (ENXIO); start = rman_get_start(res); if (startp != NULL) *startp = start; if (countp != NULL) *countp = rman_get_end(res) - start + 1; return (0); } int puc_bus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct puc_port *port; struct puc_softc *sc; device_t originator; int i, isrc, serdev; sc = device_get_softc(dev); /* Get our immediate child. */ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (cookiep == NULL || res != port->p_ires) return (EINVAL); /* We demand that serdev devices use filter_only interrupts. */ if (port->p_type == PUC_TYPE_SERIAL && ihand != NULL) return (ENXIO); if (rman_get_device(port->p_ires) != originator) return (ENXIO); /* * Have non-serdev ports handled by the bus implementation. It * supports multiple handlers for a single interrupt as it is, * so we wouldn't add value if we did it ourselves. */ serdev = 0; if (port->p_type == PUC_TYPE_SERIAL) { i = 0, isrc = SER_INT_OVERRUN; while (i < PUC_ISRCCNT) { port->p_ihsrc[i] = SERDEV_IHAND(originator, isrc); if (port->p_ihsrc[i] != NULL) serdev = 1; i++, isrc <<= 1; } } if (!serdev) return (BUS_SETUP_INTR(device_get_parent(dev), originator, sc->sc_ires, flags, filt, ihand, arg, cookiep)); sc->sc_serdevs |= 1UL << (port->p_nr - 1); port->p_hasintr = 1; port->p_iharg = arg; *cookiep = port; return (0); } int puc_bus_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { struct puc_port *port; struct puc_softc *sc; device_t originator; int i; sc = device_get_softc(dev); /* Get our immediate child. 
*/ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (res != port->p_ires) return (EINVAL); if (rman_get_device(port->p_ires) != originator) return (ENXIO); if (!port->p_hasintr) return (BUS_TEARDOWN_INTR(device_get_parent(dev), originator, sc->sc_ires, cookie)); if (cookie != port) return (EINVAL); port->p_hasintr = 0; port->p_iharg = NULL; for (i = 0; i < PUC_ISRCCNT; i++) port->p_ihsrc[i] = NULL; return (0); } int puc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct puc_port *port; /* Get our immediate child. */ while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (result == NULL) return (EINVAL); switch(index) { case PUC_IVAR_CLOCK: *result = port->p_rclk; break; case PUC_IVAR_TYPE: *result = port->p_type; break; default: return (ENOENT); } return (0); } int puc_bus_print_child(device_t dev, device_t child) { struct puc_port *port; int retval; port = device_get_ivars(child); retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at port %d", port->p_nr); retval += bus_print_child_footer(dev, child); return (retval); } int puc_bus_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { struct puc_port *port; port = device_get_ivars(child); snprintf(buf, buflen, "port=%d", port->p_nr); return (0); } int puc_bus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen) { struct puc_port *port; port = device_get_ivars(child); snprintf(buf, buflen, "type=%d", port->p_type); return (0); } Index: head/sys/dev/quicc/quicc_core.c =================================================================== --- head/sys/dev/quicc/quicc_core.c (revision 296297) +++ head/sys/dev/quicc/quicc_core.c (revision 296298) @@ -1,401 +1,401 @@ /*- * Copyright 2006 by Juniper Networks. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define quicc_read2(r, o) \ bus_space_read_2((r)->r_bustag, (r)->r_bushandle, o) #define quicc_read4(r, o) \ bus_space_read_4((r)->r_bustag, (r)->r_bushandle, o) #define quicc_write2(r, o, v) \ bus_space_write_2((r)->r_bustag, (r)->r_bushandle, o, v) #define quicc_write4(r, o, v) \ bus_space_write_4((r)->r_bustag, (r)->r_bushandle, o, v) devclass_t quicc_devclass; char quicc_driver_name[] = "quicc"; static MALLOC_DEFINE(M_QUICC, "QUICC", "QUICC driver"); struct quicc_device { struct rman *qd_rman; struct resource_list qd_rlist; device_t qd_dev; int qd_devtype; driver_filter_t *qd_ih; void *qd_ih_arg; }; static int quicc_bfe_intr(void *arg) { struct quicc_device *qd; struct quicc_softc *sc = arg; uint32_t sipnr; sipnr = quicc_read4(sc->sc_rres, QUICC_REG_SIPNR_L); if (sipnr & 0x00f00000) qd = sc->sc_device; else qd = NULL; if (qd == NULL || qd->qd_ih == NULL) { device_printf(sc->sc_dev, "Stray interrupt %08x\n", sipnr); return (FILTER_STRAY); } return ((*qd->qd_ih)(qd->qd_ih_arg)); } int quicc_bfe_attach(device_t dev) { struct quicc_device *qd; struct quicc_softc *sc; struct resource_list_entry *rle; const char *sep; rman_res_t size, start; int error; sc = device_get_softc(dev); /* * Re-allocate. We expect that the softc contains the information * collected by quicc_bfe_probe() intact. */ sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); start = rman_get_start(sc->sc_rres); size = rman_get_size(sc->sc_rres); sc->sc_rman.rm_start = start; sc->sc_rman.rm_end = start + size - 1; sc->sc_rman.rm_type = RMAN_ARRAY; sc->sc_rman.rm_descr = "QUICC resources"; error = rman_init(&sc->sc_rman); if (!error) error = rman_manage_region(&sc->sc_rman, start, start + size - 1); if (error) { bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return (error); } /* * Allocate interrupt resource. */ sc->sc_irid = 0; sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY, quicc_bfe_intr, NULL, sc, &sc->sc_icookie); if (error) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)quicc_bfe_intr, sc, &sc->sc_icookie); } else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } if (sc->sc_ires == NULL) sc->sc_polled = 1; if (bootverbose && (sc->sc_fastintr || sc->sc_polled)) { sep = ""; device_print_prettyname(dev); if (sc->sc_fastintr) { printf("%sfast interrupt", sep); sep = ", "; } if (sc->sc_polled) { printf("%spolled mode", sep); sep = ", "; } printf("\n"); } sc->sc_device = qd = malloc(sizeof(struct quicc_device), M_QUICC, M_WAITOK | M_ZERO); qd->qd_devtype = QUICC_DEVTYPE_SCC; qd->qd_rman = &sc->sc_rman; resource_list_init(&qd->qd_rlist); resource_list_add(&qd->qd_rlist, sc->sc_rtype, 0, start, start + size - 1, size); resource_list_add(&qd->qd_rlist, SYS_RES_IRQ, 0, 0xf00, 0xf00, 1); rle = resource_list_find(&qd->qd_rlist, SYS_RES_IRQ, 0); rle->res = sc->sc_ires; qd->qd_dev = device_add_child(dev, NULL, -1); device_set_ivars(qd->qd_dev, (void *)qd); error = device_probe_and_attach(qd->qd_dev); /* Enable all SCC interrupts. 
*/ quicc_write4(sc->sc_rres, QUICC_REG_SIMR_L, 0x00f00000); /* Clear all pending interrupts. */ quicc_write4(sc->sc_rres, QUICC_REG_SIPNR_H, ~0); quicc_write4(sc->sc_rres, QUICC_REG_SIPNR_L, ~0); return (error); } int quicc_bfe_detach(device_t dev) { struct quicc_softc *sc; sc = device_get_softc(dev); bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return (0); } int quicc_bfe_probe(device_t dev, u_int clock) { struct quicc_softc *sc; uint16_t rev; sc = device_get_softc(dev); sc->sc_dev = dev; if (device_get_desc(dev) == NULL) device_set_desc(dev, "Quad integrated communications controller"); sc->sc_rrid = 0; sc->sc_rtype = SYS_RES_MEMORY; sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) { sc->sc_rrid = 0; sc->sc_rtype = SYS_RES_IOPORT; sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); } sc->sc_clock = clock; /* * Check that the microcode revision is 0x00e8, as documented * in the MPC8555E PowerQUICC III Integrated Processor Family * Reference Manual. */ rev = quicc_read2(sc->sc_rres, QUICC_PRAM_REV_NUM); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return ((rev == 0x00e8) ? BUS_PROBE_DEFAULT : ENXIO); } struct resource * quicc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (NULL); /* We only support default allocations. */ - if (start != 0UL || end != ~0UL) + if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); qd = device_get_ivars(child); rle = resource_list_find(&qd->qd_rlist, type, *rid); if (rle == NULL) return (NULL); if (rle->res == NULL) { rle->res = rman_reserve_resource(qd->qd_rman, rle->start, rle->start + rle->count - 1, rle->count, flags, child); if (rle->res != NULL) { rman_set_bustag(rle->res, &bs_be_tag); rman_set_bushandle(rle->res, rle->start); } } return (rle->res); } int quicc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); rle = resource_list_find(&qd->qd_rlist, type, rid); if (rle == NULL) return (EINVAL); if (startp != NULL) *startp = rle->start; if (countp != NULL) *countp = rle->count; return (0); } int quicc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct quicc_device *qd; struct quicc_softc *sc; uint32_t sccr; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); qd = device_get_ivars(child); switch (index) { case QUICC_IVAR_CLOCK: *result = sc->sc_clock; break; case QUICC_IVAR_BRGCLK: sccr = quicc_read4(sc->sc_rres, QUICC_REG_SCCR) & 3; *result = sc->sc_clock / ((1 << (sccr + 1)) << sccr); break; case QUICC_IVAR_DEVTYPE: *result = qd->qd_devtype; break; default: return (EINVAL); } return (0); } int quicc_bus_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); rle = resource_list_find(&qd->qd_rlist, type, rid); return ((rle == NULL) ? 
EINVAL : 0); } int quicc_bus_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct quicc_device *qd; struct quicc_softc *sc; if (device_get_parent(child) != dev) return (EINVAL); /* Interrupt handlers must be FAST or MPSAFE. */ if (filt == NULL && !(flags & INTR_MPSAFE)) return (EINVAL); sc = device_get_softc(dev); if (sc->sc_polled) return (ENXIO); if (sc->sc_fastintr && filt == NULL) { sc->sc_fastintr = 0; bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)quicc_bfe_intr, sc, &sc->sc_icookie); } qd = device_get_ivars(child); qd->qd_ih = (filt != NULL) ? filt : (driver_filter_t *)ihand; qd->qd_ih_arg = arg; *cookiep = ihand; return (0); } int quicc_bus_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { struct quicc_device *qd; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); if (qd->qd_ih != cookie) return (EINVAL); qd->qd_ih = NULL; qd->qd_ih_arg = NULL; return (0); } Index: head/sys/dev/scc/scc_core.c =================================================================== --- head/sys/dev/scc/scc_core.c (revision 296297) +++ head/sys/dev/scc/scc_core.c (revision 296298) @@ -1,584 +1,584 @@ /*- * Copyright (c) 2004-2006 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "scc_if.h" devclass_t scc_devclass; const char scc_driver_name[] = "scc"; static MALLOC_DEFINE(M_SCC, "SCC", "SCC driver"); static int scc_bfe_intr(void *arg) { struct scc_softc *sc = arg; struct scc_chan *ch; struct scc_class *cl; struct scc_mode *m; int c, i, ipend, isrc; cl = sc->sc_class; while (!sc->sc_leaving && (ipend = SCC_IPEND(sc)) != 0) { i = 0, isrc = SER_INT_OVERRUN; while (ipend) { while (i < SCC_ISRCCNT && !(ipend & isrc)) i++, isrc <<= 1; KASSERT(i < SCC_ISRCCNT, ("%s", __func__)); ipend &= ~isrc; for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; if (!(ch->ch_ipend & isrc)) continue; m = &ch->ch_mode[0]; if (m->ih_src[i] == NULL) continue; if ((*m->ih_src[i])(m->ih_arg)) ch->ch_ipend &= ~isrc; } } for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; if (!ch->ch_ipend) continue; m = &ch->ch_mode[0]; if (m->ih != NULL) (*m->ih)(m->ih_arg); else SCC_ICLEAR(sc, ch); } return (FILTER_HANDLED); } return (FILTER_STRAY); } int scc_bfe_attach(device_t dev, u_int ipc) { struct resource_list_entry *rle; struct scc_chan *ch; struct scc_class *cl; struct scc_mode *m; struct scc_softc *sc, *sc0; const char *sep; bus_space_handle_t bh; rman_res_t base, size, start, sz; int c, error, mode, sysdev; /* * The sc_class field defines the type of SCC we're going to work * with and thus the size of the softc. Replace the generic softc * with one that matches the SCC now that we're certain we handle * the device. */ sc0 = device_get_softc(dev); cl = sc0->sc_class; if (cl->size > sizeof(*sc)) { sc = malloc(cl->size, M_SCC, M_WAITOK|M_ZERO); bcopy(sc0, sc, sizeof(*sc)); device_set_softc(dev, sc); } else sc = sc0; size = abs(cl->cl_range) << sc->sc_bas.regshft; mtx_init(&sc->sc_hwmtx, "scc_hwmtx", NULL, MTX_SPIN); /* * Re-allocate. We expect that the softc contains the information * collected by scc_bfe_probe() intact. */ sc->sc_rres = bus_alloc_resource_anywhere(dev, sc->sc_rtype, &sc->sc_rrid, cl->cl_channels * size, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); /* * Allocate interrupt resources. There may be a different interrupt * per channel. We allocate them all... */ sc->sc_chan = malloc(sizeof(struct scc_chan) * cl->cl_channels, M_SCC, M_WAITOK | M_ZERO); for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; /* * XXX temporary hack. If we have more than 1 interrupt * per channel, allocate the first for the channel. At * this time only the macio bus front-end has more than * 1 interrupt per channel and we don't use the 2nd and * 3rd, because we don't support DMA yet. */ ch->ch_irid = c * ipc; ch->ch_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ch->ch_irid, RF_ACTIVE | RF_SHAREABLE); if (ipc == 0) break; } /* * Create the control structures for our children. Probe devices * and query them to see if we can reset the hardware. */ sysdev = 0; base = rman_get_start(sc->sc_rres); sz = (size != 0) ? size : rman_get_size(sc->sc_rres); start = base + ((cl->cl_range < 0) ? 
size * (cl->cl_channels - 1) : 0); for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; resource_list_init(&ch->ch_rlist); ch->ch_nr = c + 1; if (!SCC_ENABLED(sc, ch)) goto next; ch->ch_enabled = 1; resource_list_add(&ch->ch_rlist, sc->sc_rtype, 0, start, start + sz - 1, sz); rle = resource_list_find(&ch->ch_rlist, sc->sc_rtype, 0); rle->res = &ch->ch_rres; bus_space_subregion(rman_get_bustag(sc->sc_rres), rman_get_bushandle(sc->sc_rres), start - base, sz, &bh); rman_set_bushandle(rle->res, bh); rman_set_bustag(rle->res, rman_get_bustag(sc->sc_rres)); resource_list_add(&ch->ch_rlist, SYS_RES_IRQ, 0, c, c, 1); rle = resource_list_find(&ch->ch_rlist, SYS_RES_IRQ, 0); rle->res = (ch->ch_ires != NULL) ? ch->ch_ires : sc->sc_chan[0].ch_ires; for (mode = 0; mode < SCC_NMODES; mode++) { m = &ch->ch_mode[mode]; m->m_chan = ch; m->m_mode = 1U << mode; if ((cl->cl_modes & m->m_mode) == 0 || ch->ch_sysdev) continue; m->m_dev = device_add_child(dev, NULL, -1); device_set_ivars(m->m_dev, (void *)m); error = device_probe_child(dev, m->m_dev); if (!error) { m->m_probed = 1; m->m_sysdev = SERDEV_SYSDEV(m->m_dev) ? 1 : 0; ch->ch_sysdev |= m->m_sysdev; } } next: start += (cl->cl_range < 0) ? -size : size; sysdev |= ch->ch_sysdev; } /* * Have the hardware driver initialize the hardware. Tell it * whether or not a hardware reset should be performed. */ if (bootverbose) { device_printf(dev, "%sresetting hardware\n", (sysdev) ? "not " : ""); } error = SCC_ATTACH(sc, !sysdev); if (error) goto fail; /* * Setup our interrupt handler. Make it FAST under the assumption * that our children's are fast as well. We make it MPSAFE as soon * as a child sets up a MPSAFE interrupt handler. * Of course, if we can't setup a fast handler, we make it MPSAFE * right away. */ for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; if (ch->ch_ires == NULL) continue; error = bus_setup_intr(dev, ch->ch_ires, INTR_TYPE_TTY, scc_bfe_intr, NULL, sc, &ch->ch_icookie); if (error) { error = bus_setup_intr(dev, ch->ch_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)scc_bfe_intr, sc, &ch->ch_icookie); } else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ch->ch_irid, ch->ch_ires); ch->ch_ires = NULL; } } sc->sc_polled = 1; for (c = 0; c < cl->cl_channels; c++) { if (sc->sc_chan[0].ch_ires != NULL) sc->sc_polled = 0; } /* * Attach all child devices that were probed successfully. */ for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; for (mode = 0; mode < SCC_NMODES; mode++) { m = &ch->ch_mode[mode]; if (!m->m_probed) continue; error = device_attach(m->m_dev); if (error) continue; m->m_attached = 1; } } if (bootverbose && (sc->sc_fastintr || sc->sc_polled)) { sep = ""; device_print_prettyname(dev); if (sc->sc_fastintr) { printf("%sfast interrupt", sep); sep = ", "; } if (sc->sc_polled) { printf("%spolled mode", sep); sep = ", "; } printf("\n"); } return (0); fail: for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; if (ch->ch_ires == NULL) continue; bus_release_resource(dev, SYS_RES_IRQ, ch->ch_irid, ch->ch_ires); } bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return (error); } int scc_bfe_detach(device_t dev) { struct scc_chan *ch; struct scc_class *cl; struct scc_mode *m; struct scc_softc *sc; int chan, error, mode; sc = device_get_softc(dev); cl = sc->sc_class; /* Detach our children. 
*/ error = 0; for (chan = 0; chan < cl->cl_channels; chan++) { ch = &sc->sc_chan[chan]; for (mode = 0; mode < SCC_NMODES; mode++) { m = &ch->ch_mode[mode]; if (!m->m_attached) continue; if (device_detach(m->m_dev) != 0) error = ENXIO; else m->m_attached = 0; } } if (error) return (error); for (chan = 0; chan < cl->cl_channels; chan++) { ch = &sc->sc_chan[chan]; if (ch->ch_ires == NULL) continue; bus_teardown_intr(dev, ch->ch_ires, ch->ch_icookie); bus_release_resource(dev, SYS_RES_IRQ, ch->ch_irid, ch->ch_ires); } bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); free(sc->sc_chan, M_SCC); mtx_destroy(&sc->sc_hwmtx); return (0); } int scc_bfe_probe(device_t dev, u_int regshft, u_int rclk, u_int rid) { struct scc_softc *sc; struct scc_class *cl; u_long size, sz; int error; /* * Initialize the instance. Note that the instance (=softc) does * not necessarily match the hardware specific softc. We can't do * anything about it now, because we may not attach to the device. * Hardware drivers cannot use any of the class specific fields * while probing. */ sc = device_get_softc(dev); cl = sc->sc_class; kobj_init((kobj_t)sc, (kobj_class_t)cl); sc->sc_dev = dev; if (device_get_desc(dev) == NULL) device_set_desc(dev, cl->name); size = abs(cl->cl_range) << regshft; /* * Allocate the register resource. We assume that all SCCs have a * single register window in either I/O port space or memory mapped * I/O space. Any SCC that needs multiple windows will consequently * not be supported by this driver as-is. */ sc->sc_rrid = rid; sc->sc_rtype = SYS_RES_MEMORY; sc->sc_rres = bus_alloc_resource_anywhere(dev, sc->sc_rtype, &sc->sc_rrid, cl->cl_channels * size, RF_ACTIVE); if (sc->sc_rres == NULL) { sc->sc_rrid = rid; sc->sc_rtype = SYS_RES_IOPORT; sc->sc_rres = bus_alloc_resource_anywhere(dev, sc->sc_rtype, &sc->sc_rrid, cl->cl_channels * size, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); } /* * Fill in the bus access structure and call the hardware specific * probe method. */ sz = (size != 0) ? size : rman_get_size(sc->sc_rres); sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); sc->sc_bas.range = sz; sc->sc_bas.rclk = rclk; sc->sc_bas.regshft = regshft; error = SCC_PROBE(sc); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return ((error == 0) ? BUS_PROBE_DEFAULT : error); } struct resource * scc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle; struct scc_chan *ch; struct scc_mode *m; if (device_get_parent(child) != dev) return (NULL); /* We only support default allocations. 
*/ - if (start != 0UL || end != ~0UL) + if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); m = device_get_ivars(child); ch = m->m_chan; rle = resource_list_find(&ch->ch_rlist, type, 0); if (rle == NULL) return (NULL); *rid = 0; return (rle->res); } int scc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list_entry *rle; struct scc_chan *ch; struct scc_mode *m; if (device_get_parent(child) != dev) return (EINVAL); m = device_get_ivars(child); ch = m->m_chan; rle = resource_list_find(&ch->ch_rlist, type, rid); if (rle == NULL) return (EINVAL); if (startp != NULL) *startp = rle->start; if (countp != NULL) *countp = rle->count; return (0); } int scc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct scc_chan *ch; struct scc_class *cl; struct scc_mode *m; struct scc_softc *sc; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); cl = sc->sc_class; m = device_get_ivars(child); ch = m->m_chan; switch (index) { case SCC_IVAR_CHANNEL: *result = ch->ch_nr; break; case SCC_IVAR_CLASS: *result = cl->cl_class; break; case SCC_IVAR_CLOCK: *result = sc->sc_bas.rclk; break; case SCC_IVAR_MODE: *result = m->m_mode; break; case SCC_IVAR_REGSHFT: *result = sc->sc_bas.regshft; break; case SCC_IVAR_HWMTX: *result = (uintptr_t)&sc->sc_hwmtx; break; default: return (EINVAL); } return (0); } int scc_bus_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { struct resource_list_entry *rle; struct scc_chan *ch; struct scc_mode *m; if (device_get_parent(child) != dev) return (EINVAL); m = device_get_ivars(child); ch = m->m_chan; rle = resource_list_find(&ch->ch_rlist, type, rid); return ((rle == NULL) ? EINVAL : 0); } int scc_bus_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct scc_chan *ch; struct scc_mode *m; struct scc_softc *sc; int c, i, isrc; if (device_get_parent(child) != dev) return (EINVAL); /* Interrupt handlers must be FAST or MPSAFE. */ if (filt == NULL && !(flags & INTR_MPSAFE)) return (EINVAL); sc = device_get_softc(dev); if (sc->sc_polled) return (ENXIO); if (sc->sc_fastintr && filt == NULL) { sc->sc_fastintr = 0; for (c = 0; c < sc->sc_class->cl_channels; c++) { ch = &sc->sc_chan[c]; if (ch->ch_ires == NULL) continue; bus_teardown_intr(dev, ch->ch_ires, ch->ch_icookie); bus_setup_intr(dev, ch->ch_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)scc_bfe_intr, sc, &ch->ch_icookie); } } m = device_get_ivars(child); m->m_hasintr = 1; m->m_fastintr = (filt != NULL) ? 1 : 0; m->ih = (filt != NULL) ? filt : (driver_filter_t *)ihand; m->ih_arg = arg; i = 0, isrc = SER_INT_OVERRUN; while (i < SCC_ISRCCNT) { m->ih_src[i] = SERDEV_IHAND(child, isrc); if (m->ih_src[i] != NULL) m->ih = NULL; i++, isrc <<= 1; } return (0); } int scc_bus_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { struct scc_mode *m; int i; if (device_get_parent(child) != dev) return (EINVAL); m = device_get_ivars(child); if (!m->m_hasintr) return (EINVAL); m->m_hasintr = 0; m->m_fastintr = 0; m->ih = NULL; m->ih_arg = NULL; for (i = 0; i < SCC_ISRCCNT; i++) m->ih_src[i] = NULL; return (0); }
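The change repeated in each hunk above swaps the open-coded wildcard test for the RMAN_IS_DEFAULT_RANGE() macro from <sys/rman.h>. These bus front-ends only honor "default" allocations, i.e. requests that leave the address range unconstrained; once resource addresses are carried as rman_res_t rather than u_long, comparing end against ~0UL is no longer a reliable way to recognize that wildcard. The sketch below is illustrative only and not part of the committed diff; it assumes the <sys/rman.h> definitions of rman_res_t, RM_MAX_END and RMAN_IS_DEFAULT_RANGE(), and the helper name is invented for the example.

	/*
	 * Illustrative sketch, not part of the committed diff.  Assumes the
	 * <sys/rman.h> definitions of rman_res_t, RM_MAX_END (~(rman_res_t)0)
	 * and RMAN_IS_DEFAULT_RANGE(); the helper name is hypothetical.
	 */
	#include <sys/param.h>
	#include <sys/bus.h>
	#include <machine/bus.h>
	#include <machine/resource.h>
	#include <sys/rman.h>

	static int
	example_is_default_alloc(rman_res_t start, rman_res_t end)
	{
		/*
		 * A default (wildcard) allocation requests the whole range:
		 * start == 0 and end == RM_MAX_END.  The old test compared
		 * end against ~0UL, which stops matching RM_MAX_END if
		 * rman_res_t is ever wider than u_long.
		 */
		return (RMAN_IS_DEFAULT_RANGE(start, end));
	}

Callers such as puc_bus_alloc_resource(), quicc_bus_alloc_resource() and scc_bus_alloc_resource() above simply return NULL when the check fails, since a constrained range cannot be satisfied from the fixed, pre-reserved per-port resources these buses hand out.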