Index: head/sys/dev/bhnd/bhndb/bhndb.c =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb.c (revision 298275) +++ head/sys/dev/bhnd/bhndb/bhndb.c (revision 298276) @@ -1,1943 +1,2007 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Abstract BHND Bridge Device Driver * * Provides generic support for bridging from a parent bus (such as PCI) to * a BHND-compatible bus (e.g. bcma or siba). */ #include #include #include #include #include #include #include #include #include #include #include #include "bhndbvar.h" #include "bhndb_bus_if.h" #include "bhndb_hwdata.h" #include "bhndb_private.h" /* Debugging flags */ static u_long bhndb_debug = 0; TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug); enum { BHNDB_DEBUG_PRIO = 1 << 0, }; #define BHNDB_DEBUG(_type) (BHNDB_DEBUG_ ## _type & bhndb_debug) static bool bhndb_hw_matches(device_t *devlist, int num_devs, const struct bhndb_hw *hw); static int bhndb_initialize_region_cfg( struct bhndb_softc *sc, device_t *devs, int ndevs, const struct bhndb_hw_priority *table, struct bhndb_resources *r); static int bhndb_find_hwspec(struct bhndb_softc *sc, device_t *devs, int ndevs, const struct bhndb_hw **hw); static int bhndb_read_chipid(struct bhndb_softc *sc, const struct bhndb_hwcfg *cfg, struct bhnd_chipid *result); +bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, + device_t child); + static struct rman *bhndb_get_rman(struct bhndb_softc *sc, - int type); + device_t child, int type); static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size); static int bhndb_activate_static_region( struct bhndb_softc *sc, struct bhndb_region *region, device_t child, int type, int rid, struct resource *r); static int bhndb_try_activate_resource( struct bhndb_softc *sc, device_t child, int type, int rid, struct resource *r, bool *indirect); /** * Default bhndb(4) implementation of DEVICE_PROBE(). * * This function provides the default bhndb implementation of DEVICE_PROBE(), * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge(). 
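 *
 * A subclass driver would typically inherit this method via DEFINE_CLASS_1();
 * an illustrative sketch (all "my_*" names are hypothetical):
 *
 *	static device_method_t my_bhndb_methods[] = {
 *		DEVMETHOD(device_probe,		bhndb_generic_probe),
 *		DEVMETHOD(device_attach,	my_bhndb_attach),
 *		DEVMETHOD_END
 *	};
 *
 *	DEFINE_CLASS_1(bhndb, my_bhndb_driver, my_bhndb_methods,
 *	    sizeof(struct bhndb_softc), bhndb_driver);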
*/ int bhndb_generic_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static void bhndb_probe_nomatch(device_t dev, device_t child) { const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> (no driver attached)\n", name); } static int bhndb_print_child(device_t dev, device_t child) { struct bhndb_softc *sc; struct resource_list *rl; int retval = 0; sc = device_get_softc(dev); retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int bhndb_child_pnpinfo_str(device_t bus, device_t child, char *buf, size_t buflen) { *buf = '\0'; return (0); } static int bhndb_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { struct bhndb_softc *sc; sc = device_get_softc(dev); snprintf(buf, buflen, "base=0x%llx", (unsigned long long) sc->chipid.enum_addr); return (0); } /** * Return true if @p devlist matches the @p hw specification. * * @param devlist A device table to match against. * @param num_devs The number of devices in @p devlist. * @param hw The hardware description to be matched against. */ static bool bhndb_hw_matches(device_t *devlist, int num_devs, const struct bhndb_hw *hw) { for (u_int i = 0; i < hw->num_hw_reqs; i++) { const struct bhnd_core_match *match; bool found; match = &hw->hw_reqs[i]; found = false; for (int d = 0; d < num_devs; d++) { if (!bhnd_device_matches(devlist[d], match)) continue; found = true; break; } if (!found) return (false); } return (true); } /** * Initialize the region maps and priority configuration in @p r using * the provided priority @p table and the set of devices attached to * the bridged @p bus_dev . * * @param sc The bhndb device state. * @param devs All devices enumerated on the bridged bhnd bus. * @param ndevs The length of @p devs. * @param table Hardware priority table to be used to determine the relative * priorities of per-core port resources. * @param r The resource state to be configured. */ static int bhndb_initialize_region_cfg(struct bhndb_softc *sc, device_t *devs, int ndevs, const struct bhndb_hw_priority *table, struct bhndb_resources *r) { const struct bhndb_hw_priority *hp; bhnd_addr_t addr; bhnd_size_t size; size_t prio_low, prio_default, prio_high; int error; /* The number of port regions per priority band that must be accessible * via dynamic register windows */ prio_low = 0; prio_default = 0; prio_high = 0; /* * Register bridge regions covering all statically mapped ports. */ for (int i = 0; i < ndevs; i++) { const struct bhndb_regwin *regw; device_t child; child = devs[i]; for (regw = r->cfg->register_windows; regw->win_type != BHNDB_REGWIN_T_INVALID; regw++) { /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) continue; /* Skip non-applicable register windows. */ if (!bhndb_regwin_matches_device(regw, child)) continue; /* Fetch the base address of the mapped port. */ error = bhnd_get_region_addr(child, regw->core.port_type, regw->core.port, regw->core.region, &addr, &size); if (error) return (error); /* * Always defer to the register window's size. * * If the port size is smaller than the window size, * this ensures that we fully utilize register windows * larger than the referenced port. 
* * If the port size is larger than the window size, this * ensures that we do not directly map the allocations * within the region to a too-small window. */ size = regw->win_size; /* * Add to the bus region list. * * The window priority for a statically mapped * region is always HIGH. */ error = bhndb_add_resource_region(r, addr, size, BHNDB_PRIORITY_HIGH, regw); if (error) return (error); } } /* * Perform priority accounting and register bridge regions for all * ports defined in the priority table */ for (int i = 0; i < ndevs; i++) { struct bhndb_region *region; device_t child; child = devs[i]; /* * Skip priority accounting for cores that ... */ /* ... do not require bridge resources */ if (bhnd_is_hw_disabled(child) || !device_is_enabled(child)) continue; /* ... do not have a priority table entry */ hp = bhndb_hw_priority_find_device(table, child); if (hp == NULL) continue; /* ... are explicitly disabled in the priority table. */ if (hp->priority == BHNDB_PRIORITY_NONE) continue; /* Determine the number of dynamic windows required and * register their bus_region entries. */ for (u_int i = 0; i < hp->num_ports; i++) { const struct bhndb_port_priority *pp; pp = &hp->ports[i]; /* Skip ports not defined on this device */ if (!bhnd_is_region_valid(child, pp->type, pp->port, pp->region)) { continue; } /* Fetch the address+size of the mapped port. */ error = bhnd_get_region_addr(child, pp->type, pp->port, pp->region, &addr, &size); if (error) return (error); /* Skip ports with an existing static mapping */ region = bhndb_find_resource_region(r, addr, size); if (region != NULL && region->static_regwin != NULL) continue; /* Define a dynamic region for this port */ error = bhndb_add_resource_region(r, addr, size, pp->priority, NULL); if (error) return (error); /* Update port mapping counts */ switch (pp->priority) { case BHNDB_PRIORITY_NONE: break; case BHNDB_PRIORITY_LOW: prio_low++; break; case BHNDB_PRIORITY_DEFAULT: prio_default++; break; case BHNDB_PRIORITY_HIGH: prio_high++; break; } } } /* Determine the minimum priority at which we'll allocate direct * register windows from our dynamic pool */ size_t prio_total = prio_low + prio_default + prio_high; if (prio_total <= r->dwa_count) { /* low+default+high priority regions get windows */ r->min_prio = BHNDB_PRIORITY_LOW; } else if (prio_default + prio_high <= r->dwa_count) { /* default+high priority regions get windows */ r->min_prio = BHNDB_PRIORITY_DEFAULT; } else { /* high priority regions get windows */ r->min_prio = BHNDB_PRIORITY_HIGH; } if (BHNDB_DEBUG(PRIO)) { struct bhndb_region *region; const char *direct_msg, *type_msg; bhndb_priority_t prio, prio_min; prio_min = r->min_prio; device_printf(sc->dev, "min_prio: %d\n", prio_min); STAILQ_FOREACH(region, &r->bus_regions, link) { prio = region->priority; direct_msg = prio >= prio_min ? "direct" : "indirect"; type_msg = region->static_regwin ? "static" : "dynamic"; device_printf(sc->dev, "region 0x%llx+0x%llx priority " "%u %s/%s\n", (unsigned long long) region->addr, (unsigned long long) region->size, region->priority, direct_msg, type_msg); } } return (0); } /** * Find a hardware specification for @p dev. * * @param sc The bhndb device state. * @param devs All devices enumerated on the bridged bhnd bus. * @param ndevs The length of @p devs. * @param[out] hw On success, the matched hardware specification. * with @p dev. * * @retval 0 success * @retval non-zero if an error occurs fetching device info for comparison. 
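 *
 * Illustrative sketch of a hardware table entry as returned by
 * BHNDB_BUS_GET_HARDWARE_TABLE(); an entry matches only if every listed
 * core requirement is satisfied by some enumerated device (all "example_*"
 * names and values here are hypothetical):
 *
 *	static const struct bhnd_core_match example_reqs[] = {
 *		{
 *			.vendor	= BHND_MFGID_BCM,
 *			.device	= BHND_COREID_INVALID,
 *			.hwrev	= { BHND_HWREV_INVALID, BHND_HWREV_INVALID },
 *			.class	= BHND_DEVCLASS_PCIE,
 *			.unit	= 0
 *		}
 *	};
 *
 *	static const struct bhndb_hw example_hw_table[] = {
 *		{ .name = "example", .hw_reqs = example_reqs,
 *		  .num_hw_reqs = nitems(example_reqs), .cfg = &example_cfg },
 *		{ .name = NULL, .hw_reqs = NULL, .num_hw_reqs = 0, .cfg = NULL }
 *	};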
*/ static int bhndb_find_hwspec(struct bhndb_softc *sc, device_t *devs, int ndevs, const struct bhndb_hw **hw) { const struct bhndb_hw *next, *hw_table; /* Search for the first matching hardware config. */ hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev); for (next = hw_table; next->hw_reqs != NULL; next++) { if (!bhndb_hw_matches(devs, ndevs, next)) continue; /* Found */ *hw = next; return (0); } return (ENOENT); } /** * Read the ChipCommon identification data for this device. * * @param sc bhndb device state. * @param cfg The hardware configuration to use when mapping the ChipCommon * registers. * @param[out] result the chip identification data. * * @retval 0 success * @retval non-zero if the ChipCommon identification data could not be read. */ static int bhndb_read_chipid(struct bhndb_softc *sc, const struct bhndb_hwcfg *cfg, struct bhnd_chipid *result) { const struct bhnd_chipid *parent_cid; const struct bhndb_regwin *cc_win; struct resource_spec rs; int error; /* Let our parent device override the discovery process */ parent_cid = BHNDB_BUS_GET_CHIPID(sc->parent_dev, sc->dev); if (parent_cid != NULL) { *result = *parent_cid; return (0); } /* Find a register window we can use to map the first CHIPC_CHIPID_SIZE * of ChipCommon registers. */ cc_win = bhndb_regwin_find_best(cfg->register_windows, BHND_DEVCLASS_CC, 0, BHND_PORT_DEVICE, 0, 0, CHIPC_CHIPID_SIZE); if (cc_win == NULL) { device_printf(sc->dev, "no chipcommon register window\n"); return (0); } /* We can assume a device without a static ChipCommon window uses the * default ChipCommon address. */ if (cc_win->win_type == BHNDB_REGWIN_T_DYN) { error = BHNDB_SET_WINDOW_ADDR(sc->dev, cc_win, BHND_DEFAULT_CHIPC_ADDR); if (error) { device_printf(sc->dev, "failed to set chipcommon " "register window\n"); return (error); } } /* Let the default bhnd implemenation alloc/release the resource and * perform the read */ rs.type = cc_win->res.type; rs.rid = cc_win->res.rid; rs.flags = RF_ACTIVE; return (bhnd_read_chipid(sc->parent_dev, &rs, cc_win->win_offset, result)); } /** * Helper function that must be called by subclass bhndb(4) drivers * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4) * APIs on the bridge device. * * @param dev The bridge device to attach. * @param bridge_devclass The device class of the bridging core. This is used * to automatically detect the bridge core, and to disable additional bridge * cores (e.g. PCMCIA on a PCIe device). */ int bhndb_attach(device_t dev, bhnd_devclass_t bridge_devclass) { + struct bhndb_devinfo *dinfo; struct bhndb_softc *sc; const struct bhndb_hwcfg *cfg; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent_dev = device_get_parent(dev); sc->bridge_class = bridge_devclass; BHNDB_LOCK_INIT(sc); /* Read our chip identification data */ cfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, sc->dev); if ((error = bhndb_read_chipid(sc, cfg, &sc->chipid))) return (error); - /* Set up a resource manager for the device's address space. */ - sc->mem_rman.rm_start = 0; - sc->mem_rman.rm_end = BUS_SPACE_MAXADDR_32BIT; - sc->mem_rman.rm_type = RMAN_ARRAY; - sc->mem_rman.rm_descr = "BHND I/O memory addresses"; - - if ((error = rman_init(&sc->mem_rman))) { - device_printf(dev, "could not initialize mem_rman\n"); - return (error); - } - - error = rman_manage_region(&sc->mem_rman, 0, BUS_SPACE_MAXADDR_32BIT); - if (error) { - device_printf(dev, "could not configure mem_rman\n"); - goto failed; - } - - /* Initialize basic resource allocation state. 
*/ + /* Populate generic resource allocation state. */ sc->bus_res = bhndb_alloc_resources(dev, sc->parent_dev, cfg); if (sc->bus_res == NULL) { - error = ENXIO; - goto failed; + return (ENXIO); } /* Attach our bridged bus device */ - sc->bus_dev = device_add_child(dev, devclass_get_name(bhnd_devclass), + sc->bus_dev = BUS_ADD_CHILD(dev, 0, devclass_get_name(bhnd_devclass), -1); if (sc->bus_dev == NULL) { error = ENXIO; goto failed; } + /* Configure address space */ + dinfo = device_get_ivars(sc->bus_dev); + dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED; + + /* Finish attach */ return (bus_generic_attach(dev)); failed: BHNDB_LOCK_DESTROY(sc); - rman_fini(&sc->mem_rman); - if (sc->bus_res != NULL) bhndb_free_resources(sc->bus_res); return (error); } /** * Default bhndb(4) implementation of BHNDB_INIT_FULL_CONFIG(). * * This function provides the default bhndb implementation of * BHNDB_INIT_FULL_CONFIG(), and must be called by any subclass driver * overriding BHNDB_INIT_FULL_CONFIG(). * * As documented by BHNDB_INIT_FULL_CONFIG, this function performs final * bridge configuration based on the hardware information enumerated by the * child bus, and will reset all resource allocation state on the bridge. * * When calling this method: * - Any bus resources previously allocated by @p child must be deallocated. * - The @p child bus must have performed initial enumeration -- but not * probe or attachment -- of its children. */ int bhndb_generic_init_full_config(device_t dev, device_t child, const struct bhndb_hw_priority *hw_prio_table) { struct bhndb_softc *sc; const struct bhndb_hw *hw; struct bhndb_resources *r; device_t *devs; device_t hostb; int ndevs; int error; sc = device_get_softc(dev); hostb = NULL; /* Fetch the full set of attached devices */ if ((error = device_get_children(sc->bus_dev, &devs, &ndevs))) return (error); /* Find our host bridge device */ for (int i = 0; i < ndevs; i++) { if (bhnd_is_hostb_device(devs[i])) { hostb = devs[i]; break; } } if (hostb == NULL) { device_printf(sc->dev, "no host bridge core found\n"); error = ENODEV; goto cleanup; } /* Find our full register window configuration */ if ((error = bhndb_find_hwspec(sc, devs, ndevs, &hw))) { device_printf(sc->dev, "unable to identify device, " " using generic bridge resource definitions\n"); error = 0; goto cleanup; } if (bootverbose) device_printf(sc->dev, "%s resource configuration\n", hw->name); /* Release existing resource state */ BHNDB_LOCK(sc); bhndb_free_resources(sc->bus_res); sc->bus_res = NULL; BHNDB_UNLOCK(sc); /* Allocate new resource state */ r = bhndb_alloc_resources(dev, sc->parent_dev, hw->cfg); if (r == NULL) { error = ENXIO; goto cleanup; } /* Initialize our resource priority configuration */ error = bhndb_initialize_region_cfg(sc, devs, ndevs, hw_prio_table, r); if (error) { bhndb_free_resources(r); goto cleanup; } /* Update our bridge state */ BHNDB_LOCK(sc); sc->bus_res = r; sc->hostb_dev = hostb; BHNDB_UNLOCK(sc); cleanup: free(devs, M_TEMP); return (error); } /** * Default bhndb(4) implementation of DEVICE_DETACH(). * * This function detaches any child devices, and if successful, releases all * resources held by the bridge device. */ int bhndb_generic_detach(device_t dev) { struct bhndb_softc *sc; int error; sc = device_get_softc(dev); /* Detach children */ if ((error = bus_generic_detach(dev))) return (error); /* Clean up our driver state. 
*/ - rman_fini(&sc->mem_rman); bhndb_free_resources(sc->bus_res); BHNDB_LOCK_DESTROY(sc); return (0); } /** * Default bhndb(4) implementation of DEVICE_SUSPEND(). * * This function calls bus_generic_suspend() (or implements equivalent * behavior). */ int bhndb_generic_suspend(device_t dev) { return (bus_generic_suspend(dev)); } /** * Default bhndb(4) implementation of DEVICE_RESUME(). * * This function calls bus_generic_resume() (or implements equivalent * behavior). */ int bhndb_generic_resume(device_t dev) { struct bhndb_softc *sc; struct bhndb_resources *bus_res; struct bhndb_dw_alloc *dwa; int error; sc = device_get_softc(dev); bus_res = sc->bus_res; /* Guarantee that all in-use dynamic register windows are mapped to * their previously configured target address. */ BHNDB_LOCK(sc); for (size_t i = 0; i < bus_res->dwa_count; i++) { dwa = &bus_res->dw_alloc[i]; /* Skip regions that were not previously used */ if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0) continue; /* Otherwise, ensure the register window is correct before * any children attempt MMIO */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) break; } BHNDB_UNLOCK(sc); /* Error restoring hardware state; children cannot be safely resumed */ if (error) { device_printf(dev, "Unable to restore hardware configuration; " "cannot resume: %d\n", error); return (error); } return (bus_generic_resume(dev)); } /** * Default implementation of BHNDB_SUSPEND_RESOURCE. */ static void bhndb_suspend_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; struct bhndb_dw_alloc *dwa; sc = device_get_softc(dev); // TODO: IRQs? if (type != SYS_RES_MEMORY) return; BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa == NULL) { BHNDB_UNLOCK(sc); return; } if (BHNDB_DEBUG(PRIO)) device_printf(child, "suspend resource type=%d 0x%lx+0x%lx\n", type, rman_get_start(r), rman_get_size(r)); /* Release the resource's window reference */ bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } /** * Default implementation of BHNDB_RESUME_RESOURCE. */ static int bhndb_resume_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; sc = device_get_softc(dev); // TODO: IRQs? if (type != SYS_RES_MEMORY) return (0); /* Inactive resources don't require reallocation of bridge resources */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (0); if (BHNDB_DEBUG(PRIO)) device_printf(child, "resume resource type=%d 0x%lx+0x%lx\n", type, rman_get_start(r), rman_get_size(r)); return (bhndb_try_activate_resource(sc, rman_get_device(r), type, rman_get_rid(r), r, NULL)); } /** * Default bhndb(4) implementation of BUS_READ_IVAR(). */ static int bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } /** * Default bhndb(4) implementation of BUS_WRITE_IVAR(). */ static int bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** + * Return the address space for the given @p child device. 
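 *
 * Children added directly by the bridge (e.g. via bhndb_add_child()) default
 * to BHNDB_ADDRSPACE_NATIVE and are backed by the host address space, while
 * the bridged bhnd(4) bus device is explicitly tagged BHNDB_ADDRSPACE_BRIDGED
 * in bhndb_attach() and is routed through the bridge's register windows.
 * An illustrative sketch of how this feeds resource management:
 *
 *	switch (bhndb_get_addrspace(sc, child)) {
 *	case BHNDB_ADDRSPACE_NATIVE:
 *		rm = &sc->bus_res->ht_mem_rman;		// host memory
 *		break;
 *	case BHNDB_ADDRSPACE_BRIDGED:
 *		rm = &sc->bus_res->br_mem_rman;		// bridged bhnd(4) memory
 *		break;
 *	}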
+ */ +bhndb_addrspace +bhndb_get_addrspace(struct bhndb_softc *sc, device_t child) +{ + struct bhndb_devinfo *dinfo; + device_t imd_dev; + + /* Find the directly attached parent of the requesting device */ + imd_dev = child; + while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev) + imd_dev = device_get_parent(imd_dev); + + if (imd_dev == NULL) + panic("bhndb address space request for non-child device %s\n", + device_get_nameunit(child)); + + dinfo = device_get_ivars(imd_dev); + return (dinfo->addrspace); +} + +/** * Return the rman instance for a given resource @p type, if any. * * @param sc The bhndb device state. + * @param child The requesting child. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) */ static struct rman * -bhndb_get_rman(struct bhndb_softc *sc, int type) -{ - switch (type) { - case SYS_RES_MEMORY: - return &sc->mem_rman; - case SYS_RES_IRQ: - // TODO - // return &sc->irq_rman; - return (NULL); - default: - return (NULL); - }; +bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) +{ + switch (bhndb_get_addrspace(sc, child)) { + case BHNDB_ADDRSPACE_NATIVE: + switch (type) { + case SYS_RES_MEMORY: + return (&sc->bus_res->ht_mem_rman); + case SYS_RES_IRQ: + return (NULL); + default: + return (NULL); + }; + + case BHNDB_ADDRSPACE_BRIDGED: + switch (type) { + case SYS_RES_MEMORY: + return (&sc->bus_res->br_mem_rman); + case SYS_RES_IRQ: + // TODO + // return &sc->irq_rman; + return (NULL); + default: + return (NULL); + }; + } } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } + dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_HW_DISABLED(). 
*/ static bool bhndb_is_hw_disabled(device_t dev, device_t child) { struct bhndb_softc *sc; struct bhnd_core_info core; sc = device_get_softc(dev); /* Requestor must be attached to the bhnd bus */ if (device_get_parent(child) != sc->bus_dev) { return (BHND_BUS_IS_HW_DISABLED(device_get_parent(dev), child)); } /* Fetch core info */ core = bhnd_get_core_info(child); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, &core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(&core))) return (!BHND_BUS_IS_HOSTB_DEVICE(dev, child)); /* Otherwise, assume the core is populated */ return (false); } /* ascending core index comparison used by bhndb_is_hostb_device() */ static int compare_core_index(const void *lhs, const void *rhs) { u_int left = bhnd_get_core_index(*(const device_t *) lhs); u_int right = bhnd_get_core_index(*(const device_t *) rhs); if (left < right) return (-1); else if (left > right) return (1); else return (0); } /** * Default bhndb(4) implementation of BHND_BUS_IS_HOSTB_DEVICE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices to determine the hostb core: * * - The core must have a Broadcom vendor ID. * - The core devclass must match the bridge type. * - The core must be the first device on the bus with the bridged device * class. * * @param sc The bridge device state. * @param cores The table of bridge-enumerated cores. * @param num_cores The length of @p cores. * @param core The core to check. */ static bool bhndb_is_hostb_device(device_t dev, device_t child) { struct bhndb_softc *sc; struct bhnd_core_match md; device_t hostb_dev, *devlist; int devcnt, error; sc = device_get_softc(dev); /* Requestor must be attached to the bhnd bus */ if (device_get_parent(child) != sc->bus_dev) return (BHND_BUS_IS_HOSTB_DEVICE(device_get_parent(dev), child)); /* Determine required device class and set up a match descriptor. */ md = (struct bhnd_core_match) { .vendor = BHND_MFGID_BCM, .device = BHND_COREID_INVALID, .hwrev = { BHND_HWREV_INVALID, BHND_HWREV_INVALID }, .class = sc->bridge_class, .unit = 0 }; /* Pre-screen the device before searching over the full device list. */ if (!bhnd_device_matches(child, &md)) return (false); /* Must be the absolute first matching device on the bus. */ if ((error = device_get_children(sc->bus_dev, &devlist, &devcnt))) return (false); /* Sort by core index value, ascending */ qsort(devlist, devcnt, sizeof(*devlist), compare_core_index); /* Find the actual hostb device */ hostb_dev = NULL; for (int i = 0; i < devcnt; i++) { if (bhnd_device_matches(devlist[i], &md)) { hostb_dev = devlist[i]; break; } } /* Clean up */ free(devlist, M_TEMP); return (child == hostb_dev); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool immed_child, defaults; sc = device_get_softc(dev); immed_child = (device_get_parent(child) == dev); defaults = (start == 0UL && end == ~0UL); rle = NULL; /* Populate defaults */ if (immed_child && defaults) { /* Fetch the resource list entry. 
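 *
 * For example (values hypothetical), a directly attached child whose
 * default range was previously programmed via
 *
 *	bus_set_resource(child, SYS_RES_MEMORY, 0, addr, size);
 *
 * (backed by bus_generic_rl_set_resource() in our method table) may pass
 * start == 0 and end == ~0 here and will be allocated that programmed
 * range.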
*/ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || end < start || count > ((end - start) + 1)) return (NULL); /* Fetch the resource manager */ - rm = bhndb_get_rman(sc, type); + rm = bhndb_get_rman(sc, child, type); if (rm == NULL) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " - "child %s\n", - *rid, type, device_get_nameunit(child)); + "child %s: %d\n", + *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int bhndb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { int error; /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). */ static int bhndb_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct bhndb_softc *sc; struct rman *rm; int error; sc = device_get_softc(dev); error = 0; /* Fetch resource manager */ - rm = bhndb_get_rman(sc, type); + rm = bhndb_get_rman(sc, child, type); if (rm == NULL) return (ENXIO); if (!rman_is_region_manager(r, rm)) return (ENXIO); /* If active, adjustment is limited by the assigned window. */ BHNDB_LOCK(sc); // TODO: Currently unsupported error = ENODEV; BHNDB_UNLOCK(sc); if (!error) error = rman_adjust_resource(r, start, end); return (error); } /** * Initialize child resource @p r with a virtual address, tag, and handle - * copied from @p parent, adjusted to contain only the range defined by @p win. + * copied from @p parent, adjusted to contain only the range defined by + * @p offsize and @p size. * * @param r The register to be initialized. * @param parent The parent bus resource that fully contains the subregion. * @param offset The subregion offset within @p parent. * @param size The subregion size. * @p r. 
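 *
 * Worked example (addresses are illustrative): for a child resource
 * starting at 0x18001000 within a bus region based at 0x18000000, mapped
 * by a register window at offset 0x1000 within @p parent, the caller
 * passes:
 *
 *	offset = (0x18001000 - 0x18000000) + 0x1000 = 0x2000
 *
 * and the child's bus handle becomes a subregion of the parent's handle
 * at that offset.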
*/ static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size) { - bus_space_handle_t bh, child_bh; bus_space_tag_t bt; uintptr_t vaddr; int error; /* Fetch the parent resource's real bus values */ vaddr = (uintptr_t) rman_get_virtual(parent); bt = rman_get_bustag(parent); bh = rman_get_bushandle(parent); /* Configure child resource with window-adjusted real bus values */ vaddr += offset; error = bus_space_subregion(bt, bh, offset, size, &child_bh); if (error) return (error); rman_set_virtual(r, (void *) vaddr); rman_set_bustag(r, bt); rman_set_bushandle(r, child_bh); return (0); } /** * Attempt activation of a fixed register window mapping for @p child. * * @param sc BHNDB device state. * @param region The static region definition capable of mapping @p r. * @param child A child requesting resource activation. * @param type Resource type. * @param rid Resource identifier. * @param r Resource to be activated. * * @retval 0 if @p r was activated successfully * @retval ENOENT if no fixed register window was found. * @retval non-zero if @p r could not be activated. */ static int bhndb_activate_static_region(struct bhndb_softc *sc, struct bhndb_region *region, device_t child, int type, int rid, struct resource *r) { struct resource *bridge_res; const struct bhndb_regwin *win; bhnd_size_t parent_offset; rman_res_t r_start, r_size; int error; win = region->static_regwin; KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type), ("can't activate non-static region")); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Find the corresponding bridge resource */ bridge_res = bhndb_find_regwin_resource(sc->bus_res, win); if (bridge_res == NULL) return (ENXIO); /* Calculate subregion offset within the parent resource */ parent_offset = r_start - region->addr; parent_offset += win->win_offset; /* Configure resource with its real bus values. */ error = bhndb_init_child_resource(r, bridge_res, parent_offset, r_size); if (error) return (error); /* Mark active */ if ((error = rman_activate_resource(r))) return (error); return (0); } /** * Attempt to allocate/retain a dynamic register window for @p r, returning * the retained window. * * @param sc The bhndb driver state. * @param r The resource for which a window will be retained. */ static struct bhndb_dw_alloc * bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r) { struct bhndb_dw_alloc *dwa; rman_res_t r_start, r_size; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Look for an existing dynamic window we can reference */ dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size); if (dwa != NULL) { if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0) return (dwa); return (NULL); } /* Otherwise, try to reserve a free window */ dwa = bhndb_dw_next_free(sc->bus_res); if (dwa == NULL) { /* No free windows */ return (NULL); } /* Set the window target */ error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, rman_get_start(r), rman_get_size(r)); if (error) { device_printf(sc->dev, "dynamic window initialization " "for 0x%llx-0x%llx failed\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (NULL); } /* Add our reservation */ if (bhndb_dw_retain(sc->bus_res, dwa, r)) return (NULL); return (dwa); } /** * Activate a resource using any viable static or dynamic register window. * * @param sc The bhndb driver state. * @param child The child holding ownership of @p r. 
* @param type The type of the resource to be activated. * @param rid The resource ID of @p r. * @param r The resource to be activated * @param[out] indirect On error and if not NULL, will be set to 'true' if * the caller should instead use an indirect resource mapping. * * @retval 0 success * @retval non-zero activation failed. */ static int bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, int type, int rid, struct resource *r, bool *indirect) { struct bhndb_region *region; struct bhndb_dw_alloc *dwa; bhndb_priority_t dw_priority; rman_res_t r_start, r_size; rman_res_t parent_offset; int error; BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED); // TODO - IRQs if (type != SYS_RES_MEMORY) return (ENXIO); if (indirect) *indirect = false; + + r_start = rman_get_start(r); + r_size = rman_get_size(r); + /* Activate native addrspace resources using the host address space */ + if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) { + struct resource *parent; + + /* Find the bridge resource referenced by the child */ + parent = bhndb_find_resource_range(sc->bus_res, r_start, + r_size); + if (parent == NULL) { + device_printf(sc->dev, "host resource not found " + "for 0x%llx-0x%llx\n", + (unsigned long long) r_start, + (unsigned long long) r_start + r_size - 1); + return (ENOENT); + } + + /* Initialize child resource with the real bus values */ + error = bhndb_init_child_resource(r, parent, + r_start - rman_get_start(parent), r_size); + if (error) + return (error); + + /* Try to activate child resource */ + return (rman_activate_resource(r)); + } + /* Default to low priority */ dw_priority = BHNDB_PRIORITY_LOW; /* Look for a bus region matching the resource's address range */ - r_start = rman_get_start(r); - r_size = rman_get_size(r); region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) dw_priority = region->priority; /* Prefer static mappings over consuming a dynamic windows. */ if (region && region->static_regwin) { error = bhndb_activate_static_region(sc, region, child, type, rid, r); if (error) device_printf(sc->dev, "static window allocation " "for 0x%llx-0x%llx failed\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (error); } /* A dynamic window will be required; is this resource high enough * priority to be reserved a dynamic window? */ if (dw_priority < sc->bus_res->min_prio) { if (indirect) *indirect = true; return (ENOMEM); } /* Find and retain a usable window */ BHNDB_LOCK(sc); { dwa = bhndb_retain_dynamic_window(sc, r); } BHNDB_UNLOCK(sc); if (dwa == NULL) { if (indirect) *indirect = true; return (ENOMEM); } /* Configure resource with its real bus values. */ parent_offset = dwa->win->win_offset; parent_offset += r_start - dwa->target; error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset, dwa->win->win_size); if (error) goto failed; /* Mark active */ if ((error = rman_activate_resource(r))) goto failed; return (0); failed: /* Release our region allocation. */ BHNDB_LOCK(sc); bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); return (error); } /** * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE(). * * Maps resource activation requests to a viable static or dynamic * register window, if any. 
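 *
 * Illustrative summary of the decision flow implemented by
 * bhndb_try_activate_resource() (using the names defined above):
 *
 *	if the covering region has a static regwin:
 *		map r directly over the static window
 *	else if region priority >= bus_res->min_prio and a dynamic
 *	    window can be retained:
 *		retarget that window and map r over it
 *	else:
 *		fail, setting *indirect so the caller may fall back to
 *		indirect register access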
*/ static int bhndb_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc = device_get_softc(dev); return (bhndb_try_activate_resource(sc, child, type, rid, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; int error; sc = device_get_softc(dev); - if ((rm = bhndb_get_rman(sc, type)) == NULL) + if ((rm = bhndb_get_rman(sc, child, type)) == NULL) return (EINVAL); /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Free any dynamic window allocation. */ - BHNDB_LOCK(sc); - dwa = bhndb_dw_find_resource(sc->bus_res, r); - if (dwa != NULL) - bhndb_dw_release(sc->bus_res, dwa, r); - BHNDB_UNLOCK(sc); + if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { + BHNDB_LOCK(sc); + dwa = bhndb_dw_find_resource(sc->bus_res, r); + if (dwa != NULL) + bhndb_dw_release(sc->bus_res, dwa, r); + BHNDB_UNLOCK(sc); + } return (0); } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST(). */ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ALLOC_RESOURCE(). */ static struct bhnd_resource * bhndb_alloc_bhnd_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct bhnd_resource *br; sc = device_get_softc(dev); /* Allocate resource wrapper */ br = malloc(sizeof(struct bhnd_resource), M_BHND, M_NOWAIT|M_ZERO); if (br == NULL) return (NULL); /* Configure */ br->direct = false; br->res = bus_alloc_resource(child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (br->res == NULL) goto failed; if (flags & RF_ACTIVE) { if (bhnd_activate_resource(child, type, *rid, br)) goto failed; } return (br); failed: if (br->res != NULL) bus_release_resource(child, type, *rid, br->res); free(br, M_BHND); return (NULL); } /** * Default bhndb(4) implementation of BHND_BUS_RELEASE_RESOURCE(). */ static int bhndb_release_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; if ((error = bus_release_resource(child, type, rid, r->res))) return (error); free(r, M_BHND); return (0); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). + * + * For BHNDB_ADDRSPACE_NATIVE children, all resources may be assumed to + * be actived by the bridge. * - * Attempts to activate a static register window, a dynamic register window, - * or configures @p r as an indirect resource -- in that order. + * For BHNDB_ADDRSPACE_BRIDGED children, attempts to activate a static register + * window, a dynamic register window, or configures @p r as an indirect + * resource -- in that order. 
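 *
 * Illustrative sketch of the assumed consumer-side dispatch for a bridged
 * child reading through a struct bhnd_resource:
 *
 *	if (r->direct)
 *		val = bus_read_4(r->res, offset);
 *	else
 *		val = BHND_BUS_READ_4(device_get_parent(dev), dev, r, offset);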
*/ static int bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct bhndb_softc *sc; struct bhndb_region *region; - bhndb_priority_t r_prio; rman_res_t r_start, r_size; int error; bool indirect; KASSERT(!r->direct, ("direct flag set on inactive resource")); KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE), ("RF_ACTIVE set on inactive resource")); sc = device_get_softc(dev); - /* Fetch the address range's resource priority */ r_start = rman_get_start(r->res); r_size = rman_get_size(r->res); - r_prio = BHNDB_PRIORITY_NONE; - region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); - if (region != NULL) - r_prio = region->priority; - - /* If less than the minimum dynamic window priority, this - * resource should always be indirect. */ - if (r_prio < sc->bus_res->min_prio) - return (0); + /* Verify bridged address range's resource priority, and skip direct + * allocation if the priority is too low. */ + if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { + bhndb_priority_t r_prio; + region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); + if (region != NULL) + r_prio = region->priority; + else + r_prio = BHNDB_PRIORITY_NONE; + + /* If less than the minimum dynamic window priority, this + * resource should always be indirect. */ + if (r_prio < sc->bus_res->min_prio) + return (0); + } + /* Attempt direct activation */ error = bhndb_try_activate_resource(sc, child, type, rid, r->res, &indirect); if (!error) { r->direct = true; } else if (indirect) { /* The request was valid, but no viable register window is * available; indirection must be employed. */ error = 0; r->direct = false; } - if (BHNDB_DEBUG(PRIO)) { + if (BHNDB_DEBUG(PRIO) && + bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) + { device_printf(child, "activated 0x%llx-0x%llx as %s " "resource\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, r->direct ? "direct" : "indirect"); } return (error); }; /** * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Indirect resources don't require activation */ if (!r->direct) return (0); KASSERT(rman_get_flags(r->res) & RF_ACTIVE, ("RF_ACTIVE not set on direct resource")); /* Perform deactivation */ error = bus_deactivate_resource(child, type, rid, r->res); if (!error) r->direct = false; return (error); }; /** * Slow path for bhndb_io_resource(). * * Iterates over the existing allocated dynamic windows looking for a viable * in-use region; the first matching region is returned. */ static struct bhndb_dw_alloc * bhndb_io_resource_slow(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; /* Search for an existing dynamic mapping of this address range. * Static regions are not searched, as a statically mapped * region would never be allocated as an indirect resource. 
*/ for (size_t i = 0; i < br->dwa_count; i++) { const struct bhndb_regwin *win; dwa = &br->dw_alloc[i]; win = dwa->win; KASSERT(win->win_type == BHNDB_REGWIN_T_DYN, ("invalid register window type")); /* Verify the range */ if (addr < dwa->target) continue; if (addr + size > dwa->target + win->win_size) continue; /* Found */ *offset = dwa->win->win_offset; *offset += addr - dwa->target; return (dwa); } /* not found */ return (NULL); } /** * Find the bridge resource to be used for I/O requests. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. */ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; /* Try to fetch a free window */ dwa = bhndb_dw_next_free(br); /* * If no dynamic windows are available, look for an existing * region that maps the target range. * * If none are found, this is a child driver bug -- our window * over-commit should only fail in the case where a child driver leaks * resources, or perform operations out-of-order. * * Broadcom HND chipsets are designed to not require register window * swapping during execution; as long as the child devices are * attached/detached correctly, using the hardware's required order * of operations, there should always be a window available for the * current operation. */ if (dwa == NULL) { dwa = bhndb_io_resource_slow(sc, addr, size, offset); if (dwa == NULL) { panic("register windows exhausted attempting to map " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } return (dwa); } /* Adjust the window if the I/O request won't fit in the current * target range. 
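 *
 * Worked example (illustrative values): with a 0x1000-byte window
 * currently targeting 0x18000000, a 4-byte access at 0x18101000 lies
 * outside [0x18000000, 0x18001000) and forces a retarget; the offset
 * returned afterwards is (addr - dwa->target) + dwa->win->win_offset.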
*/ if (addr < dwa->target || (dwa->target + dwa->win->win_size) - addr < size) { error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, rman_get_start(r->res) + \ offset, _io_size, &io_offset); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _size) \ static _type \ bhndb_bus_read_ ## _size (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _size (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _size) \ static void \ bhndb_bus_write_ ## _size (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _size (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } BHNDB_IO_READ(uint8_t, 1); BHNDB_IO_READ(uint16_t, 2); BHNDB_IO_READ(uint32_t, 4); BHNDB_IO_WRITE(uint8_t, 1); BHNDB_IO_WRITE(uint16_t, 2); BHNDB_IO_WRITE(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER(). */ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { bus_size_t remain; BHNDB_IO_COMMON_SETUP(length); /* TODO: It's unclear whether we need a barrier implementation, * and if we do, what it needs to actually do. This may need * revisiting once we have a better idea of requirements after * porting the core drivers. */ panic("implementation incorrect"); /* Use 4-byte reads where possible */ remain = length % sizeof(uint32_t); for (bus_size_t i = 0; i < (length - remain); i += 4) bus_read_4(io_res, io_offset + offset + i); /* Use 1 byte reads for the remainder */ for (bus_size_t i = 0; i < remain; i++) bus_read_1(io_res, io_offset + offset + length + i); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BUS_SETUP_INTR(). */ static int bhndb_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { // TODO return (EOPNOTSUPP); } /** * Default bhndb(4) implementation of BUS_TEARDOWN_INTR(). */ static int bhndb_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { // TODO return (EOPNOTSUPP); } /** * Default bhndb(4) implementation of BUS_CONFIG_INTR(). */ static int bhndb_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { // TODO return (EOPNOTSUPP); } /** * Default bhndb(4) implementation of BUS_BIND_INTR(). 
 */
static int
bhndb_bind_intr(device_t dev, device_t child, struct resource *r, int cpu)
{
	// TODO
	return (EOPNOTSUPP);
}

/**
 * Default bhndb(4) implementation of BUS_DESCRIBE_INTR().
 */
static int
bhndb_describe_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie, const char *descr)
{
	// TODO
	return (EOPNOTSUPP);
}

/**
 * Default bhndb(4) implementation of BUS_GET_DMA_TAG().
 */
static bus_dma_tag_t
bhndb_get_dma_tag(device_t dev, device_t child)
{
	// TODO
	return (NULL);
}

static device_method_t bhndb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			bhndb_generic_probe),
	DEVMETHOD(device_detach,		bhndb_generic_detach),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),
	DEVMETHOD(device_suspend,		bhndb_generic_suspend),
	DEVMETHOD(device_resume,		bhndb_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_probe_nomatch,		bhndb_probe_nomatch),
	DEVMETHOD(bus_print_child,		bhndb_print_child),
	DEVMETHOD(bus_child_pnpinfo_str,	bhndb_child_pnpinfo_str),
	DEVMETHOD(bus_child_location_str,	bhndb_child_location_str),
	DEVMETHOD(bus_add_child,		bhndb_add_child),
	DEVMETHOD(bus_child_deleted,		bhndb_child_deleted),
	DEVMETHOD(bus_alloc_resource,		bhndb_alloc_resource),
	DEVMETHOD(bus_release_resource,		bhndb_release_resource),
	DEVMETHOD(bus_activate_resource,	bhndb_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bhndb_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bhndb_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bhndb_teardown_intr),
	DEVMETHOD(bus_config_intr,		bhndb_config_intr),
	DEVMETHOD(bus_bind_intr,		bhndb_bind_intr),
	DEVMETHOD(bus_describe_intr,		bhndb_describe_intr),
	DEVMETHOD(bus_get_dma_tag,		bhndb_get_dma_tag),
	DEVMETHOD(bus_adjust_resource,		bhndb_adjust_resource),
	DEVMETHOD(bus_set_resource,		bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,		bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,		bus_generic_rl_delete_resource),
	DEVMETHOD(bus_get_resource_list,	bhndb_get_resource_list),
	DEVMETHOD(bus_read_ivar,		bhndb_read_ivar),
	DEVMETHOD(bus_write_ivar,		bhndb_write_ivar),

	/* BHNDB interface */
	DEVMETHOD(bhndb_get_chipid,		bhndb_get_chipid),
	DEVMETHOD(bhndb_init_full_config,	bhndb_generic_init_full_config),
	DEVMETHOD(bhndb_suspend_resource,	bhndb_suspend_resource),
	DEVMETHOD(bhndb_resume_resource,	bhndb_resume_resource),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_is_hw_disabled,	bhndb_is_hw_disabled),
	DEVMETHOD(bhnd_bus_is_hostb_device,	bhndb_is_hostb_device),
	DEVMETHOD(bhnd_bus_get_chipid,		bhndb_get_chipid),
	DEVMETHOD(bhnd_bus_alloc_resource,	bhndb_alloc_bhnd_resource),
	DEVMETHOD(bhnd_bus_release_resource,	bhndb_release_bhnd_resource),
	DEVMETHOD(bhnd_bus_activate_resource,	bhndb_activate_bhnd_resource),
	DEVMETHOD(bhnd_bus_deactivate_resource,	bhndb_deactivate_bhnd_resource),
	DEVMETHOD(bhnd_bus_read_1,		bhndb_bus_read_1),
	DEVMETHOD(bhnd_bus_read_2,		bhndb_bus_read_2),
	DEVMETHOD(bhnd_bus_read_4,		bhndb_bus_read_4),
	DEVMETHOD(bhnd_bus_write_1,		bhndb_bus_write_1),
	DEVMETHOD(bhnd_bus_write_2,		bhndb_bus_write_2),
	DEVMETHOD(bhnd_bus_write_4,		bhndb_bus_write_4),
	DEVMETHOD(bhnd_bus_barrier,		bhndb_bus_barrier),

	DEVMETHOD_END
};

devclass_t bhndb_devclass;

DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc));

MODULE_VERSION(bhndb, 1);
MODULE_DEPEND(bhndb, bhnd, 1, 1, 1);
MODULE_DEPEND(bhndb, bhnd_chipc, 1, 1, 1);

Index: head/sys/dev/bhnd/bhndb/bhndb_pci.c
===================================================================
--- head/sys/dev/bhnd/bhndb/bhndb_pci.c	(revision 298275)
+++ head/sys/dev/bhnd/bhndb/bhndb_pci.c	(revision 298276)
@@ -1,1096 +1,1081 @@
/*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * PCI-specific implementation for the BHNDB bridge driver. * * Provides support for bridging from a PCI parent bus to a BHND-compatible * bus (e.g. bcma or siba) via a Broadcom PCI core configured in end-point * mode. * * This driver handles all interactions with the PCI bridge core. On the * bridged bhnd bus, the PCI core device will be claimed by a simple * bhnd_hostb driver. */ // Quirk TODO // WARs for the following are not yet implemented: // - BHND_PCI_QUIRK_SBINTVEC // - BHND_PCIE_QUIRK_ASPM_OVR // - BHND_PCIE_QUIRK_SERDES_NOPLLDOWN // Quirks (and WARs) for the following are not yet defined: // - Power savings via MDIO BLK1/PWR_MGMT3 on PCIe hwrev 15-20, 21-22 // - WOWL PME enable/disable // - 4360 PCIe SerDes Tx amplitude/deemphasis (vendor Apple, boards // BCM94360X51P2, BCM94360X51A). 
// - PCI latency timer (boards CB2_4321_BOARD, CB2_4321_AG_BOARD) // - Max SerDes TX drive strength (vendor Apple, pcie >= rev10, // board BCM94322X9) // - 700mV SerDes TX drive strength (chipid BCM4331, boards BCM94331X19, // BCM94331X28, BCM94331X29B, BCM94331X19C) #include #include #include #include #include #include #include #include #include #include #include #include #include "bhndb_pcireg.h" #include "bhndb_pcivar.h" #include "bhndb_private.h" static int bhndb_enable_pci_clocks(struct bhndb_pci_softc *sc); static int bhndb_disable_pci_clocks(struct bhndb_pci_softc *sc); static int bhndb_pci_compat_setregwin(struct bhndb_pci_softc *, const struct bhndb_regwin *, bhnd_addr_t); static int bhndb_pci_fast_setregwin(struct bhndb_pci_softc *, const struct bhndb_regwin *, bhnd_addr_t); static uint32_t bhndb_pcie_read_proto_reg(struct bhndb_pci_softc *sc, uint32_t addr); static void bhndb_pcie_write_proto_reg(struct bhndb_pci_softc *sc, uint32_t addr, uint32_t val); static void bhndb_init_sromless_pci_config(struct bhndb_pci_softc *sc); static int bhndb_pci_wars_register_access(struct bhndb_pci_softc *sc); static int bhndb_pci_wars_early_once(struct bhndb_pci_softc *sc); static int bhndb_pci_wars_hwup(struct bhndb_pci_softc *sc); static int bhndb_pci_wars_hwdown(struct bhndb_pci_softc *sc); static uint32_t bhndb_pci_discover_quirks(struct bhndb_pci_softc *, const struct bhndb_pci_id *); static const struct bhndb_pci_id *bhndb_pci_find_core_id( struct bhnd_core_info *core); /* * Supported PCI bridge cores. * * This table defines quirks specific to core hwrev ranges; see also * bhndb_pci_discover_quirks() for additional quirk detection. */ static const struct bhndb_pci_id bhndb_pci_ids[] = { /* PCI */ BHNDB_PCI_ID(PCI, BHND_QUIRK_HWREV_GTE (0, BHNDB_PCI_QUIRK_EXT_CLOCK_GATING | BHNDB_PCI_QUIRK_SBTOPCI2_PREF_BURST), BHND_QUIRK_HWREV_RANGE (0, 5, BHNDB_PCI_QUIRK_SBINTVEC), BHND_QUIRK_HWREV_GTE (11, BHNDB_PCI_QUIRK_SBTOPCI2_READMULTI | BHNDB_PCI_QUIRK_CLKRUN_DSBL), BHND_QUIRK_HWREV_END ), /* PCI Gen 1 */ BHNDB_PCI_ID(PCIE, BHND_QUIRK_HWREV_EQ (0, BHNDB_PCIE_QUIRK_SDR9_L0s_HANG), BHND_QUIRK_HWREV_RANGE (0, 1, BHNDB_PCIE_QUIRK_UR_STATUS_FIX), BHND_QUIRK_HWREV_EQ (1, BHNDB_PCIE_QUIRK_PCIPM_REQEN), BHND_QUIRK_HWREV_RANGE (3, 5, BHNDB_PCIE_QUIRK_ASPM_OVR | BHNDB_PCIE_QUIRK_SDR9_POLARITY | BHNDB_PCIE_QUIRK_SDR9_NO_FREQRETRY), BHND_QUIRK_HWREV_LTE (6, BHNDB_PCIE_QUIRK_L1_IDLE_THRESH), BHND_QUIRK_HWREV_GTE (6, BHNDB_PCIE_QUIRK_SPROM_L23_PCI_RESET), BHND_QUIRK_HWREV_EQ (7, BHNDB_PCIE_QUIRK_SERDES_NOPLLDOWN), BHND_QUIRK_HWREV_GTE (8, BHNDB_PCIE_QUIRK_L1_TIMER_PERF), BHND_QUIRK_HWREV_GTE (10, BHNDB_PCIE_QUIRK_SD_C22_EXTADDR), BHND_QUIRK_HWREV_END ), { BHND_COREID_INVALID, BHND_PCI_REGFMT_PCI, NULL } }; /* quirk flag convenience macros */ #define BHNDB_PCI_QUIRK(_sc, _name) \ ((_sc)->quirks & BHNDB_PCI_QUIRK_ ## _name) #define BHNDB_PCIE_QUIRK(_sc, _name) \ ((_sc)->quirks & BHNDB_PCIE_QUIRK_ ## _name) #define BHNDB_PCI_ASSERT_QUIRK(_sc, name) \ KASSERT(BHNDB_PCI_QUIRK((_sc), name), ("quirk " __STRING(_name) " not set")) #define BHNDB_PCIE_ASSERT_QUIRK(_sc, name) \ KASSERT(BHNDB_PCIE_QUIRK((_sc), name), ("quirk " __STRING(_name) " not set")) /* bus_(read|write)_* convenience macros */ #define BHNDB_PCI_READ_2(_sc, _reg) \ bus_read_2((_sc)->mem_res, (_sc)->mem_off + (_reg)) #define BHNDB_PCI_READ_4(_sc, _reg) \ bus_read_4((_sc)->mem_res, (_sc)->mem_off + (_reg)) #define BHNDB_PCI_WRITE_2(_sc, _reg, _val) \ bus_write_2((_sc)->mem_res, (_sc)->mem_off + (_reg), (_val)) #define BHNDB_PCI_WRITE_4(_sc, _reg, _val) \ 
bus_write_4((_sc)->mem_res, (_sc)->mem_off + (_reg), (_val)) /* BHNDB_PCI_REG_* convenience macros */ #define BPCI_REG_EXTRACT(_rv, _a) BHND_PCI_REG_EXTRACT(_rv, BHND_ ## _a) #define BPCI_REG_INSERT(_rv, _a, _v) BHND_PCI_REG_INSERT(_rv, BHND_ ## _a, _v) #define BPCI_COMMON_REG_EXTRACT(_r, _a) \ BHND_PCI_COMMON_REG_EXTRACT(sc->regfmt, _r, _a) #define BPCI_COMMON_REG_INSERT(_r, _a, _v) \ BHND_PCI_COMMON_REG_INSERT(sc->regfmt, _r, _a, _v) #define BPCI_COMMON_REG(_name) \ BHND_PCI_COMMON_REG(sc->regfmt, _name) #define BPCI_COMMON_REG_OFFSET(_base, _offset) \ (BPCI_COMMON_REG(_base) + BPCI_COMMON_REG(_offset)) /** * Default bhndb_pci implementation of device_probe(). * * Verifies that the parent is a PCI/PCIe device. */ static int bhndb_pci_probe(device_t dev) { device_t parent; devclass_t parent_bus; devclass_t pci; /* Our parent must be a PCI/PCIe device. */ pci = devclass_find("pci"); parent = device_get_parent(dev); parent_bus = device_get_devclass(device_get_parent(parent)); if (parent_bus != pci) return (ENXIO); device_set_desc(dev, "PCI-BHND bridge"); return (BUS_PROBE_DEFAULT); } static int bhndb_pci_attach(device_t dev) { struct bhndb_pci_softc *sc; int error, reg; sc = device_get_softc(dev); sc->dev = dev; /* Enable PCI bus mastering */ pci_enable_busmaster(device_get_parent(dev)); /* Determine our bridge device class */ sc->pci_devclass = BHND_DEVCLASS_PCI; if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, ®) == 0) sc->pci_devclass = BHND_DEVCLASS_PCIE; /* Determine the basic set of applicable quirks. This will be updated * in bhndb_pci_init_full_config() once the PCI device core has * been enumerated. */ sc->quirks = bhndb_pci_discover_quirks(sc, NULL); /* Using the discovered quirks, apply any WARs required for basic * register access. */ if ((error = bhndb_pci_wars_register_access(sc))) return (error); /* Use siba(4)-compatible regwin handling until we know * what kind of bus is attached */ sc->set_regwin = bhndb_pci_compat_setregwin; /* Perform full bridge attach. This should call back into our * bhndb_pci_init_full_config() implementation once the bridged * bhnd(4) bus has been enumerated, but before any devices have been * probed or attached. */ if ((error = bhndb_attach(dev, sc->pci_devclass))) return (error); /* If supported, switch to the faster regwin handling */ if (sc->bhndb.chipid.chip_type != BHND_CHIPTYPE_SIBA) { atomic_store_rel_ptr((volatile void *) &sc->set_regwin, (uintptr_t) &bhndb_pci_fast_setregwin); } return (0); } /** * Initialize the full bridge configuration. * * This is called during the DEVICE_ATTACH() process by the bridged bhndb(4) * bus, prior to probe/attachment of child cores. * * At this point, we can introspect the enumerated cores, find our host * bridge device, and apply any bridge-level hardware workarounds required * for proper operation of the bridged device cores. */ static int bhndb_pci_init_full_config(device_t dev, device_t child, const struct bhndb_hw_priority *prio_table) { struct bhnd_core_info core; const struct bhndb_pci_id *id; struct bhndb_pci_softc *sc; struct bhndb_region *pcir; bhnd_addr_t pcir_addr; bhnd_size_t pcir_size; int error; sc = device_get_softc(dev); /* Let bhndb perform full discovery and initialization of the * available register windows and bridge resources. */ if ((error = bhndb_generic_init_full_config(dev, child, prio_table))) return (error); /* * Identify our PCI bridge core, its register family, and any * applicable hardware quirks. 
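 *
 * Note that if the core is absent from bhndb_pci_ids, the lookup below
 * leaves `id` NULL and the subsequent read of id->regfmt dereferences a
 * NULL pointer. A minimal defensive sketch, assuming an unrecognized
 * host bridge core should abort the attach (illustrative only):
 *
 *	id = bhndb_pci_find_core_id(&core);
 *	if (id == NULL) {
 *		device_printf(dev, "%s %s hostb core is not recognized\n",
 *		    bhnd_vendor_name(core.vendor), bhnd_core_name(&core));
 *		return (ENXIO);
 *	}
 *	sc->regfmt = id->regfmt;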
*/ KASSERT(sc->bhndb.hostb_dev, ("missing hostb device\n")); core = bhnd_get_core_info(sc->bhndb.hostb_dev); id = bhndb_pci_find_core_id(&core); if (id == NULL) { device_printf(dev, "%s %s hostb core is not recognized\n", bhnd_vendor_name(core.vendor), bhnd_core_name(&core)); } sc->regfmt = id->regfmt; /* Now that we've identified the PCI bridge core, we can determine the * full set of device quirks */ sc->quirks = bhndb_pci_discover_quirks(sc, id); /* * Determine and save a reference to the bhndb resource and offset * at which the bridge core's device registers are mapped. * * All known bhnd(4) hardware provides a fixed static mapping of * the PCI core's registers. If this changes in the future -- which * is unlikely -- this driver will need to be adjusted to use * dynamic register windows. */ /* Find base address and size of the PCI core's register block. */ error = bhnd_get_region_addr(sc->bhndb.hostb_dev, BHND_PORT_DEVICE, 0, 0, &pcir_addr, &pcir_size); if (error) { device_printf(dev, "failed to locate PCI core registers\n"); return (error); } /* Find the bhndb_region that statically maps this block */ pcir = bhndb_find_resource_region(sc->bhndb.bus_res, pcir_addr, pcir_size); if (pcir == NULL || pcir->static_regwin == NULL) { device_printf(dev, "missing static PCI core register window\n"); return (ENXIO); } /* Save borrowed reference to the mapped PCI core registers */ sc->mem_off = pcir->static_regwin->win_offset; sc->mem_res = bhndb_find_regwin_resource(sc->bhndb.bus_res, pcir->static_regwin); if (sc->mem_res == NULL || !(rman_get_flags(sc->mem_res) & RF_ACTIVE)) { device_printf(dev, "no active resource maps the PCI core register window\n"); return (ENXIO); } /* Configure a direct bhnd_resource wrapper that we can pass to * bhnd_resource APIs */ sc->bhnd_mem_res = (struct bhnd_resource) { .res = sc->mem_res, .direct = true }; /* * Attach MMIO device (if this is a PCIe device), which is used for * access to the PCIe SerDes required by the quirk workarounds. */ if (sc->pci_devclass == BHND_DEVCLASS_PCIE) { - sc->mdio = device_add_child(dev, + sc->mdio = BUS_ADD_CHILD(dev, 0, devclass_get_name(bhnd_mdio_pci_devclass), 0); if (sc->mdio == NULL) return (ENXIO); + + error = bus_set_resource(sc->mdio, SYS_RES_MEMORY, 0, + rman_get_start(sc->mem_res) + sc->mem_off + + BHND_PCIE_MDIO_CTL, sizeof(uint32_t)*2); + if (error) { + device_printf(dev, "failed to set MDIO resource\n"); + return (error); + } if ((error = device_probe_and_attach(sc->mdio))) { device_printf(dev, "failed to attach MDIO device\n"); return (error); } } /* Apply any early one-time quirk workarounds */ if ((error = bhndb_pci_wars_early_once(sc))) return (error); /* Apply attach-time quirk workarounds, required before the bridged * bhnd(4) bus itself performs a full attach(). */ if ((error = bhndb_pci_wars_hwup(sc))) return (error); return (0); } /** * Apply any hardware workarounds that must be executed prior to attempting * register access on the bridged chipset. * * This must be called very early in attach() or resume(), after the basic * set of applicable device quirks has been determined. */ static int bhndb_pci_wars_register_access(struct bhndb_pci_softc *sc) { int error; if (BHNDB_PCI_QUIRK(sc, EXT_CLOCK_GATING)) { if ((error = bhndb_enable_pci_clocks(sc))) { device_printf(sc->dev, "failed to enable clocks\n"); return (error); } } return (0); } /** * Apply any hardware work-arounds that must be executed exactly once, early in * the attach process. 
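 *
 * A minimal call-ordering sketch, following the bhndb_pci_init_full_config()
 * flow above (error handling omitted): quirks are discovered first, the
 * one-time work-arounds run next, and the attach-time work-arounds follow:
 *
 *	sc->quirks = bhndb_pci_discover_quirks(sc, id);
 *	error = bhndb_pci_wars_early_once(sc);
 *	error = bhndb_pci_wars_hwup(sc);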
* * This must be called after core enumeration and discovery of all applicable * quirks, but prior to probe/attach of any cores, parsing of * SPROM, etc. */ static int bhndb_pci_wars_early_once(struct bhndb_pci_softc *sc) { /* Determine correct polarity by observing the attach-time PCIe PHY * link status. This is used later to reset/force the SerDes * polarity */ if (BHNDB_PCIE_QUIRK(sc, SDR9_POLARITY)) { uint32_t st; bool inv; st = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_PLP_STATUSREG); inv = ((st & BHND_PCIE_PLP_POLARITY_INV) != 0); sc->sdr9_quirk_polarity.inv = inv; } return (0); } /** * Apply any hardware workarounds that are required upon attach or resume * of the bridge device. */ static int bhndb_pci_wars_hwup(struct bhndb_pci_softc *sc) { /* Note that the order here matters; these work-arounds * should not be re-ordered without careful review of their * interdependencies */ /* Fix up any PoR defaults on SROMless devices */ bhndb_init_sromless_pci_config(sc); /* Enable PCI prefetch/burst/readmulti flags */ if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_PREF_BURST) || BHNDB_PCI_QUIRK(sc, SBTOPCI2_READMULTI)) { uint32_t sbp2; sbp2 = BHNDB_PCI_READ_4(sc, BHND_PCI_SBTOPCI2); if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_PREF_BURST)) sbp2 |= (BHND_PCI_SBTOPCI_PREF|BHND_PCI_SBTOPCI_BURST); if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_READMULTI)) sbp2 |= BHND_PCI_SBTOPCI_RC_READMULTI; BHNDB_PCI_WRITE_4(sc, BHND_PCI_SBTOPCI2, sbp2); } /* Disable PCI CLKRUN# */ if (BHNDB_PCI_QUIRK(sc, CLKRUN_DSBL)) { uint32_t ctl; ctl = BHNDB_PCI_READ_4(sc, BHND_PCI_CLKRUN_CTL); ctl |= BHND_PCI_CLKRUN_DSBL; BHNDB_PCI_WRITE_4(sc, BHND_PCI_CLKRUN_CTL, ctl); } /* Enable TLP unmatched address handling work-around */ if (BHNDB_PCIE_QUIRK(sc, UR_STATUS_FIX)) { uint32_t wrs; wrs = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_TLP_WORKAROUNDSREG); wrs |= BHND_PCIE_TLP_WORKAROUND_URBIT; bhndb_pcie_write_proto_reg(sc, BHND_PCIE_TLP_WORKAROUNDSREG, wrs); } /* Adjust SerDes CDR tuning to ensure that CDR is stable before sending * data during L0s to L0 exit transitions. 
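 *
 * The timer values below are written in hardware units: the 2.064us track
 * timer is encoded as 2064/16 = 129 and the 40.96us acquire timer as
 * 40960/1024 = 40 (assuming 16ns and 1.024us field granularity, inferred
 * from the divisors), i.e. the writes reduce to:
 *
 *	sdv = BPCI_REG_INSERT(0, PCIE_SDR9_RX_TIMER1_LKTRK, 129);
 *	sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_TIMER1_LKACQ, 40);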
*/ if (BHNDB_PCIE_QUIRK(sc, SDR9_L0s_HANG)) { uint16_t sdv; /* Set RX track/acquire timers to 2.064us/40.96us */ sdv = BPCI_REG_INSERT(0, PCIE_SDR9_RX_TIMER1_LKTRK, (2064/16)); sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_TIMER1_LKACQ, (40960/1024)); MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX, BHND_PCIE_SDR9_RX_TIMER1, sdv); /* Apply CDR frequency workaround */ sdv = BHND_PCIE_SDR9_RX_CDR_FREQ_OVR_EN; sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDR_FREQ_OVR, 0x0); MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX, BHND_PCIE_SDR9_RX_CDR, sdv); /* Apply CDR BW tunings */ sdv = 0; sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_INTGTRK, 0x2); sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_INTGACQ, 0x4); sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_PROPTRK, 0x6); sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_PROPACQ, 0x6); MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX, BHND_PCIE_SDR9_RX_CDRBW, sdv); } /* Force correct SerDes polarity */ if (BHNDB_PCIE_QUIRK(sc, SDR9_POLARITY)) { uint16_t rxctl; rxctl = MDIO_READREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX, BHND_PCIE_SDR9_RX_CTRL); rxctl |= BHND_PCIE_SDR9_RX_CTRL_FORCE; if (sc->sdr9_quirk_polarity.inv) rxctl |= BHND_PCIE_SDR9_RX_CTRL_POLARITY_INV; else rxctl &= ~BHND_PCIE_SDR9_RX_CTRL_POLARITY_INV; MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX, BHND_PCIE_SDR9_RX_CTRL, rxctl); } /* Disable startup retry on PLL frequency detection failure */ if (BHNDB_PCIE_QUIRK(sc, SDR9_NO_FREQRETRY)) { uint16_t pctl; pctl = MDIO_READREG(sc->mdio, BHND_PCIE_PHY_SDR9_PLL, BHND_PCIE_SDR9_PLL_CTRL); pctl &= ~BHND_PCIE_SDR9_PLL_CTRL_FREQDET_EN; MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_PLL, BHND_PCIE_SDR9_PLL_CTRL, pctl); } /* Explicitly enable PCI-PM */ if (BHNDB_PCIE_QUIRK(sc, PCIPM_REQEN)) { uint32_t lcreg; lcreg = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_LCREG); lcreg |= BHND_PCIE_DLLP_LCREG_PCIPM_EN; bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_LCREG, lcreg); } /* Adjust L1 timer to fix slow L1->L0 transitions */ if (BHNDB_PCIE_QUIRK(sc, L1_IDLE_THRESH)) { uint32_t pmt; pmt = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG); pmt = BPCI_REG_INSERT(pmt, PCIE_L1THRESHOLDTIME, BHND_PCIE_L1THRESHOLD_WARVAL); bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG, pmt); } /* Extend L1 timer for better performance. * TODO: We could enable/disable this on demand for better power * savings if we tie this to HT clock request handling */ if (BHNDB_PCIE_QUIRK(sc, L1_TIMER_PERF)) { uint32_t pmt; pmt = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG); pmt |= BHND_PCIE_ASPMTIMER_EXTEND; bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG, pmt); } /* Enable L23READY_EXIT_NOPRST if not already set in SPROM. */ if (BHNDB_PCIE_QUIRK(sc, SPROM_L23_PCI_RESET)) { bus_size_t reg; uint16_t cfg; /* Fetch the misc cfg flags from SPROM */ reg = BHND_PCIE_SPROM_SHADOW + BHND_PCIE_SRSH_PCIE_MISC_CONFIG; cfg = BHNDB_PCI_READ_2(sc, reg); /* Write EXIT_NOPRST flag if not already set in SPROM */ if (!(cfg & BHND_PCIE_SRSH_L23READY_EXIT_NOPRST)) { cfg |= BHND_PCIE_SRSH_L23READY_EXIT_NOPRST; BHNDB_PCI_WRITE_2(sc, reg, cfg); } } return (0); } /** * Apply any hardware workarounds that are required upon resume of the * bridge device. * * This must be called before any bridged bhnd(4) cores have been resumed. 
*/ static int bhndb_pci_wars_hwresume(struct bhndb_pci_softc *sc) { int error; /* Nothing is possible without register access */ if ((error = bhndb_pci_wars_register_access(sc))) return (error); /* Apply the general hwup workarounds */ return (bhndb_pci_wars_hwup(sc)); } /** * Apply any hardware workarounds that are required upon detach or suspend * of the bridge device. */ static int bhndb_pci_wars_hwdown(struct bhndb_pci_softc *sc) { int error; /* Reduce L1 timer for better power savings. * TODO: We could enable/disable this on demand for better power * savings if we tie this to HT clock request handling */ if (BHNDB_PCIE_QUIRK(sc, L1_TIMER_PERF)) { uint32_t pmt; pmt = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG); pmt &= ~BHND_PCIE_ASPMTIMER_EXTEND; bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG, pmt); } /* Disable clocks */ if (BHNDB_PCI_QUIRK(sc, EXT_CLOCK_GATING)) { if ((error = bhndb_disable_pci_clocks(sc))) { device_printf(sc->dev, "failed to disable clocks\n"); return (error); } } return (0); } /* * On devices without a SROM, the PCI(e) cores will be initialized with * their Power-on-Reset defaults; this can leave the the BAR0 PCI windows * potentially mapped to the wrong core index. * * This function updates the PCI core's BAR0 PCI configuration to point at the * current PCI core. * * Applies to all PCI/PCIe revisions. Must be applied before bus devices * are probed/attached or the SPROM is parsed. */ static void bhndb_init_sromless_pci_config(struct bhndb_pci_softc *sc) { bus_size_t sprom_addr; u_int sprom_core_idx; u_int pci_core_idx; uint16_t val; /* Fetch the SPROM's configured core index */ sprom_addr = BPCI_COMMON_REG_OFFSET(SPROM_SHADOW, SRSH_PI_OFFSET); val = BHNDB_PCI_READ_2(sc, sprom_addr); /* If it doesn't match host bridge's core index, update the index * value */ sprom_core_idx = BPCI_COMMON_REG_EXTRACT(val, SRSH_PI); pci_core_idx = bhnd_get_core_index(sc->bhndb.hostb_dev); if (sprom_core_idx != pci_core_idx) { val = BPCI_COMMON_REG_INSERT(val, SRSH_PI, pci_core_idx); BHNDB_PCI_WRITE_2(sc, sprom_addr, val); } } static int bhndb_pci_detach(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); if ((error = bhndb_generic_detach(dev))) return (error); /* Apply any hardware workarounds. This may disable the clock, and * thus must be called *after* any children have been detached. */ if ((error = bhndb_pci_wars_hwdown(sc))) return (error); /* Disable PCI bus mastering */ pci_disable_busmaster(device_get_parent(dev)); return (0); } static int bhndb_pci_suspend(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); if ((error = bhndb_generic_suspend(dev))) return (error); /* Apply any hardware workarounds. This may disable the clock, and * thus must be called *after* any children have been suspended. */ if ((error = bhndb_pci_wars_hwdown(sc))) return (error); return (0); } static int bhndb_pci_resume(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); /* Apply any resume workarounds; these may be required for bridged * device access, and thus must be called *before* any children are * resumed. 
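 *
 * This ordering is the mirror image of bhndb_pci_suspend(), where
 * bhndb_pci_wars_hwdown() runs only after the children have been
 * suspended. A minimal resume sketch (error handling omitted):
 *
 *	bhndb_pci_wars_hwresume(sc);	-- re-enable clocks, re-apply WARs
 *	bhndb_generic_resume(dev);	-- then resume the bridged cores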
*/ if ((error = bhndb_pci_wars_hwresume(sc))) return (error); if ((error = bhndb_generic_resume(dev))) return (error); return (0); } static int bhndb_pci_set_window_addr(device_t dev, const struct bhndb_regwin *rw, bhnd_addr_t addr) { struct bhndb_pci_softc *sc = device_get_softc(dev); return (sc->set_regwin(sc, rw, addr)); } /** * A siba(4) and bcma(4)-compatible bhndb_set_window_addr implementation. * * On siba(4) devices, it's possible that writing a PCI window register may * not succeed; it's necessary to immediately read the configuration register * and retry if not set to the desired value. * * This is not necessary on bcma(4) devices, but other than the overhead of * validating the register, there's no harm in performing the verification. */ static int bhndb_pci_compat_setregwin(struct bhndb_pci_softc *sc, const struct bhndb_regwin *rw, bhnd_addr_t addr) { device_t parent; int error; parent = sc->bhndb.parent_dev; if (rw->win_type != BHNDB_REGWIN_T_DYN) return (ENODEV); for (u_int i = 0; i < BHNDB_PCI_BARCTRL_WRITE_RETRY; i++) { if ((error = bhndb_pci_fast_setregwin(sc, rw, addr))) return (error); if (pci_read_config(parent, rw->dyn.cfg_offset, 4) == addr) return (0); DELAY(10); } /* Unable to set window */ return (ENODEV); } /** * A bcma(4)-only bhndb_set_window_addr implementation. */ static int bhndb_pci_fast_setregwin(struct bhndb_pci_softc *sc, const struct bhndb_regwin *rw, bhnd_addr_t addr) { device_t parent = sc->bhndb.parent_dev; /* The PCI bridge core only supports 32-bit addressing, regardless * of the bus' support for 64-bit addressing */ if (addr > UINT32_MAX) return (ERANGE); switch (rw->win_type) { case BHNDB_REGWIN_T_DYN: /* Addresses must be page aligned */ if (addr % rw->win_size != 0) return (EINVAL); pci_write_config(parent, rw->dyn.cfg_offset, addr, 4); break; default: return (ENODEV); } return (0); } /** * Read a 32-bit PCIe TLP/DLLP/PLP protocol register. * * @param sc The bhndb_pci driver state. * @param addr The protocol register offset. */ static uint32_t bhndb_pcie_read_proto_reg(struct bhndb_pci_softc *sc, uint32_t addr) { uint32_t val; KASSERT(bhnd_get_class(sc->bhndb.hostb_dev) == BHND_DEVCLASS_PCIE, ("not a pcie device!")); BHNDB_LOCK(&sc->bhndb); BHNDB_PCI_WRITE_4(sc, BHND_PCIE_IND_ADDR, addr); val = BHNDB_PCI_READ_4(sc, BHND_PCIE_IND_DATA); BHNDB_UNLOCK(&sc->bhndb); return (val); } /** * Write a 32-bit PCIe TLP/DLLP/PLP protocol register value. * * @param sc The bhndb_pci driver state. * @param addr The protocol register offset. * @param val The value to write to @p addr. */ static void bhndb_pcie_write_proto_reg(struct bhndb_pci_softc *sc, uint32_t addr, uint32_t val) { KASSERT(bhnd_get_class(sc->bhndb.hostb_dev) == BHND_DEVCLASS_PCIE, ("not a pcie device!")); BHNDB_LOCK(&sc->bhndb); BHNDB_PCI_WRITE_4(sc, BHND_PCIE_IND_ADDR, addr); BHNDB_PCI_WRITE_4(sc, BHND_PCIE_IND_DATA, val); BHNDB_UNLOCK(&sc->bhndb); } /** * Enable externally managed clocks. * * Quirk Required: EXT_CLOCK_GATING * * @param sc Bridge driver state. 
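 *
 * A minimal usage sketch, following the quirk-guarded call pattern used by
 * bhndb_pci_wars_register_access():
 *
 *	if (BHNDB_PCI_QUIRK(sc, EXT_CLOCK_GATING)) {
 *		if ((error = bhndb_enable_pci_clocks(sc)))
 *			return (error);
 *	}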
*/ static int bhndb_enable_pci_clocks(struct bhndb_pci_softc *sc) { device_t pci_parent; uint32_t gpio_in, gpio_out, gpio_en; uint32_t gpio_flags; uint16_t pci_status; BHNDB_PCI_ASSERT_QUIRK(sc, EXT_CLOCK_GATING); pci_parent = device_get_parent(sc->dev); /* Read state of XTAL pin */ gpio_in = pci_read_config(pci_parent, BHNDB_PCI_GPIO_IN, 4); if (gpio_in & BHNDB_PCI_GPIO_XTAL_ON) return (0); /* already enabled */ /* Fetch current config */ gpio_out = pci_read_config(pci_parent, BHNDB_PCI_GPIO_OUT, 4); gpio_en = pci_read_config(pci_parent, BHNDB_PCI_GPIO_OUTEN, 4); /* Set PLL_OFF/XTAL_ON pins to HIGH and enable both pins */ gpio_flags = (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON); gpio_out |= gpio_flags; gpio_en |= gpio_flags; pci_write_config(pci_parent, BHNDB_PCI_GPIO_OUT, gpio_out, 4); pci_write_config(pci_parent, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4); DELAY(1000); /* Reset PLL_OFF */ gpio_out &= ~BHNDB_PCI_GPIO_PLL_OFF; pci_write_config(pci_parent, BHNDB_PCI_GPIO_OUT, gpio_out, 4); DELAY(5000); /* Clear any PCI 'sent target-abort' flag. */ pci_status = pci_read_config(pci_parent, PCIR_STATUS, 2); pci_status &= ~PCIM_STATUS_STABORT; pci_write_config(pci_parent, PCIR_STATUS, pci_status, 2); return (0); } /** * Disable externally managed clocks. * * Quirk Required: EXT_CLOCK_GATING * * @param sc Bridge driver state. */ static int bhndb_disable_pci_clocks(struct bhndb_pci_softc *sc) { device_t parent_dev; uint32_t gpio_out, gpio_en; BHNDB_PCI_ASSERT_QUIRK(sc, EXT_CLOCK_GATING); parent_dev = device_get_parent(sc->dev); // TODO: Check board flags for BFL2_XTALBUFOUTEN? // TODO: Check PCI core revision? // TODO: Switch to 'slow' clock? /* Fetch current config */ gpio_out = pci_read_config(parent_dev, BHNDB_PCI_GPIO_OUT, 4); gpio_en = pci_read_config(parent_dev, BHNDB_PCI_GPIO_OUTEN, 4); /* Set PLL_OFF to HIGH, XTAL_ON to LOW. */ gpio_out &= ~BHNDB_PCI_GPIO_XTAL_ON; gpio_out |= BHNDB_PCI_GPIO_PLL_OFF; pci_write_config(parent_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4); /* Enable both output pins */ gpio_en |= (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON); pci_write_config(parent_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4); return (0); } /** * Find the identification table entry for a core descriptor. * * @param sc bhndb PCI driver state. */ static const struct bhndb_pci_id * bhndb_pci_find_core_id(struct bhnd_core_info *core) { const struct bhndb_pci_id *id; for (id = bhndb_pci_ids; id->device != BHND_COREID_INVALID; id++) { if (core->vendor == BHND_MFGID_BCM && core->device == id->device) return (id); } return (NULL); } /** * Return all quirks known to be applicable to the host bridge. * * If the PCI bridge core has not yet been identified, no core-specific * quirk flags will be returned. This function may be called again to * rediscover applicable quirks after the host bridge core has been * identified. * * @param sc bhndb PCI driver state. * @param id The host bridge core's identification table entry, or NULL * if the host bridge core has not yet been identified. * * @return Returns the set of quirks applicable to the current hardware. 
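 *
 * As a worked example (hypothetical hardware): a PCIe-Gen1 host bridge core
 * at hwrev 3 matches the hwrev 3-5 and hwrev <= 6 entries of bhndb_pci_ids
 * above, and no device class quirks apply, yielding:
 *
 *	quirks = BHNDB_PCIE_QUIRK_ASPM_OVR |
 *	    BHNDB_PCIE_QUIRK_SDR9_POLARITY |
 *	    BHNDB_PCIE_QUIRK_SDR9_NO_FREQRETRY |
 *	    BHNDB_PCIE_QUIRK_L1_IDLE_THRESH;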
*/ static uint32_t bhndb_pci_discover_quirks(struct bhndb_pci_softc *sc, const struct bhndb_pci_id *id) { struct bhnd_device_quirk *qt; uint32_t quirks; uint8_t hwrev; quirks = BHNDB_PCI_QUIRK_NONE; /* Determine any device class-specific quirks */ switch (sc->pci_devclass) { case BHND_DEVCLASS_PCI: /* All PCI devices require external clock gating */ sc->quirks |= BHNDB_PCI_QUIRK_EXT_CLOCK_GATING; break; default: break; } // TODO: Additional quirk matching /* Determine any PCI core hwrev-specific device quirks */ if (id != NULL) { hwrev = bhnd_get_hwrev(sc->bhndb.hostb_dev); for (qt = id->quirks; qt->quirks != 0; qt++) { if (bhnd_hwrev_matches(hwrev, &qt->hwrev)) quirks |= qt->quirks; } } return (quirks); } /* * Support for attaching the PCIe-Gen1 MDIO driver to a parent bhndb PCIe * bridge device. */ static int bhndb_mdio_pcie_probe(device_t dev) { - struct bhndb_softc *psc; - device_t parent; - - /* Parent must be a bhndb_pcie instance */ - parent = device_get_parent(dev); - if (device_get_driver(parent) != &bhndb_pci_driver) - return (ENXIO); - - /* Parent must have PCIe-Gen1 hostb device */ - psc = device_get_softc(parent); - if (psc->hostb_dev == NULL) - return (ENXIO); - - if (bhnd_get_vendor(psc->hostb_dev) != BHND_MFGID_BCM || - bhnd_get_device(psc->hostb_dev) != BHND_COREID_PCIE) - { - return (ENXIO); - } - device_quiet(dev); return (BUS_PROBE_NOWILDCARD); } static int bhndb_mdio_pcie_attach(device_t dev) { struct bhndb_pci_softc *psc; - psc = device_get_softc(device_get_parent(dev)); - return (bhnd_mdio_pcie_attach(dev, &psc->bhnd_mem_res, -1, psc->mem_off + BHND_PCIE_MDIO_CTL, (psc->quirks & BHNDB_PCIE_QUIRK_SD_C22_EXTADDR) != 0)); - - return (ENXIO); } static device_method_t bhnd_mdio_pcie_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bhndb_mdio_pcie_probe), DEVMETHOD(device_attach, bhndb_mdio_pcie_attach), DEVMETHOD_END }; static device_method_t bhndb_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bhndb_pci_probe), DEVMETHOD(device_attach, bhndb_pci_attach), DEVMETHOD(device_detach, bhndb_pci_detach), DEVMETHOD(device_suspend, bhndb_pci_suspend), DEVMETHOD(device_resume, bhndb_pci_resume), /* BHNDB interface */ DEVMETHOD(bhndb_init_full_config, bhndb_pci_init_full_config), DEVMETHOD(bhndb_set_window_addr, bhndb_pci_set_window_addr), DEVMETHOD_END }; DEFINE_CLASS_1(bhndb, bhndb_pci_driver, bhndb_pci_methods, sizeof(struct bhndb_pci_softc), bhndb_driver); DEFINE_CLASS_1(bhnd_mdio_pci, bhndb_mdio_pcie_driver, bhnd_mdio_pcie_methods, sizeof(struct bhnd_mdio_pcie_softc), bhnd_mdio_pcie_driver); DRIVER_MODULE(bhnd_mdio_pcie, bhndb, bhndb_mdio_pcie_driver, bhnd_mdio_pci_devclass, NULL, NULL); MODULE_VERSION(bhndb_pci, 1); MODULE_DEPEND(bhndb_pci, bhnd_pci, 1, 1, 1); MODULE_DEPEND(bhndb_pci, pci, 1, 1, 1); MODULE_DEPEND(bhndb_pci, bhndb, 1, 1, 1); Index: head/sys/dev/bhnd/bhndb/bhndb_private.h =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb_private.h (revision 298275) +++ head/sys/dev/bhnd/bhndb/bhndb_private.h (revision 298276) @@ -1,239 +1,246 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #ifndef _BHND_BHNDB_PRIVATE_H_ #define _BHND_BHNDB_PRIVATE_H_ #include #include #include #include #include #include #include "bhndbvar.h" /* * Private bhndb(4) driver definitions. */ struct bhndb_dw_alloc; struct bhndb_region; struct bhndb_resources; +struct resource *bhndb_find_resource_range( + struct bhndb_resources *br, + rman_res_t start, rman_res_t count); + struct resource *bhndb_find_regwin_resource( struct bhndb_resources *br, const struct bhndb_regwin *win); struct bhndb_resources *bhndb_alloc_resources(device_t dev, device_t parent_dev, const struct bhndb_hwcfg *cfg); void bhndb_free_resources( struct bhndb_resources *br); int bhndb_add_resource_region( struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size, bhndb_priority_t priority, const struct bhndb_regwin *static_regwin); struct bhndb_region *bhndb_find_resource_region( struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size); struct bhndb_dw_alloc *bhndb_dw_find_resource( struct bhndb_resources *dr, struct resource *r); struct bhndb_dw_alloc *bhndb_dw_find_mapping( struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size); int bhndb_dw_retain( struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, struct resource *res); void bhndb_dw_release( struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, struct resource *res); int bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size); size_t bhndb_regwin_count( const struct bhndb_regwin *table, bhndb_regwin_type_t type); const struct bhndb_regwin *bhndb_regwin_find_type( const struct bhndb_regwin *table, bhndb_regwin_type_t type, bus_size_t min_size); const struct bhndb_regwin *bhndb_regwin_find_core( const struct bhndb_regwin *table, bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port, u_int region); const struct bhndb_regwin *bhndb_regwin_find_best( const struct bhndb_regwin *table, bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port, u_int region, bus_size_t min_size); bool bhndb_regwin_matches_device( const struct bhndb_regwin *regw, device_t dev); const struct bhndb_hw_priority *bhndb_hw_priority_find_device( const struct bhndb_hw_priority *table, device_t device); /** * Dynamic register window allocation reference. */ struct bhndb_dw_rentry { struct resource *dw_res; /**< child resource */ LIST_ENTRY(bhndb_dw_rentry) dw_link; }; /** * A dynamic register window allocation record. 
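 *
 * A record is free when its reference list is empty; the owning
 * bhndb_resources tracks the same state in a parallel bitmask, so for a
 * record with rnid 2 the invariant is (informally):
 *
 *	LIST_EMPTY(&dwa->refs) == ((br->dwa_freelist & (1 << 2)) != 0)
 *
 * bhndb_dw_retain() clears the bit when the first reference is taken, and
 * bhndb_dw_release() sets it again once the last reference is dropped.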
*/ struct bhndb_dw_alloc { const struct bhndb_regwin *win; /**< window definition */ struct resource *parent_res; /**< enclosing resource */ u_int rnid; /**< region identifier */ rman_res_t target; /**< the current window address, or 0x0 if unknown */ LIST_HEAD(, bhndb_dw_rentry) refs; /**< references */ }; /** * A bus address region description. */ struct bhndb_region { bhnd_addr_t addr; /**< start of mapped range */ bhnd_size_t size; /**< size of mapped range */ bhndb_priority_t priority; /**< direct resource allocation priority */ const struct bhndb_regwin *static_regwin; /**< fixed mapping regwin, if any */ STAILQ_ENTRY(bhndb_region) link; }; /** * BHNDB resource allocation state. */ struct bhndb_resources { device_t dev; /**< bridge device */ const struct bhndb_hwcfg *cfg; /**< hardware configuration */ device_t parent_dev; /**< parent device */ struct resource_spec *res_spec; /**< parent bus resource specs */ struct resource **res; /**< parent bus resources */ + + struct rman ht_mem_rman; /**< host memory manager */ + struct rman br_mem_rman; /**< bridged memory manager */ STAILQ_HEAD(, bhndb_region) bus_regions; /**< bus region descriptors */ struct bhndb_dw_alloc *dw_alloc; /**< dynamic window allocation records */ size_t dwa_count; /**< number of dynamic windows available. */ uint32_t dwa_freelist; /**< dynamic window free list */ bhndb_priority_t min_prio; /**< minimum resource priority required to allocate a dynamic window */ }; /** * Returns true if the all dynamic windows have been exhausted, false * otherwise. * * @param br The resource state to check. */ static inline bool bhndb_dw_exhausted(struct bhndb_resources *br) { return (br->dwa_freelist == 0); } /** * Find the next free dynamic window region in @p br. * * @param br The resource state to search. */ static inline struct bhndb_dw_alloc * bhndb_dw_next_free(struct bhndb_resources *br) { struct bhndb_dw_alloc *dw_free; if (bhndb_dw_exhausted(br)) return (NULL); dw_free = &br->dw_alloc[__builtin_ctz(br->dwa_freelist)]; KASSERT(LIST_EMPTY(&dw_free->refs), ("free list out of sync with refs")); return (dw_free); } /** * Returns true if a dynamic window allocation is marked as free. * * @param br The resource state owning @p dwa. * @param dwa The dynamic window allocation record to be checked. */ static inline bool bhndb_dw_is_free(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa) { bool is_free = LIST_EMPTY(&dwa->refs); KASSERT(is_free == ((br->dwa_freelist & (1 << dwa->rnid)) != 0), ("refs out of sync with free list")); return (is_free); } #define BHNDB_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \ "bhndb resource allocator lock", MTX_DEF) #define BHNDB_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define BHNDB_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define BHNDB_LOCK_ASSERT(sc, what) mtx_assert(&(sc)->sc_mtx, what) #define BHNDB_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #endif /* _BHND_BHNDB_PRIVATE_H_ */ Index: head/sys/dev/bhnd/bhndb/bhndb_subr.c =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb_subr.c (revision 298275) +++ head/sys/dev/bhnd/bhndb/bhndb_subr.c (revision 298276) @@ -1,889 +1,986 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #include #include "bhndb_private.h" #include "bhndbvar.h" /** * Attach a BHND bridge device to @p parent. * * @param parent A parent PCI device. * @param[out] bhndb On success, the probed and attached bhndb bridge device. * @param unit The device unit number, or -1 to select the next available unit * number. * * @retval 0 success * @retval non-zero Failed to attach the bhndb device. */ int bhndb_attach_bridge(device_t parent, device_t *bhndb, int unit) { int error; *bhndb = device_add_child(parent, devclass_get_name(bhndb_devclass), unit); if (*bhndb == NULL) return (ENXIO); if (!(error = device_probe_and_attach(*bhndb))) return (0); if ((device_delete_child(parent, *bhndb))) device_printf(parent, "failed to detach bhndb child\n"); return (error); } /* * Call BHNDB_SUSPEND_RESOURCE() for all resources in @p rl. */ static void bhndb_do_suspend_resources(device_t dev, struct resource_list *rl) { struct resource_list_entry *rle; /* Suspend all child resources. */ STAILQ_FOREACH(rle, rl, link) { /* Skip non-allocated resources */ if (rle->res == NULL) continue; BHNDB_SUSPEND_RESOURCE(device_get_parent(dev), dev, rle->type, rle->res); } } /** * Helper function for implementing BUS_RESUME_CHILD() on bridged * bhnd(4) buses. * * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST() * to find the child's resources and call BHNDB_SUSPEND_RESOURCE() for all * child resources, ensuring that the device's allocated bridge resources * will be available to other devices during bus resumption. * * Before suspending any resources, @p child is suspended by * calling bhnd_generic_suspend_child(). * * If @p child is not a direct child of @p dev, suspension is delegated to * the @p dev parent. */ int bhnd_generic_br_suspend_child(device_t dev, device_t child) { struct resource_list *rl; int error; if (device_get_parent(child) != dev) BUS_SUSPEND_CHILD(device_get_parent(dev), child); if (device_is_suspended(child)) return (EBUSY); /* Suspend the child device */ if ((error = bhnd_generic_suspend_child(dev, child))) return (error); /* Fetch the resource list. If none, there's nothing else to do */ rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child); if (rl == NULL) return (0); /* Suspend all child resources. 
*/ bhndb_do_suspend_resources(dev, rl); return (0); } /** * Helper function for implementing BUS_RESUME_CHILD() on bridged * bhnd(4) bus devices. * * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST() * to find the child's resources and call BHNDB_RESUME_RESOURCE() for all * child resources, before delegating to bhnd_generic_resume_child(). * * If resource resumption fails, @p child will not be resumed. * * If @p child is not a direct child of @p dev, suspension is delegated to * the @p dev parent. */ int bhnd_generic_br_resume_child(device_t dev, device_t child) { struct resource_list *rl; struct resource_list_entry *rle; int error; if (device_get_parent(child) != dev) BUS_RESUME_CHILD(device_get_parent(dev), child); if (!device_is_suspended(child)) return (EBUSY); /* Fetch the resource list. If none, there's nothing else to do */ rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child); if (rl == NULL) return (bhnd_generic_resume_child(dev, child)); /* Resume all resources */ STAILQ_FOREACH(rle, rl, link) { /* Skip non-allocated resources */ if (rle->res == NULL) continue; error = BHNDB_RESUME_RESOURCE(device_get_parent(dev), dev, rle->type, rle->res); if (error) { /* Put all resources back into a suspend state */ bhndb_do_suspend_resources(dev, rl); return (error); } } /* Now that all resources are resumed, resume child */ if ((error = bhnd_generic_resume_child(dev, child))) { /* Put all resources back into a suspend state */ bhndb_do_suspend_resources(dev, rl); } return (error); } /** + * Find a SYS_RES_MEMORY resource containing the given address range. + * + * @param br The bhndb resource state to search. + * @param start The start address of the range to search for. + * @param count The size of the range to search for. + * + * @retval resource the host resource containing the requested range. + * @retval NULL if no resource containing the requested range can be found. + */ +struct resource * +bhndb_find_resource_range(struct bhndb_resources *br, rman_res_t start, + rman_res_t count) +{ + for (u_int i = 0; br->res_spec[i].type != -1; i++) { + struct resource *r = br->res[i]; + + if (br->res_spec->type != SYS_RES_MEMORY) + continue; + + /* Verify range */ + if (rman_get_start(r) > start) + continue; + + if (rman_get_end(r) < (start + count - 1)) + continue; + + return (r); + } + + return (NULL); +} + +/** * Find the resource containing @p win. * * @param br The bhndb resource state to search. * @param win A register window. * * @retval resource the resource containing @p win. * @retval NULL if no resource containing @p win can be found. */ struct resource * bhndb_find_regwin_resource(struct bhndb_resources *br, const struct bhndb_regwin *win) { const struct resource_spec *rspecs; rspecs = br->cfg->resource_specs; for (u_int i = 0; rspecs[i].type != -1; i++) { if (win->res.type != rspecs[i].type) continue; if (win->res.rid != rspecs[i].rid) continue; /* Found declared resource */ return (br->res[i]); } device_printf(br->dev, "missing regwin resource spec (type=%d, rid=%d)\n", win->res.type, win->res.rid); return (NULL); } /** * Allocate and initialize a new resource state structure, allocating * bus resources from @p parent_dev according to @p cfg. * * @param dev The bridge device. * @param parent_dev The parent device from which resources will be allocated. * @param cfg The hardware configuration to be used. 
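 *
 * In addition to allocating the parent bus resources, this initializes two
 * resource managers: ht_mem_rman, spanning the host memory ranges actually
 * allocated from the parent (e.g. the PCI BARs), and br_mem_rman, spanning
 * the 32-bit bridged SoC address space. A minimal sketch of how each host
 * memory resource is placed under ht_mem_rman (mirroring the loop in the
 * function body):
 *
 *	error = rman_manage_region(&r->ht_mem_rman,
 *	    rman_get_start(res), rman_get_end(res));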
*/ struct bhndb_resources * bhndb_alloc_resources(device_t dev, device_t parent_dev, const struct bhndb_hwcfg *cfg) { struct bhndb_resources *r; const struct bhndb_regwin *win; bus_size_t last_window_size; size_t res_num; u_int rnid; int error; bool free_parent_res; + bool free_ht_mem, free_br_mem; free_parent_res = false; + free_ht_mem = false; + free_br_mem = false; r = malloc(sizeof(*r), M_BHND, M_NOWAIT|M_ZERO); if (r == NULL) return (NULL); /* Basic initialization */ r->dev = dev; r->parent_dev = parent_dev; r->cfg = cfg; r->min_prio = BHNDB_PRIORITY_NONE; STAILQ_INIT(&r->bus_regions); + /* Initialize host address space resource manager. */ + r->ht_mem_rman.rm_start = 0; + r->ht_mem_rman.rm_end = ~0; + r->ht_mem_rman.rm_type = RMAN_ARRAY; + r->ht_mem_rman.rm_descr = "BHNDB host memory"; + if ((error = rman_init(&r->ht_mem_rman))) { + device_printf(r->dev, "could not initialize ht_mem_rman\n"); + goto failed; + } + free_ht_mem = true; + + + /* Initialize resource manager for the bridged address space. */ + r->br_mem_rman.rm_start = 0; + r->br_mem_rman.rm_end = BUS_SPACE_MAXADDR_32BIT; + r->br_mem_rman.rm_type = RMAN_ARRAY; + r->br_mem_rman.rm_descr = "BHNDB bridged memory"; + + if ((error = rman_init(&r->br_mem_rman))) { + device_printf(r->dev, "could not initialize br_mem_rman\n"); + goto failed; + } + free_br_mem = true; + + error = rman_manage_region(&r->br_mem_rman, 0, BUS_SPACE_MAXADDR_32BIT); + if (error) { + device_printf(r->dev, "could not configure br_mem_rman\n"); + goto failed; + } + + /* Determine our bridge resource count from the hardware config. */ res_num = 0; for (size_t i = 0; cfg->resource_specs[i].type != -1; i++) res_num++; /* Allocate space for a non-const copy of our resource_spec * table; this will be updated with the RIDs assigned by * bus_alloc_resources. */ r->res_spec = malloc(sizeof(r->res_spec[0]) * (res_num + 1), M_BHND, M_NOWAIT); if (r->res_spec == NULL) goto failed; /* Initialize and terminate the table */ for (size_t i = 0; i < res_num; i++) r->res_spec[i] = cfg->resource_specs[i]; r->res_spec[res_num].type = -1; /* Allocate space for our resource references */ r->res = malloc(sizeof(r->res[0]) * res_num, M_BHND, M_NOWAIT); if (r->res == NULL) goto failed; /* Allocate resources */ error = bus_alloc_resources(r->parent_dev, r->res_spec, r->res); if (error) { device_printf(r->dev, "could not allocate bridge resources on %s: %d\n", device_get_nameunit(r->parent_dev), error); goto failed; } else { free_parent_res = true; } + /* Add allocated memory resources to our host memory resource manager */ + for (u_int i = 0; r->res_spec[i].type != -1; i++) { + struct resource *res; + + /* skip non-memory resources */ + if (r->res_spec[i].type != SYS_RES_MEMORY) + continue; + + /* add host resource to set of managed regions */ + res = r->res[i]; + error = rman_manage_region(&r->ht_mem_rman, rman_get_start(res), + rman_get_end(res)); + if (error) { + device_printf(r->dev, + "could not register host memory region with " + "ht_mem_rman: %d\n", error); + goto failed; + } + } + /* Fetch the dynamic regwin count and verify that it does not exceed * what is representable via our freelist bitmask. */ r->dwa_count = bhndb_regwin_count(cfg->register_windows, BHNDB_REGWIN_T_DYN); if (r->dwa_count >= (8 * sizeof(r->dwa_freelist))) { device_printf(r->dev, "max dynamic regwin count exceeded\n"); goto failed; } /* Allocate the dynamic window allocation table. 
*/ r->dw_alloc = malloc(sizeof(r->dw_alloc[0]) * r->dwa_count, M_BHND, M_NOWAIT); if (r->dw_alloc == NULL) goto failed; /* Initialize the dynamic window table and freelist. */ r->dwa_freelist = 0; rnid = 0; last_window_size = 0; for (win = cfg->register_windows; win->win_type != BHNDB_REGWIN_T_INVALID; win++) { struct bhndb_dw_alloc *dwa; /* Skip non-DYN windows */ if (win->win_type != BHNDB_REGWIN_T_DYN) continue; /* Validate the window size */ if (win->win_size == 0) { device_printf(r->dev, "ignoring zero-length dynamic " "register window\n"); continue; } else if (last_window_size == 0) { last_window_size = win->win_size; } else if (last_window_size != win->win_size) { /* * No existing hardware should trigger this. * * If you run into this in the future, the dynamic * window allocator and the resource priority system * will need to be extended to support multiple register * window allocation pools. */ device_printf(r->dev, "devices that vend multiple " "dynamic register window sizes are not currently " "supported\n"); goto failed; } dwa = &r->dw_alloc[rnid]; dwa->win = win; dwa->parent_res = NULL; dwa->rnid = rnid; dwa->target = 0x0; LIST_INIT(&dwa->refs); /* Find and validate corresponding resource. */ dwa->parent_res = bhndb_find_regwin_resource(r, win); if (dwa->parent_res == NULL) goto failed; if (rman_get_size(dwa->parent_res) < win->win_offset + win->win_size) { device_printf(r->dev, "resource %d too small for " "register window with offset %llx and size %llx\n", rman_get_rid(dwa->parent_res), (unsigned long long) win->win_offset, (unsigned long long) win->win_size); error = EINVAL; goto failed; } /* Add to freelist */ r->dwa_freelist |= (1 << rnid); rnid++; } return (r); failed: if (free_parent_res) bus_release_resources(r->parent_dev, r->res_spec, r->res); + + if (free_ht_mem) + rman_fini(&r->ht_mem_rman); + if (free_br_mem) + rman_fini(&r->br_mem_rman); + if (r->res != NULL) free(r->res, M_BHND); if (r->res_spec != NULL) free(r->res_spec, M_BHND); if (r->dw_alloc != NULL) free(r->dw_alloc, M_BHND); free (r, M_BHND); return (NULL); } /** * Deallocate the given bridge resource structure and any associated resources. * * @param br Resource state to be deallocated. */ void bhndb_free_resources(struct bhndb_resources *br) { struct bhndb_region *region, *r_next; struct bhndb_dw_alloc *dwa; struct bhndb_dw_rentry *dwr, *dwr_next; /* No window regions may still be held */ if (__builtin_popcount(br->dwa_freelist) != br->dwa_count) { device_printf(br->dev, "leaked %llu dynamic register regions\n", (unsigned long long) br->dwa_count - br->dwa_freelist); } /* Release resources allocated through our parent. */ bus_release_resources(br->parent_dev, br->res_spec, br->res); /* Clean up resource reservations */ for (size_t i = 0; i < br->dwa_count; i++) { dwa = &br->dw_alloc[i]; LIST_FOREACH_SAFE(dwr, &dwa->refs, dw_link, dwr_next) { LIST_REMOVE(dwr, dw_link); free(dwr, M_BHND); } } /* Release bus regions */ STAILQ_FOREACH_SAFE(region, &br->bus_regions, link, r_next) { STAILQ_REMOVE(&br->bus_regions, region, bhndb_region, link); free(region, M_BHND); } + + /* Release our resource managers */ + rman_fini(&br->ht_mem_rman); + rman_fini(&br->br_mem_rman); /* Free backing resource state structures */ free(br->res, M_BHND); free(br->res_spec, M_BHND); free(br->dw_alloc, M_BHND); } /** * Add a bus region entry to @p r for the given base @p addr and @p size. * * @param br The resource state to which the bus region entry will be added. * @param addr The base address of this region. 
* @param size The size of this region. * @param priority The resource priority to be assigned to allocations * made within this bus region. * @param static_regwin If available, a static register window mapping this * bus region entry. If not available, NULL. * * @retval 0 success * @retval non-zero if adding the bus region fails. */ int bhndb_add_resource_region(struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size, bhndb_priority_t priority, const struct bhndb_regwin *static_regwin) { struct bhndb_region *reg; /* Insert in the bus resource list */ reg = malloc(sizeof(*reg), M_BHND, M_NOWAIT); if (reg == NULL) return (ENOMEM); *reg = (struct bhndb_region) { .addr = addr, .size = size, .priority = priority, .static_regwin = static_regwin }; STAILQ_INSERT_HEAD(&br->bus_regions, reg, link); return (0); } /** * Find a bus region that maps @p size bytes at @p addr. * * @param br The resource state to search. * @param addr The requested starting address. * @param size The requested size. * * @retval bhndb_region A region that fully contains the requested range. * @retval NULL If no mapping region can be found. */ struct bhndb_region * bhndb_find_resource_region(struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size) { struct bhndb_region *region; STAILQ_FOREACH(region, &br->bus_regions, link) { /* Request must fit within the region's mapping */ if (addr < region->addr) continue; if (addr + size > region->addr + region->size) continue; return (region); } /* Not found */ return (NULL); } /** * Find the entry matching @p r in @p dwa's references, if any. * * @param dwa The dynamic window allocation to search * @param r The resource to search for in @p dwa. */ static struct bhndb_dw_rentry * bhndb_dw_find_resource_entry(struct bhndb_dw_alloc *dwa, struct resource *r) { struct bhndb_dw_rentry *rentry; LIST_FOREACH(rentry, &dwa->refs, dw_link) { struct resource *dw_res = rentry->dw_res; /* Match dev/rid/addr/size */ if (rman_get_device(dw_res) != rman_get_device(r) || rman_get_rid(dw_res) != rman_get_rid(r) || rman_get_start(dw_res) != rman_get_start(r) || rman_get_size(dw_res) != rman_get_size(r)) { continue; } /* Matching allocation found */ return (rentry); } return (NULL); } /** * Find the dynamic region allocated for @p r, if any. * * @param br The resource state to search. * @param r The resource to search for. * * @retval bhndb_dw_alloc The allocation record for @p r. * @retval NULL if no dynamic window is allocated for @p r. */ struct bhndb_dw_alloc * bhndb_dw_find_resource(struct bhndb_resources *br, struct resource *r) { struct bhndb_dw_alloc *dwa; for (size_t i = 0; i < br->dwa_count; i++) { dwa = &br->dw_alloc[i]; /* Skip free dynamic windows */ if (bhndb_dw_is_free(br, dwa)) continue; /* Matching allocation found? */ if (bhndb_dw_find_resource_entry(dwa, r) != NULL) return (dwa); } return (NULL); } /** * Find an existing dynamic window mapping @p size bytes * at @p addr. The window may or may not be free. * * @param br The resource state to search. * @param addr The requested starting address. * @param size The requested size. * * @retval bhndb_dw_alloc A window allocation that fully contains the requested * range. * @retval NULL If no mapping region can be found. */ struct bhndb_dw_alloc * bhndb_dw_find_mapping(struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size) { struct bhndb_dw_alloc *dwr; const struct bhndb_regwin *win; /* Search for an existing dynamic mapping of this address range. 
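 *
 * Window targets are aligned to the window size by bhndb_dw_set_addr(); as
 * a hypothetical example, a 4KB dynamic window retargeted for address
 * 0x18004234 has dwr->target == 0x18004000 and covers
 * [0x18004000, 0x18004FFF], so a later request for 0x100 bytes at
 * 0x18004200 is satisfied by the same mapping. The containment test below
 * reduces to:
 *
 *	addr >= dwr->target &&
 *	    addr + size <= dwr->target + win->win_size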
*/ for (size_t i = 0; i < br->dwa_count; i++) { dwr = &br->dw_alloc[i]; win = dwr->win; /* Verify the range */ if (addr < dwr->target) continue; if (addr + size > dwr->target + win->win_size) continue; /* Found a usable mapping */ return (dwr); } /* not found */ return (NULL); } /** * Retain a reference to @p dwa for use by @p res. * * @param br The resource state owning @p dwa. * @param dwa The allocation record to be retained. * @param res The resource that will own a reference to @p dwa. * * @retval 0 success * @retval ENOMEM Failed to allocate a new reference structure. */ int bhndb_dw_retain(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, struct resource *res) { struct bhndb_dw_rentry *rentry; KASSERT(bhndb_dw_find_resource_entry(dwa, res) == NULL, ("double-retain of dynamic window for same resource")); /* Insert a reference entry; we use M_NOWAIT to allow use from * within a non-sleepable lock */ rentry = malloc(sizeof(*rentry), M_BHND, M_NOWAIT); if (rentry == NULL) return (ENOMEM); rentry->dw_res = res; LIST_INSERT_HEAD(&dwa->refs, rentry, dw_link); /* Update the free list */ br->dwa_freelist &= ~(1 << (dwa->rnid)); return (0); } /** * Release a reference to @p dwa previously retained by @p res. If the * reference count of @p dwa reaches zero, it will be added to the * free list. * * @param br The resource state owning @p dwa. * @param dwa The allocation record to be released. * @param res The resource that currently owns a reference to @p dwa. */ void bhndb_dw_release(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, struct resource *r) { struct bhndb_dw_rentry *rentry; /* Find the rentry */ rentry = bhndb_dw_find_resource_entry(dwa, r); KASSERT(rentry != NULL, ("over release of resource entry")); LIST_REMOVE(rentry, dw_link); free(rentry, M_BHND); /* If this was the last reference, update the free list */ if (LIST_EMPTY(&dwa->refs)) br->dwa_freelist |= (1 << (dwa->rnid)); } /** * Attempt to set (or reset) the target address of @p dwa to map @p size bytes * at @p addr. * * This will apply any necessary window alignment and verify that * the window is capable of mapping the requested range prior to modifying * therecord. * * @param dev The device on which to issue the BHNDB_SET_WINDOW_ADDR() request. * @param br The resource state owning @p dwa. * @param dwa The allocation record to be configured. * @param addr The address to be mapped via @p dwa. * @param size The number of bytes to be mapped at @p addr. * * @retval 0 success * @retval non-zero no usable register window available. */ int bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size) { const struct bhndb_regwin *rw; bus_addr_t offset; int error; rw = dwa->win; KASSERT(bhndb_dw_is_free(br, dwa), ("attempting to set the target address on an in-use window")); /* Page-align the target address */ offset = addr % rw->win_size; dwa->target = addr - offset; /* Verify that the window is large enough for the full target */ if (rw->win_size - offset < size) return (ENOMEM); /* Update the window target */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) { dwa->target = 0x0; return (error); } return (0); } /** * Return the count of @p type register windows in @p table. * * @param table The table to search. * @param type The required window type, or BHNDB_REGWIN_T_INVALID to * count all register window types. 
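 *
 * A minimal usage sketch, matching how bhndb_alloc_resources() sizes its
 * dynamic window table:
 *
 *	r->dwa_count = bhndb_regwin_count(cfg->register_windows,
 *	    BHNDB_REGWIN_T_DYN);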
*/ size_t bhndb_regwin_count(const struct bhndb_regwin *table, bhndb_regwin_type_t type) { const struct bhndb_regwin *rw; size_t count; count = 0; for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) { if (type == BHNDB_REGWIN_T_INVALID || rw->win_type == type) count++; } return (count); } /** * Search @p table for the first window with the given @p type. * * @param table The table to search. * @param type The required window type. * @param min_size The minimum window size. * * @retval bhndb_regwin The first matching window. * @retval NULL If no window of the requested type could be found. */ const struct bhndb_regwin * bhndb_regwin_find_type(const struct bhndb_regwin *table, bhndb_regwin_type_t type, bus_size_t min_size) { const struct bhndb_regwin *rw; for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) { if (rw->win_type == type && rw->win_size >= min_size) return (rw); } return (NULL); } /** * Search @p windows for the first matching core window. * * @param table The table to search. * @param class The required core class. * @param unit The required core unit, or -1. * @param port_type The required port type. * @param port The required port. * @param region The required region. * * @retval bhndb_regwin The first matching window. * @retval NULL If no matching window was found. */ const struct bhndb_regwin * bhndb_regwin_find_core(const struct bhndb_regwin *table, bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port, u_int region) { const struct bhndb_regwin *rw; for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) { if (rw->win_type != BHNDB_REGWIN_T_CORE) continue; if (rw->core.class != class) continue; if (unit != -1 && rw->core.unit != unit) continue; if (rw->core.port_type != port_type) continue; if (rw->core.port != port) continue; if (rw->core.region != region) continue; return (rw); } return (NULL); } /** * Search @p windows for the best available window of at least @p min_size. * * Search order: * - BHND_REGWIN_T_CORE * - BHND_REGWIN_T_DYN * * @param table The table to search. * @param class The required core class. * @param unit The required core unit, or -1. * @param port_type The required port type. * @param port The required port. * @param region The required region. * @param min_size The minimum window size. * * @retval bhndb_regwin The first matching window. * @retval NULL If no matching window was found. */ const struct bhndb_regwin * bhndb_regwin_find_best(const struct bhndb_regwin *table, bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port, u_int region, bus_size_t min_size) { const struct bhndb_regwin *rw; /* Prefer a fixed core mapping */ rw = bhndb_regwin_find_core(table, class, unit, port_type, port, region); if (rw != NULL) return (rw); /* Fall back on a generic dynamic window */ return (bhndb_regwin_find_type(table, BHNDB_REGWIN_T_DYN, min_size)); } /** * Return true if @p regw defines a static port register window, and * the mapped port is actually defined on @p dev. * * @param regw A register window to match against. * @param dev A bhnd(4) bus device. */ bool bhndb_regwin_matches_device(const struct bhndb_regwin *regw, device_t dev) { /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) return (false); /* Device class must match */ if (bhnd_get_class(dev) != regw->core.class) return (false); /* Device unit must match */ if (bhnd_get_core_unit(dev) != regw->core.unit) return (false); /* The regwin port/region must be defined. 
*/ if (!bhnd_is_region_valid(dev, regw->core.port_type, regw->core.port, regw->core.region)) { return (false); } /* Matches */ return (true); } /** * Search for a core resource priority descriptor in @p table that matches * @p device. * * @param table The table to search. * @param device A bhnd(4) bus device. */ const struct bhndb_hw_priority * bhndb_hw_priority_find_device(const struct bhndb_hw_priority *table, device_t device) { const struct bhndb_hw_priority *hp; for (hp = table; hp->ports != NULL; hp++) { if (bhnd_device_matches(device, &hp->match)) return (hp); } /* not found */ return (NULL); } Index: head/sys/dev/bhnd/bhndb/bhndbvar.h =================================================================== --- head/sys/dev/bhnd/bhndb/bhndbvar.h (revision 298275) +++ head/sys/dev/bhnd/bhndb/bhndbvar.h (revision 298276) @@ -1,94 +1,103 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #ifndef _BHND_BHNDBVAR_H_ #define _BHND_BHNDBVAR_H_ #include #include #include #include #include #include #include #include #include "bhndb.h" #include "bhndb_if.h" /* * Definitions shared by bhndb(4) driver implementations. */ DECLARE_CLASS(bhndb_driver); struct bhndb_resources; int bhndb_attach(device_t dev, bhnd_devclass_t bridge_devclass); int bhndb_generic_probe(device_t dev); int bhndb_generic_detach(device_t dev); int bhndb_generic_suspend(device_t dev); int bhndb_generic_resume(device_t dev); int bhndb_generic_init_full_config(device_t dev, device_t child, const struct bhndb_hw_priority *hw_prio_table); int bhnd_generic_br_suspend_child(device_t dev, device_t child); int bhnd_generic_br_resume_child(device_t dev, device_t child); +/** + * bhndb child address space. Children either operate in the bridged + * SoC address space, or within the address space mapped to the host + * device (e.g. the PCI BAR(s)). + */ +typedef enum { + BHNDB_ADDRSPACE_BRIDGED, /**< bridged (SoC) address space */ + BHNDB_ADDRSPACE_NATIVE /**< host address space */ +} bhndb_addrspace; + /** bhndb child instance state */ struct bhndb_devinfo { - struct resource_list resources; /**< child resources. 
*/ + bhndb_addrspace addrspace; /**< child address space. */ + struct resource_list resources; /**< child resources. */ }; /** * bhndb driver instance state. Must be first member of all subclass * softc structures. */ struct bhndb_softc { device_t dev; /**< bridge device */ struct bhnd_chipid chipid; /**< chip identification */ bhnd_devclass_t bridge_class; /**< bridge core type */ device_t parent_dev; /**< parent device */ device_t bus_dev; /**< child bhnd(4) bus */ device_t hostb_dev; /**< child host bridge device, or NULL if the @p bus_dev has not yet called BHNDB_INIT_FULL_CONFIG() */ - struct rman mem_rman; /**< bridged bus memory manager */ struct mtx sc_mtx; /**< resource lock. */ - struct bhndb_resources *bus_res; /**< bus resource state */ }; #endif /* _BHND_BHNDBVAR_H_ */
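Taken together, the new bhndb_addrspace child property and the per-bhndb_resources ht_mem_rman/br_mem_rman managers let the bridge serve child allocations from the correct address space. The following is a minimal sketch of that selection, assuming a bhndb_get_addrspace() helper that reports a child's configured address space and a bus_res pointer retained in the softc; the helper name and the exact shape of the lookup are assumptions for illustration (to be read alongside the headers above), not the driver's actual implementation:

static struct rman *
bhndb_example_get_rman(struct bhndb_softc *sc, device_t child, int type)
{
	switch (type) {
	case SYS_RES_MEMORY:
		/* Children operating in the bridged SoC address space are
		 * served from br_mem_rman; children addressing the host's
		 * own BAR space (e.g. the PCIe MDIO child) are served from
		 * ht_mem_rman. (bhndb_get_addrspace() is assumed here.) */
		if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED)
			return (&sc->bus_res->br_mem_rman);
		return (&sc->bus_res->ht_mem_rman);
	default:
		/* Only memory ranges are managed by the bridge's rmans. */
		return (NULL);
	}
}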