Index: head/sys/dev/bhnd/bcma/bcma.c =================================================================== --- head/sys/dev/bhnd/bcma/bcma.c (revision 324070) +++ head/sys/dev/bhnd/bcma/bcma.c (revision 324071) @@ -1,800 +1,804 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "bcma_dmp.h" #include "bcma_eromreg.h" #include "bcma_eromvar.h" #include "bcmavar.h" /* RID used when allocating EROM table */ #define BCMA_EROM_RID 0 static bhnd_erom_class_t * bcma_get_erom_class(driver_t *driver) { return (&bcma_erom_parser); } int bcma_probe(device_t dev) { device_set_desc(dev, "BCMA BHND bus"); return (BUS_PROBE_DEFAULT); } /** * Default bcma(4) bus driver implementation of DEVICE_ATTACH(). * * This implementation initializes internal bcma(4) state and performs * bus enumeration, and must be called by subclassing drivers in * DEVICE_ATTACH() before any other bus methods. */ int bcma_attach(device_t dev) { int error; /* Enumerate children */ if ((error = bcma_add_children(dev))) { device_delete_children(dev); return (error); } return (0); } int bcma_detach(device_t dev) { return (bhnd_generic_detach(dev)); } static device_t bcma_add_child(device_t dev, u_int order, const char *name, int unit) { struct bcma_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); if ((dinfo = bcma_alloc_dinfo(dev)) == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, dinfo); return (child); } static void bcma_child_deleted(device_t dev, device_t child) { struct bhnd_softc *sc; struct bcma_devinfo *dinfo; sc = device_get_softc(dev); /* Call required bhnd(4) implementation */ bhnd_generic_child_deleted(dev, child); /* Free bcma device info */ if ((dinfo = device_get_ivars(child)) != NULL) bcma_free_dinfo(dev, dinfo); device_set_ivars(child, NULL); } static int bcma_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { const struct bcma_devinfo *dinfo; const struct bhnd_core_info *ci; dinfo = device_get_ivars(child); ci = &dinfo->corecfg->core_info; switch (index) { case BHND_IVAR_VENDOR: *result = ci->vendor; return (0); case 
BHND_IVAR_DEVICE: *result = ci->device; return (0); case BHND_IVAR_HWREV: *result = ci->hwrev; return (0); case BHND_IVAR_DEVICE_CLASS: *result = bhnd_core_class(ci); return (0); case BHND_IVAR_VENDOR_NAME: *result = (uintptr_t) bhnd_vendor_name(ci->vendor); return (0); case BHND_IVAR_DEVICE_NAME: *result = (uintptr_t) bhnd_core_name(ci); return (0); case BHND_IVAR_CORE_INDEX: *result = ci->core_idx; return (0); case BHND_IVAR_CORE_UNIT: *result = ci->unit; return (0); case BHND_IVAR_PMU_INFO: *result = (uintptr_t) dinfo->pmu_info; return (0); default: return (ENOENT); } } static int bcma_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct bcma_devinfo *dinfo; dinfo = device_get_ivars(child); switch (index) { case BHND_IVAR_VENDOR: case BHND_IVAR_DEVICE: case BHND_IVAR_HWREV: case BHND_IVAR_DEVICE_CLASS: case BHND_IVAR_VENDOR_NAME: case BHND_IVAR_DEVICE_NAME: case BHND_IVAR_CORE_INDEX: case BHND_IVAR_CORE_UNIT: return (EINVAL); case BHND_IVAR_PMU_INFO: dinfo->pmu_info = (struct bhnd_core_pmu_info *) value; return (0); default: return (ENOENT); } } static struct resource_list * bcma_get_resource_list(device_t dev, device_t child) { struct bcma_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } static int bcma_read_iost(device_t dev, device_t child, uint16_t *iost) { uint32_t value; int error; if ((error = bhnd_read_config(child, BCMA_DMP_IOSTATUS, &value, 4))) return (error); /* Return only the bottom 16 bits */ *iost = (value & BCMA_DMP_IOST_MASK); return (0); } static int bcma_read_ioctl(device_t dev, device_t child, uint16_t *ioctl) { uint32_t value; int error; if ((error = bhnd_read_config(child, BCMA_DMP_IOCTRL, &value, 4))) return (error); /* Return only the bottom 16 bits */ *ioctl = (value & BCMA_DMP_IOCTRL_MASK); return (0); } static int bcma_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask) { struct bcma_devinfo *dinfo; struct bhnd_resource *r; uint32_t ioctl; if 
(device_get_parent(child) != dev) return (EINVAL); dinfo = device_get_ivars(child); if ((r = dinfo->res_agent) == NULL) return (ENODEV); /* Write new value */ ioctl = bhnd_bus_read_4(r, BCMA_DMP_IOCTRL); ioctl &= ~(BCMA_DMP_IOCTRL_MASK & mask); ioctl |= (value & mask); bhnd_bus_write_4(r, BCMA_DMP_IOCTRL, ioctl); /* Perform read-back and wait for completion */ bhnd_bus_read_4(r, BCMA_DMP_IOCTRL); DELAY(10); return (0); } static bool bcma_is_hw_suspended(device_t dev, device_t child) { uint32_t rst; uint16_t ioctl; int error; /* Is core held in RESET? */ error = bhnd_read_config(child, BCMA_DMP_RESETCTRL, &rst, 4); if (error) { device_printf(child, "error reading HW reset state: %d\n", error); return (true); } if (rst & BCMA_DMP_RC_RESET) return (true); /* Is core clocked? */ error = bhnd_read_ioctl(child, &ioctl); if (error) { device_printf(child, "error reading HW ioctl register: %d\n", error); return (true); } if (!(ioctl & BHND_IOCTL_CLK_EN)) return (true); return (false); } static int bcma_reset_hw(device_t dev, device_t child, uint16_t ioctl) { struct bcma_devinfo *dinfo; struct bhnd_core_pmu_info *pm; struct bhnd_resource *r; int error; if (device_get_parent(child) != dev) return (EINVAL); dinfo = device_get_ivars(child); pm = dinfo->pmu_info; /* We require exclusive control over BHND_IOCTL_CLK_EN and * BHND_IOCTL_CLK_FORCE. */ if (ioctl & (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE)) return (EINVAL); /* Can't suspend the core without access to the agent registers */ if ((r = dinfo->res_agent) == NULL) return (ENODEV); /* Place core into known RESET state */ if ((error = BHND_BUS_SUSPEND_HW(dev, child))) return (error); /* * Leaving the core in reset: * - Set the caller's IOCTL flags * - Enable clocks * - Force clock distribution to ensure propagation throughout the * core. 
*/ error = bhnd_write_ioctl(child, ioctl | BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE, UINT16_MAX); if (error) return (error); /* Bring the core out of reset */ if ((error = bcma_dmp_write_reset(child, dinfo, 0x0))) return (error); /* Disable forced clock gating (leaving clock enabled) */ error = bhnd_write_ioctl(child, 0x0, BHND_IOCTL_CLK_FORCE); if (error) return (error); return (0); } static int bcma_suspend_hw(device_t dev, device_t child) { struct bcma_devinfo *dinfo; struct bhnd_core_pmu_info *pm; struct bhnd_resource *r; uint32_t rst; int error; if (device_get_parent(child) != dev) return (EINVAL); dinfo = device_get_ivars(child); pm = dinfo->pmu_info; /* Can't suspend the core without access to the agent registers */ if ((r = dinfo->res_agent) == NULL) return (ENODEV); /* Wait for any pending reset operations to clear */ if ((error = bcma_dmp_wait_reset(child, dinfo))) return (error); /* Already in reset? */ rst = bhnd_bus_read_4(r, BCMA_DMP_RESETCTRL); if (rst & BCMA_DMP_RC_RESET) return (0); /* Put core into reset */ if ((error = bcma_dmp_write_reset(child, dinfo, BCMA_DMP_RC_RESET))) return (error); /* Clear core flags */ if ((error = bhnd_write_ioctl(child, 0x0, UINT16_MAX))) return (error); /* Inform PMU that all outstanding request state should be discarded */ if (pm != NULL) { if ((error = BHND_PMU_CORE_RELEASE(pm->pm_pmu, pm))) return (error); } return (0); } static int bcma_read_config(device_t dev, device_t child, bus_size_t offset, void *value, u_int width) { struct bcma_devinfo *dinfo; struct bhnd_resource *r; /* Must be a directly attached child core */ if (device_get_parent(child) != dev) return (EINVAL); /* Fetch the agent registers */ dinfo = device_get_ivars(child); if ((r = dinfo->res_agent) == NULL) return (ENODEV); /* Verify bounds */ if (offset > rman_get_size(r->res)) return (EFAULT); if (rman_get_size(r->res) - offset < width) return (EFAULT); switch (width) { case 1: *((uint8_t *)value) = bhnd_bus_read_1(r, offset); return (0); case 2: 
*((uint16_t *)value) = bhnd_bus_read_2(r, offset); return (0); case 4: *((uint32_t *)value) = bhnd_bus_read_4(r, offset); return (0); default: return (EINVAL); } } static int bcma_write_config(device_t dev, device_t child, bus_size_t offset, const void *value, u_int width) { struct bcma_devinfo *dinfo; struct bhnd_resource *r; /* Must be a directly attached child core */ if (device_get_parent(child) != dev) return (EINVAL); /* Fetch the agent registers */ dinfo = device_get_ivars(child); if ((r = dinfo->res_agent) == NULL) return (ENODEV); /* Verify bounds */ if (offset > rman_get_size(r->res)) return (EFAULT); if (rman_get_size(r->res) - offset < width) return (EFAULT); switch (width) { case 1: bhnd_bus_write_1(r, offset, *(const uint8_t *)value); return (0); case 2: bhnd_bus_write_2(r, offset, *(const uint16_t *)value); return (0); case 4: bhnd_bus_write_4(r, offset, *(const uint32_t *)value); return (0); default: return (EINVAL); } } static u_int bcma_get_port_count(device_t dev, device_t child, bhnd_port_type type) { struct bcma_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child, type)); dinfo = device_get_ivars(child); switch (type) { case BHND_PORT_DEVICE: return (dinfo->corecfg->num_dev_ports); case BHND_PORT_BRIDGE: return (dinfo->corecfg->num_bridge_ports); case BHND_PORT_AGENT: return (dinfo->corecfg->num_wrapper_ports); default: device_printf(dev, "%s: unknown type (%d)\n", __func__, type); return (0); } } static u_int bcma_get_region_count(device_t dev, device_t child, bhnd_port_type type, u_int port_num) { struct bcma_devinfo *dinfo; struct bcma_sport_list *ports; struct bcma_sport *port; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child, type, port_num)); dinfo = device_get_ivars(child); ports = 
bcma_corecfg_get_port_list(dinfo->corecfg, type); STAILQ_FOREACH(port, ports, sp_link) { if (port->sp_num == port_num) return (port->sp_num_maps); } /* not found */ return (0); } static int bcma_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num) { struct bcma_devinfo *dinfo; struct bcma_map *map; struct bcma_sport_list *ports; struct bcma_sport *port; dinfo = device_get_ivars(child); ports = bcma_corecfg_get_port_list(dinfo->corecfg, port_type); STAILQ_FOREACH(port, ports, sp_link) { if (port->sp_num != port_num) continue; STAILQ_FOREACH(map, &port->sp_maps, m_link) if (map->m_region_num == region_num) return map->m_rid; } return -1; } static int bcma_decode_port_rid(device_t dev, device_t child, int type, int rid, bhnd_port_type *port_type, u_int *port_num, u_int *region_num) { struct bcma_devinfo *dinfo; struct bcma_map *map; struct bcma_sport_list *ports; struct bcma_sport *port; dinfo = device_get_ivars(child); /* Ports are always memory mapped */ if (type != SYS_RES_MEMORY) return (EINVAL); /* Starting with the most likely device list, search all three port * lists */ bhnd_port_type types[] = { BHND_PORT_DEVICE, BHND_PORT_AGENT, BHND_PORT_BRIDGE }; for (int i = 0; i < nitems(types); i++) { ports = bcma_corecfg_get_port_list(dinfo->corecfg, types[i]); STAILQ_FOREACH(port, ports, sp_link) { STAILQ_FOREACH(map, &port->sp_maps, m_link) { if (map->m_rid != rid) continue; *port_type = port->sp_type; *port_num = port->sp_num; *region_num = map->m_region_num; return (0); } } } return (ENOENT); } static int bcma_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size) { struct bcma_devinfo *dinfo; struct bcma_map *map; struct bcma_sport_list *ports; struct bcma_sport *port; dinfo = device_get_ivars(child); ports = bcma_corecfg_get_port_list(dinfo->corecfg, port_type); /* Search the port list */ STAILQ_FOREACH(port, ports, sp_link) { 
if (port->sp_num != port_num) continue; STAILQ_FOREACH(map, &port->sp_maps, m_link) { if (map->m_region_num != region_num) continue; /* Found! */ *addr = map->m_base; *size = map->m_size; return (0); } } return (ENOENT); } /** * Default bcma(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT(). * * This implementation consults @p child's agent register block, * returning the number of interrupt output lines routed to @p child. */ int bcma_get_intr_count(device_t dev, device_t child) { struct bcma_devinfo *dinfo; uint32_t dmpcfg, oobw; dinfo = device_get_ivars(child); /* Agent block must be mapped */ if (dinfo->res_agent == NULL) return (0); /* Agent must support OOB */ dmpcfg = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_CONFIG); if (!BCMA_DMP_GET_FLAG(dmpcfg, BCMA_DMP_CFG_OOB)) return (0); /* Return OOB width as interrupt count */ oobw = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_OOB_OUTWIDTH(BCMA_OOB_BANK_INTR)); if (oobw > BCMA_OOB_NUM_SEL) { device_printf(dev, "ignoring invalid OOBOUTWIDTH for core %u: " "%#x\n", BCMA_DINFO_COREIDX(dinfo), oobw); return (0); } return (oobw); } /** * Default bcma(4) bus driver implementation of BHND_BUS_GET_CORE_IVEC(). * * This implementation consults @p child's agent register block, * returning the interrupt output line routed to @p child, at OOB selector * @p intr. */ int bcma_get_core_ivec(device_t dev, device_t child, u_int intr, uint32_t *ivec) { struct bcma_devinfo *dinfo; uint32_t oobsel; dinfo = device_get_ivars(child); /* Interrupt ID must be valid. */ if (intr >= bcma_get_intr_count(dev, child)) return (ENXIO); /* Fetch OOBSEL busline value */ KASSERT(dinfo->res_agent != NULL, ("missing agent registers")); oobsel = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_OOBSELOUT( BCMA_OOB_BANK_INTR, intr)); *ivec = (oobsel >> BCMA_DMP_OOBSEL_SHIFT(intr)) & BCMA_DMP_OOBSEL_BUSLINE_MASK; return (0); } /** * Scan the device enumeration ROM table, adding all valid discovered cores to * the bus. * * @param bus The bcma bus. 
*/ int bcma_add_children(device_t bus) { bhnd_erom_t *erom; struct bcma_erom *bcma_erom; + struct bhnd_erom_io *eio; const struct bhnd_chipid *cid; struct bcma_corecfg *corecfg; struct bcma_devinfo *dinfo; device_t child; int error; cid = BHND_BUS_GET_CHIPID(bus, bus); corecfg = NULL; /* Allocate our EROM parser */ - erom = bhnd_erom_alloc(&bcma_erom_parser, cid, bus, BCMA_EROM_RID); - if (erom == NULL) + eio = bhnd_erom_iores_new(bus, BCMA_EROM_RID); + erom = bhnd_erom_alloc(&bcma_erom_parser, cid, eio); + if (erom == NULL) { + bhnd_erom_io_fini(eio); return (ENODEV); + } /* Add all cores. */ bcma_erom = (struct bcma_erom *)erom; while ((error = bcma_erom_next_corecfg(bcma_erom, &corecfg)) == 0) { int nintr; /* Add the child device */ child = BUS_ADD_CHILD(bus, 0, NULL, -1); if (child == NULL) { error = ENXIO; goto cleanup; } /* Initialize device ivars */ dinfo = device_get_ivars(child); if ((error = bcma_init_dinfo(bus, dinfo, corecfg))) goto cleanup; /* The dinfo instance now owns the corecfg value */ corecfg = NULL; /* Allocate device's agent registers, if any */ if ((error = bcma_dinfo_alloc_agent(bus, child, dinfo))) goto cleanup; /* Assign interrupts */ nintr = bhnd_get_intr_count(child); for (int rid = 0; rid < nintr; rid++) { error = BHND_BUS_ASSIGN_INTR(bus, child, rid); if (error) { device_printf(bus, "failed to assign interrupt " "%d to core %u: %d\n", rid, BCMA_DINFO_COREIDX(dinfo), error); } } /* If pins are floating or the hardware is otherwise * unpopulated, the device shouldn't be used. */ if (bhnd_is_hw_disabled(child)) device_disable(child); /* Issue bus callback for fully initialized child. 
*/ BHND_BUS_CHILD_ADDED(bus, child); } /* EOF while parsing cores is expected */ if (error == ENOENT) error = 0; cleanup: bhnd_erom_free(erom); if (corecfg != NULL) bcma_free_corecfg(corecfg); if (error) device_delete_children(bus); return (error); } static device_method_t bcma_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bcma_probe), DEVMETHOD(device_attach, bcma_attach), DEVMETHOD(device_detach, bcma_detach), /* Bus interface */ DEVMETHOD(bus_add_child, bcma_add_child), DEVMETHOD(bus_child_deleted, bcma_child_deleted), DEVMETHOD(bus_read_ivar, bcma_read_ivar), DEVMETHOD(bus_write_ivar, bcma_write_ivar), DEVMETHOD(bus_get_resource_list, bcma_get_resource_list), /* BHND interface */ DEVMETHOD(bhnd_bus_get_erom_class, bcma_get_erom_class), DEVMETHOD(bhnd_bus_read_ioctl, bcma_read_ioctl), DEVMETHOD(bhnd_bus_write_ioctl, bcma_write_ioctl), DEVMETHOD(bhnd_bus_read_iost, bcma_read_iost), DEVMETHOD(bhnd_bus_is_hw_suspended, bcma_is_hw_suspended), DEVMETHOD(bhnd_bus_reset_hw, bcma_reset_hw), DEVMETHOD(bhnd_bus_suspend_hw, bcma_suspend_hw), DEVMETHOD(bhnd_bus_read_config, bcma_read_config), DEVMETHOD(bhnd_bus_write_config, bcma_write_config), DEVMETHOD(bhnd_bus_get_port_count, bcma_get_port_count), DEVMETHOD(bhnd_bus_get_region_count, bcma_get_region_count), DEVMETHOD(bhnd_bus_get_port_rid, bcma_get_port_rid), DEVMETHOD(bhnd_bus_decode_port_rid, bcma_decode_port_rid), DEVMETHOD(bhnd_bus_get_region_addr, bcma_get_region_addr), DEVMETHOD(bhnd_bus_get_intr_count, bcma_get_intr_count), DEVMETHOD(bhnd_bus_get_core_ivec, bcma_get_core_ivec), DEVMETHOD_END }; DEFINE_CLASS_1(bhnd, bcma_driver, bcma_methods, sizeof(struct bcma_softc), bhnd_driver); MODULE_VERSION(bcma, 1); MODULE_DEPEND(bcma, bhnd, 1, 1, 1); Index: head/sys/dev/bhnd/bcma/bcma_erom.c =================================================================== --- head/sys/dev/bhnd/bcma/bcma_erom.c (revision 324070) +++ head/sys/dev/bhnd/bcma/bcma_erom.c (revision 324071) @@ -1,1536 +1,1430 @@ /*- - * 
Copyright (c) 2015 Landon Fuller + * Copyright (c) 2015-2017 Landon Fuller + * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * + * Portions of this software were developed by Landon Fuller + * under sponsorship from the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "bcma_eromreg.h" #include "bcma_eromvar.h" /* * BCMA Enumeration ROM (EROM) Table * * Provides auto-discovery of BCMA cores on Broadcom's HND SoC. * * The EROM core address can be found at BCMA_CC_EROM_ADDR within the * ChipCommon registers. 
The table itself is comprised of 32-bit * type-tagged entries, organized into an array of variable-length * core descriptor records. * * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF) * marker. */ -struct bcma_erom_io; - static const char *bcma_erom_entry_type_name (uint8_t entry); -static uint32_t bcma_eio_read4(struct bcma_erom_io *io, - bus_size_t offset); - static int bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry); static int bcma_erom_skip32(struct bcma_erom *erom); static int bcma_erom_skip_core(struct bcma_erom *erom); static int bcma_erom_skip_mport(struct bcma_erom *erom); static int bcma_erom_skip_sport_region(struct bcma_erom *erom); static int bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype); static int bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type, bhnd_port_type *port_type); static int bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry); static bus_size_t bcma_erom_tell(struct bcma_erom *erom); static void bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset); static void bcma_erom_reset(struct bcma_erom *erom); static int bcma_erom_seek_matching_core(struct bcma_erom *sc, const struct bhnd_core_match *desc, struct bhnd_core_info *core); static int bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core); static int bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport); static int bcma_erom_parse_sport_region(struct bcma_erom *erom, struct bcma_erom_sport_region *region); static void bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx, int core_unit, struct bhnd_core_info *info); /** - * BCMA EROM generic I/O context - */ -struct bcma_erom_io { - struct bhnd_resource *res; /**< memory resource, or NULL if initialized - with bus space tag and handle */ - int rid; /**< memory resource id, or -1 */ - - bus_space_tag_t bst; /**< bus space tag, if any */ - bus_space_handle_t bsh; /**< bus space handle, if any 
*/ - - bus_size_t start; /**< base read offset */ -}; - -/** * BCMA EROM per-instance state. */ struct bcma_erom { - struct bhnd_erom obj; - device_t dev; /**< parent device, or NULL if none. */ - struct bcma_erom_io io; /**< I/O context */ - bus_size_t offset; /**< current read offset */ + struct bhnd_erom obj; + device_t dev; /**< parent device, or NULL if none. */ + struct bhnd_erom_io *eio; /**< bus I/O callbacks */ + bhnd_size_t offset; /**< current read offset */ }; -#define EROM_LOG(erom, fmt, ...) do { \ - if (erom->dev != NULL) { \ - device_printf(erom->dev, "erom[0x%llx]: " fmt, \ - (unsigned long long) (erom->offset), ##__VA_ARGS__);\ - } else { \ - printf("erom[0x%llx]: " fmt, \ - (unsigned long long) (erom->offset), ##__VA_ARGS__);\ - } \ +#define EROM_LOG(erom, fmt, ...) do { \ + printf("%s erom[0x%llx]: " fmt, __FUNCTION__, \ + (unsigned long long)(erom->offset), ##__VA_ARGS__); \ } while(0) /** Return the type name for an EROM entry */ static const char * bcma_erom_entry_type_name (uint8_t entry) { switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) { case BCMA_EROM_ENTRY_TYPE_CORE: return "core"; case BCMA_EROM_ENTRY_TYPE_MPORT: return "mport"; case BCMA_EROM_ENTRY_TYPE_REGION: return "region"; default: return "unknown"; } } - -/** - * Read a 32-bit value from an EROM I/O context. - * - * @param io EROM I/O context. - * @param offset Read offset. 
- */ -static uint32_t -bcma_eio_read4(struct bcma_erom_io *io, bus_size_t offset) -{ - bus_size_t read_off; - - read_off = io->start + offset; - if (io->res != NULL) - return (bhnd_bus_read_4(io->res, read_off)); - else - return (bus_space_read_4(io->bst, io->bsh, read_off)); -} - -/* Initialize bcma_erom resource I/O context */ -static void -bcma_eio_init(struct bcma_erom_io *io, struct bhnd_resource *res, int rid, - bus_size_t offset) -{ - io->res = res; - io->rid = rid; - io->start = offset; -} - -/* Initialize bcma_erom bus space I/O context */ -static void -bcma_eio_init_static(struct bcma_erom_io *io, bus_space_tag_t bst, - bus_space_handle_t bsh, bus_size_t offset) -{ - io->res = NULL; - io->rid = -1; - io->bst = bst; - io->bsh = bsh; - io->start = offset; -} - /* BCMA implementation of BHND_EROM_INIT() */ static int bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid, - device_t parent, int rid) + struct bhnd_erom_io *eio) { struct bcma_erom *sc; - struct bhnd_resource *res; + bhnd_addr_t table_addr; + int error; sc = (struct bcma_erom *)erom; - sc->dev = parent; + sc->eio = eio; sc->offset = 0; - res = bhnd_alloc_resource(parent, SYS_RES_MEMORY, &rid, cid->enum_addr, - cid->enum_addr + BCMA_EROM_TABLE_SIZE - 1, BCMA_EROM_TABLE_SIZE, - RF_ACTIVE|RF_SHAREABLE); + /* Determine erom table address */ + if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr) + return (ENXIO); /* would overflow */ - if (res == NULL) - return (ENOMEM); + table_addr = cid->enum_addr + BCMA_EROM_TABLE_START; - bcma_eio_init(&sc->io, res, rid, BCMA_EROM_TABLE_START); + /* Try to map the erom table */ + error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE); + if (error) + return (error); return (0); } -/* BCMA implementation of BHND_EROM_INIT_STATIC() */ +/* BCMA implementation of BHND_EROM_PROBE() */ static int -bcma_erom_init_static(bhnd_erom_t *erom, const struct bhnd_chipid *cid, - bus_space_tag_t bst, bus_space_handle_t bsh) 
+bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio, + const struct bhnd_chipid *hint, struct bhnd_chipid *cid) { - struct bcma_erom *sc; + uint32_t idreg, eromptr; - sc = (struct bcma_erom *)erom; - sc->dev = NULL; - sc->offset = 0; - - bcma_eio_init_static(&sc->io, bst, bsh, BCMA_EROM_TABLE_START); - - return (0); -} - -/* Common implementation of BHND_EROM_PROBE/BHND_EROM_PROBE_STATIC */ -static int -bcma_erom_probe_common(struct bcma_erom_io *io, const struct bhnd_chipid *hint, - struct bhnd_chipid *cid) -{ - uint32_t idreg, eromptr; - /* Hints aren't supported; all BCMA devices have a ChipCommon * core */ if (hint != NULL) return (EINVAL); - /* Confirm CHIPC_EROMPTR availability */ - idreg = bcma_eio_read4(io, CHIPC_ID); + /* Confirm CHIPC_EROMPTR availability */ + idreg = bhnd_erom_io_read(eio, CHIPC_ID, 4); if (!BHND_CHIPTYPE_HAS_EROM(CHIPC_GET_BITS(idreg, CHIPC_ID_BUS))) return (ENXIO); /* Fetch EROM address */ - eromptr = bcma_eio_read4(io, CHIPC_EROMPTR); + eromptr = bhnd_erom_io_read(eio, CHIPC_EROMPTR, 4); /* Parse chip identifier */ *cid = bhnd_parse_chipid(idreg, eromptr); /* Verify chip type */ switch (cid->chip_type) { case BHND_CHIPTYPE_BCMA: return (BUS_PROBE_DEFAULT); case BHND_CHIPTYPE_BCMA_ALT: case BHND_CHIPTYPE_UBUS: return (BUS_PROBE_GENERIC); default: return (ENXIO); } } -static int -bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_resource *res, - bus_size_t offset, const struct bhnd_chipid *hint, struct bhnd_chipid *cid) -{ - struct bcma_erom_io io; - - bcma_eio_init(&io, res, rman_get_rid(res->res), - offset + BCMA_EROM_TABLE_START); - - return (bcma_erom_probe_common(&io, hint, cid)); -} - -static int -bcma_erom_probe_static(bhnd_erom_class_t *cls, bus_space_tag_t bst, - bus_space_handle_t bsh, bus_addr_t paddr, const struct bhnd_chipid *hint, - struct bhnd_chipid *cid) -{ - struct bcma_erom_io io; - - bcma_eio_init_static(&io, bst, bsh, BCMA_EROM_TABLE_START); - return (bcma_erom_probe_common(&io, hint, cid)); -} - - 
static void bcma_erom_fini(bhnd_erom_t *erom) { struct bcma_erom *sc = (struct bcma_erom *)erom; - if (sc->io.res != NULL) { - bhnd_release_resource(sc->dev, SYS_RES_MEMORY, sc->io.rid, - sc->io.res); - - sc->io.res = NULL; - sc->io.rid = -1; - } + bhnd_erom_io_fini(sc->eio); } static int bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc, struct bhnd_core_info *core) { struct bcma_erom *sc = (struct bcma_erom *)erom; /* Search for the first matching core */ return (bcma_erom_seek_matching_core(sc, desc, core)); } static int bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc, bhnd_port_type port_type, u_int port_num, u_int region_num, struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size) { struct bcma_erom *sc; struct bcma_erom_core ec; uint32_t entry; uint8_t region_port, region_type; bool found; int error; sc = (struct bcma_erom *)erom; /* Seek to the first matching core and provide the core info * to the caller */ if ((error = bcma_erom_seek_matching_core(sc, desc, core))) return (error); if ((error = bcma_erom_parse_core(sc, &ec))) return (error); /* Skip master ports */ for (u_long i = 0; i < ec.num_mport; i++) { if ((error = bcma_erom_skip_mport(sc))) return (error); } /* Seek to the region block for the given port type */ found = false; while (1) { bhnd_port_type p_type; uint8_t r_type; if ((error = bcma_erom_peek32(sc, &entry))) return (error); if (!BCMA_EROM_ENTRY_IS(entry, REGION)) return (ENOENT); /* Expected region type? 
*/ r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE); error = bcma_erom_region_to_port_type(sc, r_type, &p_type); if (error) return (error); if (p_type == port_type) { found = true; break; } /* Skip to next entry */ if ((error = bcma_erom_skip_sport_region(sc))) return (error); } if (!found) return (ENOENT); /* Found the appropriate port type block; now find the region records * for the given port number */ found = false; for (u_int i = 0; i <= port_num; i++) { bhnd_port_type p_type; if ((error = bcma_erom_peek32(sc, &entry))) return (error); if (!BCMA_EROM_ENTRY_IS(entry, REGION)) return (ENOENT); /* Fetch the type/port of the first region entry */ region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE); region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT); /* Have we found the region entries for the desired port? */ if (i == port_num) { error = bcma_erom_region_to_port_type(sc, region_type, &p_type); if (error) return (error); if (p_type == port_type) found = true; break; } /* Otherwise, seek to next block of region records */ while (1) { uint8_t next_type, next_port; if ((error = bcma_erom_skip_sport_region(sc))) return (error); if ((error = bcma_erom_peek32(sc, &entry))) return (error); if (!BCMA_EROM_ENTRY_IS(entry, REGION)) return (ENOENT); next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE); next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT); if (next_type != region_type || next_port != region_port) break; } } if (!found) return (ENOENT); /* Finally, search for the requested region number */ for (u_int i = 0; i <= region_num; i++) { struct bcma_erom_sport_region region; uint8_t next_port, next_type; if ((error = bcma_erom_peek32(sc, &entry))) return (error); if (!BCMA_EROM_ENTRY_IS(entry, REGION)) return (ENOENT); /* Check for the end of the region block */ next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE); next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT); if (next_type != region_type || next_port != region_port) break; /* Parse the region */ if ((error = 
bcma_erom_parse_sport_region(sc, ®ion))) return (error); /* Is this our target region_num? */ if (i == region_num) { /* Found */ *addr = region.base_addr; *size = region.size; return (0); } } /* Not found */ return (ENOENT); }; static int bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores, u_int *num_cores) { struct bcma_erom *sc; struct bhnd_core_info *buffer; bus_size_t initial_offset; u_int count; int error; sc = (struct bcma_erom *)erom; buffer = NULL; initial_offset = bcma_erom_tell(sc); /* Determine the core count */ bcma_erom_reset(sc); for (count = 0, error = 0; !error; count++) { struct bcma_erom_core core; /* Seek to the first readable core entry */ error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE); if (error == ENOENT) break; else if (error) goto cleanup; /* Read past the core descriptor */ if ((error = bcma_erom_parse_core(sc, &core))) goto cleanup; } /* Allocate our output buffer */ buffer = malloc(sizeof(struct bhnd_core_info) * count, M_BHND, M_NOWAIT); if (buffer == NULL) { error = ENOMEM; goto cleanup; } /* Parse all core descriptors */ bcma_erom_reset(sc); for (u_int i = 0; i < count; i++) { struct bcma_erom_core core; int unit; /* Parse the core */ error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE); if (error) goto cleanup; error = bcma_erom_parse_core(sc, &core); if (error) goto cleanup; /* Determine the unit number */ unit = 0; for (u_int j = 0; j < i; j++) { if (buffer[i].vendor == buffer[j].vendor && buffer[i].device == buffer[j].device) unit++; } /* Convert to a bhnd info record */ bcma_erom_to_core_info(&core, i, unit, &buffer[i]); } cleanup: if (!error) { *cores = buffer; *num_cores = count; } else { if (buffer != NULL) free(buffer, M_BHND); } /* Restore the initial position */ bcma_erom_seek(sc, initial_offset); return (error); } static void bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores) { free(cores, M_BHND); } /** * Return the current read position. 
*/ static bus_size_t bcma_erom_tell(struct bcma_erom *erom) { return (erom->offset); } /** * Seek to an absolute read position. */ static void bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset) { erom->offset = offset; } /** * Read a 32-bit entry value from the EROM table without advancing the * read position. * * @param erom EROM read state. * @param entry Will contain the read result on success. * @retval 0 success * @retval ENOENT The end of the EROM table was reached. * @retval non-zero The read could not be completed. */ static int bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry) { if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) { EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n"); return (EINVAL); } - - *entry = bcma_eio_read4(&erom->io, erom->offset); + + *entry = bhnd_erom_io_read(erom->eio, erom->offset, 4); return (0); } /** * Read a 32-bit entry value from the EROM table. * * @param erom EROM read state. * @param entry Will contain the read result on success. * @retval 0 success * @retval ENOENT The end of the EROM table was reached. * @retval non-zero The read could not be completed. */ static int bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry) { int error; if ((error = bcma_erom_peek32(erom, entry)) == 0) erom->offset += 4; return (error); } /** * Read and discard 32-bit entry value from the EROM table. * * @param erom EROM read state. * @retval 0 success * @retval ENOENT The end of the EROM table was reached. * @retval non-zero The read could not be completed. */ static int bcma_erom_skip32(struct bcma_erom *erom) { uint32_t entry; return bcma_erom_read32(erom, &entry); } /** * Read and discard a core descriptor from the EROM table. * * @param erom EROM read state. * @retval 0 success * @retval ENOENT The end of the EROM table was reached. * @retval non-zero The read could not be completed. 
*/ static int bcma_erom_skip_core(struct bcma_erom *erom) { struct bcma_erom_core core; return (bcma_erom_parse_core(erom, &core)); } /** * Read and discard a master port descriptor from the EROM table. * * @param erom EROM read state. * @retval 0 success * @retval ENOENT The end of the EROM table was reached. * @retval non-zero The read could not be completed. */ static int bcma_erom_skip_mport(struct bcma_erom *erom) { struct bcma_erom_mport mp; return (bcma_erom_parse_mport(erom, &mp)); } /** * Read and discard a port region descriptor from the EROM table. * * @param erom EROM read state. * @retval 0 success * @retval ENOENT The end of the EROM table was reached. * @retval non-zero The read could not be completed. */ static int bcma_erom_skip_sport_region(struct bcma_erom *erom) { struct bcma_erom_sport_region r; return (bcma_erom_parse_sport_region(erom, &r)); } /** * Seek to the next entry matching the given EROM entry type. * * @param erom EROM read state. * @param etype One of BCMA_EROM_ENTRY_TYPE_CORE, * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION. * @retval 0 success * @retval ENOENT The end of the EROM table was reached. * @retval non-zero Reading or parsing the descriptor failed. */ static int bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype) { uint32_t entry; int error; /* Iterate until we hit an entry matching the requested type. */ while (!(error = bcma_erom_peek32(erom, &entry))) { /* Handle EOF */ if (entry == BCMA_EROM_TABLE_EOF) return (ENOENT); /* Invalid entry */ if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) return (EINVAL); /* Entry type matches? */ if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype) return (0); /* Skip non-matching entry types. 
*/ switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) { case BCMA_EROM_ENTRY_TYPE_CORE: if ((error = bcma_erom_skip_core(erom))) return (error); break; case BCMA_EROM_ENTRY_TYPE_MPORT: if ((error = bcma_erom_skip_mport(erom))) return (error); break; case BCMA_EROM_ENTRY_TYPE_REGION: if ((error = bcma_erom_skip_sport_region(erom))) return (error); break; default: /* Unknown entry type! */ return (EINVAL); } } return (error); } /** * Return the read position to the start of the EROM table. * * @param erom EROM read state. */ static void bcma_erom_reset(struct bcma_erom *erom) { erom->offset = 0; } /** * Seek to the first core entry matching @p desc. * * @param erom EROM read state. * @param desc The core match descriptor. * @param[out] core On success, the matching core info. If the core info * is not desired, a NULL pointer may be provided. * @retval 0 success * @retval ENOENT The end of the EROM table was reached before @p index was * found. * @retval non-zero Reading or parsing failed. */ static int bcma_erom_seek_matching_core(struct bcma_erom *sc, const struct bhnd_core_match *desc, struct bhnd_core_info *core) { struct bhnd_core_match imatch; bus_size_t core_offset, next_offset; int error; /* Seek to table start. */ bcma_erom_reset(sc); /* We can't determine a core's unit number during the initial scan. */ imatch = *desc; imatch.m.match.core_unit = 0; /* Locate the first matching core */ for (u_int i = 0; i < UINT_MAX; i++) { struct bcma_erom_core ec; struct bhnd_core_info ci; /* Seek to the next core */ error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE); if (error) return (error); /* Save the core offset */ core_offset = bcma_erom_tell(sc); /* Parse the core */ if ((error = bcma_erom_parse_core(sc, &ec))) return (error); bcma_erom_to_core_info(&ec, i, 0, &ci); /* Check for initial match */ if (!bhnd_core_matches(&ci, &imatch)) continue; /* Re-scan preceding cores to determine the unit number. 
*/ next_offset = bcma_erom_tell(sc); bcma_erom_reset(sc); for (u_int j = 0; j < i; j++) { /* Parse the core */ error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE); if (error) return (error); if ((error = bcma_erom_parse_core(sc, &ec))) return (error); /* Bump the unit number? */ if (ec.vendor == ci.vendor && ec.device == ci.device) ci.unit++; } /* Check for full match against now-valid unit number */ if (!bhnd_core_matches(&ci, desc)) { /* Reposition to allow reading the next core */ bcma_erom_seek(sc, next_offset); continue; } /* Found; seek to the core's initial offset and provide * the core info to the caller */ bcma_erom_seek(sc, core_offset); if (core != NULL) *core = ci; return (0); } /* Not found, or a parse error occured */ return (error); } /** * Read the next core descriptor from the EROM table. * * @param erom EROM read state. * @param[out] core On success, will be populated with the parsed core * descriptor data. * @retval 0 success * @retval ENOENT The end of the EROM table was reached. * @retval non-zero Reading or parsing the core descriptor failed. 
*/ static int bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core) { uint32_t entry; int error; /* Parse CoreDescA */ if ((error = bcma_erom_read32(erom, &entry))) return (error); /* Handle EOF */ if (entry == BCMA_EROM_TABLE_EOF) return (ENOENT); if (!BCMA_EROM_ENTRY_IS(entry, CORE)) { EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n", entry, bcma_erom_entry_type_name(entry)); return (EINVAL); } core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER); core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID); /* Parse CoreDescB */ if ((error = bcma_erom_read32(erom, &entry))) return (error); if (!BCMA_EROM_ENTRY_IS(entry, CORE)) { return (EINVAL); } core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV); core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP); core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP); core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP); core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP); return (0); } /** * Read the next master port descriptor from the EROM table. * * @param erom EROM read state. * @param[out] mport On success, will be populated with the parsed * descriptor data. * @retval 0 success * @retval non-zero Reading or parsing the descriptor failed. */ static int bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport) { uint32_t entry; int error; /* Parse the master port descriptor */ if ((error = bcma_erom_read32(erom, &entry))) return (error); if (!BCMA_EROM_ENTRY_IS(entry, MPORT)) return (EINVAL); mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID); mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM); return (0); } /** * Read the next slave port region descriptor from the EROM table. * * @param erom EROM read state. * @param[out] mport On success, will be populated with the parsed * descriptor data. * @retval 0 success * @retval ENOENT The end of the region descriptor table was reached. * @retval non-zero Reading or parsing the descriptor failed. 
*/ static int bcma_erom_parse_sport_region(struct bcma_erom *erom, struct bcma_erom_sport_region *region) { uint32_t entry; uint8_t size_type; int error; /* Peek at the region descriptor */ if (bcma_erom_peek32(erom, &entry)) return (EINVAL); /* A non-region entry signals the end of the region table */ if (!BCMA_EROM_ENTRY_IS(entry, REGION)) { return (ENOENT); } else { bcma_erom_skip32(erom); } region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE); region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE); region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT); size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE); /* If region address is 64-bit, fetch the high bits. */ if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) { if ((error = bcma_erom_read32(erom, &entry))) return (error); region->base_addr |= ((bhnd_addr_t) entry << 32); } /* Parse the region size; it's either encoded as the binary logarithm * of the number of 4K pages (i.e. log2 n), or its encoded as a * 32-bit/64-bit literal value directly following the current entry. */ if (size_type == BCMA_EROM_REGION_SIZE_OTHER) { if ((error = bcma_erom_read32(erom, &entry))) return (error); region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL); if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) { if ((error = bcma_erom_read32(erom, &entry))) return (error); region->size |= ((bhnd_size_t) entry << 32); } } else { region->size = BCMA_EROM_REGION_SIZE_BASE << size_type; } /* Verify that addr+size does not overflow. */ if (region->size != 0 && BHND_ADDR_MAX - (region->size - 1) < region->base_addr) { EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n", bcma_erom_entry_type_name(region->region_type), region->region_port, (unsigned long long) region->base_addr, (unsigned long long) region->size); return (EINVAL); } return (0); } /** * Convert a bcma_erom_core record to its bhnd_core_info representation. * * @param core EROM core record to convert. * @param core_idx The core index of @p core. 
* @param core_unit The core unit of @p core. * @param[out] info The populated bhnd_core_info representation. */ static void bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx, int core_unit, struct bhnd_core_info *info) { info->vendor = core->vendor; info->device = core->device; info->hwrev = core->rev; info->core_idx = core_idx; info->unit = core_unit; } /** * Map an EROM region type to its corresponding port type. * * @param region_type Region type value. * @param[out] port_type On success, the corresponding port type. */ static int bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type, bhnd_port_type *port_type) { switch (region_type) { case BCMA_EROM_REGION_TYPE_DEVICE: *port_type = BHND_PORT_DEVICE; return (0); case BCMA_EROM_REGION_TYPE_BRIDGE: *port_type = BHND_PORT_BRIDGE; return (0); case BCMA_EROM_REGION_TYPE_MWRAP: case BCMA_EROM_REGION_TYPE_SWRAP: *port_type = BHND_PORT_AGENT; return (0); default: EROM_LOG(erom, "unsupported region type %hhx\n", region_type); return (EINVAL); } } /** * Register all MMIO region descriptors for the given slave port. * * @param erom EROM read state. * @param corecfg Core info to be populated with the scanned port regions. * @param port_num Port index for which regions will be parsed. * @param region_type The region type to be parsed. * @param[out] offset The offset at which to perform parsing. On success, this * will be updated to point to the next EROM table entry. */ static int bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom, struct bcma_corecfg *corecfg, bcma_pid_t port_num, uint8_t region_type) { struct bcma_sport *sport; struct bcma_sport_list *sports; bus_size_t entry_offset; int error; bhnd_port_type port_type; error = 0; /* Determine the port type for this region type. 
*/ error = bcma_erom_region_to_port_type(erom, region_type, &port_type); if (error) return (error); /* Fetch the list to be populated */ sports = bcma_corecfg_get_port_list(corecfg, port_type); /* Allocate a new port descriptor */ sport = bcma_alloc_sport(port_num, port_type); if (sport == NULL) return (ENOMEM); /* Read all address regions defined for this port */ for (bcma_rmid_t region_num = 0;; region_num++) { struct bcma_map *map; struct bcma_erom_sport_region spr; /* No valid port definition should come anywhere near * BCMA_RMID_MAX. */ if (region_num == BCMA_RMID_MAX) { EROM_LOG(erom, "core%u %s%u: region count reached " "upper limit of %u\n", corecfg->core_info.core_idx, bhnd_port_type_name(port_type), port_num, BCMA_RMID_MAX); error = EINVAL; goto cleanup; } /* Parse the next region entry. */ entry_offset = bcma_erom_tell(erom); error = bcma_erom_parse_sport_region(erom, &spr); if (error && error != ENOENT) { EROM_LOG(erom, "core%u %s%u.%u: invalid slave port " "address region\n", corecfg->core_info.core_idx, bhnd_port_type_name(port_type), port_num, region_num); goto cleanup; } /* ENOENT signals no further region entries */ if (error == ENOENT) { /* No further entries */ error = 0; break; } /* A region or type mismatch also signals no further region * entries */ if (spr.region_port != port_num || spr.region_type != region_type) { /* We don't want to consume this entry */ bcma_erom_seek(erom, entry_offset); error = 0; goto cleanup; } /* * Create the map entry. */ map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT); if (map == NULL) { error = ENOMEM; goto cleanup; } map->m_region_num = region_num; map->m_base = spr.base_addr; map->m_size = spr.size; map->m_rid = -1; /* Add the region map to the port */ STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link); sport->sp_num_maps++; } cleanup: /* Append the new port descriptor on success, or deallocate the * partially parsed descriptor on failure. 
*/ if (error == 0) { STAILQ_INSERT_TAIL(sports, sport, sp_link); } else if (sport != NULL) { bcma_free_sport(sport); } return error; } /** * Parse the next core entry from the EROM table and produce a bcma_corecfg * to be owned by the caller. * * @param erom A bcma EROM instance. * @param[out] result On success, the core's device info. The caller inherits * ownership of this allocation. * * @return If successful, returns 0. If the end of the EROM table is hit, * ENOENT will be returned. On error, returns a non-zero error value. */ int bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result) { struct bcma_corecfg *cfg; struct bcma_erom_core core; uint8_t first_region_type; bus_size_t initial_offset; u_int core_index; int core_unit; int error; cfg = NULL; initial_offset = bcma_erom_tell(erom); /* Parse the next core entry */ if ((error = bcma_erom_parse_core(erom, &core))) return (error); /* Determine the core's index and unit numbers */ bcma_erom_reset(erom); core_unit = 0; core_index = 0; for (; bcma_erom_tell(erom) != initial_offset; core_index++) { struct bcma_erom_core prev_core; /* Parse next core */ error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE); if (error) return (error); if ((error = bcma_erom_parse_core(erom, &prev_core))) return (error); /* Is earlier unit? */ if (core.vendor == prev_core.vendor && core.device == prev_core.device) { core_unit++; } /* Seek to next core */ error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE); if (error) return (error); } /* We already parsed the core descriptor */ if ((error = bcma_erom_skip_core(erom))) return (error); /* Allocate our corecfg */ cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor, core.device, core.rev); if (cfg == NULL) return (ENOMEM); /* These are 5-bit values in the EROM table, and should never be able * to overflow BCMA_PID_MAX. 
*/ KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count")); KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count")); KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX, ("unsupported wport count")); if (bootverbose) { EROM_LOG(erom, "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n", core_index, bhnd_vendor_name(core.vendor), bhnd_find_core_name(core.vendor, core.device), core.device, core.rev, core_unit); } cfg->num_master_ports = core.num_mport; cfg->num_dev_ports = 0; /* determined below */ cfg->num_bridge_ports = 0; /* determined blow */ cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap; /* Parse Master Port Descriptors */ for (uint8_t i = 0; i < core.num_mport; i++) { struct bcma_mport *mport; struct bcma_erom_mport mpd; /* Parse the master port descriptor */ error = bcma_erom_parse_mport(erom, &mpd); if (error) goto failed; /* Initialize a new bus mport structure */ mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT); if (mport == NULL) { error = ENOMEM; goto failed; } mport->mp_vid = mpd.port_vid; mport->mp_num = mpd.port_num; /* Update dinfo */ STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link); } /* * Determine whether this is a bridge device; if so, we can * expect the first sequence of address region descriptors to * be of EROM_REGION_TYPE_BRIDGE instead of * BCMA_EROM_REGION_TYPE_DEVICE. * * It's unclear whether this is the correct mechanism by which we * should detect/handle bridge devices, but this approach matches * that of (some of) Broadcom's published drivers. 
*/ if (core.num_dport > 0) { uint32_t entry; if ((error = bcma_erom_peek32(erom, &entry))) goto failed; if (BCMA_EROM_ENTRY_IS(entry, REGION) && BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE) { first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE; cfg->num_dev_ports = 0; cfg->num_bridge_ports = core.num_dport; } else { first_region_type = BCMA_EROM_REGION_TYPE_DEVICE; cfg->num_dev_ports = core.num_dport; cfg->num_bridge_ports = 0; } } /* Device/bridge port descriptors */ for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) { error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num, first_region_type); if (error) goto failed; } /* Wrapper (aka device management) descriptors (for master ports). */ for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) { error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num, BCMA_EROM_REGION_TYPE_MWRAP); if (error) goto failed; } /* Wrapper (aka device management) descriptors (for slave ports). */ for (uint8_t i = 0; i < core.num_swrap; i++) { /* Slave wrapper ports are not numbered distinctly from master * wrapper ports. */ /* * Broadcom DDR1/DDR2 Memory Controller * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) -> * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2) * * ARM BP135 AMBA3 AXI to APB Bridge * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) -> * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2) * * core.num_mwrap * ===> * (core.num_mwrap > 0) ? * core.num_mwrap : * ((core.vendor == BHND_MFGID_BCM) ? 1 : 0) */ uint8_t sp_num; sp_num = (core.num_mwrap > 0) ? core.num_mwrap : ((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i; error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num, BCMA_EROM_REGION_TYPE_SWRAP); if (error) goto failed; } /* * Seek to the next core entry (if any), skipping any dangling/invalid * region entries. 
* * On the BCM4706, the EROM entry for the memory controller core * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region * descriptor. */ if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) { if (error != ENOENT) goto failed; } *result = cfg; return (0); failed: if (cfg != NULL) bcma_free_corecfg(cfg); return error; } static int bcma_erom_dump(bhnd_erom_t *erom) { struct bcma_erom *sc; uint32_t entry; int error; sc = (struct bcma_erom *)erom; bcma_erom_reset(sc); while (!(error = bcma_erom_read32(sc, &entry))) { /* Handle EOF */ if (entry == BCMA_EROM_TABLE_EOF) { EROM_LOG(sc, "EOF\n"); return (0); } /* Invalid entry */ if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) { EROM_LOG(sc, "invalid EROM entry %#x\n", entry); return (EINVAL); } switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) { case BCMA_EROM_ENTRY_TYPE_CORE: { /* CoreDescA */ EROM_LOG(sc, "coreA (0x%x)\n", entry); EROM_LOG(sc, "\tdesigner:\t0x%x\n", BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER)); EROM_LOG(sc, "\tid:\t\t0x%x\n", BCMA_EROM_GET_ATTR(entry, COREA_ID)); EROM_LOG(sc, "\tclass:\t\t0x%x\n", BCMA_EROM_GET_ATTR(entry, COREA_CLASS)); /* CoreDescB */ if ((error = bcma_erom_read32(sc, &entry))) { EROM_LOG(sc, "error reading CoreDescB: %d\n", error); return (error); } if (!BCMA_EROM_ENTRY_IS(entry, CORE)) { EROM_LOG(sc, "invalid core descriptor; found " "unexpected entry %#x (type=%s)\n", entry, bcma_erom_entry_type_name(entry)); return (EINVAL); } EROM_LOG(sc, "coreB (0x%x)\n", entry); EROM_LOG(sc, "\trev:\t0x%x\n", BCMA_EROM_GET_ATTR(entry, COREB_REV)); EROM_LOG(sc, "\tnummp:\t0x%x\n", BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP)); EROM_LOG(sc, "\tnumdp:\t0x%x\n", BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP)); EROM_LOG(sc, "\tnumwmp:\t0x%x\n", BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP)); EROM_LOG(sc, "\tnumwsp:\t0x%x\n", BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP)); break; } case BCMA_EROM_ENTRY_TYPE_MPORT: EROM_LOG(sc, "\tmport 0x%x\n", entry); EROM_LOG(sc, "\t\tport:\t0x%x\n", 
BCMA_EROM_GET_ATTR(entry, MPORT_NUM)); EROM_LOG(sc, "\t\tid:\t\t0x%x\n", BCMA_EROM_GET_ATTR(entry, MPORT_ID)); break; case BCMA_EROM_ENTRY_TYPE_REGION: { bool addr64; uint8_t size_type; addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0); size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE); EROM_LOG(sc, "\tregion 0x%x:\n", entry); EROM_LOG(sc, "\t\t%s:\t0x%x\n", addr64 ? "baselo" : "base", BCMA_EROM_GET_ATTR(entry, REGION_BASE)); EROM_LOG(sc, "\t\tport:\t0x%x\n", BCMA_EROM_GET_ATTR(entry, REGION_PORT)); EROM_LOG(sc, "\t\ttype:\t0x%x\n", BCMA_EROM_GET_ATTR(entry, REGION_TYPE)); EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type); /* Read the base address high bits */ if (addr64) { if ((error = bcma_erom_read32(sc, &entry))) { EROM_LOG(sc, "error reading region " "base address high bits %d\n", error); return (error); } EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry); } /* Read extended size descriptor */ if (size_type == BCMA_EROM_REGION_SIZE_OTHER) { bool size64; if ((error = bcma_erom_read32(sc, &entry))) { EROM_LOG(sc, "error reading region " "size descriptor %d\n", error); return (error); } if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) size64 = true; else size64 = false; EROM_LOG(sc, "\t\t%s:\t0x%x\n", size64 ? 
"sizelo" : "size", BCMA_EROM_GET_ATTR(entry, RSIZE_VAL)); if (size64) { error = bcma_erom_read32(sc, &entry); if (error) { EROM_LOG(sc, "error reading " "region size high bits: " "%d\n", error); return (error); } EROM_LOG(sc, "\t\tsizehi:\t0x%x\n", entry); } } break; } default: EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n", entry, bcma_erom_entry_type_name(entry)); return (EINVAL); } } if (error == ENOENT) EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n"); else if (error) EROM_LOG(sc, "EROM read failed: %d\n", error); return (error); } static kobj_method_t bcma_erom_methods[] = { KOBJMETHOD(bhnd_erom_probe, bcma_erom_probe), - KOBJMETHOD(bhnd_erom_probe_static, bcma_erom_probe_static), KOBJMETHOD(bhnd_erom_init, bcma_erom_init), - KOBJMETHOD(bhnd_erom_init_static, bcma_erom_init_static), KOBJMETHOD(bhnd_erom_fini, bcma_erom_fini), KOBJMETHOD(bhnd_erom_get_core_table, bcma_erom_get_core_table), KOBJMETHOD(bhnd_erom_free_core_table, bcma_erom_free_core_table), KOBJMETHOD(bhnd_erom_lookup_core, bcma_erom_lookup_core), KOBJMETHOD(bhnd_erom_lookup_core_addr, bcma_erom_lookup_core_addr), KOBJMETHOD(bhnd_erom_dump, bcma_erom_dump), KOBJMETHOD_END }; BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom)); Index: head/sys/dev/bhnd/bhnd_erom.c =================================================================== --- head/sys/dev/bhnd/bhnd_erom.c (revision 324070) +++ head/sys/dev/bhnd/bhnd_erom.c (revision 324071) @@ -1,141 +1,486 @@ /*- * Copyright (c) 2016 Landon Fuller + * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * + * Portions of this software were developed by Landon Fuller + * under sponsorship from the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include +#include #include + +#include +#include +#include #include #include +#include +static int bhnd_erom_iores_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, + bhnd_size_t size); +static uint32_t bhnd_erom_iores_read(struct bhnd_erom_io *eio, + bhnd_size_t offset, u_int width); +static void bhnd_erom_iores_fini(struct bhnd_erom_io *eio); + +static int bhnd_erom_iobus_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, + bhnd_size_t size); +static uint32_t bhnd_erom_iobus_read(struct bhnd_erom_io *eio, + bhnd_size_t offset, u_int width); + /** + * An implementation of bhnd_erom_io that manages mappings via + * bhnd_alloc_resource() and bhnd_release_resource(). 
+ */ +struct bhnd_erom_iores { + struct bhnd_erom_io eio; + device_t owner; /**< device from which we'll allocate resources */ + int owner_rid; /**< rid to use when allocating new mappings */ + struct bhnd_resource *mapped; /**< current mapping, or NULL */ + int mapped_rid; /**< resource ID of current mapping, or -1 */ +}; + +/** + * Fetch the device enumeration parser class from all bhnd(4)-compatible drivers + * registered for @p bus_devclass, probe @p eio for supporting parser classes, + * and return the best available supporting enumeration parser class. + * + * @param bus_devclass The bus device class to be queried for + * bhnd(4)-compatible drivers. + * @param eio An erom bus I/O instance, configured with a + * mapping of the first bus core. + * @param hint Identification hint used to identify the device. + * If the chipset supports standard chip + * identification registers within the first core, + * this parameter should be NULL. + * @param[out] cid On success, the probed chip identifier. + * + * @retval non-NULL on success, the best available EROM class. + * @retval NULL if no erom class returned a successful probe result for + * @p eio. 
+ */ +bhnd_erom_class_t * +bhnd_erom_probe_driver_classes(devclass_t bus_devclass, + struct bhnd_erom_io *eio, const struct bhnd_chipid *hint, + struct bhnd_chipid *cid) +{ + driver_t **drivers; + int drv_count; + bhnd_erom_class_t *erom_cls; + int error, prio, result; + + erom_cls = NULL; + prio = 0; + + /* Fetch all available drivers */ + error = devclass_get_drivers(bus_devclass, &drivers, &drv_count); + if (error) { + printf("error fetching bhnd(4) drivers for %s: %d\n", + devclass_get_name(bus_devclass), error); + return (NULL); + } + + /* Enumerate the drivers looking for the best available EROM class */ + for (int i = 0; i < drv_count; i++) { + struct bhnd_chipid pcid; + bhnd_erom_class_t *cls; + + /* The default implementation of BHND_BUS_GET_EROM_CLASS() + * returns NULL if unimplemented; this should always be safe + * to call on arbitrary drivers */ + cls = bhnd_driver_get_erom_class(drivers[i]); + if (cls == NULL) + continue; + + kobj_class_compile(cls); + + /* Probe the bus */ + result = bhnd_erom_probe(cls, eio, hint, &pcid); + + /* The parser did not match if an error was returned */ + if (result > 0) + continue; + + /* Check for a new highest priority match */ + if (erom_cls == NULL || result > prio) { + prio = result; + + *cid = pcid; + erom_cls = cls; + } + + /* Terminate immediately on BUS_PROBE_SPECIFIC */ + if (result == BUS_PROBE_SPECIFIC) + break; + } + + return (erom_cls); +} + +/** * Allocate and return a new device enumeration table parser. * * @param cls The parser class for which an instance will be * allocated. - * @param parent The parent device from which EROM resources should - * be allocated. - * @param rid The resource ID to be used when allocating EROM - * resources. + * @param eio The bus I/O callbacks to use when reading the device + * enumeration table. * @param cid The device's chip identifier. * * @retval non-NULL success * @retval NULL if an error occured allocating or initializing the * EROM parser. 
*/ bhnd_erom_t * bhnd_erom_alloc(bhnd_erom_class_t *cls, const struct bhnd_chipid *cid, - device_t parent, int rid) + struct bhnd_erom_io *eio) { bhnd_erom_t *erom; int error; erom = (bhnd_erom_t *)kobj_create((kobj_class_t)cls, M_BHND, M_WAITOK|M_ZERO); - if ((error = BHND_EROM_INIT(erom, cid, parent, rid))) { - printf("error initializing %s parser at %#jx with " - "rid %d: %d\n", cls->name, (uintmax_t)cid->enum_addr, rid, - error); + if ((error = BHND_EROM_INIT(erom, cid, eio))) { + printf("error initializing %s parser at %#jx: %d\n", cls->name, + (uintmax_t)cid->enum_addr, error); kobj_delete((kobj_t)erom, M_BHND); return (NULL); } return (erom); } /** - * Perform static initialization of aa device enumeration table parser using - * the provided bus space tag and handle. + * Perform static initialization of a device enumeration table parser. * * This may be used to initialize a caller-allocated erom instance state * during early boot, prior to malloc availability. * * @param cls The parser class for which an instance will be * allocated. * @param erom The erom parser instance to initialize. * @param esize The total available number of bytes allocated for * @p erom. If this is less than is required by @p cls, * ENOMEM will be returned. * @param cid The device's chip identifier. - * @param bst Bus space tag. - * @param bsh Bus space handle mapping the device enumeration - * space. + * @param eio The bus I/O callbacks to use when reading the device + * enumeration table. * * @retval 0 success * @retval ENOMEM if @p esize is smaller than required by @p cls. * @retval non-zero if an error occurs initializing the EROM parser, * a regular unix error code will be returned. 
*/ int bhnd_erom_init_static(bhnd_erom_class_t *cls, bhnd_erom_t *erom, size_t esize, - const struct bhnd_chipid *cid, bus_space_tag_t bst, bus_space_handle_t bsh) + const struct bhnd_chipid *cid, struct bhnd_erom_io *eio) { kobj_class_t kcls; kcls = (kobj_class_t)cls; /* Verify allocation size */ if (kcls->size > esize) return (ENOMEM); /* Perform instance initialization */ kobj_init_static((kobj_t)erom, kcls); - return (BHND_EROM_INIT_STATIC(erom, cid, bst, bsh)); + return (BHND_EROM_INIT(erom, cid, eio)); } /** * Release any resources held by a @p erom parser previously * initialized via bhnd_erom_init_static(). * * @param erom An erom parser instance previously initialized via * bhnd_erom_init_static(). */ void bhnd_erom_fini_static(bhnd_erom_t *erom) { return (BHND_EROM_FINI(erom)); } /** * Release all resources held by a @p erom parser previously * allocated via bhnd_erom_alloc(). * * @param erom An erom parser instance previously allocated via * bhnd_erom_alloc(). */ void bhnd_erom_free(bhnd_erom_t *erom) { BHND_EROM_FINI(erom); kobj_delete((kobj_t)erom, M_BHND); +} + + +/** + * Attempt to map @p size bytes at @p addr, replacing any existing + * @p eio mapping. + * + * @param eio I/O instance state. + * @param addr The address to be mapped. + * @param size The number of bytes to be mapped at @p addr. + * + * @retval 0 success + * @retval non-zero if mapping @p addr otherwise fails, a regular + * unix error code should be returned. + */ +int +bhnd_erom_io_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, bhnd_size_t size) +{ + return (eio->map(eio, addr, size)); +} + +/** + * Read a 1, 2, or 4 byte data item from @p eio, at the given @p offset + * relative to @p eio's current mapping. + * + * @param eio erom I/O callbacks + * @param offset read offset. + * @param width item width (1, 2, or 4 bytes). 
+ */ +uint32_t +bhnd_erom_io_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width) +{ + return (eio->read(eio, offset, width)); +} + +/** + * Free all resources held by @p eio. + */ +void +bhnd_erom_io_fini(struct bhnd_erom_io *eio) +{ + if (eio->fini != NULL) + return (eio->fini(eio)); +} + +/** + * Allocate, initialize, and return a new I/O instance that will perform + * mapping by allocating SYS_RES_MEMORY resources from @p dev using @p rid. + * + * @param dev The device to pass to bhnd_alloc_resource() and + * bhnd_release_resource() functions. + * @param rid The resource ID to be used when allocating memory resources. + */ +struct bhnd_erom_io * +bhnd_erom_iores_new(device_t dev, int rid) +{ + struct bhnd_erom_iores *iores; + + iores = malloc(sizeof(*iores), M_BHND, M_WAITOK | M_ZERO); + iores->eio.map = bhnd_erom_iores_map; + iores->eio.read = bhnd_erom_iores_read; + iores->eio.fini = bhnd_erom_iores_fini; + + iores->owner = dev; + iores->owner_rid = rid; + iores->mapped = NULL; + iores->mapped_rid = -1; + + return (&iores->eio); +} + +static int +bhnd_erom_iores_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, + bhnd_size_t size) +{ + struct bhnd_erom_iores *iores; + + iores = (struct bhnd_erom_iores *)eio; + + /* Sanity check the addr/size */ + if (size == 0) + return (EINVAL); + + if (BHND_ADDR_MAX - size < addr) + return (EINVAL); /* would overflow */ + + /* Check for an existing mapping */ + if (iores->mapped) { + /* If already mapped, nothing else to do */ + if (rman_get_start(iores->mapped->res) == addr && + rman_get_size(iores->mapped->res) == size) + { + return (0); + } + + /* Otherwise, we need to drop the existing mapping */ + bhnd_release_resource(iores->owner, SYS_RES_MEMORY, + iores->mapped_rid, iores->mapped); + iores->mapped = NULL; + iores->mapped_rid = -1; + } + + /* Try to allocate the new mapping */ + iores->mapped_rid = iores->owner_rid; + iores->mapped = bhnd_alloc_resource(iores->owner, SYS_RES_MEMORY, + &iores->mapped_rid, 
addr, addr+size-1, size, + RF_ACTIVE|RF_SHAREABLE); + if (iores->mapped == NULL) { + iores->mapped_rid = -1; + return (ENXIO); + } + + return (0); +} + +static uint32_t +bhnd_erom_iores_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width) +{ + struct bhnd_erom_iores *iores = (struct bhnd_erom_iores *)eio; + + if (iores->mapped == NULL) + panic("read with invalid mapping"); + + switch (width) { + case 1: + return (bhnd_bus_read_1(iores->mapped, offset)); + case 2: + return (bhnd_bus_read_2(iores->mapped, offset)); + case 4: + return (bhnd_bus_read_4(iores->mapped, offset)); + default: + panic("invalid width %u", width); + } +} + +static void +bhnd_erom_iores_fini(struct bhnd_erom_io *eio) +{ + struct bhnd_erom_iores *iores = (struct bhnd_erom_iores *)eio; + + /* Release any mapping */ + if (iores->mapped) { + bhnd_release_resource(iores->owner, SYS_RES_MEMORY, + iores->mapped_rid, iores->mapped); + iores->mapped = NULL; + iores->mapped_rid = -1; + } + + free(eio, M_BHND); +} + +/** + * Initialize an I/O instance that will perform mapping directly from the + * given bus space tag and handle. + * + * @param addr The base address mapped by @p bsh. + * @param size The total size mapped by @p bsh. + * @param bst Bus space tag for @p bsh. + * @param bsh Bus space handle mapping the full bus enumeration space. + * + * @retval 0 success + * @retval non-zero if initializing @p iobus otherwise fails, a regular + * unix error code will be returned. 
+ */ +int +bhnd_erom_iobus_init(struct bhnd_erom_iobus *iobus, bhnd_addr_t addr, + bhnd_size_t size, bus_space_tag_t bst, bus_space_handle_t bsh) +{ + iobus->eio.map = bhnd_erom_iobus_map; + iobus->eio.read = bhnd_erom_iobus_read; + iobus->eio.fini = NULL; + + iobus->addr = addr; + iobus->size = size; + iobus->bst = bst; + iobus->bsh = bsh; + iobus->mapped = false; + + return (0); +} + +static int +bhnd_erom_iobus_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, + bhnd_size_t size) +{ + struct bhnd_erom_iobus *iobus = (struct bhnd_erom_iobus *)eio; + + /* Sanity check the addr/size */ + if (size == 0) + return (EINVAL); + + /* addr+size must not overflow */ + if (BHND_ADDR_MAX - size < addr) + return (EINVAL); + + /* addr/size must fit within our bus tag's mapping */ + if (addr < iobus->addr || size > iobus->size) + return (ENXIO); + + if (iobus->size - (addr - iobus->addr) < size) + return (ENXIO); + + /* The new addr offset and size must be representible as a bus_size_t */ + if ((addr - iobus->addr) > BUS_SPACE_MAXSIZE) + return (ENXIO); + + if (size > BUS_SPACE_MAXSIZE) + return (ENXIO); + + iobus->offset = addr - iobus->addr; + iobus->limit = size; + iobus->mapped = true; + + return (0); +} + +static uint32_t +bhnd_erom_iobus_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width) +{ + struct bhnd_erom_iobus *iobus = (struct bhnd_erom_iobus *)eio; + + if (!iobus->mapped) + panic("no active mapping"); + + if (iobus->limit < width || iobus->limit - width < offset) + panic("invalid offset %#jx", offset); + + switch (width) { + case 1: + return (bus_space_read_1(iobus->bst, iobus->bsh, + iobus->offset + offset)); + case 2: + return (bus_space_read_2(iobus->bst, iobus->bsh, + iobus->offset + offset)); + case 4: + return (bus_space_read_4(iobus->bst, iobus->bsh, + iobus->offset + offset)); + default: + panic("invalid width %u", width); + } } Index: head/sys/dev/bhnd/bhnd_erom.h =================================================================== --- 
head/sys/dev/bhnd/bhnd_erom.h (revision 324070) +++ head/sys/dev/bhnd/bhnd_erom.h (revision 324071) @@ -1,258 +1,247 @@ /*- - * Copyright (c) 2015-2016 Landon Fuller + * Copyright (c) 2015-2017 Landon Fuller + * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * + * Portions of this software were developed by Landon Fuller + * under sponsorship from the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
* * $FreeBSD$ */ #ifndef _BHND_EROM_BHND_EROM_H_ #define _BHND_EROM_BHND_EROM_H_ #include #include #include #include #include #include "bhnd_erom_if.h" -bhnd_erom_t *bhnd_erom_alloc(bhnd_erom_class_t *cls, - const struct bhnd_chipid *cid, - device_t parent, int rid); +/* forward declarations */ +struct bhnd_erom_io; +struct bhnd_erom_iobus; -int bhnd_erom_init_static(bhnd_erom_class_t *cls, - bhnd_erom_t *erom, size_t esize, - const struct bhnd_chipid *cid, - bus_space_tag_t bst, - bus_space_handle_t bsh); +bhnd_erom_class_t *bhnd_erom_probe_driver_classes(devclass_t bus_devclass, + struct bhnd_erom_io *eio, + const struct bhnd_chipid *hint, + struct bhnd_chipid *cid); -void bhnd_erom_fini_static(bhnd_erom_t *erom); +bhnd_erom_t *bhnd_erom_alloc(bhnd_erom_class_t *cls, + const struct bhnd_chipid *cid, + struct bhnd_erom_io *eio); -void bhnd_erom_free(bhnd_erom_t *erom); +int bhnd_erom_init_static(bhnd_erom_class_t *cls, + bhnd_erom_t *erom, size_t esize, + const struct bhnd_chipid *cid, + struct bhnd_erom_io *eio); +void bhnd_erom_fini_static(bhnd_erom_t *erom); + +void bhnd_erom_free(bhnd_erom_t *erom); + +struct bhnd_erom_io *bhnd_erom_iores_new(device_t dev, int rid); +int bhnd_erom_iobus_init(struct bhnd_erom_iobus *iobus, + bhnd_addr_t addr, bhnd_size_t size, + bus_space_tag_t bst, bus_space_handle_t bsh); + +int bhnd_erom_io_map(struct bhnd_erom_io *eio, + bhnd_addr_t addr, bhnd_size_t size); +uint32_t bhnd_erom_io_read(struct bhnd_erom_io *eio, + bhnd_size_t offset, u_int width); +void bhnd_erom_io_fini(struct bhnd_erom_io *eio); + /** * Abstract bhnd_erom instance state. Must be first member of all subclass * instances. */ struct bhnd_erom { KOBJ_FIELDS; }; /** Number of additional bytes to reserve for statically allocated * bhnd_erom instances. */ #define BHND_EROM_STATIC_BYTES 64 /** * A bhnd_erom instance structure large enough to statically allocate * any known bhnd_erom subclass. 
* * The maximum size of subclasses is verified statically in * BHND_EROM_DEFINE_CLASS(), and at runtime in bhnd_erom_init_static(). */ struct bhnd_erom_static { struct bhnd_erom obj; uint8_t idata[BHND_EROM_STATIC_BYTES]; }; /** Registered EROM parser class instances. */ SET_DECLARE(bhnd_erom_class_set, bhnd_erom_class_t); #define BHND_EROM_DEFINE_CLASS(name, classvar, methods, size) \ DEFINE_CLASS_0(name, classvar, methods, size); \ BHND_EROM_CLASS_DEF(classvar); \ _Static_assert(size <= sizeof(struct bhnd_erom_static), \ "cannot statically allocate instance data; " \ "increase BHND_EROM_STATIC_BYTES"); #define BHND_EROM_CLASS_DEF(classvar) DATA_SET(bhnd_erom_class_set, classvar) - /** * Probe to see if this device enumeration class supports the bhnd bus - * mapped by the given resource, returning a standard newbus device probe - * result (see BUS_PROBE_*) and the probed chip identification. + * mapped by @p eio, returning a standard newbus device probe result + * (see BUS_PROBE_*) and the probed chip identification. * * @param cls The erom class to probe. - * @param res A resource mapping the first bus core (EXTIF or - * ChipCommon) - * @param offset Offset to the first bus core within @p res. - * @param hint Identification hint used to identify the device. If - * chipset supports standard chip identification registers - * within the first core, this parameter should be NULL. + * @param eio A bus I/O instance, configured with a mapping of the + * first bus core. + * @param hint Identification hint used to identify the device. + * If chipset supports standard chip identification + * registers within the first core, this parameter should + * be NULL. * @param[out] cid On success, the probed chip identifier. * * @retval 0 if this is the only possible device enumeration * parser for the probed bus. 
* @retval negative if the probe succeeds, a negative value should be * returned; the parser returning the highest negative * value will be selected to handle device enumeration. * @retval ENXIO If the bhnd bus type is not handled by this parser. * @retval positive if an error occurs during probing, a regular unix error * code should be returned. */ static inline int -bhnd_erom_probe(bhnd_erom_class_t *cls, struct bhnd_resource *res, - bus_size_t offset, const struct bhnd_chipid *hint, struct bhnd_chipid *cid) +bhnd_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio, + const struct bhnd_chipid *hint, struct bhnd_chipid *cid) { - return (BHND_EROM_PROBE(cls, res, offset, hint, cid)); -} - -/** - * Probe to see if this device enumeration class supports the bhnd bus - * mapped at the given bus space tag and handle, returning a standard - * newbus device probe result (see BUS_PROBE_*) and the probed - * chip identification. - * - * @param cls The erom class to probe. - * @param bst Bus space tag. - * @param bsh Bus space handle mapping the EXTIF or ChipCommon core. - * @param paddr The physical address of the core mapped by @p bst and - * @p bsh. - * @param hint Identification hint used to identify the device. If - * chipset supports standard chip identification registers - * within the first core, this parameter should be NULL. - * @param[out] cid On success, the probed chip identifier. - * - * @retval 0 if this is the only possible device enumeration - * parser for the probed bus. - * @retval negative if the probe succeeds, a negative value should be - * returned; the parser returning the lowest value will - * be selected to handle device enumeration. - * @retval ENXIO If the bhnd bus type is not handled by this parser. - * @retval positive if an error occurs during probing, a regular unix error - * code should be returned. 
- */ -static inline int -bhnd_erom_probe_static(bhnd_erom_class_t *cls, bus_space_tag_t bst, - bus_space_handle_t bsh, bus_addr_t paddr, const struct bhnd_chipid *hint, - struct bhnd_chipid *cid) -{ - return (BHND_EROM_PROBE_STATIC(cls, bst, bsh, paddr, hint, cid)); + return (BHND_EROM_PROBE(cls, eio, hint, cid)); } /** * Parse all cores descriptors in @p erom, returning the array in @p cores and * the count in @p num_cores. * * The memory allocated for the table must be freed via * bhnd_erom_free_core_table(). * * @param erom The erom parser to be queried. * @param[out] cores The table of parsed core descriptors. * @param[out] num_cores The number of core records in @p cores. * * @retval 0 success * @retval non-zero if an error occurs, a regular unix error code will * be returned. */ static inline int bhnd_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores, u_int *num_cores) { return (BHND_EROM_GET_CORE_TABLE(erom, cores, num_cores)); } /** * Free any memory allocated in a previous call to BHND_EROM_GET_CORE_TABLE(). * * @param erom The erom parser instance. * @param cores A core table allocated by @p erom. */ static inline void bhnd_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores) { return (BHND_EROM_FREE_CORE_TABLE(erom, cores)); }; /** * Locate the first core table entry in @p erom that matches @p desc. * * @param erom The erom parser to be queried. * @param desc A core match descriptor. * @param[out] core On success, the matching core info record. * * @retval 0 success * @retval ENOENT No core matching @p desc was found. * @retval non-zero Reading or parsing failed. */ static inline int bhnd_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc, struct bhnd_core_info *core) { return (BHND_EROM_LOOKUP_CORE(erom, desc, core)); } /** * Locate the first core table entry in @p erom that matches @p desc, * and return the specified port region's base address and size. 
* * If a core matching @p desc is not found, or the requested port region * is not mapped to the matching core, ENOENT is returned. * * @param erom The erom parser to be queried. * @param desc A core match descriptor. * @param type The port type to search for. * @param port The port to search for. * @param region The port region to search for. * @param[out] core If not NULL, will be populated with the matched core * info record on success. * @param[out] addr On success, the base address of the port region. * @param[out] size On success, the total size of the port region. * * @retval 0 success * @retval ENOENT No core matching @p desc was found. * @retval ENOENT No port region matching @p type, @p port, and @p region * was found. * @retval non-zero Reading or parsing failed. */ static inline int bhnd_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc, bhnd_port_type type, u_int port, u_int region, struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size) { return (BHND_EROM_LOOKUP_CORE_ADDR(erom, desc, type, port, region, core, addr, size)); }; /** * Enumerate and print all entries in @p erom. * * @param erom The erom parser to be enumerated. * * @retval 0 success * @retval non-zero If an error occurs parsing the EROM table, a regular * unix error code will be returned. */ static inline int bhnd_erom_dump(bhnd_erom_t *erom) { return (BHND_EROM_DUMP(erom)); } #endif /* _BHND_EROM_BHND_EROM_H_ */ Index: head/sys/dev/bhnd/bhnd_erom_if.m =================================================================== --- head/sys/dev/bhnd/bhnd_erom_if.m (revision 324070) +++ head/sys/dev/bhnd/bhnd_erom_if.m (revision 324071) @@ -1,256 +1,208 @@ #- -# Copyright (c) 2016 Landon Fuller +# Copyright (c) 2016-2017 Landon Fuller +# Copyright (c) 2017 The FreeBSD Foundation # All rights reserved. # +# Portions of this software were developed by Landon Fuller +# under sponsorship from the FreeBSD Foundation. 
+# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # $FreeBSD$ #include #include #include #include #include #include #include INTERFACE bhnd_erom; # # bhnd(4) device enumeration. # # Provides a common parser interface to the incompatible device enumeration # tables used by bhnd(4) buses. # -/** - * Probe to see if this device enumeration class supports the bhnd bus - * mapped by the given resource, returning a standard newbus device probe - * result (see BUS_PROBE_*) and the probed chip identification. - * - * @param cls The erom class to probe. - * @param res A resource mapping the first bus core. - * @param offset Offset to the first bus core within @p res. - * @param hint Hint used to identify the device. 
If chipset supports - * standard chip identification registers within the first - * core, this parameter should be NULL. - * @param[out] cid On success, the probed chip identifier. - * - * @retval 0 if this is the only possible device enumeration - * parser for the probed bus. - * @retval negative if the probe succeeds, a negative value should be - * returned; the parser returning the highest negative - * value will be selected to handle device enumeration. - * @retval ENXIO If the bhnd bus type is not handled by this parser. - * @retval positive if an error occurs during probing, a regular unix error - * code should be returned. - */ -STATICMETHOD int probe { - bhnd_erom_class_t *cls; - struct bhnd_resource *res; - bus_size_t offset; - const struct bhnd_chipid *hint; - struct bhnd_chipid *cid; +HEADER { + /* forward declarations */ + struct bhnd_erom_io; }; /** - * Probe to see if this device enumeration class supports the bhnd bus - * mapped at the given bus space tag and handle, returning a standard - * newbus device probe result (see BUS_PROBE_*) and the probed - * chip identification. + * Probe to see if this device enumeration class supports the bhnd bus at + * @p addr, returning a standard newbus device probe result (see BUS_PROBE_*) + * and the probed chip identification. * - * @param cls The erom class to probe. - * @param bst Bus space tag. - * @param bsh Bus space handle mapping the first bus core. - * @param paddr The physical address of the core mapped by @p bst and - * @p bsh. - * @param hint Hint used to identify the device. If chipset supports - * standard chip identification registers within the first - * core, this parameter should be NULL. - * @param[out] cid On success, the probed chip identifier. + * @param cls The erom class to probe. + * @param eio A bus I/O instance, configured with a mapping of + * the first bus core. + * @param base_addr Address of the first bus core. + * @param hint Hint used to identify the device. 
If chipset + * supports standard chip identification registers + * within the first core, this parameter should be + * NULL. + * @param[out] cid On success, the probed chip identifier. * * @retval 0 if this is the only possible device enumeration * parser for the probed bus. * @retval negative if the probe succeeds, a negative value should be * returned; the parser returning the highest negative * value will be selected to handle device enumeration. * @retval ENXIO If the bhnd bus type is not handled by this parser. * @retval positive if an error occurs during probing, a regular unix error * code should be returned. */ -STATICMETHOD int probe_static { +STATICMETHOD int probe { bhnd_erom_class_t *cls; - bus_space_tag_t bst; - bus_space_handle_t bsh; - bus_addr_t paddr; + struct bhnd_erom_io *eio; const struct bhnd_chipid *hint; struct bhnd_chipid *cid; }; /** * Initialize a device enumeration table parser. * * @param erom The erom parser to initialize. * @param cid The device's chip identifier. - * @param parent The parent device from which EROM resources should - * be allocated. - * @param rid The resource id to be used when allocating the - * enumeration table. - * + * @param eio The bus I/O instance to use when reading the device + * enumeration table. On success, the erom parser assumes + * ownership of this instance. * @retval 0 success * @retval non-zero if an error occurs initializing the EROM parser, * a regular unix error code will be returned. */ METHOD int init { bhnd_erom_t *erom; const struct bhnd_chipid *cid; - device_t parent; - int rid; -}; - -/** - * Initialize an device enumeration table parser using the provided bus space - * tag and handle. - * - * @param erom The erom parser to initialize. - * @param cid The device's chip identifier. - * @param bst Bus space tag. - * @param bsh Bus space handle mapping the full bus enumeration - * space. 
- * - * @retval 0 success - * @retval non-zero if an error occurs initializing the EROM parser, - * a regular unix error code will be returned. - */ -METHOD int init_static { - bhnd_erom_t *erom; - const struct bhnd_chipid *cid; - bus_space_tag_t bst; - bus_space_handle_t bsh; + struct bhnd_erom_io *eio; }; /** * Release all resources held by @p erom. * * @param erom An erom parser instance previously initialized via * BHND_EROM_INIT() or BHND_EROM_INIT_STATIC(). */ METHOD void fini { bhnd_erom_t *erom; }; /** * Parse all cores descriptors, returning the array in @p cores and the count * in @p num_cores. * * The memory allocated for the table must be freed via * BHND_EROM_FREE_CORE_TABLE(). * * @param erom The erom parser to be queried. * @param[out] cores The table of parsed core descriptors. * @param[out] num_cores The number of core records in @p cores. * * @retval 0 success * @retval non-zero if an error occurs, a regular unix error code will * be returned. */ METHOD int get_core_table { bhnd_erom_t *erom; struct bhnd_core_info **cores; u_int *num_cores; }; /** * Free any memory allocated in a previous call to BHND_EROM_GET_CORE_TABLE(). * * @param erom The erom parser instance. * @param cores A core table allocated by @p erom. */ METHOD void free_core_table { bhnd_erom_t *erom; struct bhnd_core_info *cores; }; /** * Locate the first core table entry in @p erom that matches @p desc. * * @param erom The erom parser to be queried. * @param desc A core match descriptor. * @param[out] core On success, the matching core info record. * * @retval 0 success * @retval ENOENT No core matching @p desc was found. * @retval non-zero Reading or parsing failed. */ METHOD int lookup_core { bhnd_erom_t *erom; const struct bhnd_core_match *desc; struct bhnd_core_info *core; }; /** * Locate the first core table entry in @p erom that matches @p desc, * and return the specified port region's base address and size. 
* * If a core matching @p desc is not found, or the requested port region * is not mapped to the matching core, ENOENT is returned. * * @param erom The erom parser to be queried. * @param desc A core match descriptor. * @param type The port type to search for. * @param port The port to search for. * @param region The port region to search for. * @param[out] core If not NULL, will be populated with the matched core * info record on success. * @param[out] addr On success, the base address of the port region. * @param[out] size On success, the total size of the port region. * * @retval 0 success * @retval ENOENT No core matching @p desc was found. * @retval ENOENT No port region matching @p type, @p port, and @p region * was found. * @retval non-zero Reading or parsing failed. */ METHOD int lookup_core_addr { bhnd_erom_t *erom; const struct bhnd_core_match *desc; bhnd_port_type type; u_int port; u_int region; struct bhnd_core_info *core; bhnd_addr_t *addr; bhnd_size_t *size; }; /** * Enumerate and print all EROM table entries. * * @param erom The erom parser to be enumerated. * * @retval 0 success * @retval non-zero If an error occurs reading the EROM table, a regular * unix error code will be returned. */ METHOD int dump { bhnd_erom_t *erom; }; Index: head/sys/dev/bhnd/bhnd_eromvar.h =================================================================== --- head/sys/dev/bhnd/bhnd_eromvar.h (nonexistent) +++ head/sys/dev/bhnd/bhnd_eromvar.h (revision 324071) @@ -0,0 +1,79 @@ +/*- + * Copyright (c) 2017 The FreeBSD Foundation + * All rights reserved. + * + * This software was developed by Landon Fuller under sponsorship from + * the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. 
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + * + * $FreeBSD$ + */ + +#ifndef _BHND_EROM_BHND_EROMVAR_H_ +#define _BHND_EROM_BHND_EROMVAR_H_ + +#include + +#include "bhnd_erom.h" + +/* forward declarations */ +struct bhnd_erom_io; +struct bhnd_erom_iobus; + +/** @see bhnd_erom_io_map() */ +typedef int (bhnd_erom_io_map_t)(struct bhnd_erom_io *eio, + bhnd_addr_t addr, bhnd_size_t size); + +/** @see bhnd_erom_io_read() */ +typedef uint32_t (bhnd_erom_io_read_t)(struct bhnd_erom_io *eio, + bhnd_size_t offset, u_int width); + +/** @see bhnd_erom_io_fini() */ +typedef void (bhnd_erom_io_fini_t)(struct bhnd_erom_io *eio); + +/** + * Abstract EROM bus I/O support. + */ +struct bhnd_erom_io { + bhnd_erom_io_map_t *map; /**< @see bhnd_erom_io_map() */ + bhnd_erom_io_read_t *read; /**< @see bhnd_erom_io_read() */ + bhnd_erom_io_fini_t *fini; /**< @see bhnd_erom_io_fini(). May be NULL */ +}; + +/** + * EROM bus handle/tag I/O instance state. 
+ */ +struct bhnd_erom_iobus { + struct bhnd_erom_io eio; + bhnd_addr_t addr; /**< the address of @p bsh */ + bhnd_size_t size; /**< the size of @p bsh */ + bus_space_tag_t bst; /**< bus space tag */ + bus_space_handle_t bsh; /**< bus space handle mapping the full enumeration space */ + bool mapped; /**< if a mapping is active */ + bus_size_t offset; /**< the current mapped offset within bsh */ + bus_size_t limit; /**< the current mapped size relative to offset */ +}; + +#endif /* _BHND_EROM_BHND_EROMVAR_H_ */ Property changes on: head/sys/dev/bhnd/bhnd_eromvar.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/bhnd/bhndb/bhndb.c =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb.c (revision 324070) +++ head/sys/dev/bhnd/bhndb/bhndb.c (revision 324071) @@ -1,2238 +1,1920 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Abstract BHND Bridge Device Driver * * Provides generic support for bridging from a parent bus (such as PCI) to * a BHND-compatible bus (e.g. bcma or siba). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhndbvar.h" #include "bhndb_bus_if.h" #include "bhndb_hwdata.h" #include "bhndb_private.h" /* Debugging flags */ static u_long bhndb_debug = 0; TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug); enum { BHNDB_DEBUG_PRIO = 1 << 0, }; #define BHNDB_DEBUG(_type) (BHNDB_DEBUG_ ## _type & bhndb_debug) -static int bhndb_find_hostb_core(struct bhndb_softc *sc, - bhnd_erom_t *erom, - struct bhnd_core_info *core); - -static bhnd_erom_class_t *bhndb_probe_erom_class(struct bhndb_softc *sc, - struct bhnd_chipid *cid); - -static int bhndb_init_full_config(struct bhndb_softc *sc, - bhnd_erom_class_t *eromcls); - -static struct bhnd_core_info *bhndb_get_bridge_core(struct bhndb_softc *sc); - static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw); static int 
bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *r, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table); static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw); bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child); static struct rman *bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type); static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size); static int bhndb_activate_static_region( struct bhndb_softc *sc, struct bhndb_region *region, device_t child, int type, int rid, struct resource *r); static int bhndb_try_activate_resource( struct bhndb_softc *sc, device_t child, int type, int rid, struct resource *r, bool *indirect); static inline struct bhndb_dw_alloc *bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset); /** * Default bhndb(4) implementation of DEVICE_PROBE(). * * This function provides the default bhndb implementation of DEVICE_PROBE(), * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge(). 
*/ int bhndb_generic_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static void bhndb_probe_nomatch(device_t dev, device_t child) { const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> (no driver attached)\n", name); } static int bhndb_print_child(device_t dev, device_t child) { struct bhndb_softc *sc; struct resource_list *rl; int retval = 0; sc = device_get_softc(dev); retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int bhndb_child_pnpinfo_str(device_t bus, device_t child, char *buf, size_t buflen) { *buf = '\0'; return (0); } static int bhndb_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { struct bhndb_softc *sc; sc = device_get_softc(dev); snprintf(buf, buflen, "base=0x%llx", (unsigned long long) sc->chipid.enum_addr); return (0); } /** - * Return the bridge core info. Will panic if the bridge core info has not yet - * been populated during full bridge configuration. - * - * @param sc BHNDB device state. - */ -static struct bhnd_core_info * -bhndb_get_bridge_core(struct bhndb_softc *sc) -{ - if (!sc->have_br_core) - panic("bridge not yet fully configured; no bridge core!"); - - return (&sc->bridge_core); -} - -/** * Return true if @p cores matches the @p hw specification. * * @param sc BHNDB device state. * @param cores A device table to match against. * @param ncores The number of cores in @p cores. * @param hw The hardware description to be matched against. 
*/ static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw) { for (u_int i = 0; i < hw->num_hw_reqs; i++) { const struct bhnd_core_match *match; bool found; match = &hw->hw_reqs[i]; found = false; for (u_int d = 0; d < ncores; d++) { struct bhnd_core_info *core = &cores[d]; if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; if (!bhnd_core_matches(core, match)) continue; found = true; break; } if (!found) return (false); } return (true); } /** * Initialize the region maps and priority configuration in @p br using * the priority @p table and the set of cores enumerated by @p erom. * * @param sc The bhndb device state. * @param br The resource state to be configured. * @param erom EROM parser used to enumerate @p cores. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param table Hardware priority table to be used to determine the relative * priorities of per-core port resources. */ static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *br, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table) { const struct bhndb_hw_priority *hp; bhnd_addr_t addr; bhnd_size_t size; size_t prio_low, prio_default, prio_high; int error; /* The number of port regions per priority band that must be accessible * via dynamic register windows */ prio_low = 0; prio_default = 0; prio_high = 0; /* * Register bridge regions covering all statically mapped ports. */ for (u_int i = 0; i < ncores; i++) { const struct bhndb_regwin *regw; struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); for (regw = br->cfg->register_windows; regw->win_type != BHNDB_REGWIN_T_INVALID; regw++) { /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) continue; /* Skip non-matching cores. 
*/ if (!bhndb_regwin_match_core(regw, core)) continue; /* Fetch the base address of the mapped port */ error = bhnd_erom_lookup_core_addr(erom, &md, regw->d.core.port_type, regw->d.core.port, regw->d.core.region, NULL, &addr, &size); if (error) { /* Skip non-applicable register windows */ if (error == ENOENT) continue; return (error); } /* * Always defer to the register window's size. * * If the port size is smaller than the window size, * this ensures that we fully utilize register windows * larger than the referenced port. * * If the port size is larger than the window size, this * ensures that we do not directly map the allocations * within the region to a too-small window. */ size = regw->win_size; /* * Add to the bus region list. * * The window priority for a statically mapped * region is always HIGH. */ error = bhndb_add_resource_region(br, addr, size, BHNDB_PRIORITY_HIGH, regw); if (error) return (error); } } /* * Perform priority accounting and register bridge regions for all * ports defined in the priority table */ for (u_int i = 0; i < ncores; i++) { struct bhndb_region *region; struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); /* * Skip priority accounting for cores that ... */ /* ... do not require bridge resources */ if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; /* ... do not have a priority table entry */ hp = bhndb_hw_priority_find_core(table, core); if (hp == NULL) continue; /* ... are explicitly disabled in the priority table. */ if (hp->priority == BHNDB_PRIORITY_NONE) continue; /* Determine the number of dynamic windows required and * register their bus_region entries. */ for (u_int i = 0; i < hp->num_ports; i++) { const struct bhndb_port_priority *pp; pp = &hp->ports[i]; /* Fetch the address+size of the mapped port. 
*/ error = bhnd_erom_lookup_core_addr(erom, &md, pp->type, pp->port, pp->region, NULL, &addr, &size); if (error) { /* Skip ports not defined on this device */ if (error == ENOENT) continue; return (error); } /* Skip ports with an existing static mapping */ region = bhndb_find_resource_region(br, addr, size); if (region != NULL && region->static_regwin != NULL) continue; /* Define a dynamic region for this port */ error = bhndb_add_resource_region(br, addr, size, pp->priority, NULL); if (error) return (error); /* Update port mapping counts */ switch (pp->priority) { case BHNDB_PRIORITY_NONE: break; case BHNDB_PRIORITY_LOW: prio_low++; break; case BHNDB_PRIORITY_DEFAULT: prio_default++; break; case BHNDB_PRIORITY_HIGH: prio_high++; break; } } } /* Determine the minimum priority at which we'll allocate direct * register windows from our dynamic pool */ size_t prio_total = prio_low + prio_default + prio_high; if (prio_total <= br->dwa_count) { /* low+default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_LOW; } else if (prio_default + prio_high <= br->dwa_count) { /* default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_DEFAULT; } else { /* high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_HIGH; } if (BHNDB_DEBUG(PRIO)) { struct bhndb_region *region; const char *direct_msg, *type_msg; bhndb_priority_t prio, prio_min; prio_min = br->min_prio; device_printf(sc->dev, "min_prio: %d\n", prio_min); STAILQ_FOREACH(region, &br->bus_regions, link) { prio = region->priority; direct_msg = prio >= prio_min ? "direct" : "indirect"; type_msg = region->static_regwin ? "static" : "dynamic"; device_printf(sc->dev, "region 0x%llx+0x%llx priority " "%u %s/%s\n", (unsigned long long) region->addr, (unsigned long long) region->size, region->priority, direct_msg, type_msg); } } return (0); } /** * Find a hardware specification for @p dev. * * @param sc The bhndb device state. 
* @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param[out] hw On success, the matched hardware specification. * with @p dev. * * @retval 0 success * @retval non-zero if an error occurs fetching device info for comparison. */ static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw) { const struct bhndb_hw *next, *hw_table; /* Search for the first matching hardware config. */ hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev); for (next = hw_table; next->hw_reqs != NULL; next++) { if (!bhndb_hw_matches(sc, cores, ncores, next)) continue; /* Found */ *hw = next; return (0); } return (ENOENT); } /** * Helper function that must be called by subclass bhndb(4) drivers * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4) * APIs on the bridge device. * * This function will add a bridged bhnd(4) child device with a device order of * BHND_PROBE_BUS. Any subclass bhndb(4) driver may use the BHND_PROBE_* * priority bands to add additional devices that will be attached in * their preferred order relative to the bridged bhnd(4) bus. * - * @param dev The bridge device to attach. - * @param bridge_devclass The device class of the bridging core. This is used - * to automatically detect the bridge core, and to disable additional bridge - * cores (e.g. PCMCIA on a PCIe device). + * @param dev The bridge device to attach. + * @param cid The bridged device's chip identification. + * @param cores The bridged device's core table. + * @param ncores The number of cores in @p cores. + * @param bridge_core Core info for the bhnd(4) core serving as the host + * bridge. + * @param erom_class An erom parser class that may be used to parse + * the bridged device's device enumeration table. 
*/ int -bhndb_attach(device_t dev, bhnd_devclass_t bridge_devclass) +bhndb_attach(device_t dev, struct bhnd_chipid *cid, + struct bhnd_core_info *cores, u_int ncores, + struct bhnd_core_info *bridge_core, bhnd_erom_class_t *erom_class) { struct bhndb_devinfo *dinfo; struct bhndb_softc *sc; - const struct bhndb_hwcfg *cfg; - bhnd_erom_class_t *eromcls; + const struct bhndb_hw *hw; + const struct bhndb_hwcfg *hwcfg; + const struct bhndb_hw_priority *hwprio; + struct bhnd_erom_io *eio; + bhnd_erom_t *erom; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent_dev = device_get_parent(dev); - sc->bridge_class = bridge_devclass; + sc->bridge_core = *bridge_core; + sc->chipid = *cid; if ((error = bhnd_service_registry_init(&sc->services))) return (error); BHNDB_LOCK_INIT(sc); - - /* Populate generic resource allocation state. */ - cfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, sc->dev); - sc->bus_res = bhndb_alloc_resources(dev, sc->parent_dev, cfg); - if (sc->bus_res == NULL) - goto failed; - /* Allocate our host resources */ - if ((error = bhndb_alloc_host_resources(sc->bus_res))) - goto failed; + erom = NULL; - /* Probe for a usable EROM class for our bridged bhnd(4) bus and - * populate our chip identifier. 
*/ - BHNDB_LOCK(sc); - if ((eromcls = bhndb_probe_erom_class(sc, &sc->chipid)) == NULL) { - BHNDB_UNLOCK(sc); + /* Find a matching bridge hardware configuration */ + if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) { + device_printf(sc->dev, "unable to identify device, " + " using generic bridge resource definitions\n"); - device_printf(sc->dev, "device enumeration unsupported; no " - "compatible driver found\n"); - return (ENXIO); + hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, dev); + hw = NULL; + } else { + hwcfg = hw->cfg; } - BHNDB_UNLOCK(sc); + if (hw != NULL && (bootverbose || BHNDB_DEBUG(PRIO))) { + device_printf(sc->dev, "%s resource configuration\n", hw->name); + } + + /* Allocate bridge resource state using the discovered hardware + * configuration */ + sc->bus_res = bhndb_alloc_resources(sc->dev, sc->parent_dev, hwcfg); + if (sc->bus_res == NULL) { + device_printf(sc->dev, "failed to allocate bridge resource " + "state\n"); + error = ENOMEM; + goto failed; + } + /* Add our bridged bus device */ sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", -1); if (sc->bus_dev == NULL) { error = ENXIO; goto failed; } dinfo = device_get_ivars(sc->bus_dev); dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED; - /* Enumerate the bridged device and fully initialize our bridged - * resource configuration */ - if ((error = bhndb_init_full_config(sc, eromcls))) { - device_printf(sc->dev, "initializing full bridge " - "configuration failed: %d\n", error); + /* We can now use bhndb to perform bridging of SYS_RES_MEMORY resources; + * we use this to instantiate an erom parser instance */ + eio = bhnd_erom_iores_new(sc->bus_dev, 0); + if ((erom = bhnd_erom_alloc(erom_class, cid, eio)) == NULL) { + bhnd_erom_io_fini(eio); + error = ENXIO; goto failed; } - return (0); - -failed: - BHNDB_LOCK_DESTROY(sc); - - if (sc->bus_res != NULL) - bhndb_free_resources(sc->bus_res); - - bhnd_service_registry_fini(&sc->services); - - return (error); -} - - -/** - * Return a 
borrowed reference to the host resource mapping at least - * BHND_DEFAULT_CORE_SIZE bytes at the first bus core, for use with - * bhnd_erom_probe(). - * - * This may return a borrowed reference to a bhndb_dw_alloc-managed - * resource; any additional resource mapping requests may invalidate this - * borrowed reference. - * - * @param sc BHNDB driver state. - * @param[out] offset On success, the offset within the returned resource - * at which the first bus core can be found. - * - * @retval non-NULL success. - * @retval NULL If no usable mapping could be found. - */ -static struct resource * -bhndb_erom_chipc_resource(struct bhndb_softc *sc, bus_size_t *offset) -{ - const struct bhndb_hwcfg *cfg; - struct bhndb_dw_alloc *dwa; - struct resource *res; - const struct bhndb_regwin *win; - - BHNDB_LOCK_ASSERT(sc, MA_OWNED); - - cfg = sc->bus_res->cfg; - - /* Find a static register window mapping ChipCommon. */ - win = bhndb_regwin_find_core(cfg->register_windows, BHND_DEVCLASS_CC, - 0, BHND_PORT_DEVICE, 0, 0); - if (win != NULL) { - if (win->win_size < BHND_DEFAULT_CORE_SIZE) { - device_printf(sc->dev, - "chipcommon register window too small\n"); - return (NULL); - } - - res = bhndb_find_regwin_resource(sc->bus_res, win); - if (res == NULL) { - device_printf(sc->dev, - "chipcommon register window not allocated\n"); - return (NULL); - } - - *offset = win->win_offset; - return (res); - } - - /* We'll need to fetch and configure a dynamic window. We can assume a - * device without a static ChipCommon mapping uses the default siba(4) - * base address. */ - dwa = bhndb_io_resource(sc, BHND_DEFAULT_CHIPC_ADDR, - BHND_DEFAULT_CORE_SIZE, offset); - if (dwa != NULL) - return (dwa->parent_res); - - device_printf(sc->dev, "unable to map chipcommon registers; no usable " - "register window found\n"); - return (NULL); -} - -/** - * Probe all supported EROM classes, returning the best matching class - * (or NULL if not found), writing the probed chip identifier to @p cid. 
- * - * @param sc BHNDB driver state. - * @param cid On success, the bridged chipset's chip identifier. - */ -static bhnd_erom_class_t * -bhndb_probe_erom_class(struct bhndb_softc *sc, struct bhnd_chipid *cid) -{ - devclass_t bhndb_devclass; - const struct bhnd_chipid *hint; - struct resource *res; - bus_size_t res_offset; - driver_t **drivers; - int drv_count; - bhnd_erom_class_t *erom_cls; - int prio, result; - - BHNDB_LOCK_ASSERT(sc, MA_OWNED); - - erom_cls = NULL; - prio = 0; - - /* Let our parent device provide a chipid hint */ - hint = BHNDB_BUS_GET_CHIPID(sc->parent_dev, sc->dev); - - /* Fetch a borrowed reference to the resource mapping ChipCommon. */ - res = bhndb_erom_chipc_resource(sc, &res_offset); - if (res == NULL) - return (NULL); - - /* Fetch all available drivers */ - bhndb_devclass = device_get_devclass(sc->dev); - if (devclass_get_drivers(bhndb_devclass, &drivers, &drv_count) != 0) - return (NULL); - - /* Enumerate the drivers looking for the best available EROM class */ - for (int i = 0; i < drv_count; i++) { - struct bhnd_chipid pcid; - bhnd_erom_class_t *cls; - - cls = bhnd_driver_get_erom_class(drivers[i]); - if (cls == NULL) - continue; - - kobj_class_compile(cls); - - /* Probe the bus */ - result = bhnd_erom_probe(cls, &BHND_DIRECT_RESOURCE(res), - res_offset, hint, &pcid); - - /* The parser did not match if an error was returned */ - if (result > 0) - continue; - - /* Check for a new highest priority match */ - if (erom_cls == NULL || result > prio) { - prio = result; - - *cid = pcid; - erom_cls = cls; - } - - /* Terminate immediately on BUS_PROBE_SPECIFIC */ - if (result == BUS_PROBE_SPECIFIC) - break; - } - - return (erom_cls); -} - -/* ascending core index comparison used by bhndb_find_hostb_core() */ -static int -compare_core_index(const void *lhs, const void *rhs) -{ - u_int left = ((const struct bhnd_core_info *)lhs)->core_idx; - u_int right = ((const struct bhnd_core_info *)rhs)->core_idx; - - if (left < right) - return (-1); - else 
if (left > right) - return (1); - else - return (0); -} - -/** - * Search @p erom for the core serving as the bhnd host bridge. - * - * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged - * bhnd(4) devices to determine the hostb core: - * - * - The core must have a Broadcom vendor ID. - * - The core devclass must match the bridge type. - * - The core must be the first device on the bus with the bridged device - * class. - * - * @param sc BHNDB device state. - * @param erom The device enumeration table parser to be used to fetch - * core info. - * @param[out] core If found, the matching core info. - * - * @retval 0 success - * @retval ENOENT not found - * @retval non-zero if an error occured fetching core info. - */ -static int -bhndb_find_hostb_core(struct bhndb_softc *sc, bhnd_erom_t *erom, - struct bhnd_core_info *core) -{ - struct bhnd_core_match md; - struct bhnd_core_info *cores; - u_int ncores; - int error; - - if ((error = bhnd_erom_get_core_table(erom, &cores, &ncores))) - return (error); - - /* Set up a match descriptor for the required device class. */ - md = (struct bhnd_core_match) { - BHND_MATCH_CORE_CLASS(sc->bridge_class), - BHND_MATCH_CORE_UNIT(0) - }; - - /* Ensure the table is sorted by core index value, ascending; - * the host bridge must be the absolute first matching device on the - * bus. */ - qsort(cores, ncores, sizeof(*cores), compare_core_index); - - /* Find the hostb core */ - error = ENOENT; - for (u_int i = 0; i < ncores; i++) { - if (bhnd_core_matches(&cores[i], &md)) { - /* Found! */ - *core = cores[i]; - error = 0; - break; - } - } - - /* Clean up */ - bhnd_erom_free_core_table(erom, cores); - - return (error); -} - -/** - * Identify the bridged device and perform final bridge resource configuration - * based on capabilities of the enumerated device. - * - * Any bridged resources allocated using the generic brige hardware - * configuration must be released prior to calling this function. 
- */ -static int -bhndb_init_full_config(struct bhndb_softc *sc, bhnd_erom_class_t *eromcls) -{ - struct bhnd_core_info *cores; - struct bhndb_resources *br; - const struct bhndb_hw_priority *hwprio; - bhnd_erom_t *erom; - const struct bhndb_hw *hw; - u_int ncores; - int error; - - erom = NULL; - cores = NULL; - br = NULL; - - /* Allocate EROM parser instance */ - erom = bhnd_erom_alloc(eromcls, &sc->chipid, sc->bus_dev, 0); - if (erom == NULL) { - device_printf(sc->dev, "failed to allocate device enumeration " - "table parser\n"); - return (ENXIO); - } - - /* Look for our host bridge core */ - if ((error = bhndb_find_hostb_core(sc, erom, &sc->bridge_core))) { - device_printf(sc->dev, "no host bridge core found\n"); - goto cleanup; - } else { - sc->have_br_core = true; - } - - /* Fetch the bridged device's core table */ - if ((error = bhnd_erom_get_core_table(erom, &cores, &ncores))) { - device_printf(sc->dev, "error fetching core table: %d\n", - error); - goto cleanup; - } - - /* Find our full register window configuration */ - if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) { - device_printf(sc->dev, "unable to identify device, " - " using generic bridge resource definitions\n"); - error = 0; - goto cleanup; - } - - if (bootverbose || BHNDB_DEBUG(PRIO)) - device_printf(sc->dev, "%s resource configuration\n", hw->name); - - /* Allocate new bridge resource state using the discovered hardware - * configuration */ - br = bhndb_alloc_resources(sc->dev, sc->parent_dev, hw->cfg); - if (br == NULL) { - device_printf(sc->dev, - "failed to allocate new resource state\n"); - error = ENOMEM; - goto cleanup; - } - /* Populate our resource priority configuration */ hwprio = BHNDB_BUS_GET_HARDWARE_PRIO(sc->parent_dev, sc->dev); - error = bhndb_init_region_cfg(sc, erom, br, cores, ncores, hwprio); + error = bhndb_init_region_cfg(sc, erom, sc->bus_res, cores, ncores, + hwprio); if (error) { device_printf(sc->dev, "failed to initialize resource " "priority configuration: 
%d\n", error); - goto cleanup; + goto failed; } - /* The EROM parser holds a reference to the resource state we're - * about to invalidate */ - bhnd_erom_free_core_table(erom, cores); + /* Free our erom instance */ bhnd_erom_free(erom); - - cores = NULL; erom = NULL; - /* Replace existing resource state */ - bhndb_free_resources(sc->bus_res); - sc->bus_res = br; - - /* Pointer is now owned by sc->bus_res */ - br = NULL; - - /* Re-allocate host resources */ - if ((error = bhndb_alloc_host_resources(sc->bus_res))) { - device_printf(sc->dev, "failed to reallocate bridge host " - "resources: %d\n", error); - goto cleanup; - } - return (0); -cleanup: - if (cores != NULL) - bhnd_erom_free_core_table(erom, cores); +failed: + BHNDB_LOCK_DESTROY(sc); + if (sc->bus_res != NULL) + bhndb_free_resources(sc->bus_res); + if (erom != NULL) bhnd_erom_free(erom); - if (br != NULL) - bhndb_free_resources(br); + bhnd_service_registry_fini(&sc->services); return (error); } /** * Default bhndb(4) implementation of DEVICE_DETACH(). * * This function detaches any child devices, and if successful, releases all * resources held by the bridge device. */ int bhndb_generic_detach(device_t dev) { struct bhndb_softc *sc; int error; sc = device_get_softc(dev); /* Detach children */ if ((error = bus_generic_detach(dev))) return (error); /* Clean up our service registry */ if ((error = bhnd_service_registry_fini(&sc->services))) return (error); /* Clean up our driver state. */ bhndb_free_resources(sc->bus_res); BHNDB_LOCK_DESTROY(sc); return (0); } /** * Default bhndb(4) implementation of DEVICE_SUSPEND(). * * This function calls bus_generic_suspend() (or implements equivalent * behavior). */ int bhndb_generic_suspend(device_t dev) { return (bus_generic_suspend(dev)); } /** * Default bhndb(4) implementation of DEVICE_RESUME(). * * This function calls bus_generic_resume() (or implements equivalent * behavior). 
*/ int bhndb_generic_resume(device_t dev) { struct bhndb_softc *sc; struct bhndb_resources *bus_res; struct bhndb_dw_alloc *dwa; int error; sc = device_get_softc(dev); bus_res = sc->bus_res; /* Guarantee that all in-use dynamic register windows are mapped to * their previously configured target address. */ BHNDB_LOCK(sc); for (size_t i = 0; i < bus_res->dwa_count; i++) { dwa = &bus_res->dw_alloc[i]; /* Skip regions that were not previously used */ if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0) continue; /* Otherwise, ensure the register window is correct before * any children attempt MMIO */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) break; } BHNDB_UNLOCK(sc); /* Error restoring hardware state; children cannot be safely resumed */ if (error) { device_printf(dev, "Unable to restore hardware configuration; " "cannot resume: %d\n", error); return (error); } return (bus_generic_resume(dev)); } /** * Default implementation of BHNDB_SUSPEND_RESOURCE. */ static void bhndb_suspend_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; struct bhndb_dw_alloc *dwa; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return; BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa == NULL) { BHNDB_UNLOCK(sc); return; } if (BHNDB_DEBUG(PRIO)) device_printf(child, "suspend resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); /* Release the resource's window reference */ bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } /** * Default implementation of BHNDB_RESUME_RESOURCE. */ static int bhndb_resume_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. 
IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return (0); /* Inactive resources don't require reallocation of bridge resources */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (0); if (BHNDB_DEBUG(PRIO)) device_printf(child, "resume resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); return (bhndb_try_activate_resource(sc, rman_get_device(r), type, rman_get_rid(r), r, NULL)); } /** * Default bhndb(4) implementation of BUS_READ_IVAR(). */ static int bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } /** * Default bhndb(4) implementation of BUS_WRITE_IVAR(). */ static int bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * Return the address space for the given @p child device. */ bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child) { struct bhndb_devinfo *dinfo; device_t imd_dev; /* Find the directly attached parent of the requesting device */ imd_dev = child; while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev) imd_dev = device_get_parent(imd_dev); if (imd_dev == NULL) panic("bhndb address space request for non-child device %s\n", device_get_nameunit(child)); dinfo = device_get_ivars(imd_dev); return (dinfo->addrspace); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The bhndb device state. * @param child The requesting child. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) 
*/ static struct rman * bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) { switch (bhndb_get_addrspace(sc, child)) { case BHNDB_ADDRSPACE_NATIVE: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->ht_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } case BHNDB_ADDRSPACE_BRIDGED: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->br_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } } /* Quieten gcc */ return (NULL); } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_CORE_DISABLED(). 
*/ static bool bhndb_is_core_disabled(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc; - struct bhnd_core_info *bridge_core; sc = device_get_softc(dev); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ - bridge_core = bhndb_get_bridge_core(sc); if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(core))) - return (!bhnd_cores_equal(core, bridge_core)); + return (!bhnd_cores_equal(core, &sc->bridge_core)); /* Assume the core is populated */ return (false); } /** * Default bhndb(4) implementation of BHNDB_GET_HOSTB_CORE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices. */ static int bhndb_get_hostb_core(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc = device_get_softc(dev); - *core = *bhndb_get_bridge_core(sc); + *core = sc->bridge_core; return (0); } /** * Default bhndb(4) implementation of BHND_BUS_GET_SERVICE_REGISTRY(). */ static struct bhnd_service_registry * bhndb_get_service_registry(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->services); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { /* Delegate to our parent device's bus; the requested * resource type isn't handled locally. 
*/ return (BUS_ALLOC_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, start, end, count, flags)); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || count > ((end - start) + 1)) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int bhndb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. 
*/ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_RELEASE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; } return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). */ static int bhndb_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct bhndb_softc *sc; struct rman *rm; rman_res_t mstart, mend; int error; sc = device_get_softc(dev); error = 0; /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_ADJUST_RESOURCE(device_get_parent(sc->parent_dev), child, type, r, start, end)); } /* Verify basic constraints */ if (end <= start) return (EINVAL); if (!rman_is_region_manager(r, rm)) return (ENXIO); BHNDB_LOCK(sc); /* If not active, allow any range permitted by the resource manager */ if (!(rman_get_flags(r) & RF_ACTIVE)) goto done; /* Otherwise, the range is limited to the existing register window * mapping */ error = bhndb_find_resource_limits(sc->bus_res, r, &mstart, &mend); if (error) goto done; if (start < mstart || end > mend) { error = EINVAL; goto done; } /* Fall through */ done: if (!error) error = rman_adjust_resource(r, start, end); BHNDB_UNLOCK(sc); return (error); } /** * Initialize child resource @p r with a virtual address, tag, and handle * copied from @p parent, adjusted to contain only the range defined by * @p offset and @p size. * * @param r The resource to be initialized. * @param parent The parent bus resource that fully contains the subregion.
* @param offset The subregion offset within @p parent. * @param size The subregion size. * * @retval 0 success * @retval non-zero if @p r could not be initialized. */ static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size) { bus_space_handle_t bh, child_bh; bus_space_tag_t bt; uintptr_t vaddr; int error; /* Fetch the parent resource's real bus values */ vaddr = (uintptr_t) rman_get_virtual(parent); bt = rman_get_bustag(parent); bh = rman_get_bushandle(parent); /* Configure child resource with window-adjusted real bus values */ vaddr += offset; error = bus_space_subregion(bt, bh, offset, size, &child_bh); if (error) return (error); rman_set_virtual(r, (void *) vaddr); rman_set_bustag(r, bt); rman_set_bushandle(r, child_bh); return (0); } /** * Attempt activation of a fixed register window mapping for @p child. * * @param sc BHNDB device state. * @param region The static region definition capable of mapping @p r. * @param child A child requesting resource activation. * @param type Resource type. * @param rid Resource identifier. * @param r Resource to be activated. * * @retval 0 if @p r was activated successfully * @retval ENOENT if no fixed register window was found. * @retval non-zero if @p r could not be activated.
*/ static int bhndb_activate_static_region(struct bhndb_softc *sc, struct bhndb_region *region, device_t child, int type, int rid, struct resource *r) { struct resource *bridge_res; const struct bhndb_regwin *win; bhnd_size_t parent_offset; rman_res_t r_start, r_size; int error; win = region->static_regwin; KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type), ("can't activate non-static region")); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Find the corresponding bridge resource */ - bridge_res = bhndb_find_regwin_resource(sc->bus_res, win); + bridge_res = bhndb_host_resource_for_regwin(sc->bus_res->res, win); if (bridge_res == NULL) return (ENXIO); /* Calculate subregion offset within the parent resource */ parent_offset = r_start - region->addr; parent_offset += win->win_offset; /* Configure resource with its real bus values. */ error = bhndb_init_child_resource(r, bridge_res, parent_offset, r_size); if (error) return (error); /* Mark active */ if ((error = rman_activate_resource(r))) return (error); return (0); } /** * Attempt to allocate/retain a dynamic register window for @p r, returning * the retained window. * * @param sc The bhndb driver state. * @param r The resource for which a window will be retained. 
*/ static struct bhndb_dw_alloc * bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r) { struct bhndb_dw_alloc *dwa; rman_res_t r_start, r_size; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Look for an existing dynamic window we can reference */ dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size); if (dwa != NULL) { if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0) return (dwa); return (NULL); } /* Otherwise, try to reserve a free window */ dwa = bhndb_dw_next_free(sc->bus_res); if (dwa == NULL) { /* No free windows */ return (NULL); } /* Window must be large enough to map the entire resource */ if (dwa->win->win_size < rman_get_size(r)) return (NULL); /* Set the window target */ error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, rman_get_start(r), rman_get_size(r)); if (error) { device_printf(sc->dev, "dynamic window initialization " "for 0x%llx-0x%llx failed: %d\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, error); return (NULL); } /* Add our reservation */ if (bhndb_dw_retain(sc->bus_res, dwa, r)) return (NULL); return (dwa); } /** * Activate a resource using any viable static or dynamic register window. * * @param sc The bhndb driver state. * @param child The child holding ownership of @p r. * @param type The type of the resource to be activated. * @param rid The resource ID of @p r. * @param r The resource to be activated * @param[out] indirect On error and if not NULL, will be set to 'true' if * the caller should instead use an indirect resource mapping. * * @retval 0 success * @retval non-zero activation failed. 
*/ static int bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, int type, int rid, struct resource *r, bool *indirect) { struct bhndb_region *region; struct bhndb_dw_alloc *dwa; bhndb_priority_t dw_priority; rman_res_t r_start, r_size; rman_res_t parent_offset; int error; BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED); /* Only MMIO resources can be mapped via register windows */ if (type != SYS_RES_MEMORY) return (ENXIO); if (indirect) *indirect = false; r_start = rman_get_start(r); r_size = rman_get_size(r); /* Activate native addrspace resources using the host address space */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) { struct resource *parent; /* Find the bridge resource referenced by the child */ - parent = bhndb_find_resource_range(sc->bus_res, r_start, - r_size); + parent = bhndb_host_resource_for_range(sc->bus_res->res, + type, r_start, r_size); if (parent == NULL) { device_printf(sc->dev, "host resource not found " "for 0x%llx-0x%llx\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (ENOENT); } /* Initialize child resource with the real bus values */ error = bhndb_init_child_resource(r, parent, r_start - rman_get_start(parent), r_size); if (error) return (error); /* Try to activate child resource */ return (rman_activate_resource(r)); } /* Default to low priority */ dw_priority = BHNDB_PRIORITY_LOW; /* Look for a bus region matching the resource's address range */ region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) dw_priority = region->priority; /* Prefer static mappings over consuming a dynamic windows. 
*/ if (region && region->static_regwin) { error = bhndb_activate_static_region(sc, region, child, type, rid, r); if (error) device_printf(sc->dev, "static window allocation " "for 0x%llx-0x%llx failed\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (error); } /* A dynamic window will be required; is this resource high enough * priority to be reserved a dynamic window? */ if (dw_priority < sc->bus_res->min_prio) { if (indirect) *indirect = true; return (ENOMEM); } /* Find and retain a usable window */ BHNDB_LOCK(sc); { dwa = bhndb_retain_dynamic_window(sc, r); } BHNDB_UNLOCK(sc); if (dwa == NULL) { if (indirect) *indirect = true; return (ENOMEM); } /* Configure resource with its real bus values. */ parent_offset = dwa->win->win_offset; parent_offset += r_start - dwa->target; error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset, dwa->win->win_size); if (error) goto failed; /* Mark active */ if ((error = rman_activate_resource(r))) goto failed; return (0); failed: /* Release our region allocation. */ BHNDB_LOCK(sc); bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); return (error); } /** * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE(). * * Maps resource activation requests to a viable static or dynamic * register window, if any. */ static int bhndb_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_ACTIVATE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } return (bhndb_try_activate_resource(sc, child, type, rid, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). 
*/ static int bhndb_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; int error; sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_DEACTIVATE_RESOURCE( device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Free any dynamic window allocation. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa != NULL) bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } return (0); } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST(). */ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_NATIVE children, all resources may be assumed to * be activated by the bridge. * * For BHNDB_ADDRSPACE_BRIDGED children, attempts to activate a static register * window, a dynamic register window, or configures @p r as an indirect * resource -- in that order. */ static int bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct bhndb_softc *sc; struct bhndb_region *region; rman_res_t r_start, r_size; int error; bool indirect; KASSERT(!r->direct, ("direct flag set on inactive resource")); KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE), ("RF_ACTIVE set on inactive resource")); sc = device_get_softc(dev); /* Delegate directly to BUS_ACTIVATE_RESOURCE() if the requested * resource type isn't handled locally. 
*/ if (bhndb_get_rman(sc, child, type) == NULL) { error = BUS_ACTIVATE_RESOURCE(dev, child, type, rid, r->res); if (error == 0) r->direct = true; return (error); } r_start = rman_get_start(r->res); r_size = rman_get_size(r->res); /* Verify bridged address range's resource priority, and skip direct * allocation if the priority is too low. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { bhndb_priority_t r_prio; region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) r_prio = region->priority; else r_prio = BHNDB_PRIORITY_NONE; /* If less than the minimum dynamic window priority, this * resource should always be indirect. */ if (r_prio < sc->bus_res->min_prio) return (0); } /* Attempt direct activation */ error = bhndb_try_activate_resource(sc, child, type, rid, r->res, &indirect); if (!error) { r->direct = true; } else if (indirect) { /* The request was valid, but no viable register window is * available; indirection must be employed. */ error = 0; r->direct = false; } if (BHNDB_DEBUG(PRIO) && bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { device_printf(child, "activated 0x%llx-0x%llx as %s " "resource\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, r->direct ? "direct" : "indirect"); } return (error); } /** * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Indirect resources don't require activation */ if (!r->direct) return (0); KASSERT(rman_get_flags(r->res) & RF_ACTIVE, ("RF_ACTIVE not set on direct resource")); /* Perform deactivation */ error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r->res); if (!error) r->direct = false; return (error); } /** * Slow path for bhndb_io_resource(). 
* * Iterates over the existing allocated dynamic windows looking for a viable * in-use region; the first matching region is returned. */ static struct bhndb_dw_alloc * bhndb_io_resource_slow(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; /* Search for an existing dynamic mapping of this address range. * Static regions are not searched, as a statically mapped * region would never be allocated as an indirect resource. */ for (size_t i = 0; i < br->dwa_count; i++) { const struct bhndb_regwin *win; dwa = &br->dw_alloc[i]; win = dwa->win; KASSERT(win->win_type == BHNDB_REGWIN_T_DYN, ("invalid register window type")); /* Verify the range */ if (addr < dwa->target) continue; if (addr + size > dwa->target + win->win_size) continue; /* Found */ *offset = dwa->win->win_offset; *offset += addr - dwa->target; return (dwa); } /* not found */ return (NULL); } /** * Return a borrowed reference to a bridge resource allocation record capable * of handling bus I/O requests of @p size at @p addr. * * This will either return a reference to an existing allocation * record mapping the requested space, or will configure and return a free * allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. 
*/ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; /* Try to fetch a free window */ dwa = bhndb_dw_next_free(br); /* * If no dynamic windows are available, look for an existing * region that maps the target range. * * If none are found, this is a child driver bug -- our window * over-commit should only fail in the case where a child driver leaks * resources, or performs operations out-of-order. * * Broadcom HND chipsets are designed to not require register window * swapping during execution; as long as the child devices are * attached/detached correctly, using the hardware's required order * of operations, there should always be a window available for the * current operation. */ if (dwa == NULL) { dwa = bhndb_io_resource_slow(sc, addr, size, offset); if (dwa == NULL) { panic("register windows exhausted attempting to map " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } return (dwa); } /* Adjust the window if the I/O request won't fit in the current * target range.
*/ if (addr < dwa->target || addr > dwa->target + dwa->win->win_size || (dwa->target + dwa->win->win_size) - addr < size) { error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, rman_get_start(r->res) + \ offset, _io_size, &io_offset); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _name) \ static _type \ bhndb_bus_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _name (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _name) \ static void \ bhndb_bus_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _name (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */ #define BHNDB_IO_MISC(_type, _ptr, _op, _size) \ static void \ bhndb_bus_ ## _op ## _ ## _size (device_t dev, \ device_t child, struct 
bhnd_resource *r, bus_size_t offset, \ _type _ptr datap, bus_size_t count) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \ bus_ ## _op ## _ ## _size (io_res, io_offset, \ datap, count); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a complete set of read/write methods */ #define BHNDB_IO_METHODS(_type, _size) \ BHNDB_IO_READ(_type, _size) \ BHNDB_IO_WRITE(_type, _size) \ \ BHNDB_IO_READ(_type, stream_ ## _size) \ BHNDB_IO_WRITE(_type, stream_ ## _size) \ \ BHNDB_IO_MISC(_type, *, read_multi, _size) \ BHNDB_IO_MISC(_type, *, write_multi, _size) \ \ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \ \ BHNDB_IO_MISC(_type, , set_multi, _size) \ BHNDB_IO_MISC(_type, , set_region, _size) \ BHNDB_IO_MISC(_type, *, read_region, _size) \ BHNDB_IO_MISC(_type, *, write_region, _size) \ \ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \ BHNDB_IO_MISC(_type, *, write_region_stream, _size) BHNDB_IO_METHODS(uint8_t, 1); BHNDB_IO_METHODS(uint16_t, 2); BHNDB_IO_METHODS(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER(). */ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHNDB_IO_COMMON_SETUP(length); bus_barrier(io_res, io_offset + offset, length, flags); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BUS_GET_DMA_TAG(). 
*/ static bus_dma_tag_t bhndb_get_dma_tag(device_t dev, device_t child) { // TODO return (NULL); } static device_method_t bhndb_methods[] = { /* Device interface */ \ DEVMETHOD(device_probe, bhndb_generic_probe), DEVMETHOD(device_detach, bhndb_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bhndb_generic_suspend), DEVMETHOD(device_resume, bhndb_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhndb_probe_nomatch), DEVMETHOD(bus_print_child, bhndb_print_child), DEVMETHOD(bus_child_pnpinfo_str, bhndb_child_pnpinfo_str), DEVMETHOD(bus_child_location_str, bhndb_child_location_str), DEVMETHOD(bus_add_child, bhndb_add_child), DEVMETHOD(bus_child_deleted, bhndb_child_deleted), DEVMETHOD(bus_alloc_resource, bhndb_alloc_resource), DEVMETHOD(bus_release_resource, bhndb_release_resource), DEVMETHOD(bus_activate_resource, bhndb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhndb_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_bind_intr, bus_generic_bind_intr), DEVMETHOD(bus_describe_intr, bus_generic_describe_intr), DEVMETHOD(bus_get_dma_tag, bhndb_get_dma_tag), DEVMETHOD(bus_adjust_resource, bhndb_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource_list, bhndb_get_resource_list), DEVMETHOD(bus_read_ivar, bhndb_read_ivar), DEVMETHOD(bus_write_ivar, bhndb_write_ivar), /* BHNDB interface */ DEVMETHOD(bhndb_get_chipid, bhndb_get_chipid), DEVMETHOD(bhndb_is_core_disabled, bhndb_is_core_disabled), DEVMETHOD(bhndb_get_hostb_core, bhndb_get_hostb_core), DEVMETHOD(bhndb_suspend_resource, bhndb_suspend_resource), DEVMETHOD(bhndb_resume_resource, bhndb_resume_resource), /* BHND interface */ 
DEVMETHOD(bhnd_bus_get_chipid, bhndb_get_chipid), DEVMETHOD(bhnd_bus_activate_resource, bhndb_activate_bhnd_resource), DEVMETHOD(bhnd_bus_deactivate_resource, bhndb_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var), DEVMETHOD(bhnd_bus_get_service_registry,bhndb_get_service_registry), DEVMETHOD(bhnd_bus_register_provider, bhnd_bus_generic_sr_register_provider), DEVMETHOD(bhnd_bus_deregister_provider, bhnd_bus_generic_sr_deregister_provider), DEVMETHOD(bhnd_bus_retain_provider, bhnd_bus_generic_sr_retain_provider), DEVMETHOD(bhnd_bus_release_provider, bhnd_bus_generic_sr_release_provider), DEVMETHOD(bhnd_bus_read_1, bhndb_bus_read_1), DEVMETHOD(bhnd_bus_read_2, bhndb_bus_read_2), DEVMETHOD(bhnd_bus_read_4, bhndb_bus_read_4), DEVMETHOD(bhnd_bus_write_1, bhndb_bus_write_1), DEVMETHOD(bhnd_bus_write_2, bhndb_bus_write_2), DEVMETHOD(bhnd_bus_write_4, bhndb_bus_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhndb_bus_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhndb_bus_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhndb_bus_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhndb_bus_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhndb_bus_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhndb_bus_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhndb_bus_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhndb_bus_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhndb_bus_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhndb_bus_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhndb_bus_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhndb_bus_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, bhndb_bus_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhndb_bus_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhndb_bus_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhndb_bus_write_multi_stream_1), 
DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2), DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier), DEVMETHOD_END }; devclass_t bhndb_devclass; DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc)); MODULE_VERSION(bhndb, 1); MODULE_DEPEND(bhndb, bhnd, 1, 1, 1); Index: head/sys/dev/bhnd/bhndb/bhndb_pci.c =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb_pci.c (revision 324070) +++ head/sys/dev/bhnd/bhndb/bhndb_pci.c (revision 324071) @@ -1,786 +1,1179 @@ /*- * Copyright (c) 2015-2016 Landon Fuller + * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. 
* + * Portions of this software were developed by Landon Fuller + * under sponsorship from the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * PCI-specific implementation for the BHNDB bridge driver. * * Provides support for bridging from a PCI parent bus to a BHND-compatible * bus (e.g. bcma or siba) via a Broadcom PCI core configured in end-point * mode. * * This driver handles all initial generic host-level PCI interactions with a * PCI/PCIe bridge core operating in endpoint mode. 
Once the bridged bhnd(4) * bus has been enumerated, this driver works in tandem with a core-specific * bhnd_pci_hostb driver to manage the PCI core. */ #include #include #include #include #include #include #include #include #include #include +#include +#include +#include + #include #include "bhndb_pcireg.h" #include "bhndb_pcivar.h" #include "bhndb_private.h" +struct bhndb_pci_eio; + static int bhndb_pci_init_msi(struct bhndb_pci_softc *sc); +static int bhndb_pci_read_core_table(device_t dev, + struct bhnd_chipid *chipid, + struct bhnd_core_info **cores, u_int *ncores, + bhnd_erom_class_t **eromcls); static int bhndb_pci_add_children(struct bhndb_pci_softc *sc); -static int bhndb_enable_pci_clocks(struct bhndb_pci_softc *sc); -static int bhndb_disable_pci_clocks(struct bhndb_pci_softc *sc); +static bool bhndb_is_pcie_attached(device_t dev); -static int bhndb_pci_compat_setregwin(struct bhndb_pci_softc *, +static int bhndb_enable_pci_clocks(device_t dev); +static int bhndb_disable_pci_clocks(device_t dev); + +static int bhndb_pci_compat_setregwin(device_t dev, + device_t pci_dev, const struct bhndb_regwin *, + bhnd_addr_t); +static int bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev, const struct bhndb_regwin *, bhnd_addr_t); -static int bhndb_pci_fast_setregwin(struct bhndb_pci_softc *, - const struct bhndb_regwin *, bhnd_addr_t); static void bhndb_init_sromless_pci_config( struct bhndb_pci_softc *sc); static bus_addr_t bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc); static bus_size_t bhndb_pci_sprom_size(struct bhndb_pci_softc *sc); +static int bhndb_pci_eio_init(struct bhndb_pci_eio *pio, + device_t dev, device_t pci_dev, + struct bhndb_host_resources *hr); +static int bhndb_pci_eio_map(struct bhnd_erom_io *eio, + bhnd_addr_t addr, bhnd_size_t size); +static uint32_t bhndb_pci_eio_read(struct bhnd_erom_io *eio, + bhnd_size_t offset, u_int width); + #define BHNDB_PCI_MSI_COUNT 1 +/* bhndb_pci erom I/O implementation */ +struct bhndb_pci_eio { + struct 
bhnd_erom_io eio; + device_t dev; /**< bridge device */ + device_t pci_dev; /**< parent PCI device */ + struct bhndb_host_resources *hr; /**< borrowed reference to host resources */ + const struct bhndb_regwin *win; /**< mapped register window, or NULL */ + struct resource *res; /**< resource containing the register window, or NULL if no window mapped */ + bhnd_addr_t res_target; /**< current target address (if mapped) */ + bool mapped; /**< true if a valid mapping exists, false otherwise */ + bhnd_addr_t addr; /**< mapped address */ + bhnd_size_t size; /**< mapped size */ +}; + /** * Default bhndb_pci implementation of device_probe(). * * Verifies that the parent is a PCI/PCIe device. */ static int bhndb_pci_probe(device_t dev) { device_t parent; devclass_t parent_bus; devclass_t pci; /* Our parent must be a PCI/PCIe device. */ pci = devclass_find("pci"); parent = device_get_parent(dev); parent_bus = device_get_devclass(device_get_parent(parent)); if (parent_bus != pci) return (ENXIO); device_set_desc(dev, "PCI-BHND bridge"); return (BUS_PROBE_DEFAULT); } /* Configure MSI interrupts */ static int bhndb_pci_init_msi(struct bhndb_pci_softc *sc) { int error; /* Is MSI available? 
*/ if (pci_msi_count(sc->parent) < BHNDB_PCI_MSI_COUNT) return (ENXIO); /* Allocate expected message count */ sc->intr.msi_count = BHNDB_PCI_MSI_COUNT; if ((error = pci_alloc_msi(sc->parent, &sc->intr.msi_count))) { device_printf(sc->dev, "failed to allocate MSI interrupts: " "%d\n", error); return (error); } if (sc->intr.msi_count < BHNDB_PCI_MSI_COUNT) return (ENXIO); /* MSI uses resource IDs starting at 1 */ sc->intr.intr_rid = 1; return (0); } static int bhndb_pci_attach(device_t dev) { struct bhndb_pci_softc *sc; + struct bhnd_chipid cid; + struct bhnd_core_info *cores, hostb_core; + bhnd_erom_class_t *erom_class; + u_int ncores; int error, reg; sc = device_get_softc(dev); sc->dev = dev; sc->parent = device_get_parent(dev); - sc->set_regwin = bhndb_pci_compat_setregwin; + sc->set_regwin = NULL; + cores = NULL; + /* Enable PCI bus mastering */ pci_enable_busmaster(sc->parent); - /* Set up interrupt handling */ + /* Set up PCI interrupt handling */ if (bhndb_pci_init_msi(sc) == 0) { device_printf(dev, "Using MSI interrupts on %s\n", device_get_nameunit(sc->parent)); } else { device_printf(dev, "Using INTx interrupts on %s\n", device_get_nameunit(sc->parent)); sc->intr.intr_rid = 0; } /* Determine our bridge device class */ sc->pci_devclass = BHND_DEVCLASS_PCI; if (pci_find_cap(sc->parent, PCIY_EXPRESS, ®) == 0) sc->pci_devclass = BHND_DEVCLASS_PCIE; else sc->pci_devclass = BHND_DEVCLASS_PCI; /* Enable clocks (if required by this hardware) */ - if ((error = bhndb_enable_pci_clocks(sc))) + if ((error = bhndb_enable_pci_clocks(sc->dev))) goto cleanup; - /* Perform bridge attach, fully initializing the bridge - * configuration. 
*/ - if ((error = bhndb_attach(dev, sc->pci_devclass))) + /* Identify the chip and enumerate the bridged cores */ + error = bhndb_pci_read_core_table(dev, &cid, &cores, &ncores, + &erom_class); + if (error) goto cleanup; - /* If supported, switch to faster regwin handling */ - if (sc->bhndb.chipid.chip_type != BHND_CHIPTYPE_SIBA) { - atomic_store_rel_ptr((volatile void *) &sc->set_regwin, - (uintptr_t) &bhndb_pci_fast_setregwin); + /* Select the appropriate register window handler */ + if (cid.chip_type == BHND_CHIPTYPE_SIBA) { + sc->set_regwin = bhndb_pci_compat_setregwin; + } else { + sc->set_regwin = bhndb_pci_fast_setregwin; } - /* Enable PCI bus mastering */ - pci_enable_busmaster(sc->parent); + /* Determine our host bridge core */ + error = bhndb_find_hostb_core(cores, ncores, sc->pci_devclass, + &hostb_core); + if (error) + goto cleanup; + /* Perform bridge attach */ + error = bhndb_attach(dev, &cid, cores, ncores, &hostb_core, erom_class); + if (error) + goto cleanup; + /* Fix-up power on defaults for SROM-less devices. 
*/ bhndb_init_sromless_pci_config(sc); /* Add any additional child devices */ if ((error = bhndb_pci_add_children(sc))) goto cleanup; /* Probe and attach our children */ if ((error = bus_generic_attach(dev))) goto cleanup; + free(cores, M_BHND); + return (0); cleanup: device_delete_children(dev); - bhndb_disable_pci_clocks(sc); + bhndb_disable_pci_clocks(sc->dev); + if (sc->intr.msi_count > 0) pci_release_msi(dev); + if (cores != NULL) + free(cores, M_BHND); + pci_disable_busmaster(sc->parent); return (error); } static int bhndb_pci_detach(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); /* Attempt to detach our children */ if ((error = bus_generic_detach(dev))) return (error); /* Perform generic bridge detach */ if ((error = bhndb_generic_detach(dev))) return (error); /* Disable clocks (if required by this hardware) */ - if ((error = bhndb_disable_pci_clocks(sc))) + if ((error = bhndb_disable_pci_clocks(sc->dev))) return (error); /* Release MSI interrupts */ if (sc->intr.msi_count > 0) pci_release_msi(dev); /* Disable PCI bus mastering */ pci_disable_busmaster(sc->parent); return (0); } +/** + * Use the generic PCI bridge hardware configuration to enumerate the bridged + * bhnd(4) bus' core table. + * + * @note This function may be safely called prior to device attach, (e.g. + * from DEVICE_PROBE). + * @note This function requires exclusive ownership over allocating and + * configuring host bridge resources, and should only be called prior to + * completion of device attach and full configuration of the bridge. + * + * @param dev The bhndb_pci bridge device. + * @param[out] chipid On success, the parsed chip identification. + * @param[out] cores On success, the enumerated core table. The + * caller is responsible for freeing this table via + * bhndb_pci_free_core_table(). + * @param[out] ncores On success, the number of cores found in + * @p cores. 
+ * @param[out] eromcls On success, a pointer to the erom class used to + * parse the device enumeration table. This + * argument may be NULL if the class is not + * desired. + * + * @retval 0 success + * @retval non-zero if enumerating the bridged bhnd(4) bus fails, a regular + * unix error code will be returned. + */ static int +bhndb_pci_read_core_table(device_t dev, struct bhnd_chipid *chipid, + struct bhnd_core_info **cores, u_int *ncores, + bhnd_erom_class_t **eromcls) +{ + const struct bhndb_hwcfg *cfg; + struct bhndb_host_resources *hr; + struct bhndb_pci_eio pio; + struct bhnd_core_info *erom_cores; + const struct bhnd_chipid *hint; + struct bhnd_chipid cid; + bhnd_erom_class_t *erom_class; + bhnd_erom_t *erom; + device_t parent_dev; + u_int erom_ncores; + int error; + + parent_dev = device_get_parent(dev); + erom = NULL; + erom_cores = NULL; + + /* Fetch our chipid hint (if any) and generic hardware configuration */ + cfg = BHNDB_BUS_GET_GENERIC_HWCFG(parent_dev, dev); + hint = BHNDB_BUS_GET_CHIPID(parent_dev, dev); + + /* Allocate our host resources */ + if ((error = bhndb_alloc_host_resources(parent_dev, cfg, &hr))) + return (error); + + /* Initialize our erom I/O state */ + if ((error = bhndb_pci_eio_init(&pio, dev, parent_dev, hr))) + goto failed; + + /* Map the first bus core from our bridged bhnd(4) bus */ + error = bhndb_pci_eio_map(&pio.eio, BHND_DEFAULT_CHIPC_ADDR, + BHND_DEFAULT_CORE_SIZE); + if (error) + goto failed; + + /* Probe for a usable EROM class, and read the chip identifier */ + erom_class = bhnd_erom_probe_driver_classes(device_get_devclass(dev), + &pio.eio, hint, &cid); + if (erom_class == NULL) { + device_printf(dev, "device enumeration unsupported; no " + "compatible driver found\n"); + + error = ENXIO; + goto failed; + } + + /* Allocate EROM parser */ + if ((erom = bhnd_erom_alloc(erom_class, &cid, &pio.eio)) == NULL) { + device_printf(dev, "failed to allocate device enumeration " + "table parser\n"); + error = ENXIO; + goto 
failed; + } + + /* Read the full core table */ + error = bhnd_erom_get_core_table(erom, &erom_cores, &erom_ncores); + if (error) { + device_printf(dev, "error fetching core table: %d\n", error); + goto failed; + } + + /* Provide the results to our caller */ + *cores = malloc(sizeof(erom_cores[0]) * erom_ncores, M_BHND, M_WAITOK); + memcpy(*cores, erom_cores, sizeof(erom_cores[0]) * erom_ncores); + *ncores = erom_ncores; + + *chipid = cid; + if (eromcls != NULL) + *eromcls = erom_class; + + /* Clean up */ + bhnd_erom_free_core_table(erom, erom_cores); + bhnd_erom_free(erom); + bhndb_release_host_resources(hr); + + return (0); + +failed: + if (erom_cores != NULL) + bhnd_erom_free_core_table(erom, erom_cores); + + if (erom != NULL) + bhnd_erom_free(erom); + + bhndb_release_host_resources(hr); + return (error); +} + +static int bhndb_pci_add_children(struct bhndb_pci_softc *sc) { bus_size_t nv_sz; int error; /** * If SPROM is mapped directly into BAR0, add child NVRAM * device. */ nv_sz = bhndb_pci_sprom_size(sc); if (nv_sz > 0) { struct bhndb_devinfo *dinfo; device_t child; if (bootverbose) { device_printf(sc->dev, "found SPROM (%ju bytes)\n", (uintmax_t)nv_sz); } /* Add sprom device, ordered early enough to be available * before the bridged bhnd(4) bus is attached. */ child = BUS_ADD_CHILD(sc->dev, BHND_PROBE_ROOT + BHND_PROBE_ORDER_EARLY, "bhnd_nvram", -1); if (child == NULL) { device_printf(sc->dev, "failed to add sprom device\n"); return (ENXIO); } /* Initialize device address space and resource covering the * BAR0 SPROM shadow. 
*/ dinfo = device_get_ivars(child); dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; error = bus_set_resource(child, SYS_RES_MEMORY, 0, bhndb_pci_sprom_addr(sc), nv_sz); if (error) { device_printf(sc->dev, "failed to register sprom resources\n"); return (error); } } return (0); } static const struct bhndb_regwin * bhndb_pci_sprom_regwin(struct bhndb_pci_softc *sc) { struct bhndb_resources *bres; const struct bhndb_hwcfg *cfg; const struct bhndb_regwin *sprom_win; bres = sc->bhndb.bus_res; cfg = bres->cfg; sprom_win = bhndb_regwin_find_type(cfg->register_windows, BHNDB_REGWIN_T_SPROM, BHNDB_PCI_V0_BAR0_SPROM_SIZE); return (sprom_win); } static bus_addr_t bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc) { const struct bhndb_regwin *sprom_win; struct resource *r; /* Fetch the SPROM register window */ sprom_win = bhndb_pci_sprom_regwin(sc); KASSERT(sprom_win != NULL, ("requested sprom address on PCI_V2+")); /* Fetch the associated resource */ - r = bhndb_find_regwin_resource(sc->bhndb.bus_res, sprom_win); + r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, sprom_win); KASSERT(r != NULL, ("missing resource for sprom window\n")); return (rman_get_start(r) + sprom_win->win_offset); } static bus_size_t bhndb_pci_sprom_size(struct bhndb_pci_softc *sc) { const struct bhndb_regwin *sprom_win; uint32_t sctl; bus_size_t sprom_sz; sprom_win = bhndb_pci_sprom_regwin(sc); /* PCI_V2 and later devices map SPROM/OTP via ChipCommon */ if (sprom_win == NULL) return (0); /* Determine SPROM size */ sctl = pci_read_config(sc->parent, BHNDB_PCI_SPROM_CONTROL, 4); if (sctl & BHNDB_PCI_SPROM_BLANK) return (0); switch (sctl & BHNDB_PCI_SPROM_SZ_MASK) { case BHNDB_PCI_SPROM_SZ_1KB: sprom_sz = (1 * 1024); break; case BHNDB_PCI_SPROM_SZ_4KB: sprom_sz = (4 * 1024); break; case BHNDB_PCI_SPROM_SZ_16KB: sprom_sz = (16 * 1024); break; case BHNDB_PCI_SPROM_SZ_RESERVED: default: device_printf(sc->dev, "invalid PCI sprom size 0x%x\n", sctl); return (0); } if (sprom_sz > sprom_win->win_size) { 
device_printf(sc->dev, "PCI sprom size (0x%x) overruns defined register window\n", sctl); return (0); } return (sprom_sz); } /* * On devices without a SROM, the PCI(e) cores will be initialized with * their Power-on-Reset defaults; this can leave two of the BAR0 PCI windows * mapped to the wrong core. * * This function updates the SROM shadow to point the BAR0 windows at the * current PCI core. * * Applies to all PCI/PCIe revisions. */ static void bhndb_init_sromless_pci_config(struct bhndb_pci_softc *sc) { struct bhndb_resources *bres; const struct bhndb_hwcfg *cfg; const struct bhndb_regwin *win; struct bhnd_core_info hostb_core; struct resource *core_regs; bus_size_t srom_offset; u_int pci_cidx, sprom_cidx; uint16_t val; int error; bres = sc->bhndb.bus_res; cfg = bres->cfg; /* Find our hostb core */ error = BHNDB_GET_HOSTB_CORE(sc->dev, sc->bhndb.bus_dev, &hostb_core); if (error) { device_printf(sc->dev, "no host bridge device found\n"); return; } if (hostb_core.vendor != BHND_MFGID_BCM) return; switch (hostb_core.device) { case BHND_COREID_PCI: srom_offset = BHND_PCI_SRSH_PI_OFFSET; break; case BHND_COREID_PCIE: srom_offset = BHND_PCIE_SRSH_PI_OFFSET; break; default: device_printf(sc->dev, "unsupported PCI host bridge device\n"); return; } /* Locate the static register window mapping the PCI core */ win = bhndb_regwin_find_core(cfg->register_windows, sc->pci_devclass, 0, BHND_PORT_DEVICE, 0, 0); if (win == NULL) { device_printf(sc->dev, "missing PCI core register window\n"); return; } /* Fetch the resource containing the register window */ - core_regs = bhndb_find_regwin_resource(bres, win); + core_regs = bhndb_host_resource_for_regwin(bres->res, win); if (core_regs == NULL) { device_printf(sc->dev, "missing PCI core register resource\n"); return; } /* Fetch the SPROM's configured core index */ val = bus_read_2(core_regs, win->win_offset + srom_offset); sprom_cidx = (val & BHND_PCI_SRSH_PI_MASK) >> BHND_PCI_SRSH_PI_SHIFT; /* If it doesn't match host bridge's 
core index, update the index * value */ pci_cidx = hostb_core.core_idx; if (sprom_cidx != pci_cidx) { val &= ~BHND_PCI_SRSH_PI_MASK; val |= (pci_cidx << BHND_PCI_SRSH_PI_SHIFT); bus_write_2(core_regs, win->win_offset + srom_offset, val); } } static int bhndb_pci_resume(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); /* Enable clocks (if supported by this hardware) */ - if ((error = bhndb_enable_pci_clocks(sc))) + if ((error = bhndb_enable_pci_clocks(sc->dev))) return (error); /* Perform resume */ return (bhndb_generic_resume(dev)); } static int bhndb_pci_suspend(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); /* Disable clocks (if supported by this hardware) */ - if ((error = bhndb_disable_pci_clocks(sc))) + if ((error = bhndb_disable_pci_clocks(sc->dev))) return (error); /* Perform suspend */ return (bhndb_generic_suspend(dev)); } static int bhndb_pci_set_window_addr(device_t dev, const struct bhndb_regwin *rw, bhnd_addr_t addr) { struct bhndb_pci_softc *sc = device_get_softc(dev); - return (sc->set_regwin(sc, rw, addr)); + return (sc->set_regwin(sc->dev, sc->parent, rw, addr)); } /** * A siba(4) and bcma(4)-compatible bhndb_set_window_addr implementation. * * On siba(4) devices, it's possible that writing a PCI window register may * not succeed; it's necessary to immediately read the configuration register * and retry if not set to the desired value. * * This is not necessary on bcma(4) devices, but other than the overhead of * validating the register, there's no harm in performing the verification. 
*/ static int -bhndb_pci_compat_setregwin(struct bhndb_pci_softc *sc, +bhndb_pci_compat_setregwin(device_t dev, device_t pci_dev, const struct bhndb_regwin *rw, bhnd_addr_t addr) { int error; int reg; if (rw->win_type != BHNDB_REGWIN_T_DYN) return (ENODEV); reg = rw->d.dyn.cfg_offset; for (u_int i = 0; i < BHNDB_PCI_BARCTRL_WRITE_RETRY; i++) { - if ((error = bhndb_pci_fast_setregwin(sc, rw, addr))) + if ((error = bhndb_pci_fast_setregwin(dev, pci_dev, rw, addr))) return (error); - if (pci_read_config(sc->parent, reg, 4) == addr) + if (pci_read_config(pci_dev, reg, 4) == addr) return (0); DELAY(10); } /* Unable to set window */ return (ENODEV); } /** * A bcma(4)-only bhndb_set_window_addr implementation. */ static int -bhndb_pci_fast_setregwin(struct bhndb_pci_softc *sc, +bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev, const struct bhndb_regwin *rw, bhnd_addr_t addr) { /* The PCI bridge core only supports 32-bit addressing, regardless * of the bus' support for 64-bit addressing */ if (addr > UINT32_MAX) return (ERANGE); switch (rw->win_type) { case BHNDB_REGWIN_T_DYN: /* Addresses must be page aligned */ if (addr % rw->win_size != 0) return (EINVAL); - pci_write_config(sc->parent, rw->d.dyn.cfg_offset, addr, 4); + pci_write_config(pci_dev, rw->d.dyn.cfg_offset, addr, 4); break; default: return (ENODEV); } return (0); } static int bhndb_pci_populate_board_info(device_t dev, device_t child, struct bhnd_board_info *info) { struct bhndb_pci_softc *sc; sc = device_get_softc(dev); /* * On a subset of Apple BCM4360 modules, always prefer the * PCI subdevice to the SPROM-supplied boardtype. * * TODO: * * Broadcom's own drivers implement this override, and then later use * the remapped BCM4360 board type to determine the required * board-specific workarounds. * * Without access to this hardware, it's unclear why this mapping * is done, and we must do the same. 
If we can survey the hardware * in question, it may be possible to replace this behavior with * explicit references to the SPROM-supplied boardtype(s) in our * quirk definitions. */ if (pci_get_subvendor(sc->parent) == PCI_VENDOR_APPLE) { switch (info->board_type) { case BHND_BOARD_BCM94360X29C: case BHND_BOARD_BCM94360X29CP2: case BHND_BOARD_BCM94360X51: case BHND_BOARD_BCM94360X51P2: info->board_type = 0; /* allow override below */ break; default: break; } } /* If NVRAM did not supply vendor/type info, provide the PCI * subvendor/subdevice values. */ if (info->board_vendor == 0) info->board_vendor = pci_get_subvendor(sc->parent); if (info->board_type == 0) info->board_type = pci_get_subdevice(sc->parent); return (0); } /** + * Return true if the bridge device @p bhndb is attached via PCIe, + * false otherwise. + * + * @param dev The bhndb bridge device + */ +static bool +bhndb_is_pcie_attached(device_t dev) +{ + int reg; + + if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, ®) == 0) + return (true); + + return (false); +} + +/** * Enable externally managed clocks, if required. * * Some PCI chipsets (BCM4306, possibly others) chips do not support * the idle low-power clock. Clocking must be bootstrapped at * attach/resume by directly adjusting GPIO registers exposed in the * PCI config space, and correspondingly, explicitly shutdown at * detach/suspend. - * - * @param sc Bridge driver state. + * + * @note This function may be safely called prior to device attach, (e.g. + * from DEVICE_PROBE). 
+ * + * @param dev The bhndb bridge device */ static int -bhndb_enable_pci_clocks(struct bhndb_pci_softc *sc) +bhndb_enable_pci_clocks(device_t dev) { + device_t pci_dev; uint32_t gpio_in, gpio_out, gpio_en; uint32_t gpio_flags; uint16_t pci_status; + pci_dev = device_get_parent(dev); + /* Only supported and required on PCI devices */ - if (sc->pci_devclass != BHND_DEVCLASS_PCI) + if (!bhndb_is_pcie_attached(dev)) return (0); /* Read state of XTAL pin */ - gpio_in = pci_read_config(sc->parent, BHNDB_PCI_GPIO_IN, 4); + gpio_in = pci_read_config(pci_dev, BHNDB_PCI_GPIO_IN, 4); if (gpio_in & BHNDB_PCI_GPIO_XTAL_ON) return (0); /* already enabled */ /* Fetch current config */ - gpio_out = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUT, 4); - gpio_en = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUTEN, 4); + gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4); + gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4); /* Set PLL_OFF/XTAL_ON pins to HIGH and enable both pins */ gpio_flags = (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON); gpio_out |= gpio_flags; gpio_en |= gpio_flags; - pci_write_config(sc->parent, BHNDB_PCI_GPIO_OUT, gpio_out, 4); - pci_write_config(sc->parent, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4); + pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4); + pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4); DELAY(1000); /* Reset PLL_OFF */ gpio_out &= ~BHNDB_PCI_GPIO_PLL_OFF; - pci_write_config(sc->parent, BHNDB_PCI_GPIO_OUT, gpio_out, 4); + pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4); DELAY(5000); /* Clear any PCI 'sent target-abort' flag. */ - pci_status = pci_read_config(sc->parent, PCIR_STATUS, 2); + pci_status = pci_read_config(pci_dev, PCIR_STATUS, 2); pci_status &= ~PCIM_STATUS_STABORT; - pci_write_config(sc->parent, PCIR_STATUS, pci_status, 2); + pci_write_config(pci_dev, PCIR_STATUS, pci_status, 2); return (0); } /** * Disable externally managed clocks, if required. - * - * @param sc Bridge driver state. 
+ * + * This function may be safely called prior to device attach, (e.g. + * from DEVICE_PROBE). + * + * @param dev The bhndb bridge device */ static int -bhndb_disable_pci_clocks(struct bhndb_pci_softc *sc) +bhndb_disable_pci_clocks(device_t dev) { + device_t pci_dev; uint32_t gpio_out, gpio_en; + pci_dev = device_get_parent(dev); + /* Only supported and required on PCI devices */ - if (sc->pci_devclass != BHND_DEVCLASS_PCI) + if (bhndb_is_pcie_attached(dev)) return (0); /* Fetch current config */ - gpio_out = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUT, 4); - gpio_en = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUTEN, 4); + gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4); + gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4); /* Set PLL_OFF to HIGH, XTAL_ON to LOW. */ gpio_out &= ~BHNDB_PCI_GPIO_XTAL_ON; gpio_out |= BHNDB_PCI_GPIO_PLL_OFF; - pci_write_config(sc->parent, BHNDB_PCI_GPIO_OUT, gpio_out, 4); + pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4); /* Enable both output pins */ gpio_en |= (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON); - pci_write_config(sc->parent, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4); + pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4); return (0); } static bhnd_clksrc bhndb_pci_pwrctl_get_clksrc(device_t dev, device_t child, bhnd_clock clock) { struct bhndb_pci_softc *sc; uint32_t gpio_out; sc = device_get_softc(dev); /* Only supported on PCI devices */ - if (sc->pci_devclass != BHND_DEVCLASS_PCI) + if (bhndb_is_pcie_attached(sc->dev)) return (ENODEV); /* Only ILP is supported */ if (clock != BHND_CLOCK_ILP) return (ENXIO); gpio_out = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUT, 4); if (gpio_out & BHNDB_PCI_GPIO_SCS) return (BHND_CLKSRC_PCI); else return (BHND_CLKSRC_XTAL); } static int bhndb_pci_pwrctl_gate_clock(device_t dev, device_t child, bhnd_clock clock) { struct bhndb_pci_softc *sc = device_get_softc(dev); /* Only supported on PCI devices */ - if (sc->pci_devclass != BHND_DEVCLASS_PCI) + 
if (bhndb_is_pcie_attached(sc->dev)) return (ENODEV); /* Only HT is supported */ if (clock != BHND_CLOCK_HT) return (ENXIO); - return (bhndb_disable_pci_clocks(sc)); + return (bhndb_disable_pci_clocks(sc->dev)); } static int bhndb_pci_pwrctl_ungate_clock(device_t dev, device_t child, bhnd_clock clock) { struct bhndb_pci_softc *sc = device_get_softc(dev); /* Only supported on PCI devices */ - if (sc->pci_devclass != BHND_DEVCLASS_PCI) + if (bhndb_is_pcie_attached(sc->dev)) return (ENODEV); /* Only HT is supported */ if (clock != BHND_CLOCK_HT) return (ENXIO); - return (bhndb_enable_pci_clocks(sc)); + return (bhndb_enable_pci_clocks(sc->dev)); } static int bhndb_pci_assign_intr(device_t dev, device_t child, int rid) { struct bhndb_pci_softc *sc; rman_res_t start, count; int error; sc = device_get_softc(dev); /* Is the rid valid? */ if (rid >= bhnd_get_intr_count(child)) return (EINVAL); /* Fetch our common PCI interrupt's start/count. */ error = bus_get_resource(sc->parent, SYS_RES_IRQ, sc->intr.intr_rid, &start, &count); if (error) return (error); /* Add to child's resource list */ return (bus_set_resource(child, SYS_RES_IRQ, rid, start, count)); +} + +/** + * Initialize a new bhndb PCI bridge EROM I/O instance. This EROM I/O + * implementation supports mapping of the device enumeration table via the + * @p hr host resources. + * + * @param pio The instance to be initialized. + * @param dev The bridge device. + * @param pci_dev The bridge's parent PCI device. + * @param hr The host resources to be used to map the device + * enumeration table. 
+ */ +static int +bhndb_pci_eio_init(struct bhndb_pci_eio *pio, device_t dev, device_t pci_dev, + struct bhndb_host_resources *hr) +{ + memset(&pio->eio, sizeof(pio->eio), 0); + pio->eio.map = bhndb_pci_eio_map; + pio->eio.read = bhndb_pci_eio_read; + pio->eio.fini = NULL; + + pio->dev = dev; + pio->pci_dev = pci_dev; + pio->hr = hr; + pio->win = NULL; + pio->res = NULL; + + return (0); +} + +/** + * Attempt to adjust the dynamic register window backing @p pio to permit + * reading @p size bytes at @p addr. + * + * If @p addr or @p size fall outside the existing mapped range, or if + * @p pio is not backed by a dynamic register window, ENXIO will be returned. + * + * @param pio The bhndb PCI erom I/O state to be modified. + * @param addr The address to be include + */ +static int +bhndb_pci_eio_adjust_mapping(struct bhndb_pci_eio *pio, bhnd_addr_t addr, + bhnd_size_t size) +{ + bhnd_addr_t target; + bhnd_size_t offset; + int error; + + + KASSERT(pio->win != NULL, ("missing register window")); + KASSERT(pio->res != NULL, ("missing regwin resource")); + KASSERT(pio->win->win_type == BHNDB_REGWIN_T_DYN, + ("unexpected window type %d", pio->win->win_type)); + + /* The requested subrange must fall within the total mapped range */ + if (addr < pio->addr || (addr - pio->addr) > pio->size || + size > pio->size || (addr - pio->addr) - pio->size < size) + { + return (ENXIO); + } + + /* Do we already have a useable mapping? 
*/ + if (addr >= pio->res_target && + addr <= pio->res_target + pio->win->win_size && + (pio->res_target + pio->win->win_size) - addr >= size) + { + return (0); + } + + /* Page-align the target address */ + offset = addr % pio->win->win_size; + target = addr - offset; + + /* Configure the register window */ + error = bhndb_pci_compat_setregwin(pio->dev, pio->pci_dev, pio->win, + target); + if (error) { + device_printf(pio->dev, "failed to configure dynamic register " + "window: %d\n", error); + return (error); + } + + pio->res_target = target; + return (0); +} + +/* bhnd_erom_io_map() implementation */ +static int +bhndb_pci_eio_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, + bhnd_size_t size) +{ + struct bhndb_pci_eio *pio; + const struct bhndb_regwin *regwin; + struct resource *r; + bhnd_addr_t target; + bhnd_size_t offset; + int error; + + pio = (struct bhndb_pci_eio *)eio; + + /* Locate a useable dynamic register window */ + regwin = bhndb_regwin_find_type(pio->hr->cfg->register_windows, + BHNDB_REGWIN_T_DYN, MIN(size, BHND_DEFAULT_CORE_SIZE)); + if (regwin == NULL) { + device_printf(pio->dev, "unable to map %#jx+%#jx; no " + "usable dynamic register window found\n", addr, size); + return (ENXIO); + } + + /* Locate the host resource mapping our register window */ + if ((r = bhndb_host_resource_for_regwin(pio->hr, regwin)) == NULL) { + device_printf(pio->dev, "unable to map %#jx+%#jx; no " + "usable register resource found\n", addr, size); + return (ENXIO); + } + + /* Page-align the target address */ + offset = addr % regwin->win_size; + target = addr - offset; + + /* Configure the register window */ + error = bhndb_pci_compat_setregwin(pio->dev, pio->pci_dev, regwin, + target); + if (error) { + device_printf(pio->dev, "failed to configure dynamic register " + "window: %d\n", error); + return (error); + } + + /* Update our mapping state */ + pio->win = regwin; + pio->res = r; + pio->addr = addr; + pio->size = size; + pio->res_target = target; + + return (0); 
+} + +/* bhnd_erom_io_read() implementation */ +static uint32_t +bhndb_pci_eio_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width) +{ + struct bhndb_pci_eio *pio; + bhnd_addr_t addr; + bus_size_t res_offset; + int error; + + pio = (struct bhndb_pci_eio *)eio; + + /* Calculate absolute address */ + if (BHND_SIZE_MAX - offset < pio->addr) { + device_printf(pio->dev, "invalid offset %#jx+%#jx\n", pio->addr, + offset); + return (UINT32_MAX); + } + + addr = pio->addr + offset; + + /* Adjust the mapping for our read */ + if ((error = bhndb_pci_eio_adjust_mapping(pio, addr, width))) { + device_printf(pio->dev, "failed to adjust register mapping: " + "%d\n", error); + return (UINT32_MAX); + } + + KASSERT(pio->res_target <= addr, ("invalid mapping (%#jx vs. %#jx)", + pio->res_target, addr)); + + /* Determine the actual read offset within our register window + * resource */ + res_offset = (addr - pio->res_target) + pio->win->win_offset; + + /* Perform our read */ + switch (width) { + case 1: + return (bus_read_1(pio->res, res_offset)); + case 2: + return (bus_read_2(pio->res, res_offset)); + case 4: + return (bus_read_4(pio->res, res_offset)); + default: + panic("unsupported width: %u", width); + } } static device_method_t bhndb_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bhndb_pci_probe), DEVMETHOD(device_attach, bhndb_pci_attach), DEVMETHOD(device_resume, bhndb_pci_resume), DEVMETHOD(device_suspend, bhndb_pci_suspend), DEVMETHOD(device_detach, bhndb_pci_detach), /* BHND interface */ DEVMETHOD(bhnd_bus_assign_intr, bhndb_pci_assign_intr), DEVMETHOD(bhnd_bus_pwrctl_get_clksrc, bhndb_pci_pwrctl_get_clksrc), DEVMETHOD(bhnd_bus_pwrctl_gate_clock, bhndb_pci_pwrctl_gate_clock), DEVMETHOD(bhnd_bus_pwrctl_ungate_clock, bhndb_pci_pwrctl_ungate_clock), /* BHNDB interface */ DEVMETHOD(bhndb_set_window_addr, bhndb_pci_set_window_addr), DEVMETHOD(bhndb_populate_board_info, bhndb_pci_populate_board_info), DEVMETHOD_END }; DEFINE_CLASS_1(bhndb, 
bhndb_pci_driver, bhndb_pci_methods, sizeof(struct bhndb_pci_softc), bhndb_driver); MODULE_VERSION(bhndb_pci, 1); MODULE_DEPEND(bhndb_pci, bhnd_pci_hostb, 1, 1, 1); MODULE_DEPEND(bhndb_pci, pci, 1, 1, 1); MODULE_DEPEND(bhndb_pci, bhndb, 1, 1, 1); MODULE_DEPEND(bhndb_pci, bhnd, 1, 1, 1); Index: head/sys/dev/bhnd/bhndb/bhndb_pcivar.h =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb_pcivar.h (revision 324070) +++ head/sys/dev/bhnd/bhndb/bhndb_pcivar.h (revision 324071) @@ -1,67 +1,67 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
* * $FreeBSD$ */ #ifndef _BHND_BHNDB_PCIVAR_H_ #define _BHND_BHNDB_PCIVAR_H_ #include "bhndbvar.h" /* * bhndb(4) PCI driver subclass. */ DECLARE_CLASS(bhndb_pci_driver); struct bhndb_pci_softc; /* * An interconnect-specific function implementing BHNDB_SET_WINDOW_ADDR */ -typedef int (*bhndb_pci_set_regwin_t)(struct bhndb_pci_softc *sc, +typedef int (*bhndb_pci_set_regwin_t)(device_t dev, device_t pci_dev, const struct bhndb_regwin *rw, bhnd_addr_t addr); /* bhndb_pci interrupt state */ struct bhndb_pci_intr { int msi_count; /**< MSI count, or 0 */ int intr_rid; /**< interrupt resource ID.*/ }; struct bhndb_pci_softc { struct bhndb_softc bhndb; /**< parent softc */ device_t dev; /**< bridge device */ device_t parent; /**< parent PCI device */ bhnd_devclass_t pci_devclass; /**< PCI core's devclass */ struct bhndb_pci_intr intr; /**< PCI interrupt config */ bhndb_pci_set_regwin_t set_regwin; /**< regwin handler */ }; #endif /* _BHND_BHNDB_PCIVAR_H_ */ Index: head/sys/dev/bhnd/bhndb/bhndb_private.h =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb_private.h (revision 324070) +++ head/sys/dev/bhnd/bhndb/bhndb_private.h (revision 324071) @@ -1,260 +1,223 @@ /*- - * Copyright (c) 2015 Landon Fuller + * Copyright (c) 2015-2016 Landon Fuller * All rights reserved. * + * Portions of this software were developed by Landon Fuller + * under sponsorship from the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #ifndef _BHND_BHNDB_PRIVATE_H_ #define _BHND_BHNDB_PRIVATE_H_ #include #include #include #include #include #include #include #include "bhndbvar.h" /* * Private bhndb(4) driver definitions. 
*/ struct bhndb_dw_alloc; struct bhndb_region; struct bhndb_resources; -struct resource *bhndb_find_resource_range( - struct bhndb_resources *br, - rman_res_t start, rman_res_t count); - -struct resource *bhndb_find_regwin_resource( - struct bhndb_resources *br, - const struct bhndb_regwin *win); - struct bhndb_resources *bhndb_alloc_resources(device_t dev, device_t parent_dev, const struct bhndb_hwcfg *cfg); -int bhndb_alloc_host_resources( - struct bhndb_resources *br); - void bhndb_free_resources( struct bhndb_resources *br); int bhndb_add_resource_region( struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size, bhndb_priority_t priority, const struct bhndb_regwin *static_regwin); int bhndb_find_resource_limits( struct bhndb_resources *br, struct resource *r, rman_res_t *start, rman_res_t *end); struct bhndb_region *bhndb_find_resource_region( struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size); struct bhndb_dw_alloc *bhndb_dw_find_resource( struct bhndb_resources *dr, struct resource *r); struct bhndb_dw_alloc *bhndb_dw_find_mapping( struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size); int bhndb_dw_retain( struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, struct resource *res); void bhndb_dw_release( struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, struct resource *res); int bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br, struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size); -size_t bhndb_regwin_count( - const struct bhndb_regwin *table, - bhndb_regwin_type_t type); - -const struct bhndb_regwin *bhndb_regwin_find_type( - const struct bhndb_regwin *table, - bhndb_regwin_type_t type, - bus_size_t min_size); - -const struct bhndb_regwin *bhndb_regwin_find_core( - const struct bhndb_regwin *table, - bhnd_devclass_t class, int unit, - bhnd_port_type port_type, u_int port, - u_int region); - - -const struct bhndb_regwin *bhndb_regwin_find_best( - const struct bhndb_regwin *table, - bhnd_devclass_t 
class, int unit, - bhnd_port_type port_type, u_int port, - u_int region, bus_size_t min_size); - -bool bhndb_regwin_match_core( - const struct bhndb_regwin *regw, - struct bhnd_core_info *core); - const struct bhndb_hw_priority *bhndb_hw_priority_find_core( const struct bhndb_hw_priority *table, struct bhnd_core_info *core); /** * Dynamic register window allocation reference. */ struct bhndb_dw_rentry { struct resource *dw_res; /**< child resource */ LIST_ENTRY(bhndb_dw_rentry) dw_link; }; /** * A dynamic register window allocation record. */ struct bhndb_dw_alloc { const struct bhndb_regwin *win; /**< window definition */ struct resource *parent_res; /**< enclosing resource */ u_int rnid; /**< region identifier */ rman_res_t target; /**< the current window address, or 0x0 if unknown */ LIST_HEAD(, bhndb_dw_rentry) refs; /**< references */ }; /** * A bus address region description. */ struct bhndb_region { bhnd_addr_t addr; /**< start of mapped range */ bhnd_size_t size; /**< size of mapped range */ bhndb_priority_t priority; /**< direct resource allocation priority */ const struct bhndb_regwin *static_regwin; /**< fixed mapping regwin, if any */ STAILQ_ENTRY(bhndb_region) link; }; /** * BHNDB resource allocation state. 
*/ struct bhndb_resources { device_t dev; /**< bridge device */ const struct bhndb_hwcfg *cfg; /**< hardware configuration */ - device_t parent_dev; /**< parent device */ - struct resource_spec *res_spec; /**< parent bus resource specs, or NULL if not allocated */ - struct resource **res; /**< parent bus resources, or NULL if not allocated */ - bool res_avail; /**< if parent bus resources have been allocated */ + struct bhndb_host_resources *res; /**< host resources, or NULL if not allocated */ struct rman ht_mem_rman; /**< host memory manager */ struct rman br_mem_rman; /**< bridged memory manager */ STAILQ_HEAD(, bhndb_region) bus_regions; /**< bus region descriptors */ struct bhndb_dw_alloc *dw_alloc; /**< dynamic window allocation records */ size_t dwa_count; /**< number of dynamic windows available. */ bitstr_t *dwa_freelist; /**< dynamic window free list */ bhndb_priority_t min_prio; /**< minimum resource priority required to allocate a dynamic window */ }; /** * Returns true if the all dynamic windows are marked free, false * otherwise. * * @param br The resource state to check. */ static inline bool bhndb_dw_all_free(struct bhndb_resources *br) { int bit; bit_ffs(br->dwa_freelist, br->dwa_count, &bit); return (bit == -1); } /** * Find the next free dynamic window region in @p br. * * @param br The resource state to search. */ static inline struct bhndb_dw_alloc * bhndb_dw_next_free(struct bhndb_resources *br) { struct bhndb_dw_alloc *dw_free; int bit; bit_ffc(br->dwa_freelist, br->dwa_count, &bit); if (bit == -1) return (NULL); dw_free = &br->dw_alloc[bit]; KASSERT(LIST_EMPTY(&dw_free->refs), ("free list out of sync with refs")); return (dw_free); } /** * Returns true if a dynamic window allocation is marked as free. * * @param br The resource state owning @p dwa. * @param dwa The dynamic window allocation record to be checked. 
*/ static inline bool bhndb_dw_is_free(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa) { bool is_free = LIST_EMPTY(&dwa->refs); KASSERT(is_free == !bit_test(br->dwa_freelist, dwa->rnid), ("refs out of sync with free list")); return (is_free); } #define BHNDB_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \ "bhndb resource allocator lock", MTX_DEF) #define BHNDB_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define BHNDB_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define BHNDB_LOCK_ASSERT(sc, what) mtx_assert(&(sc)->sc_mtx, what) #define BHNDB_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #endif /* _BHND_BHNDB_PRIVATE_H_ */ Index: head/sys/dev/bhnd/bhndb/bhndb_subr.c =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb_subr.c (revision 324070) +++ head/sys/dev/bhnd/bhndb/bhndb_subr.c (revision 324071) @@ -1,1084 +1,1164 @@ /*- - * Copyright (c) 2015 Landon Fuller + * Copyright (c) 2015-2016 Landon Fuller + * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * + * Portions of this software were developed by Landon Fuller + * under sponsorship from the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "bhndb_private.h" #include "bhndbvar.h" /** * Attach a BHND bridge device to @p parent. * * @param parent A parent PCI device. * @param[out] bhndb On success, the probed and attached bhndb bridge device. * @param unit The device unit number, or -1 to select the next available unit * number. * * @retval 0 success * @retval non-zero Failed to attach the bhndb device. */ int bhndb_attach_bridge(device_t parent, device_t *bhndb, int unit) { int error; *bhndb = device_add_child(parent, "bhndb", unit); if (*bhndb == NULL) return (ENXIO); if (!(error = device_probe_and_attach(*bhndb))) return (0); if ((device_delete_child(parent, *bhndb))) device_printf(parent, "failed to detach bhndb child\n"); return (error); } /* * Call BHNDB_SUSPEND_RESOURCE() for all resources in @p rl. */ static void bhndb_do_suspend_resources(device_t dev, struct resource_list *rl) { struct resource_list_entry *rle; /* Suspend all child resources. 
*/ STAILQ_FOREACH(rle, rl, link) { /* Skip non-allocated resources */ if (rle->res == NULL) continue; BHNDB_SUSPEND_RESOURCE(device_get_parent(dev), dev, rle->type, rle->res); } } /** * Helper function for implementing BUS_RESUME_CHILD() on bridged * bhnd(4) buses. * * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST() * to find the child's resources and call BHNDB_SUSPEND_RESOURCE() for all * child resources, ensuring that the device's allocated bridge resources * will be available to other devices during bus resumption. * * Before suspending any resources, @p child is suspended by * calling bhnd_generic_suspend_child(). * * If @p child is not a direct child of @p dev, suspension is delegated to * the @p dev parent. */ int bhnd_generic_br_suspend_child(device_t dev, device_t child) { struct resource_list *rl; int error; if (device_get_parent(child) != dev) BUS_SUSPEND_CHILD(device_get_parent(dev), child); if (device_is_suspended(child)) return (EBUSY); /* Suspend the child device */ if ((error = bhnd_generic_suspend_child(dev, child))) return (error); /* Fetch the resource list. If none, there's nothing else to do */ rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child); if (rl == NULL) return (0); /* Suspend all child resources. */ bhndb_do_suspend_resources(dev, rl); return (0); } /** * Helper function for implementing BUS_RESUME_CHILD() on bridged * bhnd(4) bus devices. * * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST() * to find the child's resources and call BHNDB_RESUME_RESOURCE() for all * child resources, before delegating to bhnd_generic_resume_child(). * * If resource resumption fails, @p child will not be resumed. * * If @p child is not a direct child of @p dev, suspension is delegated to * the @p dev parent. 
*/ int bhnd_generic_br_resume_child(device_t dev, device_t child) { struct resource_list *rl; struct resource_list_entry *rle; int error; if (device_get_parent(child) != dev) BUS_RESUME_CHILD(device_get_parent(dev), child); if (!device_is_suspended(child)) return (EBUSY); /* Fetch the resource list. If none, there's nothing else to do */ rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child); if (rl == NULL) return (bhnd_generic_resume_child(dev, child)); /* Resume all resources */ STAILQ_FOREACH(rle, rl, link) { /* Skip non-allocated resources */ if (rle->res == NULL) continue; error = BHNDB_RESUME_RESOURCE(device_get_parent(dev), dev, rle->type, rle->res); if (error) { /* Put all resources back into a suspend state */ bhndb_do_suspend_resources(dev, rl); return (error); } } /* Now that all resources are resumed, resume child */ if ((error = bhnd_generic_resume_child(dev, child))) { /* Put all resources back into a suspend state */ bhndb_do_suspend_resources(dev, rl); } return (error); } /** - * Find a SYS_RES_MEMORY resource containing the given address range. + * Find a host resource of @p type that maps the given range. * - * @param br The bhndb resource state to search. + * @param hr The resource state to search. + * @param type The resource type to search for (see SYS_RES_*). * @param start The start address of the range to search for. * @param count The size of the range to search for. * * @retval resource the host resource containing the requested range. * @retval NULL if no resource containing the requested range can be found. 
*/ struct resource * -bhndb_find_resource_range(struct bhndb_resources *br, rman_res_t start, - rman_res_t count) +bhndb_host_resource_for_range(struct bhndb_host_resources *hr, int type, + rman_res_t start, rman_res_t count) { - KASSERT(br->res_avail, ("no host resources allocated")); + for (u_int i = 0; hr->resource_specs[i].type != -1; i++) { + struct resource *r = hr->resources[i]; - for (u_int i = 0; br->res_spec[i].type != -1; i++) { - struct resource *r = br->res[i]; - - if (br->res_spec->type != SYS_RES_MEMORY) + if (hr->resource_specs[i].type != type) continue; /* Verify range */ if (rman_get_start(r) > start) continue; if (rman_get_end(r) < (start + count - 1)) continue; return (r); } return (NULL); } /** - * Find the resource containing @p win. + * Find a host resource of that matches the given register window definition. * - * @param br The bhndb resource state to search. - * @param win A register window. + * @param hr The resource state to search. + * @param win A register window definition. * - * @retval resource the resource containing @p win. - * @retval NULL if no resource containing @p win can be found. + * @retval resource the host resource corresponding to @p win. + * @retval NULL if no resource corresponding to @p win can be found. 
*/ struct resource * -bhndb_find_regwin_resource(struct bhndb_resources *br, +bhndb_host_resource_for_regwin(struct bhndb_host_resources *hr, const struct bhndb_regwin *win) { const struct resource_spec *rspecs; - KASSERT(br->res_avail, ("no host resources allocated")); - - rspecs = br->cfg->resource_specs; + rspecs = hr->resource_specs; for (u_int i = 0; rspecs[i].type != -1; i++) { if (win->res.type != rspecs[i].type) continue; if (win->res.rid != rspecs[i].rid) continue; /* Found declared resource */ - return (br->res[i]); + return (hr->resources[i]); } - device_printf(br->dev, - "missing regwin resource spec (type=%d, rid=%d)\n", - win->res.type, win->res.rid); + device_printf(hr->owner, "missing regwin resource spec " + "(type=%d, rid=%d)\n", win->res.type, win->res.rid); return (NULL); } /** * Allocate and initialize a new resource state structure. * * @param dev The bridge device. * @param parent_dev The parent device from which host resources should be * allocated. * @param cfg The hardware configuration to be used. */ struct bhndb_resources * bhndb_alloc_resources(device_t dev, device_t parent_dev, const struct bhndb_hwcfg *cfg) { struct bhndb_resources *r; const struct bhndb_regwin *win; bus_size_t last_window_size; int rnid; int error; bool free_ht_mem, free_br_mem; free_ht_mem = false; free_br_mem = false; r = malloc(sizeof(*r), M_BHND, M_NOWAIT|M_ZERO); if (r == NULL) return (NULL); /* Basic initialization */ r->dev = dev; - r->parent_dev = parent_dev; r->cfg = cfg; + r->res = NULL; r->min_prio = BHNDB_PRIORITY_NONE; STAILQ_INIT(&r->bus_regions); /* Initialize host address space resource manager. */ r->ht_mem_rman.rm_start = 0; r->ht_mem_rman.rm_end = ~0; r->ht_mem_rman.rm_type = RMAN_ARRAY; r->ht_mem_rman.rm_descr = "BHNDB host memory"; if ((error = rman_init(&r->ht_mem_rman))) { device_printf(r->dev, "could not initialize ht_mem_rman\n"); goto failed; } free_ht_mem = true; /* Initialize resource manager for the bridged address space. 
*/ r->br_mem_rman.rm_start = 0; r->br_mem_rman.rm_end = BUS_SPACE_MAXADDR_32BIT; r->br_mem_rman.rm_type = RMAN_ARRAY; r->br_mem_rman.rm_descr = "BHNDB bridged memory"; if ((error = rman_init(&r->br_mem_rman))) { device_printf(r->dev, "could not initialize br_mem_rman\n"); goto failed; } free_br_mem = true; error = rman_manage_region(&r->br_mem_rman, 0, BUS_SPACE_MAXADDR_32BIT); if (error) { device_printf(r->dev, "could not configure br_mem_rman\n"); goto failed; } /* Fetch the dynamic regwin count and verify that it does not exceed * what is representable via our freelist bitstring. */ r->dwa_count = bhndb_regwin_count(cfg->register_windows, BHNDB_REGWIN_T_DYN); if (r->dwa_count >= INT_MAX) { device_printf(r->dev, "max dynamic regwin count exceeded\n"); goto failed; } /* Allocate the dynamic window allocation table. */ r->dw_alloc = malloc(sizeof(r->dw_alloc[0]) * r->dwa_count, M_BHND, M_NOWAIT); if (r->dw_alloc == NULL) goto failed; /* Allocate the dynamic window allocation freelist */ r->dwa_freelist = bit_alloc(r->dwa_count, M_BHND, M_NOWAIT); if (r->dwa_freelist == NULL) goto failed; /* Initialize the dynamic window table */ rnid = 0; last_window_size = 0; for (win = cfg->register_windows; win->win_type != BHNDB_REGWIN_T_INVALID; win++) { struct bhndb_dw_alloc *dwa; /* Skip non-DYN windows */ if (win->win_type != BHNDB_REGWIN_T_DYN) continue; /* Validate the window size */ if (win->win_size == 0) { device_printf(r->dev, "ignoring zero-length dynamic " "register window\n"); continue; } else if (last_window_size == 0) { last_window_size = win->win_size; } else if (last_window_size != win->win_size) { /* * No existing hardware should trigger this. * * If you run into this in the future, the dynamic * window allocator and the resource priority system * will need to be extended to support multiple register * window allocation pools. 
*/ device_printf(r->dev, "devices that vend multiple " "dynamic register window sizes are not currently " "supported\n"); goto failed; } dwa = &r->dw_alloc[rnid]; dwa->win = win; dwa->parent_res = NULL; dwa->rnid = rnid; dwa->target = 0x0; LIST_INIT(&dwa->refs); rnid++; } - return (r); - -failed: - if (free_ht_mem) - rman_fini(&r->ht_mem_rman); - - if (free_br_mem) - rman_fini(&r->br_mem_rman); - - if (r->dw_alloc != NULL) - free(r->dw_alloc, M_BHND); - - if (r->dwa_freelist != NULL) - free(r->dwa_freelist, M_BHND); - - free(r, M_BHND); - - return (NULL); -} - -/** - * Allocate host resources required by @p br, and initialize - * internal BHNDB_ADDRSPACE_NATIVE resource manager state. - * - * @param br Resource state. - */ -int -bhndb_alloc_host_resources(struct bhndb_resources *br) -{ - size_t res_num; - int error; - - KASSERT(!br->res_avail, ("host resources already allocated")); - - /* Determine our bridge resource count from the hardware config. */ - res_num = 0; - for (size_t i = 0; br->cfg->resource_specs[i].type != -1; i++) - res_num++; - - /* Allocate space for a non-const copy of our resource_spec - * table; this will be updated with the RIDs assigned by - * bus_alloc_resources. 
*/ - br->res_spec = malloc(sizeof(br->res_spec[0]) * (res_num + 1), M_BHND, - M_NOWAIT); - if (br->res_spec == NULL) { - error = ENOMEM; - goto failed; - } - - /* Initialize and terminate the table */ - for (size_t i = 0; i < res_num; i++) - br->res_spec[i] = br->cfg->resource_specs[i]; - - br->res_spec[res_num].type = -1; - - /* Allocate space for our resource references */ - br->res = malloc(sizeof(br->res[0]) * res_num, M_BHND, M_NOWAIT); - if (br->res == NULL) { - error = ENOMEM; - goto failed; - } - /* Allocate host resources */ - error = bus_alloc_resources(br->parent_dev, br->res_spec, br->res); + error = bhndb_alloc_host_resources(parent_dev, r->cfg, &r->res); if (error) { - device_printf(br->dev, - "could not allocate bridge resources on %s: %d\n", - device_get_nameunit(br->parent_dev), error); + device_printf(r->dev, + "could not allocate host resources on %s: %d\n", + device_get_nameunit(parent_dev), error); goto failed; - } else { - br->res_avail = true; } /* Populate (and validate) parent resource references for all * dynamic windows */ - for (size_t i = 0; i < br->dwa_count; i++) { + for (size_t i = 0; i < r->dwa_count; i++) { struct bhndb_dw_alloc *dwa; const struct bhndb_regwin *win; - dwa = &br->dw_alloc[i]; + dwa = &r->dw_alloc[i]; win = dwa->win; /* Find and validate corresponding resource. 
*/ - dwa->parent_res = bhndb_find_regwin_resource(br, win); + dwa->parent_res = bhndb_host_resource_for_regwin(r->res, win); if (dwa->parent_res == NULL) { - device_printf(br->dev, "no host resource found for %u " + device_printf(r->dev, "no host resource found for %u " "register window with offset %#jx and " "size %#jx\n", win->win_type, (uintmax_t)win->win_offset, (uintmax_t)win->win_size); error = ENXIO; goto failed; } if (rman_get_size(dwa->parent_res) < win->win_offset + win->win_size) { - device_printf(br->dev, "resource %d too small for " + device_printf(r->dev, "resource %d too small for " "register window with offset %llx and size %llx\n", rman_get_rid(dwa->parent_res), (unsigned long long) win->win_offset, (unsigned long long) win->win_size); error = EINVAL; goto failed; } } /* Add allocated memory resources to our host memory resource manager */ - for (u_int i = 0; br->res_spec[i].type != -1; i++) { + for (u_int i = 0; r->res->resource_specs[i].type != -1; i++) { struct resource *res; /* skip non-memory resources */ - if (br->res_spec[i].type != SYS_RES_MEMORY) + if (r->res->resource_specs[i].type != SYS_RES_MEMORY) continue; /* add host resource to set of managed regions */ - res = br->res[i]; - error = rman_manage_region(&br->ht_mem_rman, + res = r->res->resources[i]; + error = rman_manage_region(&r->ht_mem_rman, rman_get_start(res), rman_get_end(res)); if (error) { - device_printf(br->dev, + device_printf(r->dev, "could not register host memory region with " "ht_mem_rman: %d\n", error); goto failed; } } - return (0); + return (r); failed: - if (br->res_avail) - bus_release_resources(br->parent_dev, br->res_spec, br->res); - - if (br->res != NULL) - free(br->res, M_BHND); + if (free_ht_mem) + rman_fini(&r->ht_mem_rman); - if (br->res_spec != NULL) - free(br->res_spec, M_BHND); + if (free_br_mem) + rman_fini(&r->br_mem_rman); - return (error); + if (r->dw_alloc != NULL) + free(r->dw_alloc, M_BHND); + + if (r->dwa_freelist != NULL) + 
free(r->dwa_freelist, M_BHND); + + if (r->res != NULL) + bhndb_release_host_resources(r->res); + + free(r, M_BHND); + + return (NULL); } /** * Deallocate the given bridge resource structure and any associated resources. * * @param br Resource state to be deallocated. */ void bhndb_free_resources(struct bhndb_resources *br) { struct bhndb_region *region, *r_next; struct bhndb_dw_alloc *dwa; struct bhndb_dw_rentry *dwr, *dwr_next; /* No window regions may still be held */ if (!bhndb_dw_all_free(br)) { for (int i = 0; i < br->dwa_count; i++) { dwa = &br->dw_alloc[i]; /* Skip free dynamic windows */ if (bhndb_dw_is_free(br, dwa)) continue; device_printf(br->dev, "leaked dynamic register window %d\n", dwa->rnid); } } - /* Release resources allocated through our parent. */ - if (br->res_avail) - bus_release_resources(br->parent_dev, br->res_spec, br->res); + /* Release host resources allocated through our parent. */ + if (br->res != NULL) + bhndb_release_host_resources(br->res); /* Clean up resource reservations */ for (size_t i = 0; i < br->dwa_count; i++) { dwa = &br->dw_alloc[i]; LIST_FOREACH_SAFE(dwr, &dwa->refs, dw_link, dwr_next) { LIST_REMOVE(dwr, dw_link); free(dwr, M_BHND); } } /* Release bus regions */ STAILQ_FOREACH_SAFE(region, &br->bus_regions, link, r_next) { STAILQ_REMOVE(&br->bus_regions, region, bhndb_region, link); free(region, M_BHND); } /* Release our resource managers */ rman_fini(&br->ht_mem_rman); rman_fini(&br->br_mem_rman); - /* Free backing resource state structures */ - if (br->res != NULL) - free(br->res, M_BHND); - - if (br->res_spec != NULL) - free(br->res_spec, M_BHND); - free(br->dw_alloc, M_BHND); free(br->dwa_freelist, M_BHND); +} + +/** + * Allocate host bus resources defined by @p hwcfg. + * + * On success, the caller assumes ownership of the allocated host resources, + * which must be freed via bhndb_release_host_resources(). + * + * @param dev The device to be used when allocating resources + * (e.g. via bus_alloc_resources()). 
+ * @param hwcfg The hardware configuration defining the host + * resources to be allocated + * @param[out] resources On success, the allocated host resources. + */ +int +bhndb_alloc_host_resources(device_t dev, const struct bhndb_hwcfg *hwcfg, + struct bhndb_host_resources **resources) +{ + struct bhndb_host_resources *hr; + size_t nres; + int error; + + hr = malloc(sizeof(*hr), M_BHND, M_WAITOK); + hr->owner = dev; + hr->cfg = hwcfg; + hr->resource_specs = NULL; + hr->resources = NULL; + + /* Determine our bridge resource count from the hardware config. */ + nres = 0; + for (size_t i = 0; hwcfg->resource_specs[i].type != -1; i++) + nres++; + + /* Allocate space for a non-const copy of our resource_spec + * table; this will be updated with the RIDs assigned by + * bus_alloc_resources. */ + hr->resource_specs = malloc(sizeof(hr->resource_specs[0]) * (nres + 1), + M_BHND, M_WAITOK); + + /* Initialize and terminate the table */ + for (size_t i = 0; i < nres; i++) + hr->resource_specs[i] = hwcfg->resource_specs[i]; + + hr->resource_specs[nres].type = -1; + + /* Allocate space for our resource references */ + hr->resources = malloc(sizeof(hr->resources[0]) * nres, M_BHND, + M_WAITOK); + + /* Allocate host resources */ + error = bus_alloc_resources(hr->owner, hr->resource_specs, + hr->resources); + if (error) { + device_printf(dev, "could not allocate bridge resources via " + "%s: %d\n", device_get_nameunit(dev), error); + goto failed; + } + + *resources = hr; + return (0); + +failed: + if (hr->resource_specs != NULL) + free(hr->resource_specs, M_BHND); + + if (hr->resources != NULL) + free(hr->resources, M_BHND); + + free(hr, M_BHND); + + return (error); +} + +/** + * Deallocate a set of bridge host resources. + * + * @param hr The resources to be freed. 
+ */ +void +bhndb_release_host_resources(struct bhndb_host_resources *hr) +{ + bus_release_resources(hr->owner, hr->resource_specs, hr->resources); + + free(hr->resources, M_BHND); + free(hr->resource_specs, M_BHND); + free(hr, M_BHND); +} + + +/** + * Search @p cores for the core serving as the bhnd host bridge. + * + * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged + * bhnd(4) devices to determine the hostb core: + * + * - The core must have a Broadcom vendor ID. + * - The core devclass must match the bridge type. + * - The core must be the first device on the bus with the bridged device + * class. + * + * @param cores The core table to search. + * @param ncores The number of cores in @p cores. + * @param bridge_devclass The expected device class of the bridge core. + * @param[out] core If found, the matching host bridge core info. + * + * @retval 0 success + * @retval ENOENT not found + */ +int +bhndb_find_hostb_core(struct bhnd_core_info *cores, u_int ncores, + bhnd_devclass_t bridge_devclass, struct bhnd_core_info *core) +{ + struct bhnd_core_match md; + struct bhnd_core_info *match; + u_int match_core_idx; + + /* Set up a match descriptor for the required device class. */ + md = (struct bhnd_core_match) { + BHND_MATCH_CORE_CLASS(bridge_devclass), + BHND_MATCH_CORE_UNIT(0) + }; + + /* Find the matching core with the lowest core index */ + match = NULL; + match_core_idx = UINT_MAX; + + for (u_int i = 0; i < ncores; i++) { + if (!bhnd_core_matches(&cores[i], &md)) + continue; + + /* Lower core indices take precedence */ + if (match != NULL && match_core_idx < match->core_idx) + continue; + + match = &cores[i]; + match_core_idx = match->core_idx; + } + + if (match == NULL) + return (ENOENT); + + *core = *match; + return (0); } /** * Add a bus region entry to @p r for the given base @p addr and @p size. * * @param br The resource state to which the bus region entry will be added. * @param addr The base address of this region. 
* @param size The size of this region. * @param priority The resource priority to be assigned to allocations * made within this bus region. * @param static_regwin If available, a static register window mapping this * bus region entry. If not available, NULL. * * @retval 0 success * @retval non-zero if adding the bus region fails. */ int bhndb_add_resource_region(struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size, bhndb_priority_t priority, const struct bhndb_regwin *static_regwin) { struct bhndb_region *reg; /* Insert in the bus resource list */ reg = malloc(sizeof(*reg), M_BHND, M_NOWAIT); if (reg == NULL) return (ENOMEM); *reg = (struct bhndb_region) { .addr = addr, .size = size, .priority = priority, .static_regwin = static_regwin }; STAILQ_INSERT_HEAD(&br->bus_regions, reg, link); return (0); } /** * Find the maximum start and end limits of the register window mapping * resource @p r. * * If the memory range is not mapped by an existing dynamic or static register * window, ENOENT will be returned. * * @param br The resource state to search. * @param r The resource to search for in @p br. * @param addr The requested starting address. * @param size The requested size. * * @retval bhndb_region A region that fully contains the requested range. * @retval NULL If no mapping region can be found. 
*/ int bhndb_find_resource_limits(struct bhndb_resources *br, struct resource *r, rman_res_t *start, rman_res_t *end) { struct bhndb_dw_alloc *dynamic; struct bhndb_region *sregion; /* Check for an enclosing dynamic register window */ if ((dynamic = bhndb_dw_find_resource(br, r))) { *start = dynamic->target; *end = dynamic->target + dynamic->win->win_size - 1; return (0); } /* Check for a static region */ sregion = bhndb_find_resource_region(br, rman_get_start(r), rman_get_size(r)); if (sregion != NULL && sregion->static_regwin != NULL) { *start = sregion->addr; *end = sregion->addr + sregion->size - 1; return (0); } /* Not found */ return (ENOENT); } /** * Find the bus region that maps @p size bytes at @p addr. * * @param br The resource state to search. * @param addr The requested starting address. * @param size The requested size. * * @retval bhndb_region A region that fully contains the requested range. * @retval NULL If no mapping region can be found. */ struct bhndb_region * bhndb_find_resource_region(struct bhndb_resources *br, bhnd_addr_t addr, bhnd_size_t size) { struct bhndb_region *region; STAILQ_FOREACH(region, &br->bus_regions, link) { /* Request must fit within the region's mapping */ if (addr < region->addr) continue; if (addr + size > region->addr + region->size) continue; return (region); } /* Not found */ return (NULL); } /** * Find the entry matching @p r in @p dwa's references, if any. * * @param dwa The dynamic window allocation to search * @param r The resource to search for in @p dwa. 
 */
static struct bhndb_dw_rentry *
bhndb_dw_find_resource_entry(struct bhndb_dw_alloc *dwa, struct resource *r)
{
	struct	bhndb_dw_rentry	*rentry;

	LIST_FOREACH(rentry, &dwa->refs, dw_link) {
		struct resource *dw_res = rentry->dw_res;

		/* Match dev/rid/addr/size; resources are not required to be
		 * pointer-identical, only equivalent */
		if (rman_get_device(dw_res) != rman_get_device(r) ||
		    rman_get_rid(dw_res) != rman_get_rid(r) ||
		    rman_get_start(dw_res) != rman_get_start(r) ||
		    rman_get_size(dw_res) != rman_get_size(r))
		{
			continue;
		}

		/* Matching allocation found */
		return (rentry);
	}

	return (NULL);
}

/**
 * Find the dynamic region allocated for @p r, if any.
 *
 * @param br The resource state to search.
 * @param r The resource to search for.
 *
 * @retval bhndb_dw_alloc The allocation record for @p r.
 * @retval NULL if no dynamic window is allocated for @p r.
 */
struct bhndb_dw_alloc *
bhndb_dw_find_resource(struct bhndb_resources *br, struct resource *r)
{
	struct bhndb_dw_alloc	*dwa;

	for (size_t i = 0; i < br->dwa_count; i++) {
		dwa = &br->dw_alloc[i];

		/* Skip free dynamic windows */
		if (bhndb_dw_is_free(br, dwa))
			continue;

		/* Matching allocation found? */
		if (bhndb_dw_find_resource_entry(dwa, r) != NULL)
			return (dwa);
	}

	return (NULL);
}

/**
 * Find an existing dynamic window mapping @p size bytes
 * at @p addr. The window may or may not be free.
 *
 * @param br The resource state to search.
 * @param addr The requested starting address.
 * @param size The requested size.
 *
 * @retval bhndb_dw_alloc A window allocation that fully contains the requested
 * range.
 * @retval NULL If no mapping region can be found.
 */
struct bhndb_dw_alloc *
bhndb_dw_find_mapping(struct bhndb_resources *br, bhnd_addr_t addr,
    bhnd_size_t size)
{
	struct bhndb_dw_alloc		*dwr;
	const struct bhndb_regwin	*win;

	/* Search for an existing dynamic mapping of this address range.
	 */
	for (size_t i = 0; i < br->dwa_count; i++) {
		dwr = &br->dw_alloc[i];
		win = dwr->win;

		/* Verify the range */
		if (addr < dwr->target)
			continue;

		if (addr + size > dwr->target + win->win_size)
			continue;

		/* Found a usable mapping */
		return (dwr);
	}

	/* not found */
	return (NULL);
}

/**
 * Retain a reference to @p dwa for use by @p res.
 *
 * @param br The resource state owning @p dwa.
 * @param dwa The allocation record to be retained.
 * @param res The resource that will own a reference to @p dwa.
 *
 * @retval 0 success
 * @retval ENOMEM Failed to allocate a new reference structure.
 */
int
bhndb_dw_retain(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa,
    struct resource *res)
{
	struct bhndb_dw_rentry *rentry;

	KASSERT(bhndb_dw_find_resource_entry(dwa, res) == NULL,
	    ("double-retain of dynamic window for same resource"));

	/* Insert a reference entry; we use M_NOWAIT to allow use from
	 * within a non-sleepable lock */
	rentry = malloc(sizeof(*rentry), M_BHND, M_NOWAIT);
	if (rentry == NULL)
		return (ENOMEM);

	rentry->dw_res = res;
	LIST_INSERT_HEAD(&dwa->refs, rentry, dw_link);

	/* Update the free list */
	bit_set(br->dwa_freelist, dwa->rnid);

	return (0);
}

/**
 * Release a reference to @p dwa previously retained by @p res. If the
 * reference count of @p dwa reaches zero, it will be added to the
 * free list.
 *
 * @param br The resource state owning @p dwa.
 * @param dwa The allocation record to be released.
 * @param res The resource that currently owns a reference to @p dwa.
 */
void
bhndb_dw_release(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa,
    struct resource *r)
{
	struct	bhndb_dw_rentry	*rentry;

	/* Find the reference entry registered for @p r */
	rentry = bhndb_dw_find_resource_entry(dwa, r);
	KASSERT(rentry != NULL, ("over release of resource entry"));

	LIST_REMOVE(rentry, dw_link);
	free(rentry, M_BHND);

	/* If this was the last reference, update the free list */
	if (LIST_EMPTY(&dwa->refs))
		bit_clear(br->dwa_freelist, dwa->rnid);
}

/**
 * Attempt to set (or reset) the target address of @p dwa to map @p size bytes
 * at @p addr.
 *
 * This will apply any necessary window alignment and verify that
 * the window is capable of mapping the requested range prior to modifying
 * the record.
 *
 * @param dev The device on which to issue the BHNDB_SET_WINDOW_ADDR() request.
 * @param br The resource state owning @p dwa.
 * @param dwa The allocation record to be configured.
 * @param addr The address to be mapped via @p dwa.
 * @param size The number of bytes to be mapped at @p addr.
 *
 * @retval 0 success
 * @retval non-zero no usable register window available.
 */
int
bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br,
    struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size)
{
	const struct bhndb_regwin	*rw;
	bus_addr_t			 offset;
	int				 error;

	rw = dwa->win;

	KASSERT(bhndb_dw_is_free(br, dwa),
	    ("attempting to set the target address on an in-use window"));

	/* Page-align the target address */
	offset = addr % rw->win_size;
	dwa->target = addr - offset;

	/* Verify that the window is large enough for the full target */
	if (rw->win_size - offset < size)
		return (ENOMEM);

	/* Update the window target */
	error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target);
	if (error) {
		/* Leave no stale target behind on failure */
		dwa->target = 0x0;
		return (error);
	}

	return (0);
}

/**
 * Return the count of @p type register windows in @p table.
 *
 * @param table The table to search.
 * @param type The required window type, or BHNDB_REGWIN_T_INVALID to
 * count all register window types.
*/ size_t bhndb_regwin_count(const struct bhndb_regwin *table, bhndb_regwin_type_t type) { const struct bhndb_regwin *rw; size_t count; count = 0; for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) { if (type == BHNDB_REGWIN_T_INVALID || rw->win_type == type) count++; } return (count); } /** * Search @p table for the first window with the given @p type. * * @param table The table to search. * @param type The required window type. * @param min_size The minimum window size. * * @retval bhndb_regwin The first matching window. * @retval NULL If no window of the requested type could be found. */ const struct bhndb_regwin * bhndb_regwin_find_type(const struct bhndb_regwin *table, bhndb_regwin_type_t type, bus_size_t min_size) { const struct bhndb_regwin *rw; for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) { if (rw->win_type == type && rw->win_size >= min_size) return (rw); } return (NULL); } /** * Search @p windows for the first matching core window. * * @param table The table to search. * @param class The required core class. * @param unit The required core unit, or -1. * @param port_type The required port type. * @param port The required port. * @param region The required region. * * @retval bhndb_regwin The first matching window. * @retval NULL If no matching window was found. */ const struct bhndb_regwin * bhndb_regwin_find_core(const struct bhndb_regwin *table, bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port, u_int region) { const struct bhndb_regwin *rw; for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) { if (rw->win_type != BHNDB_REGWIN_T_CORE) continue; if (rw->d.core.class != class) continue; if (unit != -1 && rw->d.core.unit != unit) continue; if (rw->d.core.port_type != port_type) continue; if (rw->d.core.port != port) continue; if (rw->d.core.region != region) continue; return (rw); } return (NULL); } /** * Search @p windows for the best available window of at least @p min_size. 
* * Search order: * - BHND_REGWIN_T_CORE * - BHND_REGWIN_T_DYN * * @param table The table to search. * @param class The required core class. * @param unit The required core unit, or -1. * @param port_type The required port type. * @param port The required port. * @param region The required region. * @param min_size The minimum window size. * * @retval bhndb_regwin The first matching window. * @retval NULL If no matching window was found. */ const struct bhndb_regwin * bhndb_regwin_find_best(const struct bhndb_regwin *table, bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port, u_int region, bus_size_t min_size) { const struct bhndb_regwin *rw; /* Prefer a fixed core mapping */ rw = bhndb_regwin_find_core(table, class, unit, port_type, port, region); if (rw != NULL) return (rw); /* Fall back on a generic dynamic window */ return (bhndb_regwin_find_type(table, BHNDB_REGWIN_T_DYN, min_size)); } /** * Return true if @p regw defines a BHNDB_REGWIN_T_CORE register window * that matches against @p core. * * @param regw A register window to match against. * @param core The bhnd(4) core info to match against @p regw. */ bool bhndb_regwin_match_core(const struct bhndb_regwin *regw, struct bhnd_core_info *core) { /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) return (false); /* Device class must match */ if (bhnd_core_class(core) != regw->d.core.class) return (false); /* Device unit must match */ if (core->unit != regw->d.core.unit) return (false); /* Matches */ return (true); } /** * Search for a core resource priority descriptor in @p table that matches * @p core. * * @param table The table to search. * @param core The core to match against @p table. 
*/ const struct bhndb_hw_priority * bhndb_hw_priority_find_core(const struct bhndb_hw_priority *table, struct bhnd_core_info *core) { const struct bhndb_hw_priority *hp; for (hp = table; hp->ports != NULL; hp++) { if (bhnd_core_matches(core, &hp->match)) return (hp); } /* not found */ return (NULL); } Index: head/sys/dev/bhnd/bhndb/bhndbvar.h =================================================================== --- head/sys/dev/bhnd/bhndb/bhndbvar.h (revision 324070) +++ head/sys/dev/bhnd/bhndb/bhndbvar.h (revision 324071) @@ -1,109 +1,167 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #ifndef _BHND_BHNDBVAR_H_ #define _BHND_BHNDBVAR_H_ #include #include #include #include #include #include #include #include #include "bhndb.h" #include "bhndb_if.h" /* * Definitions shared by bhndb(4) driver implementations. */ DECLARE_CLASS(bhndb_driver); struct bhndb_resources; +struct bhndb_host_resources; -int bhndb_attach(device_t dev, bhnd_devclass_t bridge_devclass); +int bhndb_attach(device_t dev, + struct bhnd_chipid *cid, + struct bhnd_core_info *cores, u_int ncores, + struct bhnd_core_info *bridge_core, + bhnd_erom_class_t *erom_class); -int bhndb_generic_probe(device_t dev); -int bhndb_generic_detach(device_t dev); -int bhndb_generic_suspend(device_t dev); -int bhndb_generic_resume(device_t dev); -int bhndb_generic_init_full_config(device_t dev, device_t child, - const struct bhndb_hw_priority *hw_prio_table); +int bhndb_generic_probe(device_t dev); +int bhndb_generic_detach(device_t dev); +int bhndb_generic_suspend(device_t dev); +int bhndb_generic_resume(device_t dev); +int bhndb_generic_init_full_config(device_t dev, + device_t child, + const struct bhndb_hw_priority *hw_prio_table); -int bhnd_generic_br_suspend_child(device_t dev, device_t child); -int bhnd_generic_br_resume_child(device_t dev, device_t child); +int bhnd_generic_br_suspend_child(device_t dev, + device_t child); +int bhnd_generic_br_resume_child(device_t dev, + device_t child); +int bhndb_find_hostb_core( + struct bhnd_core_info *cores, u_int ncores, + bhnd_devclass_t 
bridge_devclass, + struct bhnd_core_info *core); + +int bhndb_alloc_host_resources(device_t dev, + const struct bhndb_hwcfg *hwcfg, + struct bhndb_host_resources **resources); +void bhndb_release_host_resources( + struct bhndb_host_resources *resources); +struct resource *bhndb_host_resource_for_range( + struct bhndb_host_resources *resources, + int type, rman_res_t start, + rman_res_t count); +struct resource *bhndb_host_resource_for_regwin( + struct bhndb_host_resources *resources, + const struct bhndb_regwin *win); + +size_t bhndb_regwin_count( + const struct bhndb_regwin *table, + bhndb_regwin_type_t type); + +const struct bhndb_regwin *bhndb_regwin_find_type( + const struct bhndb_regwin *table, + bhndb_regwin_type_t type, + bus_size_t min_size); + +const struct bhndb_regwin *bhndb_regwin_find_core( + const struct bhndb_regwin *table, + bhnd_devclass_t class, int unit, + bhnd_port_type port_type, u_int port, + u_int region); + +const struct bhndb_regwin *bhndb_regwin_find_best( + const struct bhndb_regwin *table, + bhnd_devclass_t class, int unit, + bhnd_port_type port_type, u_int port, + u_int region, bus_size_t min_size); + +bool bhndb_regwin_match_core( + const struct bhndb_regwin *regw, + struct bhnd_core_info *core); + /** * bhndb child address space. Children either operate in the bridged * SoC address space, or within the address space mapped to the host * device (e.g. the PCI BAR(s)). */ typedef enum { BHNDB_ADDRSPACE_BRIDGED, /**< bridged (SoC) address space */ BHNDB_ADDRSPACE_NATIVE /**< host address space */ } bhndb_addrspace; /** bhndb child instance state */ struct bhndb_devinfo { bhndb_addrspace addrspace; /**< child address space. */ struct resource_list resources; /**< child resources. */ }; /** + * Host resources allocated for a bridge hardware configuration. 
+ */ +struct bhndb_host_resources { + device_t owner; /**< device owning the allocated resources */ + const struct bhndb_hwcfg *cfg; /**< bridge hardware configuration */ + struct resource_spec *resource_specs; /**< resource specification table */ + struct resource **resources; /**< allocated resource table */ +}; + +/** * bhndb driver instance state. Must be first member of all subclass * softc structures. */ struct bhndb_softc { device_t dev; /**< bridge device */ struct bhnd_chipid chipid; /**< chip identification */ - bhnd_devclass_t bridge_class; /**< bridge core type */ - struct bhnd_core_info bridge_core; /**< bridge core. not populated until - * full bridge config is initialized */ - bool have_br_core; /**< false if not yet available */ + struct bhnd_core_info bridge_core; /**< bridge core info */ device_t parent_dev; /**< parent device */ device_t bus_dev; /**< child bhnd(4) bus */ struct bhnd_service_registry services; /**< local service registry */ struct mtx sc_mtx; /**< resource lock. */ struct bhndb_resources *bus_res; /**< bus resource state */ }; #endif /* _BHND_BHNDBVAR_H_ */ Index: head/sys/dev/bhnd/bhndreg.h =================================================================== --- head/sys/dev/bhnd/bhndreg.h (revision 324070) +++ head/sys/dev/bhnd/bhndreg.h (revision 324071) @@ -1,48 +1,52 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #ifndef _BHND_BHNDREG_H_ #define _BHND_BHNDREG_H_ /** * The default address at which the ChipCommon core is mapped on all siba(4) - * devices, and most bcma(4) devices. + * devices, and most (all?) bcma(4) devices. */ -#define BHND_DEFAULT_CHIPC_ADDR 0x18000000 +#define BHND_DEFAULT_CHIPC_ADDR 0x18000000 /** * The standard size of a primary BHND_PORT_DEVICE or BHND_PORT_AGENT * register block. */ #define BHND_DEFAULT_CORE_SIZE 0x1000 +/** + * The standard size of the siba(4) and bcma(4) enumeration space. + */ +#define BHND_DEFAULT_ENUM_SIZE 0x00100000 #endif /* _BHND_BHNDREG_H_ */ \ No newline at end of file Index: head/sys/dev/bhnd/siba/siba.c =================================================================== --- head/sys/dev/bhnd/siba/siba.c (revision 324070) +++ head/sys/dev/bhnd/siba/siba.c (revision 324071) @@ -1,971 +1,1004 @@ /*- * Copyright (c) 2015 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "sibareg.h" #include "sibavar.h" static bhnd_erom_class_t * siba_get_erom_class(driver_t *driver) { return (&siba_erom_parser); } int siba_probe(device_t dev) { device_set_desc(dev, "SIBA BHND bus"); return (BUS_PROBE_DEFAULT); } /** * Default siba(4) bus driver implementation of DEVICE_ATTACH(). * * This implementation initializes internal siba(4) state and performs * bus enumeration, and must be called by subclassing drivers in * DEVICE_ATTACH() before any other bus methods. 
 */
int
siba_attach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Enumerate children; on failure, tear down any partially-added
	 * children before returning the error */
	if ((error = siba_add_children(dev))) {
		device_delete_children(dev);
		return (error);
	}

	return (0);
}

/* Delegate detach to the generic bhnd(4) implementation */
int
siba_detach(device_t dev)
{
	return (bhnd_generic_detach(dev));
}

/* Delegate resume to the generic bhnd(4) implementation */
int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}

/* Delegate suspend to the generic bhnd(4) implementation */
int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}

/*
 * Return bhnd(4) instance variables for @p child, sourced from the
 * core identification cached in the child's device info.
 */
static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	const struct siba_devinfo *dinfo;
	const struct bhnd_core_info *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		*result = (uintptr_t) dinfo->pmu_info;
		return (0);
	default:
		return (ENOENT);
	}
}

/*
 * Set bhnd(4) instance variables for @p child. Only BHND_IVAR_PMU_INFO is
 * writable; core identification ivars are read-only.
 */
static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		dinfo->pmu_info = (struct bhnd_core_pmu_info *) value;
		return (0);
	default:
		return (ENOENT);
	}
}

/* Return the child's resource list, held in its device info */
static struct resource_list *
siba_get_resource_list(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo = device_get_ivars(child);
	return
(&dinfo->resources);
}

/*
 * Read the child's I/O status flags from the SIBA_CFG0_TMSTATEHIGH
 * target-state register.
 */
static int
siba_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	tmhigh;
	int		error;

	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
	if (error)
		return (error);

	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
	return (0);
}

/*
 * Read the child's I/O control flags from the SIBA_CFG0_TMSTATELOW
 * target-state register.
 */
static int
siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	ts_low;
	int		error;

	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
		return (error);

	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
	return (0);
}

/*
 * Update the I/O control flags of a directly-attached @p child, modifying
 * only the bits in @p mask within the SICF field of TMSTATELOW.
 */
static int
siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, ts_mask;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch CFG0 mapping */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg[0]) == NULL)
		return (ENODEV);

	/* Mask and set TMSTATELOW core flag bits */
	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;

	return (siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, ts_mask));
}

/*
 * Report whether @p child's hardware is suspended: held in RESET, or with
 * its clock disabled. On a config-read error, conservatively reports the
 * core as suspended.
 */
static bool
siba_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t		ts_low;
	uint16_t		ioctl;
	int			error;

	/* Fetch target state */
	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	/* Is core held in RESET? */
	if (ts_low & SIBA_TML_RESET)
		return (true);

	/* Is core clocked?
	 */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

/*
 * Bring a directly-attached @p child out of reset, applying the caller's
 * @p ioctl flags. Performs the standard siba(4) reset sequence:
 * suspend, set flags with clocks forced, clear target/initiator errors,
 * release RESET, then release the clock force.
 */
static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 ts_low, imstate;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_EN and
	 * BHND_IOCTL_CLK_FORCE. */
	if (ioctl & (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE))
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = BHND_BUS_SUSPEND_HW(dev, child)))
		return (error);

	/* Leaving the core in reset, set the caller's IOCTL flags and
	 * enable the core's clocks. */
	ts_low = (ioctl | BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) <<
	    SIBA_TML_SICF_SHIFT;
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, SIBA_TML_SICF_MASK);
	if (error)
		return (error);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		error = siba_write_target_state(child, dinfo,
		    SIBA_CFG0_TMSTATEHIGH, 0, SIBA_TMH_SERR);
		if (error)
			return (error);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		error = siba_write_target_state(child, dinfo,
		    SIBA_CFG0_IMSTATE, 0, SIBA_IM_IBE|SIBA_IM_TO);
		if (error)
			return (error);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    0x0, SIBA_TML_RESET);
	if (error)
		return (error);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating.
*/ error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT)); if (error) return (error); return (0); } static int siba_suspend_hw(device_t dev, device_t child) { struct siba_devinfo *dinfo; struct bhnd_core_pmu_info *pm; struct bhnd_resource *r; uint32_t idl, ts_low; uint16_t ioctl; int error; if (device_get_parent(child) != dev) return (EINVAL); dinfo = device_get_ivars(child); pm = dinfo->pmu_info; /* Can't suspend the core without access to the CFG0 registers */ if ((r = dinfo->cfg[0]) == NULL) return (ENODEV); /* Already in RESET? */ ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW); if (ts_low & SIBA_TML_RESET) { /* Clear IOCTL flags, ensuring the clock is disabled */ return (siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, SIBA_TML_SICF_MASK)); return (0); } /* If clocks are already disabled, we can put the core directly * into RESET */ ioctl = SIBA_REG_GET(ts_low, TML_SICF); if (!(ioctl & BHND_IOCTL_CLK_EN)) { /* Set RESET and clear IOCTL flags */ return (siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, SIBA_TML_RESET, SIBA_TML_RESET | SIBA_TML_SICF_MASK)); } /* Reject any further target backplane transactions */ error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, SIBA_TML_REJ, SIBA_TML_REJ); if (error) return (error); /* If this is an initiator core, we need to reject initiator * transactions too. */ idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW); if (idl & SIBA_IDL_INIT) { error = siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, SIBA_IM_RJ, SIBA_IM_RJ); if (error) return (error); } /* Put the core into RESET|REJECT, forcing clocks to ensure the RESET * signal propagates throughout the core, leaving REJECT asserted. 
*/ ts_low = SIBA_TML_RESET; ts_low |= (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT; error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_low); if (error) return (error); /* Give RESET ample time */ DELAY(10); /* Leaving core in reset, disable all clocks, clear REJ flags and * IOCTL state */ error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, SIBA_TML_RESET, SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK); if (error) return (error); /* Clear previously asserted initiator reject */ if (idl & SIBA_IDL_INIT) { error = siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0, SIBA_IM_RJ); if (error) return (error); } /* Core is now in RESET, with clocks disabled and REJ not asserted. * * We lastly need to inform the PMU, releasing any outstanding per-core * PMU requests */ if (pm != NULL) { if ((error = BHND_PMU_CORE_RELEASE(pm->pm_pmu, pm))) return (error); } return (0); } static int siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value, u_int width) { struct siba_devinfo *dinfo; rman_res_t r_size; /* Must be directly attached */ if (device_get_parent(child) != dev) return (EINVAL); /* CFG0 registers must be available */ dinfo = device_get_ivars(child); if (dinfo->cfg[0] == NULL) return (ENODEV); /* Offset must fall within CFG0 */ r_size = rman_get_size(dinfo->cfg[0]->res); if (r_size < offset || r_size - offset < width) return (EFAULT); switch (width) { case 1: *((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg[0], offset); return (0); case 2: *((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg[0], offset); return (0); case 4: *((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg[0], offset); return (0); default: return (EINVAL); } } static int siba_write_config(device_t dev, device_t child, bus_size_t offset, const void *value, u_int width) { struct siba_devinfo *dinfo; struct bhnd_resource *r; rman_res_t r_size; /* Must be directly attached */ if (device_get_parent(child) != dev) 
return (EINVAL); /* CFG0 registers must be available */ dinfo = device_get_ivars(child); if ((r = dinfo->cfg[0]) == NULL) return (ENODEV); /* Offset must fall within CFG0 */ r_size = rman_get_size(r->res); if (r_size < offset || r_size - offset < width) return (EFAULT); switch (width) { case 1: bhnd_bus_write_1(r, offset, *(const uint8_t *)value); return (0); case 2: bhnd_bus_write_2(r, offset, *(const uint8_t *)value); return (0); case 4: bhnd_bus_write_4(r, offset, *(const uint8_t *)value); return (0); default: return (EINVAL); } } static u_int siba_get_port_count(device_t dev, device_t child, bhnd_port_type type) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child, type)); dinfo = device_get_ivars(child); return (siba_addrspace_port_count(dinfo->core_id.num_addrspace)); } static u_int siba_get_region_count(device_t dev, device_t child, bhnd_port_type type, u_int port) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child, type, port)); dinfo = device_get_ivars(child); if (!siba_is_port_valid(dinfo->core_id.num_addrspace, type, port)) return (0); return (siba_addrspace_region_count(dinfo->core_id.num_addrspace, port)); } static int siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num) { struct siba_devinfo *dinfo; struct siba_addrspace *addrspace; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child, port_type, port_num, region_num)); dinfo = device_get_ivars(child); addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num); if (addrspace == NULL) return (-1); return (addrspace->sa_rid); } static int siba_decode_port_rid(device_t dev, device_t 
child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Scan the address space table for a matching resource ID */
	for (int i = 0; i < dinfo->core_id.num_addrspace; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_port(i);
		*region_num = siba_addrspace_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

/*
 * Return the base address and size of the given port/region of @p child.
 * The reported size excludes any trailing range reserved for the bus's own
 * use (e.g. Sonics configuration blocks).
 */
static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev),
		    child, port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);
	addrspace = siba_find_addrspace(dinfo, port_type, port_num,
	    region_num);
	if (addrspace == NULL)
		return (ENOENT);

	*addr = addrspace->sa_base;
	*size = addrspace->sa_size - addrspace->sa_bus_reserved;
	return (0);
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 *
 * This implementation consults @p child's configuration block mapping,
 * returning SIBA_CORE_NUM_INTR if a valid CFG0 block is mapped.
*/ int siba_get_intr_count(device_t dev, device_t child) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child)); dinfo = device_get_ivars(child); /* We can get/set interrupt sbflags on any core with a valid cfg0 * block; whether the core actually makes use of it is another matter * entirely */ if (dinfo->cfg[0] == NULL) return (0); return (SIBA_CORE_NUM_INTR); } /** * Default siba(4) bus driver implementation of BHND_BUS_GET_CORE_IVEC(). * * This implementation consults @p child's CFG0 register block, * returning the interrupt flag assigned to @p child. */ int siba_get_core_ivec(device_t dev, device_t child, u_int intr, uint32_t *ivec) { struct siba_devinfo *dinfo; uint32_t tpsflag; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_CORE_IVEC(device_get_parent(dev), child, intr, ivec)); /* Must be a valid interrupt ID */ if (intr >= siba_get_intr_count(dev, child)) return (ENXIO); /* Fetch sbflag number */ dinfo = device_get_ivars(child); tpsflag = bhnd_bus_read_4(dinfo->cfg[0], SIBA_CFG0_TPSFLAG); *ivec = SIBA_REG_GET(tpsflag, TPS_NUM0); return (0); } /** * Register all address space mappings for @p di. * * @param dev The siba bus device. * @param di The device info instance on which to register all address * space entries. * @param r A resource mapping the enumeration table block for @p di. 
*/ static int siba_register_addrspaces(device_t dev, struct siba_devinfo *di, struct bhnd_resource *r) { struct siba_core_id *cid; uint32_t addr; uint32_t size; int error; cid = &di->core_id; /* Register the device address space entries */ for (uint8_t i = 0; i < di->core_id.num_addrspace; i++) { uint32_t adm; u_int adm_offset; uint32_t bus_reserved; /* Determine the register offset */ adm_offset = siba_admatch_offset(i); if (adm_offset == 0) { device_printf(dev, "addrspace %hhu is unsupported", i); return (ENODEV); } /* Fetch the address match register value */ adm = bhnd_bus_read_4(r, adm_offset); /* Parse the value */ if ((error = siba_parse_admatch(adm, &addr, &size))) { device_printf(dev, "failed to decode address " " match register value 0x%x\n", adm); return (error); } /* If this is the device's core/enumeration addrespace, * reserve the Sonics configuration register blocks for the * use of our bus. */ bus_reserved = 0; if (i == SIBA_CORE_ADDRSPACE) bus_reserved = cid->num_cfg_blocks * SIBA_CFG_SIZE; /* Append the region info */ error = siba_append_dinfo_region(di, i, addr, size, bus_reserved); if (error) return (error); } return (0); } /** * Map per-core configuration blocks for @p dinfo. * * @param dev The siba bus device. * @param dinfo The device info instance on which to map all per-core * configuration blocks. 
*/ static int siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo) { struct siba_addrspace *addrspace; rman_res_t r_start, r_count, r_end; uint8_t num_cfg; num_cfg = dinfo->core_id.num_cfg_blocks; if (num_cfg > SIBA_MAX_CFG) { device_printf(dev, "config block count %hhu out of range\n", num_cfg); return (ENXIO); } /* Fetch the core register address space */ addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0); if (addrspace == NULL) { device_printf(dev, "missing device registers\n"); return (ENXIO); } /* * Map the per-core configuration blocks */ for (uint8_t i = 0; i < num_cfg; i++) { /* Determine the config block's address range; configuration * blocks are allocated starting at SIBA_CFG0_OFFSET, * growing downwards. */ r_start = addrspace->sa_base + SIBA_CFG0_OFFSET; r_start -= i * SIBA_CFG_SIZE; r_count = SIBA_CFG_SIZE; r_end = r_start + r_count - 1; /* Allocate the config resource */ dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i); dinfo->cfg[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev, SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end, r_count, RF_ACTIVE); if (dinfo->cfg[i] == NULL) { device_printf(dev, "failed to allocate SIBA_CFG%hhu\n", i); return (ENXIO); } } return (0); } static device_t siba_add_child(device_t dev, u_int order, const char *name, int unit) { struct siba_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); if ((dinfo = siba_alloc_dinfo(dev)) == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, dinfo); return (child); } static void siba_child_deleted(device_t dev, device_t child) { struct bhnd_softc *sc; struct siba_devinfo *dinfo; sc = device_get_softc(dev); /* Call required bhnd(4) implementation */ bhnd_generic_child_deleted(dev, child); /* Free siba device info */ if ((dinfo = device_get_ivars(child)) != NULL) siba_free_dinfo(dev, dinfo); device_set_ivars(child, NULL); } /** * Scan the core table and add all valid 
discovered cores to * the bus. * * @param dev The siba bus device. */ int siba_add_children(device_t dev) { const struct bhnd_chipid *chipid; - struct bhnd_core_info *cores; - struct siba_devinfo *dinfo; + struct siba_core_id *cores; struct bhnd_resource *r; + device_t *children; int rid; int error; - dinfo = NULL; cores = NULL; r = NULL; chipid = BHND_BUS_GET_CHIPID(dev, dev); - /* Allocate our temporary core table and enumerate all cores */ - cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_NOWAIT); - if (cores == NULL) - return (ENOMEM); + /* Allocate our temporary core and device table */ + cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_WAITOK); + children = malloc(sizeof(*children) * chipid->ncores, M_BHND, + M_WAITOK | M_ZERO); - /* Add all cores. */ + /* + * Add child devices for all discovered cores. + * + * On bridged devices, we'll exhaust our available register windows if + * we map config blocks on unpopulated/disabled cores. To avoid this, we + * defer mapping of the per-core siba(4) config blocks until all cores + * have been enumerated and otherwise configured. 
+ */ for (u_int i = 0; i < chipid->ncores; i++) { - struct siba_core_id cid; - device_t child; + struct siba_devinfo *dinfo; uint32_t idhigh, idlow; rman_res_t r_count, r_end, r_start; - int nintr; /* Map the core's register block */ rid = 0; r_start = SIBA_CORE_ADDR(i); r_count = SIBA_CORE_SIZE; r_end = r_start + SIBA_CORE_SIZE - 1; r = bhnd_alloc_resource(dev, SYS_RES_MEMORY, &rid, r_start, r_end, r_count, RF_ACTIVE); if (r == NULL) { error = ENXIO; - goto cleanup; + goto failed; } - /* Add the child device */ - child = BUS_ADD_CHILD(dev, 0, NULL, -1); - if (child == NULL) { - error = ENXIO; - goto cleanup; - } - /* Read the core info */ idhigh = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDHIGH)); idlow = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDLOW)); - cid = siba_parse_core_id(idhigh, idlow, i, 0); - cores[i] = cid.core_info; + cores[i] = siba_parse_core_id(idhigh, idlow, i, 0); - /* Determine unit number */ + /* Determine and set unit number */ for (u_int j = 0; j < i; j++) { - if (cores[j].vendor == cores[i].vendor && - cores[j].device == cores[i].device) - cores[i].unit++; + struct bhnd_core_info *cur = &cores[i].core_info; + struct bhnd_core_info *prev = &cores[j].core_info; + + if (prev->vendor == cur->vendor && + prev->device == cur->device) + cur->unit++; } + /* Add the child device */ + children[i] = BUS_ADD_CHILD(dev, 0, NULL, -1); + if (children[i] == NULL) { + error = ENXIO; + goto failed; + } + /* Initialize per-device bus info */ - if ((dinfo = device_get_ivars(child)) == NULL) { + if ((dinfo = device_get_ivars(children[i])) == NULL) { error = ENXIO; - goto cleanup; + goto failed; } - if ((error = siba_init_dinfo(dev, dinfo, &cid))) - goto cleanup; + if ((error = siba_init_dinfo(dev, dinfo, &cores[i]))) + goto failed; /* Register the core's address space(s). 
*/ if ((error = siba_register_addrspaces(dev, dinfo, r))) - goto cleanup; + goto failed; - /* Release our resource covering the register blocks - * we're about to map */ + /* Unmap the core's register block */ bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r); r = NULL; + /* If pins are floating or the hardware is otherwise + * unpopulated, the device shouldn't be used. */ + if (bhnd_is_hw_disabled(children[i])) + device_disable(children[i]); + } + + /* Map all valid core's config register blocks and perform interrupt + * assignment */ + for (u_int i = 0; i < chipid->ncores; i++) { + struct siba_devinfo *dinfo; + device_t child; + int nintr; + + child = children[i]; + + /* Skip if core is disabled */ + if (bhnd_is_hw_disabled(child)) + continue; + + dinfo = device_get_ivars(child); + /* Map the core's config blocks */ if ((error = siba_map_cfg_resources(dev, dinfo))) - goto cleanup; + goto failed; /* Assign interrupts */ nintr = bhnd_get_intr_count(child); for (int rid = 0; rid < nintr; rid++) { error = BHND_BUS_ASSIGN_INTR(dev, child, rid); if (error) { device_printf(dev, "failed to assign interrupt " "%d to core %u: %d\n", rid, i, error); } } - /* If pins are floating or the hardware is otherwise - * unpopulated, the device shouldn't be used. */ - if (bhnd_is_hw_disabled(child)) - device_disable(child); - /* Issue bus callback for fully initialized child. 
*/ BHND_BUS_CHILD_ADDED(dev, child); } - -cleanup: - if (cores != NULL) - free(cores, M_BHND); + + free(cores, M_BHND); + free(children, M_BHND); + + return (0); + +failed: + for (u_int i = 0; i < chipid->ncores; i++) { + if (children[i] == NULL) + continue; + + device_delete_child(dev, children[i]); + } + + free(cores, M_BHND); + free(children, M_BHND); if (r != NULL) bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r); return (error); } static device_method_t siba_methods[] = { /* Device interface */ DEVMETHOD(device_probe, siba_probe), DEVMETHOD(device_attach, siba_attach), DEVMETHOD(device_detach, siba_detach), DEVMETHOD(device_resume, siba_resume), DEVMETHOD(device_suspend, siba_suspend), /* Bus interface */ DEVMETHOD(bus_add_child, siba_add_child), DEVMETHOD(bus_child_deleted, siba_child_deleted), DEVMETHOD(bus_read_ivar, siba_read_ivar), DEVMETHOD(bus_write_ivar, siba_write_ivar), DEVMETHOD(bus_get_resource_list, siba_get_resource_list), /* BHND interface */ DEVMETHOD(bhnd_bus_get_erom_class, siba_get_erom_class), DEVMETHOD(bhnd_bus_read_ioctl, siba_read_ioctl), DEVMETHOD(bhnd_bus_write_ioctl, siba_write_ioctl), DEVMETHOD(bhnd_bus_read_iost, siba_read_iost), DEVMETHOD(bhnd_bus_is_hw_suspended, siba_is_hw_suspended), DEVMETHOD(bhnd_bus_reset_hw, siba_reset_hw), DEVMETHOD(bhnd_bus_suspend_hw, siba_suspend_hw), DEVMETHOD(bhnd_bus_read_config, siba_read_config), DEVMETHOD(bhnd_bus_write_config, siba_write_config), DEVMETHOD(bhnd_bus_get_port_count, siba_get_port_count), DEVMETHOD(bhnd_bus_get_region_count, siba_get_region_count), DEVMETHOD(bhnd_bus_get_port_rid, siba_get_port_rid), DEVMETHOD(bhnd_bus_decode_port_rid, siba_decode_port_rid), DEVMETHOD(bhnd_bus_get_region_addr, siba_get_region_addr), DEVMETHOD(bhnd_bus_get_intr_count, siba_get_intr_count), DEVMETHOD(bhnd_bus_get_core_ivec, siba_get_core_ivec), DEVMETHOD_END }; DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc), bhnd_driver); MODULE_VERSION(siba, 1); MODULE_DEPEND(siba, bhnd, 
1, 1, 1); Index: head/sys/dev/bhnd/siba/siba_erom.c =================================================================== --- head/sys/dev/bhnd/siba/siba_erom.c (revision 324070) +++ head/sys/dev/bhnd/siba/siba_erom.c (revision 324071) @@ -1,596 +1,504 @@ /*- * Copyright (c) 2015-2016 Landon Fuller + * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * + * Portions of this software were developed by Landon Fuller + * under sponsorship from the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "sibareg.h" #include "sibavar.h" struct siba_erom; struct siba_erom_io; static int siba_eio_init(struct siba_erom_io *io, - device_t parent, struct bhnd_resource *res, - int rid, bus_size_t offset, u_int ncores); + struct bhnd_erom_io *eio, u_int ncores); -static int siba_eio_init_static(struct siba_erom_io *io, - bus_space_tag_t bst, bus_space_handle_t bsh, - bus_size_t offset, u_int ncores); - static uint32_t siba_eio_read_4(struct siba_erom_io *io, u_int core_idx, bus_size_t offset); static struct siba_core_id siba_eio_read_core_id(struct siba_erom_io *io, u_int core_idx, int unit); static int siba_eio_read_chipid(struct siba_erom_io *io, bus_addr_t enum_addr, struct bhnd_chipid *cid); /** * SIBA EROM generic I/O context */ struct siba_erom_io { + struct bhnd_erom_io *eio; /**< erom I/O callbacks */ + bhnd_addr_t base_addr; /**< address of first core */ u_int ncores; /**< core count */ - bus_size_t offset; /**< base read offset */ - - /* resource state */ - device_t dev; /**< parent dev to use for resource allocations, - or NULL if unavailable. */ - struct bhnd_resource *res; /**< memory resource, or NULL */ - int rid; /**< memory resource ID */ - - /* bus tag state */ - bus_space_tag_t bst; /**< bus space tag */ - bus_space_handle_t bsh; /**< bus space handle */ }; /** * SIBA EROM per-instance state. */ struct siba_erom { struct bhnd_erom obj; struct siba_erom_io io; /**< i/o context */ }; -#define EROM_LOG(io, fmt, ...) do { \ - if (io->dev != NULL) { \ - device_printf(io->dev, "%s: " fmt, __FUNCTION__, \ - ##__VA_ARGS__); \ - } else { \ - printf("%s: " fmt, __FUNCTION__, ##__VA_ARGS__); \ - } \ +#define EROM_LOG(io, fmt, ...) 
do { \ + printf("%s: " fmt, __FUNCTION__, ##__VA_ARGS__); \ } while(0) +/* SIBA implementation of BHND_EROM_PROBE() */ static int -siba_erom_probe_common(struct siba_erom_io *io, const struct bhnd_chipid *hint, - struct bhnd_chipid *cid) +siba_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio, + const struct bhnd_chipid *hint, struct bhnd_chipid *cid) { + struct siba_erom_io io; uint32_t idreg; int error; + /* Initialize I/O context, assuming at least the first core is mapped */ + if ((error = siba_eio_init(&io, eio, 1))) + return (error); + /* Try using the provided hint. */ if (hint != NULL) { struct siba_core_id sid; /* Validate bus type */ if (hint->chip_type != BHND_CHIPTYPE_SIBA) return (ENXIO); /* * Verify the first core's IDHIGH/IDLOW identification. * * The core must be a Broadcom core, but must *not* be * a chipcommon core; those shouldn't be hinted. * * The first core on EXTIF-equipped devices varies, but on the * BCM4710, it's a SDRAM core (0x803). */ - sid = siba_eio_read_core_id(io, 0, 0); + sid = siba_eio_read_core_id(&io, 0, 0); if (sid.core_info.vendor != BHND_MFGID_BCM) return (ENXIO); if (sid.core_info.device == BHND_COREID_CC) return (EINVAL); *cid = *hint; } else { /* Validate bus type */ - idreg = siba_eio_read_4(io, 0, CHIPC_ID); + idreg = siba_eio_read_4(&io, 0, CHIPC_ID); if (CHIPC_GET_BITS(idreg, CHIPC_ID_BUS) != BHND_CHIPTYPE_SIBA) return (ENXIO); /* Identify the chipset */ - if ((error = siba_eio_read_chipid(io, SIBA_ENUM_ADDR, cid))) + if ((error = siba_eio_read_chipid(&io, SIBA_ENUM_ADDR, cid))) return (error); /* Verify the chip type */ if (cid->chip_type != BHND_CHIPTYPE_SIBA) return (ENXIO); } /* * gcc hack: ensure bhnd_chipid.ncores cannot exceed SIBA_MAX_CORES * without triggering build failure due to -Wtype-limits * * if (cid.ncores > SIBA_MAX_CORES) * return (EINVAL) */ _Static_assert((2^sizeof(cid->ncores)) <= SIBA_MAX_CORES, "ncores could result in over-read of backing resource"); return (0); } -/* SIBA 
implementation of BHND_EROM_PROBE() */ -static int -siba_erom_probe(bhnd_erom_class_t *cls, struct bhnd_resource *res, - bus_size_t offset, const struct bhnd_chipid *hint, - struct bhnd_chipid *cid) -{ - struct siba_erom_io io; - int error, rid; - - rid = rman_get_rid(res->res); - - /* Initialize I/O context, assuming at least 1 core exists. */ - if ((error = siba_eio_init(&io, NULL, res, rid, offset, 1))) - return (error); - - return (siba_erom_probe_common(&io, hint, cid)); -} - -/* SIBA implementation of BHND_EROM_PROBE_STATIC() */ -static int -siba_erom_probe_static(bhnd_erom_class_t *cls, bus_space_tag_t bst, - bus_space_handle_t bsh, bus_addr_t paddr, const struct bhnd_chipid *hint, - struct bhnd_chipid *cid) -{ - struct siba_erom_io io; - int error; - - /* Initialize I/O context, assuming at least 1 core exists. */ - if ((error = siba_eio_init_static(&io, bst, bsh, 0, 1))) - return (error); - - return (siba_erom_probe_common(&io, hint, cid)); -} - /* SIBA implementation of BHND_EROM_INIT() */ static int siba_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid, - device_t parent, int rid) + struct bhnd_erom_io *eio) { struct siba_erom *sc; - struct bhnd_resource *res; int error; - + sc = (struct siba_erom *)erom; - /* Allocate backing resource */ - res = bhnd_alloc_resource(parent, SYS_RES_MEMORY, &rid, - cid->enum_addr, cid->enum_addr + SIBA_ENUM_SIZE -1, SIBA_ENUM_SIZE, - RF_ACTIVE|RF_SHAREABLE); - if (res == NULL) - return (ENOMEM); + /* Attempt to map the full core enumeration space */ + error = bhnd_erom_io_map(eio, cid->enum_addr, + cid->ncores * SIBA_CORE_SIZE); + if (error) { + printf("%s: failed to map %u cores: %d\n", __FUNCTION__, + cid->ncores, error); + return (error); + } /* Initialize I/O context */ - error = siba_eio_init(&sc->io, parent, res, rid, 0x0, cid->ncores); - if (error) - bhnd_release_resource(parent, SYS_RES_MEMORY, rid, res); - - return (error); + return (siba_eio_init(&sc->io, eio, cid->ncores)); } -/* SIBA implementation 
of BHND_EROM_INIT_STATIC() */ -static int -siba_erom_init_static(bhnd_erom_t *erom, const struct bhnd_chipid *cid, - bus_space_tag_t bst, bus_space_handle_t bsh) -{ - struct siba_erom *sc; - - sc = (struct siba_erom *)erom; - - /* Initialize I/O context */ - return (siba_eio_init_static(&sc->io, bst, bsh, 0x0, cid->ncores)); -} - /* SIBA implementation of BHND_EROM_FINI() */ static void siba_erom_fini(bhnd_erom_t *erom) { struct siba_erom *sc = (struct siba_erom *)erom; - if (sc->io.res != NULL) { - bhnd_release_resource(sc->io.dev, SYS_RES_MEMORY, sc->io.rid, - sc->io.res); - - sc->io.res = NULL; - sc->io.rid = -1; - } + bhnd_erom_io_fini(sc->io.eio); } /* Initialize siba_erom resource I/O context */ static int -siba_eio_init(struct siba_erom_io *io, device_t parent, - struct bhnd_resource *res, int rid, bus_size_t offset, u_int ncores) +siba_eio_init(struct siba_erom_io *io, struct bhnd_erom_io *eio, u_int ncores) { - io->dev = parent; - io->res = res; - io->rid = rid; - io->offset = offset; + io->eio = eio; io->ncores = ncores; - return (0); } -/* Initialize siba_erom bus space I/O context */ -static int -siba_eio_init_static(struct siba_erom_io *io, bus_space_tag_t bst, - bus_space_handle_t bsh, bus_size_t offset, u_int ncores) -{ - io->res = NULL; - io->rid = -1; - io->bst = bst; - io->bsh = bsh; - io->offset = offset; - io->ncores = ncores; - - return (0); -} - /** * Read a 32-bit value from @p offset relative to the base address of * the given @p core_idx. * * @param io EROM I/O context. * @param core_idx Core index. * @param offset Core register offset. 
*/ static uint32_t siba_eio_read_4(struct siba_erom_io *io, u_int core_idx, bus_size_t offset) { - bus_size_t core_offset; - /* Sanity check core index and offset */ if (core_idx >= io->ncores) panic("core index %u out of range (ncores=%u)", core_idx, io->ncores); if (offset > SIBA_CORE_SIZE - sizeof(uint32_t)) panic("invalid core offset %#jx", (uintmax_t)offset); /* Perform read */ - core_offset = io->offset + SIBA_CORE_OFFSET(core_idx) + offset; - if (io->res != NULL) - return (bhnd_bus_read_4(io->res, core_offset)); - else - return (bus_space_read_4(io->bst, io->bsh, core_offset)); + return (bhnd_erom_io_read(io->eio, SIBA_CORE_OFFSET(core_idx) + offset, + 4)); } /** * Read and parse identification registers for the given @p core_index. * * @param io EROM I/O context. * @param core_idx The core index. * @param unit The caller-specified unit number to be included in the return * value. */ static struct siba_core_id siba_eio_read_core_id(struct siba_erom_io *io, u_int core_idx, int unit) { uint32_t idhigh, idlow; idhigh = siba_eio_read_4(io, core_idx, SB0_REG_ABS(SIBA_CFG0_IDHIGH)); idlow = siba_eio_read_4(io, core_idx, SB0_REG_ABS(SIBA_CFG0_IDLOW)); return (siba_parse_core_id(idhigh, idlow, core_idx, unit)); } /** * Read and parse the chip identification register from the ChipCommon core. * * @param io EROM I/O context. * @param enum_addr The physical address mapped by @p io. * @param cid On success, the parsed chip identifier. 
*/ static int siba_eio_read_chipid(struct siba_erom_io *io, bus_addr_t enum_addr, struct bhnd_chipid *cid) { struct siba_core_id ccid; uint32_t idreg; /* Identify the chipcommon core */ ccid = siba_eio_read_core_id(io, 0, 0); if (ccid.core_info.vendor != BHND_MFGID_BCM || ccid.core_info.device != BHND_COREID_CC) { if (bootverbose) { EROM_LOG(io, "first core not chipcommon " "(vendor=%#hx, core=%#hx)\n", ccid.core_info.vendor, ccid.core_info.device); } return (ENXIO); } /* Identify the chipset */ idreg = siba_eio_read_4(io, 0, CHIPC_ID); *cid = bhnd_parse_chipid(idreg, enum_addr); /* Fix up the core count in-place */ return (bhnd_chipid_fixed_ncores(cid, ccid.core_info.hwrev, &cid->ncores)); } static int siba_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc, struct bhnd_core_info *core) { struct siba_erom *sc; struct bhnd_core_match imatch; sc = (struct siba_erom *)erom; /* We can't determine a core's unit number during the initial scan. */ imatch = *desc; imatch.m.match.core_unit = 0; /* Locate the first matching core */ for (u_int i = 0; i < sc->io.ncores; i++) { struct siba_core_id sid; struct bhnd_core_info ci; /* Read the core info */ sid = siba_eio_read_core_id(&sc->io, i, 0); ci = sid.core_info; /* Check for initial match */ if (!bhnd_core_matches(&ci, &imatch)) continue; /* Re-scan preceding cores to determine the unit number. */ for (u_int j = 0; j < i; j++) { - sid = siba_eio_read_core_id(&sc->io, i, 0); + sid = siba_eio_read_core_id(&sc->io, j, 0); /* Bump the unit number? 
*/ if (sid.core_info.vendor == ci.vendor && sid.core_info.device == ci.device) ci.unit++; } /* Check for full match against now-valid unit number */ if (!bhnd_core_matches(&ci, desc)) continue; /* Matching core found */ *core = ci; return (0); } /* Not found */ return (ENOENT); } static int siba_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc, bhnd_port_type type, u_int port, u_int region, struct bhnd_core_info *info, bhnd_addr_t *addr, bhnd_size_t *size) { struct siba_erom *sc; struct bhnd_core_info core; struct siba_core_id sid; uint32_t am, am_addr, am_size; u_int am_offset; u_int addrspace; int error; sc = (struct siba_erom *)erom; /* Locate the requested core */ if ((error = siba_erom_lookup_core(erom, desc, &core))) return (error); /* Fetch full siba core ident */ sid = siba_eio_read_core_id(&sc->io, core.core_idx, core.unit); /* Is port valid? */ if (!siba_is_port_valid(sid.num_addrspace, type, port)) return (ENOENT); /* Is region valid? */ if (region >= siba_addrspace_region_count(sid.num_addrspace, port)) return (ENOENT); /* Map the bhnd port values to a siba addrspace index */ error = siba_addrspace_index(sid.num_addrspace, type, port, region, &addrspace); if (error) return (error); /* Determine the register offset */ am_offset = siba_admatch_offset(addrspace); if (am_offset == 0) { printf("addrspace %u is unsupported", addrspace); return (ENODEV); } /* Read and parse the address match register */ am = siba_eio_read_4(&sc->io, core.core_idx, am_offset); if ((error = siba_parse_admatch(am, &am_addr, &am_size))) { printf("failed to decode address match register value 0x%x\n", am); return (error); } if (info != NULL) *info = core; *addr = am_addr; *size = am_size; return (0); } /* BHND_EROM_GET_CORE_TABLE() */ static int siba_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores, u_int *num_cores) { struct siba_erom *sc; struct bhnd_core_info *out; sc = (struct siba_erom *)erom; /* Allocate our core array */ out = 
malloc(sizeof(*out) * sc->io.ncores, M_BHND, M_NOWAIT); if (out == NULL) return (ENOMEM); *cores = out; *num_cores = sc->io.ncores; /* Enumerate all cores. */ for (u_int i = 0; i < sc->io.ncores; i++) { struct siba_core_id sid; /* Read the core info */ sid = siba_eio_read_core_id(&sc->io, i, 0); out[i] = sid.core_info; /* Determine unit number */ for (u_int j = 0; j < i; j++) { if (out[j].vendor == out[i].vendor && out[j].device == out[i].device) out[i].unit++; } } return (0); } /* BHND_EROM_FREE_CORE_TABLE() */ static void siba_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores) { free(cores, M_BHND); } /* BHND_EROM_DUMP() */ static int siba_erom_dump(bhnd_erom_t *erom) { struct siba_erom *sc; int error; sc = (struct siba_erom *)erom; /* Enumerate all cores. */ for (u_int i = 0; i < sc->io.ncores; i++) { uint32_t idhigh, idlow; uint32_t nraddr; idhigh = siba_eio_read_4(&sc->io, i, SB0_REG_ABS(SIBA_CFG0_IDHIGH)); idlow = siba_eio_read_4(&sc->io, i, SB0_REG_ABS(SIBA_CFG0_IDLOW)); printf("siba core %u:\n", i); printf("\tvendor:\t0x%04x\n", SIBA_REG_GET(idhigh, IDH_VENDOR)); printf("\tdevice:\t0x%04x\n", SIBA_REG_GET(idhigh, IDH_DEVICE)); printf("\trev:\t0x%04x\n", SIBA_IDH_CORE_REV(idhigh)); printf("\tsbrev:\t0x%02x\n", SIBA_REG_GET(idlow, IDL_SBREV)); /* Enumerate the address match registers */ nraddr = SIBA_REG_GET(idlow, IDL_NRADDR); printf("\tnraddr\t0x%04x\n", nraddr); for (size_t addrspace = 0; addrspace < nraddr; addrspace++) { uint32_t am, am_addr, am_size; u_int am_offset; /* Determine the register offset */ am_offset = siba_admatch_offset(addrspace); if (am_offset == 0) { printf("addrspace %zu unsupported", addrspace); break; } /* Read and parse the address match register */ am = siba_eio_read_4(&sc->io, i, am_offset); error = siba_parse_admatch(am, &am_addr, &am_size); if (error) { printf("failed to decode address match " "register value 0x%x\n", am); continue; } printf("\taddrspace %zu\n", addrspace); printf("\t\taddr: 0x%08x\n", 
am_addr); printf("\t\tsize: 0x%08x\n", am_size); } } return (0); } static kobj_method_t siba_erom_methods[] = { KOBJMETHOD(bhnd_erom_probe, siba_erom_probe), - KOBJMETHOD(bhnd_erom_probe_static, siba_erom_probe_static), KOBJMETHOD(bhnd_erom_init, siba_erom_init), - KOBJMETHOD(bhnd_erom_init_static, siba_erom_init_static), KOBJMETHOD(bhnd_erom_fini, siba_erom_fini), KOBJMETHOD(bhnd_erom_get_core_table, siba_erom_get_core_table), KOBJMETHOD(bhnd_erom_free_core_table, siba_erom_free_core_table), KOBJMETHOD(bhnd_erom_lookup_core, siba_erom_lookup_core), KOBJMETHOD(bhnd_erom_lookup_core_addr, siba_erom_lookup_core_addr), KOBJMETHOD(bhnd_erom_dump, siba_erom_dump), KOBJMETHOD_END }; BHND_EROM_DEFINE_CLASS(siba_erom, siba_erom_parser, siba_erom_methods, sizeof(struct siba_erom)); Index: head/sys/mips/broadcom/bcm_machdep.c =================================================================== --- head/sys/mips/broadcom/bcm_machdep.c (revision 324070) +++ head/sys/mips/broadcom/bcm_machdep.c (revision 324071) @@ -1,603 +1,636 @@ /*- * Copyright (c) 2007 Bruce M. Simpson. * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include "bcm_machdep.h" #include "bcm_bmips_exts.h" #ifdef CFE #include #include #endif #if 0 #define BCM_TRACE(_fmt, ...) printf(_fmt, ##__VA_ARGS__) #else #define BCM_TRACE(_fmt, ...) 
#endif static int bcm_init_platform_data(struct bcm_platform *bp); static int bcm_find_core(struct bcm_platform *bp, const struct bhnd_core_match *descs, size_t num_descs, struct bhnd_core_info *info, uintptr_t *addr); static int bcm_erom_probe_and_attach(bhnd_erom_class_t **erom_cls, kobj_ops_t erom_ops, bhnd_erom_t *erom, size_t esize, - struct bhnd_chipid *cid); + struct bhnd_erom_io *eio, struct bhnd_chipid *cid); extern int *edata; extern int *end; static struct bcm_platform bcm_platform_data; static bool bcm_platform_data_avail = false; #ifdef CFE static struct bcm_nvram_iocfe bcm_cfe_nvram; #endif static const struct bhnd_core_match bcm_chipc_cores[] = { { BHND_MATCH_CORE(BHND_MFGID_BCM, BHND_COREID_CC) }, { BHND_MATCH_CORE(BHND_MFGID_BCM, BHND_COREID_4706_CC) }, }; static const struct bhnd_core_match bcm_pmu_cores[] = { { BHND_MATCH_CORE(BHND_MFGID_BCM, BHND_COREID_PMU) }, }; struct bcm_platform * bcm_get_platform(void) { if (!bcm_platform_data_avail) panic("platform data not available"); return (&bcm_platform_data); } static bus_addr_t bcm_get_bus_addr(void) { long maddr; if (resource_long_value("bhnd", 0, "maddr", &maddr) == 0) return ((u_long)maddr); return (BHND_DEFAULT_CHIPC_ADDR); } +static bus_size_t +bcm_get_bus_size(void) +{ + long msize; + + if (resource_long_value("bhnd", 0, "msize", &msize) == 0) + return ((u_long)msize); + + return (BHND_DEFAULT_ENUM_SIZE); +} + /** * Search the device enumeration table for a core matching @p descs, * * @param bp Platform state containing a valid EROM parser. * @param descs The core match descriptor table. * @param num_descs The number of match descriptors in @p descs. * @param[out] info If non-NULL, will be populated with the core * info. * @param[out] addr If non-NULL, will be populated with the core's * physical register address. 
*/ static int bcm_find_core(struct bcm_platform *bp, const struct bhnd_core_match *descs, size_t num_descs, struct bhnd_core_info *info, uintptr_t *addr) { bhnd_addr_t b_addr; bhnd_size_t b_size; int error; /* Fetch core info */ for (size_t i = 0; i < num_descs; i++) { error = bhnd_erom_lookup_core_addr(&bp->erom.obj, &descs[i], BHND_PORT_DEVICE, 0, 0, info, &b_addr, &b_size); /* Terminate search on first match */ if (error == 0) break; /* Terminate on first error (other than core not found) */ if (error != ENOENT) return (error); /* Continue search ... */ } /* Provide the core's base address */ if (addr != NULL && b_addr > UINTPTR_MAX) { BCM_ERR("core address %#jx overflows native address width\n", (uintmax_t)b_addr); return (ERANGE); } if (addr != NULL) *addr = b_addr; return (0); } /** * Read a variable directly from NVRAM, decoding as @p type. * * @param bp Platform state. * @param name The raw name of the variable to be fetched, * including any device path (/pci/1/1/varname) or * alias prefix (0:varname). * @param[out] buf On success, the requested value will be written * to this buffer. This argment may be NULL if * the value is not desired. * @param[in,out] len The capacity of @p buf. On success, will be set * to the actual size of the requested value. * @param type The data type to be written to @p buf. * * @retval 0 success * @retval ENOMEM If @p buf is non-NULL and a buffer of @p len is too * small to hold the requested value. * @retval ENOENT If @p name is not found. * @retval EFTYPE If the variable data cannot be coerced to @p type. * @retval ERANGE If value coercion would overflow @p type. * @retval non-zero If parsing NVRAM otherwise fails, a regular unix error * code will be returned. 
*/ int bcm_get_nvram(struct bcm_platform *bp, const char *name, void *buf, size_t *len, bhnd_nvram_type type) { if (bp->nvram_io == NULL || bp->nvram_cls == NULL) return (ENOENT); return (bhnd_nvram_data_getvar_direct(bp->nvram_cls, bp->nvram_io, name, buf, len, type)); } /** * Probe and attach a bhnd_erom parser instance for the bhnd bus. * * @param[out] erom_cls The probed EROM class. * @param[out] erom_ops The storage to be used when compiling * @p erom_cls. * @param[out] erom The storage to be used when initializing the * static instance of @p erom_cls. * @param esize The total available number of bytes allocated * for @p erom. If this is less than is required * by @p erom_cls ENOMEM will be returned. + * @param eio EROM I/O callbacks to be used. * @param[out] cid On success, the probed chip identification. */ static int bcm_erom_probe_and_attach(bhnd_erom_class_t **erom_cls, kobj_ops_t erom_ops, - bhnd_erom_t *erom, size_t esize, struct bhnd_chipid *cid) + bhnd_erom_t *erom, size_t esize, struct bhnd_erom_io *eio, + struct bhnd_chipid *cid) { bhnd_erom_class_t **clsp; - bus_space_tag_t bst; - bus_space_handle_t bsh; bus_addr_t bus_addr; int error, prio, result; - bus_addr = bcm_get_bus_addr(); *erom_cls = NULL; prio = 0; - bst = mips_bus_space_generic; - bsh = BCM_SOC_BSH(bus_addr, 0); + /* Map our first bus core for the erom probe */ + bus_addr = bcm_get_bus_addr(); + if ((error = bhnd_erom_io_map(eio, bus_addr, BHND_DEFAULT_CORE_SIZE))) { + BCM_ERR("failed to map first core at %#jx+%#jx: %d\n", + (uintmax_t)bus_addr, (uintmax_t)BHND_DEFAULT_CORE_SIZE, + error); + return (error); + } + SET_FOREACH(clsp, bhnd_erom_class_set) { struct bhnd_chipid pcid; bhnd_erom_class_t *cls; struct kobj_ops kops; cls = *clsp; /* Compile the class' ops table */ kobj_class_compile_static(cls, &kops); /* Probe the bus address */ - result = bhnd_erom_probe_static(cls, bst, bsh, bus_addr, NULL, - &pcid); + result = bhnd_erom_probe(cls, eio, NULL, &pcid); /* Drop pointer to stack 
allocated ops table */ cls->ops = NULL; /* The parser did not match if an error was returned */ if (result > 0) continue; /* Check for a new highest priority match */ if (*erom_cls == NULL || result > prio) { prio = result; *cid = pcid; *erom_cls = cls; } /* Terminate immediately on BUS_PROBE_SPECIFIC */ if (result == BUS_PROBE_SPECIFIC) break; } /* Valid EROM class probed? */ if (*erom_cls == NULL) { BCM_ERR("no erom parser found for root bus at %#jx\n", (uintmax_t)bus_addr); + return (ENOENT); } /* Using the provided storage, recompile the erom class ... */ kobj_class_compile_static(*erom_cls, erom_ops); /* ... and initialize the erom parser instance */ - bsh = BCM_SOC_BSH(cid->enum_addr, 0); - error = bhnd_erom_init_static(*erom_cls, erom, esize, cid, - mips_bus_space_generic, bsh); + error = bhnd_erom_init_static(*erom_cls, erom, esize, cid, eio); return (error); } /** * Populate platform configuration data. */ static int bcm_init_platform_data(struct bcm_platform *bp) { - bool aob, pmu; - int error; + bus_addr_t bus_addr, bus_size; + bus_space_tag_t erom_bst; + bus_space_handle_t erom_bsh; + bool aob, pmu; + int error; + bus_addr = bcm_get_bus_addr(); + bus_size = bcm_get_bus_size(); + #ifdef CFE /* Fetch CFE console handle (if any). Must be initialized before * any calls to printf/early_putc. 
*/ if ((bp->cfe_console = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE)) < 0) bp->cfe_console = -1; /* Probe CFE NVRAM sources */ bp->nvram_io = &bcm_cfe_nvram.io; error = bcm_nvram_find_cfedev(&bcm_cfe_nvram, &bp->nvram_cls); if (error) { bp->nvram_io = NULL; bp->nvram_cls = NULL; } #endif /* CFE */ /* Probe and attach device table provider, populating our * chip identification */ + erom_bst = mips_bus_space_generic; + erom_bsh = BCM_SOC_BSH(bus_addr, 0); + + error = bhnd_erom_iobus_init(&bp->erom_io, bus_addr, bus_size, erom_bst, + erom_bsh); + if (error) { + BCM_ERR("failed to initialize erom I/O callbacks: %d\n", error); + return (error); + } + error = bcm_erom_probe_and_attach(&bp->erom_impl, &bp->erom_ops, - &bp->erom.obj, sizeof(bp->erom), &bp->cid); + &bp->erom.obj, sizeof(bp->erom), &bp->erom_io.eio, &bp->cid); if (error) { BCM_ERR("error attaching erom parser: %d\n", error); + bhnd_erom_io_fini(&bp->erom_io.eio); return (error); } if (bootverbose) bhnd_erom_dump(&bp->erom.obj); /* Fetch chipcommon core info */ error = bcm_find_core(bp, bcm_chipc_cores, nitems(bcm_chipc_cores), &bp->cc_id, &bp->cc_addr); if (error) { BCM_ERR("error locating chipc core: %d\n", error); return (error); } /* Fetch chipc capability flags */ bp->cc_caps = BCM_SOC_READ_4(bp->cc_addr, CHIPC_CAPABILITIES); bp->cc_caps_ext = 0x0; if (CHIPC_HWREV_HAS_CAP_EXT(bp->cc_id.hwrev)) bp->cc_caps_ext = BCM_CHIPC_READ_4(bp, CHIPC_CAPABILITIES_EXT); /* Fetch PMU info */ pmu = CHIPC_GET_FLAG(bp->cc_caps, CHIPC_CAP_PMU); aob = CHIPC_GET_FLAG(bp->cc_caps_ext, CHIPC_CAP2_AOB); if (pmu && aob) { /* PMU block mapped to a PMU core on the Always-on-Bus (aob) */ error = bcm_find_core(bp, bcm_pmu_cores, nitems(bcm_pmu_cores), &bp->pmu_id, &bp->pmu_addr); if (error) { BCM_ERR("error locating pmu core: %d\n", error); return (error); } } else if (pmu) { /* PMU block mapped to chipc */ bp->pmu_addr = bp->cc_addr; bp->pmu_id = bp->cc_id; } else { /* No PMU */ bp->pmu_addr = 0x0; memset(&bp->pmu_id, 0, 
sizeof(bp->pmu_id)); } /* Initialize PMU query state */ if (pmu) { error = bhnd_pmu_query_init(&bp->pmu, NULL, bp->cid, &bcm_pmu_soc_io, bp); if (error) { BCM_ERR("bhnd_pmu_query_init() failed: %d\n", error); return (error); } } /* Initialize our platform service registry */ if ((error = bhnd_service_registry_init(&bp->services))) { BCM_ERR("error initializing service registry: %d\n", error); return (error); } bcm_platform_data_avail = true; return (0); } void platform_cpu_init() { /* Nothing special */ } static void mips_init(void) { int i, j; printf("entry: mips_init()\n"); #ifdef CFE /* * Query DRAM memory map from CFE. */ physmem = 0; for (i = 0; i < 10; i += 2) { int result; uint64_t addr, len, type; result = cfe_enummem(i / 2, 0, &addr, &len, &type); if (result < 0) { BCM_TRACE("There is no phys memory for: %d\n", i); phys_avail[i] = phys_avail[i + 1] = 0; break; } if (type != CFE_MI_AVAILABLE) { BCM_TRACE("phys memory is not available: %d\n", i); continue; } phys_avail[i] = addr; if (i == 0 && addr == 0) { /* * If this is the first physical memory segment probed * from CFE, omit the region at the start of physical * memory where the kernel has been loaded. 
*/ phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end); } BCM_TRACE("phys memory is available for: %d\n", i); BCM_TRACE(" => addr = %jx\n", addr); BCM_TRACE(" => len = %jd\n", len); phys_avail[i + 1] = addr + len; physmem += len; } BCM_TRACE("Total phys memory is : %ld\n", physmem); realmem = btoc(physmem); #endif for (j = 0; j < i; j++) dump_avail[j] = phys_avail[j]; physmem = realmem; init_param1(); init_param2(physmem); mips_cpu_init(); pmap_bootstrap(); mips_proc0_init(); mutex_init(); kdb_init(); #ifdef KDB if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); #endif } void platform_reset(void) { struct bcm_platform *bp; bool bcm4785war; printf("bcm::platform_reset()\n"); intr_disable(); #ifdef CFE /* Fall back on CFE if reset requested during platform * data initialization */ if (!bcm_platform_data_avail) { cfe_exit(0, 0); while (1); } #endif bp = bcm_get_platform(); bcm4785war = false; /* Handle BCM4785-specific behavior */ if (bp->cid.chip_id == BHND_CHIPID_BCM4785) { bcm4785war = true; /* Switch to async mode */ bcm_bmips_wr_pllcfg3(BMIPS_BCMCFG_PLLCFG3_SM); } /* Set watchdog (PMU or ChipCommon) */ if (bp->pmu_addr != 0x0) { BCM_PMU_WRITE_4(bp, BHND_PMU_WATCHDOG, 1); } else BCM_CHIPC_WRITE_4(bp, CHIPC_WATCHDOG, 1); /* BCM4785 */ if (bcm4785war) { mips_sync(); __asm __volatile("wait"); } while (1); } void platform_start(__register_t a0, __register_t a1, __register_t a2, __register_t a3) { vm_offset_t kernend; uint64_t platform_counter_freq; int error; /* clear the BSS and SBSS segments */ kernend = (vm_offset_t)&end; memset(&edata, 0, kernend - (vm_offset_t)(&edata)); mips_postboot_fixup(); /* Initialize pcpu stuff */ mips_pcpu0_init(); #ifdef CFE /* * Initialize CFE firmware trampolines. This must be done * before any CFE APIs are called, including writing * to the CFE console. 
* * CFE passes the following values in registers: * a0: firmware handle * a2: firmware entry point * a3: entry point seal */ if (a3 == CFE_EPTSEAL) cfe_init(a0, a2); #endif /* Init BCM platform data */ if ((error = bcm_init_platform_data(&bcm_platform_data))) panic("bcm_init_platform_data() failed: %d", error); platform_counter_freq = bcm_get_cpufreq(bcm_get_platform()); /* CP0 ticks every two cycles */ mips_timer_early_init(platform_counter_freq / 2); cninit(); mips_init(); mips_timer_init_params(platform_counter_freq, 1); } /* * CFE-based EARLY_PRINTF support. To use, add the following to the kernel * config: * option EARLY_PRINTF * option CFE * device cfe */ #if defined(EARLY_PRINTF) && defined(CFE) static void bcm_cfe_eputc(int c) { unsigned char ch; int handle; ch = (unsigned char) c; /* bcm_get_platform() cannot be used here, as we may be called * from bcm_init_platform_data(). */ if ((handle = bcm_platform_data.cfe_console) < 0) return; if (ch == '\n') early_putc('\r'); while ((cfe_write(handle, &ch, 1)) == 0) continue; } early_putc_t *early_putc = bcm_cfe_eputc; #endif /* EARLY_PRINTF */ Index: head/sys/mips/broadcom/bcm_machdep.h =================================================================== --- head/sys/mips/broadcom/bcm_machdep.h (revision 324070) +++ head/sys/mips/broadcom/bcm_machdep.h (revision 324071) @@ -1,129 +1,130 @@ /*- * Copyright (c) 2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #ifndef _MIPS_BROADCOM_BCM_MACHDEP_H_ #define _MIPS_BROADCOM_BCM_MACHDEP_H_ #include #include #include -#include +#include #include #include "bcm_nvram_cfevar.h" extern const struct bhnd_pmu_io bcm_pmu_soc_io; struct bcm_platform { struct bhnd_chipid cid; /**< chip id */ struct bhnd_core_info cc_id; /**< chipc core info */ uintptr_t cc_addr; /**< chipc core phys address */ uint32_t cc_caps; /**< chipc capabilities */ uint32_t cc_caps_ext; /**< chipc extended capabilies */ /* On non-AOB devices, the PMU register block is mapped to chipc; * the pmu_id and pmu_addr values will be copied from cc_id * and cc_addr. 
*/ struct bhnd_core_info pmu_id; /**< PMU core info */ uintptr_t pmu_addr; /**< PMU core phys address, or 0x0 if no PMU */ struct bhnd_pmu_query pmu; /**< PMU query instance */ bhnd_erom_class_t *erom_impl; /**< erom parser class */ struct kobj_ops erom_ops; /**< compiled kobj opcache */ + struct bhnd_erom_iobus erom_io; /**< erom I/O callbacks */ union { bhnd_erom_static_t data; bhnd_erom_t obj; } erom; struct bhnd_nvram_io *nvram_io; /**< NVRAM I/O context, or NULL if unavailable */ bhnd_nvram_data_class *nvram_cls; /**< NVRAM data class, or NULL if unavailable */ struct bhnd_service_registry services; /**< platform service providers */ #ifdef CFE int cfe_console; /**< Console handle, or -1 */ #endif }; struct bcm_platform *bcm_get_platform(void); uint64_t bcm_get_cpufreq(struct bcm_platform *bp); uint64_t bcm_get_sifreq(struct bcm_platform *bp); uint64_t bcm_get_alpfreq(struct bcm_platform *bp); uint64_t bcm_get_ilpfreq(struct bcm_platform *bp); u_int bcm_get_uart_rclk(struct bcm_platform *bp); int bcm_get_nvram(struct bcm_platform *bp, const char *name, void *outp, size_t *olen, bhnd_nvram_type type); #define BCM_ERR(fmt, ...) 
\ printf("%s: " fmt, __FUNCTION__, ##__VA_ARGS__) #define BCM_SOC_BSH(_addr, _offset) \ ((bus_space_handle_t)BCM_SOC_ADDR((_addr), (_offset))) #define BCM_SOC_ADDR(_addr, _offset) \ MIPS_PHYS_TO_KSEG1((_addr) + (_offset)) #define BCM_SOC_READ_4(_addr, _offset) \ readl(BCM_SOC_ADDR((_addr), (_offset))) #define BCM_SOC_WRITE_4(_addr, _reg, _val) \ writel(BCM_SOC_ADDR((_addr), (_offset)), (_val)) #define BCM_CORE_ADDR(_bp, _name, _reg) \ BCM_SOC_ADDR(_bp->_name, (_reg)) #define BCM_CORE_READ_4(_bp, _name, _reg) \ readl(BCM_CORE_ADDR(_bp, _name, (_reg))) #define BCM_CORE_WRITE_4(_bp, _name, _reg, _val) \ writel(BCM_CORE_ADDR(_bp, _name, (_reg)), (_val)) #define BCM_CHIPC_READ_4(_bp, _reg) \ BCM_CORE_READ_4(_bp, cc_addr, (_reg)) #define BCM_CHIPC_WRITE_4(_bp, _reg, _val) \ BCM_CORE_WRITE_4(_bp, cc_addr, (_reg), (_val)) #define BCM_PMU_READ_4(_bp, _reg) \ BCM_CORE_READ_4(_bp, pmu_addr, (_reg)) #define BCM_PMU_WRITE_4(_bp, _reg, _val) \ BCM_CORE_WRITE_4(_bp, pmu_addr, (_reg), (_val)) #endif /* _MIPS_BROADCOM_BCM_MACHDEP_H_ */ Index: head/sys/modules/bhnd/bhndb_pci/Makefile =================================================================== --- head/sys/modules/bhnd/bhndb_pci/Makefile (revision 324070) +++ head/sys/modules/bhnd/bhndb_pci/Makefile (revision 324071) @@ -1,13 +1,14 @@ # $FreeBSD$ .PATH: ${SRCTOP}/sys/dev/bhnd/bhndb KMOD= bhndb_pci SRCS= bhndb_pci.c bhndb_pci_hwdata.c \ bhndb_pci_sprom.c SRCS+= bhnd_bus_if.h bhndb_bus_if.h bhndb_if.h +SRCS+= bhnd_erom_if.h SRCS+= bhnd_nvram_if.h SRCS+= device_if.h bus_if.h pci_if.h .include