Index: head/sys/dev/bhnd/bcma/bcma_dmp.h
===================================================================
--- head/sys/dev/bhnd/bcma/bcma_dmp.h	(revision 326870)
+++ head/sys/dev/bhnd/bcma/bcma_dmp.h	(revision 326871)
@@ -1,267 +1,267 @@
/*-
 * SPDX-License-Identifier: ISC
 *
 * Copyright (c) 2015 Landon Fuller
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Portions of this file were derived from the aidmp.h header
 * distributed with Broadcom's initial brcm80211 Linux driver release, as
 * contributed to the Linux staging repository.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef _BCMA_BCMA_DMP_H_
#define	_BCMA_BCMA_DMP_H_

/*
 * PL-368 Device Management Plugin (DMP) Registers & Constants
 *
 * The "DMP" core used in Broadcom HND devices has been described
 * by Broadcom engineers (and in published header files) as being
 * ARM's PL-368 "Device Management Plugin" system IP, included with
 * the CoreLink AMBA Designer tooling.
 *
 * Documentation for the PL-368 is not publicly available, however,
 * and the only public reference by ARM to its existence appears to be
 * in the proprietary "NIC-301 Interconnect Device Management (PL368)"
 * errata publication, available to licensees as part of ARM's
 * CoreLink Controllers and Peripherals Engineering Errata.
 *
 * As such, the exact interpretation of these register definitions is
 * unconfirmed, and may be incorrect.
 */

#define	BCMA_DMP_GET_FLAG(_value, _flag)	\
	(((_value) & _flag) != 0)

#define	BCMA_DMP_GET_BITS(_value, _field)	\
	((_value & _field ## _MASK) >> _field ## _SHIFT)

#define	BHND_DMP_SET_BITS(_value, _field)	\
	(((_value) << _field ## _SHIFT) & _field ## _MASK)

/* Out-of-band Router registers */
#define	BCMA_OOB_BUSCONFIG	0x020
#define	BCMA_OOB_STATUSA	0x100
#define	BCMA_OOB_STATUSB	0x104
#define	BCMA_OOB_STATUSC	0x108
#define	BCMA_OOB_STATUSD	0x10c
#define	BCMA_OOB_ENABLEA0	0x200
#define	BCMA_OOB_ENABLEA1	0x204
#define	BCMA_OOB_ENABLEA2	0x208
#define	BCMA_OOB_ENABLEA3	0x20c
#define	BCMA_OOB_ENABLEB0	0x280
#define	BCMA_OOB_ENABLEB1	0x284
#define	BCMA_OOB_ENABLEB2	0x288
#define	BCMA_OOB_ENABLEB3	0x28c
#define	BCMA_OOB_ENABLEC0	0x300
#define	BCMA_OOB_ENABLEC1	0x304
#define	BCMA_OOB_ENABLEC2	0x308
#define	BCMA_OOB_ENABLEC3	0x30c
#define	BCMA_OOB_ENABLED0	0x380
#define	BCMA_OOB_ENABLED1	0x384
#define	BCMA_OOB_ENABLED2	0x388
#define	BCMA_OOB_ENABLED3	0x38c
#define	BCMA_OOB_ITCR		0xf00
#define	BCMA_OOB_ITIPOOBA	0xf10
#define	BCMA_OOB_ITIPOOBB	0xf14
#define	BCMA_OOB_ITIPOOBC	0xf18
#define	BCMA_OOB_ITIPOOBD	0xf1c
#define	BCMA_OOB_ITOPOOBA	0xf30
#define	BCMA_OOB_ITOPOOBB	0xf34
#define	BCMA_OOB_ITOPOOBC	0xf38
#define	BCMA_OOB_ITOPOOBD	0xf3c

/* Common definitions */
#define	BCMA_OOB_NUM_BANKS	4	/**< number of OOB banks (A, B, C, D) */
#define	BCMA_OOB_NUM_SEL	8	/**< number of OOB selectors per bank */
#define	BCMA_OOB_NUM_BUSLINES	32	/**< number of bus lines managed by OOB core */

#define	BCMA_OOB_BANKA		0	/**< bank A index */
#define	BCMA_OOB_BANKB		1	/**< bank B index */
#define	BCMA_OOB_BANKC		2	/**< bank C index */
#define	BCMA_OOB_BANKD		3	/**< bank D index */

/** OOB bank used for interrupt lines */
#define	BCMA_OOB_BANK_INTR	BCMA_OOB_BANKA

/* DMP agent registers */
#define	BCMA_DMP_OOBSELINA30	0x000	/**< A0-A3 input selectors */
#define	BCMA_DMP_OOBSELINA74	0x004	/**< A4-A7 input selectors */
#define	BCMA_DMP_OOBSELINB30	0x020	/**< B0-B3 input selectors */
#define	BCMA_DMP_OOBSELINB74	0x024	/**< B4-B7 input selectors */
#define	BCMA_DMP_OOBSELINC30	0x040	/**< C0-C3 input selectors */
#define	BCMA_DMP_OOBSELINC74	0x044	/**< C4-C7 input selectors */
#define	BCMA_DMP_OOBSELIND30	0x060	/**< D0-D3 input selectors */
#define	BCMA_DMP_OOBSELIND74	0x064	/**< D4-D7 input selectors */
#define	BCMA_DMP_OOBSELOUTA30	0x100	/**< A0-A3 output selectors */
#define	BCMA_DMP_OOBSELOUTA74	0x104	/**< A4-A7 output selectors */
#define	BCMA_DMP_OOBSELOUTB30	0x120	/**< B0-B3 output selectors */
#define	BCMA_DMP_OOBSELOUTB74	0x124	/**< B4-B7 output selectors */
#define	BCMA_DMP_OOBSELOUTC30	0x140	/**< C0-C3 output selectors */
#define	BCMA_DMP_OOBSELOUTC74	0x144	/**< C4-C7 output selectors */
#define	BCMA_DMP_OOBSELOUTD30	0x160	/**< D0-D3 output selectors */
#define	BCMA_DMP_OOBSELOUTD74	0x164	/**< D4-D7 output selectors */
#define	BCMA_DMP_OOBSYNCA	0x200
#define	BCMA_DMP_OOBSELOUTAEN	0x204
#define	BCMA_DMP_OOBSYNCB	0x220
#define	BCMA_DMP_OOBSELOUTBEN	0x224
#define	BCMA_DMP_OOBSYNCC	0x240
#define	BCMA_DMP_OOBSELOUTCEN	0x244
#define	BCMA_DMP_OOBSYNCD	0x260
#define	BCMA_DMP_OOBSELOUTDEN	0x264
#define	BCMA_DMP_OOBAEXTWIDTH	0x300
#define	BCMA_DMP_OOBAINWIDTH	0x304
#define	BCMA_DMP_OOBAOUTWIDTH	0x308
#define	BCMA_DMP_OOBBEXTWIDTH	0x320
#define	BCMA_DMP_OOBBINWIDTH	0x324
#define	BCMA_DMP_OOBBOUTWIDTH	0x328
#define	BCMA_DMP_OOBCEXTWIDTH	0x340
#define	BCMA_DMP_OOBCINWIDTH	0x344
#define	BCMA_DMP_OOBCOUTWIDTH	0x348
#define	BCMA_DMP_OOBDEXTWIDTH	0x360
#define	BCMA_DMP_OOBDINWIDTH	0x364
#define	BCMA_DMP_OOBDOUTWIDTH	0x368
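/*
 * Layout implied by the selector definitions above (an illustrative sketch,
 * not part of the original header): each bank routes up to eight selectors
 * across two 32-bit registers -- e.g. BCMA_DMP_OOBSELOUTA30 holds output
 * selectors 0-3, and BCMA_DMP_OOBSELOUTA74 holds selectors 4-7 -- with one
 * 8-bit field per selector: a 7-bit bus line number plus an enable bit.
 * Using the accessor macros defined just below, decoding the bus line routed
 * to output selector `sel' of the interrupt bank would look like this, with
 * read4() standing in for bhnd_bus_read_4():
 *
 *	selout = read4(BCMA_DMP_OOBSEL OUT register offset:
 *	    BCMA_DMP_OOBSELOUT(BCMA_OOB_BANK_INTR, sel));
 *	line = (selout >> BCMA_DMP_OOBSEL_SHIFT(sel)) &
 *	    BCMA_DMP_OOBSEL_BUSLINE_MASK;
 */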
#define	BCMA_DMP_OOBSEL(_base, _bank, _sel)	\
	(_base + (_bank * 8) + (_sel >= 4 ? 4 : 0))
#define	BCMA_DMP_OOBSELIN(_bank, _sel)	\
	BCMA_DMP_OOBSEL(BCMA_DMP_OOBSELINA30, _bank, _sel)
#define	BCMA_DMP_OOBSELOUT(_bank, _sel)	\
	BCMA_DMP_OOBSEL(BCMA_DMP_OOBSELOUTA30, _bank, _sel)
#define	BCMA_DMP_OOBSYNC(_bank)		(BCMA_DMP_OOBSYNCA + (_bank * 8))
#define	BCMA_DMP_OOBSELOUT_EN(_bank)	(BCMA_DMP_OOBSELOUTAEN + (_bank * 8))
#define	BCMA_DMP_OOB_EXTWIDTH(_bank)	(BCMA_DMP_OOBAEXTWIDTH + (_bank * 12))
#define	BCMA_DMP_OOB_INWIDTH(_bank)	(BCMA_DMP_OOBAINWIDTH + (_bank * 12))
#define	BCMA_DMP_OOB_OUTWIDTH(_bank)	(BCMA_DMP_OOBAOUTWIDTH + (_bank * 12))

// This was inherited from Broadcom's aidmp.h header
// Is it required for any of our use-cases?
#if 0 /* defined(IL_BIGENDIAN) && defined(BCMHND74K) */
/* Selective swapped defines for those registers we need in
 * big-endian code. */
#define	BCMA_DMP_IOCTRLSET	0x404
#define	BCMA_DMP_IOCTRLCLEAR	0x400
#define	BCMA_DMP_IOCTRL		0x40c
#define	BCMA_DMP_IOSTATUS	0x504
#define	BCMA_DMP_RESETCTRL	0x804
#define	BCMA_DMP_RESETSTATUS	0x800

#else /* !IL_BIGENDIAN || !BCMHND74K */

#define	BCMA_DMP_IOCTRLSET	0x400
#define	BCMA_DMP_IOCTRLCLEAR	0x404
#define	BCMA_DMP_IOCTRL		0x408
#define	BCMA_DMP_IOSTATUS	0x500
#define	BCMA_DMP_RESETCTRL	0x800
#define	BCMA_DMP_RESETSTATUS	0x804

#endif /* IL_BIGENDIAN && BCMHND74K */

#define	BCMA_DMP_IOCTRLWIDTH	0x700
#define	BCMA_DMP_IOSTATUSWIDTH	0x704
#define	BCMA_DMP_RESETREADID	0x808
#define	BCMA_DMP_RESETWRITEID	0x80c
#define	BCMA_DMP_ERRLOGCTRL	0xa00
#define	BCMA_DMP_ERRLOGDONE	0xa04
#define	BCMA_DMP_ERRLOGSTATUS	0xa08
#define	BCMA_DMP_ERRLOGADDRLO	0xa0c
#define	BCMA_DMP_ERRLOGADDRHI	0xa10
#define	BCMA_DMP_ERRLOGID	0xa14
#define	BCMA_DMP_ERRLOGUSER	0xa18
#define	BCMA_DMP_ERRLOGFLAGS	0xa1c
#define	BCMA_DMP_INTSTATUS	0xa00
#define	BCMA_DMP_CONFIG		0xe00
#define	BCMA_DMP_ITCR		0xf00
#define	BCMA_DMP_ITIPOOBA	0xf10
#define	BCMA_DMP_ITIPOOBB	0xf14
#define	BCMA_DMP_ITIPOOBC	0xf18
#define	BCMA_DMP_ITIPOOBD	0xf1c
#define	BCMA_DMP_ITIPOOBAOUT	0xf30
#define	BCMA_DMP_ITIPOOBBOUT	0xf34
#define	BCMA_DMP_ITIPOOBCOUT	0xf38
#define	BCMA_DMP_ITIPOOBDOUT	0xf3c
#define	BCMA_DMP_ITOPOOBA	0xf50
#define	BCMA_DMP_ITOPOOBB	0xf54
#define	BCMA_DMP_ITOPOOBC	0xf58
#define	BCMA_DMP_ITOPOOBD	0xf5c
#define	BCMA_DMP_ITOPOOBAIN	0xf70
#define	BCMA_DMP_ITOPOOBBIN	0xf74
#define	BCMA_DMP_ITOPOOBCIN	0xf78
#define	BCMA_DMP_ITOPOOBDIN	0xf7c
#define	BCMA_DMP_ITOPRESET	0xf90
#define	BCMA_DMP_PERIPHERIALID4	0xfd0
#define	BCMA_DMP_PERIPHERIALID5	0xfd4
#define	BCMA_DMP_PERIPHERIALID6	0xfd8
#define	BCMA_DMP_PERIPHERIALID7	0xfdc
#define	BCMA_DMP_PERIPHERIALID0	0xfe0
#define	BCMA_DMP_PERIPHERIALID1	0xfe4
#define	BCMA_DMP_PERIPHERIALID2	0xfe8
#define	BCMA_DMP_PERIPHERIALID3	0xfec
#define	BCMA_DMP_COMPONENTID0	0xff0
#define	BCMA_DMP_COMPONENTID1	0xff4
#define	BCMA_DMP_COMPONENTID2	0xff8
#define	BCMA_DMP_COMPONENTID3	0xffc

/* OOBSEL(IN|OUT) */
#define	BCMA_DMP_OOBSEL_MASK		0xFF	/**< OOB selector mask */
#define	BCMA_DMP_OOBSEL_EN		(1<<7)	/**< OOB selector enable bit */
-#define	BCMA_DMP_OOBSEL_SHIFT(_sel)	((_sel % BCMA_OOB_NUM_SEL) * 8)
+#define	BCMA_DMP_OOBSEL_SHIFT(_sel)	((_sel % 4) * 8)
#define	BCMA_DMP_OOBSEL_BUSLINE_MASK	0x7F	/**< OOB selector bus line mask */
#define	BCMA_DMP_OOBSEL_BUSLINE_SHIFT	0

#define	BCMA_DMP_OOBSEL_0_MASK	BCMA_DMP_OOBSEL_MASK
#define	BCMA_DMP_OOBSEL_1_MASK	BCMA_DMP_OOBSEL_MASK
#define	BCMA_DMP_OOBSEL_2_MASK	BCMA_DMP_OOBSEL_MASK
#define	BCMA_DMP_OOBSEL_3_MASK	BCMA_DMP_OOBSEL_MASK
#define	BCMA_DMP_OOBSEL_4_MASK	BCMA_DMP_OOBSEL_MASK
#define	BCMA_DMP_OOBSEL_5_MASK	BCMA_DMP_OOBSEL_MASK
#define	BCMA_DMP_OOBSEL_6_MASK	BCMA_DMP_OOBSEL_MASK
#define	BCMA_DMP_OOBSEL_7_MASK	BCMA_DMP_OOBSEL_MASK

#define	BCMA_DMP_OOBSEL_0_SHIFT	BCMA_DMP_OOBSEL_SHIFT(0)
#define	BCMA_DMP_OOBSEL_1_SHIFT	BCMA_DMP_OOBSEL_SHIFT(1)
#define	BCMA_DMP_OOBSEL_2_SHIFT	BCMA_DMP_OOBSEL_SHIFT(2)
#define	BCMA_DMP_OOBSEL_3_SHIFT	BCMA_DMP_OOBSEL_SHIFT(3)
#define	BCMA_DMP_OOBSEL_4_SHIFT	BCMA_DMP_OOBSEL_0_SHIFT
#define	BCMA_DMP_OOBSEL_5_SHIFT	BCMA_DMP_OOBSEL_1_SHIFT
#define	BCMA_DMP_OOBSEL_6_SHIFT	BCMA_DMP_OOBSEL_2_SHIFT
#define	BCMA_DMP_OOBSEL_7_SHIFT	BCMA_DMP_OOBSEL_3_SHIFT

/* ioctrl */
#define	BCMA_DMP_IOCTRL_MASK	0x0000FFFF

/* iostatus */
#define	BCMA_DMP_IOST_MASK	0x0000FFFF

/* resetctrl */
#define	BCMA_DMP_RC_RESET	0x00000001

/* config */
#define	BCMA_DMP_CFG_OOB	0x00000020
#define	BCMA_DMP_CFG_IOS	0x00000010
#define	BCMA_DMP_CFGIOC		0x00000008
#define	BCMA_DMP_CFGTO		0x00000004
#define	BCMA_DMP_CFGERRL	0x00000002
#define	BCMA_DMP_CFGRST		0x00000001

#endif /* _BCMA_BCMA_DMP_H_ */
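The one-line change above is the substantive fix in this header: a selector's
8-bit field repeats every four selectors within each 32-bit OOBSEL register
(selectors 4-7 live in the *74 registers at the same byte offsets as selectors
0-3 in the *30 registers), so the shift must wrap modulo 4, not modulo
BCMA_OOB_NUM_SEL. A worked example for selector 5:

	(5 % BCMA_OOB_NUM_SEL) * 8 == 40	/* old: past the end of a 32-bit register */
	(5 % 4) * 8 == 8			/* new: byte 1 of the *74 register */

which agrees with the explicit BCMA_DMP_OOBSEL_5_SHIFT ==
BCMA_DMP_OOBSEL_1_SHIFT definitions above.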
Index: head/sys/dev/bhnd/bcma/bcma_subr.c
===================================================================
--- head/sys/dev/bhnd/bcma/bcma_subr.c	(revision 326870)
+++ head/sys/dev/bhnd/bcma/bcma_subr.c	(revision 326871)
@@ -1,614 +1,614 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015-2016 Landon Fuller
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include

#include "bcma_dmp.h"
#include "bcmavar.h"

/* Return the resource ID for a device's agent register allocation */
#define	BCMA_AGENT_RID(_dinfo)	\
    (BCMA_AGENT_RID_BASE + BCMA_DINFO_COREIDX(_dinfo))

/**
 * Allocate and initialize new core config structure.
 *
 * @param core_index Core index on the bus.
 * @param core_unit Core unit number.
 * @param vendor Core designer.
 * @param device Core identifier (e.g. part number).
 * @param hwrev Core revision.
 */
struct bcma_corecfg *
bcma_alloc_corecfg(u_int core_index, int core_unit, uint16_t vendor,
    uint16_t device, uint8_t hwrev)
{
	struct bcma_corecfg *cfg;

	cfg = malloc(sizeof(*cfg), M_BHND, M_NOWAIT);
	if (cfg == NULL)
		return NULL;

	cfg->core_info = (struct bhnd_core_info) {
		.vendor = vendor,
		.device = device,
		.hwrev = hwrev,
		.core_idx = core_index,
		.unit = core_unit
	};

	STAILQ_INIT(&cfg->master_ports);
	cfg->num_master_ports = 0;

	STAILQ_INIT(&cfg->dev_ports);
	cfg->num_dev_ports = 0;

	STAILQ_INIT(&cfg->bridge_ports);
	cfg->num_bridge_ports = 0;

	STAILQ_INIT(&cfg->wrapper_ports);
	cfg->num_wrapper_ports = 0;

	return (cfg);
}

/**
 * Deallocate the given core config and any associated resources.
 *
 * @param corecfg Core info to be deallocated.
 */
void
bcma_free_corecfg(struct bcma_corecfg *corecfg)
{
	struct bcma_mport *mport, *mnext;
	struct bcma_sport *sport, *snext;

	STAILQ_FOREACH_SAFE(mport, &corecfg->master_ports, mp_link, mnext) {
		free(mport, M_BHND);
	}

	STAILQ_FOREACH_SAFE(sport, &corecfg->dev_ports, sp_link, snext) {
		bcma_free_sport(sport);
	}

	STAILQ_FOREACH_SAFE(sport, &corecfg->bridge_ports, sp_link, snext) {
		bcma_free_sport(sport);
	}

	STAILQ_FOREACH_SAFE(sport, &corecfg->wrapper_ports, sp_link, snext) {
		bcma_free_sport(sport);
	}

	free(corecfg, M_BHND);
}

/**
 * Return the @p cfg port list for @p type.
 *
 * @param cfg The core configuration.
 * @param type The requested port type.
 */
struct bcma_sport_list *
bcma_corecfg_get_port_list(struct bcma_corecfg *cfg, bhnd_port_type type)
{
	switch (type) {
	case BHND_PORT_DEVICE:
		return (&cfg->dev_ports);
		break;
	case BHND_PORT_BRIDGE:
		return (&cfg->bridge_ports);
		break;
	case BHND_PORT_AGENT:
		return (&cfg->wrapper_ports);
		break;
	default:
		return (NULL);
	}
}

/**
 * Populate the resource list and bcma_map RIDs using the maps defined on
 * @p ports.
 *
 * @param bus The requesting bus device.
 * @param dinfo The device info instance to be initialized.
 * @param ports The set of ports to be enumerated
 */
static void
bcma_dinfo_init_port_resource_info(device_t bus, struct bcma_devinfo *dinfo,
    struct bcma_sport_list *ports)
{
	struct bcma_map		*map;
	struct bcma_sport	*port;
	bhnd_addr_t		 end;

	STAILQ_FOREACH(port, ports, sp_link) {
		STAILQ_FOREACH(map, &port->sp_maps, m_link) {
			/*
			 * Create the corresponding device resource list entry.
			 *
			 * We necessarily skip registration if the region's
			 * device memory range is not representable via
			 * rman_res_t.
			 *
			 * When rman_res_t is migrated to uintmax_t, any
			 * range should be representable.
			 */
			end = map->m_base + map->m_size;
			if (map->m_base <= RM_MAX_END && end <= RM_MAX_END) {
				map->m_rid = resource_list_add_next(
				    &dinfo->resources, SYS_RES_MEMORY,
				    map->m_base, end, map->m_size);
			} else if (bootverbose) {
				device_printf(bus,
				    "core%u %s%u.%u: region %llx-%llx extends "
				    "beyond supported addressable range\n",
				    dinfo->corecfg->core_info.core_idx,
				    bhnd_port_type_name(port->sp_type),
				    port->sp_num, map->m_region_num,
				    (unsigned long long) map->m_base,
				    (unsigned long long) end);
			}
		}
	}
}

/**
 * Allocate the per-core agent register block for a device info structure.
 *
 * If an agent0.0 region is not defined on @p dinfo, the device info
 * agent resource is set to NULL and 0 is returned.
 *
 * @param bus The requesting bus device.
 * @param child The bcma child device.
 * @param dinfo The device info associated with @p child
 *
 * @retval 0 success
 * @retval non-zero resource allocation failed.
 */
static int
bcma_dinfo_init_agent(device_t bus, device_t child, struct bcma_devinfo *dinfo)
{
	bhnd_addr_t	addr;
	bhnd_size_t	size;
	rman_res_t	r_start, r_count, r_end;
	int		error;

	KASSERT(dinfo->res_agent == NULL, ("double allocation of agent"));

	/* Verify that the agent register block exists and is
	 * mappable */
	if (bhnd_get_port_rid(child, BHND_PORT_AGENT, 0, 0) == -1)
		return (0);	/* nothing to do */

	/* Fetch the address of the agent register block */
	error = bhnd_get_region_addr(child, BHND_PORT_AGENT, 0, 0,
	    &addr, &size);
	if (error) {
		device_printf(bus, "failed fetching agent register block "
		    "address for core %u\n", BCMA_DINFO_COREIDX(dinfo));
		return (error);
	}

	/* Allocate the resource */
	r_start = addr;
	r_count = size;
	r_end = r_start + r_count - 1;

	dinfo->rid_agent = BCMA_AGENT_RID(dinfo);
	dinfo->res_agent = BHND_BUS_ALLOC_RESOURCE(bus, bus, SYS_RES_MEMORY,
	    &dinfo->rid_agent, r_start, r_end, r_count,
	    RF_ACTIVE|RF_SHAREABLE);
	if (dinfo->res_agent == NULL) {
		device_printf(bus, "failed allocating agent register block "
		    "for core %u\n", BCMA_DINFO_COREIDX(dinfo));
		return (ENXIO);
	}

	return (0);
}

/**
 * Populate the list of interrupts for a device info structure
 * previously initialized via bcma_dinfo_init_agent().
 *
 * If an agent0.0 region is not mapped on @p dinfo, the OOB interrupt bank is
 * assumed to be unavailable and 0 is returned.
 *
 * @param bus The requesting bus device.
 * @param dinfo The device info instance to be initialized.
 */
static int
bcma_dinfo_init_intrs(device_t bus, device_t child,
    struct bcma_devinfo *dinfo)
{
	uint32_t dmpcfg, oobw;

	/* Agent block must be mapped */
	if (dinfo->res_agent == NULL)
		return (0);

	/* Agent must support OOB */
	dmpcfg = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_CONFIG);
	if (!BCMA_DMP_GET_FLAG(dmpcfg, BCMA_DMP_CFG_OOB))
		return (0);

	/* Fetch width of the OOB interrupt bank */
	oobw = bhnd_bus_read_4(dinfo->res_agent,
	    BCMA_DMP_OOB_OUTWIDTH(BCMA_OOB_BANK_INTR));
-	if (oobw > BCMA_OOB_NUM_SEL) {
+	if (oobw >= BCMA_OOB_NUM_SEL) {
		device_printf(bus, "ignoring invalid OOBOUTWIDTH for core %u: "
		    "%#x\n", BCMA_DINFO_COREIDX(dinfo), oobw);
		return (0);
	}

	/* Fetch OOBSEL busline values and populate list of interrupt
	 * descriptors */
	for (uint32_t sel = 0; sel < oobw; sel++) {
		struct bcma_intr	*intr;
		uint32_t		 selout;
		uint8_t			 line;

		if (dinfo->num_intrs == UINT_MAX)
			return (ENOMEM);

		selout = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_OOBSELOUT(
		    BCMA_OOB_BANK_INTR, sel));

		line = (selout >> BCMA_DMP_OOBSEL_SHIFT(sel)) &
		    BCMA_DMP_OOBSEL_BUSLINE_MASK;

		intr = bcma_alloc_intr(BCMA_OOB_BANK_INTR, sel, line);
		if (intr == NULL) {
			device_printf(bus, "failed allocating interrupt "
			    "descriptor %#x for core %u\n", sel,
			    BCMA_DINFO_COREIDX(dinfo));
			return (ENOMEM);
		}

		STAILQ_INSERT_HEAD(&dinfo->intrs, intr, i_link);
		dinfo->num_intrs++;
	}

	return (0);
}
/**
 * Allocate and return a new empty device info structure.
 *
 * @param bus The requesting bus device.
 *
 * @retval NULL if allocation failed.
 */
struct bcma_devinfo *
bcma_alloc_dinfo(device_t bus)
{
	struct bcma_devinfo *dinfo;

	dinfo = malloc(sizeof(struct bcma_devinfo), M_BHND, M_NOWAIT|M_ZERO);
	if (dinfo == NULL)
		return (NULL);

	dinfo->corecfg = NULL;
	dinfo->res_agent = NULL;
	dinfo->rid_agent = -1;

	STAILQ_INIT(&dinfo->intrs);
	dinfo->num_intrs = 0;

	resource_list_init(&dinfo->resources);

	return (dinfo);
}

/**
 * Initialize a device info structure previously allocated via
 * bcma_alloc_dinfo, assuming ownership of the provided core
 * configuration.
 *
 * @param bus The requesting bus device.
 * @param child The bcma child device.
 * @param dinfo The device info associated with @p child
 * @param corecfg Device core configuration; ownership of this value
 * will be assumed by @p dinfo.
 *
 * @retval 0 success
 * @retval non-zero initialization failed.
 */
int
bcma_init_dinfo(device_t bus, device_t child, struct bcma_devinfo *dinfo,
    struct bcma_corecfg *corecfg)
{
	struct bcma_intr	*intr;
	int			 error;

	KASSERT(dinfo->corecfg == NULL, ("dinfo previously initialized"));

	/* Save core configuration value */
	dinfo->corecfg = corecfg;

	/* The device ports must always be initialized first to ensure that
	 * rid 0 maps to the first device port */
	bcma_dinfo_init_port_resource_info(bus, dinfo, &corecfg->dev_ports);
	bcma_dinfo_init_port_resource_info(bus, dinfo, &corecfg->bridge_ports);
	bcma_dinfo_init_port_resource_info(bus, dinfo, &corecfg->wrapper_ports);

	/* Now that we've defined the port resources, we can map the device's
	 * agent registers (if any) */
	if ((error = bcma_dinfo_init_agent(bus, child, dinfo)))
		goto failed;

	/* With agent registers mapped, we can populate the device's interrupt
	 * descriptors */
	if ((error = bcma_dinfo_init_intrs(bus, child, dinfo)))
		goto failed;

	/* Finally, map the interrupt descriptors */
	STAILQ_FOREACH(intr, &dinfo->intrs, i_link) {
		/* Already mapped? */
		if (intr->i_mapped)
			continue;

		/* Map the interrupt */
		error = BHND_BUS_MAP_INTR(bus, child, intr->i_sel,
		    &intr->i_irq);
		if (error) {
			device_printf(bus, "failed mapping interrupt line %u "
			    "for core %u: %d\n", intr->i_sel,
			    BCMA_DINFO_COREIDX(dinfo), error);
			goto failed;
		}

		intr->i_mapped = true;

		/* Add to resource list */
		intr->i_rid = resource_list_add_next(&dinfo->resources,
		    SYS_RES_IRQ, intr->i_irq, intr->i_irq, 1);
	}

	return (0);

failed:
	/* Owned by the caller on failure */
	dinfo->corecfg = NULL;

	return (error);
}

/**
 * Deallocate the given device info structure and any associated resources.
 *
 * @param bus The requesting bus device.
 * @param dinfo Device info to be deallocated.
 */
void
bcma_free_dinfo(device_t bus, device_t child, struct bcma_devinfo *dinfo)
{
	struct bcma_intr *intr, *inext;

	resource_list_free(&dinfo->resources);

	if (dinfo->corecfg != NULL)
		bcma_free_corecfg(dinfo->corecfg);

	/* Release agent resource, if any */
	if (dinfo->res_agent != NULL) {
		bhnd_release_resource(bus, SYS_RES_MEMORY, dinfo->rid_agent,
		    dinfo->res_agent);
	}

	/* Clean up interrupt descriptors */
	STAILQ_FOREACH_SAFE(intr, &dinfo->intrs, i_link, inext) {
		STAILQ_REMOVE(&dinfo->intrs, intr, bcma_intr, i_link);

		/* Release our IRQ mapping */
		if (intr->i_mapped) {
			BHND_BUS_UNMAP_INTR(bus, child, intr->i_irq);
			intr->i_mapped = false;
		}

		bcma_free_intr(intr);
	}

	free(dinfo, M_BHND);
}

/**
 * Allocate and initialize a new interrupt descriptor.
 *
 * @param bank OOB bank.
 * @param sel OOB selector.
 * @param line OOB bus line.
 */
struct bcma_intr *
bcma_alloc_intr(uint8_t bank, uint8_t sel, uint8_t line)
{
	struct bcma_intr *intr;

	if (bank >= BCMA_OOB_NUM_BANKS)
		return (NULL);

	if (sel >= BCMA_OOB_NUM_SEL)
		return (NULL);

	if (line >= BCMA_OOB_NUM_BUSLINES)
		return (NULL);

	intr = malloc(sizeof(*intr), M_BHND, M_NOWAIT);
	if (intr == NULL)
		return (NULL);

	intr->i_bank = bank;
	intr->i_sel = sel;
	intr->i_busline = line;
	intr->i_mapped = false;
	intr->i_irq = 0;

	return (intr);
}

/**
 * Deallocate all resources associated with the given interrupt descriptor.
 *
 * @param intr Interrupt descriptor to be deallocated.
 */
void
bcma_free_intr(struct bcma_intr *intr)
{
	KASSERT(!intr->i_mapped, ("interrupt %u still mapped", intr->i_sel));

	free(intr, M_BHND);
}

/**
 * Allocate and initialize new slave port descriptor.
 *
 * @param port_num Per-core port number.
 * @param port_type Port type.
 */
struct bcma_sport *
bcma_alloc_sport(bcma_pid_t port_num, bhnd_port_type port_type)
{
	struct bcma_sport *sport;

	sport = malloc(sizeof(struct bcma_sport), M_BHND, M_NOWAIT);
	if (sport == NULL)
		return NULL;

	sport->sp_num = port_num;
	sport->sp_type = port_type;
	sport->sp_num_maps = 0;
	STAILQ_INIT(&sport->sp_maps);

	return sport;
}

/**
 * Deallocate all resources associated with the given port descriptor.
 *
 * @param sport Port descriptor to be deallocated.
 */
void
bcma_free_sport(struct bcma_sport *sport)
{
	struct bcma_map *map, *mapnext;

	STAILQ_FOREACH_SAFE(map, &sport->sp_maps, m_link, mapnext) {
		free(map, M_BHND);
	}

	free(sport, M_BHND);
}

/**
 * Given a bcma(4) child's device info, spin waiting for the device's DMP
 * resetstatus register to clear.
 *
 * @param child The bcma(4) child device.
 * @param dinfo The @p child device info.
 *
 * @retval 0 success
 * @retval ENODEV if @p dinfo does not map an agent register resource.
 * @retval ETIMEDOUT if timeout occurs
 */
int
bcma_dmp_wait_reset(device_t child, struct bcma_devinfo *dinfo)
{
	uint32_t rst;

	if (dinfo->res_agent == NULL)
		return (ENODEV);

	/* 300us should be long enough, but there are references to this
	 * requiring up to 10ms when performing reset of an 80211 core
	 * after a MAC PSM microcode watchdog event. */
	for (int i = 0; i < 10000; i += 10) {
		rst = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_RESETSTATUS);
		if (rst == 0)
			return (0);

		DELAY(10);
	}

	device_printf(child, "BCMA_DMP_RESETSTATUS timeout\n");
	return (ETIMEDOUT);
}

/**
 * Set the bcma(4) child's DMP resetctrl register value, and then wait
 * for all backplane operations to complete.
 *
 * @param child The bcma(4) child device.
 * @param dinfo The @p child device info.
 * @param value The new resetctrl value to set.
 *
 * @retval 0 success
 * @retval ENODEV if @p dinfo does not map an agent register resource.
 * @retval ETIMEDOUT if timeout occurs waiting for reset completion
 */
int
bcma_dmp_write_reset(device_t child, struct bcma_devinfo *dinfo,
    uint32_t value)
{
	uint32_t rst;

	if (dinfo->res_agent == NULL)
		return (ENODEV);

	/* Already in requested reset state? */
	rst = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_RESETCTRL);
	if (rst == value)
		return (0);

	bhnd_bus_write_4(dinfo->res_agent, BCMA_DMP_RESETCTRL, value);
	bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_RESETCTRL); /* read-back */
	DELAY(10);

	return (bcma_dmp_wait_reset(child, dinfo));
}
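bcma_dmp_write_reset() and bcma_dmp_wait_reset() together implement the agent
reset handshake: write BCMA_DMP_RESETCTRL, flush the write with a read-back,
then poll BCMA_DMP_RESETSTATUS until the backplane acknowledges. A minimal
sketch of how a caller might cycle a core through reset, assuming a valid
child/dinfo pair; the hypothetical example_reset_core() omits the
BCMA_DMP_IOCTRL setup a real bring-up path would also perform:

	static int
	example_reset_core(device_t child, struct bcma_devinfo *dinfo)
	{
		int error;

		/* Assert reset, and wait for outstanding backplane
		 * operations to complete */
		error = bcma_dmp_write_reset(child, dinfo, BCMA_DMP_RC_RESET);
		if (error)
			return (error);

		/* Deassert reset; bcma_dmp_write_reset() again polls
		 * BCMA_DMP_RESETSTATUS before returning */
		return (bcma_dmp_write_reset(child, dinfo, 0x0));
	}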
Index: head/sys/dev/bhnd/bhnd_erom.c
===================================================================
--- head/sys/dev/bhnd/bhnd_erom.c	(revision 326870)
+++ head/sys/dev/bhnd/bhnd_erom.c	(revision 326871)
@@ -1,489 +1,490 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016 Landon Fuller
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include

static int	bhnd_erom_iores_map(struct bhnd_erom_io *eio,
		    bhnd_addr_t addr, bhnd_size_t size);
static uint32_t	bhnd_erom_iores_read(struct bhnd_erom_io *eio,
		    bhnd_size_t offset, u_int width);
static void	bhnd_erom_iores_fini(struct bhnd_erom_io *eio);

static int	bhnd_erom_iobus_map(struct bhnd_erom_io *eio,
		    bhnd_addr_t addr, bhnd_size_t size);
static uint32_t	bhnd_erom_iobus_read(struct bhnd_erom_io *eio,
		    bhnd_size_t offset, u_int width);

/**
 * An implementation of bhnd_erom_io that manages mappings via
 * bhnd_alloc_resource() and bhnd_release_resource().
 */
struct bhnd_erom_iores {
	struct bhnd_erom_io	 eio;
	device_t		 owner;		/**< device from which we'll allocate resources */
	int			 owner_rid;	/**< rid to use when allocating new mappings */
	struct bhnd_resource	*mapped;	/**< current mapping, or NULL */
	int			 mapped_rid;	/**< resource ID of current mapping, or -1 */
};

/**
 * Fetch the device enumeration parser class from all bhnd(4)-compatible
 * drivers registered for @p bus_devclass, probe @p eio for supporting parser
 * classes, and return the best available supporting enumeration parser class.
 *
 * @param bus_devclass The bus device class to be queried for
 * bhnd(4)-compatible drivers.
 * @param eio An erom bus I/O instance, configured with a
 * mapping of the first bus core.
 * @param hint Identification hint used to identify the device.
 * If the chipset supports standard chip
 * identification registers within the first core,
 * this parameter should be NULL.
 * @param[out] cid On success, the probed chip identifier.
 *
 * @retval non-NULL on success, the best available EROM class.
 * @retval NULL if no erom class returned a successful probe result for
 * @p eio.
 */
bhnd_erom_class_t *
bhnd_erom_probe_driver_classes(devclass_t bus_devclass,
    struct bhnd_erom_io *eio, const struct bhnd_chipid *hint,
    struct bhnd_chipid *cid)
{
	driver_t		**drivers;
	int			 drv_count;
	bhnd_erom_class_t	*erom_cls;
	int			 error, prio, result;

	erom_cls = NULL;
	prio = 0;

	/* Fetch all available drivers */
	error = devclass_get_drivers(bus_devclass, &drivers, &drv_count);
	if (error) {
		printf("error fetching bhnd(4) drivers for %s: %d\n",
		    devclass_get_name(bus_devclass), error);
		return (NULL);
	}

	/* Enumerate the drivers looking for the best available EROM class */
	for (int i = 0; i < drv_count; i++) {
		struct bhnd_chipid	 pcid;
		bhnd_erom_class_t	*cls;

		/* The default implementation of BHND_BUS_GET_EROM_CLASS()
		 * returns NULL if unimplemented; this should always be safe
		 * to call on arbitrary drivers */
		cls = bhnd_driver_get_erom_class(drivers[i]);
		if (cls == NULL)
			continue;

		kobj_class_compile(cls);

		/* Probe the bus */
		result = bhnd_erom_probe(cls, eio, hint, &pcid);

		/* The parser did not match if an error was returned */
		if (result > 0)
			continue;

		/* Check for a new highest priority match */
		if (erom_cls == NULL || result > prio) {
			prio = result;

			*cid = pcid;
			erom_cls = cls;
		}

		/* Terminate immediately on BUS_PROBE_SPECIFIC */
		if (result == BUS_PROBE_SPECIFIC)
			break;
	}

+	free(drivers, M_TEMP);
	return (erom_cls);
}

/**
 * Allocate and return a new device enumeration table parser.
 *
 * @param cls The parser class for which an instance will be
 * allocated.
 * @param eio The bus I/O callbacks to use when reading the device
 * enumeration table.
 * @param cid The device's chip identifier.
 *
 * @retval non-NULL success
 * @retval NULL if an error occurred allocating or initializing the
 * EROM parser.
 */
bhnd_erom_t *
bhnd_erom_alloc(bhnd_erom_class_t *cls, const struct bhnd_chipid *cid,
    struct bhnd_erom_io *eio)
{
	bhnd_erom_t	*erom;
	int		 error;

	erom = (bhnd_erom_t *)kobj_create((kobj_class_t)cls, M_BHND,
	    M_WAITOK|M_ZERO);

	if ((error = BHND_EROM_INIT(erom, cid, eio))) {
		printf("error initializing %s parser at %#jx: %d\n", cls->name,
		    (uintmax_t)cid->enum_addr, error);

		kobj_delete((kobj_t)erom, M_BHND);
		return (NULL);
	}

	return (erom);
}

/**
 * Perform static initialization of a device enumeration table parser.
 *
 * This may be used to initialize a caller-allocated erom instance state
 * during early boot, prior to malloc availability.
 *
 * @param cls The parser class for which an instance will be
 * allocated.
 * @param erom The erom parser instance to initialize.
 * @param esize The total available number of bytes allocated for
 * @p erom. If this is less than is required by @p cls,
 * ENOMEM will be returned.
 * @param cid The device's chip identifier.
 * @param eio The bus I/O callbacks to use when reading the device
 * enumeration table.
 *
 * @retval 0 success
 * @retval ENOMEM if @p esize is smaller than required by @p cls.
 * @retval non-zero if an error occurs initializing the EROM parser,
 * a regular unix error code will be returned.
 */
int
bhnd_erom_init_static(bhnd_erom_class_t *cls, bhnd_erom_t *erom, size_t esize,
    const struct bhnd_chipid *cid, struct bhnd_erom_io *eio)
{
	kobj_class_t	kcls;

	kcls = (kobj_class_t)cls;

	/* Verify allocation size */
	if (kcls->size > esize)
		return (ENOMEM);

	/* Perform instance initialization */
	kobj_init_static((kobj_t)erom, kcls);
	return (BHND_EROM_INIT(erom, cid, eio));
}
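/*
 * Usage sketch (illustrative only): the free(drivers, M_TEMP) added above
 * releases the driver array allocated by devclass_get_drivers(), which was
 * previously leaked on every probe. A caller that has already mapped the
 * first core via an erom I/O instance would typically identify the chip and
 * instantiate a parser as follows; `bus_devclass' and `eio' are assumed to
 * be provided by the caller, and core enumeration is elided:
 */
#if 0
static int
example_probe_and_parse(devclass_t bus_devclass, struct bhnd_erom_io *eio)
{
	struct bhnd_chipid	 cid;
	bhnd_erom_class_t	*cls;
	bhnd_erom_t		*erom;

	cls = bhnd_erom_probe_driver_classes(bus_devclass, eio, NULL, &cid);
	if (cls == NULL)
		return (ENXIO);	/* no parser claimed the device */

	if ((erom = bhnd_erom_alloc(cls, &cid, eio)) == NULL)
		return (ENXIO);

	/* ... enumerate cores via the BHND_EROM_* interface ... */

	bhnd_erom_free(erom);
	return (0);
}
#endif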
/**
 * Release any resources held by a @p erom parser previously
 * initialized via bhnd_erom_init_static().
 *
 * @param erom An erom parser instance previously initialized via
 * bhnd_erom_init_static().
 */
void
bhnd_erom_fini_static(bhnd_erom_t *erom)
{
	return (BHND_EROM_FINI(erom));
}

/**
 * Release all resources held by a @p erom parser previously
 * allocated via bhnd_erom_alloc().
 *
 * @param erom An erom parser instance previously allocated via
 * bhnd_erom_alloc().
 */
void
bhnd_erom_free(bhnd_erom_t *erom)
{
	BHND_EROM_FINI(erom);
	kobj_delete((kobj_t)erom, M_BHND);
}

/**
 * Attempt to map @p size bytes at @p addr, replacing any existing
 * @p eio mapping.
 *
 * @param eio I/O instance state.
 * @param addr The address to be mapped.
 * @param size The number of bytes to be mapped at @p addr.
 *
 * @retval 0 success
 * @retval non-zero if mapping @p addr otherwise fails, a regular
 * unix error code should be returned.
 */
int
bhnd_erom_io_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, bhnd_size_t size)
{
	return (eio->map(eio, addr, size));
}

/**
 * Read a 1, 2, or 4 byte data item from @p eio, at the given @p offset
 * relative to @p eio's current mapping.
 *
 * @param eio erom I/O callbacks
 * @param offset read offset.
 * @param width item width (1, 2, or 4 bytes).
 */
uint32_t
bhnd_erom_io_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width)
{
	return (eio->read(eio, offset, width));
}

/**
 * Free all resources held by @p eio.
 */
void
bhnd_erom_io_fini(struct bhnd_erom_io *eio)
{
	if (eio->fini != NULL)
		return (eio->fini(eio));
}

/**
 * Allocate, initialize, and return a new I/O instance that will perform
 * mapping by allocating SYS_RES_MEMORY resources from @p dev using @p rid.
 *
 * @param dev The device to pass to bhnd_alloc_resource() and
 * bhnd_release_resource() functions.
 * @param rid The resource ID to be used when allocating memory resources.
 */
struct bhnd_erom_io *
bhnd_erom_iores_new(device_t dev, int rid)
{
	struct bhnd_erom_iores	*iores;

	iores = malloc(sizeof(*iores), M_BHND, M_WAITOK | M_ZERO);
	iores->eio.map = bhnd_erom_iores_map;
	iores->eio.read = bhnd_erom_iores_read;
	iores->eio.fini = bhnd_erom_iores_fini;

	iores->owner = dev;
	iores->owner_rid = rid;
	iores->mapped = NULL;
	iores->mapped_rid = -1;

	return (&iores->eio);
}

static int
bhnd_erom_iores_map(struct bhnd_erom_io *eio, bhnd_addr_t addr,
    bhnd_size_t size)
{
	struct bhnd_erom_iores *iores;

	iores = (struct bhnd_erom_iores *)eio;

	/* Sanity check the addr/size */
	if (size == 0)
		return (EINVAL);

	if (BHND_ADDR_MAX - size < addr)
		return (EINVAL);	/* would overflow */

	/* Check for an existing mapping */
	if (iores->mapped) {
		/* If already mapped, nothing else to do */
		if (rman_get_start(iores->mapped->res) == addr &&
		    rman_get_size(iores->mapped->res) == size)
		{
			return (0);
		}

		/* Otherwise, we need to drop the existing mapping */
		bhnd_release_resource(iores->owner, SYS_RES_MEMORY,
		    iores->mapped_rid, iores->mapped);
		iores->mapped = NULL;
		iores->mapped_rid = -1;
	}

	/* Try to allocate the new mapping */
	iores->mapped_rid = iores->owner_rid;
	iores->mapped = bhnd_alloc_resource(iores->owner, SYS_RES_MEMORY,
	    &iores->mapped_rid, addr, addr+size-1, size,
	    RF_ACTIVE|RF_SHAREABLE);
	if (iores->mapped == NULL) {
		iores->mapped_rid = -1;
		return (ENXIO);
	}

	return (0);
}

static uint32_t
bhnd_erom_iores_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width)
{
	struct bhnd_erom_iores *iores = (struct bhnd_erom_iores *)eio;

	if (iores->mapped == NULL)
		panic("read with invalid mapping");

	switch (width) {
	case 1:
		return (bhnd_bus_read_1(iores->mapped, offset));
	case 2:
		return (bhnd_bus_read_2(iores->mapped, offset));
	case 4:
		return (bhnd_bus_read_4(iores->mapped, offset));
	default:
		panic("invalid width %u", width);
	}
}

static void
bhnd_erom_iores_fini(struct bhnd_erom_io *eio)
{
	struct bhnd_erom_iores *iores = (struct bhnd_erom_iores *)eio;

	/* Release any mapping */
	if (iores->mapped) {
		bhnd_release_resource(iores->owner, SYS_RES_MEMORY,
		    iores->mapped_rid, iores->mapped);
		iores->mapped = NULL;
		iores->mapped_rid = -1;
	}

	free(eio, M_BHND);
}

/**
 * Initialize an I/O instance that will perform mapping directly from the
 * given bus space tag and handle.
 *
 * @param iobus The I/O instance to be initialized.
 * @param addr The base address mapped by @p bsh.
 * @param size The total size mapped by @p bsh.
 * @param bst Bus space tag for @p bsh.
 * @param bsh Bus space handle mapping the full bus enumeration space.
 *
 * @retval 0 success
 * @retval non-zero if initializing @p iobus otherwise fails, a regular
 * unix error code will be returned.
 */
int
bhnd_erom_iobus_init(struct bhnd_erom_iobus *iobus, bhnd_addr_t addr,
    bhnd_size_t size, bus_space_tag_t bst, bus_space_handle_t bsh)
{
	iobus->eio.map = bhnd_erom_iobus_map;
	iobus->eio.read = bhnd_erom_iobus_read;
	iobus->eio.fini = NULL;

	iobus->addr = addr;
	iobus->size = size;
	iobus->bst = bst;
	iobus->bsh = bsh;
	iobus->mapped = false;

	return (0);
}

static int
bhnd_erom_iobus_map(struct bhnd_erom_io *eio, bhnd_addr_t addr,
    bhnd_size_t size)
{
	struct bhnd_erom_iobus *iobus = (struct bhnd_erom_iobus *)eio;

	/* Sanity check the addr/size */
	if (size == 0)
		return (EINVAL);

	/* addr+size must not overflow */
	if (BHND_ADDR_MAX - size < addr)
		return (EINVAL);

	/* addr/size must fit within our bus tag's mapping */
	if (addr < iobus->addr || size > iobus->size)
		return (ENXIO);

	if (iobus->size - (addr - iobus->addr) < size)
		return (ENXIO);

	/* The new addr offset and size must be representable as a
	 * bus_size_t */
	if ((addr - iobus->addr) > BUS_SPACE_MAXSIZE)
		return (ENXIO);

	if (size > BUS_SPACE_MAXSIZE)
		return (ENXIO);

	iobus->offset = addr - iobus->addr;
	iobus->limit = size;
	iobus->mapped = true;

	return (0);
}

static uint32_t
bhnd_erom_iobus_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width)
{
	struct bhnd_erom_iobus *iobus = (struct bhnd_erom_iobus *)eio;

	if (!iobus->mapped)
		panic("no active mapping");

	if (iobus->limit < width || iobus->limit - width < offset)
		panic("invalid offset %#jx", offset);

	switch (width) {
	case 1:
		return (bus_space_read_1(iobus->bst, iobus->bsh,
		    iobus->offset + offset));
	case 2:
		return (bus_space_read_2(iobus->bst, iobus->bsh,
		    iobus->offset + offset));
	case 4:
		return (bus_space_read_4(iobus->bst, iobus->bsh,
		    iobus->offset + offset));
	default:
		panic("invalid width %u", width);
	}
}
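bhnd_erom_iobus_init() exists for the same early-boot case that
bhnd_erom_init_static() targets: no malloc(), no resource allocation, just a
pre-established bus space mapping supplied by the caller. A minimal sketch,
assuming caller-provided enum_addr/enum_size and bst/bsh values, and an
assumed 0x1000-byte register block for the first core:

	struct bhnd_erom_iobus	iobus;
	uint32_t		reg;
	int			error;

	error = bhnd_erom_iobus_init(&iobus, enum_addr, enum_size, bst, bsh);
	if (error)
		return (error);

	/* Map the first core's register block and read a 32-bit register
	 * at offset 0; offsets are validated against the mapping
	 * established above. */
	if ((error = bhnd_erom_io_map(&iobus.eio, enum_addr, 0x1000)))
		return (error);
	reg = bhnd_erom_io_read(&iobus.eio, 0x0, 4);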
Index: head/sys/dev/bhnd/bhndb/bhndb.c
===================================================================
--- head/sys/dev/bhnd/bhndb/bhndb.c	(revision 326870)
+++ head/sys/dev/bhnd/bhndb/bhndb.c	(revision 326871)
@@ -1,2319 +1,2320 @@
/*-
 * Copyright (c) 2015-2016 Landon Fuller
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Abstract BHND Bridge Device Driver
 *
 * Provides generic support for bridging from a parent bus (such as PCI) to
 * a BHND-compatible bus (e.g. bcma or siba).
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "bhnd_chipc_if.h"
#include "bhnd_nvram_if.h"

#include "bhndbvar.h"
#include "bhndb_bus_if.h"
#include "bhndb_hwdata.h"
#include "bhndb_private.h"

/* Debugging flags */
static u_long bhndb_debug = 0;
TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug);

enum {
	BHNDB_DEBUG_PRIO = 1 << 0,
};

#define	BHNDB_DEBUG(_type)	(BHNDB_DEBUG_ ## _type & bhndb_debug)

static bool	bhndb_hw_matches(struct bhndb_softc *sc,
		    struct bhnd_core_info *cores, u_int ncores,
		    const struct bhndb_hw *hw);

static int	bhndb_init_region_cfg(struct bhndb_softc *sc,
		    bhnd_erom_t *erom, struct bhndb_resources *r,
		    struct bhnd_core_info *cores, u_int ncores,
		    const struct bhndb_hw_priority *table);

static int	bhndb_find_hwspec(struct bhndb_softc *sc,
		    struct bhnd_core_info *cores, u_int ncores,
		    const struct bhndb_hw **hw);

bhndb_addrspace	bhndb_get_addrspace(struct bhndb_softc *sc, device_t child);

static struct rman *bhndb_get_rman(struct bhndb_softc *sc, device_t child,
		    int type);

static int	bhndb_init_child_resource(struct resource *r,
		    struct resource *parent, bhnd_size_t offset,
		    bhnd_size_t size);

static int	bhndb_activate_static_region(struct bhndb_softc *sc,
		    struct bhndb_region *region, device_t child, int type,
		    int rid, struct resource *r);

static int	bhndb_try_activate_resource(struct bhndb_softc *sc,
		    device_t child, int type, int rid, struct resource *r,
		    bool *indirect);

static inline struct bhndb_dw_alloc *bhndb_io_resource(struct bhndb_softc *sc,
		    bus_addr_t addr, bus_size_t size, bus_size_t *offset,
		    bool *stolen, bus_addr_t *restore);

/**
 * Default bhndb(4) implementation of DEVICE_PROBE().
 *
 * This function provides the default bhndb implementation of DEVICE_PROBE(),
 * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge().
 */
int
bhndb_generic_probe(device_t dev)
{
	return (BUS_PROBE_NOWILDCARD);
}

static void
bhndb_probe_nomatch(device_t dev, device_t child)
{
	const char *name;

	name = device_get_name(child);
	if (name == NULL)
		name = "unknown device";

	device_printf(dev, "<%s> (no driver attached)\n", name);
}

static int
bhndb_print_child(device_t dev, device_t child)
{
	struct bhndb_softc	*sc;
	struct resource_list	*rl;
	int			 retval = 0;

	sc = device_get_softc(dev);

	retval += bus_print_child_header(dev, child);

	rl = BUS_GET_RESOURCE_LIST(dev, child);
	if (rl != NULL) {
		retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
		    "%#jx");
		retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ,
		    "%jd");
	}

	retval += bus_print_child_domain(dev, child);
	retval += bus_print_child_footer(dev, child);

	return (retval);
}

static int
bhndb_child_pnpinfo_str(device_t bus, device_t child, char *buf,
    size_t buflen)
{
	*buf = '\0';
	return (0);
}

static int
bhndb_child_location_str(device_t dev, device_t child, char *buf,
    size_t buflen)
{
	struct bhndb_softc *sc;

	sc = device_get_softc(dev);

	snprintf(buf, buflen, "base=0x%llx",
	    (unsigned long long) sc->chipid.enum_addr);
	return (0);
}

/**
 * Return true if @p cores matches the @p hw specification.
 *
 * @param sc BHNDB device state.
 * @param cores A device table to match against.
 * @param ncores The number of cores in @p cores.
 * @param hw The hardware description to be matched against.
 */
static bool
bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores,
    u_int ncores, const struct bhndb_hw *hw)
{
	for (u_int i = 0; i < hw->num_hw_reqs; i++) {
		const struct bhnd_core_match	*match;
		bool				 found;

		match = &hw->hw_reqs[i];
		found = false;

		for (u_int d = 0; d < ncores; d++) {
			struct bhnd_core_info *core = &cores[d];

			if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core))
				continue;

			if (!bhnd_core_matches(core, match))
				continue;

			found = true;
			break;
		}

		if (!found)
			return (false);
	}

	return (true);
}
/**
 * Initialize the region maps and priority configuration in @p br using
 * the priority @p table and the set of cores enumerated by @p erom.
 *
 * @param sc The bhndb device state.
 * @param br The resource state to be configured.
 * @param erom EROM parser used to enumerate @p cores.
 * @param cores All cores enumerated on the bridged bhnd bus.
 * @param ncores The length of @p cores.
 * @param table Hardware priority table to be used to determine the relative
 * priorities of per-core port resources.
 */
static int
bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom,
    struct bhndb_resources *br, struct bhnd_core_info *cores, u_int ncores,
    const struct bhndb_hw_priority *table)
{
	const struct bhndb_hw_priority	*hp;
	bhnd_addr_t			 addr;
	bhnd_size_t			 size;
	size_t				 prio_low, prio_default, prio_high;
	int				 error;

	/* The number of port regions per priority band that must be
	 * accessible via dynamic register windows */
	prio_low = 0;
	prio_default = 0;
	prio_high = 0;

	/*
	 * Register bridge regions covering all statically mapped ports.
	 */
	for (u_int i = 0; i < ncores; i++) {
		const struct bhndb_regwin	*regw;
		struct bhnd_core_info		*core;
		struct bhnd_core_match		 md;

		core = &cores[i];
		md = bhnd_core_get_match_desc(core);

		for (regw = br->cfg->register_windows;
		    regw->win_type != BHNDB_REGWIN_T_INVALID; regw++)
		{
			const struct bhndb_port_priority	*pp;
			uint32_t				 alloc_flags;

			/* Only core windows are supported */
			if (regw->win_type != BHNDB_REGWIN_T_CORE)
				continue;

			/* Skip non-matching cores. */
			if (!bhndb_regwin_match_core(regw, core))
				continue;

			/* Fetch the base address of the mapped port */
			error = bhnd_erom_lookup_core_addr(erom, &md,
			    regw->d.core.port_type,
			    regw->d.core.port,
			    regw->d.core.region,
			    NULL,
			    &addr,
			    &size);
			if (error) {
				/* Skip non-applicable register windows */
				if (error == ENOENT)
					continue;

				return (error);
			}

			/*
			 * Apply the register window's region offset, if any.
			 */
			if (regw->d.core.offset > size) {
				device_printf(sc->dev, "invalid register "
				    "window offset %#jx for region %#jx+%#jx\n",
				    regw->d.core.offset, addr, size);
				return (EINVAL);
			}

			addr += regw->d.core.offset;

			/*
			 * Always defer to the register window's size.
			 *
			 * If the port size is smaller than the window size,
			 * this ensures that we fully utilize register windows
			 * larger than the referenced port.
			 *
			 * If the port size is larger than the window size, this
			 * ensures that we do not directly map the allocations
			 * within the region to a too-small window.
			 */
			size = regw->win_size;

			/* Fetch allocation flags from the corresponding port
			 * priority entry, if any */
			pp = bhndb_hw_priorty_find_port(table, core,
			    regw->d.core.port_type, regw->d.core.port,
			    regw->d.core.region);
			if (pp != NULL) {
				alloc_flags = pp->alloc_flags;
			} else {
				alloc_flags = 0;
			}

			/*
			 * Add to the bus region list.
			 *
			 * The window priority for a statically mapped region is
			 * always HIGH.
			 */
			error = bhndb_add_resource_region(br, addr, size,
			    BHNDB_PRIORITY_HIGH, alloc_flags, regw);
			if (error)
				return (error);
		}
	}

	/*
	 * Perform priority accounting and register bridge regions for all
	 * ports defined in the priority table
	 */
	for (u_int i = 0; i < ncores; i++) {
		struct bhnd_core_info	*core;
		struct bhnd_core_match	 md;

		core = &cores[i];
		md = bhnd_core_get_match_desc(core);

		/*
		 * Skip priority accounting for cores that ...
		 */

		/* ... do not require bridge resources */
		if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core))
			continue;

		/* ... do not have a priority table entry */
		hp = bhndb_hw_priority_find_core(table, core);
		if (hp == NULL)
			continue;

		/* ... are explicitly disabled in the priority table. */
		if (hp->priority == BHNDB_PRIORITY_NONE)
			continue;

		/* Determine the number of dynamic windows required and
		 * register their bus_region entries. */
		for (u_int i = 0; i < hp->num_ports; i++) {
			const struct bhndb_port_priority *pp;

			pp = &hp->ports[i];

			/* Fetch the address+size of the mapped port. */
			error = bhnd_erom_lookup_core_addr(erom, &md,
			    pp->type, pp->port, pp->region,
			    NULL, &addr, &size);
			if (error) {
				/* Skip ports not defined on this device */
				if (error == ENOENT)
					continue;

				return (error);
			}

			/* Skip ports with an existing static mapping */
			if (bhndb_has_static_region_mapping(br, addr, size))
				continue;

			/* Define a dynamic region for this port */
			error = bhndb_add_resource_region(br, addr, size,
			    pp->priority, pp->alloc_flags, NULL);
			if (error)
				return (error);

			/* Update port mapping counts */
			switch (pp->priority) {
			case BHNDB_PRIORITY_NONE:
				break;
			case BHNDB_PRIORITY_LOW:
				prio_low++;
				break;
			case BHNDB_PRIORITY_DEFAULT:
				prio_default++;
				break;
			case BHNDB_PRIORITY_HIGH:
				prio_high++;
				break;
			}
		}
	}

	/* Determine the minimum priority at which we'll allocate direct
	 * register windows from our dynamic pool */
	size_t prio_total = prio_low + prio_default + prio_high;
	if (prio_total <= br->dwa_count) {
		/* low+default+high priority regions get windows */
		br->min_prio = BHNDB_PRIORITY_LOW;
	} else if (prio_default + prio_high <= br->dwa_count) {
		/* default+high priority regions get windows */
		br->min_prio = BHNDB_PRIORITY_DEFAULT;
	} else {
		/* high priority regions get windows */
		br->min_prio = BHNDB_PRIORITY_HIGH;
	}

	if (BHNDB_DEBUG(PRIO)) {
		struct bhndb_region	*region;
		const char		*direct_msg, *type_msg;
		bhndb_priority_t	 prio, prio_min;
		uint32_t		 flags;

		prio_min = br->min_prio;
		device_printf(sc->dev, "min_prio: %d\n", prio_min);

		STAILQ_FOREACH(region, &br->bus_regions, link) {
			prio = region->priority;
			flags = region->alloc_flags;

			direct_msg = prio >= prio_min ? "direct" : "indirect";
			type_msg = region->static_regwin ? "static" : "dynamic";

			device_printf(sc->dev, "region 0x%llx+0x%llx priority "
			    "%u %s/%s",
			    (unsigned long long) region->addr,
			    (unsigned long long) region->size,
			    region->priority, direct_msg, type_msg);

			if (flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT)
				printf(" [overcommit]\n");
			else
				printf("\n");
		}
	}

	return (0);
}

/**
 * Find a hardware specification for @p dev.
 *
 * @param sc The bhndb device state.
 * @param cores All cores enumerated on the bridged bhnd bus.
 * @param ncores The length of @p cores.
 * @param[out] hw On success, the hardware specification matched
 * with @p dev.
 *
 * @retval 0 success
 * @retval non-zero if an error occurs fetching device info for comparison.
 */
static int
bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores,
    u_int ncores, const struct bhndb_hw **hw)
{
	const struct bhndb_hw	*next, *hw_table;

	/* Search for the first matching hardware config. */
	hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev);
	for (next = hw_table; next->hw_reqs != NULL; next++) {
		if (!bhndb_hw_matches(sc, cores, ncores, next))
			continue;

		/* Found */
		*hw = next;
		return (0);
	}

	return (ENOENT);
}
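/*
 * A worked example of the dynamic-window accounting performed in
 * bhndb_init_region_cfg() above, under assumed (illustrative) counts of
 * prio_low = 4, prio_default = 3, and prio_high = 1:
 *
 *	br->dwa_count >= 8: min_prio = BHNDB_PRIORITY_LOW
 *	    (all 4+3+1 regions can be given direct register windows)
 *	br->dwa_count >= 4: min_prio = BHNDB_PRIORITY_DEFAULT
 *	    (the 3+1 default/high regions get direct windows; low-priority
 *	    regions fall back to indirect I/O)
 *	otherwise:          min_prio = BHNDB_PRIORITY_HIGH
 */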
/**
 * Helper function that must be called by subclass bhndb(4) drivers
 * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4)
 * APIs on the bridge device.
 *
 * This function will add a bridged bhnd(4) child device with a device order of
 * BHND_PROBE_BUS. Any subclass bhndb(4) driver may use the BHND_PROBE_*
 * priority bands to add additional devices that will be attached in
 * their preferred order relative to the bridged bhnd(4) bus.
 *
 * @param dev The bridge device to attach.
 * @param cid The bridged device's chip identification.
 * @param cores The bridged device's core table.
 * @param ncores The number of cores in @p cores.
 * @param bridge_core Core info for the bhnd(4) core serving as the host
 * bridge.
 * @param erom_class An erom parser class that may be used to parse
 * the bridged device's device enumeration table.
 */
int
bhndb_attach(device_t dev, struct bhnd_chipid *cid,
    struct bhnd_core_info *cores, u_int ncores,
    struct bhnd_core_info *bridge_core, bhnd_erom_class_t *erom_class)
{
	struct bhndb_devinfo		*dinfo;
	struct bhndb_softc		*sc;
	const struct bhndb_hw		*hw;
	const struct bhndb_hwcfg	*hwcfg;
	const struct bhndb_hw_priority	*hwprio;
	struct bhnd_erom_io		*eio;
	bhnd_erom_t			*erom;
	int				 error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->parent_dev = device_get_parent(dev);
	sc->bridge_core = *bridge_core;
	sc->chipid = *cid;

	if ((error = bhnd_service_registry_init(&sc->services)))
		return (error);

	BHNDB_LOCK_INIT(sc);

	erom = NULL;

	/* Find a matching bridge hardware configuration */
	if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) {
		device_printf(sc->dev, "unable to identify device, "
		    " using generic bridge resource definitions\n");

		hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, dev);
		hw = NULL;
	} else {
		hwcfg = hw->cfg;
	}

	if (hw != NULL && (bootverbose || BHNDB_DEBUG(PRIO))) {
		device_printf(sc->dev, "%s resource configuration\n",
		    hw->name);
	}

	/* Allocate bridge resource state using the discovered hardware
	 * configuration */
	sc->bus_res = bhndb_alloc_resources(sc->dev, sc->parent_dev, hwcfg);
	if (sc->bus_res == NULL) {
		device_printf(sc->dev, "failed to allocate bridge resource "
		    "state\n");
		error = ENOMEM;
		goto failed;
	}

	/* Add our bridged bus device */
	sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", -1);
	if (sc->bus_dev == NULL) {
		error = ENXIO;
		goto failed;
	}

	dinfo = device_get_ivars(sc->bus_dev);
	dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED;

	/* We can now use bhndb to perform bridging of SYS_RES_MEMORY
	 * resources; we use this to instantiate an erom parser instance */
	eio = bhnd_erom_iores_new(sc->bus_dev, 0);
	if ((erom = bhnd_erom_alloc(erom_class, cid, eio)) == NULL) {
		bhnd_erom_io_fini(eio);
		error = ENXIO;
		goto failed;
	}

	/* Populate our resource priority configuration */
	hwprio = BHNDB_BUS_GET_HARDWARE_PRIO(sc->parent_dev, sc->dev);
	error = bhndb_init_region_cfg(sc, erom, sc->bus_res, cores, ncores,
	    hwprio);
	if (error) {
		device_printf(sc->dev, "failed to initialize resource "
		    "priority configuration: %d\n", error);
		goto failed;
	}

	/* Free our erom instance */
	bhnd_erom_free(erom);
	erom = NULL;

	return (0);

failed:
	BHNDB_LOCK_DESTROY(sc);

	if (sc->bus_res != NULL)
		bhndb_free_resources(sc->bus_res);

	if (erom != NULL)
		bhnd_erom_free(erom);

	bhnd_service_registry_fini(&sc->services);

	return (error);
}

/**
 * Default bhndb(4) implementation of DEVICE_DETACH().
 *
 * This function detaches any child devices, and if successful, releases all
 * resources held by the bridge device.
 */
int
bhndb_generic_detach(device_t dev)
{
	struct bhndb_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	/* Detach children */
	if ((error = bus_generic_detach(dev)))
		return (error);

	/* Delete children */
	if ((error = device_delete_children(dev)))
		return (error);

	/* Clean up our service registry */
	if ((error = bhnd_service_registry_fini(&sc->services)))
		return (error);

	/* Clean up our driver state. */
	bhndb_free_resources(sc->bus_res);

	BHNDB_LOCK_DESTROY(sc);

	return (0);
}

/**
 * Default bhndb(4) implementation of DEVICE_SUSPEND().
 *
 * This function calls bus_generic_suspend() (or implements equivalent
 * behavior).
 */
int
bhndb_generic_suspend(device_t dev)
{
	return (bus_generic_suspend(dev));
}

/**
 * Default bhndb(4) implementation of DEVICE_RESUME().
 *
 * This function calls bus_generic_resume() (or implements equivalent
 * behavior).
 */
int
bhndb_generic_resume(device_t dev)
{
	struct bhndb_softc	*sc;
	struct bhndb_resources	*bus_res;
	struct bhndb_dw_alloc	*dwa;
	int			 error;

	sc = device_get_softc(dev);
	bus_res = sc->bus_res;

	/* Guarantee that all in-use dynamic register windows are mapped to
	 * their previously configured target address. */
	BHNDB_LOCK(sc);
+	error = 0;
	for (size_t i = 0; i < bus_res->dwa_count; i++) {
		dwa = &bus_res->dw_alloc[i];

		/* Skip regions that were not previously used */
		if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0)
			continue;

		/* Otherwise, ensure the register window is correct before
		 * any children attempt MMIO */
		error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target);
		if (error)
			break;
	}
	BHNDB_UNLOCK(sc);

	/* Error restoring hardware state; children cannot be safely
	 * resumed */
	if (error) {
		device_printf(dev, "Unable to restore hardware configuration; "
		    "cannot resume: %d\n", error);
		return (error);
	}

	return (bus_generic_resume(dev));
}

/**
 * Default implementation of BHNDB_SUSPEND_RESOURCE.
 */
static void
bhndb_suspend_resource(device_t dev, device_t child, int type,
    struct resource *r)
{
	struct bhndb_softc	*sc;
	struct bhndb_dw_alloc	*dwa;

	sc = device_get_softc(dev);

	/* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */
	if (type != SYS_RES_MEMORY)
		return;

	BHNDB_LOCK(sc);
	dwa = bhndb_dw_find_resource(sc->bus_res, r);
	if (dwa == NULL) {
		BHNDB_UNLOCK(sc);
		return;
	}

	if (BHNDB_DEBUG(PRIO))
		device_printf(child, "suspend resource type=%d 0x%jx+0x%jx\n",
		    type, rman_get_start(r), rman_get_size(r));

	/* Release the resource's window reference */
	bhndb_dw_release(sc->bus_res, dwa, r);
	BHNDB_UNLOCK(sc);
}

/**
 * Default implementation of BHNDB_RESUME_RESOURCE.
 */
static int
bhndb_resume_resource(device_t dev, device_t child, int type,
    struct resource *r)
{
	struct bhndb_softc *sc;

	sc = device_get_softc(dev);

	/* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */
	if (type != SYS_RES_MEMORY)
		return (0);

	/* Inactive resources don't require reallocation of bridge
	 * resources */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (0);

	if (BHNDB_DEBUG(PRIO))
		device_printf(child, "resume resource type=%d 0x%jx+0x%jx\n",
		    type, rman_get_start(r), rman_get_size(r));

	return (bhndb_try_activate_resource(sc, rman_get_device(r), type,
	    rman_get_rid(r), r, NULL));
}

/**
 * Default bhndb(4) implementation of BUS_READ_IVAR().
 */
static int
bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	return (ENOENT);
}

/**
 * Default bhndb(4) implementation of BUS_WRITE_IVAR().
 */
static int
bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	return (ENOENT);
}

/**
 * Return the address space for the given @p child device.
 */
bhndb_addrspace
bhndb_get_addrspace(struct bhndb_softc *sc, device_t child)
{
	struct bhndb_devinfo	*dinfo;
	device_t		 imd_dev;

	/* Find the directly attached parent of the requesting device */
	imd_dev = child;
	while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev)
		imd_dev = device_get_parent(imd_dev);

	if (imd_dev == NULL)
		panic("bhndb address space request for non-child device %s\n",
		    device_get_nameunit(child));

	dinfo = device_get_ivars(imd_dev);
	return (dinfo->addrspace);
}
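/*
 * A note on the `error = 0' added in bhndb_generic_resume() above: every
 * iteration of the restore loop may `continue' past windows that were never
 * used, so without the initialization the post-loop `if (error)' could test
 * an uninitialized variable. A reduced, self-contained sketch of the fixed
 * pattern (illustrative only, using identifiers from the function above):
 */
#if 0
static int
example_restore_windows(device_t dev, struct bhndb_resources *bus_res)
{
	struct bhndb_dw_alloc	*dwa;
	int			 error;

	error = 0;	/* defined even if every window is skipped */
	for (size_t i = 0; i < bus_res->dwa_count; i++) {
		dwa = &bus_res->dw_alloc[i];
		if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0)
			continue;

		if ((error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win,
		    dwa->target)))
			break;
	}

	return (error);
}
#endif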
*/ static struct rman * bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) { switch (bhndb_get_addrspace(sc, child)) { case BHNDB_ADDRSPACE_NATIVE: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->ht_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } case BHNDB_ADDRSPACE_BRIDGED: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->br_mem_rman); case SYS_RES_IRQ: return (&sc->bus_res->br_irq_rman); default: return (NULL); } } /* Quieten gcc */ return (NULL); } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_CORE_DISABLED(). */ static bool bhndb_is_core_disabled(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(core))) return (!bhnd_cores_equal(core, &sc->bridge_core)); /* Assume the core is populated */ return (false); } /** * Default bhndb(4) implementation of BHNDB_GET_HOSTB_CORE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices. */ static int bhndb_get_hostb_core(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc = device_get_softc(dev); *core = sc->bridge_core; return (0); } /** * Default bhndb(4) implementation of BHND_BUS_GET_SERVICE_REGISTRY(). */ static struct bhnd_service_registry * bhndb_get_service_registry(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->services); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { /* Delegate to our parent device's bus; the requested * resource type isn't handled locally. 
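	 * This is the case for e.g. SYS_RES_IRQ requests issued by children
	 * in the native address space, for which bhndb_get_rman() returns
	 * NULL.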
*/ return (BUS_ALLOC_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, start, end, count, flags)); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || count > ((end - start) + 1)) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int bhndb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_RELEASE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; } return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). */ static int bhndb_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct bhndb_softc *sc; struct rman *rm; rman_res_t mstart, mend; int error; sc = device_get_softc(dev); error = 0; /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. 
 */
	rm = bhndb_get_rman(sc, child, type);
	if (rm == NULL) {
		return (BUS_ADJUST_RESOURCE(device_get_parent(sc->parent_dev),
		    child, type, r, start, end));
	}

	/* Verify basic constraints */
	if (end <= start)
		return (EINVAL);

	if (!rman_is_region_manager(r, rm))
		return (ENXIO);

	BHNDB_LOCK(sc);

	/* If not active, allow any range permitted by the resource manager */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		goto done;

	/* Otherwise, the range is limited by the bridged resource mapping */
	error = bhndb_find_resource_limits(sc->bus_res, type, r, &mstart,
	    &mend);
	if (error)
		goto done;

	if (start < mstart || end > mend) {
		error = EINVAL;
		goto done;
	}

	/* Fall through */
done:
	if (!error)
		error = rman_adjust_resource(r, start, end);

	BHNDB_UNLOCK(sc);
	return (error);
}

/**
 * Initialize child resource @p r with a virtual address, tag, and handle
 * copied from @p parent, adjusted to contain only the range defined by
 * @p offset and @p size.
 *
 * @param r The resource to be initialized.
 * @param parent The parent bus resource that fully contains the subregion.
 * @param offset The subregion offset within @p parent.
 * @param size The subregion size.
 */
static int
bhndb_init_child_resource(struct resource *r, struct resource *parent,
    bhnd_size_t offset, bhnd_size_t size)
{
	bus_space_handle_t	bh, child_bh;
	bus_space_tag_t		bt;
	uintptr_t		vaddr;
	int			error;

	/* Fetch the parent resource's real bus values */
	vaddr = (uintptr_t) rman_get_virtual(parent);
	bt = rman_get_bustag(parent);
	bh = rman_get_bushandle(parent);

	/* Configure child resource with window-adjusted real bus values */
	vaddr += offset;
	error = bus_space_subregion(bt, bh, offset, size, &child_bh);
	if (error)
		return (error);

	rman_set_virtual(r, (void *) vaddr);
	rman_set_bustag(r, bt);
	rman_set_bushandle(r, child_bh);

	return (0);
}

/**
 * Attempt activation of a fixed register window mapping for @p child.
 *
 * @param sc BHNDB device state.
 * @param region The static region definition capable of mapping @p r.
 * @param child A child requesting resource activation.
 * @param type Resource type.
 * @param rid Resource identifier.
 * @param r Resource to be activated.
 *
 * @retval 0 if @p r was activated successfully
 * @retval ENXIO if no bridge resource was found mapping the fixed register
 * window.
 * @retval non-zero if @p r could not be activated.
 */
static int
bhndb_activate_static_region(struct bhndb_softc *sc,
    struct bhndb_region *region, device_t child, int type, int rid,
    struct resource *r)
{
	struct resource			*bridge_res;
	const struct bhndb_regwin	*win;
	bhnd_size_t			 parent_offset;
	rman_res_t			 r_start, r_size;
	int				 error;

	win = region->static_regwin;

	KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type),
	    ("can't activate non-static region"));

	r_start = rman_get_start(r);
	r_size = rman_get_size(r);

	/* Find the corresponding bridge resource */
	bridge_res = bhndb_host_resource_for_regwin(sc->bus_res->res, win);
	if (bridge_res == NULL)
		return (ENXIO);

	/* Calculate subregion offset within the parent resource */
	parent_offset = r_start - region->addr;
	parent_offset += win->win_offset;

	/* Configure resource with its real bus values. */
	error = bhndb_init_child_resource(r, bridge_res, parent_offset,
	    r_size);
	if (error)
		return (error);

	/* Mark active */
	if ((error = rman_activate_resource(r)))
		return (error);

	return (0);
}

/**
 * Attempt to allocate/retain a dynamic register window for @p r, returning
 * the retained window.
 *
 * @param sc The bhndb driver state.
 * @param r The resource for which a window will be retained.
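 *
 * The caller must hold the bridge lock. A usage sketch (hypothetical
 * caller, for illustration only):
 *
 *	BHNDB_LOCK(sc);
 *	dwa = bhndb_retain_dynamic_window(sc, r);
 *	BHNDB_UNLOCK(sc);
 *	if (dwa == NULL)
 *		... fall back on an indirect mapping ...
 *
 * @retval non-NULL the retained window
 * @retval NULL if no usable register window could be found or configured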
 */
static struct bhndb_dw_alloc *
bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r)
{
	struct bhndb_dw_alloc	*dwa;
	rman_res_t		 r_start, r_size;
	int			 error;

	BHNDB_LOCK_ASSERT(sc, MA_OWNED);

	r_start = rman_get_start(r);
	r_size = rman_get_size(r);

	/* Look for an existing dynamic window we can reference */
	dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size);
	if (dwa != NULL) {
		if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0)
			return (dwa);

		return (NULL);
	}

	/* Otherwise, try to reserve a free window */
	dwa = bhndb_dw_next_free(sc->bus_res);
	if (dwa == NULL) {
		/* No free windows */
		return (NULL);
	}

	/* Window must be large enough to map the entire resource */
	if (dwa->win->win_size < rman_get_size(r))
		return (NULL);

	/* Set the window target */
	error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa,
	    rman_get_start(r), rman_get_size(r));
	if (error) {
		device_printf(sc->dev, "dynamic window initialization "
		    "for 0x%llx-0x%llx failed: %d\n",
		    (unsigned long long) r_start,
		    (unsigned long long) r_start + r_size - 1,
		    error);
		return (NULL);
	}

	/* Add our reservation */
	if (bhndb_dw_retain(sc->bus_res, dwa, r))
		return (NULL);

	return (dwa);
}

/**
 * Activate a resource using any viable static or dynamic register window.
 *
 * @param sc The bhndb driver state.
 * @param child The child holding ownership of @p r.
 * @param type The type of the resource to be activated.
 * @param rid The resource ID of @p r.
 * @param r The resource to be activated.
 * @param[out] indirect On error and if not NULL, will be set to 'true' if
 * the caller should instead use an indirect resource mapping.
 *
 * @retval 0 success
 * @retval non-zero activation failed.
 */
static int
bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, int type,
    int rid, struct resource *r, bool *indirect)
{
	struct bhndb_region	*region;
	struct bhndb_dw_alloc	*dwa;
	bhndb_priority_t	 dw_priority;
	rman_res_t		 r_start, r_size;
	rman_res_t		 parent_offset;
	int			 error;

	BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED);

	if (indirect != NULL)
		*indirect = false;

	switch (type) {
	case SYS_RES_IRQ:
		/* IRQ resources are always directly mapped */
		return (rman_activate_resource(r));

	case SYS_RES_MEMORY:
		/* Handled below */
		break;

	default:
		device_printf(sc->dev, "unsupported resource type %d\n",
		    type);
		return (ENXIO);
	}

	/* Only MMIO resources can be mapped via register windows */
	KASSERT(type == SYS_RES_MEMORY, ("invalid type: %d", type));

	r_start = rman_get_start(r);
	r_size = rman_get_size(r);

	/* Activate native addrspace resources using the host address space */
	if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) {
		struct resource *parent;

		/* Find the bridge resource referenced by the child */
		parent = bhndb_host_resource_for_range(sc->bus_res->res,
		    type, r_start, r_size);
		if (parent == NULL) {
			device_printf(sc->dev, "host resource not found "
			    "for 0x%llx-0x%llx\n",
			    (unsigned long long) r_start,
			    (unsigned long long) r_start + r_size - 1);
			return (ENOENT);
		}

		/* Initialize child resource with the real bus values */
		error = bhndb_init_child_resource(r, parent,
		    r_start - rman_get_start(parent), r_size);
		if (error)
			return (error);

		/* Try to activate child resource */
		return (rman_activate_resource(r));
	}

	/* Default to low priority */
	dw_priority = BHNDB_PRIORITY_LOW;

	/* Look for a bus region matching the resource's address range */
	region = bhndb_find_resource_region(sc->bus_res, r_start, r_size);
	if (region != NULL)
		dw_priority = region->priority;

	/* Prefer static mappings over consuming a dynamic window.
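	 * Static windows are fixed, always-mapped regions, while dynamic
	 * windows are a limited pool that must be retargeted at runtime
	 * (see bhndb_retain_dynamic_window() above).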
*/ if (region && region->static_regwin) { error = bhndb_activate_static_region(sc, region, child, type, rid, r); if (error) device_printf(sc->dev, "static window allocation " "for 0x%llx-0x%llx failed\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (error); } /* A dynamic window will be required; is this resource high enough * priority to be reserved a dynamic window? */ if (dw_priority < sc->bus_res->min_prio) { if (indirect) *indirect = true; return (ENOMEM); } /* Find and retain a usable window */ BHNDB_LOCK(sc); { dwa = bhndb_retain_dynamic_window(sc, r); } BHNDB_UNLOCK(sc); if (dwa == NULL) { if (indirect) *indirect = true; return (ENOMEM); } /* Configure resource with its real bus values. */ parent_offset = dwa->win->win_offset; parent_offset += r_start - dwa->target; error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset, dwa->win->win_size); if (error) goto failed; /* Mark active */ if ((error = rman_activate_resource(r))) goto failed; return (0); failed: /* Release our region allocation. */ BHNDB_LOCK(sc); bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); return (error); } /** * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE(). */ static int bhndb_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_ACTIVATE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } return (bhndb_try_activate_resource(sc, child, type, rid, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; int error; sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_DEACTIVATE_RESOURCE( device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); switch (type) { case SYS_RES_IRQ: /* No bridge-level state to be freed */ return (0); case SYS_RES_MEMORY: /* Free any dynamic window allocation. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa != NULL) bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } return (0); default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST(). */ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_NATIVE children, all resources are activated as direct * resources via BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_BRIDGED children, the resource priority is determined, * and if possible, the resource is activated as a direct resource. 
For example, * depending on resource priority and bridge resource availability, this * function will attempt to activate SYS_RES_MEMORY resources using either a * static register window, a dynamic register window, or it will configure @p r * as an indirect resource -- in that order. */ static int bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct bhndb_softc *sc; struct bhndb_region *region; bhndb_priority_t r_prio; rman_res_t r_start, r_size; int error; bool indirect; KASSERT(!r->direct, ("direct flag set on inactive resource")); KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE), ("RF_ACTIVE set on inactive resource")); sc = device_get_softc(dev); /* Delegate directly to BUS_ACTIVATE_RESOURCE() if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { error = BUS_ACTIVATE_RESOURCE(dev, child, type, rid, r->res); if (error == 0) r->direct = true; return (error); } r_start = rman_get_start(r->res); r_size = rman_get_size(r->res); /* Determine the resource priority of bridged resources, and skip direct * allocation if the priority is too low. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { switch (type) { case SYS_RES_IRQ: /* IRQ resources are always direct */ break; case SYS_RES_MEMORY: region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) r_prio = region->priority; else r_prio = BHNDB_PRIORITY_NONE; /* If less than the minimum dynamic window priority, * this resource should always be indirect. */ if (r_prio < sc->bus_res->min_prio) return (0); break; default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /* Attempt direct activation */ error = bhndb_try_activate_resource(sc, child, type, rid, r->res, &indirect); if (!error) { r->direct = true; } else if (indirect) { /* The request was valid, but no viable register window is * available; indirection must be employed. */ error = 0; r->direct = false; } if (BHNDB_DEBUG(PRIO) && bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { device_printf(child, "activated 0x%llx-0x%llx as %s " "resource\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, r->direct ? "direct" : "indirect"); } return (error); } /** * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Indirect resources don't require activation */ if (!r->direct) return (0); KASSERT(rman_get_flags(r->res) & RF_ACTIVE, ("RF_ACTIVE not set on direct resource")); /* Perform deactivation */ error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r->res); if (!error) r->direct = false; return (error); } /** * Find the best available bridge resource allocation record capable of handling * bus I/O requests of @p size at @p addr. * * In order of preference, this function will either: * * - Configure and return a free allocation record * - Return an existing allocation record mapping the requested space, or * - Steal, configure, and return an in-use allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. 
* @param[out] borrowed Set to true if the allocation record was borrowed to * fulfill this request; the borrowed record maps the target address range, * and must not be modified. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static struct bhndb_dw_alloc * bhndb_io_resource_get_window(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bool *borrowed, bool *stolen, bus_addr_t *restore) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; struct bhndb_region *region; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; *borrowed = false; *stolen = false; /* Try to fetch a free window */ if ((dwa = bhndb_dw_next_free(br)) != NULL) return (dwa); /* Search for an existing dynamic mapping of this address range. * Static regions are not searched, as a statically mapped * region would never be allocated as an indirect resource. */ for (size_t i = 0; i < br->dwa_count; i++) { const struct bhndb_regwin *win; dwa = &br->dw_alloc[i]; win = dwa->win; KASSERT(win->win_type == BHNDB_REGWIN_T_DYN, ("invalid register window type")); /* Verify the range */ if (addr < dwa->target) continue; if (addr + size > dwa->target + win->win_size) continue; /* Found */ *borrowed = true; return (dwa); } /* Try to steal a window; this should only be required on very early * PCI_V0 (BCM4318, etc) Wi-Fi chipsets */ region = bhndb_find_resource_region(br, addr, size); if (region == NULL) return (NULL); if ((region->alloc_flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) == 0) return (NULL); /* Steal a window. This acquires our backing spinlock, disabling * interrupts; the spinlock will be released by * bhndb_dw_return_stolen() */ if ((dwa = bhndb_dw_steal(br, restore)) != NULL) { *stolen = true; return (dwa); } panic("register windows exhausted attempting to map 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } /** * Return a borrowed reference to a bridge resource allocation record capable * of handling bus I/O requests of @p size at @p addr. * * This will either return a reference to an existing allocation record mapping * the requested space, or will configure and return a free allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore) { struct bhndb_dw_alloc *dwa; bool borrowed; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); dwa = bhndb_io_resource_get_window(sc, addr, size, &borrowed, stolen, restore); /* Adjust the window if the I/O request won't fit in the current * target range. 
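	 *
	 * For example (illustrative addresses only): a 4-byte read at
	 * 0x18001000 cannot be satisfied by a 0x1000-byte window targeting
	 * 0x18000000, and the window must first be retargeted via
	 * bhndb_dw_set_addr().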
*/ if (addr < dwa->target || addr > dwa->target + dwa->win->win_size || (dwa->target + dwa->win->win_size) - addr < size) { /* Cannot modify target of borrowed windows */ if (borrowed) { panic("borrowed register window does not map expected " "range 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ bus_addr_t restore; \ bool stolen; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, rman_get_start(r->res) + \ offset, _io_size, &io_offset, &stolen, &restore); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ if (stolen) { \ bhndb_dw_return_stolen(sc->dev, sc->bus_res, \ dwa, restore); \ } \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _name) \ static _type \ bhndb_bus_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _name (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _name) \ static void \ bhndb_bus_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _name (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */ #define BHNDB_IO_MISC(_type, _ptr, _op, _size) \ static void \ bhndb_bus_ ## _op ## _ ## _size (device_t dev, \ device_t child, struct bhnd_resource *r, bus_size_t offset, \ _type _ptr datap, bus_size_t count) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \ bus_ ## _op ## _ ## _size (io_res, io_offset, \ datap, count); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a complete set of read/write methods */ #define BHNDB_IO_METHODS(_type, _size) \ BHNDB_IO_READ(_type, _size) \ BHNDB_IO_WRITE(_type, _size) \ \ BHNDB_IO_READ(_type, stream_ ## _size) \ BHNDB_IO_WRITE(_type, stream_ ## _size) \ \ BHNDB_IO_MISC(_type, *, read_multi, _size) \ BHNDB_IO_MISC(_type, *, write_multi, _size) \ \ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \ \ BHNDB_IO_MISC(_type, , set_multi, _size) \ BHNDB_IO_MISC(_type, , set_region, _size) \ BHNDB_IO_MISC(_type, *, read_region, _size) \ BHNDB_IO_MISC(_type, *, write_region, _size) \ \ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \ BHNDB_IO_MISC(_type, *, write_region_stream, _size) BHNDB_IO_METHODS(uint8_t, 1); BHNDB_IO_METHODS(uint16_t, 2); BHNDB_IO_METHODS(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER(). 
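 *
 * Like the BHNDB_IO_METHODS()-generated accessors above (e.g.
 * BHNDB_IO_READ(uint32_t, 4) expands to bhndb_bus_read_4()), this method
 * temporarily borrows or steals a register window mapping the target
 * address, and issues the request against the backing host resource.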
*/ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHNDB_IO_COMMON_SETUP(length); bus_barrier(io_res, io_offset + offset, length, flags); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BHND_MAP_INTR(). */ static int bhndb_bhnd_map_intr(device_t dev, device_t child, u_int intr, rman_res_t *irq) { struct bhndb_softc *sc; u_int ivec; int error; sc = device_get_softc(dev); /* Is the intr valid? */ if (intr >= bhnd_get_intr_count(child)) return (EINVAL); /* Fetch the interrupt vector */ if ((error = bhnd_get_intr_ivec(child, intr, &ivec))) return (error); /* Map directly to the actual backplane interrupt vector */ *irq = ivec; return (0); } /** * Default bhndb(4) implementation of BHND_UNMAP_INTR(). */ static void bhndb_bhnd_unmap_intr(device_t dev, device_t child, rman_res_t irq) { /* No state to clean up */ } /** * Default bhndb(4) implementation of BUS_SETUP_INTR(). */ static int bhndb_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { struct bhndb_softc *sc; struct bhndb_intr_isrc *isrc; struct bhndb_intr_handler *ih; int error; sc = device_get_softc(dev); /* Fetch the isrc */ if ((error = BHNDB_MAP_INTR_ISRC(dev, r, &isrc))) { device_printf(dev, "failed to fetch isrc: %d\n", error); return (error); } /* Allocate new ihandler entry */ ih = bhndb_alloc_intr_handler(child, r, isrc); if (ih == NULL) return (ENOMEM); /* Perform actual interrupt setup via the host isrc */ error = bus_setup_intr(isrc->is_owner, isrc->is_res, flags, filter, handler, arg, &ih->ih_cookiep); if (error) { bhndb_free_intr_handler(ih); return (error); } /* Add to our interrupt handler list */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Provide the interrupt handler entry as our cookiep value */ *cookiep = ih; return (0); } /** * Default bhndb(4) implementation of BUS_TEARDOWN_INTR(). */ static int bhndb_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookiep) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; int error; sc = device_get_softc(dev); /* Locate and claim ownership of the interrupt handler entry */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookiep); if (ih == NULL) { panic("%s requested teardown of invalid cookiep %p", device_get_nameunit(child), cookiep); } bhndb_deregister_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Perform actual interrupt teardown via the host isrc */ isrc = ih->ih_isrc; error = bus_teardown_intr(isrc->is_owner, isrc->is_res, ih->ih_cookiep); if (error) { /* If teardown fails, we need to reinsert the handler entry * to allow later teardown */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); return (error); } /* Free the entry */ bhndb_free_intr_handler(ih); return (0); } /** * Default bhndb(4) implementation of BUS_BIND_INTR(). 
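 *
 * The bind request is forwarded to the host interrupt source (isrc)
 * backing the child's IRQ resource.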
 */
static int
bhndb_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
{
	struct bhndb_softc		*sc;
	struct bhndb_intr_handler	*ih;
	struct bhndb_intr_isrc		*isrc;

	sc = device_get_softc(dev);
	isrc = NULL;

	/* Fetch the isrc corresponding to the child IRQ resource */
	BHNDB_LOCK(sc);
	STAILQ_FOREACH(ih, &sc->bus_res->bus_intrs, ih_link) {
		if (ih->ih_res == irq) {
			isrc = ih->ih_isrc;
			break;
		}
	}
	BHNDB_UNLOCK(sc);

	if (isrc == NULL) {
		panic("%s requested bind of invalid irq %#jx-%#jx",
		    device_get_nameunit(child), rman_get_start(irq),
		    rman_get_end(irq));
	}

	/* Perform actual bind via the host isrc */
	return (bus_bind_intr(isrc->is_owner, isrc->is_res, cpu));
}

/**
 * Default bhndb(4) implementation of BUS_DESCRIBE_INTR().
 */
static int
bhndb_describe_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie, const char *descr)
{
	struct bhndb_softc		*sc;
	struct bhndb_intr_handler	*ih;
	struct bhndb_intr_isrc		*isrc;

	sc = device_get_softc(dev);

	/* Locate the interrupt handler entry; the caller owns the handler
	 * reference, and thus our entry is guaranteed to remain valid after
	 * we drop our lock below. */
	BHNDB_LOCK(sc);

	ih = bhndb_find_intr_handler(sc->bus_res, cookie);
	if (ih == NULL) {
		panic("%s requested invalid cookiep %p",
		    device_get_nameunit(child), cookie);
	}

	isrc = ih->ih_isrc;

	BHNDB_UNLOCK(sc);

	/* Perform the actual request via the host isrc */
	return (BUS_DESCRIBE_INTR(device_get_parent(isrc->is_owner),
	    isrc->is_owner, isrc->is_res, ih->ih_cookiep, descr));
}

/**
 * Default bhndb(4) implementation of BUS_CONFIG_INTR().
 */
static int
bhndb_config_intr(device_t dev, int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Unsupported */
	return (ENXIO);
}

/**
 * Default bhndb(4) implementation of BUS_REMAP_INTR().
 */
static int
bhndb_remap_intr(device_t dev, device_t child, u_int irq)
{
	/* Unsupported */
	return (ENXIO);
}

/**
 * Default bhndb(4) implementation of BHND_BUS_GET_DMA_TRANSLATION().
 */
static inline int
bhndb_get_dma_translation(device_t dev, device_t child, u_int width,
    uint32_t flags, bus_dma_tag_t *dmat,
    struct bhnd_dma_translation *translation)
{
	struct bhndb_softc			*sc;
	const struct bhndb_hwcfg		*hwcfg;
	const struct bhnd_dma_translation	*match;
	bus_dma_tag_t				 match_dmat;
	bhnd_addr_t				 addr_mask, match_addr_mask;

	sc = device_get_softc(dev);
	hwcfg = sc->bus_res->cfg;

	/* Is DMA supported? */
	if (sc->bus_res->res->dma_tags == NULL)
		return (ENODEV);

	/* Find the best matching descriptor for the requested type */
	addr_mask = BHND_DMA_ADDR_BITMASK(width);
	match = NULL;
	match_addr_mask = 0x0;
	match_dmat = NULL;

	for (size_t i = 0; i < sc->bus_res->res->num_dma_tags; i++) {
		const struct bhnd_dma_translation	*dwin;
		bhnd_addr_t				 masked;

		dwin = &hwcfg->dma_translations[i];

		/* The base address must be device addressable */
		if ((dwin->base_addr & addr_mask) != dwin->base_addr)
			continue;

		/* The flags must match */
		if ((dwin->flags & flags) != flags)
			continue;

		/* The window must cover at least part of our addressable
		 * range */
		masked = (dwin->addr_mask | dwin->addrext_mask) & addr_mask;
		if (masked == 0)
			continue;

		/* Is this a better match? */
		if (match == NULL || masked > match_addr_mask) {
			match = dwin;
			match_addr_mask = masked;
			match_dmat = sc->bus_res->res->dma_tags[i];
		}
	}

	if (match == NULL || match_addr_mask == 0)
		return (ENOENT);

	if (dmat != NULL)
		*dmat = match_dmat;

	if (translation != NULL)
		*translation = *match;

	return (0);
}

/**
 * Default bhndb(4) implementation of BUS_GET_DMA_TAG().
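 *
 * A child driver sketch (hypothetical; for illustration only) that
 * prefers the translation API over the raw bridge tag, assuming 30-bit
 * device addressing:
 *
 *	bus_dma_tag_t dmat;
 *	struct bhnd_dma_translation dt;
 *
 *	if (bhnd_get_dma_translation(dev, BHND_DMA_ADDR_30BIT, 0, &dmat,
 *	    &dt) == 0)
 *		... derive the driver's DMA tag from dmat, apply dt ...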
*/ static bus_dma_tag_t bhndb_get_dma_tag(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); /* * A bridge may have multiple DMA translation descriptors, each with * their own incompatible restrictions; drivers should in general call * BHND_BUS_GET_DMA_TRANSLATION() to fetch both the best available DMA * translation, and its corresponding DMA tag. * * Child drivers that do not use BHND_BUS_GET_DMA_TRANSLATION() are * responsible for creating their own restricted DMA tag; since we * cannot do this for them in BUS_GET_DMA_TAG(), we simply return the * bridge parent's DMA tag directly; */ return (bus_get_dma_tag(sc->parent_dev)); } static device_method_t bhndb_methods[] = { /* Device interface */ \ DEVMETHOD(device_probe, bhndb_generic_probe), DEVMETHOD(device_detach, bhndb_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bhndb_generic_suspend), DEVMETHOD(device_resume, bhndb_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhndb_probe_nomatch), DEVMETHOD(bus_print_child, bhndb_print_child), DEVMETHOD(bus_child_pnpinfo_str, bhndb_child_pnpinfo_str), DEVMETHOD(bus_child_location_str, bhndb_child_location_str), DEVMETHOD(bus_add_child, bhndb_add_child), DEVMETHOD(bus_child_deleted, bhndb_child_deleted), DEVMETHOD(bus_alloc_resource, bhndb_alloc_resource), DEVMETHOD(bus_release_resource, bhndb_release_resource), DEVMETHOD(bus_activate_resource, bhndb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhndb_deactivate_resource), DEVMETHOD(bus_setup_intr, bhndb_setup_intr), DEVMETHOD(bus_teardown_intr, bhndb_teardown_intr), DEVMETHOD(bus_config_intr, bhndb_config_intr), DEVMETHOD(bus_bind_intr, bhndb_bind_intr), DEVMETHOD(bus_describe_intr, bhndb_describe_intr), DEVMETHOD(bus_remap_intr, bhndb_remap_intr), DEVMETHOD(bus_get_dma_tag, bhndb_get_dma_tag), DEVMETHOD(bus_adjust_resource, bhndb_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource_list, bhndb_get_resource_list), DEVMETHOD(bus_read_ivar, bhndb_read_ivar), DEVMETHOD(bus_write_ivar, bhndb_write_ivar), /* BHNDB interface */ DEVMETHOD(bhndb_get_chipid, bhndb_get_chipid), DEVMETHOD(bhndb_is_core_disabled, bhndb_is_core_disabled), DEVMETHOD(bhndb_get_hostb_core, bhndb_get_hostb_core), DEVMETHOD(bhndb_suspend_resource, bhndb_suspend_resource), DEVMETHOD(bhndb_resume_resource, bhndb_resume_resource), /* BHND interface */ DEVMETHOD(bhnd_bus_get_chipid, bhndb_get_chipid), DEVMETHOD(bhnd_bus_activate_resource, bhndb_activate_bhnd_resource), DEVMETHOD(bhnd_bus_deactivate_resource, bhndb_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var), DEVMETHOD(bhnd_bus_map_intr, bhndb_bhnd_map_intr), DEVMETHOD(bhnd_bus_unmap_intr, bhndb_bhnd_unmap_intr), DEVMETHOD(bhnd_bus_get_dma_translation, bhndb_get_dma_translation), DEVMETHOD(bhnd_bus_get_service_registry,bhndb_get_service_registry), DEVMETHOD(bhnd_bus_register_provider, bhnd_bus_generic_sr_register_provider), DEVMETHOD(bhnd_bus_deregister_provider, bhnd_bus_generic_sr_deregister_provider), DEVMETHOD(bhnd_bus_retain_provider, bhnd_bus_generic_sr_retain_provider), DEVMETHOD(bhnd_bus_release_provider, bhnd_bus_generic_sr_release_provider), DEVMETHOD(bhnd_bus_read_1, bhndb_bus_read_1), DEVMETHOD(bhnd_bus_read_2, bhndb_bus_read_2), DEVMETHOD(bhnd_bus_read_4, bhndb_bus_read_4), DEVMETHOD(bhnd_bus_write_1, 
bhndb_bus_write_1), DEVMETHOD(bhnd_bus_write_2, bhndb_bus_write_2), DEVMETHOD(bhnd_bus_write_4, bhndb_bus_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhndb_bus_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhndb_bus_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhndb_bus_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhndb_bus_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhndb_bus_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhndb_bus_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhndb_bus_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhndb_bus_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhndb_bus_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhndb_bus_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhndb_bus_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhndb_bus_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, bhndb_bus_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhndb_bus_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhndb_bus_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhndb_bus_write_multi_stream_1), DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2), DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier), DEVMETHOD_END }; devclass_t bhndb_devclass; DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc)); MODULE_VERSION(bhndb, 1); MODULE_DEPEND(bhndb, bhnd, 1, 1, 1); Index: head/sys/dev/bhnd/bhndb/bhndb_pci.c =================================================================== --- head/sys/dev/bhnd/bhndb/bhndb_pci.c (revision 326870) +++ head/sys/dev/bhnd/bhndb/bhndb_pci.c (revision 326871) @@ -1,1499 +1,1499 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * PCI-specific implementation for the BHNDB bridge driver. * * Provides support for bridging from a PCI parent bus to a BHND-compatible * bus (e.g. bcma or siba) via a Broadcom PCI core configured in end-point * mode. * * This driver handles all initial generic host-level PCI interactions with a * PCI/PCIe bridge core operating in endpoint mode. Once the bridged bhnd(4) * bus has been enumerated, this driver works in tandem with a core-specific * bhnd_pci_hostb driver to manage the PCI core. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_pwrctl_hostb_if.h" #include "bhndb_pcireg.h" #include "bhndb_pcivar.h" #include "bhndb_private.h" struct bhndb_pci_eio; static int bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc, int *msi_count); static int bhndb_pci_read_core_table(device_t dev, struct bhnd_chipid *chipid, struct bhnd_core_info **cores, u_int *ncores, bhnd_erom_class_t **eromcls); static int bhndb_pci_add_children(struct bhndb_pci_softc *sc); static bhnd_devclass_t bhndb_expected_pci_devclass(device_t dev); static bool bhndb_is_pcie_attached(device_t dev); static int bhndb_enable_pci_clocks(device_t dev); static int bhndb_disable_pci_clocks(device_t dev); static int bhndb_pci_compat_setregwin(device_t dev, device_t pci_dev, const struct bhndb_regwin *, bhnd_addr_t); static int bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev, const struct bhndb_regwin *, bhnd_addr_t); static void bhndb_pci_write_core(struct bhndb_pci_softc *sc, bus_size_t offset, uint32_t value, u_int width); static uint32_t bhndb_pci_read_core(struct bhndb_pci_softc *sc, bus_size_t offset, u_int width); static void bhndb_init_sromless_pci_config( struct bhndb_pci_softc *sc); static bus_addr_t bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc); static bus_size_t bhndb_pci_sprom_size(struct bhndb_pci_softc *sc); static int bhndb_pci_eio_init(struct bhndb_pci_eio *pio, device_t dev, device_t pci_dev, struct bhndb_host_resources *hr); static int bhndb_pci_eio_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, bhnd_size_t size); static uint32_t bhndb_pci_eio_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width); #define BHNDB_PCI_MSI_COUNT 1 static struct bhndb_pci_quirk bhndb_pci_quirks[]; static struct bhndb_pci_quirk bhndb_pcie_quirks[]; static struct bhndb_pci_quirk bhndb_pcie2_quirks[]; static struct bhndb_pci_core 
bhndb_pci_cores[] = { BHNDB_PCI_CORE(PCI, BHND_PCI_SRSH_PI_OFFSET, bhndb_pci_quirks), BHNDB_PCI_CORE(PCIE, BHND_PCIE_SRSH_PI_OFFSET, bhndb_pcie_quirks), BHNDB_PCI_CORE(PCIE2, BHND_PCIE_SRSH_PI_OFFSET, bhndb_pcie2_quirks), BHNDB_PCI_CORE_END }; /* bhndb_pci erom I/O instance state */ struct bhndb_pci_eio { struct bhnd_erom_io eio; device_t dev; /**< bridge device */ device_t pci_dev; /**< parent PCI device */ struct bhndb_host_resources *hr; /**< borrowed reference to host resources */ const struct bhndb_regwin *win; /**< mapped register window, or NULL */ struct resource *res; /**< resource containing the register window, or NULL if no window mapped */ bhnd_addr_t res_target; /**< current target address (if mapped) */ bool mapped; /**< true if a valid mapping exists, false otherwise */ bhnd_addr_t addr; /**< mapped address */ bhnd_size_t size; /**< mapped size */ }; static struct bhndb_pci_quirk bhndb_pci_quirks[] = { /* Backplane interrupt flags must be routed via siba-specific * SIBA_CFG0_INTVEC configuration register; the BHNDB_PCI_INT_MASK * PCI configuration register is unsupported. */ {{ BHND_MATCH_CHIP_TYPE (SIBA) }, { BHND_MATCH_CORE_REV (HWREV_LTE(5)) }, BHNDB_PCI_QUIRK_SIBA_INTVEC }, /* All PCI core revisions require the SRSH work-around */ BHNDB_PCI_QUIRK(HWREV_ANY, BHNDB_PCI_QUIRK_SRSH_WAR), BHNDB_PCI_QUIRK_END }; static struct bhndb_pci_quirk bhndb_pcie_quirks[] = { /* All PCIe-G1 core revisions require the SRSH work-around */ BHNDB_PCI_QUIRK(HWREV_ANY, BHNDB_PCI_QUIRK_SRSH_WAR), BHNDB_PCI_QUIRK_END }; static struct bhndb_pci_quirk bhndb_pcie2_quirks[] = { /* All PCIe-G2 core revisions require the SRSH work-around */ BHNDB_PCI_QUIRK(HWREV_ANY, BHNDB_PCI_QUIRK_SRSH_WAR), BHNDB_PCI_QUIRK_END }; /** * Return the device table entry for @p ci, or NULL if none. */ static struct bhndb_pci_core * bhndb_pci_find_core(struct bhnd_core_info *ci) { for (size_t i = 0; !BHNDB_PCI_IS_CORE_END(&bhndb_pci_cores[i]); i++) { struct bhndb_pci_core *entry = &bhndb_pci_cores[i]; if (bhnd_core_matches(ci, &entry->match)) return (entry); } return (NULL); } /** * Return all quirk flags for the given @p cid and @p ci. */ static uint32_t bhndb_pci_get_core_quirks(struct bhnd_chipid *cid, struct bhnd_core_info *ci) { struct bhndb_pci_core *entry; struct bhndb_pci_quirk *qtable; uint32_t quirks; quirks = 0; /* No core entry? */ if ((entry = bhndb_pci_find_core(ci)) == NULL) return (quirks); /* No quirks? */ if ((qtable = entry->quirks) == NULL) return (quirks); for (size_t i = 0; !BHNDB_PCI_IS_QUIRK_END(&qtable[i]); i++) { struct bhndb_pci_quirk *q = &qtable[i]; if (!bhnd_chip_matches(cid, &q->chip_desc)) continue; if (!bhnd_core_matches(ci, &q->core_desc)) continue; quirks |= q->quirks; } return (quirks); } /** * Default bhndb_pci implementation of device_probe(). * * Verifies that the parent is a PCI/PCIe device. */ static int bhndb_pci_probe(device_t dev) { struct bhnd_chipid cid; struct bhnd_core_info *cores, hostb_core; struct bhndb_pci_core *entry; bhnd_devclass_t hostb_devclass; u_int ncores; device_t parent; devclass_t parent_bus, pci; int error; cores = NULL; /* Our parent must be a PCI/PCIe device. 
*/ pci = devclass_find("pci"); parent = device_get_parent(dev); parent_bus = device_get_devclass(device_get_parent(parent)); if (parent_bus != pci) return (ENXIO); /* Enable clocks */ if ((error = bhndb_enable_pci_clocks(dev))) return (error); /* Identify the chip and enumerate the bridged cores */ error = bhndb_pci_read_core_table(dev, &cid, &cores, &ncores, NULL); if (error) goto cleanup; /* Search our core table for the host bridge core */ hostb_devclass = bhndb_expected_pci_devclass(dev); error = bhndb_find_hostb_core(cores, ncores, hostb_devclass, &hostb_core); if (error) goto cleanup; /* Look for a matching core table entry */ if ((entry = bhndb_pci_find_core(&hostb_core)) == NULL) { error = ENXIO; goto cleanup; } device_set_desc(dev, "PCI-BHND bridge"); /* fall-through */ error = BUS_PROBE_DEFAULT; cleanup: bhndb_disable_pci_clocks(dev); if (cores != NULL) free(cores, M_BHND); return (error); } /** * Attempt to allocate MSI interrupts, returning the count in @p msi_count * on success. */ static int bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc, int *msi_count) { int error, count; /* Is MSI available? */ if (pci_msi_count(sc->parent) < BHNDB_PCI_MSI_COUNT) return (ENXIO); /* Allocate expected message count */ count = BHNDB_PCI_MSI_COUNT; if ((error = pci_alloc_msi(sc->parent, &count))) { device_printf(sc->dev, "failed to allocate MSI interrupts: " "%d\n", error); return (error); } if (count < BHNDB_PCI_MSI_COUNT) { pci_release_msi(sc->parent); return (ENXIO); } *msi_count = count; return (0); } static int bhndb_pci_attach(device_t dev) { struct bhndb_pci_softc *sc; struct bhnd_chipid cid; struct bhnd_core_info *cores, hostb_core; bhnd_erom_class_t *erom_class; u_int ncores; int irq_rid; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent = device_get_parent(dev); sc->pci_devclass = bhndb_expected_pci_devclass(dev); sc->pci_quirks = 0; sc->set_regwin = NULL; BHNDB_PCI_LOCK_INIT(sc); cores = NULL; /* Enable PCI bus mastering */ pci_enable_busmaster(sc->parent); /* Set up PCI interrupt handling */ if (bhndb_pci_alloc_msi(sc, &sc->msi_count) == 0) { /* MSI uses resource IDs starting at 1 */ irq_rid = 1; device_printf(dev, "Using MSI interrupts on %s\n", device_get_nameunit(sc->parent)); } else { sc->msi_count = 0; irq_rid = 0; device_printf(dev, "Using INTx interrupts on %s\n", device_get_nameunit(sc->parent)); } sc->isrc = bhndb_alloc_intr_isrc(sc->parent, irq_rid, 0, RM_MAX_END, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->isrc == NULL) { device_printf(sc->dev, "failed to allocate interrupt " "resource\n"); error = ENXIO; goto cleanup; } /* Enable clocks (if required by this hardware) */ if ((error = bhndb_enable_pci_clocks(sc->dev))) goto cleanup; /* Identify the chip and enumerate the bridged cores */ error = bhndb_pci_read_core_table(dev, &cid, &cores, &ncores, &erom_class); if (error) goto cleanup; /* Select the appropriate register window handler */ if (cid.chip_type == BHND_CHIPTYPE_SIBA) { sc->set_regwin = bhndb_pci_compat_setregwin; } else { sc->set_regwin = bhndb_pci_fast_setregwin; } /* Determine our host bridge core and populate our quirk flags */ error = bhndb_find_hostb_core(cores, ncores, sc->pci_devclass, &hostb_core); if (error) goto cleanup; sc->pci_quirks = bhndb_pci_get_core_quirks(&cid, &hostb_core); /* Perform bridge attach */ error = bhndb_attach(dev, &cid, cores, ncores, &hostb_core, erom_class); if (error) goto cleanup; /* Fix-up power on defaults for SROM-less devices. 
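	 * On such devices, the BAR0 register windows may point at the wrong
	 * core at power-on; see bhndb_init_sromless_pci_config() below.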
*/ bhndb_init_sromless_pci_config(sc); /* Add any additional child devices */ if ((error = bhndb_pci_add_children(sc))) goto cleanup; /* Probe and attach our children */ if ((error = bus_generic_attach(dev))) goto cleanup; free(cores, M_BHND); return (0); cleanup: device_delete_children(dev); bhndb_disable_pci_clocks(sc->dev); if (sc->isrc != NULL) bhndb_free_intr_isrc(sc->isrc); if (sc->msi_count > 0) pci_release_msi(sc->parent); if (cores != NULL) free(cores, M_BHND); pci_disable_busmaster(sc->parent); BHNDB_PCI_LOCK_DESTROY(sc); return (error); } static int bhndb_pci_detach(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); /* Attempt to detach our children */ if ((error = bus_generic_detach(dev))) return (error); /* Perform generic bridge detach */ if ((error = bhndb_generic_detach(dev))) return (error); /* Disable clocks (if required by this hardware) */ if ((error = bhndb_disable_pci_clocks(sc->dev))) return (error); /* Free our interrupt resources */ bhndb_free_intr_isrc(sc->isrc); /* Release MSI interrupts */ if (sc->msi_count > 0) pci_release_msi(sc->parent); /* Disable PCI bus mastering */ pci_disable_busmaster(sc->parent); BHNDB_PCI_LOCK_DESTROY(sc); return (0); } /** * Use the generic PCI bridge hardware configuration to enumerate the bridged * bhnd(4) bus' core table. * * @note This function may be safely called prior to device attach, (e.g. * from DEVICE_PROBE). * @note This function requires exclusive ownership over allocating and * configuring host bridge resources, and should only be called prior to * completion of device attach and full configuration of the bridge. * * @param dev The bhndb_pci bridge device. * @param[out] chipid On success, the parsed chip identification. * @param[out] cores On success, the enumerated core table. The * caller is responsible for freeing this table via * bhndb_pci_free_core_table(). * @param[out] ncores On success, the number of cores found in * @p cores. * @param[out] eromcls On success, a pointer to the erom class used to * parse the device enumeration table. This * argument may be NULL if the class is not * desired. * * @retval 0 success * @retval non-zero if enumerating the bridged bhnd(4) bus fails, a regular * unix error code will be returned. 
*/ static int bhndb_pci_read_core_table(device_t dev, struct bhnd_chipid *chipid, struct bhnd_core_info **cores, u_int *ncores, bhnd_erom_class_t **eromcls) { const struct bhndb_hwcfg *cfg; struct bhndb_host_resources *hr; struct bhndb_pci_eio pio; struct bhnd_core_info *erom_cores; const struct bhnd_chipid *hint; struct bhnd_chipid cid; bhnd_erom_class_t *erom_class; bhnd_erom_t *erom; device_t parent_dev; u_int erom_ncores; int error; parent_dev = device_get_parent(dev); erom = NULL; erom_cores = NULL; /* Fetch our chipid hint (if any) and generic hardware configuration */ cfg = BHNDB_BUS_GET_GENERIC_HWCFG(parent_dev, dev); hint = BHNDB_BUS_GET_CHIPID(parent_dev, dev); /* Allocate our host resources */ if ((error = bhndb_alloc_host_resources(&hr, dev, parent_dev, cfg))) return (error); /* Initialize our erom I/O state */ if ((error = bhndb_pci_eio_init(&pio, dev, parent_dev, hr))) goto failed; /* Map the first bus core from our bridged bhnd(4) bus */ error = bhndb_pci_eio_map(&pio.eio, BHND_DEFAULT_CHIPC_ADDR, BHND_DEFAULT_CORE_SIZE); if (error) goto failed; /* Probe for a usable EROM class, and read the chip identifier */ erom_class = bhnd_erom_probe_driver_classes(device_get_devclass(dev), &pio.eio, hint, &cid); if (erom_class == NULL) { device_printf(dev, "device enumeration unsupported; no " "compatible driver found\n"); error = ENXIO; goto failed; } /* Allocate EROM parser */ if ((erom = bhnd_erom_alloc(erom_class, &cid, &pio.eio)) == NULL) { device_printf(dev, "failed to allocate device enumeration " "table parser\n"); error = ENXIO; goto failed; } /* Read the full core table */ error = bhnd_erom_get_core_table(erom, &erom_cores, &erom_ncores); if (error) { device_printf(dev, "error fetching core table: %d\n", error); goto failed; } /* Provide the results to our caller */ *cores = malloc(sizeof(erom_cores[0]) * erom_ncores, M_BHND, M_WAITOK); memcpy(*cores, erom_cores, sizeof(erom_cores[0]) * erom_ncores); *ncores = erom_ncores; *chipid = cid; if (eromcls != NULL) *eromcls = erom_class; /* Clean up */ bhnd_erom_free_core_table(erom, erom_cores); bhnd_erom_free(erom); bhndb_release_host_resources(hr); return (0); failed: if (erom_cores != NULL) bhnd_erom_free_core_table(erom, erom_cores); if (erom != NULL) bhnd_erom_free(erom); bhndb_release_host_resources(hr); return (error); } static int bhndb_pci_add_children(struct bhndb_pci_softc *sc) { bus_size_t nv_sz; int error; /** * If SPROM is mapped directly into BAR0, add child NVRAM * device. */ nv_sz = bhndb_pci_sprom_size(sc); if (nv_sz > 0) { struct bhndb_devinfo *dinfo; device_t child; if (bootverbose) { device_printf(sc->dev, "found SPROM (%ju bytes)\n", (uintmax_t)nv_sz); } /* Add sprom device, ordered early enough to be available * before the bridged bhnd(4) bus is attached. */ child = BUS_ADD_CHILD(sc->dev, BHND_PROBE_ROOT + BHND_PROBE_ORDER_EARLY, "bhnd_nvram", -1); if (child == NULL) { device_printf(sc->dev, "failed to add sprom device\n"); return (ENXIO); } /* Initialize device address space and resource covering the * BAR0 SPROM shadow. 
*/ dinfo = device_get_ivars(child); dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; error = bus_set_resource(child, SYS_RES_MEMORY, 0, bhndb_pci_sprom_addr(sc), nv_sz); if (error) { device_printf(sc->dev, "failed to register sprom resources\n"); return (error); } } return (0); } static const struct bhndb_regwin * bhndb_pci_sprom_regwin(struct bhndb_pci_softc *sc) { struct bhndb_resources *bres; const struct bhndb_hwcfg *cfg; const struct bhndb_regwin *sprom_win; bres = sc->bhndb.bus_res; cfg = bres->cfg; sprom_win = bhndb_regwin_find_type(cfg->register_windows, BHNDB_REGWIN_T_SPROM, BHNDB_PCI_V0_BAR0_SPROM_SIZE); return (sprom_win); } static bus_addr_t bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc) { const struct bhndb_regwin *sprom_win; struct resource *r; /* Fetch the SPROM register window */ sprom_win = bhndb_pci_sprom_regwin(sc); KASSERT(sprom_win != NULL, ("requested sprom address on PCI_V2+")); /* Fetch the associated resource */ r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, sprom_win); KASSERT(r != NULL, ("missing resource for sprom window\n")); return (rman_get_start(r) + sprom_win->win_offset); } static bus_size_t bhndb_pci_sprom_size(struct bhndb_pci_softc *sc) { const struct bhndb_regwin *sprom_win; uint32_t sctl; bus_size_t sprom_sz; sprom_win = bhndb_pci_sprom_regwin(sc); /* PCI_V2 and later devices map SPROM/OTP via ChipCommon */ if (sprom_win == NULL) return (0); /* Determine SPROM size */ sctl = pci_read_config(sc->parent, BHNDB_PCI_SPROM_CONTROL, 4); if (sctl & BHNDB_PCI_SPROM_BLANK) return (0); switch (sctl & BHNDB_PCI_SPROM_SZ_MASK) { case BHNDB_PCI_SPROM_SZ_1KB: sprom_sz = (1 * 1024); break; case BHNDB_PCI_SPROM_SZ_4KB: sprom_sz = (4 * 1024); break; case BHNDB_PCI_SPROM_SZ_16KB: sprom_sz = (16 * 1024); break; case BHNDB_PCI_SPROM_SZ_RESERVED: default: device_printf(sc->dev, "invalid PCI sprom size 0x%x\n", sctl); return (0); } if (sprom_sz > sprom_win->win_size) { device_printf(sc->dev, "PCI sprom size (0x%x) overruns defined register window\n", sctl); return (0); } return (sprom_sz); } /** * Return the host resource providing a static mapping of the PCI core's * registers. * * @param sc bhndb PCI driver state. * @param offset The required readable offset within the PCI core * register block. * @param size The required readable size at @p offset. * @param[out] res On success, the host resource containing our PCI * core's register window. * @param[out] res_offset On success, the @p offset relative to @p res. * * @retval 0 success * @retval ENXIO if a valid static register window mapping the PCI core * registers is not available. 
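 *
 * Usage sketch (mirroring bhndb_pci_read_core() below):
 *
 *	struct resource *r;
 *	bus_size_t r_offset;
 *
 *	if (bhndb_pci_get_core_regs(sc, offset, 4, &r, &r_offset) == 0)
 *		value = bus_read_4(r, r_offset);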
*/ static int bhndb_pci_get_core_regs(struct bhndb_pci_softc *sc, bus_size_t offset, bus_size_t size, struct resource **res, bus_size_t *res_offset) { const struct bhndb_regwin *win; struct resource *r; /* Locate the static register window mapping the requested offset */ win = bhndb_regwin_find_core(sc->bhndb.bus_res->cfg->register_windows, sc->pci_devclass, 0, BHND_PORT_DEVICE, 0, 0, offset, size); if (win == NULL) { device_printf(sc->dev, "missing PCI core register window\n"); return (ENXIO); } /* Fetch the resource containing the register window */ r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, win); if (r == NULL) { device_printf(sc->dev, "missing PCI core register resource\n"); return (ENXIO); } KASSERT(offset >= win->d.core.offset, ("offset %#jx outside of " "register window", (uintmax_t)offset)); *res = r; *res_offset = win->win_offset + (offset - win->d.core.offset); return (0); } /** * Write a 1, 2, or 4 byte data item to the PCI core's registers at @p offset. * * @param sc bhndb PCI driver state. * @param offset register write offset. * @param value value to be written. * @param width item width (1, 2, or 4 bytes). */ static void bhndb_pci_write_core(struct bhndb_pci_softc *sc, bus_size_t offset, uint32_t value, u_int width) { struct resource *r; bus_size_t r_offset; int error; error = bhndb_pci_get_core_regs(sc, offset, width, &r, &r_offset); if (error) { panic("no PCI register window mapping %#jx+%#x: %d", (uintmax_t)offset, width, error); } switch (width) { case 1: bus_write_1(r, r_offset, value); break; case 2: bus_write_2(r, r_offset, value); break; case 4: bus_write_4(r, r_offset, value); break; default: panic("invalid width: %u", width); } } /** * Read a 1, 2, or 4 byte data item from the PCI core's registers * at @p offset. * * @param sc bhndb PCI driver state. * @param offset register read offset. * @param width item width (1, 2, or 4 bytes). */ static uint32_t bhndb_pci_read_core(struct bhndb_pci_softc *sc, bus_size_t offset, u_int width) { struct resource *r; bus_size_t r_offset; int error; error = bhndb_pci_get_core_regs(sc, offset, width, &r, &r_offset); if (error) { panic("no PCI register window mapping %#jx+%#x: %d", (uintmax_t)offset, width, error); } switch (width) { case 1: return (bus_read_1(r, r_offset)); case 2: return (bus_read_2(r, r_offset)); case 4: return (bus_read_4(r, r_offset)); default: panic("invalid width: %u", width); } } /* * On devices without a SROM, the PCI(e) cores will be initialized with * their Power-on-Reset defaults; this can leave two of the BAR0 PCI windows * mapped to the wrong core. * * This function updates the SROM shadow to point the BAR0 windows at the * current PCI core. * * Applies to all PCI/PCIe revisions. 
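 *
 * Conceptually, the fix is a read-modify-write of the SROM shadow's
 * core-index field (sketch, using the SRSH definitions referenced in the
 * function body below):
 *
 *	val &= ~BHND_PCI_SRSH_PI_MASK;
 *	val |= (pci_cidx << BHND_PCI_SRSH_PI_SHIFT);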
*/ static void bhndb_init_sromless_pci_config(struct bhndb_pci_softc *sc) { const struct bhndb_pci_core *pci_core; bus_size_t srsh_offset; u_int pci_cidx, sprom_cidx; uint16_t val; if ((sc->pci_quirks & BHNDB_PCI_QUIRK_SRSH_WAR) == 0) return; /* Determine the correct register offset for our PCI core */ pci_core = bhndb_pci_find_core(&sc->bhndb.bridge_core); KASSERT(pci_core != NULL, ("missing core table entry")); srsh_offset = pci_core->srsh_offset; /* Fetch the SPROM's configured core index */ val = bhndb_pci_read_core(sc, srsh_offset, sizeof(val)); sprom_cidx = (val & BHND_PCI_SRSH_PI_MASK) >> BHND_PCI_SRSH_PI_SHIFT; /* If it doesn't match host bridge's core index, update the index * value */ pci_cidx = sc->bhndb.bridge_core.core_idx; if (sprom_cidx != pci_cidx) { val &= ~BHND_PCI_SRSH_PI_MASK; val |= (pci_cidx << BHND_PCI_SRSH_PI_SHIFT); bhndb_pci_write_core(sc, srsh_offset, val, sizeof(val)); } } static int bhndb_pci_resume(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); /* Enable clocks (if supported by this hardware) */ if ((error = bhndb_enable_pci_clocks(sc->dev))) return (error); /* Perform resume */ return (bhndb_generic_resume(dev)); } static int bhndb_pci_suspend(device_t dev) { struct bhndb_pci_softc *sc; int error; sc = device_get_softc(dev); /* Disable clocks (if supported by this hardware) */ if ((error = bhndb_disable_pci_clocks(sc->dev))) return (error); /* Perform suspend */ return (bhndb_generic_suspend(dev)); } static int bhndb_pci_set_window_addr(device_t dev, const struct bhndb_regwin *rw, bhnd_addr_t addr) { struct bhndb_pci_softc *sc = device_get_softc(dev); return (sc->set_regwin(sc->dev, sc->parent, rw, addr)); } /** * A siba(4) and bcma(4)-compatible bhndb_set_window_addr implementation. * * On siba(4) devices, it's possible that writing a PCI window register may * not succeed; it's necessary to immediately read the configuration register * and retry if not set to the desired value. * * This is not necessary on bcma(4) devices, but other than the overhead of * validating the register, there's no harm in performing the verification. */ static int bhndb_pci_compat_setregwin(device_t dev, device_t pci_dev, const struct bhndb_regwin *rw, bhnd_addr_t addr) { int error; int reg; if (rw->win_type != BHNDB_REGWIN_T_DYN) return (ENODEV); reg = rw->d.dyn.cfg_offset; for (u_int i = 0; i < BHNDB_PCI_BARCTRL_WRITE_RETRY; i++) { if ((error = bhndb_pci_fast_setregwin(dev, pci_dev, rw, addr))) return (error); if (pci_read_config(pci_dev, reg, 4) == addr) return (0); DELAY(10); } /* Unable to set window */ return (ENODEV); } /** * A bcma(4)-only bhndb_set_window_addr implementation. */ static int bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev, const struct bhndb_regwin *rw, bhnd_addr_t addr) { /* The PCI bridge core only supports 32-bit addressing, regardless * of the bus' support for 64-bit addressing */ if (addr > UINT32_MAX) return (ERANGE); switch (rw->win_type) { case BHNDB_REGWIN_T_DYN: /* Addresses must be page aligned */ if (addr % rw->win_size != 0) return (EINVAL); pci_write_config(pci_dev, rw->d.dyn.cfg_offset, addr, 4); break; default: return (ENODEV); } return (0); } static int bhndb_pci_populate_board_info(device_t dev, device_t child, struct bhnd_board_info *info) { struct bhndb_pci_softc *sc; sc = device_get_softc(dev); /* * On a subset of Apple BCM4360 modules, always prefer the * PCI subdevice to the SPROM-supplied boardtype. 
* * TODO: * * Broadcom's own drivers implement this override, and then later use * the remapped BCM4360 board type to determine the required * board-specific workarounds. * * Without access to this hardware, it's unclear why this mapping * is done, and we must do the same. If we can survey the hardware * in question, it may be possible to replace this behavior with * explicit references to the SPROM-supplied boardtype(s) in our * quirk definitions. */ if (pci_get_subvendor(sc->parent) == PCI_VENDOR_APPLE) { switch (info->board_type) { case BHND_BOARD_BCM94360X29C: case BHND_BOARD_BCM94360X29CP2: case BHND_BOARD_BCM94360X51: case BHND_BOARD_BCM94360X51P2: info->board_type = 0; /* allow override below */ break; default: break; } } /* If NVRAM did not supply vendor/type/devid info, provide the PCI * subvendor/subdevice/device values. */ if (info->board_vendor == 0) info->board_vendor = pci_get_subvendor(sc->parent); if (info->board_type == 0) info->board_type = pci_get_subdevice(sc->parent); if (info->board_devid == 0) info->board_devid = pci_get_device(sc->parent); return (0); } /** * Examine the bridge device @p dev and return the expected host bridge * device class. * * @param dev The bhndb bridge device */ static bhnd_devclass_t bhndb_expected_pci_devclass(device_t dev) { if (bhndb_is_pcie_attached(dev)) return (BHND_DEVCLASS_PCIE); else return (BHND_DEVCLASS_PCI); } /** * Return true if the bridge device @p dev is attached via PCIe, * false otherwise. * * @param dev The bhndb bridge device */ static bool bhndb_is_pcie_attached(device_t dev) { int reg; if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, &reg) == 0) return (true); return (false); } /** * Enable externally managed clocks, if required. * * Some PCI chipsets (BCM4306, possibly others) do not support * the idle low-power clock. Clocking must be bootstrapped at * attach/resume by directly adjusting GPIO registers exposed in the * PCI config space, and correspondingly, explicitly shut down at * detach/suspend. * * @note This function may be safely called prior to device attach (e.g. * from DEVICE_PROBE). * * @param dev The bhndb bridge device */ static int bhndb_enable_pci_clocks(device_t dev) { device_t pci_dev; uint32_t gpio_in, gpio_out, gpio_en; uint32_t gpio_flags; uint16_t pci_status; pci_dev = device_get_parent(dev); /* Only supported and required on PCI devices */ if (bhndb_is_pcie_attached(dev)) return (0); /* Read state of XTAL pin */ gpio_in = pci_read_config(pci_dev, BHNDB_PCI_GPIO_IN, 4); if (gpio_in & BHNDB_PCI_GPIO_XTAL_ON) return (0); /* already enabled */ /* Fetch current config */ gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4); gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4); /* Set PLL_OFF/XTAL_ON pins to HIGH and enable both pins */ gpio_flags = (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON); gpio_out |= gpio_flags; gpio_en |= gpio_flags; pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4); pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4); DELAY(1000); /* Reset PLL_OFF */ gpio_out &= ~BHNDB_PCI_GPIO_PLL_OFF; pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4); DELAY(5000); /* Clear any PCI 'sent target-abort' flag. */ pci_status = pci_read_config(pci_dev, PCIR_STATUS, 2); pci_status &= ~PCIM_STATUS_STABORT; pci_write_config(pci_dev, PCIR_STATUS, pci_status, 2); return (0); } /** * Disable externally managed clocks, if required. * * This function may be safely called prior to device attach (e.g. * from DEVICE_PROBE).
* * @param dev The bhndb bridge device */ static int bhndb_disable_pci_clocks(device_t dev) { device_t pci_dev; uint32_t gpio_out, gpio_en; pci_dev = device_get_parent(dev); /* Only supported and required on PCI devices */ if (bhndb_is_pcie_attached(dev)) return (0); /* Fetch current config */ gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4); gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4); /* Set PLL_OFF to HIGH, XTAL_ON to LOW. */ gpio_out &= ~BHNDB_PCI_GPIO_XTAL_ON; gpio_out |= BHNDB_PCI_GPIO_PLL_OFF; pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4); /* Enable both output pins */ gpio_en |= (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON); pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4); return (0); } static bhnd_clksrc bhndb_pci_pwrctl_get_clksrc(device_t dev, device_t child, bhnd_clock clock) { struct bhndb_pci_softc *sc; uint32_t gpio_out; sc = device_get_softc(dev); /* Only supported on PCI devices */ if (bhndb_is_pcie_attached(sc->dev)) return (BHND_CLKSRC_UNKNOWN); /* Only ILP is supported */ if (clock != BHND_CLOCK_ILP) return (BHND_CLKSRC_UNKNOWN); gpio_out = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUT, 4); if (gpio_out & BHNDB_PCI_GPIO_SCS) return (BHND_CLKSRC_PCI); else return (BHND_CLKSRC_XTAL); } static int bhndb_pci_pwrctl_gate_clock(device_t dev, device_t child, bhnd_clock clock) { struct bhndb_pci_softc *sc = device_get_softc(dev); /* Only supported on PCI devices */ if (bhndb_is_pcie_attached(sc->dev)) return (ENODEV); /* Only HT is supported */ if (clock != BHND_CLOCK_HT) return (ENXIO); return (bhndb_disable_pci_clocks(sc->dev)); } static int bhndb_pci_pwrctl_ungate_clock(device_t dev, device_t child, bhnd_clock clock) { struct bhndb_pci_softc *sc = device_get_softc(dev); /* Only supported on PCI devices */ if (bhndb_is_pcie_attached(sc->dev)) return (ENODEV); /* Only HT is supported */ if (clock != BHND_CLOCK_HT) return (ENXIO); return (bhndb_enable_pci_clocks(sc->dev)); } /** * BHNDB_MAP_INTR_ISRC() */ static int bhndb_pci_map_intr_isrc(device_t dev, struct resource *irq, struct bhndb_intr_isrc **isrc) { struct bhndb_pci_softc *sc = device_get_softc(dev); /* There's only one bridged interrupt to choose from */ *isrc = sc->isrc; return (0); } /* siba-specific implementation of BHNDB_ROUTE_INTERRUPTS() */ static int bhndb_pci_route_siba_interrupts(struct bhndb_pci_softc *sc, device_t child) { uint32_t sbintvec; u_int ivec; int error; KASSERT(sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC, ("route_siba_interrupts not supported by this hardware")); /* Fetch the sbflag# for the child */ if ((error = bhnd_get_intr_ivec(child, 0, &ivec))) return (error); if (ivec > (sizeof(sbintvec)*8) - 1 /* aka '31' */) { /* This should never be an issue in practice */ device_printf(sc->dev, "cannot route interrupts to high " "sbflag# %u\n", ivec); return (ENXIO); } BHNDB_PCI_LOCK(sc); sbintvec = bhndb_pci_read_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), 4); sbintvec |= (1 << ivec); bhndb_pci_write_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), sbintvec, 4); BHNDB_PCI_UNLOCK(sc); return (0); } /* BHNDB_ROUTE_INTERRUPTS() */ static int bhndb_pci_route_interrupts(device_t dev, device_t child) { struct bhndb_pci_softc *sc; struct bhnd_core_info core; uint32_t core_bit; uint32_t intmask; sc = device_get_softc(dev); if (sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC) return (bhndb_pci_route_siba_interrupts(sc, child)); core = bhnd_get_core_info(child); if (core.core_idx > BHNDB_PCI_SBIM_COREIDX_MAX) { /* This should never be an issue in practice */ 
device_printf(dev, "cannot route interrupts to high core " "index %u\n", core.core_idx); return (ENXIO); } BHNDB_PCI_LOCK(sc); core_bit = (1<<core.core_idx) << BHNDB_PCI_SBIM_SHIFT; intmask = pci_read_config(sc->parent, BHNDB_PCI_INT_MASK, 4); intmask |= core_bit; pci_write_config(sc->parent, BHNDB_PCI_INT_MASK, intmask, 4); BHNDB_PCI_UNLOCK(sc); return (0); } /** * Initialize a new bhndb PCI bridge EROM I/O instance. This EROM I/O * implementation supports mapping of the device enumeration table via the * @p hr host resources. * * @param pio The instance to be initialized. * @param dev The bridge device. * @param pci_dev The bridge's parent PCI device. * @param hr The host resources to be used to map the device * enumeration table. */ static int bhndb_pci_eio_init(struct bhndb_pci_eio *pio, device_t dev, device_t pci_dev, struct bhndb_host_resources *hr) { - memset(&pio->eio, sizeof(pio->eio), 0); + memset(&pio->eio, 0, sizeof(pio->eio)); pio->eio.map = bhndb_pci_eio_map; pio->eio.read = bhndb_pci_eio_read; pio->eio.fini = NULL; pio->dev = dev; pio->pci_dev = pci_dev; pio->hr = hr; pio->win = NULL; pio->res = NULL; return (0); } /** * Attempt to adjust the dynamic register window backing @p pio to permit * reading @p size bytes at @p addr. * * If @p addr or @p size fall outside the existing mapped range, or if * @p pio is not backed by a dynamic register window, ENXIO will be returned. * * @param pio The bhndb PCI erom I/O state to be modified. * @param addr The address to be included in the mapped range. * @param size The number of bytes at @p addr to be mapped. */ static int bhndb_pci_eio_adjust_mapping(struct bhndb_pci_eio *pio, bhnd_addr_t addr, bhnd_size_t size) { bhnd_addr_t target; bhnd_size_t offset; int error; KASSERT(pio->win != NULL, ("missing register window")); KASSERT(pio->res != NULL, ("missing regwin resource")); KASSERT(pio->win->win_type == BHNDB_REGWIN_T_DYN, ("unexpected window type %d", pio->win->win_type)); /* The requested subrange must fall within the total mapped range */ if (addr < pio->addr || (addr - pio->addr) > pio->size || size > pio->size || pio->size - (addr - pio->addr) < size) { return (ENXIO); } /* Do we already have a useable mapping?
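 *
 * (For example, with a 4KB dynamic window remapped to res_target
 * 0x18000000, a 4-byte read at 0x18000ffc is satisfied in-window, while
 * a read at 0x18001000 falls past the window's end and forces another
 * remap; addresses are illustrative.)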
*/ if (addr >= pio->res_target && addr <= pio->res_target + pio->win->win_size && (pio->res_target + pio->win->win_size) - addr >= size) { return (0); } /* Page-align the target address */ offset = addr % pio->win->win_size; target = addr - offset; /* Configure the register window */ error = bhndb_pci_compat_setregwin(pio->dev, pio->pci_dev, pio->win, target); if (error) { device_printf(pio->dev, "failed to configure dynamic register " "window: %d\n", error); return (error); } pio->res_target = target; return (0); } /* bhnd_erom_io_map() implementation */ static int bhndb_pci_eio_map(struct bhnd_erom_io *eio, bhnd_addr_t addr, bhnd_size_t size) { struct bhndb_pci_eio *pio; const struct bhndb_regwin *regwin; struct resource *r; bhnd_addr_t target; bhnd_size_t offset; int error; pio = (struct bhndb_pci_eio *)eio; /* Locate a useable dynamic register window */ regwin = bhndb_regwin_find_type(pio->hr->cfg->register_windows, BHNDB_REGWIN_T_DYN, MIN(size, BHND_DEFAULT_CORE_SIZE)); if (regwin == NULL) { device_printf(pio->dev, "unable to map %#jx+%#jx; no " "usable dynamic register window found\n", addr, size); return (ENXIO); } /* Locate the host resource mapping our register window */ if ((r = bhndb_host_resource_for_regwin(pio->hr, regwin)) == NULL) { device_printf(pio->dev, "unable to map %#jx+%#jx; no " "usable register resource found\n", addr, size); return (ENXIO); } /* Page-align the target address */ offset = addr % regwin->win_size; target = addr - offset; /* Configure the register window */ error = bhndb_pci_compat_setregwin(pio->dev, pio->pci_dev, regwin, target); if (error) { device_printf(pio->dev, "failed to configure dynamic register " "window: %d\n", error); return (error); } /* Update our mapping state */ pio->win = regwin; pio->res = r; pio->addr = addr; pio->size = size; pio->res_target = target; return (0); } /* bhnd_erom_io_read() implementation */ static uint32_t bhndb_pci_eio_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width) { struct bhndb_pci_eio *pio; bhnd_addr_t addr; bus_size_t res_offset; int error; pio = (struct bhndb_pci_eio *)eio; /* Calculate absolute address */ if (BHND_SIZE_MAX - offset < pio->addr) { device_printf(pio->dev, "invalid offset %#jx+%#jx\n", pio->addr, offset); return (UINT32_MAX); } addr = pio->addr + offset; /* Adjust the mapping for our read */ if ((error = bhndb_pci_eio_adjust_mapping(pio, addr, width))) { device_printf(pio->dev, "failed to adjust register mapping: " "%d\n", error); return (UINT32_MAX); } KASSERT(pio->res_target <= addr, ("invalid mapping (%#jx vs. 
%#jx)", pio->res_target, addr)); /* Determine the actual read offset within our register window * resource */ res_offset = (addr - pio->res_target) + pio->win->win_offset; /* Perform our read */ switch (width) { case 1: return (bus_read_1(pio->res, res_offset)); case 2: return (bus_read_2(pio->res, res_offset)); case 4: return (bus_read_4(pio->res, res_offset)); default: panic("unsupported width: %u", width); } } static device_method_t bhndb_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bhndb_pci_probe), DEVMETHOD(device_attach, bhndb_pci_attach), DEVMETHOD(device_resume, bhndb_pci_resume), DEVMETHOD(device_suspend, bhndb_pci_suspend), DEVMETHOD(device_detach, bhndb_pci_detach), /* BHNDB interface */ DEVMETHOD(bhndb_set_window_addr, bhndb_pci_set_window_addr), DEVMETHOD(bhndb_populate_board_info, bhndb_pci_populate_board_info), DEVMETHOD(bhndb_map_intr_isrc, bhndb_pci_map_intr_isrc), DEVMETHOD(bhndb_route_interrupts, bhndb_pci_route_interrupts), /* BHND PWRCTL hostb interface */ DEVMETHOD(bhnd_pwrctl_hostb_get_clksrc, bhndb_pci_pwrctl_get_clksrc), DEVMETHOD(bhnd_pwrctl_hostb_gate_clock, bhndb_pci_pwrctl_gate_clock), DEVMETHOD(bhnd_pwrctl_hostb_ungate_clock, bhndb_pci_pwrctl_ungate_clock), DEVMETHOD_END }; DEFINE_CLASS_1(bhndb, bhndb_pci_driver, bhndb_pci_methods, sizeof(struct bhndb_pci_softc), bhndb_driver); MODULE_VERSION(bhndb_pci, 1); MODULE_DEPEND(bhndb_pci, bhnd_pci_hostb, 1, 1, 1); MODULE_DEPEND(bhndb_pci, pci, 1, 1, 1); MODULE_DEPEND(bhndb_pci, bhndb, 1, 1, 1); MODULE_DEPEND(bhndb_pci, bhnd, 1, 1, 1); Index: head/sys/dev/bhnd/cores/pmu/bhnd_pmureg.h =================================================================== --- head/sys/dev/bhnd/cores/pmu/bhnd_pmureg.h (revision 326870) +++ head/sys/dev/bhnd/cores/pmu/bhnd_pmureg.h (revision 326871) @@ -1,732 +1,732 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2010 Broadcom Corporation * All rights reserved. * * This file is derived from the sbchipc.h header contributed by Broadcom * to the Linux staging repository, as well as later revisions of sbchipc.h * distributed with the Asus RT-N16 firmware source code release. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $FreeBSD$ */ #ifndef _BHND_CORES_PMU_BHND_PMUREG_H_ #define _BHND_CORES_PMU_BHND_PMUREG_H_ #define BHND_PMU_GET_FLAG(_value, _flag) \ (((_value) & _flag) != 0) #define BHND_PMU_GET_BITS(_value, _field) \ (((_value) & _field ## _MASK) >> _field ## _SHIFT) #define BHND_PMU_SET_BITS(_value, _field) \ (((_value) << _field ## _SHIFT) & _field ## _MASK) #define BHND_PMU_ILP_CLOCK 32000 /**< default ILP freq */ #define BHND_PMU_ALP_CLOCK 20000000 /**< default ALP freq */ #define BHND_PMU_HT_CLOCK 80000000 /**< default HT freq */ /** * Common per-core clock control/status register available on PMU-equipped * devices.
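 *
 * Fields in the registers below are typically decoded with the
 * BHND_PMU_GET_BITS() helper defined above; a minimal sketch (`r' is an
 * already-mapped bhnd resource):
 *
 *	uint32_t ccs, ersrc;
 *
 *	ccs = bhnd_bus_read_4(r, BHND_CLK_CTL_ST);
 *	ersrc = BHND_PMU_GET_BITS(ccs, BHND_CCS_ERSRC_REQ);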
*/ #define BHND_CLK_CTL_ST 0x1e0 /**< clock control and status */ /* * BHND_CLK_CTL_ST register * * Clock Mode Name Description * High Throughput (HT) Full bandwidth, low latency. Generally supplied * from PLL. * Active Low Power (ALP) Register access, low speed DMA. * Idle Low Power (ILP) No interconnect activity, or if long latency * is permitted. */ #define BHND_CCS_FORCEALP 0x00000001 /**< force ALP request */ #define BHND_CCS_FORCEHT 0x00000002 /**< force HT request */ #define BHND_CCS_FORCEILP 0x00000004 /**< force ILP request */ #define BHND_CCS_FORCE_MASK 0x0000000F #define BHND_CCS_ALPAREQ 0x00000008 /**< ALP Avail Request */ #define BHND_CCS_HTAREQ 0x00000010 /**< HT Avail Request */ #define BHND_CCS_AREQ_MASK 0x00000018 #define BHND_CCS_FORCEHWREQOFF 0x00000020 /**< Force HW Clock Request Off */ #define BHND_CCS_ERSRC_REQ_MASK 0x00000700 /**< external resource requests */ #define BHND_CCS_ERSRC_REQ_SHIFT 8 #define BHND_CCS_ERSRC_MAX 2 /**< maximum ERSRC value (corresponding to bits 0-2) */ #define BHND_CCS_ALPAVAIL 0x00010000 /**< ALP is available */ #define BHND_CCS_HTAVAIL 0x00020000 /**< HT is available */ #define BHND_CCS_AVAIL_MASK 0x00030000 #define BHND_CCS_BP_ON_APL 0x00040000 /**< RO: Backplane is running on ALP clock */ #define BHND_CCS_BP_ON_HT 0x00080000 /**< RO: Backplane is running on HT clock */ #define BHND_CCS_ERSRC_STS_MASK 0x07000000 /**< external resource status */ #define BHND_CCS_ERSRC_STS_SHIFT 24 #define BHND_CCS0_HTAVAIL 0x00010000 /**< HT avail in chipc and pcmcia on 4328a0 */ #define BHND_CCS0_ALPAVAIL 0x00020000 /**< ALP avail in chipc and pcmcia on 4328a0 */ /* PMU registers */ #define BHND_PMU_CTRL 0x600 #define BHND_PMU_CTRL_ILP_DIV_MASK 0xffff0000 #define BHND_PMU_CTRL_ILP_DIV_SHIFT 16 #define BHND_PMU_CTRL_PLL_PLLCTL_UPD 0x00000400 /* rev 2 */ #define BHND_PMU_CTRL_NOILP_ON_WAIT 0x00000200 /* rev 1 */ #define BHND_PMU_CTRL_HT_REQ_EN 0x00000100 #define BHND_PMU_CTRL_ALP_REQ_EN 0x00000080 #define BHND_PMU_CTRL_XTALFREQ_MASK 0x0000007c #define BHND_PMU_CTRL_XTALFREQ_SHIFT 2 #define BHND_PMU_CTRL_ILP_DIV_EN 0x00000002 #define BHND_PMU_CTRL_LPO_SEL 0x00000001 #define BHND_PMU_CAP 0x604 #define BHND_PMU_CAP_REV_MASK 0x000000ff #define BHND_PMU_CAP_REV_SHIFT 0 #define BHND_PMU_CAP_RC_MASK 0x00001f00 #define BHND_PMU_CAP_RC_SHIFT 8 #define BHND_PMU_CAP_RC_MAX \ (BHND_PMU_CAP_RC_MASK >> BHND_PMU_CAP_RC_SHIFT) #define BHND_PMU_CAP_TC_MASK 0x0001e000 #define BHND_PMU_CAP_TC_SHIFT 13 #define BHND_PMU_CAP_PC_MASK 0x001e0000 #define BHND_PMU_CAP_PC_SHIFT 17 #define BHND_PMU_CAP_VC_MASK 0x01e00000 #define BHND_PMU_CAP_VC_SHIFT 21 #define BHND_PMU_CAP_CC_MASK 0x1e000000 #define BHND_PMU_CAP_CC_SHIFT 25 #define BHND_PMU_CAP5_PC_MASK 0x003e0000 /* PMU corerev >= 5 */ #define BHND_PMU_CAP5_PC_SHIFT 17 #define BHND_PMU_CAP5_VC_MASK 0x07c00000 #define BHND_PMU_CAP5_VC_SHIFT 22 #define BHND_PMU_CAP5_CC_MASK 0xf8000000 #define BHND_PMU_CAP5_CC_SHIFT 27 #define BHND_PMU_ST 0x608 #define BHND_PMU_ST_EXTLPOAVAIL 0x0100 #define BHND_PMU_ST_WDRESET 0x0080 #define BHND_PMU_ST_INTPEND 0x0040 #define BHND_PMU_ST_SBCLKST 0x0030 #define BHND_PMU_ST_SBCLKST_ILP 0x0010 #define BHND_PMU_ST_SBCLKST_ALP 0x0020 #define BHND_PMU_ST_SBCLKST_HT 0x0030 #define BHND_PMU_ST_ALPAVAIL 0x0008 #define BHND_PMU_ST_HTAVAIL 0x0004 #define BHND_PMU_ST_RESINIT 0x0003 #define BHND_PMU_RES_STATE 0x60c #define BHND_PMU_RES_PENDING 0x610 #define BHND_PMU_TIMER 0x614 #define BHND_PMU_MIN_RES_MASK 0x618 #define BHND_PMU_MAX_RES_MASK 0x61c #define BHND_PMU_RES_TABLE_SEL 0x620 #define BHND_PMU_RES_DEP_MASK 
0x624 #define BHND_PMU_RES_UPDN_TIMER 0x628 -#define BHND_PMU_RES_UPDN_UPTME_MASK 0xFF +#define BHND_PMU_RES_UPDN_UPTME_MASK 0xFF00 #define BHND_PMU_RES_UPDN_UPTME_SHIFT 8 #define BHND_PMU_RES_TIMER 0x62C #define BHND_PMU_CLKSTRETCH 0x630 #define BHND_PMU_CSTRETCH_HT 0xffff0000 #define BHND_PMU_CSTRETCH_ALP 0x0000ffff #define BHND_PMU_WATCHDOG 0x634 #define BHND_PMU_GPIOSEL 0x638 /* pmu rev >= 1 ? */ #define BHND_PMU_GPIOEN 0x63C /* pmu rev >= 1 ? */ #define BHND_PMU_RES_REQ_TIMER_SEL 0x640 #define BHND_PMU_RES_REQ_TIMER 0x644 #define BHND_PMU_RRQT_TIME_MASK 0x03ff #define BHND_PMU_RRQT_INTEN 0x0400 #define BHND_PMU_RRQT_REQ_ACTIVE 0x0800 #define BHND_PMU_RRQT_ALP_REQ 0x1000 #define BHND_PMU_RRQT_HT_REQ 0x2000 #define BHND_PMU_RES_REQ_MASK 0x648 #define BHND_PMU_CHIP_CONTROL_ADDR 0x650 #define BHND_PMU_CHIP_CONTROL_DATA 0x654 #define BHND_PMU_REG_CONTROL_ADDR 0x658 #define BHND_PMU_REG_CONTROL_DATA 0x65C #define BHND_PMU_PLL_CONTROL_ADDR 0x660 #define BHND_PMU_PLL_CONTROL_DATA 0x664 #define BHND_PMU_STRAPOPT 0x668 /* chipc rev >= 28 */ #define BHND_PMU_XTALFREQ 0x66C /* pmu rev >= 10 */ /* PMU resource bit position */ #define BHND_PMURES_BIT(bit) (1 << (bit)) /* PMU resource number limit */ #define BHND_PMU_RESNUM_MAX 30 /* PMU chip control0 register */ #define BHND_PMU_CHIPCTL0 0 /* PMU chip control1 register */ #define BHND_PMU_CHIPCTL1 1 #define BHND_PMU_CC1_RXC_DLL_BYPASS 0x00010000 #define BHND_PMU_CC1_IF_TYPE_MASK 0x00000030 #define BHND_PMU_CC1_IF_TYPE_RMII 0x00000000 #define BHND_PMU_CC1_IF_TYPE_MII 0x00000010 #define BHND_PMU_CC1_IF_TYPE_RGMII 0x00000020 #define BHND_PMU_CC1_SW_TYPE_MASK 0x000000c0 #define BHND_PMU_CC1_SW_TYPE_EPHY 0x00000000 #define BHND_PMU_CC1_SW_TYPE_EPHYMII 0x00000040 #define BHND_PMU_CC1_SW_TYPE_EPHYRMII 0x00000080 #define BHND_PMU_CC1_SW_TYPE_RGMII 0x000000c0 /* PMU corerev and chip specific PLL controls. * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number * to differentiate different PLLs controlled by the same PMU rev.
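 *
 * These control words are not memory-mapped directly; they are reached
 * via the BHND_PMU_PLL_CONTROL_ADDR/_DATA indirection defined above. A
 * minimal sketch (`r' is the PMU register block):
 *
 *	bhnd_bus_write_4(r, BHND_PMU_PLL_CONTROL_ADDR,
 *	    BHND_PMU1_PLL0_PLLCTL1);
 *	m1div = BHND_PMU_GET_BITS(bhnd_bus_read_4(r,
 *	    BHND_PMU_PLL_CONTROL_DATA), BHND_PMU1_PLL0_PC1_M1DIV);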
*/ /* pllcontrol registers */ /* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */ #define BHND_PMU0_PLL0_PLLCTL0 0 #define BHND_PMU0_PLL0_PC0_PDIV_MASK 1 #define BHND_PMU0_PLL0_PC0_PDIV_FREQ 25000 #define BHND_PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038 #define BHND_PMU0_PLL0_PC0_DIV_ARM_SHIFT 3 #define BHND_PMU0_PLL0_PC0_DIV_ARM_BASE 8 /* PC0_DIV_ARM for PLLOUT_ARM */ #define BHND_PMU0_PLL0_PC0_DIV_ARM_110MHZ 0 #define BHND_PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1 #define BHND_PMU0_PLL0_PC0_DIV_ARM_88MHZ 2 #define BHND_PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */ #define BHND_PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4 #define BHND_PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5 #define BHND_PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6 #define BHND_PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7 /* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */ #define BHND_PMU0_PLL0_PLLCTL1 1 #define BHND_PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000 #define BHND_PMU0_PLL0_PC1_WILD_INT_SHIFT 28 #define BHND_PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00 #define BHND_PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8 #define BHND_PMU0_PLL0_PC1_STOP_MOD 0x00000040 /* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */ #define BHND_PMU0_PLL0_PLLCTL2 2 -#define BHND_PMU0_PLL0_PC2_WILD_INT_MASK 0xf +#define BHND_PMU0_PLL0_PC2_WILD_INT_MASK 0xf0 #define BHND_PMU0_PLL0_PC2_WILD_INT_SHIFT 4 /* pllcontrol registers */ /* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ #define BHND_PMU1_PLL0_PLLCTL0 0 #define BHND_PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000 #define BHND_PMU1_PLL0_PC0_P1DIV_SHIFT 20 #define BHND_PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000 #define BHND_PMU1_PLL0_PC0_P2DIV_SHIFT 24 #define BHND_PMU1_PLL0_PC0_BYPASS_SDMOD_MASK 0x10000000 #define BHND_PMU1_PLL0_PC0_BYPASS_SDMOD_SHIFT 28 /* mdiv */ #define BHND_PMU1_PLL0_PLLCTL1 1 #define BHND_PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff #define BHND_PMU1_PLL0_PC1_M1DIV_SHIFT 0 #define BHND_PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00 #define BHND_PMU1_PLL0_PC1_M2DIV_SHIFT 8 #define BHND_PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000 #define BHND_PMU1_PLL0_PC1_M3DIV_SHIFT 16 #define BHND_PMU1_PLL0_PC1_M4DIV_MASK 0xff000000 #define BHND_PMU1_PLL0_PC1_M4DIV_SHIFT 24 #define BHND_PMU_DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8 #define BHND_PMU_DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << BHND_PMU_DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) #define BHND_PMU_DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << BHND_PMU_DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) /* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ #define BHND_PMU1_PLL0_PLLCTL2 2 #define BHND_PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff #define BHND_PMU1_PLL0_PC2_M5DIV_SHIFT 0 #define BHND_PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00 #define BHND_PMU1_PLL0_PC2_M6DIV_SHIFT 8 #define BHND_PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000 #define BHND_PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17 #define BHND_PMU1_PLL0_PC2_NDIV_MODE_INT 0 #define BHND_PMU1_PLL0_PC2_NDIV_MODE_MASH 1 #define BHND_PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /* recommended for 4319 */ #define BHND_PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 #define BHND_PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 /* ndiv_frac */ #define BHND_PMU1_PLL0_PLLCTL3 3 #define BHND_PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff #define BHND_PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0 /* pll_ctrl */ #define BHND_PMU1_PLL0_PLLCTL4 4 #define BHND_PMU1_PLL0_PC4_KVCO_XS_MASK 0x38000000 #define BHND_PMU1_PLL0_PC4_KVCO_XS_SHIFT 27 /* pll_ctrl, vco_rng, clkdrive_ch */ #define BHND_PMU1_PLL0_PLLCTL5 5 #define BHND_PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 #define BHND_PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 #define 
BHND_PMU1_PLL0_PC5_PLL_CTRL_37_32_MASK 0x0000003f #define BHND_PMU1_PLL0_PC5_PLL_CTRL_37_32_SHIFT 0 #define BHND_PMU1_PLL0_PC5_VCO_RNG_MASK 0x000000C0 #define BHND_PMU1_PLL0_PC5_VCO_RNG_SHIFT 6 /* PMU rev 2 control words */ #define BHND_PMU2_PHY_PLL_PLLCTL 4 #define BHND_PMU2_SI_PLL_PLLCTL 10 /* PMU rev 2 */ /* pllcontrol registers */ /* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ #define BHND_PMU2_PLL_PLLCTL0 0 #define BHND_PMU2_PLL_PC0_P1DIV_MASK 0x00f00000 #define BHND_PMU2_PLL_PC0_P1DIV_SHIFT 20 #define BHND_PMU2_PLL_PC0_P2DIV_MASK 0x0f000000 #define BHND_PMU2_PLL_PC0_P2DIV_SHIFT 24 /* mdiv */ #define BHND_PMU2_PLL_PLLCTL1 1 #define BHND_PMU2_PLL_PC1_M1DIV_MASK 0x000000ff #define BHND_PMU2_PLL_PC1_M1DIV_SHIFT 0 #define BHND_PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00 #define BHND_PMU2_PLL_PC1_M2DIV_SHIFT 8 #define BHND_PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000 #define BHND_PMU2_PLL_PC1_M3DIV_SHIFT 16 #define BHND_PMU2_PLL_PC1_M4DIV_MASK 0xff000000 #define BHND_PMU2_PLL_PC1_M4DIV_SHIFT 24 /* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ #define BHND_PMU2_PLL_PLLCTL2 2 #define BHND_PMU2_PLL_PC2_M5DIV_MASK 0x000000ff #define BHND_PMU2_PLL_PC2_M5DIV_SHIFT 0 #define BHND_PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00 #define BHND_PMU2_PLL_PC2_M6DIV_SHIFT 8 #define BHND_PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000 #define BHND_PMU2_PLL_PC2_NDIV_MODE_SHIFT 17 #define BHND_PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000 #define BHND_PMU2_PLL_PC2_NDIV_INT_SHIFT 20 /* ndiv_frac */ #define BHND_PMU2_PLL_PLLCTL3 3 #define BHND_PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff #define BHND_PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0 /* pll_ctrl */ #define BHND_PMU2_PLL_PLLCTL4 4 /* pll_ctrl, vco_rng, clkdrive_ch */ #define BHND_PMU2_PLL_PLLCTL5 5 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000 #define BHND_PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28 /* PMU rev 5 (& 6) */ #define BHND_PMU5_PLL_P1P2_OFF 0 #define BHND_PMU5_PLL_P1_MASK 0x0f000000 #define BHND_PMU5_PLL_P1_SHIFT 24 #define BHND_PMU5_PLL_P2_MASK 0x00f00000 #define BHND_PMU5_PLL_P2_SHIFT 20 #define BHND_PMU5_PLL_M14_OFF 1 #define BHND_PMU5_PLL_MDIV_MASK 0x000000ff #define BHND_PMU5_PLL_MDIV_WIDTH 8 #define BHND_PMU5_PLL_NM5_OFF 2 #define BHND_PMU5_PLL_NDIV_MASK 0xfff00000 #define BHND_PMU5_PLL_NDIV_SHIFT 20 #define BHND_PMU5_PLL_NDIV_MODE_MASK 0x000e0000 #define BHND_PMU5_PLL_NDIV_MODE_SHIFT 17 #define BHND_PMU5_PLL_FMAB_OFF 3 #define BHND_PMU5_PLL_MRAT_MASK 0xf0000000 #define BHND_PMU5_PLL_MRAT_SHIFT 28 #define BHND_PMU5_PLL_ABRAT_MASK 0x08000000 #define BHND_PMU5_PLL_ABRAT_SHIFT 27 #define BHND_PMU5_PLL_FDIV_MASK 0x07ffffff #define BHND_PMU5_PLL_PLLCTL_OFF 4 #define BHND_PMU5_PLL_PCHI_OFF 5 #define BHND_PMU5_PLL_PCHI_MASK 0x0000003f /* pmu XtalFreqRatio */ #define BHND_PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF #define BHND_PMU_XTALFREQ_REG_ILPCTR_SHIFT 0 #define BHND_PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000 #define BHND_PMU_XTALFREQ_REG_MEASURE_SHIFT 31 /* Divider allocation in 4716/47162/5356/5357 */ #define BHND_PMU5_MAINPLL_CPU 1 #define BHND_PMU5_MAINPLL_MEM 2 #define 
BHND_PMU5_MAINPLL_SI 3 /* PMU rev 6 (BCM4706/Northstar) */ #define BHND_PMU4706_MAINPLL_PLL0 0 #define BHND_PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */ #define BHND_PMU6_4706_PROC_P1DIV_MASK 0x000f0000 #define BHND_PMU6_4706_PROC_P1DIV_SHIFT 16 #define BHND_PMU6_4706_PROC_P2DIV_MASK 0x0000f000 #define BHND_PMU6_4706_PROC_P2DIV_SHIFT 12 #define BHND_PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8 #define BHND_PMU6_4706_PROC_NDIV_INT_SHIFT 3 #define BHND_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007 #define BHND_PMU6_4706_PROC_NDIV_MODE_SHIFT 0 /* Divider allocation in 4706 */ #define BHND_PMU6_MAINPLL_CPU 1 #define BHND_PMU6_MAINPLL_MEM 2 #define BHND_PMU6_MAINPLL_SI 3 /* PMU7 (?) */ #define BHND_PMU7_PLL_PLLCTL7 7 #define BHND_PMU7_PLL_PLLCTL8 8 #define BHND_PMU7_PLL_PLLCTL11 11 /* PLL usage in 4716/47162 */ #define BHND_PMU4716_MAINPLL_PLL0 12 /* PLL usage in 5356/5357 */ #define BHND_PMU5356_MAINPLL_PLL0 0 #define BHND_PMU5357_MAINPLL_PLL0 0 /* 4716/47162 PMU resources */ #define BHND_PMU_RES4716_PROC_PLL_ON 0x00000040 #define BHND_PMU_RES4716_PROC_HT_AVAIL 0x00000080 /* 4716/4717/4718 chip-specific CHIPCTRL PMU register bits */ #define BHND_PMU_CCTRL471X_I2S_PINS_ENABLE 0x0080 /* I2S pins off by default, shared with pflash */ /* 5354 PMU resources */ #define BHND_PMU_RES5354_EXT_SWITCHER_PWM 0 /* 0x00001 */ #define BHND_PMU_RES5354_BB_SWITCHER_PWM 1 /* 0x00002 */ #define BHND_PMU_RES5354_BB_SWITCHER_BURST 2 /* 0x00004 */ #define BHND_PMU_RES5354_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */ #define BHND_PMU_RES5354_ILP_REQUEST 4 /* 0x00010 */ #define BHND_PMU_RES5354_RADIO_SWITCHER_PWM 5 /* 0x00020 */ #define BHND_PMU_RES5354_RADIO_SWITCHER_BURST 6 /* 0x00040 */ #define BHND_PMU_RES5354_ROM_SWITCH 7 /* 0x00080 */ #define BHND_PMU_RES5354_PA_REF_LDO 8 /* 0x00100 */ #define BHND_PMU_RES5354_RADIO_LDO 9 /* 0x00200 */ #define BHND_PMU_RES5354_AFE_LDO 10 /* 0x00400 */ #define BHND_PMU_RES5354_PLL_LDO 11 /* 0x00800 */ #define BHND_PMU_RES5354_BG_FILTBYP 12 /* 0x01000 */ #define BHND_PMU_RES5354_TX_FILTBYP 13 /* 0x02000 */ #define BHND_PMU_RES5354_RX_FILTBYP 14 /* 0x04000 */ #define BHND_PMU_RES5354_XTAL_PU 15 /* 0x08000 */ #define BHND_PMU_RES5354_XTAL_EN 16 /* 0x10000 */ #define BHND_PMU_RES5354_BB_PLL_FILTBYP 17 /* 0x20000 */ #define BHND_PMU_RES5354_RF_PLL_FILTBYP 18 /* 0x40000 */ #define BHND_PMU_RES5354_BB_PLL_PU 19 /* 0x80000 */ /* 5357 chip-specific CHIPCTRL register bits */ #define BHND_PMU_CCTRL5357_EXTPA (1<<14) /* extPA in CHIPCTRL1, bit 14 */ #define BHND_PMU_CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in CHIPCTRL1, bit 15 */ /* 4328 PMU resources */ #define BHND_PMU_RES4328_EXT_SWITCHER_PWM 0 /* 0x00001 */ #define BHND_PMU_RES4328_BB_SWITCHER_PWM 1 /* 0x00002 */ #define BHND_PMU_RES4328_BB_SWITCHER_BURST 2 /* 0x00004 */ #define BHND_PMU_RES4328_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */ #define BHND_PMU_RES4328_ILP_REQUEST 4 /* 0x00010 */ #define BHND_PMU_RES4328_RADIO_SWITCHER_PWM 5 /* 0x00020 */ #define BHND_PMU_RES4328_RADIO_SWITCHER_BURST 6 /* 0x00040 */ #define BHND_PMU_RES4328_ROM_SWITCH 7 /* 0x00080 */ #define BHND_PMU_RES4328_PA_REF_LDO 8 /* 0x00100 */ #define BHND_PMU_RES4328_RADIO_LDO 9 /* 0x00200 */ #define BHND_PMU_RES4328_AFE_LDO 10 /* 0x00400 */ #define BHND_PMU_RES4328_PLL_LDO 11 /* 0x00800 */ #define BHND_PMU_RES4328_BG_FILTBYP 12 /* 0x01000 */ #define BHND_PMU_RES4328_TX_FILTBYP 13 /* 0x02000 */ #define BHND_PMU_RES4328_RX_FILTBYP 14 /* 0x04000 */ #define BHND_PMU_RES4328_XTAL_PU 15 /* 0x08000 */ #define BHND_PMU_RES4328_XTAL_EN 16 /* 0x10000 */ #define BHND_PMU_RES4328_BB_PLL_FILTBYP 17 /* 
0x20000 */ #define BHND_PMU_RES4328_RF_PLL_FILTBYP 18 /* 0x40000 */ #define BHND_PMU_RES4328_BB_PLL_PU 19 /* 0x80000 */ /* 4325 A0/A1 PMU resources */ #define BHND_PMU_RES4325_BUCK_BOOST_BURST 0 /* 0x00000001 */ #define BHND_PMU_RES4325_CBUCK_BURST 1 /* 0x00000002 */ #define BHND_PMU_RES4325_CBUCK_PWM 2 /* 0x00000004 */ #define BHND_PMU_RES4325_CLDO_CBUCK_BURST 3 /* 0x00000008 */ #define BHND_PMU_RES4325_CLDO_CBUCK_PWM 4 /* 0x00000010 */ #define BHND_PMU_RES4325_BUCK_BOOST_PWM 5 /* 0x00000020 */ #define BHND_PMU_RES4325_ILP_REQUEST 6 /* 0x00000040 */ #define BHND_PMU_RES4325_ABUCK_BURST 7 /* 0x00000080 */ #define BHND_PMU_RES4325_ABUCK_PWM 8 /* 0x00000100 */ #define BHND_PMU_RES4325_LNLDO1_PU 9 /* 0x00000200 */ #define BHND_PMU_RES4325_OTP_PU 10 /* 0x00000400 */ #define BHND_PMU_RES4325_LNLDO3_PU 11 /* 0x00000800 */ #define BHND_PMU_RES4325_LNLDO4_PU 12 /* 0x00001000 */ #define BHND_PMU_RES4325_XTAL_PU 13 /* 0x00002000 */ #define BHND_PMU_RES4325_ALP_AVAIL 14 /* 0x00004000 */ #define BHND_PMU_RES4325_RX_PWRSW_PU 15 /* 0x00008000 */ #define BHND_PMU_RES4325_TX_PWRSW_PU 16 /* 0x00010000 */ #define BHND_PMU_RES4325_RFPLL_PWRSW_PU 17 /* 0x00020000 */ #define BHND_PMU_RES4325_LOGEN_PWRSW_PU 18 /* 0x00040000 */ #define BHND_PMU_RES4325_AFE_PWRSW_PU 19 /* 0x00080000 */ #define BHND_PMU_RES4325_BBPLL_PWRSW_PU 20 /* 0x00100000 */ #define BHND_PMU_RES4325_HT_AVAIL 21 /* 0x00200000 */ /* 4325 B0/C0 PMU resources */ #define BHND_PMU_RES4325B0_CBUCK_LPOM 1 /* 0x00000002 */ #define BHND_PMU_RES4325B0_CBUCK_BURST 2 /* 0x00000004 */ #define BHND_PMU_RES4325B0_CBUCK_PWM 3 /* 0x00000008 */ #define BHND_PMU_RES4325B0_CLDO_PU 4 /* 0x00000010 */ /* 4325 C1 PMU resources */ #define BHND_PMU_RES4325C1_LNLDO2_PU 12 /* 0x00001000 */ /* 4329 PMU resources */ #define BHND_PMU_RES4329_RESERVED0 0 /* 0x00000001 */ #define BHND_PMU_RES4329_CBUCK_LPOM 1 /* 0x00000002 */ #define BHND_PMU_RES4329_CBUCK_BURST 2 /* 0x00000004 */ #define BHND_PMU_RES4329_CBUCK_PWM 3 /* 0x00000008 */ #define BHND_PMU_RES4329_CLDO_PU 4 /* 0x00000010 */ #define BHND_PMU_RES4329_PALDO_PU 5 /* 0x00000020 */ #define BHND_PMU_RES4329_ILP_REQUEST 6 /* 0x00000040 */ #define BHND_PMU_RES4329_RESERVED7 7 /* 0x00000080 */ #define BHND_PMU_RES4329_RESERVED8 8 /* 0x00000100 */ #define BHND_PMU_RES4329_LNLDO1_PU 9 /* 0x00000200 */ #define BHND_PMU_RES4329_OTP_PU 10 /* 0x00000400 */ #define BHND_PMU_RES4329_RESERVED11 11 /* 0x00000800 */ #define BHND_PMU_RES4329_LNLDO2_PU 12 /* 0x00001000 */ #define BHND_PMU_RES4329_XTAL_PU 13 /* 0x00002000 */ #define BHND_PMU_RES4329_ALP_AVAIL 14 /* 0x00004000 */ #define BHND_PMU_RES4329_RX_PWRSW_PU 15 /* 0x00008000 */ #define BHND_PMU_RES4329_TX_PWRSW_PU 16 /* 0x00010000 */ #define BHND_PMU_RES4329_RFPLL_PWRSW_PU 17 /* 0x00020000 */ #define BHND_PMU_RES4329_LOGEN_PWRSW_PU 18 /* 0x00040000 */ #define BHND_PMU_RES4329_AFE_PWRSW_PU 19 /* 0x00080000 */ #define BHND_PMU_RES4329_BBPLL_PWRSW_PU 20 /* 0x00100000 */ #define BHND_PMU_RES4329_HT_AVAIL 21 /* 0x00200000 */ /* 4312 PMU resources (all PMU chips with little memory constraint) */ #define BHND_PMU_RES4312_SWITCHER_BURST 0 /* 0x00000001 */ #define BHND_PMU_RES4312_SWITCHER_PWM 1 /* 0x00000002 */ #define BHND_PMU_RES4312_PA_REF_LDO 2 /* 0x00000004 */ #define BHND_PMU_RES4312_CORE_LDO_BURST 3 /* 0x00000008 */ #define BHND_PMU_RES4312_CORE_LDO_PWM 4 /* 0x00000010 */ #define BHND_PMU_RES4312_RADIO_LDO 5 /* 0x00000020 */ #define BHND_PMU_RES4312_ILP_REQUEST 6 /* 0x00000040 */ #define BHND_PMU_RES4312_BG_FILTBYP 7 /* 0x00000080 */ #define BHND_PMU_RES4312_TX_FILTBYP 8 /*
0x00000100 */ #define BHND_PMU_RES4312_RX_FILTBYP 9 /* 0x00000200 */ #define BHND_PMU_RES4312_XTAL_PU 10 /* 0x00000400 */ #define BHND_PMU_RES4312_ALP_AVAIL 11 /* 0x00000800 */ #define BHND_PMU_RES4312_BB_PLL_FILTBYP 12 /* 0x00001000 */ #define BHND_PMU_RES4312_RF_PLL_FILTBYP 13 /* 0x00002000 */ #define BHND_PMU_RES4312_HT_AVAIL 14 /* 0x00004000 */ /* 4322 PMU resources */ #define BHND_PMU_RES4322_RF_LDO 0 #define BHND_PMU_RES4322_ILP_REQUEST 1 #define BHND_PMU_RES4322_XTAL_PU 2 #define BHND_PMU_RES4322_ALP_AVAIL 3 #define BHND_PMU_RES4322_SI_PLL_ON 4 #define BHND_PMU_RES4322_HT_SI_AVAIL 5 #define BHND_PMU_RES4322_PHY_PLL_ON 6 #define BHND_PMU_RES4322_HT_PHY_AVAIL 7 #define BHND_PMU_RES4322_OTP_PU 8 /* 43224 chip-specific CHIPCTRL register bits */ #define BHND_PMU_CCTRL43224_GPIO_TOGGLE 0x8000 #define BHND_PMU_CCTRL43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */ #define BHND_PMU_CCTRL43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */ /* 43236 PMU resources */ #define BHND_PMU_RES43236_REGULATOR 0 #define BHND_PMU_RES43236_ILP_REQUEST 1 #define BHND_PMU_RES43236_XTAL_PU 2 #define BHND_PMU_RES43236_ALP_AVAIL 3 #define BHND_PMU_RES43236_SI_PLL_ON 4 #define BHND_PMU_RES43236_HT_SI_AVAIL 5 /* 43236 chip-specific CHIPCTRL register bits */ #define BHND_PMU_CCTRL43236_BT_COEXIST (1<<0) /* 0 disable */ #define BHND_PMU_CCTRL43236_SECI (1<<1) /* 0 SECI is disabled (JTAG functional) */ #define BHND_PMU_CCTRL43236_EXT_LNA (1<<2) /* 0 disable */ #define BHND_PMU_CCTRL43236_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */ #define BHND_PMU_CCTRL43236_GSIO (1<<4) /* 0 disable */ /* 4331 PMU resources */ #define BHND_PMU_RES4331_REGULATOR 0 #define BHND_PMU_RES4331_ILP_REQUEST 1 #define BHND_PMU_RES4331_XTAL_PU 2 #define BHND_PMU_RES4331_ALP_AVAIL 3 #define BHND_PMU_RES4331_SI_PLL_ON 4 #define BHND_PMU_RES4331_HT_SI_AVAIL 5 /* 4315 PMU resources */ #define BHND_PMU_RES4315_CBUCK_LPOM 1 /* 0x00000002 */ #define BHND_PMU_RES4315_CBUCK_BURST 2 /* 0x00000004 */ #define BHND_PMU_RES4315_CBUCK_PWM 3 /* 0x00000008 */ #define BHND_PMU_RES4315_CLDO_PU 4 /* 0x00000010 */ #define BHND_PMU_RES4315_PALDO_PU 5 /* 0x00000020 */ #define BHND_PMU_RES4315_ILP_REQUEST 6 /* 0x00000040 */ #define BHND_PMU_RES4315_LNLDO1_PU 9 /* 0x00000200 */ #define BHND_PMU_RES4315_OTP_PU 10 /* 0x00000400 */ #define BHND_PMU_RES4315_LNLDO2_PU 12 /* 0x00001000 */ #define BHND_PMU_RES4315_XTAL_PU 13 /* 0x00002000 */ #define BHND_PMU_RES4315_ALP_AVAIL 14 /* 0x00004000 */ #define BHND_PMU_RES4315_RX_PWRSW_PU 15 /* 0x00008000 */ #define BHND_PMU_RES4315_TX_PWRSW_PU 16 /* 0x00010000 */ #define BHND_PMU_RES4315_RFPLL_PWRSW_PU 17 /* 0x00020000 */ #define BHND_PMU_RES4315_LOGEN_PWRSW_PU 18 /* 0x00040000 */ #define BHND_PMU_RES4315_AFE_PWRSW_PU 19 /* 0x00080000 */ #define BHND_PMU_RES4315_BBPLL_PWRSW_PU 20 /* 0x00100000 */ #define BHND_PMU_RES4315_HT_AVAIL 21 /* 0x00200000 */ /* 4319 PMU resources */ #define BHND_PMU_RES4319_CBUCK_LPOM 1 /* 0x00000002 */ #define BHND_PMU_RES4319_CBUCK_BURST 2 /* 0x00000004 */ #define BHND_PMU_RES4319_CBUCK_PWM 3 /* 0x00000008 */ #define BHND_PMU_RES4319_CLDO_PU 4 /* 0x00000010 */ #define BHND_PMU_RES4319_PALDO_PU 5 /* 0x00000020 */ #define BHND_PMU_RES4319_ILP_REQUEST 6 /* 0x00000040 */ #define BHND_PMU_RES4319_LNLDO1_PU 9 /* 0x00000200 */ #define BHND_PMU_RES4319_OTP_PU 10 /* 0x00000400 */ #define BHND_PMU_RES4319_LNLDO2_PU 12 /* 0x00001000 */ #define BHND_PMU_RES4319_XTAL_PU 13 /* 0x00002000 */ #define BHND_PMU_RES4319_ALP_AVAIL 14 /* 0x00004000 */ #define
BHND_PMU_RES4319_RX_PWRSW_PU 15 /* 0x00008000 */ #define BHND_PMU_RES4319_TX_PWRSW_PU 16 /* 0x00010000 */ #define BHND_PMU_RES4319_RFPLL_PWRSW_PU 17 /* 0x00020000 */ #define BHND_PMU_RES4319_LOGEN_PWRSW_PU 18 /* 0x00040000 */ #define BHND_PMU_RES4319_AFE_PWRSW_PU 19 /* 0x00080000 */ #define BHND_PMU_RES4319_BBPLL_PWRSW_PU 20 /* 0x00100000 */ #define BHND_PMU_RES4319_HT_AVAIL 21 /* 0x00200000 */ /* 4319 chip-specific CHIPCTL register bits */ #define BHND_PMU1_PLL0_CHIPCTL0 0 #define BHND_PMU1_PLL0_CHIPCTL1 1 #define BHND_PMU1_PLL0_CHIPCTL2 2 #define BHND_PMU_CCTRL4319USB_XTAL_SEL_MASK 0x00180000 #define BHND_PMU_CCTRL4319USB_XTAL_SEL_SHIFT 19 #define BHND_PMU_CCTRL4319USB_48MHZ_PLL_SEL 1 #define BHND_PMU_CCTRL4319USB_24MHZ_PLL_SEL 2 /* 4336 PMU resources */ #define BHND_PMU_RES4336_CBUCK_LPOM 0 #define BHND_PMU_RES4336_CBUCK_BURST 1 #define BHND_PMU_RES4336_CBUCK_LP_PWM 2 #define BHND_PMU_RES4336_CBUCK_PWM 3 #define BHND_PMU_RES4336_CLDO_PU 4 #define BHND_PMU_RES4336_DIS_INT_RESET_PD 5 #define BHND_PMU_RES4336_ILP_REQUEST 6 #define BHND_PMU_RES4336_LNLDO_PU 7 #define BHND_PMU_RES4336_LDO3P3_PU 8 #define BHND_PMU_RES4336_OTP_PU 9 #define BHND_PMU_RES4336_XTAL_PU 10 #define BHND_PMU_RES4336_ALP_AVAIL 11 #define BHND_PMU_RES4336_RADIO_PU 12 #define BHND_PMU_RES4336_BG_PU 13 #define BHND_PMU_RES4336_VREG1p4_PU_PU 14 #define BHND_PMU_RES4336_AFE_PWRSW_PU 15 #define BHND_PMU_RES4336_RX_PWRSW_PU 16 #define BHND_PMU_RES4336_TX_PWRSW_PU 17 #define BHND_PMU_RES4336_BB_PWRSW_PU 18 #define BHND_PMU_RES4336_SYNTH_PWRSW_PU 19 #define BHND_PMU_RES4336_MISC_PWRSW_PU 20 #define BHND_PMU_RES4336_LOGEN_PWRSW_PU 21 #define BHND_PMU_RES4336_BBPLL_PWRSW_PU 22 #define BHND_PMU_RES4336_MACPHY_CLKAVAIL 23 #define BHND_PMU_RES4336_HT_AVAIL 24 #define BHND_PMU_RES4336_RSVD 25 /* 4330 resources */ #define BHND_PMU_RES4330_CBUCK_LPOM 0 #define BHND_PMU_RES4330_CBUCK_BURST 1 #define BHND_PMU_RES4330_CBUCK_LP_PWM 2 #define BHND_PMU_RES4330_CBUCK_PWM 3 #define BHND_PMU_RES4330_CLDO_PU 4 #define BHND_PMU_RES4330_DIS_INT_RESET_PD 5 #define BHND_PMU_RES4330_ILP_REQUEST 6 #define BHND_PMU_RES4330_LNLDO_PU 7 #define BHND_PMU_RES4330_LDO3P3_PU 8 #define BHND_PMU_RES4330_OTP_PU 9 #define BHND_PMU_RES4330_XTAL_PU 10 #define BHND_PMU_RES4330_ALP_AVAIL 11 #define BHND_PMU_RES4330_RADIO_PU 12 #define BHND_PMU_RES4330_BG_PU 13 #define BHND_PMU_RES4330_VREG1p4_PU_PU 14 #define BHND_PMU_RES4330_AFE_PWRSW_PU 15 #define BHND_PMU_RES4330_RX_PWRSW_PU 16 #define BHND_PMU_RES4330_TX_PWRSW_PU 17 #define BHND_PMU_RES4330_BB_PWRSW_PU 18 #define BHND_PMU_RES4330_SYNTH_PWRSW_PU 19 #define BHND_PMU_RES4330_MISC_PWRSW_PU 20 #define BHND_PMU_RES4330_LOGEN_PWRSW_PU 21 #define BHND_PMU_RES4330_BBPLL_PWRSW_PU 22 #define BHND_PMU_RES4330_MACPHY_CLKAVAIL 23 #define BHND_PMU_RES4330_HT_AVAIL 24 #define BHND_PMU_RES4330_5gRX_PWRSW_PU 25 #define BHND_PMU_RES4330_5gTX_PWRSW_PU 26 #define BHND_PMU_RES4330_5g_LOGEN_PWRSW_PU 27 /* 4313 resources */ #define BHND_PMU_RES4313_BB_PU_RSRC 0 #define BHND_PMU_RES4313_ILP_REQ_RSRC 1 #define BHND_PMU_RES4313_XTAL_PU_RSRC 2 #define BHND_PMU_RES4313_ALP_AVAIL_RSRC 3 #define BHND_PMU_RES4313_RADIO_PU_RSRC 4 #define BHND_PMU_RES4313_BG_PU_RSRC 5 #define BHND_PMU_RES4313_VREG1P4_PU_RSRC 6 #define BHND_PMU_RES4313_AFE_PWRSW_RSRC 7 #define BHND_PMU_RES4313_RX_PWRSW_RSRC 8 #define BHND_PMU_RES4313_TX_PWRSW_RSRC 9 #define BHND_PMU_RES4313_BB_PWRSW_RSRC 10 #define BHND_PMU_RES4313_SYNTH_PWRSW_RSRC 11 #define BHND_PMU_RES4313_MISC_PWRSW_RSRC 12 #define BHND_PMU_RES4313_BB_PLL_PWRSW_RSRC 13 #define BHND_PMU_RES4313_HT_AVAIL_RSRC 14 
#define BHND_PMU_RES4313_MACPHY_CLK_AVAIL_RSRC 15 /* 4313 chip-specific CHIPCTRL register bits */ #define BHND_PMU_CCTRL4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */ /* 43228 resources */ #define BHND_PMU_RES43228_NOT_USED 0 #define BHND_PMU_RES43228_ILP_REQUEST 1 #define BHND_PMU_RES43228_XTAL_PU 2 #define BHND_PMU_RES43228_ALP_AVAIL 3 #define BHND_PMU_RES43228_PLL_EN 4 #define BHND_PMU_RES43228_HT_PHY_AVAIL 5 /* * Maximum delay for the PMU state transition in us. * This is an upper bound intended for spinwaits etc. */ #define BHND_PMU_MAX_TRANSITION_DLY 15000 /* PMU resource up transition time in ILP cycles */ #define BHND_PMURES_UP_TRANSITION 2 #endif /* _BHND_CORES_PMU_BHND_PMUREG_H_ */ Index: head/sys/dev/bhnd/nvram/bhnd_nvram_data_sprom.c =================================================================== --- head/sys/dev/bhnd/nvram/bhnd_nvram_data_sprom.c (revision 326870) +++ head/sys/dev/bhnd/nvram/bhnd_nvram_data_sprom.c (revision 326871) @@ -1,1467 +1,1470 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #else /* !_KERNEL */ #include #include #include #include #include #include #include #endif /* _KERNEL */ #include "bhnd_nvram_map.h" #include "bhnd_nvram_private.h" #include "bhnd_nvram_datavar.h" #include "bhnd_nvram_data_spromvar.h" /* * BHND SPROM NVRAM data class * * The SPROM data format is a fixed-layout, non-self-descriptive binary format, * used on Broadcom wireless and wired adapters, that provides a subset of the * variables defined by Broadcom SoC NVRAM formats.
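 *
 * Decoding is instead driven by compiled layout tables (see
 * bhnd_sprom_layouts[]); each variable is recovered by applying a
 * layout-supplied offset, mask, and shift to the raw little-endian
 * words, conceptually (sketch):
 *
 *	val = (le16toh(word) & mask) >> shift;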
*/ static const bhnd_sprom_layout *bhnd_nvram_sprom_get_layout(uint8_t sromrev); static int bhnd_nvram_sprom_ident( struct bhnd_nvram_io *io, const bhnd_sprom_layout **ident); static int bhnd_nvram_sprom_write_var( bhnd_sprom_opcode_state *state, bhnd_sprom_opcode_idx_entry *entry, bhnd_nvram_val *value, struct bhnd_nvram_io *io); static int bhnd_nvram_sprom_read_var( struct bhnd_sprom_opcode_state *state, struct bhnd_sprom_opcode_idx_entry *entry, struct bhnd_nvram_io *io, union bhnd_nvram_sprom_storage *storage, bhnd_nvram_val *val); static int bhnd_nvram_sprom_write_offset( const struct bhnd_nvram_vardefn *var, struct bhnd_nvram_io *data, bhnd_nvram_type type, size_t offset, uint32_t mask, int8_t shift, uint32_t value); static int bhnd_nvram_sprom_read_offset( const struct bhnd_nvram_vardefn *var, struct bhnd_nvram_io *data, bhnd_nvram_type type, size_t offset, uint32_t mask, int8_t shift, uint32_t *value); static bool bhnd_sprom_is_external_immutable( const char *name); BHND_NVRAM_DATA_CLASS_DEFN(sprom, "Broadcom SPROM", BHND_NVRAM_DATA_CAP_DEVPATHS, sizeof(struct bhnd_nvram_sprom)) #define SPROM_COOKIE_TO_VID(_cookie) \ (((struct bhnd_sprom_opcode_idx_entry *)(_cookie))->vid) #define SPROM_COOKIE_TO_NVRAM_VAR(_cookie) \ bhnd_nvram_get_vardefn(SPROM_COOKIE_TO_VID(_cookie)) /** * Read the magic value from @p io, and verify that it matches * the @p layout's expected magic value. * * If @p layout does not define a magic value, @p magic is set to 0x0 * and success is returned. * * @param io An I/O context mapping the SPROM data to be identified. * @param layout The SPROM layout against which @p io should be verified. * @param[out] magic On success, the SPROM magic value. * * @retval 0 success * @retval non-zero If checking @p io otherwise fails, a regular unix * error code will be returned. */ static int bhnd_nvram_sprom_check_magic(struct bhnd_nvram_io *io, const bhnd_sprom_layout *layout, uint16_t *magic) { int error; /* Skip if layout does not define a magic value */ if (layout->flags & SPROM_LAYOUT_MAGIC_NONE) return (0); /* Read the magic value */ error = bhnd_nvram_io_read(io, layout->magic_offset, magic, sizeof(*magic)); if (error) return (error); *magic = le16toh(*magic); /* If the signature does not match, skip to next layout */ if (*magic != layout->magic_value) return (ENXIO); return (0); } /** * Attempt to identify the format of the SPROM data mapped by @p io. * * The SPROM data format does not provide any identifying information at a * known offset, instead requiring that we iterate over the known SPROM image * sizes until we are able to compute a valid checksum (and, for later * revisions, validate a signature at a revision-specific offset). * * @param io An I/O context mapping the SPROM data to be identified. * @param[out] ident On success, the identified SPROM layout. * * @retval 0 success * @retval non-zero If identifying @p io otherwise fails, a regular unix * error code will be returned.
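 *
 * For example (sizes illustrative): given a 2048-byte layout followed
 * by a 4096-byte layout, the CRC over the first 2048 bytes is computed
 * once, and only the remaining 2048 bytes are read and folded into the
 * running CRC when the larger layout is evaluated.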
*/ static int bhnd_nvram_sprom_ident(struct bhnd_nvram_io *io, const bhnd_sprom_layout **ident) { uint8_t crc; size_t crc_errors; size_t nbytes; int error; crc = BHND_NVRAM_CRC8_INITIAL; crc_errors = 0; nbytes = 0; /* We iterate the SPROM layouts smallest to largest, allowing us to * perform incremental checksum calculation */ for (size_t i = 0; i < bhnd_sprom_num_layouts; i++) { const bhnd_sprom_layout *layout; u_char buf[512]; size_t nread; uint16_t magic; uint8_t srevcrc[2]; uint8_t srev; bool crc_valid; bool have_magic; layout = &bhnd_sprom_layouts[i]; + crc_valid = true; have_magic = true; if ((layout->flags & SPROM_LAYOUT_MAGIC_NONE)) have_magic = false; /* * Read image data and update CRC (errors are reported * after the signature check) * * Layout instances must be ordered from smallest to largest by * the nvram_map compiler, allowing us to incrementally update * our CRC. */ if (nbytes > layout->size) BHND_NV_PANIC("SPROM layout defined out-of-order"); nread = layout->size - nbytes; while (nread > 0) { size_t nr; nr = bhnd_nv_ummin(nread, sizeof(buf)); if ((error = bhnd_nvram_io_read(io, nbytes, buf, nr))) return (error); crc = bhnd_nvram_crc8(buf, nr, crc); crc_valid = (crc == BHND_NVRAM_CRC8_VALID); if (!crc_valid) crc_errors++; nread -= nr; nbytes += nr; } /* Read 8-bit SPROM revision, maintaining 16-bit size alignment * required by some OTP/SPROM chipsets. */ error = bhnd_nvram_io_read(io, layout->srev_offset, &srevcrc, sizeof(srevcrc)); if (error) return (error); srev = srevcrc[0]; /* Early sromrev 1 devices (specifically some BCM440x enet * cards) are reported to have been incorrectly programmed * with a revision of 0x10. */ if (layout->rev == 1 && srev == 0x10) srev = 0x1; /* Check revision against the layout definition */ if (srev != layout->rev) continue; /* Check the magic value, skipping to the next layout on * failure. 
*/ error = bhnd_nvram_sprom_check_magic(io, layout, &magic); if (error) { /* If the CRC was valid, log the mismatch */ if (crc_valid || BHND_NV_VERBOSE) { BHND_NV_LOG("invalid sprom %hhu signature: " "0x%hx (expected 0x%hx)\n", srev, magic, layout->magic_value); return (ENXIO); } continue; } /* Check for an earlier CRC error */ if (!crc_valid) { /* If the magic check succeeded, then we may just have * data corruption -- log the CRC error */ if (have_magic || BHND_NV_VERBOSE) { BHND_NV_LOG("sprom %hhu CRC error (crc=%#hhx, " "expected=%#x)\n", srev, crc, BHND_NVRAM_CRC8_VALID); } continue; } /* Identified */ *ident = layout; return (0); } /* No match */ if (crc_errors > 0 && BHND_NV_VERBOSE) { BHND_NV_LOG("sprom parsing failed with %zu CRC errors\n", crc_errors); } return (ENXIO); } static int bhnd_nvram_sprom_probe(struct bhnd_nvram_io *io) { const bhnd_sprom_layout *layout; int error; /* Try to parse the input */ if ((error = bhnd_nvram_sprom_ident(io, &layout))) return (error); return (BHND_NVRAM_DATA_PROBE_DEFAULT); } static int bhnd_nvram_sprom_getvar_direct(struct bhnd_nvram_io *io, const char *name, void *buf, size_t *len, bhnd_nvram_type type) { const bhnd_sprom_layout *layout; bhnd_sprom_opcode_state state; const struct bhnd_nvram_vardefn *var; size_t vid; int error; /* Look up the variable definition and ID */ if ((var = bhnd_nvram_find_vardefn(name)) == NULL) return (ENOENT); vid = bhnd_nvram_get_vardefn_id(var); /* Identify the SPROM image layout */ if ((error = bhnd_nvram_sprom_ident(io, &layout))) return (error); /* Initialize SPROM layout interpreter */ if ((error = bhnd_sprom_opcode_init(&state, layout))) { BHND_NV_LOG("error initializing opcode state: %d\n", error); return (ENXIO); } /* Find SPROM layout entry for the requested variable */ while ((error = bhnd_sprom_opcode_next_var(&state)) == 0) { bhnd_sprom_opcode_idx_entry entry; union bhnd_nvram_sprom_storage storage; bhnd_nvram_val val; /* Fetch the variable's entry state */ if ((error = bhnd_sprom_opcode_init_entry(&state, &entry))) return (error); /* Match against expected VID */ if (entry.vid != vid) continue; /* Decode variable to a new value instance */ error = bhnd_nvram_sprom_read_var(&state, &entry, io, &storage, &val); if (error) return (error); /* Perform value coercion */ error = bhnd_nvram_val_encode(&val, buf, len, type); /* Clean up */ bhnd_nvram_val_release(&val); return (error); } /* Hit EOF without matching the requested variable? */ if (error == ENOENT) return (ENOENT); /* Some other parse error occurred */ return (error); } /** * Return the SPROM layout definition for the given @p sromrev, or NULL if * not found. */ static const bhnd_sprom_layout * bhnd_nvram_sprom_get_layout(uint8_t sromrev) { /* Find matching SPROM layout definition */ for (size_t i = 0; i < bhnd_sprom_num_layouts; i++) { if (bhnd_sprom_layouts[i].rev == sromrev) return (&bhnd_sprom_layouts[i]); } /* Not found */ return (NULL); } /** * Serialize a SPROM variable. * * @param state The SPROM opcode state describing the layout of @p io. * @param entry The variable's SPROM opcode index entry. * @param value The value to encode to @p io as per @p entry. * @param io I/O context to which @p value should be written, or NULL * if no output should be produced. This may be used to validate * values prior to write. * * @retval 0 success * @retval EFTYPE If value coercion from @p value to the type required by * @p entry is unsupported.
* @retval ERANGE If value coercion from @p value would overflow * (or underflow) the type required by @p entry. * @retval non-zero If serialization otherwise fails, a regular unix error * code will be returned. */ static int bhnd_nvram_sprom_write_var(bhnd_sprom_opcode_state *state, bhnd_sprom_opcode_idx_entry *entry, bhnd_nvram_val *value, struct bhnd_nvram_io *io) { const struct bhnd_nvram_vardefn *var; uint32_t u32[BHND_SPROM_ARRAY_MAXLEN]; bhnd_nvram_type itype, var_base_type; size_t ipos, ilen, nelem; int error; /* Fetch variable definition and the native element type */ var = bhnd_nvram_get_vardefn(entry->vid); BHND_NV_ASSERT(var != NULL, ("missing variable definition")); var_base_type = bhnd_nvram_base_type(var->type); /* Fetch the element count from the SPROM variable layout definition */ if ((error = bhnd_sprom_opcode_eval_var(state, entry))) return (error); nelem = state->var.nelem; BHND_NV_ASSERT(nelem <= var->nelem, ("SPROM nelem=%zu exceeds maximum " "NVRAM nelem=%hhu", nelem, var->nelem)); /* Promote the data to a common 32-bit representation */ if (bhnd_nvram_is_signed_type(var_base_type)) itype = BHND_NVRAM_TYPE_INT32_ARRAY; else itype = BHND_NVRAM_TYPE_UINT32_ARRAY; /* Calculate total size of the 32-bit promoted representation */ if ((ilen = bhnd_nvram_value_size(NULL, 0, itype, nelem)) == 0) { /* Variable-width types are unsupported */ BHND_NV_LOG("invalid %s SPROM variable type %d\n", var->name, var->type); return (EFTYPE); } /* The native representation must fit within our scratch array */ if (ilen > sizeof(u32)) { BHND_NV_LOG("error encoding '%s', SPROM_ARRAY_MAXLEN " "incorrect\n", var->name); return (EFTYPE); } /* Initialize our common 32-bit value representation */ if (bhnd_nvram_val_type(value) == BHND_NVRAM_TYPE_NULL) { /* No value provided; can this variable be encoded as missing * by setting all bits to one? */ if (!(var->flags & BHND_NVRAM_VF_IGNALL1)) { BHND_NV_LOG("missing required property: %s\n", var->name); return (EINVAL); } /* Set all bits */ memset(u32, 0xFF, ilen); } else { bhnd_nvram_val bcm_val; const void *var_ptr; bhnd_nvram_type var_type, raw_type; size_t var_len, enc_nelem; /* Try to coerce the value to the native variable format. */ error = bhnd_nvram_val_convert_init(&bcm_val, var->fmt, value, BHND_NVRAM_VAL_DYNAMIC|BHND_NVRAM_VAL_BORROW_DATA); if (error) { BHND_NV_LOG("error converting input type %s to %s " "format\n", bhnd_nvram_type_name(bhnd_nvram_val_type(value)), bhnd_nvram_val_fmt_name(var->fmt)); return (error); } var_ptr = bhnd_nvram_val_bytes(&bcm_val, &var_len, &var_type); /* * Promote to a common 32-bit representation. * * We must use the raw type to interpret the input data as its * underlying integer representation -- otherwise, coercion * would attempt to parse the input as its complex * representation. * * For example, direct CHAR -> UINT32 coercion would attempt to * parse the character as a decimal integer, rather than * promoting the raw UTF8 byte value to a 32-bit value. 
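* * (Illustrative: for the char 'a', raw promotion yields the byte value 0x61, whereas direct CHAR -> UINT32 value coercion would instead attempt to parse "a" as a decimal string and fail.)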
*/ raw_type = bhnd_nvram_raw_type(var_type); error = bhnd_nvram_value_coerce(var_ptr, var_len, raw_type, u32, &ilen, itype); /* Clean up temporary value representation */ bhnd_nvram_val_release(&bcm_val); /* Report coercion failure */ if (error) { BHND_NV_LOG("error promoting %s to %s: %d\n", bhnd_nvram_type_name(var_type), bhnd_nvram_type_name(itype), error); return (error); } /* Encoded element count must match SPROM's definition */ error = bhnd_nvram_value_nelem(u32, ilen, itype, &enc_nelem); if (error) return (error); if (enc_nelem != nelem) { const char *type_name; type_name = bhnd_nvram_type_name(var_base_type); BHND_NV_LOG("invalid %s property value '%s[%zu]': " "required %s[%zu]", var->name, type_name, enc_nelem, type_name, nelem); return (EFTYPE); } } /* * Seek to the start of the variable's SPROM layout definition and * iterate over all bindings. */ if ((error = bhnd_sprom_opcode_seek(state, entry))) { BHND_NV_LOG("variable seek failed: %d\n", error); return (error); } ipos = 0; while ((error = bhnd_sprom_opcode_next_binding(state)) == 0) { bhnd_sprom_opcode_bind *binding; bhnd_sprom_opcode_var *binding_var; size_t offset; uint32_t skip_out_bytes; BHND_NV_ASSERT( state->var_state >= SPROM_OPCODE_VAR_STATE_OPEN, ("invalid var state")); BHND_NV_ASSERT(state->var.have_bind, ("invalid bind state")); binding_var = &state->var; binding = &state->var.bind; /* Calculate output skip bytes for this binding. * * Skip directions are defined in terms of decoding, and * reversed when encoding. */ skip_out_bytes = binding->skip_in; error = bhnd_sprom_opcode_apply_scale(state, &skip_out_bytes); if (error) return (error); /* Bind */ offset = state->offset; for (size_t i = 0; i < binding->count; i++) { if (ipos >= nelem) { BHND_NV_LOG("input skip %u positioned %zu " "beyond nelem %zu\n", binding->skip_out, ipos, nelem); return (EINVAL); } /* Write next offset */ if (io != NULL) { error = bhnd_nvram_sprom_write_offset(var, io, binding_var->base_type, offset, binding_var->mask, binding_var->shift, u32[ipos]); if (error) return (error); } /* Adjust output position; this was already verified to * not overflow/underflow during SPROM opcode * evaluation */ if (binding->skip_in_negative) { offset -= skip_out_bytes; } else { offset += skip_out_bytes; } /* Skip advancing input if additional bindings are * required to fully encode intv */ if (binding->skip_out == 0) continue; /* Advance input position */ if (SIZE_MAX - binding->skip_out < ipos) { BHND_NV_LOG("output skip %u would overflow " "%zu\n", binding->skip_out, ipos); return (EINVAL); } ipos += binding->skip_out; } } /* Did we iterate all bindings until hitting end of the variable * definition? 
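* * (bhnd_sprom_opcode_next_binding() is expected to signal exhaustion of the variable's bindings by returning ENOENT; any other non-zero result below is treated as a real parse error.)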
*/ BHND_NV_ASSERT(error != 0, ("loop terminated early")); if (error != ENOENT) return (error); return (0); } static int bhnd_nvram_sprom_serialize(bhnd_nvram_data_class *cls, bhnd_nvram_plist *props, bhnd_nvram_plist *options, void *outp, size_t *olen) { bhnd_sprom_opcode_state state; struct bhnd_nvram_io *io; bhnd_nvram_prop *prop; bhnd_sprom_opcode_idx_entry *entry; const bhnd_sprom_layout *layout; size_t limit; uint8_t crc; uint8_t sromrev; int error; limit = *olen; layout = NULL; io = NULL; /* Fetch sromrev property */ if (!bhnd_nvram_plist_contains(props, BHND_NVAR_SROMREV)) { BHND_NV_LOG("missing required property: %s\n", BHND_NVAR_SROMREV); return (EINVAL); } error = bhnd_nvram_plist_get_uint8(props, BHND_NVAR_SROMREV, &sromrev); if (error) { BHND_NV_LOG("error reading sromrev property: %d\n", error); return (EFTYPE); } /* Find SPROM layout definition */ if ((layout = bhnd_nvram_sprom_get_layout(sromrev)) == NULL) { BHND_NV_LOG("unsupported sromrev: %hhu\n", sromrev); return (EFTYPE); } /* Provide required size to caller */ *olen = layout->size; if (outp == NULL) return (0); else if (limit < *olen) return (ENOMEM); /* Initialize SPROM layout interpreter */ if ((error = bhnd_sprom_opcode_init(&state, layout))) { BHND_NV_LOG("error initializing opcode state: %d\n", error); return (ENXIO); } /* Check for unsupported properties */ prop = NULL; while ((prop = bhnd_nvram_plist_next(props, prop)) != NULL) { const char *name; /* Fetch the corresponding SPROM layout index entry */ name = bhnd_nvram_prop_name(prop); entry = bhnd_sprom_opcode_index_find(&state, name); if (entry == NULL) { BHND_NV_LOG("property '%s' unsupported by sromrev " "%hhu\n", name, layout->rev); error = EINVAL; goto finished; } } /* Zero-initialize output */ memset(outp, 0, *olen); /* Allocate wrapping I/O context for output buffer */ io = bhnd_nvram_ioptr_new(outp, *olen, *olen, BHND_NVRAM_IOPTR_RDWR); if (io == NULL) { error = ENOMEM; goto finished; } /* * Serialize all SPROM variable data. */ entry = NULL; while ((entry = bhnd_sprom_opcode_index_next(&state, entry)) != NULL) { const struct bhnd_nvram_vardefn *var; bhnd_nvram_val *val; var = bhnd_nvram_get_vardefn(entry->vid); BHND_NV_ASSERT(var != NULL, ("missing variable definition")); /* Fetch prop; will be NULL if unavailable */ prop = bhnd_nvram_plist_get_prop(props, var->name); if (prop != NULL) { val = bhnd_nvram_prop_val(prop); } else { val = BHND_NVRAM_VAL_NULL; } /* Attempt to serialize the property value to the appropriate * offset within the output buffer */ error = bhnd_nvram_sprom_write_var(&state, entry, val, io); if (error) { BHND_NV_LOG("error serializing %s to required type " "%s: %d\n", var->name, bhnd_nvram_type_name(var->type), error); /* ENOMEM is reserved for signaling that the output * buffer capacity is insufficient */ if (error == ENOMEM) error = EINVAL; goto finished; } } /* * Write magic value, if any. */ if (!(layout->flags & SPROM_LAYOUT_MAGIC_NONE)) { uint16_t magic; magic = htole16(layout->magic_value); error = bhnd_nvram_io_write(io, layout->magic_offset, &magic, sizeof(magic)); if (error) { BHND_NV_LOG("error writing magic value: %d\n", error); goto finished; } } /* Calculate the CRC over all SPROM data, not including the CRC byte. */ crc = ~bhnd_nvram_crc8(outp, layout->crc_offset, BHND_NVRAM_CRC8_INITIAL); /* Write the checksum. */ error = bhnd_nvram_io_write(io, layout->crc_offset, &crc, sizeof(crc)); if (error) { BHND_NV_LOG("error writing CRC value: %d\n", error); goto finished; } /* * Success! 
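* * A caller-side sketch of the sizing convention used above (NULL options and the allocation call are illustrative; error handling elided): * * size_t olen; * bhnd_nvram_sprom_serialize(cls, props, NULL, NULL, &olen); // size query * void *buf = bhnd_nv_malloc(olen); // hypothetical allocation * bhnd_nvram_sprom_serialize(cls, props, NULL, buf, &olen); // real write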
*/ error = 0; finished: bhnd_sprom_opcode_fini(&state); if (io != NULL) bhnd_nvram_io_free(io); return (error); } static int bhnd_nvram_sprom_new(struct bhnd_nvram_data *nv, struct bhnd_nvram_io *io) { struct bhnd_nvram_sprom *sp; int error; sp = (struct bhnd_nvram_sprom *)nv; /* Identify the SPROM input data */ if ((error = bhnd_nvram_sprom_ident(io, &sp->layout))) return (error); /* Copy SPROM image to our shadow buffer */ sp->data = bhnd_nvram_iobuf_copy_range(io, 0, sp->layout->size); if (sp->data == NULL) { error = ENOMEM; goto failed; } /* Initialize SPROM binding eval state */ if ((error = bhnd_sprom_opcode_init(&sp->state, sp->layout))) goto failed; return (0); failed: if (sp->data != NULL) bhnd_nvram_io_free(sp->data); return (error); } static void bhnd_nvram_sprom_free(struct bhnd_nvram_data *nv) { struct bhnd_nvram_sprom *sp = (struct bhnd_nvram_sprom *)nv; bhnd_sprom_opcode_fini(&sp->state); bhnd_nvram_io_free(sp->data); } size_t bhnd_nvram_sprom_count(struct bhnd_nvram_data *nv) { struct bhnd_nvram_sprom *sprom = (struct bhnd_nvram_sprom *)nv; return (sprom->layout->num_vars); } static bhnd_nvram_plist * bhnd_nvram_sprom_options(struct bhnd_nvram_data *nv) { return (NULL); } static uint32_t bhnd_nvram_sprom_caps(struct bhnd_nvram_data *nv) { return (BHND_NVRAM_DATA_CAP_INDEXED); } static const char * bhnd_nvram_sprom_next(struct bhnd_nvram_data *nv, void **cookiep) { struct bhnd_nvram_sprom *sp; bhnd_sprom_opcode_idx_entry *entry; const struct bhnd_nvram_vardefn *var; sp = (struct bhnd_nvram_sprom *)nv; /* Find next index entry that is not disabled by virtue of IGNALL1 */ entry = *cookiep; while ((entry = bhnd_sprom_opcode_index_next(&sp->state, entry))) { /* Update cookiep and fetch variable definition */ *cookiep = entry; var = SPROM_COOKIE_TO_NVRAM_VAR(*cookiep); + BHND_NV_ASSERT(var != NULL, ("invalid cookiep %p", cookiep)); /* We might need to parse the variable's value to determine * whether it should be treated as unset */ if (var->flags & BHND_NVRAM_VF_IGNALL1) { int error; size_t len; error = bhnd_nvram_sprom_getvar(nv, *cookiep, NULL, &len, var->type); if (error) { BHND_NV_ASSERT(error == ENOENT, ("unexpected " "error parsing variable: %d", error)); continue; } } /* Found! */ return (var->name); } /* Reached end of index entries */ return (NULL); } static void * bhnd_nvram_sprom_find(struct bhnd_nvram_data *nv, const char *name) { struct bhnd_nvram_sprom *sp; bhnd_sprom_opcode_idx_entry *entry; sp = (struct bhnd_nvram_sprom *)nv; entry = bhnd_sprom_opcode_index_find(&sp->state, name); return (entry); } /** * Write @p value of @p type to the SPROM @p data at @p offset, applying * @p mask and @p shift, and OR with the existing data. * * @param var The NVRAM variable definition. * @param data The SPROM data to be modified. * @param type The type to write at @p offset. * @param offset The data offset to be written. * @param mask The mask to be applied to @p value after shifting. * @param shift The shift to be applied to @p value; if positive, a left * shift will be applied, if negative, a right shift (this is the reverse of the * decoding behavior) * @param value The value to be written. The parsed value will be OR'd with the * current contents of @p data at @p offset.
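* * Worked example (field layout hypothetical): writing value 0x3 with shift 4 and mask 0xF0 into a uint8 offset stores (0x3 << 4) & 0xF0 == 0x30, after first clearing the field's bits in the current offset contents.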
*/ static int bhnd_nvram_sprom_write_offset(const struct bhnd_nvram_vardefn *var, struct bhnd_nvram_io *data, bhnd_nvram_type type, size_t offset, uint32_t mask, int8_t shift, uint32_t value) { union bhnd_nvram_sprom_storage scratch; int error; #define NV_WRITE_INT(_widen, _repr, _swap) do { \ /* Narrow the 32-bit representation */ \ scratch._repr[1] = (_widen)value; \ \ /* Shift and mask the new value */ \ if (shift > 0) \ scratch._repr[1] <<= shift; \ else if (shift < 0) \ scratch._repr[1] >>= -shift; \ scratch._repr[1] &= mask; \ \ /* Swap to output byte order */ \ scratch._repr[1] = _swap(scratch._repr[1]); \ \ /* Fetch the current value */ \ error = bhnd_nvram_io_read(data, offset, \ &scratch._repr[0], sizeof(scratch._repr[0])); \ if (error) { \ BHND_NV_LOG("error reading %s SPROM offset " \ "%#zx: %d\n", var->name, offset, error); \ return (EFTYPE); \ } \ \ /* Mask and set our new value's bits in the current \ * value */ \ if (shift >= 0) \ scratch._repr[0] &= ~_swap(mask << shift); \ else if (shift < 0) \ scratch._repr[0] &= ~_swap(mask >> (-shift)); \ scratch._repr[0] |= scratch._repr[1]; \ \ /* Perform write */ \ error = bhnd_nvram_io_write(data, offset, \ &scratch._repr[0], sizeof(scratch._repr[0])); \ if (error) { \ BHND_NV_LOG("error writing %s SPROM offset " \ "%#zx: %d\n", var->name, offset, error); \ return (EFTYPE); \ } \ } while(0) /* Apply mask/shift and widen to a common 32bit representation */ switch (type) { case BHND_NVRAM_TYPE_UINT8: NV_WRITE_INT(uint32_t, u8, ); break; case BHND_NVRAM_TYPE_UINT16: NV_WRITE_INT(uint32_t, u16, htole16); break; case BHND_NVRAM_TYPE_UINT32: NV_WRITE_INT(uint32_t, u32, htole32); break; case BHND_NVRAM_TYPE_INT8: NV_WRITE_INT(int32_t, i8, ); break; case BHND_NVRAM_TYPE_INT16: NV_WRITE_INT(int32_t, i16, htole16); break; case BHND_NVRAM_TYPE_INT32: NV_WRITE_INT(int32_t, i32, htole32); break; case BHND_NVRAM_TYPE_CHAR: NV_WRITE_INT(uint32_t, u8, ); break; default: BHND_NV_LOG("unhandled %s offset type: %d\n", var->name, type); return (EFTYPE); } #undef NV_WRITE_INT return (0); } /** * Read the value of @p type from the SPROM @p data at @p offset, apply @p mask * and @p shift, and OR with the existing @p value. * * @param var The NVRAM variable definition. * @param data The SPROM data to be decoded. * @param type The type to read at @p offset * @param offset The data offset to be read. * @param mask The mask to be applied to the value read at @p offset. * @param shift The shift to be applied after masking; if positive, a right * shift will be applied, if negative, a left shift. * @param value The read destination; the parsed value will be OR'd with the * current contents of @p value. */ static int bhnd_nvram_sprom_read_offset(const struct bhnd_nvram_vardefn *var, struct bhnd_nvram_io *data, bhnd_nvram_type type, size_t offset, uint32_t mask, int8_t shift, uint32_t *value) { union bhnd_nvram_sprom_storage scratch; int error; #define NV_PARSE_INT(_widen, _repr, _swap) do { \ /* Perform read */ \ error = bhnd_nvram_io_read(data, offset, \ &scratch._repr[0], sizeof(scratch._repr[0])); \ if (error) { \ BHND_NV_LOG("error reading %s SPROM offset " \ "%#zx: %d\n", var->name, offset, error); \ return (EFTYPE); \ } \ \ /* Swap to host byte order */ \ scratch._repr[0] = _swap(scratch._repr[0]); \ \ /* Mask and shift the value */ \ scratch._repr[0] &= mask; \ if (shift > 0) { \ scratch. _repr[0] >>= shift; \ } else if (shift < 0) { \ scratch. 
_repr[0] <<= -shift; \ } \ \ /* Widen to 32-bit representation and OR with current \ * value */ \ (*value) |= (_widen)scratch._repr[0]; \ } while(0) /* Apply mask/shift and widen to a common 32bit representation */ switch (type) { case BHND_NVRAM_TYPE_UINT8: NV_PARSE_INT(uint32_t, u8, ); break; case BHND_NVRAM_TYPE_UINT16: NV_PARSE_INT(uint32_t, u16, le16toh); break; case BHND_NVRAM_TYPE_UINT32: NV_PARSE_INT(uint32_t, u32, le32toh); break; case BHND_NVRAM_TYPE_INT8: NV_PARSE_INT(int32_t, i8, ); break; case BHND_NVRAM_TYPE_INT16: NV_PARSE_INT(int32_t, i16, le16toh); break; case BHND_NVRAM_TYPE_INT32: NV_PARSE_INT(int32_t, i32, le32toh); break; case BHND_NVRAM_TYPE_CHAR: NV_PARSE_INT(uint32_t, u8, ); break; default: BHND_NV_LOG("unhandled %s offset type: %d\n", var->name, type); return (EFTYPE); } #undef NV_PARSE_INT return (0); } /** * Read a SPROM variable value from @p io. * * @param state The SPROM opcode state describing the layout of @p io. * @param entry The variable's SPROM opcode index entry. * @param io The input I/O context. * @param storage Storage to be used with @p val. * @param[out] val Value instance to be initialized with the * parsed variable data. * * The returned @p val instance will hold a borrowed reference to @p storage, * and must be copied via bhnd_nvram_val_copy() if it will be referenced beyond * the lifetime of @p storage. * * The caller is responsible for releasing any allocated value state * via bhnd_nvram_val_release(). */ static int bhnd_nvram_sprom_read_var(struct bhnd_sprom_opcode_state *state, struct bhnd_sprom_opcode_idx_entry *entry, struct bhnd_nvram_io *io, union bhnd_nvram_sprom_storage *storage, bhnd_nvram_val *val) { union bhnd_nvram_sprom_storage *inp; const struct bhnd_nvram_vardefn *var; bhnd_nvram_type var_btype; uint32_t intv; size_t ilen, ipos, iwidth; size_t nelem; bool all_bits_set; int error; /* Fetch canonical variable definition */ var = bhnd_nvram_get_vardefn(entry->vid); BHND_NV_ASSERT(var != NULL, ("invalid entry")); /* * Fetch the array length from the SPROM variable definition. * * This will generally be identical to the array length provided by the * canonical NVRAM variable definition, but some SPROM layouts may * define a smaller element count. */ if ((error = bhnd_sprom_opcode_eval_var(state, entry))) { BHND_NV_LOG("variable evaluation failed: %d\n", error); return (error); } nelem = state->var.nelem; if (nelem > var->nelem) { BHND_NV_LOG("SPROM array element count %zu cannot be " "represented by '%s' element count of %hhu\n", nelem, var->name, var->nelem); return (EFTYPE); } /* Fetch the var's base element type */ var_btype = bhnd_nvram_base_type(var->type); /* Calculate total byte length of the native encoding */ if ((iwidth = bhnd_nvram_value_size(NULL, 0, var_btype, 1)) == 0) { /* SPROM does not use (and we do not support) decoding of * variable-width data types */ BHND_NV_LOG("invalid SPROM data type: %d\n", var->type); return (EFTYPE); } ilen = nelem * iwidth; /* Decode into our caller's local storage */ inp = storage; if (ilen > sizeof(*storage)) { BHND_NV_LOG("error decoding '%s', SPROM_ARRAY_MAXLEN " "incorrect\n", var->name); return (EFTYPE); } /* Zero-initialize our decode buffer; any output elements skipped * during decode should default to zero. */ memset(inp, 0, ilen); /* * Decode the SPROM data, iteratively decoding up to nelem values.
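* * (Illustrative, mirroring the encode path: a binding with mask 0xF0 and shift 4 extracts (stored & 0xF0) >> 4 from each offset read, OR-ing successive partial values into intv until skip_out advances the output position.)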
*/ if ((error = bhnd_sprom_opcode_seek(state, entry))) { BHND_NV_LOG("variable seek failed: %d\n", error); return (error); } ipos = 0; intv = 0x0; if (var->flags & BHND_NVRAM_VF_IGNALL1) all_bits_set = true; else all_bits_set = false; while ((error = bhnd_sprom_opcode_next_binding(state)) == 0) { bhnd_sprom_opcode_bind *binding; bhnd_sprom_opcode_var *binding_var; bhnd_nvram_type intv_type; size_t offset; size_t nbyte; uint32_t skip_in_bytes; void *ptr; BHND_NV_ASSERT( state->var_state >= SPROM_OPCODE_VAR_STATE_OPEN, ("invalid var state")); BHND_NV_ASSERT(state->var.have_bind, ("invalid bind state")); binding_var = &state->var; binding = &state->var.bind; if (ipos >= nelem) { BHND_NV_LOG("output skip %u positioned " "%zu beyond nelem %zu\n", binding->skip_out, ipos, nelem); return (EINVAL); } /* Calculate input skip bytes for this binding */ skip_in_bytes = binding->skip_in; error = bhnd_sprom_opcode_apply_scale(state, &skip_in_bytes); if (error) return (error); /* Bind */ offset = state->offset; for (size_t i = 0; i < binding->count; i++) { /* Read the offset value, OR'ing with the current * value of intv */ error = bhnd_nvram_sprom_read_offset(var, io, binding_var->base_type, offset, binding_var->mask, binding_var->shift, &intv); if (error) return (error); /* If IGNALL1, record whether value does not have * all bits set. */ if (var->flags & BHND_NVRAM_VF_IGNALL1 && all_bits_set) { uint32_t all1; all1 = binding_var->mask; if (binding_var->shift > 0) all1 >>= binding_var->shift; else if (binding_var->shift < 0) all1 <<= -binding_var->shift; if ((intv & all1) != all1) all_bits_set = false; } /* Adjust input position; this was already verified to * not overflow/underflow during SPROM opcode * evaluation */ if (binding->skip_in_negative) { offset -= skip_in_bytes; } else { offset += skip_in_bytes; } /* Skip writing to inp if additional bindings are * required to fully populate intv */ if (binding->skip_out == 0) continue; /* We use bhnd_nvram_value_coerce() to perform * overflow-checked coercion from the widened * uint32/int32 intv value to the requested output * type */ if (bhnd_nvram_is_signed_type(var_btype)) intv_type = BHND_NVRAM_TYPE_INT32; else intv_type = BHND_NVRAM_TYPE_UINT32; /* Calculate address of the current element output * position */ ptr = (uint8_t *)inp + (iwidth * ipos); /* Perform coercion of the array element */ nbyte = iwidth; error = bhnd_nvram_value_coerce(&intv, sizeof(intv), intv_type, ptr, &nbyte, var_btype); if (error) return (error); /* Clear temporary state */ intv = 0x0; /* Advance output position */ if (SIZE_MAX - binding->skip_out < ipos) { BHND_NV_LOG("output skip %u would overflow " "%zu\n", binding->skip_out, ipos); return (EINVAL); } ipos += binding->skip_out; } } /* Did we iterate all bindings until hitting end of the variable * definition? */ BHND_NV_ASSERT(error != 0, ("loop terminated early")); if (error != ENOENT) { return (error); } /* If marked IGNALL1 and all bits are set, treat variable as * unavailable */ if ((var->flags & BHND_NVRAM_VF_IGNALL1) && all_bits_set) return (ENOENT); /* Provide value wrapper */ return (bhnd_nvram_val_init(val, var->fmt, inp, ilen, var->type, BHND_NVRAM_VAL_BORROW_DATA)); } /** * Common variable decoding; fetches and decodes variable to @p val, * using @p storage for actual data storage. * * The returned @p val instance will hold a borrowed reference to @p storage, * and must be copied via bhnd_nvram_val_copy() if it will be referenced beyond * the lifetime of @p storage. 
* * The caller is responsible for releasing any allocated value state * via bhnd_nvram_val_release(). */ static int bhnd_nvram_sprom_getvar_common(struct bhnd_nvram_data *nv, void *cookiep, union bhnd_nvram_sprom_storage *storage, bhnd_nvram_val *val) { struct bhnd_nvram_sprom *sp; bhnd_sprom_opcode_idx_entry *entry; const struct bhnd_nvram_vardefn *var; BHND_NV_ASSERT(cookiep != NULL, ("NULL variable cookiep")); sp = (struct bhnd_nvram_sprom *)nv; entry = cookiep; /* Fetch canonical variable definition */ var = SPROM_COOKIE_TO_NVRAM_VAR(cookiep); BHND_NV_ASSERT(var != NULL, ("invalid cookiep %p", cookiep)); return (bhnd_nvram_sprom_read_var(&sp->state, entry, sp->data, storage, val)); } static int bhnd_nvram_sprom_getvar_order(struct bhnd_nvram_data *nv, void *cookiep1, void *cookiep2) { struct bhnd_sprom_opcode_idx_entry *e1, *e2; e1 = cookiep1; e2 = cookiep2; /* Use the index entry order; this matches the order of variables * returned via bhnd_nvram_sprom_next() */ if (e1 < e2) return (-1); else if (e1 > e2) return (1); return (0); } static int bhnd_nvram_sprom_getvar(struct bhnd_nvram_data *nv, void *cookiep, void *buf, size_t *len, bhnd_nvram_type otype) { bhnd_nvram_val val; union bhnd_nvram_sprom_storage storage; int error; /* Decode variable to a new value instance */ error = bhnd_nvram_sprom_getvar_common(nv, cookiep, &storage, &val); if (error) return (error); /* Perform value coercion */ error = bhnd_nvram_val_encode(&val, buf, len, otype); /* Clean up */ bhnd_nvram_val_release(&val); return (error); } static int bhnd_nvram_sprom_copy_val(struct bhnd_nvram_data *nv, void *cookiep, bhnd_nvram_val **value) { bhnd_nvram_val val; union bhnd_nvram_sprom_storage storage; int error; /* Decode variable to a new value instance */ error = bhnd_nvram_sprom_getvar_common(nv, cookiep, &storage, &val); if (error) return (error); /* Attempt to copy to heap */ *value = bhnd_nvram_val_copy(&val); bhnd_nvram_val_release(&val); if (*value == NULL) return (ENOMEM); return (0); } static const void * bhnd_nvram_sprom_getvar_ptr(struct bhnd_nvram_data *nv, void *cookiep, size_t *len, bhnd_nvram_type *type) { /* Unsupported */ return (NULL); } static const char * bhnd_nvram_sprom_getvar_name(struct bhnd_nvram_data *nv, void *cookiep) { const struct bhnd_nvram_vardefn *var; BHND_NV_ASSERT(cookiep != NULL, ("NULL variable cookiep")); var = SPROM_COOKIE_TO_NVRAM_VAR(cookiep); BHND_NV_ASSERT(var != NULL, ("invalid cookiep %p", cookiep)); return (var->name); } static int bhnd_nvram_sprom_filter_setvar(struct bhnd_nvram_data *nv, const char *name, bhnd_nvram_val *value, bhnd_nvram_val **result) { struct bhnd_nvram_sprom *sp; const struct bhnd_nvram_vardefn *var; bhnd_sprom_opcode_idx_entry *entry; bhnd_nvram_val *spval; int error; sp = (struct bhnd_nvram_sprom *)nv; /* Is this an externally immutable variable name? */ if (bhnd_sprom_is_external_immutable(name)) return (EINVAL); /* Variable must be defined in our SPROM layout */ if ((entry = bhnd_sprom_opcode_index_find(&sp->state, name)) == NULL) return (ENOENT); var = bhnd_nvram_get_vardefn(entry->vid); BHND_NV_ASSERT(var != NULL, ("missing variable definition")); /* Value must be convertible to the native variable type */ error = bhnd_nvram_val_convert_new(&spval, var->fmt, value, BHND_NVRAM_VAL_DYNAMIC); if (error) return (error); /* Value must be encodeable by our SPROM layout */ error = bhnd_nvram_sprom_write_var(&sp->state, entry, spval, NULL); if (error) { bhnd_nvram_val_release(spval); return (error); } /* Success. 
Transfer our ownership of the converted value to the * caller */ *result = spval; return (0); } static int bhnd_nvram_sprom_filter_unsetvar(struct bhnd_nvram_data *nv, const char *name) { struct bhnd_nvram_sprom *sp; const struct bhnd_nvram_vardefn *var; bhnd_sprom_opcode_idx_entry *entry; sp = (struct bhnd_nvram_sprom *)nv; /* Is this an externally immutable variable name? */ if (bhnd_sprom_is_external_immutable(name)) return (EINVAL); /* Variable must be defined in our SPROM layout */ if ((entry = bhnd_sprom_opcode_index_find(&sp->state, name)) == NULL) return (ENOENT); var = bhnd_nvram_get_vardefn(entry->vid); + BHND_NV_ASSERT(var != NULL, ("missing variable definition")); /* Variable must be capable of representing a NULL/deleted value. * * Since SPROM's layout is fixed, this requires IGNALL1 -- if * all bits are set, an IGNALL1 variable is treated as unset. */ if (!(var->flags & BHND_NVRAM_VF_IGNALL1)) return (EINVAL); return (0); } /** * Return true if @p name represents a special immutable variable name * (e.g. sromrev) that cannot be updated in an existing SPROM image. * * @param name The name to check. */ static bool bhnd_sprom_is_external_immutable(const char *name) { /* The layout revision is immutable and cannot be changed */ if (strcmp(name, BHND_NVAR_SROMREV) == 0) return (true); return (false); } Index: head/sys/dev/bhnd/siba/siba.c =================================================================== --- head/sys/dev/bhnd/siba/siba.c (revision 326870) +++ head/sys/dev/bhnd/siba/siba.c (revision 326871) @@ -1,1577 +1,1575 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "sibareg.h" #include "sibavar.h" static bhnd_erom_class_t * siba_get_erom_class(driver_t *driver) { return (&siba_erom_parser); } int siba_probe(device_t dev) { device_set_desc(dev, "SIBA BHND bus"); return (BUS_PROBE_DEFAULT); } /** * Default siba(4) bus driver implementation of DEVICE_ATTACH(). * * This implementation initializes internal siba(4) state and performs * bus enumeration, and must be called by subclassing drivers in * DEVICE_ATTACH() before any other bus methods. */ int siba_attach(device_t dev) { struct siba_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; SIBA_LOCK_INIT(sc); /* Enumerate children */ if ((error = siba_add_children(dev))) { device_delete_children(dev); SIBA_LOCK_DESTROY(sc); return (error); } return (0); } int siba_detach(device_t dev) { struct siba_softc *sc; int error; sc = device_get_softc(dev); if ((error = bhnd_generic_detach(dev))) return (error); SIBA_LOCK_DESTROY(sc); return (0); } int siba_resume(device_t dev) { return (bhnd_generic_resume(dev)); } int siba_suspend(device_t dev) { return (bhnd_generic_suspend(dev)); } static int siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct siba_softc *sc; const struct siba_devinfo *dinfo; const struct bhnd_core_info *cfg; sc = device_get_softc(dev); dinfo = device_get_ivars(child); cfg = &dinfo->core_id.core_info; switch (index) { case BHND_IVAR_VENDOR: *result = cfg->vendor; return (0); case BHND_IVAR_DEVICE: *result = cfg->device; return (0); case BHND_IVAR_HWREV: *result = cfg->hwrev; return (0); case BHND_IVAR_DEVICE_CLASS: *result = bhnd_core_class(cfg); return (0); case BHND_IVAR_VENDOR_NAME: *result = (uintptr_t) bhnd_vendor_name(cfg->vendor); return (0); case BHND_IVAR_DEVICE_NAME: *result = (uintptr_t) bhnd_core_name(cfg); return (0); case BHND_IVAR_CORE_INDEX: *result = cfg->core_idx; return (0); case BHND_IVAR_CORE_UNIT: *result = cfg->unit; return (0); case BHND_IVAR_PMU_INFO: SIBA_LOCK(sc); switch (dinfo->pmu_state) { case SIBA_PMU_NONE: *result = (uintptr_t)NULL; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_BHND: *result = (uintptr_t)dinfo->pmu.bhnd_info; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: panic("bhnd_get_pmu_info() called with siba PMU state " "%d", dinfo->pmu_state); return (ENXIO); } panic("invalid PMU state: %d", dinfo->pmu_state); return (ENXIO); default: return (ENOENT); } } static int siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct siba_softc *sc; struct siba_devinfo *dinfo; sc = device_get_softc(dev); dinfo = device_get_ivars(child); switch (index) { case BHND_IVAR_VENDOR: case BHND_IVAR_DEVICE: case BHND_IVAR_HWREV: case BHND_IVAR_DEVICE_CLASS: case BHND_IVAR_VENDOR_NAME: case BHND_IVAR_DEVICE_NAME: case BHND_IVAR_CORE_INDEX: case BHND_IVAR_CORE_UNIT: return (EINVAL); case BHND_IVAR_PMU_INFO: SIBA_LOCK(sc); switch (dinfo->pmu_state) { case SIBA_PMU_NONE: case SIBA_PMU_BHND: dinfo->pmu.bhnd_info = (void *)value; dinfo->pmu_state = SIBA_PMU_BHND; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: panic("bhnd_set_pmu_info() called with siba PMU state " "%d", dinfo->pmu_state); return (ENXIO); } panic("invalid PMU state: %d", dinfo->pmu_state); return (ENXIO); default: return (ENOENT); } } static struct resource_list * siba_get_resource_list(device_t dev, device_t child) { struct siba_devinfo *dinfo = 
device_get_ivars(child); return (&dinfo->resources); } /* BHND_BUS_ALLOC_PMU() */ static int siba_alloc_pmu(device_t dev, device_t child) { struct siba_softc *sc; struct siba_devinfo *dinfo; device_t chipc; device_t pwrctl; struct chipc_caps ccaps; siba_pmu_state pmu_state; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); pwrctl = NULL; /* Fetch ChipCommon capability flags */ chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC); if (chipc != NULL) { ccaps = *BHND_CHIPC_GET_CAPS(chipc); bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC); } else { memset(&ccaps, 0, sizeof(ccaps)); } /* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and * advertises PMU support */ if (ccaps.pmu) { if ((error = bhnd_generic_alloc_pmu(dev, child))) return (error); KASSERT(dinfo->pmu_state == SIBA_PMU_BHND, ("unexpected PMU state: %d", dinfo->pmu_state)); return (0); } /* * This is either a legacy PWRCTL chipset, or the device does not * support dynamic clock control. * * We need to map all bhnd(4) bus PMU requests to PWRCTL or no-op operations. */ if (ccaps.pwr_ctrl) { pmu_state = SIBA_PMU_PWRCTL; pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL); if (pwrctl == NULL) { device_printf(dev, "PWRCTL not found\n"); return (ENODEV); } } else { pmu_state = SIBA_PMU_FIXED; pwrctl = NULL; } SIBA_LOCK(sc); /* Per-core PMU state already allocated? */ if (dinfo->pmu_state != SIBA_PMU_NONE) { panic("duplicate PMU allocation for %s", device_get_nameunit(child)); } /* Update the child's PMU allocation state, and transfer ownership of * the PWRCTL provider reference (if any) */ dinfo->pmu_state = pmu_state; dinfo->pmu.pwrctl = pwrctl; SIBA_UNLOCK(sc); return (0); } /* BHND_BUS_RELEASE_PMU() */ static int siba_release_pmu(device_t dev, device_t child) { struct siba_softc *sc; struct siba_devinfo *dinfo; device_t pwrctl; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("pmu over-release for %s", device_get_nameunit(child)); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_release_pmu(dev, child)); case SIBA_PMU_PWRCTL: /* Requesting BHND_CLOCK_DYN releases any outstanding clock * reservations */ pwrctl = dinfo->pmu.pwrctl; error = bhnd_pwrctl_request_clock(pwrctl, child, BHND_CLOCK_DYN); if (error) { SIBA_UNLOCK(sc); return (error); } /* Clean up the child's PMU state */ dinfo->pmu_state = SIBA_PMU_NONE; dinfo->pmu.pwrctl = NULL; SIBA_UNLOCK(sc); /* Release the provider reference */ bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL); return (0); case SIBA_PMU_FIXED: /* Clean up the child's PMU state */ KASSERT(dinfo->pmu.pwrctl == NULL, ("PWRCTL reference with FIXED state")); dinfo->pmu_state = SIBA_PMU_NONE; dinfo->pmu.pwrctl = NULL; SIBA_UNLOCK(sc); return (0); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_GET_CLOCK_LATENCY() */ static int siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock, u_int *latency) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_get_clock_latency(dev, child, clock, latency));
case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock, latency); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* HT clock is always available, and incurs no transition * delay. */ switch (clock) { case BHND_CLOCK_HT: *latency = 0; return (0); default: return (ENODEV); } return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_GET_CLOCK_FREQ() */ static int siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock, u_int *freq) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_get_clock_freq(dev, child, clock, freq)); case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock, freq); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_REQUEST_EXT_RSRC() */ static int siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc) { struct siba_softc *sc; struct siba_devinfo *dinfo; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_request_ext_rsrc(dev, child, rsrc)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: /* HW does not support per-core external resources */ SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_RELEASE_EXT_RSRC() */ static int siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc) { struct siba_softc *sc; struct siba_devinfo *dinfo; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_release_ext_rsrc(dev, child, rsrc)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: /* HW does not support per-core external resources */ SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_REQUEST_CLOCK() */ static int siba_request_clock(device_t dev, device_t child, bhnd_clock clock) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_request_clock(dev, child, clock)); case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child, clock); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* HT clock is always available, and fulfills any of the * following clock requests */ switch (clock) { case BHND_CLOCK_DYN: case BHND_CLOCK_ILP: case BHND_CLOCK_ALP: case BHND_CLOCK_HT: return (0); default: return (ENODEV); } } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_ENABLE_CLOCKS() */ static int 
siba_enable_clocks(device_t dev, device_t child, uint32_t clocks) { struct siba_softc *sc; struct siba_devinfo *dinfo; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_enable_clocks(dev, child, clocks)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* All (supported) clocks are already enabled by default */ clocks &= ~(BHND_CLOCK_DYN | BHND_CLOCK_ILP | BHND_CLOCK_ALP | BHND_CLOCK_HT); if (clocks != 0) { device_printf(dev, "%s requested unknown clocks: %#x\n", device_get_nameunit(child), clocks); return (ENODEV); } return (0); } panic("invalid PMU state: %d", dinfo->pmu_state); } static int siba_read_iost(device_t dev, device_t child, uint16_t *iost) { uint32_t tmhigh; int error; error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4); if (error) return (error); *iost = (SIBA_REG_GET(tmhigh, TMH_SISF)); return (0); } static int siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl) { uint32_t ts_low; int error; if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4))) return (error); *ioctl = (SIBA_REG_GET(ts_low, TML_SICF)); return (0); } static int siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask) { struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t ts_low, ts_mask; if (device_get_parent(child) != dev) return (EINVAL); /* Fetch CFG0 mapping */ dinfo = device_get_ivars(child); if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* Mask and set TMSTATELOW core flag bits */ ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK; ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); return (0); } static bool siba_is_hw_suspended(device_t dev, device_t child) { uint32_t ts_low; uint16_t ioctl; int error; /* Fetch target state */ error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4); if (error) { device_printf(child, "error reading HW reset state: %d\n", error); return (true); } /* Is core held in RESET? */ if (ts_low & SIBA_TML_RESET) return (true); /* Is target reject enabled? */ if (ts_low & SIBA_TML_REJ_MASK) return (true); /* Is core clocked? */ ioctl = SIBA_REG_GET(ts_low, TML_SICF); if (!(ioctl & BHND_IOCTL_CLK_EN)) return (true); return (false); } static int siba_reset_hw(device_t dev, device_t child, uint16_t ioctl, uint16_t reset_ioctl) { struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t ts_low, imstate; uint16_t clkflags; int error; if (device_get_parent(child) != dev) return (EINVAL); dinfo = device_get_ivars(child); /* Can't suspend the core without access to the CFG0 registers */ if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */ clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE; if (ioctl & clkflags) return (EINVAL); /* Place core into known RESET state */ if ((error = bhnd_suspend_hw(child, reset_ioctl))) return (error); /* Set RESET, clear REJ, set the caller's IOCTL flags, and * force clocks to ensure the signal propagates throughout the * core. 
*/ ts_low = SIBA_TML_RESET | (ioctl << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT); siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, UINT32_MAX); /* Clear any target errors */ if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) { siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH, 0x0, SIBA_TMH_SERR); } /* Clear any initiator errors */ imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE); if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) { siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_IBE|SIBA_IM_TO); } /* Release from RESET while leaving clocks forced, ensuring the * signal propagates throughout the core */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, SIBA_TML_RESET); /* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE * bit and allow the core to manage clock gating. */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT)); return (0); } static int siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl) { struct siba_softc *sc; struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t idl, ts_low, ts_mask; uint16_t cflags, clkflags; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); /* Can't suspend the core without access to the CFG0 registers */ if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */ clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE; if (ioctl & clkflags) return (EINVAL); /* Already in RESET? */ ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW); if (ts_low & SIBA_TML_RESET) return (0); /* If clocks are already disabled, we can place the core directly * into RESET|REJ while setting the caller's IOCTL flags. */ cflags = SIBA_REG_GET(ts_low, TML_SICF); if (!(cflags & BHND_IOCTL_CLK_EN)) { ts_low = SIBA_TML_RESET | SIBA_TML_REJ | (ioctl << SIBA_TML_SICF_SHIFT); ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); return (0); } /* Reject further transactions reaching this core */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, SIBA_TML_REJ, SIBA_TML_REJ); /* Wait for transaction busy flag to clear for all transactions * initiated by this core */ error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH, 0x0, SIBA_TMH_BUSY, 100000); if (error) return (error); /* If this is an initiator core, we need to reject initiator * transactions too. */ idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW); if (idl & SIBA_IDL_INIT) { /* Reject further initiator transactions */ siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, SIBA_IM_RJ, SIBA_IM_RJ); /* Wait for initiator busy flag to clear */ error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_BY, 100000); if (error) return (error); } /* Put the core into RESET, set the caller's IOCTL flags, and * force clocks to ensure the RESET signal propagates throughout the * core. 
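* * (Illustrative: with ioctl == 0, ts_low below composes to SIBA_TML_RESET | ((BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT).)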
*/ ts_low = SIBA_TML_RESET | (ioctl << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT); ts_mask = SIBA_TML_RESET | SIBA_TML_SICF_MASK; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); - if (error) - return (error); /* Give RESET ample time */ DELAY(10); /* Clear previously asserted initiator reject */ if (idl & SIBA_IDL_INIT) { siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_RJ); } /* Disable all clocks, leaving RESET and REJ asserted */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT); /* * Core is now in RESET. * * If the core holds any PWRCTL clock reservations, we need to release * those now. This emulates the standard bhnd(4) PMU behavior of RESET * automatically clearing clkctl */ SIBA_LOCK(sc); if (dinfo->pmu_state == SIBA_PMU_PWRCTL) { error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child, BHND_CLOCK_DYN); SIBA_UNLOCK(sc); if (error) { device_printf(child, "failed to release clock request: " "%d\n", error); return (error); } return (0); } else { SIBA_UNLOCK(sc); return (0); } } static int siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value, u_int width) { struct siba_devinfo *dinfo; rman_res_t r_size; /* Must be directly attached */ if (device_get_parent(child) != dev) return (EINVAL); /* CFG0 registers must be available */ dinfo = device_get_ivars(child); if (dinfo->cfg_res[0] == NULL) return (ENODEV); /* Offset must fall within CFG0 */ r_size = rman_get_size(dinfo->cfg_res[0]->res); if (r_size < offset || r_size - offset < width) return (EFAULT); switch (width) { case 1: *((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0], offset); return (0); case 2: *((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0], offset); return (0); case 4: *((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0], offset); return (0); default: return (EINVAL); } } static int siba_write_config(device_t dev, device_t child, bus_size_t offset, const void *value, u_int width) { struct siba_devinfo *dinfo; struct bhnd_resource *r; rman_res_t r_size; /* Must be directly attached */ if (device_get_parent(child) != dev) return (EINVAL); /* CFG0 registers must be available */ dinfo = device_get_ivars(child); if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* Offset must fall within CFG0 */ r_size = rman_get_size(r->res); if (r_size < offset || r_size - offset < width) return (EFAULT); switch (width) { case 1: bhnd_bus_write_1(r, offset, *(const uint8_t *)value); return (0); case 2: bhnd_bus_write_2(r, offset, *(const uint16_t *)value); return (0); case 4: bhnd_bus_write_4(r, offset, *(const uint32_t *)value); return (0); default: return (EINVAL); } } static u_int siba_get_port_count(device_t dev, device_t child, bhnd_port_type type) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child, type)); dinfo = device_get_ivars(child); return (siba_port_count(&dinfo->core_id, type)); } static u_int siba_get_region_count(device_t dev, device_t child, bhnd_port_type type, u_int port) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child, type, port)); dinfo = device_get_ivars(child); return (siba_port_region_count(&dinfo->core_id,
type, port)); } static int siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num) { struct siba_devinfo *dinfo; struct siba_addrspace *addrspace; struct siba_cfg_block *cfg; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child, port_type, port_num, region_num)); dinfo = device_get_ivars(child); /* Look for a matching addrspace entry */ addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num); if (addrspace != NULL) return (addrspace->sa_rid); /* Try the config blocks */ cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num); if (cfg != NULL) return (cfg->cb_rid); /* Not found */ return (-1); } static int siba_decode_port_rid(device_t dev, device_t child, int type, int rid, bhnd_port_type *port_type, u_int *port_num, u_int *region_num) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child, type, rid, port_type, port_num, region_num)); dinfo = device_get_ivars(child); /* Ports are always memory mapped */ if (type != SYS_RES_MEMORY) return (EINVAL); /* Look for a matching addrspace entry */ for (u_int i = 0; i < dinfo->core_id.num_addrspace; i++) { if (dinfo->addrspace[i].sa_rid != rid) continue; *port_type = BHND_PORT_DEVICE; *port_num = siba_addrspace_device_port(i); *region_num = siba_addrspace_device_region(i); return (0); } /* Try the config blocks */ for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) { if (dinfo->cfg[i].cb_rid != rid) continue; *port_type = BHND_PORT_AGENT; *port_num = siba_cfg_agent_port(i); *region_num = siba_cfg_agent_region(i); return (0); } /* Not found */ return (ENOENT); } static int siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size) { struct siba_devinfo *dinfo; struct siba_addrspace *addrspace; struct siba_cfg_block *cfg; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) { return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child, port_type, port_num, region_num, addr, size)); } dinfo = device_get_ivars(child); /* Look for a matching addrspace */ addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num); if (addrspace != NULL) { *addr = addrspace->sa_base; *size = addrspace->sa_size - addrspace->sa_bus_reserved; return (0); } /* Look for a matching cfg block */ cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num); if (cfg != NULL) { *addr = cfg->cb_base; *size = cfg->cb_size; return (0); } /* Not found */ return (ENOENT); } /** * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT(). */ u_int siba_get_intr_count(device_t dev, device_t child) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child)); dinfo = device_get_ivars(child); if (!dinfo->intr_en) { /* No interrupts */ return (0); } else { /* One assigned interrupt */ return (1); } } /** * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC(). 
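* * (The returned ivec is the core's backplane interrupt flag number as read from TPSFLAG at enumeration time, not a host IRQ; the flag-to-IRQ mapping is performed separately via BHND_BUS_MAP_INTR().)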
*/ int siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child, intr, ivec)); /* Must be a valid interrupt ID */ if (intr >= siba_get_intr_count(dev, child)) return (ENXIO); KASSERT(intr == 0, ("invalid ivec %u", intr)); dinfo = device_get_ivars(child); KASSERT(dinfo->intr_en, ("core does not have an interrupt assigned")); *ivec = dinfo->intr.flag; return (0); } /** * Register all address space mappings for @p di. * * @param dev The siba bus device. * @param di The device info instance on which to register all address * space entries. * @param r A resource mapping the enumeration table block for @p di. */ static int siba_register_addrspaces(device_t dev, struct siba_devinfo *di, struct bhnd_resource *r) { struct siba_core_id *cid; uint32_t addr; uint32_t size; int error; cid = &di->core_id; /* Register the device address space entries */ for (uint8_t i = 0; i < di->core_id.num_addrspace; i++) { uint32_t adm; u_int adm_offset; uint32_t bus_reserved; /* Determine the register offset */ adm_offset = siba_admatch_offset(i); if (adm_offset == 0) { device_printf(dev, "addrspace %hhu is unsupported\n", i); return (ENODEV); } /* Fetch the address match register value */ adm = bhnd_bus_read_4(r, adm_offset); /* Parse the value */ if ((error = siba_parse_admatch(adm, &addr, &size))) { device_printf(dev, "failed to decode address match " "register value 0x%x\n", adm); return (error); } /* If this is the device's core/enumeration addrspace, * reserve the Sonics configuration register blocks for the * use of our bus. */ bus_reserved = 0; if (i == SIBA_CORE_ADDRSPACE) bus_reserved = cid->num_cfg_blocks * SIBA_CFG_SIZE; /* Append the region info */ error = siba_append_dinfo_region(di, i, addr, size, bus_reserved); if (error) return (error); } return (0); } /** * Register all interrupt descriptors for @p dinfo. Must be called after * configuration blocks have been mapped. * * @param dev The siba bus device. * @param child The siba child device. * @param dinfo The device info instance on which to register all interrupt * descriptor entries. * @param r A resource mapping the enumeration table block for @p dinfo. */ static int siba_register_interrupts(device_t dev, device_t child, struct siba_devinfo *dinfo, struct bhnd_resource *r) { uint32_t tpsflag; int error; /* Is backplane interrupt distribution enabled for this core? */ tpsflag = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_TPSFLAG)); if ((tpsflag & SIBA_TPS_F0EN0) == 0) { dinfo->intr_en = false; return (0); } /* Have one interrupt */ dinfo->intr_en = true; dinfo->intr.flag = SIBA_REG_GET(tpsflag, TPS_NUM0); dinfo->intr.mapped = false; dinfo->intr.irq = 0; dinfo->intr.rid = -1; /* Map the interrupt */ error = BHND_BUS_MAP_INTR(dev, child, 0 /* single intr is always 0 */, &dinfo->intr.irq); if (error) { device_printf(dev, "failed mapping interrupt line for core %u: " "%d\n", dinfo->core_id.core_info.core_idx, error); return (error); } dinfo->intr.mapped = true; /* Update the resource list */ dinfo->intr.rid = resource_list_add_next(&dinfo->resources, SYS_RES_IRQ, dinfo->intr.irq, dinfo->intr.irq, 1); return (0); } /** * Map per-core configuration blocks for @p dinfo. * * @param dev The siba bus device. * @param dinfo The device info instance on which to map all per-core * configuration blocks.
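* * Each block is mapped at a fixed offset from the core's register base; e.g. block i spans [sa_base + SIBA_CFG_OFFSET(i), sa_base + SIBA_CFG_OFFSET(i) + SIBA_CFG_SIZE - 1].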
 */
static int
siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
{
    struct siba_addrspace *addrspace;
    rman_res_t r_start, r_count, r_end;
    uint8_t num_cfg;
    int rid;

    num_cfg = dinfo->core_id.num_cfg_blocks;
    if (num_cfg > SIBA_MAX_CFG) {
        device_printf(dev, "config block count %hhu out of range\n",
            num_cfg);
        return (ENXIO);
    }

    /* Fetch the core register address space */
    addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
    if (addrspace == NULL) {
        device_printf(dev, "missing device registers\n");
        return (ENXIO);
    }

    /*
     * Map the per-core configuration blocks
     */
    for (uint8_t i = 0; i < num_cfg; i++) {
        /* Add to child's resource list */
        r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
        r_count = SIBA_CFG_SIZE;
        r_end = r_start + r_count - 1;
        rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
            r_start, r_end, r_count);

        /* Initialize config block descriptor */
        dinfo->cfg[i] = ((struct siba_cfg_block) {
            .cb_base = r_start,
            .cb_size = SIBA_CFG_SIZE,
            .cb_rid = rid
        });

        /* Map the config resource for bus-level access */
        dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
        dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
            SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
            r_count, RF_ACTIVE|RF_SHAREABLE);
        if (dinfo->cfg_res[i] == NULL) {
            device_printf(dev, "failed to allocate SIBA_CFG%hhu\n", i);
            return (ENXIO);
        }
    }

    return (0);
}

static device_t
siba_add_child(device_t dev, u_int order, const char *name, int unit)
{
    struct siba_devinfo *dinfo;
    device_t child;

    child = device_add_child_ordered(dev, order, name, unit);
    if (child == NULL)
        return (NULL);

    if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
        device_delete_child(dev, child);
        return (NULL);
    }

    device_set_ivars(child, dinfo);
    return (child);
}

static void
siba_child_deleted(device_t dev, device_t child)
{
    struct siba_devinfo *dinfo;

    /* Call required bhnd(4) implementation */
    bhnd_generic_child_deleted(dev, child);

    /* Free siba device info */
    if ((dinfo = device_get_ivars(child)) != NULL)
        siba_free_dinfo(dev, child, dinfo);

    device_set_ivars(child, NULL);
}

/**
 * Scan the core table and add all valid discovered cores to
 * the bus.
 *
 * @param dev The siba bus device.
 */
int
siba_add_children(device_t dev)
{
    const struct bhnd_chipid *chipid;
    struct siba_core_id *cores;
    struct bhnd_resource *r;
    device_t *children;
    int rid;
    int error;

    cores = NULL;
    r = NULL;

    chipid = BHND_BUS_GET_CHIPID(dev, dev);

    /* Allocate our temporary core and device table */
    cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_WAITOK);
    children = malloc(sizeof(*children) * chipid->ncores, M_BHND,
        M_WAITOK | M_ZERO);

    /*
     * Add child devices for all discovered cores.
     *
     * On bridged devices, we'll exhaust our available register windows if
     * we map config blocks on unpopulated/disabled cores. To avoid this, we
     * defer mapping of the per-core siba(4) config blocks until all cores
     * have been enumerated and otherwise configured.
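 *
 * Concretely, this is a two-pass scheme: the first loop below reads each
 * core's ID registers through a short-lived register mapping that is
 * released before the next iteration, while the second loop allocates
 * the long-lived config block mappings only for cores that remain
 * enabled.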
     */
    for (u_int i = 0; i < chipid->ncores; i++) {
        struct siba_devinfo *dinfo;
        device_t child;
        uint32_t idhigh, idlow;
        rman_res_t r_count, r_end, r_start;

        /* Map the core's register block */
        rid = 0;
        r_start = SIBA_CORE_ADDR(i);
        r_count = SIBA_CORE_SIZE;
        r_end = r_start + SIBA_CORE_SIZE - 1;
        r = bhnd_alloc_resource(dev, SYS_RES_MEMORY, &rid, r_start,
            r_end, r_count, RF_ACTIVE);
        if (r == NULL) {
            error = ENXIO;
            goto failed;
        }

        /* Read the core info */
        idhigh = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDHIGH));
        idlow = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDLOW));
        cores[i] = siba_parse_core_id(idhigh, idlow, i, 0);

        /* Determine and set unit number */
        for (u_int j = 0; j < i; j++) {
            struct bhnd_core_info *cur = &cores[i].core_info;
            struct bhnd_core_info *prev = &cores[j].core_info;

            if (prev->vendor == cur->vendor &&
                prev->device == cur->device)
                cur->unit++;
        }

        /* Add the child device */
        child = BUS_ADD_CHILD(dev, 0, NULL, -1);
        if (child == NULL) {
            error = ENXIO;
            goto failed;
        }
        children[i] = child;

        /* Initialize per-device bus info */
        if ((dinfo = device_get_ivars(child)) == NULL) {
            error = ENXIO;
            goto failed;
        }

        if ((error = siba_init_dinfo(dev, dinfo, &cores[i])))
            goto failed;

        /* Register the core's address space(s). */
        if ((error = siba_register_addrspaces(dev, dinfo, r)))
            goto failed;

        /* Register the core's interrupts */
        if ((error = siba_register_interrupts(dev, child, dinfo, r)))
            goto failed;

        /* Unmap the core's register block */
        bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);
        r = NULL;

        /* If pins are floating or the hardware is otherwise
         * unpopulated, the device shouldn't be used. */
        if (bhnd_is_hw_disabled(child))
            device_disable(child);
    }

    /* Map all valid cores' config register blocks and perform interrupt
     * assignment */
    for (u_int i = 0; i < chipid->ncores; i++) {
        struct siba_devinfo *dinfo;
        device_t child;

        child = children[i];

        /* Skip if core is disabled */
        if (bhnd_is_hw_disabled(child))
            continue;

        dinfo = device_get_ivars(child);

        /* Map the core's config blocks */
        if ((error = siba_map_cfg_resources(dev, dinfo)))
            goto failed;

        /* Issue bus callback for fully initialized child.
*/ BHND_BUS_CHILD_ADDED(dev, child); } free(cores, M_BHND); free(children, M_BHND); return (0); failed: for (u_int i = 0; i < chipid->ncores; i++) { if (children[i] == NULL) continue; device_delete_child(dev, children[i]); } free(cores, M_BHND); free(children, M_BHND); if (r != NULL) bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r); return (error); } static device_method_t siba_methods[] = { /* Device interface */ DEVMETHOD(device_probe, siba_probe), DEVMETHOD(device_attach, siba_attach), DEVMETHOD(device_detach, siba_detach), DEVMETHOD(device_resume, siba_resume), DEVMETHOD(device_suspend, siba_suspend), /* Bus interface */ DEVMETHOD(bus_add_child, siba_add_child), DEVMETHOD(bus_child_deleted, siba_child_deleted), DEVMETHOD(bus_read_ivar, siba_read_ivar), DEVMETHOD(bus_write_ivar, siba_write_ivar), DEVMETHOD(bus_get_resource_list, siba_get_resource_list), /* BHND interface */ DEVMETHOD(bhnd_bus_get_erom_class, siba_get_erom_class), DEVMETHOD(bhnd_bus_alloc_pmu, siba_alloc_pmu), DEVMETHOD(bhnd_bus_release_pmu, siba_release_pmu), DEVMETHOD(bhnd_bus_request_clock, siba_request_clock), DEVMETHOD(bhnd_bus_enable_clocks, siba_enable_clocks), DEVMETHOD(bhnd_bus_request_ext_rsrc, siba_request_ext_rsrc), DEVMETHOD(bhnd_bus_release_ext_rsrc, siba_release_ext_rsrc), DEVMETHOD(bhnd_bus_get_clock_freq, siba_get_clock_freq), DEVMETHOD(bhnd_bus_get_clock_latency, siba_get_clock_latency), DEVMETHOD(bhnd_bus_read_ioctl, siba_read_ioctl), DEVMETHOD(bhnd_bus_write_ioctl, siba_write_ioctl), DEVMETHOD(bhnd_bus_read_iost, siba_read_iost), DEVMETHOD(bhnd_bus_is_hw_suspended, siba_is_hw_suspended), DEVMETHOD(bhnd_bus_reset_hw, siba_reset_hw), DEVMETHOD(bhnd_bus_suspend_hw, siba_suspend_hw), DEVMETHOD(bhnd_bus_read_config, siba_read_config), DEVMETHOD(bhnd_bus_write_config, siba_write_config), DEVMETHOD(bhnd_bus_get_port_count, siba_get_port_count), DEVMETHOD(bhnd_bus_get_region_count, siba_get_region_count), DEVMETHOD(bhnd_bus_get_port_rid, siba_get_port_rid), DEVMETHOD(bhnd_bus_decode_port_rid, siba_decode_port_rid), DEVMETHOD(bhnd_bus_get_region_addr, siba_get_region_addr), DEVMETHOD(bhnd_bus_get_intr_count, siba_get_intr_count), DEVMETHOD(bhnd_bus_get_intr_ivec, siba_get_intr_ivec), DEVMETHOD_END }; DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc), bhnd_driver); MODULE_VERSION(siba, 1); MODULE_DEPEND(siba, bhnd, 1, 1, 1); Index: head/sys/dev/bwn/if_bwn_siba_compat.c =================================================================== --- head/sys/dev/bwn/if_bwn_siba_compat.c (revision 326870) +++ head/sys/dev/bwn/if_bwn_siba_compat.c (revision 326871) @@ -1,2556 +1,2556 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #include "bhnd_nvram_map.h" #include "if_bwn_siba_compat.h" static int bwn_bhnd_populate_nvram_data(device_t dev, struct bwn_bhnd_ctx *ctx); static inline bool bwn_bhnd_is_siba_reg(device_t dev, uint16_t offset); #define BWN_ASSERT_VALID_REG(_dev, _offset) \ KASSERT(!bwn_bhnd_is_siba_reg(_dev, _offset), \ ("%s: accessing siba-specific register %#jx", __FUNCTION__, \ (uintmax_t)(_offset))); static int bwn_bhnd_bus_ops_init(device_t dev) { struct bwn_bhnd_ctx *ctx; struct bwn_softc *sc; const struct chipc_caps *ccaps; int error; sc = device_get_softc(dev); ctx = NULL; sc->sc_mem_rid = 0; sc->sc_mem_res = bhnd_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { return (ENXIO); } /* Allocate PMU state */ if ((error = bhnd_alloc_pmu(dev))) { device_printf(dev, "PMU allocation failed: %d\n", error); goto failed; } /* Allocate our context */ ctx = malloc(sizeof(struct bwn_bhnd_ctx), M_DEVBUF, M_WAITOK|M_ZERO); /* Locate the ChipCommon device */ ctx->chipc_dev = bhnd_retain_provider(dev, BHND_SERVICE_CHIPC); if (ctx->chipc_dev == NULL) { device_printf(dev, "ChipCommon not found\n"); error = ENXIO; goto failed; } /* Locate the GPIO device */ ctx->gpio_dev = bhnd_retain_provider(dev, BHND_SERVICE_GPIO); if (ctx->gpio_dev == NULL) { device_printf(dev, "GPIO not found\n"); error = ENXIO; goto failed; } /* Locate the PMU device (if any) */ ccaps = BHND_CHIPC_GET_CAPS(ctx->chipc_dev); if (ccaps->pmu) { ctx->pmu_dev = bhnd_retain_provider(dev, BHND_SERVICE_PMU); if (ctx->pmu_dev == NULL) { device_printf(dev, "PMU not found\n"); error = ENXIO; goto failed; } } /* Populate NVRAM data */ if ((error = bwn_bhnd_populate_nvram_data(dev, ctx))) goto failed; /* Initialize bwn_softc */ sc->sc_bus_ctx = ctx; return (0); failed: bhnd_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); if (ctx != NULL) { if (ctx->chipc_dev != NULL) { bhnd_release_provider(dev, ctx->chipc_dev, BHND_SERVICE_CHIPC); } if (ctx->gpio_dev != NULL) { bhnd_release_provider(dev, ctx->gpio_dev, BHND_SERVICE_GPIO); } if (ctx->pmu_dev != NULL) { bhnd_release_provider(dev, ctx->pmu_dev, BHND_SERVICE_PMU); } free(ctx, M_DEVBUF); } return (error); } static void bwn_bhnd_bus_ops_fini(device_t dev) { struct bwn_bhnd_ctx *ctx; struct bwn_softc *sc; sc = device_get_softc(dev); ctx = sc->sc_bus_ctx; bhnd_release_pmu(dev); bhnd_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); 
bhnd_release_provider(dev, ctx->chipc_dev, BHND_SERVICE_CHIPC); bhnd_release_provider(dev, ctx->gpio_dev, BHND_SERVICE_GPIO); if (ctx->pmu_dev != NULL) bhnd_release_provider(dev, ctx->pmu_dev, BHND_SERVICE_PMU); free(ctx, M_DEVBUF); sc->sc_bus_ctx = NULL; } /** * Return true if @p offset is within a siba-specific configuration register * block. */ static inline bool bwn_bhnd_is_siba_reg(device_t dev, uint16_t offset) { if (offset >= SIBA_CFG0_OFFSET && offset <= SIBA_CFG0_OFFSET + SIBA_CFG_SIZE) return (true); if (offset >= SIBA_CFG1_OFFSET && offset <= SIBA_CFG1_OFFSET + SIBA_CFG_SIZE) return (true); return (false); } /* Populate SPROM values from NVRAM */ static int bwn_bhnd_populate_nvram_data(device_t dev, struct bwn_bhnd_ctx *ctx) { const char *mac_80211bg_var, *mac_80211a_var; int error; /* Fetch SROM revision */ error = bhnd_nvram_getvar_uint8(dev, BHND_NVAR_SROMREV, &ctx->sromrev); if (error) { device_printf(dev, "error reading %s: %d\n", BHND_NVAR_SROMREV, error); return (error); } /* Fetch board flags */ error = bhnd_nvram_getvar_uint32(dev, BHND_NVAR_BOARDFLAGS, &ctx->boardflags); if (error) { device_printf(dev, "error reading %s: %d\n", BHND_NVAR_BOARDFLAGS, error); return (error); } /* Fetch macaddrs if available; bwn(4) expects any missing macaddr * values to be initialized with 0xFF octets */ memset(ctx->mac_80211bg, 0xFF, sizeof(ctx->mac_80211bg)); memset(ctx->mac_80211a, 0xFF, sizeof(ctx->mac_80211a)); if (ctx->sromrev <= 2) { mac_80211bg_var = BHND_NVAR_IL0MACADDR; mac_80211a_var = BHND_NVAR_ET1MACADDR; } else { mac_80211bg_var = BHND_NVAR_MACADDR; mac_80211a_var = NULL; } /* Fetch required D11 core 0 macaddr */ error = bhnd_nvram_getvar_array(dev, mac_80211bg_var, ctx->mac_80211bg, sizeof(ctx->mac_80211bg), BHND_NVRAM_TYPE_UINT8_ARRAY); if (error) { device_printf(dev, "error reading %s: %d\n", mac_80211bg_var, error); return (error); } /* Fetch optional D11 core 1 macaddr */ if (mac_80211a_var != NULL) { error = bhnd_nvram_getvar_array(dev, mac_80211a_var, ctx->mac_80211a, sizeof(ctx->mac_80211a), BHND_NVRAM_TYPE_UINT8_ARRAY); if (error && error != ENOENT) { device_printf(dev, "error reading %s: %d\n", mac_80211a_var, error); return (error); } }; /* Fetch pa0maxpwr; bwn(4) expects to be able to modify it */ if ((ctx->sromrev >= 1 && ctx->sromrev <= 3) || (ctx->sromrev >= 8 && ctx->sromrev <= 10)) { error = bhnd_nvram_getvar_uint8(dev, BHND_NVAR_PA0MAXPWR, &ctx->pa0maxpwr); if (error) { device_printf(dev, "error reading %s: %d\n", BHND_NVAR_PA0MAXPWR, error); return (error); } } return (0); } /* * Disable PCI-specific MSI interrupt allocation handling */ /* * pci_find_cap() * * Referenced by: * bwn_attach() */ static int bhnd_compat_pci_find_cap(device_t dev, int capability, int *capreg) { return (ENODEV); } /* * pci_alloc_msi() * * Referenced by: * bwn_attach() */ static int bhnd_compat_pci_alloc_msi(device_t dev, int *count) { return (ENODEV); } /* * pci_release_msi() * * Referenced by: * bwn_attach() * bwn_detach() */ static int bhnd_compat_pci_release_msi(device_t dev) { return (ENODEV); } /* * pci_msi_count() * * Referenced by: * bwn_attach() */ static int bhnd_compat_pci_msi_count(device_t dev) { return (0); } /* * siba_get_vendor() * * Referenced by: * bwn_probe() */ static uint16_t bhnd_compat_get_vendor(device_t dev) { uint16_t vendor = bhnd_get_vendor(dev); switch (vendor) { case BHND_MFGID_BCM: return (SIBA_VID_BROADCOM); default: return (0x0000); } } /* * siba_get_device() * * Referenced by: * bwn_probe() */ static uint16_t bhnd_compat_get_device(device_t dev) 
{ return (bhnd_get_device(dev)); } /* * siba_get_revid() * * Referenced by: * bwn_attach() * bwn_attach_core() * bwn_chip_init() * bwn_chiptest() * bwn_core_init() * bwn_core_start() * bwn_pio_idx2base() * bwn_pio_set_txqueue() * bwn_pio_tx_start() * bwn_probe() * ... and 19 others * */ static uint8_t bhnd_compat_get_revid(device_t dev) { return (bhnd_get_hwrev(dev)); } /** * Return the PCI bridge root device. * * Will panic if a PCI bridge root device is not found. */ static device_t bwn_bhnd_get_pci_dev(device_t dev) { device_t bridge_root; bridge_root = bhnd_find_bridge_root(dev, devclass_find("pci")); if (bridge_root == NULL) panic("not a PCI device"); return (bridge_root); } /* * siba_get_pci_vendor() * * Referenced by: * bwn_sprom_bugfixes() */ static uint16_t bhnd_compat_get_pci_vendor(device_t dev) { return (pci_get_vendor(bwn_bhnd_get_pci_dev(dev))); } /* * siba_get_pci_device() * * Referenced by: * bwn_attach() * bwn_attach_core() * bwn_nphy_op_prepare_structs() * bwn_sprom_bugfixes() */ static uint16_t bhnd_compat_get_pci_device(device_t dev) { return (pci_get_device(bwn_bhnd_get_pci_dev(dev))); } /* * siba_get_pci_subvendor() * * Referenced by: * bwn_led_attach() * bwn_nphy_op_prepare_structs() * bwn_phy_g_prepare_hw() * bwn_phy_hwpctl_init() * bwn_phy_init_b5() * bwn_phy_initn() * bwn_phy_txpower_check() * bwn_radio_init2055_post() * bwn_sprom_bugfixes() * bwn_wa_init() */ static uint16_t bhnd_compat_get_pci_subvendor(device_t dev) { return (pci_get_subvendor(bwn_bhnd_get_pci_dev(dev))); } /* * siba_get_pci_subdevice() * * Referenced by: * bwn_nphy_workarounds_rev1_2() * bwn_phy_g_prepare_hw() * bwn_phy_hwpctl_init() * bwn_phy_init_b5() * bwn_phy_initn() * bwn_phy_lp_bbinit_r01() * bwn_phy_txpower_check() * bwn_radio_init2055_post() * bwn_sprom_bugfixes() * bwn_wa_init() */ static uint16_t bhnd_compat_get_pci_subdevice(device_t dev) { return (pci_get_subdevice(bwn_bhnd_get_pci_dev(dev))); } /* * siba_get_pci_revid() * * Referenced by: * bwn_phy_g_prepare_hw() * bwn_phy_lp_bbinit_r2() * bwn_sprom_bugfixes() * bwn_wa_init() */ static uint8_t bhnd_compat_get_pci_revid(device_t dev) { return (pci_get_revid(bwn_bhnd_get_pci_dev(dev))); } /* * siba_get_chipid() * * Referenced by: * bwn_attach() * bwn_gpio_init() * bwn_mac_switch_freq() * bwn_phy_g_attach() * bwn_phy_g_init_sub() * bwn_phy_g_prepare_hw() * bwn_phy_getinfo() * bwn_phy_lp_calib() * bwn_set_opmode() * bwn_sprom_bugfixes() * ... 
 *    and 9 others
 *
 */
static uint16_t
bhnd_compat_get_chipid(device_t dev)
{
    return (bhnd_get_chipid(dev)->chip_id);
}

/*
 * siba_get_chiprev()
 *
 * Referenced by:
 *    bwn_phy_getinfo()
 *    bwn_phy_lp_bbinit_r2()
 *    bwn_phy_lp_tblinit_r2()
 *    bwn_set_opmode()
 */
static uint16_t
bhnd_compat_get_chiprev(device_t dev)
{
    return (bhnd_get_chipid(dev)->chip_rev);
}

/*
 * siba_get_chippkg()
 *
 * Referenced by:
 *    bwn_phy_g_init_sub()
 *    bwn_phy_lp_bbinit_r01()
 *    bwn_radio_2056_setup()
 */
static uint8_t
bhnd_compat_get_chippkg(device_t dev)
{
    return (bhnd_get_chipid(dev)->chip_pkg);
}

/*
 * siba_get_type()
 *
 * Referenced by:
 *    bwn_core_init()
 *    bwn_dma_attach()
 *    bwn_nphy_op_prepare_structs()
 *    bwn_sprom_bugfixes()
 */
static enum siba_type
bhnd_compat_get_type(device_t dev)
{
    device_t bus, hostb;
    bhnd_devclass_t hostb_devclass;

    bus = device_get_parent(dev);
    hostb = bhnd_bus_find_hostb_device(bus);

    if (hostb == NULL)
        return (SIBA_TYPE_SSB);

    hostb_devclass = bhnd_get_class(hostb);
    switch (hostb_devclass) {
    case BHND_DEVCLASS_PCCARD:
        return (SIBA_TYPE_PCMCIA);
    case BHND_DEVCLASS_PCI:
    case BHND_DEVCLASS_PCIE:
        return (SIBA_TYPE_PCI);
    default:
        panic("unsupported hostb devclass: %d\n", hostb_devclass);
    }
}

/*
 * siba_get_cc_pmufreq()
 *
 * Referenced by:
 *    bwn_phy_lp_b2062_init()
 *    bwn_phy_lp_b2062_switch_channel()
 *    bwn_phy_lp_b2063_switch_channel()
 *    bwn_phy_lp_rxcal_r2()
 */
static uint32_t
bhnd_compat_get_cc_pmufreq(device_t dev)
{
    u_int freq;
    int error;

    if ((error = bhnd_get_clock_freq(dev, BHND_CLOCK_ALP, &freq)))
        panic("failed to fetch clock frequency: %d", error);

    /* TODO: bwn(4) immediately multiplies the result by 1000 (kHz -> Hz) */
    return (freq / 1000);
}

/*
 * siba_get_cc_caps()
 *
 * Referenced by:
 *    bwn_phy_lp_b2062_init()
 */
static uint32_t
bhnd_compat_get_cc_caps(device_t dev)
{
    device_t chipc;
    const struct chipc_caps *ccaps;
    uint32_t result;

    /* Fetch our ChipCommon device */
    chipc = bhnd_retain_provider(dev, BHND_SERVICE_CHIPC);
    if (chipc == NULL)
        panic("missing ChipCommon device");

    /*
     * The ChipCommon capability flags are only used in one LP-PHY function,
     * to assert that a PMU is in fact available.
     *
     * We can support this by producing a value containing just that flag.
     */
    result = 0;
    ccaps = BHND_CHIPC_GET_CAPS(chipc);
    if (ccaps->pmu)
        result |= SIBA_CC_CAPS_PMU;

    bhnd_release_provider(dev, chipc, BHND_SERVICE_CHIPC);
    return (result);
}

/*
 * siba_get_cc_powerdelay()
 *
 * Referenced by:
 *    bwn_chip_init()
 */
static uint16_t
bhnd_compat_get_cc_powerdelay(device_t dev)
{
    u_int delay;
    int error;

    if ((error = bhnd_get_clock_latency(dev, BHND_CLOCK_HT, &delay)))
        panic("failed to fetch clock latency: %d", error);

    if (delay > UINT16_MAX)
        panic("%#x would overflow", delay);

    return (delay);
}

/*
 * siba_get_pcicore_revid()
 *
 * Referenced by:
 *    bwn_core_init()
 */
static uint8_t
bhnd_compat_get_pcicore_revid(device_t dev)
{
    device_t hostb;
    uint8_t nomatch_revid;

    /*
     * This is used by bwn(4) only in bwn_core_init(), where a revid <= 10
     * results in the BWN_HF_PCI_SLOWCLOCK_WORKAROUND workaround being
     * enabled.
     *
     * The quirk should only be applied on siba(4) devices using a PCI
     * core; we work around this by returning a bogus value > 10 here.
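 *
 * For illustration only, a direct match on the quirk conditions listed
 * in the TODO below might look like this (hypothetical helper; bwn(4)
 * does not implement it today):
 *
 *    static bool
 *    bwn_has_pci_slowclock_quirk(device_t dev)
 *    {
 *        device_t hostb;
 *
 *        if (bhnd_get_chipid(dev)->chip_type != BHND_CHIPTYPE_SIBA)
 *            return (false);
 *
 *        hostb = bhnd_bus_find_hostb_device(device_get_parent(dev));
 *        if (hostb == NULL)
 *            return (false);
 *
 *        if (bhnd_get_device(hostb) != BHND_COREID_PCI)
 *            return (false);
 *
 *        return (bhnd_get_hwrev(hostb) <= 10);
 *    }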
* * TODO: bwn(4) should match this quirk on: * - BHND_CHIPTYPE_SIBA * - BHND_COREID_PCI * - HWREV_LTE(10) */ nomatch_revid = 0xFF; hostb = bhnd_bus_find_hostb_device(device_get_parent(dev)); if (hostb == NULL) { /* Not a bridged device */ return (nomatch_revid); } if (bhnd_get_device(hostb) != BHND_COREID_PCI) { /* Not a PCI core */ return (nomatch_revid); } /* This is a PCI core; we can return the real core revision */ return (bhnd_get_hwrev(hostb)); } /* * siba_sprom_get_rev() * * Referenced by: * bwn_nphy_op_prepare_structs() * bwn_nphy_tx_power_ctl_setup() * bwn_nphy_tx_power_fix() * bwn_nphy_workarounds_rev7plus() */ static uint8_t bhnd_compat_sprom_get_rev(device_t dev) { return (bwn_bhnd_get_ctx(dev)->sromrev); } /* * siba_sprom_get_mac_80211bg() * * Referenced by: * bwn_attach_post() */ static uint8_t * bhnd_compat_sprom_get_mac_80211bg(device_t dev) { /* 'MAC_80211BG' is il0macaddr or macaddr*/ return (bwn_bhnd_get_ctx(dev)->mac_80211bg); } /* * siba_sprom_get_mac_80211a() * * Referenced by: * bwn_attach_post() */ static uint8_t * bhnd_compat_sprom_get_mac_80211a(device_t dev) { /* 'MAC_80211A' is et1macaddr */ return (bwn_bhnd_get_ctx(dev)->mac_80211a); } /* * siba_sprom_get_brev() * * Referenced by: * bwn_radio_init2055_post() */ static uint8_t bhnd_compat_sprom_get_brev(device_t dev) { /* TODO: bwn(4) needs to switch to uint16_t */ BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_BOARDREV); } /* * siba_sprom_get_ccode() * * Referenced by: * bwn_phy_g_switch_chan() */ static uint8_t bhnd_compat_sprom_get_ccode(device_t dev) { /* This has been replaced with 'ccode' in later SPROM * revisions, but this API is only called on devices with * spromrev 1. */ BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_CC); } /* * siba_sprom_get_ant_a() * * Referenced by: * bwn_antenna_sanitize() */ static uint8_t bhnd_compat_sprom_get_ant_a(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_AA5G); } /* * siba_sprom_get_ant_bg() * * Referenced by: * bwn_antenna_sanitize() */ static uint8_t bhnd_compat_sprom_get_ant_bg(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_AA2G); } /* * siba_sprom_get_pa0b0() * * Referenced by: * bwn_phy_g_attach() */ static uint16_t bhnd_compat_sprom_get_pa0b0(device_t dev) { int16_t value; BWN_BHND_NVRAM_FETCH_VAR(dev, int16, BHND_NVAR_PA0B0, &value); /* TODO: bwn(4) immediately casts this back to int16_t */ return ((uint16_t)value); } /* * siba_sprom_get_pa0b1() * * Referenced by: * bwn_phy_g_attach() */ static uint16_t bhnd_compat_sprom_get_pa0b1(device_t dev) { int16_t value; BWN_BHND_NVRAM_FETCH_VAR(dev, int16, BHND_NVAR_PA0B1, &value); /* TODO: bwn(4) immediately casts this back to int16_t */ return ((uint16_t)value); } /* * siba_sprom_get_pa0b2() * * Referenced by: * bwn_phy_g_attach() */ static uint16_t bhnd_compat_sprom_get_pa0b2(device_t dev) { int16_t value; BWN_BHND_NVRAM_FETCH_VAR(dev, int16, BHND_NVAR_PA0B2, &value); /* TODO: bwn(4) immediately casts this back to int16_t */ return ((uint16_t)value); } /** * Fetch an led behavior (ledbhX) NVRAM variable value, for use by * siba_sprom_get_gpioX(). * * ('gpioX' are actually the ledbhX NVRAM variables). 
 */
static uint8_t
bhnd_compat_sprom_get_ledbh(device_t dev, const char *name)
{
    uint8_t value;
    int error;

    error = bhnd_nvram_getvar_uint8(dev, name, &value);
    if (error && error != ENOENT)
        panic("NVRAM variable %s unreadable: %d", name, error);

    /* For some variables (including ledbhX), a value with all bits set is
     * treated as uninitialized in the SPROM format; our SPROM parser
     * detects this case and returns ENOENT, but bwn(4) actually expects
     * to read the raw 0xFF value. */
    if (error == ENOENT)
        value = 0xFF;

    return (value);
}

/*
 * siba_sprom_get_gpio0()
 *
 * 'gpioX' are actually the led behavior (ledbh) NVRAM variables.
 *
 * Referenced by:
 *    bwn_led_attach()
 */
static uint8_t
bhnd_compat_sprom_get_gpio0(device_t dev)
{
    return (bhnd_compat_sprom_get_ledbh(dev, BHND_NVAR_LEDBH0));
}

/*
 * siba_sprom_get_gpio1()
 *
 * Referenced by:
 *    bwn_led_attach()
 */
static uint8_t
bhnd_compat_sprom_get_gpio1(device_t dev)
{
    return (bhnd_compat_sprom_get_ledbh(dev, BHND_NVAR_LEDBH1));
}

/*
 * siba_sprom_get_gpio2()
 *
 * Referenced by:
 *    bwn_led_attach()
 */
static uint8_t
bhnd_compat_sprom_get_gpio2(device_t dev)
{
    return (bhnd_compat_sprom_get_ledbh(dev, BHND_NVAR_LEDBH2));
}

/*
 * siba_sprom_get_gpio3()
 *
 * Referenced by:
 *    bwn_led_attach()
 */
static uint8_t
bhnd_compat_sprom_get_gpio3(device_t dev)
{
    return (bhnd_compat_sprom_get_ledbh(dev, BHND_NVAR_LEDBH3));
}

/*
 * siba_sprom_get_maxpwr_bg()
 *
 * Referenced by:
 *    bwn_phy_g_recalc_txpwr()
 */
static uint16_t
bhnd_compat_sprom_get_maxpwr_bg(device_t dev)
{
    return (bwn_bhnd_get_ctx(dev)->pa0maxpwr);
}

/*
 * siba_sprom_set_maxpwr_bg()
 *
 * Referenced by:
 *    bwn_phy_g_recalc_txpwr()
 */
static void
bhnd_compat_sprom_set_maxpwr_bg(device_t dev, uint16_t t)
{
    KASSERT(t <= UINT8_MAX, ("invalid maxpwr value %hu", t));
    bwn_bhnd_get_ctx(dev)->pa0maxpwr = t;
}

/*
 * siba_sprom_get_rxpo2g()
 *
 * Referenced by:
 *    bwn_phy_lp_readsprom()
 */
static uint8_t
bhnd_compat_sprom_get_rxpo2g(device_t dev)
{
    /* Should be signed, but bwn(4) expects an unsigned value */
    BWN_BHND_NVRAM_RETURN_VAR(dev, int8, BHND_NVAR_RXPO2G);
}

/*
 * siba_sprom_get_rxpo5g()
 *
 * Referenced by:
 *    bwn_phy_lp_readsprom()
 */
static uint8_t
bhnd_compat_sprom_get_rxpo5g(device_t dev)
{
    /* Should be signed, but bwn(4) expects an unsigned value */
    BWN_BHND_NVRAM_RETURN_VAR(dev, int8, BHND_NVAR_RXPO5G);
}

/*
 * siba_sprom_get_tssi_bg()
 *
 * Referenced by:
 *    bwn_phy_g_attach()
 */
static uint8_t
bhnd_compat_sprom_get_tssi_bg(device_t dev)
{
    BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_PA0ITSSIT);
}

/*
 * siba_sprom_get_tri2g()
 *
 * Referenced by:
 *    bwn_phy_lp_readsprom()
 */
static uint8_t
bhnd_compat_sprom_get_tri2g(device_t dev)
{
    BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TRI2G);
}

/*
 * siba_sprom_get_tri5gl()
 *
 * Referenced by:
 *    bwn_phy_lp_readsprom()
 */
static uint8_t
bhnd_compat_sprom_get_tri5gl(device_t dev)
{
    BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TRI5GL);
}

/*
 * siba_sprom_get_tri5g()
 *
 * Referenced by:
 *    bwn_phy_lp_readsprom()
 */
static uint8_t
bhnd_compat_sprom_get_tri5g(device_t dev)
{
    BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TRI5G);
}

/*
 * siba_sprom_get_tri5gh()
 *
 * Referenced by:
 *    bwn_phy_lp_readsprom()
 */
static uint8_t
bhnd_compat_sprom_get_tri5gh(device_t dev)
{
    BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TRI5GH);
}

/*
 * siba_sprom_get_rssisav2g()
 *
 * Referenced by:
 *    bwn_phy_lp_readsprom()
 */
static uint8_t
bhnd_compat_sprom_get_rssisav2g(device_t dev)
{
    BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_RSSISAV2G);
}

/*
 * siba_sprom_get_rssismc2g()
 *
 * Referenced by:
 *
bwn_phy_lp_readsprom() */ static uint8_t bhnd_compat_sprom_get_rssismc2g(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_RSSISMC2G); } /* * siba_sprom_get_rssismf2g() * * Referenced by: * bwn_phy_lp_readsprom() */ static uint8_t bhnd_compat_sprom_get_rssismf2g(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_RSSISMF2G); } /* * siba_sprom_get_bxa2g() * * Referenced by: * bwn_phy_lp_readsprom() */ static uint8_t bhnd_compat_sprom_get_bxa2g(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_BXA2G); } /* * siba_sprom_get_rssisav5g() * * Referenced by: * bwn_phy_lp_readsprom() */ static uint8_t bhnd_compat_sprom_get_rssisav5g(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_RSSISAV5G); } /* * siba_sprom_get_rssismc5g() * * Referenced by: * bwn_phy_lp_readsprom() */ static uint8_t bhnd_compat_sprom_get_rssismc5g(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_RSSISMC5G); } /* * siba_sprom_get_rssismf5g() * * Referenced by: * bwn_phy_lp_readsprom() */ static uint8_t bhnd_compat_sprom_get_rssismf5g(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_RSSISMF5G); } /* * siba_sprom_get_bxa5g() * * Referenced by: * bwn_phy_lp_readsprom() */ static uint8_t bhnd_compat_sprom_get_bxa5g(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_BXA5G); } /* * siba_sprom_get_cck2gpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static uint16_t bhnd_compat_sprom_get_cck2gpo(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint16, BHND_NVAR_CCK2GPO); } /* * siba_sprom_get_ofdm2gpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static uint32_t bhnd_compat_sprom_get_ofdm2gpo(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint32, BHND_NVAR_OFDM2GPO); } /* * siba_sprom_get_ofdm5glpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static uint32_t bhnd_compat_sprom_get_ofdm5glpo(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint32, BHND_NVAR_OFDM5GLPO); } /* * siba_sprom_get_ofdm5gpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static uint32_t bhnd_compat_sprom_get_ofdm5gpo(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint32, BHND_NVAR_OFDM5GPO); } /* * siba_sprom_get_ofdm5ghpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static uint32_t bhnd_compat_sprom_get_ofdm5ghpo(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint32, BHND_NVAR_OFDM5GHPO); } /* * siba_sprom_set_bf_lo() * * Referenced by: * bwn_sprom_bugfixes() */ static void bhnd_compat_sprom_set_bf_lo(device_t dev, uint16_t t) { struct bwn_bhnd_ctx *ctx = bwn_bhnd_get_ctx(dev); ctx->boardflags &= ~0xFFFF; ctx->boardflags |= t; } /* * siba_sprom_get_bf_lo() * * Referenced by: * bwn_bt_enable() * bwn_core_init() * bwn_gpio_init() * bwn_loopback_calcgain() * bwn_phy_g_init_sub() * bwn_phy_g_recalc_txpwr() * bwn_phy_g_set_txpwr() * bwn_phy_g_task_60s() * bwn_rx_rssi_calc() * bwn_sprom_bugfixes() * ... 
and 11 others * */ static uint16_t bhnd_compat_sprom_get_bf_lo(device_t dev) { struct bwn_bhnd_ctx *ctx = bwn_bhnd_get_ctx(dev); return (ctx->boardflags & UINT16_MAX); } /* * siba_sprom_get_bf_hi() * * Referenced by: * bwn_nphy_gain_ctl_workarounds_rev3() * bwn_phy_lp_bbinit_r01() * bwn_phy_lp_tblinit_txgain() */ static uint16_t bhnd_compat_sprom_get_bf_hi(device_t dev) { struct bwn_bhnd_ctx *ctx = bwn_bhnd_get_ctx(dev); return (ctx->boardflags >> 16); } /* * siba_sprom_get_bf2_lo() * * Referenced by: * bwn_nphy_op_prepare_structs() * bwn_nphy_workarounds_rev1_2() * bwn_nphy_workarounds_rev3plus() * bwn_phy_initn() * bwn_radio_2056_setup() * bwn_radio_init2055_post() */ static uint16_t bhnd_compat_sprom_get_bf2_lo(device_t dev) { uint32_t bf2; BWN_BHND_NVRAM_FETCH_VAR(dev, uint32, BHND_NVAR_BOARDFLAGS2, &bf2); return (bf2 & UINT16_MAX); } /* * siba_sprom_get_bf2_hi() * * Referenced by: * bwn_nphy_workarounds_rev7plus() * bwn_phy_initn() * bwn_radio_2056_setup() */ static uint16_t bhnd_compat_sprom_get_bf2_hi(device_t dev) { uint32_t bf2; BWN_BHND_NVRAM_FETCH_VAR(dev, uint32, BHND_NVAR_BOARDFLAGS2, &bf2); return (bf2 >> 16); } /* * siba_sprom_get_fem_2ghz_tssipos() * * Referenced by: * bwn_nphy_tx_power_ctl_setup() */ static uint8_t bhnd_compat_sprom_get_fem_2ghz_tssipos(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TSSIPOS2G); } /* * siba_sprom_get_fem_2ghz_extpa_gain() * * Referenced by: * bwn_nphy_op_prepare_structs() */ static uint8_t bhnd_compat_sprom_get_fem_2ghz_extpa_gain(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_EXTPAGAIN2G); } /* * siba_sprom_get_fem_2ghz_pdet_range() * * Referenced by: * bwn_nphy_workarounds_rev3plus() */ static uint8_t bhnd_compat_sprom_get_fem_2ghz_pdet_range(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_PDETRANGE2G); } /* * siba_sprom_get_fem_2ghz_tr_iso() * * Referenced by: * bwn_nphy_get_gain_ctl_workaround_ent() */ static uint8_t bhnd_compat_sprom_get_fem_2ghz_tr_iso(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TRISO2G); } /* * siba_sprom_get_fem_2ghz_antswlut() * * Referenced by: * bwn_nphy_tables_init_rev3() * bwn_nphy_tables_init_rev7_volatile() */ static uint8_t bhnd_compat_sprom_get_fem_2ghz_antswlut(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_ANTSWCTL2G); } /* * siba_sprom_get_fem_5ghz_extpa_gain() * * Referenced by: * bwn_nphy_get_tx_gain_table() * bwn_nphy_op_prepare_structs() */ static uint8_t bhnd_compat_sprom_get_fem_5ghz_extpa_gain(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_EXTPAGAIN5G); } /* * siba_sprom_get_fem_5ghz_pdet_range() * * Referenced by: * bwn_nphy_workarounds_rev3plus() */ static uint8_t bhnd_compat_sprom_get_fem_5ghz_pdet_range(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_PDETRANGE5G); } /* * siba_sprom_get_fem_5ghz_antswlut() * * Referenced by: * bwn_nphy_tables_init_rev3() * bwn_nphy_tables_init_rev7_volatile() */ static uint8_t bhnd_compat_sprom_get_fem_5ghz_antswlut(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_ANTSWCTL5G); } /* * siba_sprom_get_txpid_2g_0() * * Referenced by: * bwn_nphy_tx_power_fix() */ static uint8_t bhnd_compat_sprom_get_txpid_2g_0(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TXPID2GA0); } /* * siba_sprom_get_txpid_2g_1() * * Referenced by: * bwn_nphy_tx_power_fix() */ static uint8_t bhnd_compat_sprom_get_txpid_2g_1(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TXPID2GA1); } /* * siba_sprom_get_txpid_5gl_0() * * 
Referenced by: * bwn_nphy_tx_power_fix() */ static uint8_t bhnd_compat_sprom_get_txpid_5gl_0(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TXPID5GLA0); } /* * siba_sprom_get_txpid_5gl_1() * * Referenced by: * bwn_nphy_tx_power_fix() */ static uint8_t bhnd_compat_sprom_get_txpid_5gl_1(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TXPID5GLA1); } /* * siba_sprom_get_txpid_5g_0() * * Referenced by: * bwn_nphy_tx_power_fix() */ static uint8_t bhnd_compat_sprom_get_txpid_5g_0(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TXPID5GA0); } /* * siba_sprom_get_txpid_5g_1() * * Referenced by: * bwn_nphy_tx_power_fix() */ static uint8_t bhnd_compat_sprom_get_txpid_5g_1(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TXPID5GA1); } /* * siba_sprom_get_txpid_5gh_0() * * Referenced by: * bwn_nphy_tx_power_fix() */ static uint8_t bhnd_compat_sprom_get_txpid_5gh_0(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TXPID5GHA0); } /* * siba_sprom_get_txpid_5gh_1() * * Referenced by: * bwn_nphy_tx_power_fix() */ static uint8_t bhnd_compat_sprom_get_txpid_5gh_1(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint8, BHND_NVAR_TXPID5GHA1); } /* * siba_sprom_get_stbcpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static uint16_t bhnd_compat_sprom_get_stbcpo(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint16, BHND_NVAR_STBCPO); } /* * siba_sprom_get_cddpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static uint16_t bhnd_compat_sprom_get_cddpo(device_t dev) { BWN_BHND_NVRAM_RETURN_VAR(dev, uint16, BHND_NVAR_CDDPO); } /* * siba_powerup() * * Referenced by: * bwn_attach_core() * bwn_core_init() */ static void bhnd_compat_powerup(device_t dev, int dynamic) { struct bwn_bhnd_ctx *ctx; bhnd_clock clock; int error; ctx = bwn_bhnd_get_ctx(dev); /* On PMU equipped devices, we do not need to issue a clock request * at powerup */ if (ctx->pmu_dev != NULL) return; /* Issue a PMU clock request */ if (dynamic) clock = BHND_CLOCK_DYN; else clock = BHND_CLOCK_HT; if ((error = bhnd_request_clock(dev, clock))) { device_printf(dev, "%d clock request failed: %d\n", clock, error); } } /* * siba_powerdown() * * Referenced by: * bwn_attach_core() * bwn_core_exit() * bwn_core_init() */ static int bhnd_compat_powerdown(device_t dev) { int error; /* Suspend the core */ if ((error = bhnd_suspend_hw(dev, 0))) return (error); return (0); } /* * siba_read_2() * * Referenced by: * bwn_chip_init() * bwn_chiptest() * bwn_dummy_transmission() * bwn_gpio_init() * bwn_phy_getinfo() * bwn_pio_read_2() * bwn_shm_read_2() * bwn_shm_read_4() * bwn_wme_init() * bwn_wme_loadparams() * ... and 23 others * */ static uint16_t bhnd_compat_read_2(device_t dev, uint16_t offset) { struct bwn_softc *sc = device_get_softc(dev); BWN_ASSERT_VALID_REG(dev, offset); return (bhnd_bus_read_2(sc->sc_mem_res, offset)); } /* * siba_write_2() * * Referenced by: * bwn_chip_init() * bwn_chiptest() * bwn_crypt_init() * bwn_gpio_init() * bwn_phy_getinfo() * bwn_pio_tx_start() * bwn_set_opmode() * bwn_shm_write_2() * bwn_shm_write_4() * bwn_wme_init() * ... 
and 43 others * */ static void bhnd_compat_write_2(device_t dev, uint16_t offset, uint16_t value) { struct bwn_softc *sc = device_get_softc(dev); BWN_ASSERT_VALID_REG(dev, offset); return (bhnd_bus_write_2(sc->sc_mem_res, offset, value)); } /* * siba_read_4() * * Referenced by: * bwn_attach_core() * bwn_chip_init() * bwn_chiptest() * bwn_core_exit() * bwn_core_init() * bwn_core_start() * bwn_pio_init() * bwn_pio_tx_start() * bwn_reset_core() * bwn_shm_read_4() * ... and 42 others * */ static uint32_t bhnd_compat_read_4(device_t dev, uint16_t offset) { struct bwn_softc *sc = device_get_softc(dev); uint16_t ioreg; int error; /* bwn(4) fetches IOCTL/IOST values directly from siba-specific target * state registers; we map these directly to bhnd_read_(ioctl|iost) */ switch (offset) { case SB0_REG_ABS(SIBA_CFG0_TMSTATELOW): if ((error = bhnd_read_ioctl(dev, &ioreg))) panic("error reading IOCTL: %d\n", error); return (((uint32_t)ioreg) << SIBA_TML_SICF_SHIFT); case SB0_REG_ABS(SIBA_CFG0_TMSTATEHIGH): if ((error = bhnd_read_iost(dev, &ioreg))) panic("error reading IOST: %d\n", error); return (((uint32_t)ioreg) << SIBA_TMH_SISF_SHIFT); } /* Otherwise, perform a standard bus read */ BWN_ASSERT_VALID_REG(dev, offset); return (bhnd_bus_read_4(sc->sc_mem_res, offset)); } /* * siba_write_4() * * Referenced by: * bwn_chip_init() * bwn_chiptest() * bwn_core_exit() * bwn_core_start() * bwn_dma_mask() * bwn_dma_rxdirectfifo() * bwn_pio_init() * bwn_reset_core() * bwn_shm_ctlword() * bwn_shm_write_4() * ... and 37 others * */ static void bhnd_compat_write_4(device_t dev, uint16_t offset, uint32_t value) { struct bwn_softc *sc = device_get_softc(dev); uint16_t ioctl; int error; /* bwn(4) writes IOCTL values directly to siba-specific target state * registers; we map these directly to bhnd_write_ioctl() */ if (offset == SB0_REG_ABS(SIBA_CFG0_TMSTATELOW)) { /* shift IOCTL flags back down to their original values */ if (value & ~SIBA_TML_SICF_MASK) panic("%s: non-IOCTL flags provided", __FUNCTION__); ioctl = (value & SIBA_TML_SICF_MASK) >> SIBA_TML_SICF_SHIFT; if ((error = bhnd_write_ioctl(dev, ioctl, UINT16_MAX))) panic("error writing IOCTL: %d\n", error); } else { /* Otherwise, perform a standard bus write */ BWN_ASSERT_VALID_REG(dev, offset); bhnd_bus_write_4(sc->sc_mem_res, offset, value); } return; } /* * siba_dev_up() * * Referenced by: * bwn_reset_core() */ static void bhnd_compat_dev_up(device_t dev, uint32_t flags) { uint16_t ioctl; int error; /* shift IOCTL flags back down to their original values */ if (flags & ~SIBA_TML_SICF_MASK) panic("%s: non-IOCTL flags provided", __FUNCTION__); ioctl = (flags & SIBA_TML_SICF_MASK) >> SIBA_TML_SICF_SHIFT; /* Perform core reset; note that bwn(4) incorrectly assumes that both * RESET and post-RESET ioctl flags should be identical */ if ((error = bhnd_reset_hw(dev, ioctl, ioctl))) panic("%s: core reset failed: %d", __FUNCTION__, error); } /* * siba_dev_down() * * Referenced by: * bwn_attach_core() * bwn_core_exit() */ static void bhnd_compat_dev_down(device_t dev, uint32_t flags) { uint16_t ioctl; int error; /* shift IOCTL flags back down to their original values */ if (flags & ~SIBA_TML_SICF_MASK) panic("%s: non-IOCTL flags provided", __FUNCTION__); ioctl = (flags & SIBA_TML_SICF_MASK) >> SIBA_TML_SICF_SHIFT; /* Put core into RESET state */ if ((error = bhnd_suspend_hw(dev, ioctl))) panic("%s: core suspend failed: %d", __FUNCTION__, error); } /* * siba_dev_isup() * * Referenced by: * bwn_core_init() */ static int bhnd_compat_dev_isup(device_t dev) { return 
(!bhnd_is_hw_suspended(dev)); } /* * siba_pcicore_intr() * * Referenced by: * bwn_core_init() */ static void bhnd_compat_pcicore_intr(device_t dev) { /* This is handled by bhnd_bhndb on the first call to * bus_setup_intr() */ } /* * siba_dma_translation() * * Referenced by: * bwn_dma_32_setdesc() * bwn_dma_64_setdesc() * bwn_dma_setup() */ static uint32_t bhnd_compat_dma_translation(device_t dev) { struct bhnd_dma_translation dt; struct bwn_softc *sc; struct bwn_mac *mac; int bwn_dmatype, error; sc = device_get_softc(dev); mac = sc->sc_curmac; KASSERT(mac != NULL, ("no MAC")); /* Fetch our DMA translation */ bwn_dmatype = mac->mac_method.dma.dmatype; if ((error = bhnd_get_dma_translation(dev, bwn_dmatype, 0, NULL, &dt))) panic("error requesting DMA translation: %d\n", error); /* * TODO: bwn(4) needs to switch to bhnd_get_dma_translation(). * * Currently, bwn(4) incorrectly assumes that: * - The 32-bit translation mask is always SIBA_DMA_TRANSLATION_MASK. * - The 32-bit mask can simply be applied to the top 32-bits of a * 64-bit DMA address. * - The 64-bit address translation is always derived by shifting the * 32-bit siba_dma_translation() left by 1 bit. * * In practice, these assumptions won't result in any bugs on known * PCI/PCIe Wi-Fi hardware: * - The 32-bit mask _is_ always SIBA_DMA_TRANSLATION_MASK on * the subset of devices supported by bwn(4). * - The 64-bit mask used by bwn(4) is a superset of the real * mask, and thus: * - Our DMA tag will still have valid constraints. * - Our address translation will not be corrupted by * applying the mask. * - The mask falls within the top 16 address bits, and our * supported 64-bit architectures are all still limited * to 48-bit addresses anyway; we don't need to worry about * addressing >= 48-bit host memory. * * However, we will need to resolve these issues in bwn(4) if DMA is to * work on new hardware (e.g. WiSoCs). 
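 *
 * For reference, the 32-bit descriptor address bwn(4) derives from this
 * return value is effectively (illustrative only):
 *
 *    busaddr = (paddr & ~SIBA_DMA_TRANSLATION_MASK) |
 *        siba_dma_translation(dev);
 *
 * i.e. the translation base is OR'd into the masked host physical
 * address.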
*/ switch (bwn_dmatype) { case BWN_DMA_32BIT: case BWN_DMA_30BIT: KASSERT((~dt.addr_mask & BHND_DMA_ADDR_BITMASK(32)) == SIBA_DMA_TRANSLATION_MASK, ("unexpected DMA mask: %#jx", (uintmax_t)dt.addr_mask)); return (dt.base_addr); case BWN_DMA_64BIT: /* bwn(4) will shift this left by 32+1 bits before applying it * to the top 32-bits of the DMA address */ KASSERT((~dt.addr_mask & BHND_DMA_ADDR_BITMASK(33)) == 0, ("DMA64 translation %#jx masks low 33-bits", (uintmax_t)dt.addr_mask)); return (dt.base_addr >> 33); default: panic("unknown dma type %d", bwn_dmatype); } } /* * siba_read_multi_2() * * Referenced by: * bwn_pio_rxeof() */ static void bhnd_compat_read_multi_2(device_t dev, void *buffer, size_t count, uint16_t offset) { struct bwn_softc *sc = device_get_softc(dev); BWN_ASSERT_VALID_REG(dev, offset); return (bhnd_bus_read_multi_2(sc->sc_mem_res, offset, buffer, count)); } /* * siba_read_multi_4() * * Referenced by: * bwn_pio_rxeof() */ static void bhnd_compat_read_multi_4(device_t dev, void *buffer, size_t count, uint16_t offset) { struct bwn_softc *sc = device_get_softc(dev); BWN_ASSERT_VALID_REG(dev, offset); return (bhnd_bus_read_multi_4(sc->sc_mem_res, offset, buffer, count)); } /* * siba_write_multi_2() * * Referenced by: * bwn_pio_write_multi_2() */ static void bhnd_compat_write_multi_2(device_t dev, const void *buffer, size_t count, uint16_t offset) { struct bwn_softc *sc = device_get_softc(dev); BWN_ASSERT_VALID_REG(dev, offset); /* XXX discarding const to maintain API compatibility with * siba_write_multi_2() */ bhnd_bus_write_multi_2(sc->sc_mem_res, offset, __DECONST(void *, buffer), count); } /* * siba_write_multi_4() * * Referenced by: * bwn_pio_write_multi_4() */ static void bhnd_compat_write_multi_4(device_t dev, const void *buffer, size_t count, uint16_t offset) { struct bwn_softc *sc = device_get_softc(dev); BWN_ASSERT_VALID_REG(dev, offset); /* XXX discarding const to maintain API compatibility with * siba_write_multi_4() */ bhnd_bus_write_multi_4(sc->sc_mem_res, offset, __DECONST(void *, buffer), count); } /* * siba_barrier() * * Referenced by: * bwn_intr() * bwn_intrtask() * bwn_ram_write() */ static void bhnd_compat_barrier(device_t dev, int flags) { struct bwn_softc *sc = device_get_softc(dev); /* XXX is siba_barrier()'s use of an offset and length of 0 * correct? 
*/ BWN_ASSERT_VALID_REG(dev, 0); bhnd_bus_barrier(sc->sc_mem_res, 0, 0, flags); } /* * siba_cc_pmu_set_ldovolt() * * Referenced by: * bwn_phy_lp_bbinit_r01() */ static void bhnd_compat_cc_pmu_set_ldovolt(device_t dev, int id, uint32_t volt) { struct bwn_bhnd_ctx *ctx; int error; ctx = bwn_bhnd_get_ctx(dev); /* Only ever used to set the PAREF LDO voltage */ if (id != SIBA_LDO_PAREF) panic("invalid LDO id: %d", id); /* Configuring regulator voltage requires a PMU */ if (ctx->pmu_dev == NULL) panic("no PMU; cannot set LDO voltage"); error = bhnd_pmu_set_voltage_raw(ctx->pmu_dev, BHND_REGULATOR_PAREF_LDO, volt); if (error) panic("failed to set LDO voltage: %d", error); } /* * siba_cc_pmu_set_ldoparef() * * Referenced by: * bwn_phy_lp_bbinit_r01() */ static void bhnd_compat_cc_pmu_set_ldoparef(device_t dev, uint8_t on) { struct bwn_bhnd_ctx *ctx; int error; ctx = bwn_bhnd_get_ctx(dev); /* Enabling/disabling regulators requires a PMU */ if (ctx->pmu_dev == NULL) panic("no PMU; cannot set LDO voltage"); if (on) { error = bhnd_pmu_enable_regulator(ctx->pmu_dev, BHND_REGULATOR_PAREF_LDO); } else { - error = bhnd_pmu_enable_regulator(ctx->pmu_dev, + error = bhnd_pmu_disable_regulator(ctx->pmu_dev, BHND_REGULATOR_PAREF_LDO); } if (error) { panic("failed to %s PAREF_LDO: %d", on ? "enable" : "disable", error); } } /* * siba_gpio_set() * * Referenced by: * bwn_chip_exit() * bwn_chip_init() * bwn_gpio_init() * bwn_nphy_superswitch_init() */ static void bhnd_compat_gpio_set(device_t dev, uint32_t value) { struct bwn_bhnd_ctx *ctx; uint32_t flags[32]; int error; ctx = bwn_bhnd_get_ctx(dev); for (size_t i = 0; i < nitems(flags); i++) { if (value & (1 << i)) { /* Tristate pin */ flags[i] = (GPIO_PIN_OUTPUT | GPIO_PIN_TRISTATE); } else { /* Leave unmodified */ flags[i] = 0; } } error = GPIO_PIN_CONFIG_32(ctx->gpio_dev, 0, nitems(flags), flags); if (error) panic("error configuring pin flags: %d", error); } /* * siba_gpio_get() * * Referenced by: * bwn_gpio_init() */ static uint32_t bhnd_compat_gpio_get(device_t dev) { struct bwn_bhnd_ctx *ctx; uint32_t ctrl; int npin; int error; /* * We recreate the expected GPIOCTRL register value for bwn_gpio_init() * by querying pins individually for GPIO_PIN_TRISTATE. * * Once we drop these compatibility shims, the GPIO_PIN_CONFIG_32 method * can be used to set pin configuration without bwn(4) externally * implementing RMW. 
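 *
 * A sketch of that eventual replacement (illustrative only): instead of
 * reconstructing GPIOCTRL and performing the read-modify-write itself,
 * bwn(4) would configure all pins in a single call, e.g.
 *
 *    flags[pin] = GPIO_PIN_OUTPUT | GPIO_PIN_TRISTATE;
 *    error = GPIO_PIN_CONFIG_32(gpio_dev, 0, npin, flags);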
*/ /* Fetch the total pin count */ ctx = bwn_bhnd_get_ctx(dev); if ((error = GPIO_PIN_MAX(ctx->gpio_dev, &npin))) panic("failed to fetch max pin: %d", error); /* Must be representable within a 32-bit GPIOCTRL register value */ KASSERT(npin <= 32, ("unsupported pin count: %u", npin)); ctrl = 0; for (uint32_t pin = 0; pin < npin; pin++) { uint32_t flags; if ((error = GPIO_PIN_GETFLAGS(ctx->gpio_dev, pin, &flags))) panic("error fetching pin%u flags: %d", pin, error); if (flags & GPIO_PIN_TRISTATE) ctrl |= (1 << pin); } return (ctrl); } /* * siba_fix_imcfglobug() * * Referenced by: * bwn_core_init() */ static void bhnd_compat_fix_imcfglobug(device_t dev) { /* This is handled by siba_bhndb during attach/resume */ } /* Core power NVRAM variables, indexed by D11 core unit number */ static const struct bwn_power_vars { const char *itt2ga; const char *itt5ga; const char *maxp2ga; const char *pa2ga; const char *pa5ga; } bwn_power_vars[BWN_BHND_NUM_CORE_PWR] = { #define BHND_POWER_NVAR(_idx) \ { BHND_NVAR_ITT2GA ## _idx, BHND_NVAR_ITT5GA ## _idx, \ BHND_NVAR_MAXP2GA ## _idx, BHND_NVAR_PA2GA ## _idx, \ BHND_NVAR_PA5GA ## _idx } BHND_POWER_NVAR(0), BHND_POWER_NVAR(1), BHND_POWER_NVAR(2), BHND_POWER_NVAR(3) #undef BHND_POWER_NVAR }; static int bwn_get_core_power_info_r11(device_t dev, const struct bwn_power_vars *v, struct siba_sprom_core_pwr_info *c) { int16_t pa5ga[12]; int error; /* BHND_NVAR_PA2GA[core] */ error = bhnd_nvram_getvar_array(dev, v->pa2ga, c->pa_2g, sizeof(c->pa_2g), BHND_NVRAM_TYPE_INT16); if (error) return (error); /* * BHND_NVAR_PA5GA * * The NVRAM variable is defined as a single pa5ga[12] array; we have * to split this into pa_5gl[4], pa_5g[4], and pa_5gh[4] for use * by bwn(4); */ _Static_assert(nitems(pa5ga) == nitems(c->pa_5g) + nitems(c->pa_5gh) + nitems(c->pa_5gl), "cannot split pa5ga into pa_5gl/pa_5g/pa_5gh"); error = bhnd_nvram_getvar_array(dev, v->pa5ga, pa5ga, sizeof(pa5ga), BHND_NVRAM_TYPE_INT16); if (error) return (error); memcpy(c->pa_5gl, &pa5ga[0], sizeof(c->pa_5gl)); memcpy(c->pa_5g, &pa5ga[4], sizeof(c->pa_5g)); memcpy(c->pa_5gh, &pa5ga[8], sizeof(c->pa_5gh)); return (0); } static int bwn_get_core_power_info_r4_r10(device_t dev, const struct bwn_power_vars *v, struct siba_sprom_core_pwr_info *c) { int error; /* BHND_NVAR_ITT2GA[core] */ if ((error = bhnd_nvram_getvar_uint8(dev, v->itt2ga, &c->itssi_2g))) return (error); /* BHND_NVAR_ITT5GA[core] */ if ((error = bhnd_nvram_getvar_uint8(dev, v->itt5ga, &c->itssi_5g))) return (error); return (0); } /* * siba_sprom_get_core_power_info() * * Referenced by: * bwn_nphy_tx_power_ctl_setup() * bwn_ppr_load_max_from_sprom() */ static int bhnd_compat_sprom_get_core_power_info(device_t dev, int core, struct siba_sprom_core_pwr_info *c) { struct bwn_bhnd_ctx *ctx; const struct bwn_power_vars *v; int error; if (core < 0 || core >= nitems(bwn_power_vars)) return (EINVAL); ctx = bwn_bhnd_get_ctx(dev); if (ctx->sromrev < 4) return (ENXIO); v = &bwn_power_vars[core]; /* Any power variables not found in NVRAM (or returning a * shorter array for a particular NVRAM revision) should be zero * initialized */ memset(c, 0x0, sizeof(*c)); /* Populate SPROM revision-independent values */ if ((error = bhnd_nvram_getvar_uint8(dev, v->maxp2ga, &c->maxpwr_2g))) return (error); /* Populate SPROM revision-specific values */ if (ctx->sromrev >= 4 && ctx->sromrev <= 10) return (bwn_get_core_power_info_r4_r10(dev, v, c)); else return (bwn_get_core_power_info_r11(dev, v, c)); } /* * siba_sprom_get_mcs2gpo() * * Referenced by: * 
bwn_ppr_load_max_from_sprom() */ static int bhnd_compat_sprom_get_mcs2gpo(device_t dev, uint16_t *c) { static const char *varnames[] = { BHND_NVAR_MCS2GPO0, BHND_NVAR_MCS2GPO1, BHND_NVAR_MCS2GPO2, BHND_NVAR_MCS2GPO3, BHND_NVAR_MCS2GPO4, BHND_NVAR_MCS2GPO5, BHND_NVAR_MCS2GPO6, BHND_NVAR_MCS2GPO7 }; for (size_t i = 0; i < nitems(varnames); i++) { const char *name = varnames[i]; BWN_BHND_NVRAM_FETCH_VAR(dev, uint16, name, &c[i]); } return (0); } /* * siba_sprom_get_mcs5glpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static int bhnd_compat_sprom_get_mcs5glpo(device_t dev, uint16_t *c) { static const char *varnames[] = { BHND_NVAR_MCS5GLPO0, BHND_NVAR_MCS5GLPO1, BHND_NVAR_MCS5GLPO2, BHND_NVAR_MCS5GLPO3, BHND_NVAR_MCS5GLPO4, BHND_NVAR_MCS5GLPO5, BHND_NVAR_MCS5GLPO6, BHND_NVAR_MCS5GLPO7 }; for (size_t i = 0; i < nitems(varnames); i++) { const char *name = varnames[i]; BWN_BHND_NVRAM_FETCH_VAR(dev, uint16, name, &c[i]); } return (0); } /* * siba_sprom_get_mcs5gpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static int bhnd_compat_sprom_get_mcs5gpo(device_t dev, uint16_t *c) { static const char *varnames[] = { BHND_NVAR_MCS5GPO0, BHND_NVAR_MCS5GPO1, BHND_NVAR_MCS5GPO2, BHND_NVAR_MCS5GPO3, BHND_NVAR_MCS5GPO4, BHND_NVAR_MCS5GPO5, BHND_NVAR_MCS5GPO6, BHND_NVAR_MCS5GPO7 }; for (size_t i = 0; i < nitems(varnames); i++) { const char *name = varnames[i]; BWN_BHND_NVRAM_FETCH_VAR(dev, uint16, name, &c[i]); } return (0); } /* * siba_sprom_get_mcs5ghpo() * * Referenced by: * bwn_ppr_load_max_from_sprom() */ static int bhnd_compat_sprom_get_mcs5ghpo(device_t dev, uint16_t *c) { static const char *varnames[] = { BHND_NVAR_MCS5GHPO0, BHND_NVAR_MCS5GHPO1, BHND_NVAR_MCS5GHPO2, BHND_NVAR_MCS5GHPO3, BHND_NVAR_MCS5GHPO4, BHND_NVAR_MCS5GHPO5, BHND_NVAR_MCS5GHPO6, BHND_NVAR_MCS5GHPO7 }; for (size_t i = 0; i < nitems(varnames); i++) { const char *name = varnames[i]; BWN_BHND_NVRAM_FETCH_VAR(dev, uint16, name, &c[i]); } return (0); } /* * siba_pmu_spuravoid_pllupdate() * * Referenced by: * bwn_nphy_pmu_spur_avoid() */ static void bhnd_compat_pmu_spuravoid_pllupdate(device_t dev, int spur_avoid) { struct bwn_bhnd_ctx *ctx; bhnd_pmu_spuravoid mode; int error; ctx = bwn_bhnd_get_ctx(dev); if (ctx->pmu_dev == NULL) panic("requested spuravoid on non-PMU device"); switch (spur_avoid) { case 0: mode = BHND_PMU_SPURAVOID_NONE; break; case 1: mode = BHND_PMU_SPURAVOID_M1; break; default: panic("unknown spur_avoid: %d", spur_avoid); } if ((error = bhnd_pmu_request_spuravoid(ctx->pmu_dev, mode))) panic("spuravoid request failed: %d", error); } /* * siba_cc_set32() * * Referenced by: * bwn_phy_initn() * bwn_wireless_core_phy_pll_reset() */ static void bhnd_compat_cc_set32(device_t dev, uint32_t reg, uint32_t val) { struct bwn_bhnd_ctx *ctx = bwn_bhnd_get_ctx(dev); /* * OR with the current value. * * This function is only ever used to write to either ChipCommon's * chipctrl register or chipctl_data register. Note that chipctl_data * is actually a PMU register; it is not actually mapped by ChipCommon * on Always-on-Bus (AOB) devices with a standalone PMU core. 
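 *
 * Together with cc_mask32() and cc_write32() below, this implements
 * bwn(4)'s indirect chipctl access sequence (illustrative only):
 *
 *    siba_cc_write32(dev, SIBA_CC_CHIPCTL_ADDR, reg);
 *    siba_cc_set32(dev, SIBA_CC_CHIPCTL_DATA, bits);
 *
 * where the address write is merely cached, and the data write is then
 * applied as a read-modify-write through bhnd_pmu_write_chipctrl().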
 */
    if (dev != ctx->chipc_dev)
        panic("unsupported device: %s", device_get_nameunit(dev));

    switch (reg) {
    case SIBA_CC_CHIPCTL:
        BHND_CHIPC_WRITE_CHIPCTRL(ctx->chipc_dev, val, val);
        break;
    case SIBA_CC_CHIPCTL_DATA:
        bhnd_pmu_write_chipctrl(ctx->pmu_dev, ctx->pmu_cctl_addr, val,
            val);
        break;
    default:
        panic("unsupported register: %#x", reg);
    }
}

/*
 * siba_cc_mask32()
 *
 * Referenced by:
 *    bwn_wireless_core_phy_pll_reset()
 */
static void
bhnd_compat_cc_mask32(device_t dev, uint32_t reg, uint32_t mask)
{
    struct bwn_bhnd_ctx *ctx = bwn_bhnd_get_ctx(dev);

    /*
     * AND with the current value.
     *
     * This function is only ever used to write to ChipCommon's chipctl_data
     * register. Note that chipctl_data is actually a PMU register; it is
     * not actually mapped by ChipCommon on Always-on-Bus (AOB) devices with
     * a standalone PMU core.
     */
    if (dev != ctx->chipc_dev)
        panic("unsupported device: %s", device_get_nameunit(dev));

    switch (reg) {
    case SIBA_CC_CHIPCTL_DATA:
        bhnd_pmu_write_chipctrl(ctx->pmu_dev, ctx->pmu_cctl_addr, 0,
            ~mask);
        break;
    default:
        panic("unsupported register: %#x", reg);
    }
}

/*
 * siba_cc_write32()
 *
 * Referenced by:
 *    bwn_wireless_core_phy_pll_reset()
 */
static void
bhnd_compat_cc_write32(device_t dev, uint32_t reg, uint32_t val)
{
    struct bwn_bhnd_ctx *ctx = bwn_bhnd_get_ctx(dev);

    /*
     * This function is only ever used to write to ChipCommon's chipctl_addr
     * register; setting chipctl_addr is handled atomically by
     * bhnd_pmu_write_chipctrl(), so we merely cache the intended address
     * for later use when chipctl_data is written.
     *
     * Also, note that chipctl_addr is actually a PMU register; it is
     * not actually mapped by ChipCommon on Always-on-Bus (AOB) devices with
     * a standalone PMU core.
     */
    if (dev != ctx->chipc_dev)
        panic("unsupported device: %s", device_get_nameunit(dev));

    switch (reg) {
    case SIBA_CC_CHIPCTL_ADDR:
        ctx->pmu_cctl_addr = val;
        break;
    default:
        panic("unsupported register: %#x", reg);
    }
}

const struct bwn_bus_ops bwn_bhnd_bus_ops = {
    .init = bwn_bhnd_bus_ops_init,
    .fini = bwn_bhnd_bus_ops_fini,
    .pci_find_cap = bhnd_compat_pci_find_cap,
    .pci_alloc_msi = bhnd_compat_pci_alloc_msi,
    .pci_release_msi = bhnd_compat_pci_release_msi,
    .pci_msi_count = bhnd_compat_pci_msi_count,
    .get_vendor = bhnd_compat_get_vendor,
    .get_device = bhnd_compat_get_device,
    .get_revid = bhnd_compat_get_revid,
    .get_pci_vendor = bhnd_compat_get_pci_vendor,
    .get_pci_device = bhnd_compat_get_pci_device,
    .get_pci_subvendor = bhnd_compat_get_pci_subvendor,
    .get_pci_subdevice = bhnd_compat_get_pci_subdevice,
    .get_pci_revid = bhnd_compat_get_pci_revid,
    .get_chipid = bhnd_compat_get_chipid,
    .get_chiprev = bhnd_compat_get_chiprev,
    .get_chippkg = bhnd_compat_get_chippkg,
    .get_type = bhnd_compat_get_type,
    .get_cc_pmufreq = bhnd_compat_get_cc_pmufreq,
    .get_cc_caps = bhnd_compat_get_cc_caps,
    .get_cc_powerdelay = bhnd_compat_get_cc_powerdelay,
    .get_pcicore_revid = bhnd_compat_get_pcicore_revid,
    .sprom_get_rev = bhnd_compat_sprom_get_rev,
    .sprom_get_mac_80211bg = bhnd_compat_sprom_get_mac_80211bg,
    .sprom_get_mac_80211a = bhnd_compat_sprom_get_mac_80211a,
    .sprom_get_brev = bhnd_compat_sprom_get_brev,
    .sprom_get_ccode = bhnd_compat_sprom_get_ccode,
    .sprom_get_ant_a = bhnd_compat_sprom_get_ant_a,
    .sprom_get_ant_bg = bhnd_compat_sprom_get_ant_bg,
    .sprom_get_pa0b0 = bhnd_compat_sprom_get_pa0b0,
    .sprom_get_pa0b1 = bhnd_compat_sprom_get_pa0b1,
    .sprom_get_pa0b2 = bhnd_compat_sprom_get_pa0b2,
    .sprom_get_gpio0 = bhnd_compat_sprom_get_gpio0,
    .sprom_get_gpio1 = bhnd_compat_sprom_get_gpio1,
    .sprom_get_gpio2 = bhnd_compat_sprom_get_gpio2,
const struct bwn_bus_ops bwn_bhnd_bus_ops = {
	.init = bwn_bhnd_bus_ops_init,
	.fini = bwn_bhnd_bus_ops_fini,
	.pci_find_cap = bhnd_compat_pci_find_cap,
	.pci_alloc_msi = bhnd_compat_pci_alloc_msi,
	.pci_release_msi = bhnd_compat_pci_release_msi,
	.pci_msi_count = bhnd_compat_pci_msi_count,
	.get_vendor = bhnd_compat_get_vendor,
	.get_device = bhnd_compat_get_device,
	.get_revid = bhnd_compat_get_revid,
	.get_pci_vendor = bhnd_compat_get_pci_vendor,
	.get_pci_device = bhnd_compat_get_pci_device,
	.get_pci_subvendor = bhnd_compat_get_pci_subvendor,
	.get_pci_subdevice = bhnd_compat_get_pci_subdevice,
	.get_pci_revid = bhnd_compat_get_pci_revid,
	.get_chipid = bhnd_compat_get_chipid,
	.get_chiprev = bhnd_compat_get_chiprev,
	.get_chippkg = bhnd_compat_get_chippkg,
	.get_type = bhnd_compat_get_type,
	.get_cc_pmufreq = bhnd_compat_get_cc_pmufreq,
	.get_cc_caps = bhnd_compat_get_cc_caps,
	.get_cc_powerdelay = bhnd_compat_get_cc_powerdelay,
	.get_pcicore_revid = bhnd_compat_get_pcicore_revid,
	.sprom_get_rev = bhnd_compat_sprom_get_rev,
	.sprom_get_mac_80211bg = bhnd_compat_sprom_get_mac_80211bg,
	.sprom_get_mac_80211a = bhnd_compat_sprom_get_mac_80211a,
	.sprom_get_brev = bhnd_compat_sprom_get_brev,
	.sprom_get_ccode = bhnd_compat_sprom_get_ccode,
	.sprom_get_ant_a = bhnd_compat_sprom_get_ant_a,
	.sprom_get_ant_bg = bhnd_compat_sprom_get_ant_bg,
	.sprom_get_pa0b0 = bhnd_compat_sprom_get_pa0b0,
	.sprom_get_pa0b1 = bhnd_compat_sprom_get_pa0b1,
	.sprom_get_pa0b2 = bhnd_compat_sprom_get_pa0b2,
	.sprom_get_gpio0 = bhnd_compat_sprom_get_gpio0,
	.sprom_get_gpio1 = bhnd_compat_sprom_get_gpio1,
	.sprom_get_gpio2 = bhnd_compat_sprom_get_gpio2,
	.sprom_get_gpio3 = bhnd_compat_sprom_get_gpio3,
	.sprom_get_maxpwr_bg = bhnd_compat_sprom_get_maxpwr_bg,
	.sprom_set_maxpwr_bg = bhnd_compat_sprom_set_maxpwr_bg,
	.sprom_get_rxpo2g = bhnd_compat_sprom_get_rxpo2g,
	.sprom_get_rxpo5g = bhnd_compat_sprom_get_rxpo5g,
	.sprom_get_tssi_bg = bhnd_compat_sprom_get_tssi_bg,
	.sprom_get_tri2g = bhnd_compat_sprom_get_tri2g,
	.sprom_get_tri5gl = bhnd_compat_sprom_get_tri5gl,
	.sprom_get_tri5g = bhnd_compat_sprom_get_tri5g,
	.sprom_get_tri5gh = bhnd_compat_sprom_get_tri5gh,
	.sprom_get_rssisav2g = bhnd_compat_sprom_get_rssisav2g,
	.sprom_get_rssismc2g = bhnd_compat_sprom_get_rssismc2g,
	.sprom_get_rssismf2g = bhnd_compat_sprom_get_rssismf2g,
	.sprom_get_bxa2g = bhnd_compat_sprom_get_bxa2g,
	.sprom_get_rssisav5g = bhnd_compat_sprom_get_rssisav5g,
	.sprom_get_rssismc5g = bhnd_compat_sprom_get_rssismc5g,
	.sprom_get_rssismf5g = bhnd_compat_sprom_get_rssismf5g,
	.sprom_get_bxa5g = bhnd_compat_sprom_get_bxa5g,
	.sprom_get_cck2gpo = bhnd_compat_sprom_get_cck2gpo,
	.sprom_get_ofdm2gpo = bhnd_compat_sprom_get_ofdm2gpo,
	.sprom_get_ofdm5glpo = bhnd_compat_sprom_get_ofdm5glpo,
	.sprom_get_ofdm5gpo = bhnd_compat_sprom_get_ofdm5gpo,
	.sprom_get_ofdm5ghpo = bhnd_compat_sprom_get_ofdm5ghpo,
	.sprom_get_bf_lo = bhnd_compat_sprom_get_bf_lo,
	.sprom_set_bf_lo = bhnd_compat_sprom_set_bf_lo,
	.sprom_get_bf_hi = bhnd_compat_sprom_get_bf_hi,
	.sprom_get_bf2_lo = bhnd_compat_sprom_get_bf2_lo,
	.sprom_get_bf2_hi = bhnd_compat_sprom_get_bf2_hi,
	.sprom_get_fem_2ghz_tssipos = bhnd_compat_sprom_get_fem_2ghz_tssipos,
	.sprom_get_fem_2ghz_extpa_gain = bhnd_compat_sprom_get_fem_2ghz_extpa_gain,
	.sprom_get_fem_2ghz_pdet_range = bhnd_compat_sprom_get_fem_2ghz_pdet_range,
	.sprom_get_fem_2ghz_tr_iso = bhnd_compat_sprom_get_fem_2ghz_tr_iso,
	.sprom_get_fem_2ghz_antswlut = bhnd_compat_sprom_get_fem_2ghz_antswlut,
	.sprom_get_fem_5ghz_extpa_gain = bhnd_compat_sprom_get_fem_5ghz_extpa_gain,
	.sprom_get_fem_5ghz_pdet_range = bhnd_compat_sprom_get_fem_5ghz_pdet_range,
	.sprom_get_fem_5ghz_antswlut = bhnd_compat_sprom_get_fem_5ghz_antswlut,
	.sprom_get_txpid_2g_0 = bhnd_compat_sprom_get_txpid_2g_0,
	.sprom_get_txpid_2g_1 = bhnd_compat_sprom_get_txpid_2g_1,
	.sprom_get_txpid_5gl_0 = bhnd_compat_sprom_get_txpid_5gl_0,
	.sprom_get_txpid_5gl_1 = bhnd_compat_sprom_get_txpid_5gl_1,
	.sprom_get_txpid_5g_0 = bhnd_compat_sprom_get_txpid_5g_0,
	.sprom_get_txpid_5g_1 = bhnd_compat_sprom_get_txpid_5g_1,
	.sprom_get_txpid_5gh_0 = bhnd_compat_sprom_get_txpid_5gh_0,
	.sprom_get_txpid_5gh_1 = bhnd_compat_sprom_get_txpid_5gh_1,
	.sprom_get_stbcpo = bhnd_compat_sprom_get_stbcpo,
	.sprom_get_cddpo = bhnd_compat_sprom_get_cddpo,
	.powerup = bhnd_compat_powerup,
	.powerdown = bhnd_compat_powerdown,
	.read_2 = bhnd_compat_read_2,
	.write_2 = bhnd_compat_write_2,
	.read_4 = bhnd_compat_read_4,
	.write_4 = bhnd_compat_write_4,
	.dev_up = bhnd_compat_dev_up,
	.dev_down = bhnd_compat_dev_down,
	.dev_isup = bhnd_compat_dev_isup,
	.pcicore_intr = bhnd_compat_pcicore_intr,
	.dma_translation = bhnd_compat_dma_translation,
	.read_multi_2 = bhnd_compat_read_multi_2,
	.read_multi_4 = bhnd_compat_read_multi_4,
	.write_multi_2 = bhnd_compat_write_multi_2,
	.write_multi_4 = bhnd_compat_write_multi_4,
	.barrier = bhnd_compat_barrier,
	.cc_pmu_set_ldovolt = bhnd_compat_cc_pmu_set_ldovolt,
	.cc_pmu_set_ldoparef = bhnd_compat_cc_pmu_set_ldoparef,
	.gpio_set = bhnd_compat_gpio_set,
	.gpio_get = bhnd_compat_gpio_get,
	.fix_imcfglobug = bhnd_compat_fix_imcfglobug,
	.sprom_get_core_power_info = bhnd_compat_sprom_get_core_power_info,
	.sprom_get_mcs2gpo = bhnd_compat_sprom_get_mcs2gpo,
	.sprom_get_mcs5glpo = bhnd_compat_sprom_get_mcs5glpo,
	.sprom_get_mcs5gpo = bhnd_compat_sprom_get_mcs5gpo,
	.sprom_get_mcs5ghpo = bhnd_compat_sprom_get_mcs5ghpo,
	.pmu_spuravoid_pllupdate = bhnd_compat_pmu_spuravoid_pllupdate,
	.cc_set32 = bhnd_compat_cc_set32,
	.cc_mask32 = bhnd_compat_cc_mask32,
	.cc_write32 = bhnd_compat_cc_write32,
};
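/*
 * Illustrative sketch (not compiled): bwn(4) reaches this table through its
 * siba_*() compatibility shims rather than calling the bhnd_compat_*()
 * functions directly. Assuming a softc member and dispatch macro along the
 * lines of the hypothetical BWN_BUS_OPS() below (names invented for
 * illustration), each shim reduces to an indirect call through
 * bwn_bhnd_bus_ops:
 *
 *	#define	BWN_BUS_OPS(_dev)	\
 *		(((struct bwn_softc *)device_get_softc(_dev))->sc_bus_ops)
 *	#define	siba_cc_write32(_dev, _reg, _val)	\
 *		BWN_BUS_OPS(_dev)->cc_write32((_dev), (_reg), (_val))
 */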