Index: head/sys/dev/bhnd/siba/siba.c =================================================================== --- head/sys/dev/bhnd/siba/siba.c (revision 329179) +++ head/sys/dev/bhnd/siba/siba.c (revision 329180) @@ -1,1575 +1,1444 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include +#include "siba_eromvar.h" + #include "sibareg.h" #include "sibavar.h" +/* RID used when allocating EROM resources */ +#define SIBA_EROM_RID 0 + static bhnd_erom_class_t * siba_get_erom_class(driver_t *driver) { return (&siba_erom_parser); } int siba_probe(device_t dev) { device_set_desc(dev, "SIBA BHND bus"); return (BUS_PROBE_DEFAULT); } /** * Default siba(4) bus driver implementation of DEVICE_ATTACH(). * * This implementation initializes internal siba(4) state and performs * bus enumeration, and must be called by subclassing drivers in * DEVICE_ATTACH() before any other bus methods. 
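 *
 * A sketch of how a hypothetical subclassing driver might chain into this
 * implementation (foo_attach() is illustrative and not part of this change;
 * bhnd_generic_attach() is assumed to be the standard bhnd(4) attach helper):
 *
 *	static int
 *	foo_attach(device_t dev)
 *	{
 *		int error;
 *
 *		// enumerate children before any other bus methods are used
 *		if ((error = siba_attach(dev)))
 *			return (error);
 *
 *		// ... driver-specific setup ...
 *
 *		return (bhnd_generic_attach(dev));
 *	}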
*/ int siba_attach(device_t dev) { struct siba_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; SIBA_LOCK_INIT(sc); /* Enumerate children */ if ((error = siba_add_children(dev))) { device_delete_children(dev); SIBA_LOCK_DESTROY(sc); return (error); } return (0); } int siba_detach(device_t dev) { struct siba_softc *sc; int error; sc = device_get_softc(dev); if ((error = bhnd_generic_detach(dev))) return (error); SIBA_LOCK_DESTROY(sc); return (0); } int siba_resume(device_t dev) { return (bhnd_generic_resume(dev)); } int siba_suspend(device_t dev) { return (bhnd_generic_suspend(dev)); } static int siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct siba_softc *sc; const struct siba_devinfo *dinfo; const struct bhnd_core_info *cfg; sc = device_get_softc(dev); dinfo = device_get_ivars(child); cfg = &dinfo->core_id.core_info; switch (index) { case BHND_IVAR_VENDOR: *result = cfg->vendor; return (0); case BHND_IVAR_DEVICE: *result = cfg->device; return (0); case BHND_IVAR_HWREV: *result = cfg->hwrev; return (0); case BHND_IVAR_DEVICE_CLASS: *result = bhnd_core_class(cfg); return (0); case BHND_IVAR_VENDOR_NAME: *result = (uintptr_t) bhnd_vendor_name(cfg->vendor); return (0); case BHND_IVAR_DEVICE_NAME: *result = (uintptr_t) bhnd_core_name(cfg); return (0); case BHND_IVAR_CORE_INDEX: *result = cfg->core_idx; return (0); case BHND_IVAR_CORE_UNIT: *result = cfg->unit; return (0); case BHND_IVAR_PMU_INFO: SIBA_LOCK(sc); switch (dinfo->pmu_state) { case SIBA_PMU_NONE: *result = (uintptr_t)NULL; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_BHND: *result = (uintptr_t)dinfo->pmu.bhnd_info; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: *result = (uintptr_t)NULL; SIBA_UNLOCK(sc); return (0); } panic("invalid PMU state: %d", dinfo->pmu_state); return (ENXIO); default: return (ENOENT); } } static int siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct siba_softc *sc; struct siba_devinfo *dinfo; sc = device_get_softc(dev); dinfo = device_get_ivars(child); switch (index) { case BHND_IVAR_VENDOR: case BHND_IVAR_DEVICE: case BHND_IVAR_HWREV: case BHND_IVAR_DEVICE_CLASS: case BHND_IVAR_VENDOR_NAME: case BHND_IVAR_DEVICE_NAME: case BHND_IVAR_CORE_INDEX: case BHND_IVAR_CORE_UNIT: return (EINVAL); case BHND_IVAR_PMU_INFO: SIBA_LOCK(sc); switch (dinfo->pmu_state) { case SIBA_PMU_NONE: case SIBA_PMU_BHND: dinfo->pmu.bhnd_info = (void *)value; dinfo->pmu_state = SIBA_PMU_BHND; SIBA_UNLOCK(sc); return (0); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: panic("bhnd_set_pmu_info() called with siba PMU state " "%d", dinfo->pmu_state); return (ENXIO); } panic("invalid PMU state: %d", dinfo->pmu_state); return (ENXIO); default: return (ENOENT); } } static struct resource_list * siba_get_resource_list(device_t dev, device_t child) { struct siba_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* BHND_BUS_ALLOC_PMU() */ static int siba_alloc_pmu(device_t dev, device_t child) { struct siba_softc *sc; struct siba_devinfo *dinfo; device_t chipc; device_t pwrctl; struct chipc_caps ccaps; siba_pmu_state pmu_state; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); pwrctl = NULL; /* Fetch ChipCommon capability flags */ chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC); if (chipc != NULL) { ccaps = *BHND_CHIPC_GET_CAPS(chipc); bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC); } else { memset(&ccaps, 0, sizeof(ccaps)); } /* 
Defer to bhnd(4)'s PMU implementation if ChipCommon exists and * advertises PMU support */ if (ccaps.pmu) { if ((error = bhnd_generic_alloc_pmu(dev, child))) return (error); KASSERT(dinfo->pmu_state == SIBA_PMU_BHND, ("unexpected PMU state: %d", dinfo->pmu_state)); return (0); } /* * This is either a legacy PWRCTL chipset, or the device does not * support dynamic clock control. * * We need to map all bhnd(4) bus PMU to PWRCTL or no-op operations. */ if (ccaps.pwr_ctrl) { pmu_state = SIBA_PMU_PWRCTL; pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL); if (pwrctl == NULL) { device_printf(dev, "PWRCTL not found\n"); return (ENODEV); } } else { pmu_state = SIBA_PMU_FIXED; pwrctl = NULL; } SIBA_LOCK(sc); /* Per-core PMU state already allocated? */ if (dinfo->pmu_state != SIBA_PMU_NONE) { panic("duplicate PMU allocation for %s", device_get_nameunit(child)); } /* Update the child's PMU allocation state, and transfer ownership of * the PWRCTL provider reference (if any) */ dinfo->pmu_state = pmu_state; dinfo->pmu.pwrctl = pwrctl; SIBA_UNLOCK(sc); return (0); } /* BHND_BUS_RELEASE_PMU() */ static int siba_release_pmu(device_t dev, device_t child) { struct siba_softc *sc; struct siba_devinfo *dinfo; device_t pwrctl; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("pmu over-release for %s", device_get_nameunit(child)); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_release_pmu(dev, child)); case SIBA_PMU_PWRCTL: /* Requesting BHND_CLOCK_DYN releases any outstanding clock * reservations */ pwrctl = dinfo->pmu.pwrctl; error = bhnd_pwrctl_request_clock(pwrctl, child, BHND_CLOCK_DYN); if (error) { SIBA_UNLOCK(sc); return (error); } /* Clean up the child's PMU state */ dinfo->pmu_state = SIBA_PMU_NONE; dinfo->pmu.pwrctl = NULL; SIBA_UNLOCK(sc); /* Release the provider reference */ bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL); return (0); case SIBA_PMU_FIXED: /* Clean up the child's PMU state */ KASSERT(dinfo->pmu.pwrctl == NULL, ("PWRCTL reference with FIXED state")); dinfo->pmu_state = SIBA_PMU_NONE; dinfo->pmu.pwrctl = NULL; SIBA_UNLOCK(sc); return (0); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_GET_CLOCK_LATENCY() */ static int siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock, u_int *latency) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_get_clock_latency(dev, child, clock, latency)); case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock, latency); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* HT clock is always available, and incurs no transition * delay.
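 *
 * An illustrative caller (a hedged sketch, assuming the standard
 * bhnd_get_clock_latency() wrapper for this bus method):
 *
 *	u_int latency;
 *
 *	if (bhnd_get_clock_latency(child, BHND_CLOCK_HT, &latency) == 0)
 *		DELAY(latency);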
*/ switch (clock) { case BHND_CLOCK_HT: *latency = 0; return (0); default: return (ENODEV); } return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_GET_CLOCK_FREQ() */ static int siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock, u_int *freq) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_get_clock_freq(dev, child, clock, freq)); case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock, freq); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_REQUEST_EXT_RSRC() */ static int siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc) { struct siba_softc *sc; struct siba_devinfo *dinfo; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_request_ext_rsrc(dev, child, rsrc)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: /* HW does not support per-core external resources */ SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_RELEASE_EXT_RSRC() */ static int siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc) { struct siba_softc *sc; struct siba_devinfo *dinfo; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_release_ext_rsrc(dev, child, rsrc)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: /* HW does not support per-core external resources */ SIBA_UNLOCK(sc); return (ENODEV); } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_REQUEST_CLOCK() */ static int siba_request_clock(device_t dev, device_t child, bhnd_clock clock) { struct siba_softc *sc; struct siba_devinfo *dinfo; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_request_clock(dev, child, clock)); case SIBA_PMU_PWRCTL: error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child, clock); SIBA_UNLOCK(sc); return (error); case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* HT clock is always available, and fulfills any of the * following clock requests */ switch (clock) { case BHND_CLOCK_DYN: case BHND_CLOCK_ILP: case BHND_CLOCK_ALP: case BHND_CLOCK_HT: return (0); default: return (ENODEV); } } panic("invalid PMU state: %d", dinfo->pmu_state); } /* BHND_BUS_ENABLE_CLOCKS() */ static int siba_enable_clocks(device_t dev, device_t child, uint32_t clocks) { struct siba_softc *sc; struct siba_devinfo *dinfo; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); 
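	/*
	 * Dispatch on the per-core PMU state: SIBA_PMU_BHND defers to the
	 * generic bhnd(4) PMU implementation, while PWRCTL and FIXED
	 * chipsets treat all supported clocks as always-enabled.
	 */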
SIBA_LOCK(sc); switch(dinfo->pmu_state) { case SIBA_PMU_NONE: panic("no active PMU request state"); SIBA_UNLOCK(sc); return (ENXIO); case SIBA_PMU_BHND: SIBA_UNLOCK(sc); return (bhnd_generic_enable_clocks(dev, child, clocks)); case SIBA_PMU_PWRCTL: case SIBA_PMU_FIXED: SIBA_UNLOCK(sc); /* All (supported) clocks are already enabled by default */ clocks &= ~(BHND_CLOCK_DYN | BHND_CLOCK_ILP | BHND_CLOCK_ALP | BHND_CLOCK_HT); if (clocks != 0) { device_printf(dev, "%s requested unknown clocks: %#x\n", device_get_nameunit(child), clocks); return (ENODEV); } return (0); } panic("invalid PMU state: %d", dinfo->pmu_state); } static int siba_read_iost(device_t dev, device_t child, uint16_t *iost) { uint32_t tmhigh; int error; error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4); if (error) return (error); *iost = (SIBA_REG_GET(tmhigh, TMH_SISF)); return (0); } static int siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl) { uint32_t ts_low; int error; if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4))) return (error); *ioctl = (SIBA_REG_GET(ts_low, TML_SICF)); return (0); } static int siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask) { struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t ts_low, ts_mask; if (device_get_parent(child) != dev) return (EINVAL); /* Fetch CFG0 mapping */ dinfo = device_get_ivars(child); if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* Mask and set TMSTATELOW core flag bits */ ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK; ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); return (0); } static bool siba_is_hw_suspended(device_t dev, device_t child) { uint32_t ts_low; uint16_t ioctl; int error; /* Fetch target state */ error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4); if (error) { device_printf(child, "error reading HW reset state: %d\n", error); return (true); } /* Is core held in RESET? */ if (ts_low & SIBA_TML_RESET) return (true); /* Is target reject enabled? */ if (ts_low & SIBA_TML_REJ_MASK) return (true); /* Is core clocked? */ ioctl = SIBA_REG_GET(ts_low, TML_SICF); if (!(ioctl & BHND_IOCTL_CLK_EN)) return (true); return (false); } static int siba_reset_hw(device_t dev, device_t child, uint16_t ioctl, uint16_t reset_ioctl) { struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t ts_low, imstate; uint16_t clkflags; int error; if (device_get_parent(child) != dev) return (EINVAL); dinfo = device_get_ivars(child); /* Can't suspend the core without access to the CFG0 registers */ if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */ clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE; if (ioctl & clkflags) return (EINVAL); /* Place core into known RESET state */ if ((error = bhnd_suspend_hw(child, reset_ioctl))) return (error); /* Set RESET, clear REJ, set the caller's IOCTL flags, and * force clocks to ensure the signal propagates throughout the * core. 
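 *
 * The combined TMSTATELOW value written below is equivalent to this
 * sketch of the bit layout:
 *
 *	SIBA_TML_RESET | ((ioctl | BHND_IOCTL_CLK_EN |
 *	    BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT)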
*/ ts_low = SIBA_TML_RESET | (ioctl << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT); siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, UINT32_MAX); /* Clear any target errors */ if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) { siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH, 0x0, SIBA_TMH_SERR); } /* Clear any initiator errors */ imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE); if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) { siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_IBE|SIBA_IM_TO); } /* Release from RESET while leaving clocks forced, ensuring the * signal propagates throughout the core */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, SIBA_TML_RESET); /* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE * bit and allow the core to manage clock gating. */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT)); return (0); } static int siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl) { struct siba_softc *sc; struct siba_devinfo *dinfo; struct bhnd_resource *r; uint32_t idl, ts_low, ts_mask; uint16_t cflags, clkflags; int error; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); dinfo = device_get_ivars(child); /* Can't suspend the core without access to the CFG0 registers */ if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */ clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE; if (ioctl & clkflags) return (EINVAL); /* Already in RESET? */ ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW); if (ts_low & SIBA_TML_RESET) return (0); /* If clocks are already disabled, we can place the core directly * into RESET|REJ while setting the caller's IOCTL flags. */ cflags = SIBA_REG_GET(ts_low, TML_SICF); if (!(cflags & BHND_IOCTL_CLK_EN)) { ts_low = SIBA_TML_RESET | SIBA_TML_REJ | (ioctl << SIBA_TML_SICF_SHIFT); ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); return (0); } /* Reject further transactions reaching this core */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, SIBA_TML_REJ, SIBA_TML_REJ); /* Wait for transaction busy flag to clear for all transactions * initiated by this core */ error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH, 0x0, SIBA_TMH_BUSY, 100000); if (error) return (error); /* If this is an initiator core, we need to reject initiator * transactions too. */ idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW); if (idl & SIBA_IDL_INIT) { /* Reject further initiator transactions */ siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, SIBA_IM_RJ, SIBA_IM_RJ); /* Wait for initiator busy flag to clear */ error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_BY, 100000); if (error) return (error); } /* Put the core into RESET, set the caller's IOCTL flags, and * force clocks to ensure the RESET signal propagates throughout the * core. 
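 *
 * At this point the suspend sequence has already asserted the target
 * REJ flag, waited for SIBA_TMH_BUSY to clear, and (for initiator
 * cores) asserted SIBA_IM_RJ and waited for SIBA_IM_BY to clear.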
*/ ts_low = SIBA_TML_RESET | (ioctl << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) | (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT); ts_mask = SIBA_TML_RESET | SIBA_TML_SICF_MASK; siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low, ts_mask); /* Give RESET ample time */ DELAY(10); /* Clear previously asserted initiator reject */ if (idl & SIBA_IDL_INIT) { siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0, SIBA_IM_RJ); } /* Disable all clocks, leaving RESET and REJ asserted */ siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0, (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT); /* * Core is now in RESET. * * If the core holds any PWRCTL clock reservations, we need to release * those now. This emulates the standard bhnd(4) PMU behavior of RESET * automatically clearing clkctl */ SIBA_LOCK(sc); if (dinfo->pmu_state == SIBA_PMU_PWRCTL) { error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child, BHND_CLOCK_DYN); SIBA_UNLOCK(sc); if (error) { device_printf(child, "failed to release clock request: " "%d", error); return (error); } return (0); } else { SIBA_UNLOCK(sc); return (0); } } static int siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value, u_int width) { struct siba_devinfo *dinfo; rman_res_t r_size; /* Must be directly attached */ if (device_get_parent(child) != dev) return (EINVAL); /* CFG0 registers must be available */ dinfo = device_get_ivars(child); if (dinfo->cfg_res[0] == NULL) return (ENODEV); /* Offset must fall within CFG0 */ r_size = rman_get_size(dinfo->cfg_res[0]->res); if (r_size < offset || r_size - offset < width) return (EFAULT); switch (width) { case 1: *((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0], offset); return (0); case 2: *((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0], offset); return (0); case 4: *((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0], offset); return (0); default: return (EINVAL); } } static int siba_write_config(device_t dev, device_t child, bus_size_t offset, const void *value, u_int width) { struct siba_devinfo *dinfo; struct bhnd_resource *r; rman_res_t r_size; /* Must be directly attached */ if (device_get_parent(child) != dev) return (EINVAL); /* CFG0 registers must be available */ dinfo = device_get_ivars(child); if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); /* Offset must fall within CFG0 */ r_size = rman_get_size(r->res); if (r_size < offset || r_size - offset < width) return (EFAULT); switch (width) { case 1: bhnd_bus_write_1(r, offset, *(const uint8_t *)value); return (0); case 2: bhnd_bus_write_2(r, offset, *(const uint16_t *)value); return (0); case 4: bhnd_bus_write_4(r, offset, *(const uint32_t *)value); return (0); default: return (EINVAL); } } static u_int siba_get_port_count(device_t dev, device_t child, bhnd_port_type type) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child, type)); dinfo = device_get_ivars(child); return (siba_port_count(&dinfo->core_id, type)); } static u_int siba_get_region_count(device_t dev, device_t child, bhnd_port_type type, u_int port) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child, type, port)); dinfo = device_get_ivars(child); return (siba_port_region_count(&dinfo->core_id, type, port)); } static int
siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num) { struct siba_devinfo *dinfo; struct siba_addrspace *addrspace; struct siba_cfg_block *cfg; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child, port_type, port_num, region_num)); dinfo = device_get_ivars(child); /* Look for a matching addrspace entry */ addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num); if (addrspace != NULL) return (addrspace->sa_rid); /* Try the config blocks */ cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num); if (cfg != NULL) return (cfg->cb_rid); /* Not found */ return (-1); } static int siba_decode_port_rid(device_t dev, device_t child, int type, int rid, bhnd_port_type *port_type, u_int *port_num, u_int *region_num) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child, type, rid, port_type, port_num, region_num)); dinfo = device_get_ivars(child); /* Ports are always memory mapped */ if (type != SYS_RES_MEMORY) return (EINVAL); /* Look for a matching addrspace entry */ - for (u_int i = 0; i < dinfo->core_id.num_addrspace; i++) { + for (u_int i = 0; i < dinfo->core_id.num_admatch; i++) { if (dinfo->addrspace[i].sa_rid != rid) continue; *port_type = BHND_PORT_DEVICE; *port_num = siba_addrspace_device_port(i); *region_num = siba_addrspace_device_region(i); return (0); } /* Try the config blocks */ for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) { if (dinfo->cfg[i].cb_rid != rid) continue; *port_type = BHND_PORT_AGENT; *port_num = siba_cfg_agent_port(i); *region_num = siba_cfg_agent_region(i); return (0); } /* Not found */ return (ENOENT); } static int siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type, u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size) { struct siba_devinfo *dinfo; struct siba_addrspace *addrspace; struct siba_cfg_block *cfg; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) { return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child, port_type, port_num, region_num, addr, size)); } dinfo = device_get_ivars(child); /* Look for a matching addrspace */ addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num); if (addrspace != NULL) { *addr = addrspace->sa_base; *size = addrspace->sa_size - addrspace->sa_bus_reserved; return (0); } /* Look for a matching cfg block */ cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num); if (cfg != NULL) { *addr = cfg->cb_base; *size = cfg->cb_size; return (0); } /* Not found */ return (ENOENT); } /** * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT(). */ u_int siba_get_intr_count(device_t dev, device_t child) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child)); dinfo = device_get_ivars(child); - if (!dinfo->intr_en) { + if (!dinfo->core_id.intr_en) { /* No interrupts */ return (0); } else { /* One assigned interrupt */ return (1); } } /** * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC(). 
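 *
 * siba(4) cores have at most one backplane interrupt line; the returned
 * ivec is the core's TPSFLAG interrupt flag number. An illustrative
 * caller (a hedged sketch, assuming the standard bhnd_get_intr_ivec()
 * wrapper):
 *
 *	u_int ivec;
 *	int error;
 *
 *	if ((error = bhnd_get_intr_ivec(child, 0, &ivec)) == 0)
 *		device_printf(child, "backplane ivec: %u\n", ivec);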
*/ int siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec) { struct siba_devinfo *dinfo; /* delegate non-bus-attached devices to our parent */ if (device_get_parent(child) != dev) return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child, intr, ivec)); /* Must be a valid interrupt ID */ if (intr >= siba_get_intr_count(dev, child)) return (ENXIO); KASSERT(intr == 0, ("invalid ivec %u", intr)); dinfo = device_get_ivars(child); - KASSERT(dinfo->intr_en, ("core does not have an interrupt assigned")); - *ivec = dinfo->intr.flag; - return (0); -} + KASSERT(dinfo->core_id.intr_en, + ("core does not have an interrupt assigned")); -/** - * Register all address space mappings for @p di. - * - * @param dev The siba bus device. - * @param di The device info instance on which to register all address - * space entries. - * @param r A resource mapping the enumeration table block for @p di. - */ -static int -siba_register_addrspaces(device_t dev, struct siba_devinfo *di, - struct bhnd_resource *r) -{ - struct siba_core_id *cid; - uint32_t addr; - uint32_t size; - int error; - - cid = &di->core_id; - - - /* Register the device address space entries */ - for (uint8_t i = 0; i < di->core_id.num_addrspace; i++) { - uint32_t adm; - u_int adm_offset; - uint32_t bus_reserved; - - /* Determine the register offset */ - adm_offset = siba_admatch_offset(i); - if (adm_offset == 0) { - device_printf(dev, "addrspace %hhu is unsupported", i); - return (ENODEV); - } - - /* Fetch the address match register value */ - adm = bhnd_bus_read_4(r, adm_offset); - - /* Parse the value */ - if ((error = siba_parse_admatch(adm, &addr, &size))) { - device_printf(dev, "failed to decode address " - " match register value 0x%x\n", adm); - return (error); - } - - /* If this is the device's core/enumeration addrespace, - * reserve the Sonics configuration register blocks for the - * use of our bus. */ - bus_reserved = 0; - if (i == SIBA_CORE_ADDRSPACE) - bus_reserved = cid->num_cfg_blocks * SIBA_CFG_SIZE; - - /* Append the region info */ - error = siba_append_dinfo_region(di, i, addr, size, - bus_reserved); - if (error) - return (error); - } - + *ivec = dinfo->core_id.intr_flag; return (0); } - /** - * Register all interrupt descriptors for @p dinfo. Must be called after - * configuration blocks have been mapped. - * - * @param dev The siba bus device. - * @param child The siba child device. - * @param dinfo The device info instance on which to register all interrupt - * descriptor entries. - * @param r A resource mapping the enumeration table block for @p di. - */ -static int -siba_register_interrupts(device_t dev, device_t child, - struct siba_devinfo *dinfo, struct bhnd_resource *r) -{ - uint32_t tpsflag; - int error; - - /* Is backplane interrupt distribution enabled for this core? 
*/ - tpsflag = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_TPSFLAG)); - if ((tpsflag & SIBA_TPS_F0EN0) == 0) { - dinfo->intr_en = false; - return (0); - } - - /* Have one interrupt */ - dinfo->intr_en = true; - dinfo->intr.flag = SIBA_REG_GET(tpsflag, TPS_NUM0); - dinfo->intr.mapped = false; - dinfo->intr.irq = 0; - dinfo->intr.rid = -1; - - /* Map the interrupt */ - error = BHND_BUS_MAP_INTR(dev, child, 0 /* single intr is always 0 */, - &dinfo->intr.irq); - if (error) { - device_printf(dev, "failed mapping interrupt line for core %u: " - "%d\n", dinfo->core_id.core_info.core_idx, error); - return (error); - } - dinfo->intr.mapped = true; - - /* Update the resource list */ - dinfo->intr.rid = resource_list_add_next(&dinfo->resources, SYS_RES_IRQ, - dinfo->intr.irq, dinfo->intr.irq, 1); - - return (0); -} - -/** * Map per-core configuration blocks for @p dinfo. * * @param dev The siba bus device. * @param dinfo The device info instance on which to map all per-core * configuration blocks. */ static int siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo) { struct siba_addrspace *addrspace; rman_res_t r_start, r_count, r_end; uint8_t num_cfg; int rid; num_cfg = dinfo->core_id.num_cfg_blocks; if (num_cfg > SIBA_MAX_CFG) { device_printf(dev, "config block count %hhu out of range\n", num_cfg); return (ENXIO); } /* Fetch the core register address space */ addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0); if (addrspace == NULL) { device_printf(dev, "missing device registers\n"); return (ENXIO); } /* * Map the per-core configuration blocks */ for (uint8_t i = 0; i < num_cfg; i++) { /* Add to child's resource list */ r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i); r_count = SIBA_CFG_SIZE; r_end = r_start + r_count - 1; rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY, r_start, r_end, r_count); /* Initialize config block descriptor */ dinfo->cfg[i] = ((struct siba_cfg_block) { .cb_base = r_start, .cb_size = SIBA_CFG_SIZE, .cb_rid = rid }); /* Map the config resource for bus-level access */ dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i); dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev, SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end, r_count, RF_ACTIVE|RF_SHAREABLE); if (dinfo->cfg_res[i] == NULL) { device_printf(dev, "failed to allocate SIBA_CFG%hhu\n", i); return (ENXIO); } } return (0); } static device_t siba_add_child(device_t dev, u_int order, const char *name, int unit) { struct siba_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); if ((dinfo = siba_alloc_dinfo(dev)) == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, dinfo); return (child); } static void siba_child_deleted(device_t dev, device_t child) { struct bhnd_softc *sc; struct siba_devinfo *dinfo; sc = device_get_softc(dev); /* Call required bhnd(4) implementation */ bhnd_generic_child_deleted(dev, child); /* Free siba device info */ if ((dinfo = device_get_ivars(child)) != NULL) siba_free_dinfo(dev, child, dinfo); device_set_ivars(child, NULL); } /** * Scan the core table and add all valid discovered cores to * the bus. * * @param dev The siba bus device. 
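 *
 * With this change, enumeration is driven by the siba EROM parser rather
 * than by directly mapping each core's register block; in outline (a
 * sketch of the flow implemented below):
 *
 *	eio = bhnd_erom_iores_new(dev, SIBA_EROM_RID);
 *	erom = bhnd_erom_alloc(&siba_erom_parser, cid, eio);
 *	for (u_int i = 0; i < cid->ncores; i++)
 *		error = siba_erom_get_core_id((struct siba_erom *)erom,
 *		    i, &cores[i]);
 *	bhnd_erom_free(erom);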
*/ int siba_add_children(device_t dev) { - const struct bhnd_chipid *chipid; + bhnd_erom_t *erom; + struct siba_erom *siba_erom; + struct bhnd_erom_io *eio; + const struct bhnd_chipid *cid; struct siba_core_id *cores; - struct bhnd_resource *r; device_t *children; - int rid; int error; - cores = NULL; - r = NULL; + cid = BHND_BUS_GET_CHIPID(dev, dev); - chipid = BHND_BUS_GET_CHIPID(dev, dev); + /* Allocate our EROM parser */ + eio = bhnd_erom_iores_new(dev, SIBA_EROM_RID); + erom = bhnd_erom_alloc(&siba_erom_parser, cid, eio); + if (erom == NULL) { + bhnd_erom_io_fini(eio); + return (ENODEV); + } /* Allocate our temporary core and device table */ - cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_WAITOK); - children = malloc(sizeof(*children) * chipid->ncores, M_BHND, + cores = malloc(sizeof(*cores) * cid->ncores, M_BHND, M_WAITOK); + children = malloc(sizeof(*children) * cid->ncores, M_BHND, M_WAITOK | M_ZERO); /* * Add child devices for all discovered cores. * * On bridged devices, we'll exhaust our available register windows if * we map config blocks on unpopulated/disabled cores. To avoid this, we * defer mapping of the per-core siba(4) config blocks until all cores * have been enumerated and otherwise configured. */ - for (u_int i = 0; i < chipid->ncores; i++) { + siba_erom = (struct siba_erom *)erom; + for (u_int i = 0; i < cid->ncores; i++) { struct siba_devinfo *dinfo; device_t child; - uint32_t idhigh, idlow; - rman_res_t r_count, r_end, r_start; - /* Map the core's register block */ - rid = 0; - r_start = SIBA_CORE_ADDR(i); - r_count = SIBA_CORE_SIZE; - r_end = r_start + SIBA_CORE_SIZE - 1; - r = bhnd_alloc_resource(dev, SYS_RES_MEMORY, &rid, r_start, - r_end, r_count, RF_ACTIVE); - if (r == NULL) { - error = ENXIO; + if ((error = siba_erom_get_core_id(siba_erom, i, &cores[i]))) goto failed; - } - /* Read the core info */ - idhigh = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDHIGH)); - idlow = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDLOW)); - - cores[i] = siba_parse_core_id(idhigh, idlow, i, 0); - - /* Determine and set unit number */ - for (u_int j = 0; j < i; j++) { - struct bhnd_core_info *cur = &cores[i].core_info; - struct bhnd_core_info *prev = &cores[j].core_info; - - if (prev->vendor == cur->vendor && - prev->device == cur->device) - cur->unit++; - } - /* Add the child device */ child = BUS_ADD_CHILD(dev, 0, NULL, -1); if (child == NULL) { error = ENXIO; goto failed; } children[i] = child; /* Initialize per-device bus info */ if ((dinfo = device_get_ivars(child)) == NULL) { error = ENXIO; goto failed; } - if ((error = siba_init_dinfo(dev, dinfo, &cores[i]))) + if ((error = siba_init_dinfo(dev, child, dinfo, &cores[i]))) goto failed; - /* Register the core's address space(s). */ - if ((error = siba_register_addrspaces(dev, dinfo, r))) - goto failed; - - /* Register the core's interrupts */ - if ((error = siba_register_interrupts(dev, child, dinfo, r))) - goto failed; - - /* Unmap the core's register block */ - bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r); - r = NULL; - /* If pins are floating or the hardware is otherwise * unpopulated, the device shouldn't be used. 
*/ if (bhnd_is_hw_disabled(child)) device_disable(child); } + /* Free EROM (and any bridge register windows it might hold) */ + bhnd_erom_free(erom); + erom = NULL; + /* Map all valid core's config register blocks and perform interrupt * assignment */ - for (u_int i = 0; i < chipid->ncores; i++) { + for (u_int i = 0; i < cid->ncores; i++) { struct siba_devinfo *dinfo; device_t child; child = children[i]; /* Skip if core is disabled */ if (bhnd_is_hw_disabled(child)) continue; dinfo = device_get_ivars(child); /* Map the core's config blocks */ if ((error = siba_map_cfg_resources(dev, dinfo))) goto failed; /* Issue bus callback for fully initialized child. */ BHND_BUS_CHILD_ADDED(dev, child); } free(cores, M_BHND); free(children, M_BHND); return (0); failed: - for (u_int i = 0; i < chipid->ncores; i++) { + for (u_int i = 0; i < cid->ncores; i++) { if (children[i] == NULL) continue; device_delete_child(dev, children[i]); } free(cores, M_BHND); free(children, M_BHND); - - if (r != NULL) - bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r); + if (erom != NULL) + bhnd_erom_free(erom); return (error); } static device_method_t siba_methods[] = { /* Device interface */ DEVMETHOD(device_probe, siba_probe), DEVMETHOD(device_attach, siba_attach), DEVMETHOD(device_detach, siba_detach), DEVMETHOD(device_resume, siba_resume), DEVMETHOD(device_suspend, siba_suspend), /* Bus interface */ DEVMETHOD(bus_add_child, siba_add_child), DEVMETHOD(bus_child_deleted, siba_child_deleted), DEVMETHOD(bus_read_ivar, siba_read_ivar), DEVMETHOD(bus_write_ivar, siba_write_ivar), DEVMETHOD(bus_get_resource_list, siba_get_resource_list), /* BHND interface */ DEVMETHOD(bhnd_bus_get_erom_class, siba_get_erom_class), DEVMETHOD(bhnd_bus_alloc_pmu, siba_alloc_pmu), DEVMETHOD(bhnd_bus_release_pmu, siba_release_pmu), DEVMETHOD(bhnd_bus_request_clock, siba_request_clock), DEVMETHOD(bhnd_bus_enable_clocks, siba_enable_clocks), DEVMETHOD(bhnd_bus_request_ext_rsrc, siba_request_ext_rsrc), DEVMETHOD(bhnd_bus_release_ext_rsrc, siba_release_ext_rsrc), DEVMETHOD(bhnd_bus_get_clock_freq, siba_get_clock_freq), DEVMETHOD(bhnd_bus_get_clock_latency, siba_get_clock_latency), DEVMETHOD(bhnd_bus_read_ioctl, siba_read_ioctl), DEVMETHOD(bhnd_bus_write_ioctl, siba_write_ioctl), DEVMETHOD(bhnd_bus_read_iost, siba_read_iost), DEVMETHOD(bhnd_bus_is_hw_suspended, siba_is_hw_suspended), DEVMETHOD(bhnd_bus_reset_hw, siba_reset_hw), DEVMETHOD(bhnd_bus_suspend_hw, siba_suspend_hw), DEVMETHOD(bhnd_bus_read_config, siba_read_config), DEVMETHOD(bhnd_bus_write_config, siba_write_config), DEVMETHOD(bhnd_bus_get_port_count, siba_get_port_count), DEVMETHOD(bhnd_bus_get_region_count, siba_get_region_count), DEVMETHOD(bhnd_bus_get_port_rid, siba_get_port_rid), DEVMETHOD(bhnd_bus_decode_port_rid, siba_decode_port_rid), DEVMETHOD(bhnd_bus_get_region_addr, siba_get_region_addr), DEVMETHOD(bhnd_bus_get_intr_count, siba_get_intr_count), DEVMETHOD(bhnd_bus_get_intr_ivec, siba_get_intr_ivec), DEVMETHOD_END }; DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc), bhnd_driver); MODULE_VERSION(siba, 1); MODULE_DEPEND(siba, bhnd, 1, 1, 1); Index: head/sys/dev/bhnd/siba/siba_erom.c =================================================================== --- head/sys/dev/bhnd/siba/siba_erom.c (revision 329179) +++ head/sys/dev/bhnd/siba/siba_erom.c (revision 329180) @@ -1,553 +1,706 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. 
* * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "sibareg.h" #include "sibavar.h" +#include "siba_eromvar.h" + struct siba_erom; struct siba_erom_io; static int siba_eio_init(struct siba_erom_io *io, struct bhnd_erom_io *eio, u_int ncores); static uint32_t siba_eio_read_4(struct siba_erom_io *io, u_int core_idx, bus_size_t offset); -static struct siba_core_id siba_eio_read_core_id(struct siba_erom_io *io, - u_int core_idx, int unit); +static int siba_eio_read_core_id(struct siba_erom_io *io, + u_int core_idx, int unit, + struct siba_core_id *sid); static int siba_eio_read_chipid(struct siba_erom_io *io, bus_addr_t enum_addr, struct bhnd_chipid *cid); /** * SIBA EROM generic I/O context */ struct siba_erom_io { struct bhnd_erom_io *eio; /**< erom I/O callbacks */ bhnd_addr_t base_addr; /**< address of first core */ u_int ncores; /**< core count */ }; /** * SIBA EROM per-instance state. */ struct siba_erom { struct bhnd_erom obj; struct siba_erom_io io; /**< i/o context */ }; #define EROM_LOG(io, fmt, ...) do { \ printf("%s: " fmt, __FUNCTION__, ##__VA_ARGS__); \ } while(0) /* SIBA implementation of BHND_EROM_PROBE() */ static int siba_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio, const struct bhnd_chipid *hint, struct bhnd_chipid *cid) { struct siba_erom_io io; uint32_t idreg; int error; /* Initialize I/O context, assuming at least the first core is mapped */ if ((error = siba_eio_init(&io, eio, 1))) return (error); /* Try using the provided hint. */ if (hint != NULL) { struct siba_core_id sid; /* Validate bus type */ if (hint->chip_type != BHND_CHIPTYPE_SIBA) return (ENXIO); /* * Verify the first core's IDHIGH/IDLOW identification. * * The core must be a Broadcom core, but must *not* be * a chipcommon core; those shouldn't be hinted. * * The first core on EXTIF-equipped devices varies, but on the * BCM4710, it's a SDRAM core (0x803). 
*/ - sid = siba_eio_read_core_id(&io, 0, 0); + if ((error = siba_eio_read_core_id(&io, 0, 0, &sid))) + return (error); if (sid.core_info.vendor != BHND_MFGID_BCM) return (ENXIO); if (sid.core_info.device == BHND_COREID_CC) return (EINVAL); *cid = *hint; } else { /* Validate bus type */ idreg = siba_eio_read_4(&io, 0, CHIPC_ID); if (CHIPC_GET_BITS(idreg, CHIPC_ID_BUS) != BHND_CHIPTYPE_SIBA) return (ENXIO); /* Identify the chipset */ if ((error = siba_eio_read_chipid(&io, SIBA_ENUM_ADDR, cid))) return (error); /* Verify the chip type */ if (cid->chip_type != BHND_CHIPTYPE_SIBA) return (ENXIO); } /* * gcc hack: ensure bhnd_chipid.ncores cannot exceed SIBA_MAX_CORES * without triggering build failure due to -Wtype-limits * * if (cid.ncores > SIBA_MAX_CORES) * return (EINVAL) */ _Static_assert((2^sizeof(cid->ncores)) <= SIBA_MAX_CORES, "ncores could result in over-read of backing resource"); return (0); } /* SIBA implementation of BHND_EROM_INIT() */ static int siba_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid, struct bhnd_erom_io *eio) { struct siba_erom *sc; int error; sc = (struct siba_erom *)erom; /* Attempt to map the full core enumeration space */ error = bhnd_erom_io_map(eio, cid->enum_addr, cid->ncores * SIBA_CORE_SIZE); if (error) { printf("%s: failed to map %u cores: %d\n", __FUNCTION__, cid->ncores, error); return (error); } /* Initialize I/O context */ return (siba_eio_init(&sc->io, eio, cid->ncores)); } /* SIBA implementation of BHND_EROM_FINI() */ static void siba_erom_fini(bhnd_erom_t *erom) { struct siba_erom *sc = (struct siba_erom *)erom; bhnd_erom_io_fini(sc->io.eio); } /* Initialize siba_erom resource I/O context */ static int siba_eio_init(struct siba_erom_io *io, struct bhnd_erom_io *eio, u_int ncores) { io->eio = eio; io->ncores = ncores; return (0); } /** * Read a 32-bit value from @p offset relative to the base address of * the given @p core_idx. * * @param io EROM I/O context. * @param core_idx Core index. * @param offset Core register offset. */ static uint32_t siba_eio_read_4(struct siba_erom_io *io, u_int core_idx, bus_size_t offset) { /* Sanity check core index and offset */ if (core_idx >= io->ncores) panic("core index %u out of range (ncores=%u)", core_idx, io->ncores); if (offset > SIBA_CORE_SIZE - sizeof(uint32_t)) panic("invalid core offset %#jx", (uintmax_t)offset); /* Perform read */ return (bhnd_erom_io_read(io->eio, SIBA_CORE_OFFSET(core_idx) + offset, 4)); } /** * Read and parse identification registers for the given @p core_index. * * @param io EROM I/O context. * @param core_idx The core index. * @param unit The caller-specified unit number to be included in the return * value. + * @param[out] sid On success, the parsed siba core id. + * + * @retval 0 success + * @retval non-zero if reading or parsing the identification registers + * otherwise fails, a regular unix error code will be + * returned. 
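+ *
+ * Each enabled address match ("admatch") descriptor is decoded into a
+ * struct siba_admatch entry (am_base, am_size, am_enabled, am_negative);
+ * disabled descriptors are skipped, and unsupported negative matches
+ * are rejected outright.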
*/ -static struct siba_core_id -siba_eio_read_core_id(struct siba_erom_io *io, u_int core_idx, int unit) +static int +siba_eio_read_core_id(struct siba_erom_io *io, u_int core_idx, int unit, + struct siba_core_id *sid) { - uint32_t idhigh, idlow; + struct siba_admatch admatch[SIBA_MAX_ADDRSPACE]; + uint32_t idhigh, idlow; + uint32_t tpsflag; + uint16_t ocp_vendor; + uint8_t sonics_rev; + uint8_t num_admatch; + uint8_t num_admatch_en; + uint8_t num_cfg; + bool intr_en; + u_int intr_flag; + int error; idhigh = siba_eio_read_4(io, core_idx, SB0_REG_ABS(SIBA_CFG0_IDHIGH)); idlow = siba_eio_read_4(io, core_idx, SB0_REG_ABS(SIBA_CFG0_IDLOW)); + tpsflag = siba_eio_read_4(io, core_idx, SB0_REG_ABS(SIBA_CFG0_TPSFLAG)); - return (siba_parse_core_id(idhigh, idlow, core_idx, unit)); + ocp_vendor = SIBA_REG_GET(idhigh, IDH_VENDOR); + sonics_rev = SIBA_REG_GET(idlow, IDL_SBREV); + num_admatch = SIBA_REG_GET(idlow, IDL_NRADDR) + 1 /* + enum block */; + if (num_admatch > nitems(admatch)) { + printf("core%u: invalid admatch count %hhu\n", core_idx, + num_admatch); + return (EINVAL); + } + + /* Determine backplane interrupt distribution configuration */ + intr_en = ((tpsflag & SIBA_TPS_F0EN0) != 0); + intr_flag = SIBA_REG_GET(tpsflag, TPS_NUM0); + + /* Determine the number of sonics config register blocks */ + num_cfg = SIBA_CFG_NUM_2_2; + if (sonics_rev >= SIBA_IDL_SBREV_2_3) + num_cfg = SIBA_CFG_NUM_2_3; + + /* Parse all admatch descriptors */ + num_admatch_en = 0; + for (uint8_t i = 0; i < num_admatch; i++) { + uint32_t am_value; + u_int am_offset; + + KASSERT(i < nitems(admatch), ("invalid admatch index")); + + /* Determine the register offset */ + am_offset = siba_admatch_offset(i); + if (am_offset == 0) { + printf("core%u: addrspace %hhu is unsupported", + core_idx, i); + return (ENODEV); + } + + /* Read and parse the address match register */ + am_value = siba_eio_read_4(io, core_idx, am_offset); + error = siba_parse_admatch(am_value, &admatch[num_admatch_en]); + if (error) { + printf("core%u: failed to decode admatch[%hhu] " + "register value 0x%x\n", core_idx, i, am_value); + return (error); + } + + /* Skip disabled entries */ + if (!admatch[num_admatch_en].am_enabled) + continue; + + /* Reject unsupported negative matches. These are not used on + * any known devices */ + if (admatch[num_admatch_en].am_negative) { + printf("core%u: unsupported negative admatch[%hhu] " + "value 0x%x\n", core_idx, i, am_value); + return (ENXIO); + } + + num_admatch_en++; + } + + /* Populate the result */ + *sid = (struct siba_core_id) { + .core_info = { + .vendor = siba_get_bhnd_mfgid(ocp_vendor), + .device = SIBA_REG_GET(idhigh, IDH_DEVICE), + .hwrev = SIBA_IDH_CORE_REV(idhigh), + .core_idx = core_idx, + .unit = unit + }, + .sonics_vendor = ocp_vendor, + .sonics_rev = sonics_rev, + .intr_en = intr_en, + .intr_flag = intr_flag, + .num_admatch = num_admatch_en, + .num_cfg_blocks = num_cfg + }; + memcpy(sid->admatch, admatch, num_admatch_en * sizeof(admatch[0])); + + return (0); } /** + * Read and parse the SSB identification registers for the given @p core_index, + * returning the siba(4) core identification in @p sid. + * + * @param sc A siba EROM instance. + * @param core_idx The index of the core to be identified. + * @param[out] result On success, the parsed siba core id. + * + * @retval 0 success + * @retval non-zero if reading or parsing the identification registers + * otherwise fails, a regular unix error code will be + * returned. 
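+ *
+ * Illustrative use from an enumeration loop (a sketch; error handling
+ * abbreviated):
+ *
+ *	struct siba_core_id sid;
+ *	int error;
+ *
+ *	if ((error = siba_erom_get_core_id(sc, core_idx, &sid)))
+ *		return (error);
+ *	printf("core%u: unit %d\n", core_idx, sid.core_info.unit);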
+ */ +int +siba_erom_get_core_id(struct siba_erom *sc, u_int core_idx, + struct siba_core_id *result) +{ + struct siba_core_id sid; + int error; + + /* Fetch the core info, assuming a unit number of 0 */ + if ((error = siba_eio_read_core_id(&sc->io, core_idx, 0, &sid))) + return (error); + + /* Scan preceding cores to determine the real unit number. */ + for (u_int i = 0; i < core_idx; i++) { + struct siba_core_id prev; + + if ((error = siba_eio_read_core_id(&sc->io, i, 0, &prev))) + return (error); + + /* Bump the unit number? */ + if (sid.core_info.vendor == prev.core_info.vendor && + sid.core_info.device == prev.core_info.device) + sid.core_info.unit++; + } + + *result = sid; + return (0); +} + +/** * Read and parse the chip identification register from the ChipCommon core. * * @param io EROM I/O context. * @param enum_addr The physical address mapped by @p io. * @param cid On success, the parsed chip identifier. */ static int siba_eio_read_chipid(struct siba_erom_io *io, bus_addr_t enum_addr, struct bhnd_chipid *cid) { struct siba_core_id ccid; uint32_t idreg; + int error; /* Identify the chipcommon core */ - ccid = siba_eio_read_core_id(io, 0, 0); + if ((error = siba_eio_read_core_id(io, 0, 0, &ccid))) + return (error); + if (ccid.core_info.vendor != BHND_MFGID_BCM || ccid.core_info.device != BHND_COREID_CC) { if (bootverbose) { EROM_LOG(io, "first core not chipcommon " "(vendor=%#hx, core=%#hx)\n", ccid.core_info.vendor, ccid.core_info.device); } return (ENXIO); } /* Identify the chipset */ idreg = siba_eio_read_4(io, 0, CHIPC_ID); *cid = bhnd_parse_chipid(idreg, enum_addr); /* Fix up the core count in-place */ return (bhnd_chipid_fixed_ncores(cid, ccid.core_info.hwrev, &cid->ncores)); } static int siba_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc, struct bhnd_core_info *core) { struct siba_erom *sc; struct bhnd_core_match imatch; + int error; sc = (struct siba_erom *)erom; /* We can't determine a core's unit number during the initial scan. */ imatch = *desc; imatch.m.match.core_unit = 0; /* Locate the first matching core */ for (u_int i = 0; i < sc->io.ncores; i++) { struct siba_core_id sid; struct bhnd_core_info ci; /* Read the core info */ - sid = siba_eio_read_core_id(&sc->io, i, 0); + if ((error = siba_eio_read_core_id(&sc->io, i, 0, &sid))) + return (error); + ci = sid.core_info; /* Check for initial match */ if (!bhnd_core_matches(&ci, &imatch)) continue; /* Re-scan preceding cores to determine the unit number. */ for (u_int j = 0; j < i; j++) { - sid = siba_eio_read_core_id(&sc->io, j, 0); + error = siba_eio_read_core_id(&sc->io, j, 0, &sid); + if (error) + return (error); /* Bump the unit number? 
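 * Cores sharing the same vendor/device pair are assigned sequential
 * unit numbers in core-index order.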
*/ if (sid.core_info.vendor == ci.vendor && sid.core_info.device == ci.device) ci.unit++; } /* Check for full match against now-valid unit number */ if (!bhnd_core_matches(&ci, desc)) continue; /* Matching core found */ *core = ci; return (0); } /* Not found */ return (ENOENT); } static int siba_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc, bhnd_port_type type, u_int port, u_int region, struct bhnd_core_info *info, bhnd_addr_t *addr, bhnd_size_t *size) { struct siba_erom *sc; struct bhnd_core_info core; struct siba_core_id sid; - uint32_t am, am_addr, am_size; + struct siba_admatch admatch; + uint32_t am; u_int am_offset; u_int addrspace, cfg; int error; sc = (struct siba_erom *)erom; /* Locate the requested core */ if ((error = siba_erom_lookup_core(erom, desc, &core))) return (error); /* Fetch full siba core ident */ - sid = siba_eio_read_core_id(&sc->io, core.core_idx, core.unit); + error = siba_eio_read_core_id(&sc->io, core.core_idx, core.unit, &sid); + if (error) + return (error); /* Is port valid? */ if (!siba_is_port_valid(&sid, type, port)) return (ENOENT); /* Is region valid? */ if (region >= siba_port_region_count(&sid, type, port)) return (ENOENT); /* Is this a siba configuration region? If so, this is mapped to an * offset within the device0.0 port */ error = siba_cfg_index(&sid, type, port, region, &cfg); if (!error) { bhnd_addr_t region_addr; bhnd_addr_t region_size; bhnd_size_t cfg_offset, cfg_size; cfg_offset = SIBA_CFG_OFFSET(cfg); cfg_size = SIBA_CFG_SIZE; /* Fetch the device0.0 addr/size */ error = siba_erom_lookup_core_addr(erom, desc, BHND_PORT_DEVICE, 0, 0, NULL, ®ion_addr, ®ion_size); if (error) return (error); /* Verify that our offset fits within the region */ if (region_size < cfg_size) { printf("%s%u.%u offset %ju exceeds %s0.0 size %ju\n", bhnd_port_type_name(type), port, region, cfg_offset, bhnd_port_type_name(BHND_PORT_DEVICE), region_size); return (ENXIO); } if (BHND_ADDR_MAX - region_addr < cfg_offset) { printf("%s%u.%u offset %ju would overflow %s0.0 addr " "%ju\n", bhnd_port_type_name(type), port, region, cfg_offset, bhnd_port_type_name(BHND_PORT_DEVICE), region_addr); return (ENXIO); } if (info != NULL) *info = core; *addr = region_addr + cfg_offset; *size = cfg_size; return (0); } /* * Otherwise, must be a device port. * * Map the bhnd device port to a siba addrspace index. Unlike siba(4) * bus drivers, we do not exclude the siba(4) configuration blocks from * the first device port. 
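 *
 * The resulting addrspace index selects the admatch register to decode;
 * the decoded am_base/am_size become the returned region address and
 * size.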
*/ error = siba_addrspace_index(&sid, type, port, region, &addrspace); if (error) return (error); /* Determine the register offset */ am_offset = siba_admatch_offset(addrspace); if (am_offset == 0) { printf("addrspace %u is unsupported", addrspace); return (ENODEV); } /* Read and parse the address match register */ am = siba_eio_read_4(&sc->io, core.core_idx, am_offset); - if ((error = siba_parse_admatch(am, &am_addr, &am_size))) { + if ((error = siba_parse_admatch(am, &admatch))) { printf("failed to decode address match register value 0x%x\n", am); return (error); } if (info != NULL) *info = core; - *addr = am_addr; - *size = am_size; + *addr = admatch.am_base; + *size = admatch.am_size; return (0); } /* BHND_EROM_GET_CORE_TABLE() */ static int siba_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores, u_int *num_cores) { struct siba_erom *sc; struct bhnd_core_info *out; + int error; sc = (struct siba_erom *)erom; /* Allocate our core array */ out = mallocarray(sc->io.ncores, sizeof(*out), M_BHND, M_NOWAIT); if (out == NULL) return (ENOMEM); *cores = out; *num_cores = sc->io.ncores; /* Enumerate all cores. */ for (u_int i = 0; i < sc->io.ncores; i++) { struct siba_core_id sid; /* Read the core info */ - sid = siba_eio_read_core_id(&sc->io, i, 0); + if ((error = siba_eio_read_core_id(&sc->io, i, 0, &sid))) + return (error); + out[i] = sid.core_info; /* Determine unit number */ for (u_int j = 0; j < i; j++) { if (out[j].vendor == out[i].vendor && out[j].device == out[i].device) out[i].unit++; } } return (0); } /* BHND_EROM_FREE_CORE_TABLE() */ static void siba_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores) { free(cores, M_BHND); } /* BHND_EROM_DUMP() */ static int siba_erom_dump(bhnd_erom_t *erom) { struct siba_erom *sc; int error; sc = (struct siba_erom *)erom; /* Enumerate all cores. 
*/ for (u_int i = 0; i < sc->io.ncores; i++) { uint32_t idhigh, idlow; uint32_t nraddr; idhigh = siba_eio_read_4(&sc->io, i, SB0_REG_ABS(SIBA_CFG0_IDHIGH)); idlow = siba_eio_read_4(&sc->io, i, SB0_REG_ABS(SIBA_CFG0_IDLOW)); printf("siba core %u:\n", i); printf("\tvendor:\t0x%04x\n", SIBA_REG_GET(idhigh, IDH_VENDOR)); printf("\tdevice:\t0x%04x\n", SIBA_REG_GET(idhigh, IDH_DEVICE)); printf("\trev:\t0x%04x\n", SIBA_IDH_CORE_REV(idhigh)); printf("\tsbrev:\t0x%02x\n", SIBA_REG_GET(idlow, IDL_SBREV)); /* Enumerate the address match registers */ nraddr = SIBA_REG_GET(idlow, IDL_NRADDR); printf("\tnraddr\t0x%04x\n", nraddr); for (size_t addrspace = 0; addrspace < nraddr; addrspace++) { - uint32_t am, am_addr, am_size; - u_int am_offset; + struct siba_admatch admatch; + uint32_t am; + u_int am_offset; /* Determine the register offset */ am_offset = siba_admatch_offset(addrspace); if (am_offset == 0) { printf("addrspace %zu unsupported", addrspace); break; } /* Read and parse the address match register */ am = siba_eio_read_4(&sc->io, i, am_offset); - error = siba_parse_admatch(am, &am_addr, &am_size); - if (error) { + if ((error = siba_parse_admatch(am, &admatch))) { printf("failed to decode address match " "register value 0x%x\n", am); continue; } printf("\taddrspace %zu\n", addrspace); - printf("\t\taddr: 0x%08x\n", am_addr); - printf("\t\tsize: 0x%08x\n", am_size); + printf("\t\taddr: 0x%08x\n", admatch.am_base); + printf("\t\tsize: 0x%08x\n", admatch.am_size); } } return (0); } static kobj_method_t siba_erom_methods[] = { KOBJMETHOD(bhnd_erom_probe, siba_erom_probe), KOBJMETHOD(bhnd_erom_init, siba_erom_init), KOBJMETHOD(bhnd_erom_fini, siba_erom_fini), KOBJMETHOD(bhnd_erom_get_core_table, siba_erom_get_core_table), KOBJMETHOD(bhnd_erom_free_core_table, siba_erom_free_core_table), KOBJMETHOD(bhnd_erom_lookup_core, siba_erom_lookup_core), KOBJMETHOD(bhnd_erom_lookup_core_addr, siba_erom_lookup_core_addr), KOBJMETHOD(bhnd_erom_dump, siba_erom_dump), KOBJMETHOD_END }; BHND_EROM_DEFINE_CLASS(siba_erom, siba_erom_parser, siba_erom_methods, sizeof(struct siba_erom)); Index: head/sys/dev/bhnd/siba/siba_eromvar.h =================================================================== --- head/sys/dev/bhnd/siba/siba_eromvar.h (nonexistent) +++ head/sys/dev/bhnd/siba/siba_eromvar.h (revision 329180) @@ -0,0 +1,46 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2018 Landon Fuller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _SIBA_SIBA_EROMVAR_H_ +#define _SIBA_SIBA_EROMVAR_H_ + +#include +#include + +#include "sibavar.h" + +struct siba_erom; + +#define SIBA_EROM_ + +int siba_erom_get_core_id(struct siba_erom *sc, u_int core_idx, + struct siba_core_id *result); + +#endif /* _SIBA_SIBA_EROMVAR_H_ */ Property changes on: head/sys/dev/bhnd/siba/siba_eromvar.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/bhnd/siba/siba_subr.c =================================================================== --- head/sys/dev/bhnd/siba/siba_subr.c (revision 329179) +++ head/sys/dev/bhnd/siba/siba_subr.c (revision 329180) @@ -1,696 +1,733 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "sibareg.h" #include "sibavar.h" +static int siba_register_interrupts(device_t dev, device_t child, + struct siba_devinfo *dinfo); +static int siba_append_dinfo_region(struct siba_devinfo *dinfo, + uint8_t addridx, uint32_t base, uint32_t size, + uint32_t bus_reserved); + /** * Map a siba(4) OCP vendor code to its corresponding JEDEC JEP-106 vendor * code. 
* * @param ocp_vendor An OCP vendor code. * @return The BHND_MFGID constant corresponding to @p ocp_vendor, or * BHND_MFGID_INVALID if the OCP vendor is unknown. */ uint16_t siba_get_bhnd_mfgid(uint16_t ocp_vendor) { switch (ocp_vendor) { case OCP_VENDOR_BCM: return (BHND_MFGID_BCM); default: return (BHND_MFGID_INVALID); } }
/** - * Parse the SIBA_IDH_* fields from the per-core identification - * registers, returning a siba_core_id representation. - * - * @param idhigh The SIBA_R0_IDHIGH register. - * @param idlow The SIBA_R0_IDLOW register. - * @param core_id The core id (index) to include in the result. - * @param unit The unit number to include in the result. - */ -struct siba_core_id -siba_parse_core_id(uint32_t idhigh, uint32_t idlow, u_int core_idx, int unit) -{ - - uint16_t ocp_vendor; - uint8_t sonics_rev; - uint8_t num_addrspace; - uint8_t num_cfg; - - ocp_vendor = SIBA_REG_GET(idhigh, IDH_VENDOR); - sonics_rev = SIBA_REG_GET(idlow, IDL_SBREV); - num_addrspace = SIBA_REG_GET(idlow, IDL_NRADDR) + 1 /* + enum block */; - - /* Determine the number of sonics config register blocks */ - num_cfg = SIBA_CFG_NUM_2_2; - if (sonics_rev >= SIBA_IDL_SBREV_2_3) - num_cfg = SIBA_CFG_NUM_2_3; - - return (struct siba_core_id) { - .core_info = { - .vendor = siba_get_bhnd_mfgid(ocp_vendor), - .device = SIBA_REG_GET(idhigh, IDH_DEVICE), - .hwrev = SIBA_IDH_CORE_REV(idhigh), - .core_idx = core_idx, - .unit = unit - }, - .sonics_vendor = ocp_vendor, - .sonics_rev = sonics_rev, - .num_addrspace = num_addrspace, - .num_cfg_blocks = num_cfg - }; -} - -/** * Allocate and return a new empty device info structure. * * @param bus The requesting bus device. * * @retval NULL if allocation failed. */ struct siba_devinfo * siba_alloc_dinfo(device_t bus) { struct siba_devinfo *dinfo; dinfo = malloc(sizeof(struct siba_devinfo), M_BHND, M_NOWAIT|M_ZERO); if (dinfo == NULL) return (NULL); for (u_int i = 0; i < nitems(dinfo->cfg); i++) { dinfo->cfg[i] = ((struct siba_cfg_block){ .cb_base = 0, .cb_size = 0, .cb_rid = -1, }); dinfo->cfg_res[i] = NULL; dinfo->cfg_rid[i] = -1; } resource_list_init(&dinfo->resources); dinfo->pmu_state = SIBA_PMU_NONE; - dinfo->intr_en = false; + dinfo->intr = (struct siba_intr) { + .mapped = false, + .rid = -1 + }; + return (dinfo); }
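The hunk that follows, siba_init_dinfo(), walks the core's address-match descriptors and registers one memory region per descriptor, reserving the tail of the enumeration space for the bus's own SIBA_CFG* access. The standalone sketch below (not part of the patch; the constants and address values are invented stand-ins for SIBA_CORE_ADDRSPACE, SIBA_CFG_SIZE, and real admatch data) shows the resulting carve-out arithmetic:

/*
 * Sketch of the region registration performed by siba_init_dinfo():
 * the config register blocks are reserved out of addrspace 0.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CORE_ADDRSPACE	0	/* index of the core/enumeration space */
#define CFG_SIZE	0x100	/* hypothetical size of one config block */

struct admatch { uint32_t base; uint32_t size; };

int
main(void)
{
	struct admatch am[] = {
		{ 0x18000000, 0x1000 },	/* core registers + cfg blocks */
		{ 0x1a000000, 0x4000 },	/* device memory */
	};
	uint8_t num_cfg_blocks = 2;

	for (uint8_t i = 0; i < 2; i++) {
		uint32_t reserved = 0;

		if (i == CORE_ADDRSPACE)
			reserved = num_cfg_blocks * CFG_SIZE;

		/* the child-visible range excludes the bus-reserved tail */
		printf("addrspace %u: 0x%08" PRIx32 "-0x%08" PRIx32
		    " (%" PRIu32 " bytes reserved)\n", (unsigned)i,
		    am[i].base, am[i].base + (am[i].size - reserved) - 1,
		    reserved);
	}
	return (0);
}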
/** * Initialize a device info structure previously allocated via * siba_alloc_dinfo, copying the provided core id. * * @param dev The requesting bus device. + * @param child The siba child device. * @param dinfo The device info instance. * @param core_id Device core info. * * @retval 0 success * @retval non-zero initialization failed. */ int -siba_init_dinfo(device_t dev, struct siba_devinfo *dinfo, +siba_init_dinfo(device_t dev, device_t child, struct siba_devinfo *dinfo, const struct siba_core_id *core_id) { + int error; + dinfo->core_id = *core_id; + + /* Register all address space mappings */ + for (uint8_t i = 0; i < core_id->num_admatch; i++) { + uint32_t bus_reserved; + + /* If this is the device's core/enumeration addrspace, + * reserve the Sonics configuration register blocks for the + * use of our bus. */ + bus_reserved = 0; + if (i == SIBA_CORE_ADDRSPACE) + bus_reserved = core_id->num_cfg_blocks * SIBA_CFG_SIZE; + + /* Append the region info */ + error = siba_append_dinfo_region(dinfo, i, + core_id->admatch[i].am_base, core_id->admatch[i].am_size, + bus_reserved); + if (error) + return (error); + } + + /* Register all interrupts */ + if ((error = siba_register_interrupts(dev, child, dinfo))) + return (error); + return (0); }
+ /** + * Register and map all interrupts for @p dinfo. + * + * @param dev The siba bus device. + * @param child The siba child device. + * @param dinfo The device info instance on which to register all interrupt + * entries. + */ +static int +siba_register_interrupts(device_t dev, device_t child, + struct siba_devinfo *dinfo) +{ + int error; + + /* Is backplane interrupt distribution enabled for this core? */ + if (!dinfo->core_id.intr_en) + return (0); + + /* Have one interrupt */ + dinfo->intr.mapped = false; + dinfo->intr.irq = 0; + dinfo->intr.rid = -1; + + /* Map the interrupt */ + error = BHND_BUS_MAP_INTR(dev, child, 0 /* single intr is always 0 */, + &dinfo->intr.irq); + if (error) { + device_printf(dev, "failed mapping interrupt line for core %u: " + "%d\n", dinfo->core_id.core_info.core_idx, error); + return (error); + } + dinfo->intr.mapped = true; + + /* Update the resource list */ + dinfo->intr.rid = resource_list_add_next(&dinfo->resources, SYS_RES_IRQ, + dinfo->intr.irq, dinfo->intr.irq, 1); + + return (0); +} +
/** * Map an addrspace index to its corresponding bhnd(4) BHND_PORT_DEVICE port * number. * * @param addrspace Address space index. */ u_int siba_addrspace_device_port(u_int addrspace) { /* The first addrspace is always mapped to device0; the remainder * are mapped to device1 */ if (addrspace == 0) return (0); else return (1); }
/** * Map an addrspace index to its corresponding bhnd(4) BHND_PORT_DEVICE port * region number. * * @param addrspace Address space index. */ u_int siba_addrspace_device_region(u_int addrspace) { /* The first addrspace is always mapped to device0.0; the remainder * are mapped to device1.0 + (n - 1) */ if (addrspace == 0) return (0); else return (addrspace - 1); }
/** * Map a config block index to its corresponding bhnd(4) BHND_PORT_AGENT port * number. * * @param cfg Config block index. */ u_int siba_cfg_agent_port(u_int cfg) { /* Always agent0 */ return (0); }
/** * Map a config block index to its corresponding bhnd(4) BHND_PORT_AGENT port * region number. * * @param cfg Config block index. */ u_int siba_cfg_agent_region(u_int cfg) { /* Always agent0. */ return (cfg); }
/** * Return the number of bhnd(4) ports to advertise for the given * @p core_id and @p port_type. * * Refer to the siba_addrspace_index() and siba_cfg_index() functions for * information on siba's mapping of bhnd(4) port and region identifiers. * * @param core_id The siba core info. * @param port_type The bhnd(4) port type. */ u_int siba_port_count(struct siba_core_id *core_id, bhnd_port_type port_type) { switch (port_type) { case BHND_PORT_DEVICE: /* 0, 1, or 2 ports */ - return (min(core_id->num_addrspace, 2)); + return (min(core_id->num_admatch, 2)); case BHND_PORT_AGENT: /* One agent port maps all configuration blocks */ if (core_id->num_cfg_blocks > 0) return (1); /* Do not advertise an agent port if there are no configuration * register blocks */ return (0); default: return (0); } }
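The device port/region helpers above encode a fixed convention: address space 0 becomes device0.0, and any remaining address spaces become device1.0 through device1.(n - 2). A standalone sketch (not part of the patch) that reproduces the mapping:

/*
 * Sketch of the siba_addrspace_device_port()/_region() convention.
 */
#include <stdio.h>

static unsigned
device_port(unsigned addrspace)
{
	/* addrspace 0 -> device0; all others -> device1 */
	return (addrspace == 0 ? 0 : 1);
}

static unsigned
device_region(unsigned addrspace)
{
	/* addrspace 0 -> region 0; addrspace n -> device1 region n-1 */
	return (addrspace == 0 ? 0 : addrspace - 1);
}

int
main(void)
{
	/* a core with four address match descriptors */
	for (unsigned i = 0; i < 4; i++)
		printf("addrspace %u -> device%u.%u\n", i,
		    device_port(i), device_region(i));
	return (0);
}

This prints device0.0, device1.0, device1.1, device1.2, matching the table documented at siba_addrspace_index() below.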
/** * Return true if @p port of @p port_type is defined by @p core_id, false * otherwise. * * @param core_id The siba core info. * @param port_type The bhnd(4) port type. * @param port The bhnd(4) port number. */ bool siba_is_port_valid(struct siba_core_id *core_id, bhnd_port_type port_type, u_int port) { /* Verify the index against the port count */ if (siba_port_count(core_id, port_type) <= port) return (false); return (true); }
/** * Return the number of bhnd(4) regions to advertise for @p core_id on the * @p port of @p port_type. * * @param core_id The siba core info. * @param port_type The bhnd(4) port type. * @param port The bhnd(4) port number. */ u_int siba_port_region_count(struct siba_core_id *core_id, bhnd_port_type port_type, u_int port) { /* The port must exist */ if (!siba_is_port_valid(core_id, port_type, port)) return (0); switch (port_type) { case BHND_PORT_DEVICE: /* The first address space, if any, is mapped to device0.0 */ if (port == 0) - return (min(core_id->num_addrspace, 1)); + return (min(core_id->num_admatch, 1)); /* All remaining address spaces are mapped to device1.(n - 1) */ - if (port == 1 && core_id->num_addrspace >= 2) - return (core_id->num_addrspace - 1); + if (port == 1 && core_id->num_admatch >= 2) + return (core_id->num_admatch - 1); break; case BHND_PORT_AGENT: /* All config blocks are mapped to a single port */ if (port == 0) return (core_id->num_cfg_blocks); break; default: break; } /* Validated above */ panic("siba_is_port_valid() returned true for unknown %s.%u port", bhnd_port_type_name(port_type), port); }
/** * Map a bhnd(4) type/port/region triplet to its associated config block index, * if any. * * We map config registers to port/region identifiers as follows: * * [port].[region] [cfg register block] * agent0.0 0 * agent0.1 1 * - * @param num_addrspace The number of available siba address spaces. * @param core_id The siba core info. * @param port_type The bhnd(4) port type. * @param port The bhnd(4) port number. * @param region The bhnd(4) port region. * @param cfgidx On success, the corresponding config register block index. * * @retval 0 success * @retval ENOENT if the given type/port/region cannot be mapped to a * siba config register block. */ int siba_cfg_index(struct siba_core_id *core_id, bhnd_port_type port_type, u_int port, u_int region, u_int *cfgidx) { /* Config blocks are mapped to agent ports */ if (port_type != BHND_PORT_AGENT) return (ENOENT); /* Port must be valid */ if (!siba_is_port_valid(core_id, port_type, port)) return (ENOENT); if (region >= core_id->num_cfg_blocks) return (ENOENT); if (region >= SIBA_MAX_CFG) return (ENOENT); /* Found */ *cfgidx = region; return (0); }
/** * Map a bhnd(4) type/port/region triplet to its associated config block * entry, if any. * * The only supported port type is BHND_PORT_AGENT. * * @param dinfo The device info to search for a matching config block. * @param type The bhnd(4) port type. * @param port The bhnd(4) port number. * @param region The bhnd(4) port region. */ struct siba_cfg_block * siba_find_cfg_block(struct siba_devinfo *dinfo, bhnd_port_type type, u_int port, u_int region) { u_int cfgidx; int error; /* Map to config block index */ error = siba_cfg_index(&dinfo->core_id, type, port, region, &cfgidx); if (error) return (NULL); /* Found */ return (&dinfo->cfg[cfgidx]); }
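siba_cfg_index() is the agent-side counterpart of the device port mapping: every config register block hangs off a single agent0 port, with region N selecting block N, bounded by both the core's block count and the driver maximum of two (SIBA_CFG0/SIBA_CFG1). A standalone sketch of that bounds logic (not part of the patch; MAX_CFG here mirrors the role of SIBA_MAX_CFG):

/*
 * Sketch of the agent0.N -> config block N mapping in siba_cfg_index().
 */
#include <errno.h>
#include <stdio.h>

#define MAX_CFG	2	/* stands in for SIBA_MAX_CFG */

static int
cfg_index(unsigned num_cfg_blocks, unsigned port, unsigned region,
    unsigned *cfgidx)
{
	if (port != 0)			/* only agent0 exists */
		return (ENOENT);
	if (region >= num_cfg_blocks || region >= MAX_CFG)
		return (ENOENT);
	*cfgidx = region;
	return (0);
}

int
main(void)
{
	unsigned idx;

	/* a Sonics >= 2.3 core maps two config blocks */
	for (unsigned r = 0; r < 3; r++) {
		if (cfg_index(2, 0, r, &idx) == 0)
			printf("agent0.%u -> SIBA_CFG%u\n", r, idx);
		else
			printf("agent0.%u -> ENOENT\n", r);
	}
	return (0);
}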
/** * Map a bhnd(4) type/port/region triplet to its associated address space * index, if any. * * For compatibility with bcma(4), we map address spaces to port/region * identifiers as follows: * - * [port] [addrspace] + * [port.region] [admatch index] * device0.0 0 * device1.0 1 * device1.1 2 * device1.2 3 * * @param core_id The siba core info. * @param port_type The bhnd(4) port type. * @param port The bhnd(4) port number. * @param region The bhnd(4) port region. * @param addridx On success, the corresponding addrspace index. * * @retval 0 success * @retval ENOENT if the given type/port/region cannot be mapped to a * siba address space. */ int siba_addrspace_index(struct siba_core_id *core_id, bhnd_port_type port_type, u_int port, u_int region, u_int *addridx) { u_int idx; /* Address spaces are always device ports */ if (port_type != BHND_PORT_DEVICE) return (ENOENT); /* Port must be valid */ if (!siba_is_port_valid(core_id, port_type, port)) return (ENOENT); if (port == 0) idx = region; else if (port == 1) idx = region + 1; else return (ENOENT); - if (idx >= core_id->num_addrspace) + if (idx >= core_id->num_admatch) return (ENOENT); /* Found */ *addridx = idx; return (0); }
/** * Map a bhnd(4) type/port/region triplet to its associated address space * entry, if any. * * The only supported port type is BHND_PORT_DEVICE. * * @param dinfo The device info to search for a matching address space. * @param type The bhnd(4) port type. * @param port The bhnd(4) port number. * @param region The bhnd(4) port region. */ struct siba_addrspace * siba_find_addrspace(struct siba_devinfo *dinfo, bhnd_port_type type, u_int port, u_int region) { u_int addridx; int error; /* Map to addrspace index */ error = siba_addrspace_index(&dinfo->core_id, type, port, region, &addridx); if (error) return (NULL); /* Found */ if (addridx >= SIBA_MAX_ADDRSPACE) return (NULL); return (&dinfo->addrspace[addridx]); }
/** * Append an address space entry to @p dinfo. * * @param dinfo The device info entry to update. * @param addridx The address space index. * @param base The mapping's base address. * @param size The mapping size. * @param bus_reserved Number of bytes to reserve in @p size for bus use * when registering the resource list entry. This is used to reserve bus * access to the core's SIBA_CFG* register blocks. * * @retval 0 success * @retval non-zero An error occurred appending the entry. */ -int +static int siba_append_dinfo_region(struct siba_devinfo *dinfo, uint8_t addridx, uint32_t base, uint32_t size, uint32_t bus_reserved) { struct siba_addrspace *sa; rman_res_t r_size; /* Verify that base + size will not overflow */ if (size > 0 && UINT32_MAX - (size - 1) < base) return (ERANGE); /* Verify that size - bus_reserved will not underflow */ if (size < bus_reserved) return (ERANGE); /* Must not be 0-length */ if (size == 0) return (EINVAL); /* Must not exceed addrspace array size */ if (addridx >= nitems(dinfo->addrspace)) return (EINVAL); /* Initialize new addrspace entry */ sa = &dinfo->addrspace[addridx]; sa->sa_base = base; sa->sa_size = size; sa->sa_bus_reserved = bus_reserved; /* Populate the resource list */ r_size = size - bus_reserved; sa->sa_rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY, base, base + (r_size - 1), r_size); return (0); }
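The validation in siba_append_dinfo_region() is worth noting: "UINT32_MAX - (size - 1) < base" rejects any base/size pair whose last byte address would wrap past 2^32, without ever computing the overflowing sum. A standalone sketch (not part of the patch) demonstrating the same checks:

/*
 * Sketch of siba_append_dinfo_region()'s range validation.
 */
#include <stdint.h>
#include <stdio.h>

static int
region_ok(uint32_t base, uint32_t size, uint32_t bus_reserved)
{
	if (size == 0)
		return (0);			/* zero-length mapping */
	if (UINT32_MAX - (size - 1) < base)
		return (0);			/* base + size - 1 wraps */
	if (size < bus_reserved)
		return (0);			/* reservation underflow */
	return (1);
}

int
main(void)
{
	printf("%d\n", region_ok(0x18000000, 0x1000, 0x200));	/* 1: valid */
	printf("%d\n", region_ok(0xffffff00, 0x1000, 0));	/* 0: wraps */
	printf("%d\n", region_ok(0x18000000, 0x100, 0x200));	/* 0: underflow */
	return (0);
}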
/** * Deallocate the given device info structure and any associated resources. * * @param dev The requesting bus device. * @param child The siba child device. * @param dinfo Device info associated with @p child to be deallocated. */ void siba_free_dinfo(device_t dev, device_t child, struct siba_devinfo *dinfo) { resource_list_free(&dinfo->resources); /* Free all mapped configuration blocks */ for (u_int i = 0; i < nitems(dinfo->cfg); i++) { if (dinfo->cfg_res[i] == NULL) continue; bhnd_release_resource(dev, SYS_RES_MEMORY, dinfo->cfg_rid[i], dinfo->cfg_res[i]); dinfo->cfg_res[i] = NULL; dinfo->cfg_rid[i] = -1; } /* Unmap the core's interrupt */ - if (dinfo->intr_en && dinfo->intr.mapped) { + if (dinfo->core_id.intr_en && dinfo->intr.mapped) { BHND_BUS_UNMAP_INTR(dev, child, dinfo->intr.irq); dinfo->intr.mapped = false; } free(dinfo, M_BHND); }
/** * Return the core-enumeration-relative offset for the @p addrspace * SIBA_R0_ADMATCH* register. * * @param addrspace The address space index. * * @retval non-zero success * @retval 0 the given @p addrspace index is not supported. */ u_int siba_admatch_offset(uint8_t addrspace) { switch (addrspace) { case 0: return SB0_REG_ABS(SIBA_CFG0_ADMATCH0); case 1: return SB0_REG_ABS(SIBA_CFG0_ADMATCH1); case 2: return SB0_REG_ABS(SIBA_CFG0_ADMATCH2); case 3: return SB0_REG_ABS(SIBA_CFG0_ADMATCH3); default: return (0); } }
/** * Parse a SIBA_R0_ADMATCH* register. * - * @param addrspace The address space index. * @param am The address match register value to be parsed. - * @param[out] addr The parsed address. - * @param[out] size The parsed size. + * @param[out] admatch The parsed address match descriptor. * * @retval 0 success * @retval non-zero a parse error occurred. */ int -siba_parse_admatch(uint32_t am, uint32_t *addr, uint32_t *size) +siba_parse_admatch(uint32_t am, struct siba_admatch *admatch) { - u_int am_type; + u_int am_type; - /* Negative encoding is not supported. This is not used on any - * currently known devices*/ - if (am & SIBA_AM_ADNEG) - return (EINVAL); - /* Extract the base address and size */ am_type = SIBA_REG_GET(am, AM_TYPE); switch (am_type) { case 0: - *addr = am & SIBA_AM_BASE0_MASK; - *size = 1 << (SIBA_REG_GET(am, AM_ADINT0) + 1); + /* Type 0 entries are always enabled, and do not support + * negative matching */ + admatch->am_base = am & SIBA_AM_BASE0_MASK; + admatch->am_size = 1 << (SIBA_REG_GET(am, AM_ADINT0) + 1); + admatch->am_enabled = true; + admatch->am_negative = false; break; case 1: - *addr = am & SIBA_AM_BASE1_MASK; - *size = 1 << (SIBA_REG_GET(am, AM_ADINT1) + 1); + admatch->am_base = am & SIBA_AM_BASE1_MASK; + admatch->am_size = 1 << (SIBA_REG_GET(am, AM_ADINT1) + 1); + admatch->am_enabled = ((am & SIBA_AM_ADEN) != 0); + admatch->am_negative = ((am & SIBA_AM_ADNEG) != 0); break; case 2: - *addr = am & SIBA_AM_BASE2_MASK; - *size = 1 << (SIBA_REG_GET(am, AM_ADINT2) + 1); + admatch->am_base = am & SIBA_AM_BASE2_MASK; + admatch->am_size = 1 << (SIBA_REG_GET(am, AM_ADINT2) + 1); + admatch->am_enabled = ((am & SIBA_AM_ADEN) != 0); + admatch->am_negative = ((am & SIBA_AM_ADNEG) != 0); break; default: return (EINVAL); } return (0); }
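The address match encoding decoded above stores the region size as a power-of-two exponent: size = 1 << (adint + 1). The following standalone sketch (not part of the patch) decodes a type-0 register in the same shape as siba_parse_admatch(); the field masks and shifts are illustrative stand-ins for the SIBA_AM_* definitions in sibareg.h, not the hardware's actual layout:

/*
 * Sketch of type-0 address match decoding, with hypothetical field
 * masks standing in for SIBA_AM_TYPE/SIBA_AM_ADINT0/SIBA_AM_BASE0.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AM_TYPE_MASK	0x00000003	/* hypothetical */
#define AM_ADINT0_MASK	0x000000f8	/* hypothetical */
#define AM_ADINT0_SHIFT	3		/* hypothetical */
#define AM_BASE0_MASK	0xffffff00	/* hypothetical */

struct admatch { uint32_t base; uint32_t size; bool enabled; bool negative; };

static int
parse_admatch(uint32_t am, struct admatch *out)
{
	if ((am & AM_TYPE_MASK) != 0)
		return (-1);	/* only type 0 handled in this sketch */

	/* size is encoded as a power of two: 1 << (adint + 1) */
	out->base = am & AM_BASE0_MASK;
	out->size = 1u << (((am & AM_ADINT0_MASK) >> AM_ADINT0_SHIFT) + 1);
	out->enabled = true;	/* type 0 entries are always enabled */
	out->negative = false;
	return (0);
}

int
main(void)
{
	struct admatch am;

	if (parse_admatch(0x18000068, &am) == 0)
		printf("base 0x%08" PRIx32 " size 0x%" PRIx32 "\n",
		    am.base, am.size);
	return (0);
}

With these illustrative masks, 0x18000068 decodes to base 0x18000000 and size 0x4000 (adint = 13).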
/** * Write @p value to @p dev's CFG0 target/initiator state register, performing * required read-back and waiting for completion. * * @param dev The siba(4) child device. * @param dinfo The @p dev's device info. * @param reg The CFG0 state register to write (e.g. SIBA_CFG0_TMSTATELOW, * SIBA_CFG0_IMSTATE) * @param value The value to write to @p reg. * @param mask The mask of bits to be included from @p value. */ void siba_write_target_state(device_t dev, struct siba_devinfo *dinfo, bus_size_t reg, uint32_t value, uint32_t mask) { struct bhnd_resource *r; uint32_t rval; r = dinfo->cfg_res[0]; KASSERT(r != NULL, ("%s missing CFG0 mapping", device_get_nameunit(dev))); KASSERT(reg <= SIBA_CFG_SIZE-4, ("%s invalid CFG0 register offset %#jx", device_get_nameunit(dev), (uintmax_t)reg)); rval = bhnd_bus_read_4(r, reg); rval &= ~mask; rval |= (value & mask); bhnd_bus_write_4(r, reg, rval); bhnd_bus_read_4(r, reg); /* read-back */ DELAY(1); }
/** * Spin for up to @p usec waiting for @p dev's CFG0 target/initiator state * register value to be equal to @p value after applying @p mask bits to both * values. * * @param dev The siba(4) child device to wait on. * @param dinfo The @p dev's device info. * @param reg The state register to read (e.g. SIBA_CFG0_TMSTATEHIGH, * SIBA_CFG0_IMSTATE) * @param value The value against which @p reg will be compared. * @param mask The mask to be applied when comparing @p value with @p reg. * @param usec The maximum number of microseconds to wait for completion. * * @retval 0 if the @p mask bits of @p reg match @p value prior to the @p usec * timeout. * @retval ENODEV if SIBA_CFG0 is not mapped by @p dinfo. * @retval ETIMEDOUT if a timeout occurs. */ int siba_wait_target_state(device_t dev, struct siba_devinfo *dinfo, bus_size_t reg, uint32_t value, uint32_t mask, u_int usec) { struct bhnd_resource *r; uint32_t rval; if ((r = dinfo->cfg_res[0]) == NULL) return (ENODEV); value &= mask; for (u_int i = 0; i < usec; i += 10) { rval = bhnd_bus_read_4(r, reg); if ((rval & mask) == value) return (0); DELAY(10); } return (ETIMEDOUT); }
Index: head/sys/dev/bhnd/siba/sibavar.h =================================================================== --- head/sys/dev/bhnd/siba/sibavar.h (revision 329179) +++ head/sys/dev/bhnd/siba/sibavar.h (revision 329180) @@ -1,235 +1,238 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES.
* * $FreeBSD$ */ #ifndef _SIBA_SIBAVAR_H_ #define _SIBA_SIBAVAR_H_ #include +#include #include #include #include #include #include #include #include "siba.h" /* * Internal definitions shared by siba(4) driver implementations. */ struct siba_addrspace; +struct siba_admatch; struct siba_cfg_block; struct siba_devinfo; struct siba_core_id; struct siba_softc; int siba_probe(device_t dev); int siba_attach(device_t dev); int siba_detach(device_t dev); int siba_resume(device_t dev); int siba_suspend(device_t dev); u_int siba_get_intr_count(device_t dev, device_t child); int siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec); uint16_t siba_get_bhnd_mfgid(uint16_t ocp_vendor); -struct siba_core_id siba_parse_core_id(uint32_t idhigh, uint32_t idlow, - u_int core_idx, int unit); - int siba_add_children(device_t bus); struct siba_devinfo *siba_alloc_dinfo(device_t dev); -int siba_init_dinfo(device_t dev, +int siba_init_dinfo(device_t dev, device_t child, struct siba_devinfo *dinfo, const struct siba_core_id *core_id); void siba_free_dinfo(device_t dev, device_t child, struct siba_devinfo *dinfo); u_int siba_port_count(struct siba_core_id *core_id, bhnd_port_type port_type); bool siba_is_port_valid(struct siba_core_id *core_id, bhnd_port_type port_type, u_int port); u_int siba_port_region_count( struct siba_core_id *core_id, bhnd_port_type port_type, u_int port); int siba_cfg_index(struct siba_core_id *core_id, bhnd_port_type type, u_int port, u_int region, u_int *cfgidx); int siba_addrspace_index(struct siba_core_id *core_id, bhnd_port_type type, u_int port, u_int region, u_int *addridx); u_int siba_addrspace_device_port(u_int addrspace); u_int siba_addrspace_device_region(u_int addrspace); u_int siba_cfg_agent_port(u_int cfg); u_int siba_cfg_agent_region(u_int cfg); struct siba_addrspace *siba_find_addrspace(struct siba_devinfo *dinfo, bhnd_port_type type, u_int port, u_int region); struct siba_cfg_block *siba_find_cfg_block(struct siba_devinfo *dinfo, bhnd_port_type type, u_int port, u_int region); -int siba_append_dinfo_region(struct siba_devinfo *dinfo, - uint8_t sid, uint32_t base, uint32_t size, - uint32_t bus_reserved); - u_int siba_admatch_offset(uint8_t addrspace); -int siba_parse_admatch(uint32_t am, uint32_t *addr, - uint32_t *size); +int siba_parse_admatch(uint32_t am, + struct siba_admatch *admatch); void siba_write_target_state(device_t dev, struct siba_devinfo *dinfo, bus_size_t reg, uint32_t value, uint32_t mask); int siba_wait_target_state(device_t dev, struct siba_devinfo *dinfo, bus_size_t reg, uint32_t value, uint32_t mask, u_int usec); /* Sonics configuration register blocks */ #define SIBA_CFG_NUM_2_2 1 /**< sonics <= 2.2 maps SIBA_CFG0. */ #define SIBA_CFG_NUM_2_3 2 /**< sonics <= 2.3 maps SIBA_CFG0 and SIBA_CFG1 */ #define SIBA_MAX_CFG SIBA_CFG_NUM_2_3 /**< maximum number of supported config register blocks */ #define SIBA_CFG_RID_BASE 100 /**< base resource ID for SIBA_CFG* register allocations */ #define SIBA_CFG_RID(_dinfo, _cfg) \ (SIBA_CFG_RID_BASE + (_cfg) + \ (_dinfo->core_id.core_info.core_idx * SIBA_MAX_CFG)) /* Sonics/OCP address space mappings */ #define SIBA_CORE_ADDRSPACE 0 /**< Address space mapping the primary device registers */ #define SIBA_MAX_ADDRSPACE 4 /**< Maximum number of Sonics/OCP * address space mappings for a * single core. 
*/ /* bhnd(4) (port,region) representation of siba address space mappings */ #define SIBA_MAX_PORT 2 /**< maximum number of advertised * bhnd(4) ports */ +/** siba(4) address match descriptor */ +struct siba_admatch { + uint32_t am_base; /**< base address. */ + uint32_t am_size; /**< size. */ + bool am_negative; /**< if true, negative matching is performed. */ + bool am_enabled; /**< if true, matching on this entry is enabled. */ +}; + /** siba(4) address space descriptor */ struct siba_addrspace { uint32_t sa_base; /**< base address */ uint32_t sa_size; /**< size */ int sa_rid; /**< bus resource id */ uint32_t sa_bus_reserved;/**< number of bytes at high end of * address space reserved for the bus */ }; /** siba(4) config block descriptor */ struct siba_cfg_block { uint32_t cb_base; /**< base address */ uint32_t cb_size; /**< size */ int cb_rid; /**< bus resource id */ }; /** siba(4) backplane interrupt flag descriptor */ struct siba_intr { - u_int flag; /**< backplane flag # */ bool mapped; /**< if an irq has been mapped */ int rid; /**< bus resource id, or -1 if unassigned */ rman_res_t irq; /**< the mapped bus irq, if any */ }; /** * siba(4) per-core identification info. */ struct siba_core_id { - struct bhnd_core_info core_info; /**< standard bhnd(4) core info */ - uint16_t sonics_vendor; /**< OCP vendor identifier used to generate - * the JEDEC-106 bhnd(4) vendor identifier. */ - uint8_t sonics_rev; /**< sonics backplane revision code */ - uint8_t num_addrspace; /**< number of address ranges mapped to - this core. */ - uint8_t num_cfg_blocks; /**< number of Sonics configuration register - blocks mapped to the core's enumeration - space */ + struct bhnd_core_info core_info; /**< standard bhnd(4) core info */ + uint16_t sonics_vendor; /**< OCP vendor identifier used to generate + * the JEDEC-106 bhnd(4) vendor identifier. */ + uint8_t sonics_rev; /**< sonics backplane revision code */ + bool intr_en; /**< if backplane interrupt distribution is enabled for this core */ + u_int intr_flag; /**< backplane interrupt flag # */ + struct siba_admatch admatch[SIBA_MAX_ADDRSPACE]; /**< active address match descriptors defined by this core. */ + uint8_t num_admatch; /**< number of address match descriptors. */ + uint8_t num_cfg_blocks; /**< number of Sonics configuration register + blocks mapped to the core's enumeration + space */ }; /** * siba(4) per-core PMU allocation state. */ typedef enum { SIBA_PMU_NONE, /**< If the core has not yet allocated PMU state */ SIBA_PMU_BHND, /**< If standard bhnd(4) PMU support should be used */ SIBA_PMU_PWRCTL, /**< If legacy PWRCTL PMU support should be used */ SIBA_PMU_FIXED, /**< If legacy fixed (no-op) PMU support should be used */ } siba_pmu_state; /** * siba(4) per-device info */ struct siba_devinfo { struct resource_list resources; /**< per-core memory regions. 
*/ struct siba_core_id core_id; /**< core identification info */ struct siba_addrspace addrspace[SIBA_MAX_ADDRSPACE]; /**< memory map descriptors */ struct siba_cfg_block cfg[SIBA_MAX_CFG]; /**< config block descriptors */ - struct siba_intr intr; /**< interrupt flag descriptor, if any */ - bool intr_en; /**< if true, core has an assigned interrupt flag */ + struct siba_intr intr; /**< interrupt flag mapping, if any */ struct bhnd_resource *cfg_res[SIBA_MAX_CFG]; /**< bus-mapped config block registers */ int cfg_rid[SIBA_MAX_CFG]; /**< bus-mapped config block resource IDs */ siba_pmu_state pmu_state; /**< per-core PMU state */ union { void *bhnd_info; /**< if SIBA_PMU_BHND, bhnd(4)-managed per-core PMU info. */ device_t pwrctl; /**< if SIBA_PMU_PWRCTL, legacy PWRCTL provider. */ } pmu; }; /** siba(4) per-instance state */ struct siba_softc { struct bhnd_softc bhnd_sc; /**< bhnd state */ device_t dev; /**< siba device */ struct mtx mtx; /**< state mutex */ }; #define SIBA_LOCK_INIT(sc) \ mtx_init(&(sc)->mtx, device_get_nameunit((sc)->dev), NULL, MTX_DEF) #define SIBA_LOCK(sc) mtx_lock(&(sc)->mtx) #define SIBA_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define SIBA_LOCK_ASSERT(sc, what) mtx_assert(&(sc)->mtx, what) #define SIBA_LOCK_DESTROY(sc) mtx_destroy(&(sc)->mtx) #endif /* _SIBA_SIBAVAR_H_ */
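The target-state helpers declared above rely on a simple bounded poll: read the register, compare the masked value, and spin in DELAY(10) steps until the budget in microseconds is exhausted. A standalone sketch of that loop structure (not part of the patch; the register read and busy-bit behavior are hypothetical stand-ins so the loop can be exercised in isolation):

/*
 * Sketch of the bounded polling pattern used by siba_wait_target_state().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x4;		/* hypothetical busy bit, initially set */

static uint32_t
read_reg(void)
{
	/* pretend the hardware deasserts the bit after a few polls */
	static int polls;

	if (++polls >= 3)
		fake_reg &= ~0x4;
	return (fake_reg);
}

static int
wait_state(uint32_t value, uint32_t mask, unsigned usec)
{
	value &= mask;
	for (unsigned i = 0; i < usec; i += 10) {
		if ((read_reg() & mask) == value)
			return (0);
		/* DELAY(10) would spin here in the kernel */
	}
	return (60);	/* ETIMEDOUT */
}

int
main(void)
{
	/* wait for the busy bit (0x4) to clear, with a 100us budget */
	printf("result: %d\n", wait_state(0, 0x4, 100));
	return (0);
}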