Index: head/sys/dev/altera/atse/if_atse.c
===================================================================
--- head/sys/dev/altera/atse/if_atse.c (revision 357685)
+++ head/sys/dev/altera/atse/if_atse.c (revision 357686)
@@ -1,1602 +1,1603 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2012, 2013 Bjoern A. Zeeb
* Copyright (c) 2014 Robert N. M. Watson
* Copyright (c) 2016-2017 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
* ("MRC2"), as part of the DARPA MRC research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Altera Triple-Speed Ethernet MegaCore, Function User Guide
* UG-01008-3.0, Software Version: 12.0, June 2012.
* Available at the time of writing at:
* http://www.altera.com/literature/ug/ug_ethernet.pdf
*
* We are using a Marvell E1111 (Alaska) PHY on the DE4. See mii/e1000phy.c.
*/
/*
* XXX-BZ NOTES:
* - ifOutBroadcastPkts are only counted if both ether dst and src are all-1s;
*   seems to be an IP core bug: they count ether broadcasts as multicast. Is
*   this still the case?
* - figure out why the TX FIFO fill status and intr did not work as expected.
* - test 100Mbit/s and 10Mbit/s
* - blacklist the one special factory programmed ethernet address (for now
* hardcoded, later from loader?)
* - resolve all XXX, left as reminders to shake out details later
* - Jumbo frame support
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_device_polling.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf_ring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/xdma/xdma.h>
#define RX_QUEUE_SIZE 4096
#define TX_QUEUE_SIZE 4096
#define NUM_RX_MBUF 512
#define BUFRING_SIZE 8192
#include <dev/altera/atse/if_atsereg.h>
/* XXX once we'd do parallel attach, we need a global lock for this. */
#define ATSE_ETHERNET_OPTION_BITS_UNDEF 0
#define ATSE_ETHERNET_OPTION_BITS_READ 1
static int atse_ethernet_option_bits_flag = ATSE_ETHERNET_OPTION_BITS_UNDEF;
static uint8_t atse_ethernet_option_bits[ALTERA_ETHERNET_OPTION_BITS_LEN];
/*
* Softc and critical resource locking.
*/
#define ATSE_LOCK(_sc) mtx_lock(&(_sc)->atse_mtx)
#define ATSE_UNLOCK(_sc) mtx_unlock(&(_sc)->atse_mtx)
#define ATSE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->atse_mtx, MA_OWNED)
#define ATSE_DEBUG
#undef ATSE_DEBUG
#ifdef ATSE_DEBUG
#define DPRINTF(format, ...) printf(format, __VA_ARGS__)
#else
#define DPRINTF(format, ...)
#endif
/*
* Register space access macros.
*/
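/*
 * The CSR map is indexed in 32-bit words, so the byte offset on the bus is
 * reg * 4.  Values are converted to/from little-endian here so that callers
 * only ever deal in host byte order.
 */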
static inline void
csr_write_4(struct atse_softc *sc, uint32_t reg, uint32_t val4,
const char *f, const int l)
{
val4 = htole32(val4);
DPRINTF("[%s:%d] CSR W %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
"atse_mem_res", reg, reg * 4, val4);
bus_write_4(sc->atse_mem_res, reg * 4, val4);
}
static inline uint32_t
csr_read_4(struct atse_softc *sc, uint32_t reg, const char *f, const int l)
{
uint32_t val4;
val4 = le32toh(bus_read_4(sc->atse_mem_res, reg * 4));
DPRINTF("[%s:%d] CSR R %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
"atse_mem_res", reg, reg * 4, val4);
return (val4);
}
/*
* See page 5-2: these are all dword offsets, and the most significant 16
* bits must be zero on write and are ignored on read.
*/
static inline void
pxx_write_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, uint16_t val,
const char *f, const int l, const char *s)
{
uint32_t val4;
val4 = htole32(val & 0x0000ffff);
DPRINTF("[%s:%d] %s W %s 0x%08x (0x%08jx) = 0x%08x\n", f, l, s,
"atse_mem_res", reg, (bmcr + reg) * 4, val4);
bus_write_4(sc->atse_mem_res, (bmcr + reg) * 4, val4);
}
static inline uint16_t
pxx_read_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, const char *f,
const int l, const char *s)
{
uint32_t val4;
uint16_t val;
val4 = bus_read_4(sc->atse_mem_res, (bmcr + reg) * 4);
val = le32toh(val4) & 0x0000ffff;
DPRINTF("[%s:%d] %s R %s 0x%08x (0x%08jx) = 0x%04x\n", f, l, s,
"atse_mem_res", reg, (bmcr + reg) * 4, val);
return (val);
}
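/*
 * Note: BMCR0 always maps the core-internal PCS, while BMCR1 is pointed at
 * the external PHY in atse_attach(); hence the PCS_/PHY_ macro split below.
 */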
#define CSR_WRITE_4(sc, reg, val) \
csr_write_4((sc), (reg), (val), __func__, __LINE__)
#define CSR_READ_4(sc, reg) \
csr_read_4((sc), (reg), __func__, __LINE__)
#define PCS_WRITE_2(sc, reg, val) \
pxx_write_2((sc), sc->atse_bmcr0, (reg), (val), __func__, __LINE__, \
"PCS")
#define PCS_READ_2(sc, reg) \
pxx_read_2((sc), sc->atse_bmcr0, (reg), __func__, __LINE__, "PCS")
#define PHY_WRITE_2(sc, reg, val) \
pxx_write_2((sc), sc->atse_bmcr1, (reg), (val), __func__, __LINE__, \
"PHY")
#define PHY_READ_2(sc, reg) \
pxx_read_2((sc), sc->atse_bmcr1, (reg), __func__, __LINE__, "PHY")
static void atse_tick(void *);
static int atse_detach(device_t);
devclass_t atse_devclass;
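/*
 * Post n empty mbuf clusters to the RX xDMA channel so the hardware always
 * has buffers to DMA received frames into.
 */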
static int
atse_rx_enqueue(struct atse_softc *sc, uint32_t n)
{
struct mbuf *m;
int i;
for (i = 0; i < n; i++) {
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->dev,
"%s: Can't alloc rx mbuf\n", __func__);
return (-1);
}
m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
}
return (0);
}
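/*
 * TX completion interrupt: reclaim and free the mbufs the xDMA engine has
 * finished with, counting any transfer errors along the way.
 */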
static int
atse_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
xdma_transfer_status_t st;
struct atse_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
int err;
sc = arg;
ATSE_LOCK(sc);
ifp = sc->atse_ifp;
for (;;) {
err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
if (err != 0) {
break;
}
if (st.error != 0) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
m_freem(m);
sc->txcount--;
}
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
ATSE_UNLOCK(sc);
return (0);
}
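/*
 * RX completion interrupt: hand completed mbufs to the stack.  The softc
 * lock is dropped around if_input() so it is not held while the frame
 * traverses the stack; every buffer processed is then replaced on the ring
 * via atse_rx_enqueue().
 */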
static int
atse_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
xdma_transfer_status_t st;
struct atse_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
int err;
uint32_t cnt_processed;
sc = arg;
ATSE_LOCK(sc);
ifp = sc->atse_ifp;
cnt_processed = 0;
for (;;) {
err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
if (err != 0) {
break;
}
cnt_processed++;
if (st.error != 0) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
continue;
}
m->m_pkthdr.len = m->m_len = st.transferred;
m->m_pkthdr.rcvif = ifp;
m_adj(m, ETHER_ALIGN);
ATSE_UNLOCK(sc);
(*ifp->if_input)(ifp, m);
ATSE_LOCK(sc);
}
atse_rx_enqueue(sc, cnt_processed);
ATSE_UNLOCK(sc);
return (0);
}
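/*
 * Drain the buf_ring onto the TX xDMA channel.  The peek/putback/advance
 * pattern ensures an mbuf is only consumed from the ring once the DMA
 * request queue has actually accepted it.
 */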
static int
atse_transmit_locked(struct ifnet *ifp)
{
struct atse_softc *sc;
struct mbuf *m;
struct buf_ring *br;
int error;
int enq;
sc = ifp->if_softc;
br = sc->br;
enq = 0;
while ((m = drbr_peek(ifp, br)) != NULL) {
error = xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
if (error != 0) {
/* No space in request queue available yet. */
drbr_putback(ifp, br, m);
break;
}
drbr_advance(ifp, br);
sc->txcount++;
enq++;
/* If anyone is interested give them a copy. */
ETHER_BPF_MTAP(ifp, m);
}
if (enq > 0)
xdma_queue_submit(sc->xchan_tx);
return (0);
}
static int
atse_transmit(struct ifnet *ifp, struct mbuf *m)
{
struct atse_softc *sc;
struct buf_ring *br;
int error;
sc = ifp->if_softc;
br = sc->br;
ATSE_LOCK(sc);
mtx_lock(&sc->br_mtx);
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
error = drbr_enqueue(ifp, sc->br, m);
mtx_unlock(&sc->br_mtx);
ATSE_UNLOCK(sc);
return (error);
}
if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
error = drbr_enqueue(ifp, sc->br, m);
mtx_unlock(&sc->br_mtx);
ATSE_UNLOCK(sc);
return (error);
}
error = drbr_enqueue(ifp, br, m);
if (error) {
mtx_unlock(&sc->br_mtx);
ATSE_UNLOCK(sc);
return (error);
}
error = atse_transmit_locked(ifp);
mtx_unlock(&sc->br_mtx);
ATSE_UNLOCK(sc);
return (error);
}
static void
atse_qflush(struct ifnet *ifp)
{
struct atse_softc *sc;
sc = ifp->if_softc;
printf("%s\n", __func__);
}
static int
atse_stop_locked(struct atse_softc *sc)
{
uint32_t mask, val4;
struct ifnet *ifp;
int i;
ATSE_LOCK_ASSERT(sc);
callout_stop(&sc->atse_tick);
ifp = sc->atse_ifp;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* Disable MAC transmit and receive datapath. */
mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
val4 &= ~mask;
CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
/* Wait for bits to be cleared; i=100 is excessive. */
for (i = 0; i < 100; i++) {
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
if ((val4 & mask) == 0) {
break;
}
DELAY(10);
}
if ((val4 & mask) != 0) {
device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
/* Punt. */
}
sc->atse_flags &= ~ATSE_FLAGS_LINK;
return (0);
}
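/*
 * Hash an ethernet address into a 6-bit index: bit i of the index is the
 * XOR (parity) of the eight bits of address byte i.  For example, every
 * byte of ff:ff:ff:ff:ff:ff has even parity, so the broadcast address
 * selects hash slot 0.
 */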
static u_int
atse_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
uint64_t *h = arg;
uint8_t *addr, x, y;
int i, j;
addr = LLADDR(sdl);
x = 0;
for (i = 0; i < ETHER_ADDR_LEN; i++) {
y = addr[i] & 0x01;
for (j = 1; j < 8; j++)
y ^= (addr[i] >> j) & 0x01;
x |= (y << i);
}
*h |= (1ULL << x);
return (1);
}
static int
atse_rxfilter_locked(struct atse_softc *sc)
{
struct ifnet *ifp;
uint32_t val4;
int i;
/* XXX-BZ can we find out if we have the MHASH synthesized? */
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
/* For simplicity always hash full 48 bits of addresses. */
if ((val4 & BASE_CFG_COMMAND_CONFIG_MHASH_SEL) != 0)
val4 &= ~BASE_CFG_COMMAND_CONFIG_MHASH_SEL;
ifp = sc->atse_ifp;
if (ifp->if_flags & IFF_PROMISC) {
val4 |= BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
} else {
val4 &= ~BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
}
CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
if (ifp->if_flags & IFF_ALLMULTI) {
/* Accept all multicast addresses. */
for (i = 0; i <= MHASH_LEN; i++)
CSR_WRITE_4(sc, MHASH_START + i, 0x1);
} else {
/*
* Can hold MHASH_LEN entries.
* XXX-BZ bitstring.h would be more general.
*/
uint64_t h;
/*
* Re-build and re-program hash table. First build the
* bit-field "yes" or "no" for each slot per address, then
* do all the programming afterwards.
*/
h = 0;
(void)if_foreach_llmaddr(ifp, atse_hash_maddr, &h);
for (i = 0; i <= MHASH_LEN; i++) {
CSR_WRITE_4(sc, MHASH_START + i,
(h & (1ULL << i)) ? 0x01 : 0x00);
}
}
return (0);
}
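/*
 * The "option bits" carrying the factory-programmed ethernet address live
 * in the parallel (CFI) flash, so borrow the cfi(4) sibling device's memory
 * resource long enough to copy them out.  This is done only once globally.
 */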
static int
atse_ethernet_option_bits_read_fdt(device_t dev)
{
struct resource *res;
device_t fdev;
int i, rid;
if (atse_ethernet_option_bits_flag & ATSE_ETHERNET_OPTION_BITS_READ) {
return (0);
}
fdev = device_find_child(device_get_parent(dev), "cfi", 0);
if (fdev == NULL) {
return (ENOENT);
}
rid = 0;
res = bus_alloc_resource_any(fdev, SYS_RES_MEMORY, &rid,
RF_ACTIVE | RF_SHAREABLE);
if (res == NULL) {
return (ENXIO);
}
for (i = 0; i < ALTERA_ETHERNET_OPTION_BITS_LEN; i++) {
atse_ethernet_option_bits[i] = bus_read_1(res,
ALTERA_ETHERNET_OPTION_BITS_OFF + i);
}
bus_release_resource(fdev, SYS_RES_MEMORY, rid, res);
atse_ethernet_option_bits_flag |= ATSE_ETHERNET_OPTION_BITS_READ;
return (0);
}
static int
atse_ethernet_option_bits_read(device_t dev)
{
int error;
error = atse_ethernet_option_bits_read_fdt(dev);
if (error == 0)
return (0);
device_printf(dev, "Cannot read Ethernet addresses from flash.\n");
return (error);
}
static int
atse_get_eth_address(struct atse_softc *sc)
{
unsigned long hostid;
uint32_t val4;
int unit;
/*
* Make sure to only ever do this once. Otherwise a reset would
* possibly change our ethernet address, which is not good at all.
*/
if (sc->atse_eth_addr[0] != 0x00 || sc->atse_eth_addr[1] != 0x00 ||
sc->atse_eth_addr[2] != 0x00) {
return (0);
}
if ((atse_ethernet_option_bits_flag &
ATSE_ETHERNET_OPTION_BITS_READ) == 0) {
goto get_random;
}
val4 = atse_ethernet_option_bits[0] << 24;
val4 |= atse_ethernet_option_bits[1] << 16;
val4 |= atse_ethernet_option_bits[2] << 8;
val4 |= atse_ethernet_option_bits[3];
/* They chose "safe". */
if (val4 != le32toh(0x00005afe)) {
device_printf(sc->atse_dev, "Magic '5afe' is not safe: 0x%08x. "
"Falling back to random numbers for hardware address.\n",
val4);
goto get_random;
}
sc->atse_eth_addr[0] = atse_ethernet_option_bits[4];
sc->atse_eth_addr[1] = atse_ethernet_option_bits[5];
sc->atse_eth_addr[2] = atse_ethernet_option_bits[6];
sc->atse_eth_addr[3] = atse_ethernet_option_bits[7];
sc->atse_eth_addr[4] = atse_ethernet_option_bits[8];
sc->atse_eth_addr[5] = atse_ethernet_option_bits[9];
/* Handle factory default ethernet address: 00:07:ed:ff:ed:15 */
if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x07 &&
sc->atse_eth_addr[2] == 0xed && sc->atse_eth_addr[3] == 0xff &&
sc->atse_eth_addr[4] == 0xed && sc->atse_eth_addr[5] == 0x15) {
device_printf(sc->atse_dev, "Factory programmed Ethernet "
"hardware address blacklisted. Falling back to random "
"address to avoid collisions.\n");
device_printf(sc->atse_dev, "Please re-program your flash.\n");
goto get_random;
}
if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x00 &&
sc->atse_eth_addr[2] == 0x00 && sc->atse_eth_addr[3] == 0x00 &&
sc->atse_eth_addr[4] == 0x00 && sc->atse_eth_addr[5] == 0x00) {
device_printf(sc->atse_dev, "All zero's Ethernet hardware "
"address blacklisted. Falling back to random address.\n");
device_printf(sc->atse_dev, "Please re-program your flash.\n");
goto get_random;
}
if (ETHER_IS_MULTICAST(sc->atse_eth_addr)) {
device_printf(sc->atse_dev, "Multicast Ethernet hardware "
"address blacklisted. Falling back to random address.\n");
device_printf(sc->atse_dev, "Please re-program your flash.\n");
goto get_random;
}
/*
* If we find an Altera-prefixed address with a 0x0 ending, adjust by
* device unit.  If not, and this is not the first Ethernet, go to
* random.
*/
unit = device_get_unit(sc->atse_dev);
if (unit == 0x00) {
return (0);
}
if (unit > 0x0f) {
device_printf(sc->atse_dev, "We do not support Ethernet "
"addresses for more than 16 MACs. Falling back to "
"random hadware address.\n");
goto get_random;
}
if ((sc->atse_eth_addr[0] & ~0x2) != 0 ||
sc->atse_eth_addr[1] != 0x07 || sc->atse_eth_addr[2] != 0xed ||
(sc->atse_eth_addr[5] & 0x0f) != 0x0) {
device_printf(sc->atse_dev, "Ethernet address not meeting our "
"multi-MAC standards. Falling back to random hadware "
"address.\n");
goto get_random;
}
sc->atse_eth_addr[5] |= (unit & 0x0f);
return (0);
get_random:
/*
* Fall back to random code we also use on bridge(4).
*/
getcredhostid(curthread->td_ucred, &hostid);
if (hostid == 0) {
arc4rand(sc->atse_eth_addr, ETHER_ADDR_LEN, 1);
sc->atse_eth_addr[0] &= ~1;/* clear multicast bit */
sc->atse_eth_addr[0] |= 2; /* set the LAA bit */
} else {
sc->atse_eth_addr[0] = 0x2;
sc->atse_eth_addr[1] = (hostid >> 24) & 0xff;
sc->atse_eth_addr[2] = (hostid >> 16) & 0xff;
sc->atse_eth_addr[3] = (hostid >> 8 ) & 0xff;
sc->atse_eth_addr[4] = hostid & 0xff;
sc->atse_eth_addr[5] = sc->atse_unit & 0xff;
}
return (0);
}
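/*
 * The MAC address registers take the address as two little-endian words:
 * the first four bytes go into MAC_0 and the remaining two into the low
 * half of MAC_1.
 */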
static int
atse_set_eth_address(struct atse_softc *sc, int n)
{
uint32_t v0, v1;
v0 = (sc->atse_eth_addr[3] << 24) | (sc->atse_eth_addr[2] << 16) |
(sc->atse_eth_addr[1] << 8) | sc->atse_eth_addr[0];
v1 = (sc->atse_eth_addr[5] << 8) | sc->atse_eth_addr[4];
if (n & ATSE_ETH_ADDR_DEF) {
CSR_WRITE_4(sc, BASE_CFG_MAC_0, v0);
CSR_WRITE_4(sc, BASE_CFG_MAC_1, v1);
}
if (n & ATSE_ETH_ADDR_SUPP1) {
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_0, v0);
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_1, v1);
}
if (n & ATSE_ETH_ADDR_SUPP2) {
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_0, v0);
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_1, v1);
}
if (n & ATSE_ETH_ADDR_SUPP3) {
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_0, v0);
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_1, v1);
}
if (n & ATSE_ETH_ADDR_SUPP4) {
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_0, v0);
CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_1, v1);
}
return (0);
}
static int
atse_reset(struct atse_softc *sc)
{
uint32_t val4, mask;
uint16_t val;
int i;
/* 1. External PHY Initialization using MDIO. */
/*
* We select the right MDIO space in atse_attach() and let MII do
* anything else.
*/
/* 2. PCS Configuration Register Initialization. */
/* a. Set auto negotiation link timer to 1.6ms for SGMII. */
PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_0, 0x0D40);
PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_1, 0x0003);
/* b. Configure SGMII. */
val = PCS_EXT_IF_MODE_SGMII_ENA|PCS_EXT_IF_MODE_USE_SGMII_AN;
PCS_WRITE_2(sc, PCS_EXT_IF_MODE, val);
/* c. Enable auto negotiation. */
/* Ignore Bits 6,8,13; should be set,set,unset. */
val = PCS_READ_2(sc, PCS_CONTROL);
val &= ~(PCS_CONTROL_ISOLATE|PCS_CONTROL_POWERDOWN);
val &= ~PCS_CONTROL_LOOPBACK; /* Make this a -link1 option? */
val |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE;
PCS_WRITE_2(sc, PCS_CONTROL, val);
/* d. PCS reset. */
val = PCS_READ_2(sc, PCS_CONTROL);
val |= PCS_CONTROL_RESET;
PCS_WRITE_2(sc, PCS_CONTROL, val);
/* Wait for reset bit to clear; i=100 is excessive. */
for (i = 0; i < 100; i++) {
val = PCS_READ_2(sc, PCS_CONTROL);
if ((val & PCS_CONTROL_RESET) == 0) {
break;
}
DELAY(10);
}
if ((val & PCS_CONTROL_RESET) != 0) {
device_printf(sc->atse_dev, "PCS reset timed out.\n");
return (ENXIO);
}
/* 3. MAC Configuration Register Initialization. */
/* a. Disable MAC transmit and receive datapath. */
mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
val4 &= ~mask;
/* Samples in the manual do have the SW_RESET bit set here, why? */
CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
/* Wait for bits to be cleared; i=100 is excessive. */
for (i = 0; i < 100; i++) {
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
if ((val4 & mask) == 0) {
break;
}
DELAY(10);
}
if ((val4 & mask) != 0) {
device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
return (ENXIO);
}
/* b. MAC FIFO configuration. */
CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_EMPTY, FIFO_DEPTH_TX - 16);
CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_FULL, 3);
CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_EMPTY, 8);
CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_EMPTY, FIFO_DEPTH_RX - 16);
CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_FULL, 8);
CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_EMPTY, 8);
#if 0
CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 16);
CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 16);
#else
/* For store-and-forward mode, set this threshold to 0. */
CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 0);
CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 0);
#endif
/* c. MAC address configuration. */
/* Also initialize supplementary addresses to our primary one. */
/* XXX-BZ FreeBSD really needs to grow an API for using these. */
atse_get_eth_address(sc);
atse_set_eth_address(sc, ATSE_ETH_ADDR_ALL);
/* d. MAC function configuration. */
CSR_WRITE_4(sc, BASE_CFG_FRM_LENGTH, 1518); /* Default. */
CSR_WRITE_4(sc, BASE_CFG_TX_IPG_LENGTH, 12);
CSR_WRITE_4(sc, BASE_CFG_PAUSE_QUANT, 0xFFFF);
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
/*
* If 1000BASE-X/SGMII PCS is initialized, set the ETH_SPEED (bit 3)
* and ENA_10 (bit 25) in command_config register to 0. If half duplex
* is reported in the PHY/PCS status register, set the HD_ENA (bit 10)
* to 1 in command_config register.
* BZ: We shoot for 1000 instead.
*/
#if 0
val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
#else
val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
#endif
val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
#if 0
/*
* We do not want to set this, otherwise, we could not even send
* random raw ethernet frames for various other research. By default
* FreeBSD will use the right ether source address.
*/
val4 |= BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS;
#endif
val4 |= BASE_CFG_COMMAND_CONFIG_PAD_EN;
val4 &= ~BASE_CFG_COMMAND_CONFIG_CRC_FWD;
#if 0
val4 |= BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA;
#endif
#if 1
val4 |= BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC;
#endif
val4 &= ~BASE_CFG_COMMAND_CONFIG_LOOP_ENA; /* link0? */
CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
/*
* Make sure we do not enable 32bit alignment; FreeBSD cannot
* cope with the additional padding (though we should!?).
* Also make sure we get the CRC appended.
*/
val4 = CSR_READ_4(sc, TX_CMD_STAT);
val4 &= ~(TX_CMD_STAT_OMIT_CRC|TX_CMD_STAT_TX_SHIFT16);
CSR_WRITE_4(sc, TX_CMD_STAT, val4);
val4 = CSR_READ_4(sc, RX_CMD_STAT);
val4 &= ~RX_CMD_STAT_RX_SHIFT16;
val4 |= RX_CMD_STAT_RX_SHIFT16;
CSR_WRITE_4(sc, RX_CMD_STAT, val4);
/* e. Reset MAC. */
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
val4 |= BASE_CFG_COMMAND_CONFIG_SW_RESET;
CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
/* Wait for bits to be cleared; i=100 is excessive. */
for (i = 0; i < 100; i++) {
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) == 0) {
break;
}
DELAY(10);
}
if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) != 0) {
device_printf(sc->atse_dev, "MAC reset timed out.\n");
return (ENXIO);
}
/* f. Enable MAC transmit and receive datapath. */
mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
val4 |= mask;
CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
/* Wait for bits to be cleared; i=100 is excessive. */
for (i = 0; i < 100; i++) {
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
if ((val4 & mask) == mask) {
break;
}
DELAY(10);
}
if ((val4 & mask) != mask) {
device_printf(sc->atse_dev, "Enabling MAC TX/RX timed out.\n");
return (ENXIO);
}
return (0);
}
static void
atse_init_locked(struct atse_softc *sc)
{
struct ifnet *ifp;
struct mii_data *mii;
uint8_t *eaddr;
ATSE_LOCK_ASSERT(sc);
ifp = sc->atse_ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
return;
}
/*
* Must update the ether address if it changed.  Given we do not handle
* this in atse_ioctl() but it is in the general framework, just always
* do it here before atse_reset().
*/
eaddr = IF_LLADDR(sc->atse_ifp);
bcopy(eaddr, &sc->atse_eth_addr, ETHER_ADDR_LEN);
/* Bring things to a halt, clean up, ... */
atse_stop_locked(sc);
atse_reset(sc);
/* ... and fire up the engine again. */
atse_rxfilter_locked(sc);
sc->atse_flags &= ATSE_FLAGS_LINK; /* Preserve. */
mii = device_get_softc(sc->atse_miibus);
sc->atse_flags &= ~ATSE_FLAGS_LINK;
mii_mediachg(mii);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
callout_reset(&sc->atse_tick, hz, atse_tick, sc);
}
static void
atse_init(void *xsc)
{
struct atse_softc *sc;
/*
* XXXRW: There is some argument that we should immediately do RX
* processing after enabling interrupts, or one may not fire if there
* are buffered packets.
*/
sc = (struct atse_softc *)xsc;
ATSE_LOCK(sc);
atse_init_locked(sc);
ATSE_UNLOCK(sc);
}
static int
atse_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
struct atse_softc *sc;
struct ifreq *ifr;
int error, mask;
error = 0;
sc = ifp->if_softc;
ifr = (struct ifreq *)data;
switch (command) {
case SIOCSIFFLAGS:
ATSE_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
((ifp->if_flags ^ sc->atse_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) != 0)
atse_rxfilter_locked(sc);
else
atse_init_locked(sc);
} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
atse_stop_locked(sc);
sc->atse_if_flags = ifp->if_flags;
ATSE_UNLOCK(sc);
break;
case SIOCSIFCAP:
ATSE_LOCK(sc);
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
ATSE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
ATSE_LOCK(sc);
atse_rxfilter_locked(sc);
ATSE_UNLOCK(sc);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
{
struct mii_data *mii;
struct ifreq *ifr;
mii = device_get_softc(sc->atse_miibus);
ifr = (struct ifreq *)data;
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break;
}
default:
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
static void
atse_tick(void *xsc)
{
struct atse_softc *sc;
struct mii_data *mii;
struct ifnet *ifp;
sc = (struct atse_softc *)xsc;
ATSE_LOCK_ASSERT(sc);
ifp = sc->atse_ifp;
mii = device_get_softc(sc->atse_miibus);
mii_tick(mii);
if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
atse_miibus_statchg(sc->atse_dev);
}
callout_reset(&sc->atse_tick, hz, atse_tick, sc);
}
/*
* Set media options.
*/
static int
atse_ifmedia_upd(struct ifnet *ifp)
{
struct atse_softc *sc;
struct mii_data *mii;
struct mii_softc *miisc;
int error;
sc = ifp->if_softc;
ATSE_LOCK(sc);
mii = device_get_softc(sc->atse_miibus);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
PHY_RESET(miisc);
}
error = mii_mediachg(mii);
ATSE_UNLOCK(sc);
return (error);
}
/*
* Report current media status.
*/
static void
atse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct atse_softc *sc;
struct mii_data *mii;
sc = ifp->if_softc;
ATSE_LOCK(sc);
mii = device_get_softc(sc->atse_miibus);
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
ATSE_UNLOCK(sc);
}
static struct atse_mac_stats_regs {
const char *name;
const char *descr; /* Mostly copied from Altera datasheet. */
} atse_mac_stats_regs[] = {
[0x1a] =
{ "aFramesTransmittedOK",
"The number of frames that are successfully transmitted including "
"the pause frames." },
{ "aFramesReceivedOK",
"The number of frames that are successfully received including the "
"pause frames." },
{ "aFrameCheckSequenceErrors",
"The number of receive frames with CRC error." },
{ "aAlignmentErrors",
"The number of receive frames with alignment error." },
{ "aOctetsTransmittedOK",
"The lower 32 bits of the number of data and padding octets that "
"are successfully transmitted." },
{ "aOctetsReceivedOK",
"The lower 32 bits of the number of data and padding octets that "
" are successfully received." },
{ "aTxPAUSEMACCtrlFrames",
"The number of pause frames transmitted." },
{ "aRxPAUSEMACCtrlFrames",
"The number received pause frames received." },
{ "ifInErrors",
"The number of errored frames received." },
{ "ifOutErrors",
"The number of transmit frames with either a FIFO overflow error, "
"a FIFO underflow error, or a error defined by the user "
"application." },
{ "ifInUcastPkts",
"The number of valid unicast frames received." },
{ "ifInMulticastPkts",
"The number of valid multicast frames received. The count does "
"not include pause frames." },
{ "ifInBroadcastPkts",
"The number of valid broadcast frames received." },
{ "ifOutDiscards",
"This statistics counter is not in use. The MAC function does not "
"discard frames that are written to the FIFO buffer by the user "
"application." },
{ "ifOutUcastPkts",
"The number of valid unicast frames transmitted." },
{ "ifOutMulticastPkts",
"The number of valid multicast frames transmitted, excluding pause "
"frames." },
{ "ifOutBroadcastPkts",
"The number of valid broadcast frames transmitted." },
{ "etherStatsDropEvents",
"The number of frames that are dropped due to MAC internal errors "
"when FIFO buffer overflow persists." },
{ "etherStatsOctets",
"The lower 32 bits of the total number of octets received. This "
"count includes both good and errored frames." },
{ "etherStatsPkts",
"The total number of good and errored frames received." },
{ "etherStatsUndersizePkts",
"The number of frames received with length less than 64 bytes. "
"This count does not include errored frames." },
{ "etherStatsOversizePkts",
"The number of frames received that are longer than the value "
"configured in the frm_length register. This count does not "
"include errored frames." },
{ "etherStatsPkts64Octets",
"The number of 64-byte frames received. This count includes good "
"and errored frames." },
{ "etherStatsPkts65to127Octets",
"The number of received good and errored frames between the length "
"of 65 and 127 bytes." },
{ "etherStatsPkts128to255Octets",
"The number of received good and errored frames between the length "
"of 128 and 255 bytes." },
{ "etherStatsPkts256to511Octets",
"The number of received good and errored frames between the length "
"of 256 and 511 bytes." },
{ "etherStatsPkts512to1023Octets",
"The number of received good and errored frames between the length "
"of 512 and 1023 bytes." },
{ "etherStatsPkts1024to1518Octets",
"The number of received good and errored frames between the length "
"of 1024 and 1518 bytes." },
{ "etherStatsPkts1519toXOctets",
"The number of received good and errored frames between the length "
"of 1519 and the maximum frame length configured in the frm_length "
"register." },
{ "etherStatsJabbers",
"Too long frames with CRC error." },
{ "etherStatsFragments",
"Too short frames with CRC error." },
/* 0x39 unused, 0x3a/b non-stats. */
[0x3c] =
/* Extended Statistics Counters */
{ "msb_aOctetsTransmittedOK",
"Upper 32 bits of the number of data and padding octets that are "
"successfully transmitted." },
{ "msb_aOctetsReceivedOK",
"Upper 32 bits of the number of data and padding octets that are "
"successfully received." },
{ "msb_etherStatsOctets",
"Upper 32 bits of the total number of octets received. This count "
"includes both good and errored frames." }
};
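/*
 * Export one read-only sysctl per statistics counter.  The array index
 * doubles as the dword register offset (hence the designated [0x1a] and
 * [0x3c] initializers) and is passed to the handler via arg2.
 */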
static int
sysctl_atse_mac_stats_proc(SYSCTL_HANDLER_ARGS)
{
struct atse_softc *sc;
int error, offset, s;
sc = arg1;
offset = arg2;
s = CSR_READ_4(sc, offset);
error = sysctl_handle_int(oidp, &s, 0, req);
if (error || !req->newptr) {
return (error);
}
return (0);
}
static struct atse_rx_err_stats_regs {
const char *name;
const char *descr;
} atse_rx_err_stats_regs[] = {
#define ATSE_RX_ERR_FIFO_THRES_EOP 0 /* FIFO threshold reached, on EOP. */
#define ATSE_RX_ERR_ELEN 1 /* Frame/payload length not valid. */
#define ATSE_RX_ERR_CRC32 2 /* CRC-32 error. */
#define ATSE_RX_ERR_FIFO_THRES_TRUNC 3 /* FIFO thresh., truncated frame. */
#define ATSE_RX_ERR_4 4 /* ? */
#define ATSE_RX_ERR_5 5 /* ? */
{ "rx_err_fifo_thres_eop",
"FIFO threshold reached, reported on EOP." },
{ "rx_err_fifo_elen",
"Frame or payload length not valid." },
{ "rx_err_fifo_crc32",
"CRC-32 error." },
{ "rx_err_fifo_thres_trunc",
"FIFO threshold reached, truncated frame" },
{ "rx_err_4",
"?" },
{ "rx_err_5",
"?" },
};
static int
sysctl_atse_rx_err_stats_proc(SYSCTL_HANDLER_ARGS)
{
struct atse_softc *sc;
int error, offset, s;
sc = arg1;
offset = arg2;
s = sc->atse_rx_err[offset];
error = sysctl_handle_int(oidp, &s, 0, req);
if (error || !req->newptr) {
return (error);
}
return (0);
}
static void
atse_sysctl_stats_attach(device_t dev)
{
struct sysctl_ctx_list *sctx;
struct sysctl_oid *soid;
struct atse_softc *sc;
int i;
sc = device_get_softc(dev);
sctx = device_get_sysctl_ctx(dev);
soid = device_get_sysctl_tree(dev);
/* MAC statistics. */
for (i = 0; i < nitems(atse_mac_stats_regs); i++) {
if (atse_mac_stats_regs[i].name == NULL ||
atse_mac_stats_regs[i].descr == NULL) {
continue;
}
SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
atse_mac_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
sc, i, sysctl_atse_mac_stats_proc, "IU",
atse_mac_stats_regs[i].descr);
}
/* rx_err[]. */
for (i = 0; i < ATSE_RX_ERR_MAX; i++) {
if (atse_rx_err_stats_regs[i].name == NULL ||
atse_rx_err_stats_regs[i].descr == NULL) {
continue;
}
SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
atse_rx_err_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
sc, i, sysctl_atse_rx_err_stats_proc, "IU",
atse_rx_err_stats_regs[i].descr);
}
}
/*
* Generic device handling routines.
*/
int
atse_attach(device_t dev)
{
struct atse_softc *sc;
struct ifnet *ifp;
uint32_t caps;
int error;
sc = device_get_softc(dev);
sc->dev = dev;
/* Get xDMA controller */
sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
if (sc->xdma_tx == NULL) {
device_printf(dev, "Can't find DMA controller.\n");
return (ENXIO);
}
/*
* Only the final (EOP) write can be less than the "symbols per beat"
* value, so we have to defragment the mbuf chain.
* See Chapter 15, On-Chip FIFO Memory Core, in the Embedded
* Peripherals IP User Guide.
*/
caps = XCHAN_CAP_NOSEG;
/* Alloc xDMA virtual channel. */
sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
if (sc->xchan_tx == NULL) {
device_printf(dev, "Can't alloc virtual DMA channel.\n");
return (ENXIO);
}
/* Setup interrupt handler. */
- error = xdma_setup_intr(sc->xchan_tx, atse_xdma_tx_intr, sc, &sc->ih_tx);
+ error = xdma_setup_intr(sc->xchan_tx, 0,
+ atse_xdma_tx_intr, sc, &sc->ih_tx);
if (error) {
device_printf(sc->dev,
"Can't setup xDMA interrupt handler.\n");
return (ENXIO);
}
xdma_prep_sg(sc->xchan_tx,
TX_QUEUE_SIZE, /* xchan requests queue size */
MCLBYTES, /* maxsegsize */
8, /* maxnsegs */
16, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR);
/* Get RX xDMA controller */
sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
if (sc->xdma_rx == NULL) {
device_printf(dev, "Can't find DMA controller.\n");
return (ENXIO);
}
/* Alloc xDMA virtual channel. */
sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, caps);
if (sc->xchan_rx == NULL) {
device_printf(dev, "Can't alloc virtual DMA channel.\n");
return (ENXIO);
}
/* Setup interrupt handler. */
- error = xdma_setup_intr(sc->xchan_rx, atse_xdma_rx_intr, sc, &sc->ih_rx);
+ error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
+ atse_xdma_rx_intr, sc, &sc->ih_rx);
if (error) {
device_printf(sc->dev,
"Can't setup xDMA interrupt handler.\n");
return (ENXIO);
}
xdma_prep_sg(sc->xchan_rx,
RX_QUEUE_SIZE, /* xchan requests queue size */
MCLBYTES, /* maxsegsize */
1, /* maxnsegs */
16, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR);
mtx_init(&sc->br_mtx, "buf ring mtx", NULL, MTX_DEF);
sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
M_NOWAIT, &sc->br_mtx);
if (sc->br == NULL) {
return (ENOMEM);
}
atse_ethernet_option_bits_read(dev);
mtx_init(&sc->atse_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->atse_tick, &sc->atse_mtx, 0);
/*
* We are only doing single-PHY with this driver currently.  The
* defaults would be right (BASE_CFG_MDIO_ADDR0 pointing to the 1st PHY
* address, 0), except that BMCR0 is always the PCS mapping, so we
* always use BMCR1.  See Table 5-1, 0xA0-0xBF.
*/
#if 0 /* Always PCS. */
sc->atse_bmcr0 = MDIO_0_START;
CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR0, 0x00);
#endif
/* Always use matching PHY for atse[0..]. */
sc->atse_phy_addr = device_get_unit(dev);
sc->atse_bmcr1 = MDIO_1_START;
CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR1, sc->atse_phy_addr);
/* Reset the adapter. */
atse_reset(sc);
/* Setup interface. */
ifp = sc->atse_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "if_alloc() failed\n");
error = ENOSPC;
goto err;
}
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
- IFF_NEEDSEPOCH;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = atse_ioctl;
ifp->if_transmit = atse_transmit;
ifp->if_qflush = atse_qflush;
ifp->if_init = atse_init;
IFQ_SET_MAXLEN(&ifp->if_snd, ATSE_TX_LIST_CNT - 1);
ifp->if_snd.ifq_drv_maxlen = ATSE_TX_LIST_CNT - 1;
IFQ_SET_READY(&ifp->if_snd);
/* MII setup. */
error = mii_attach(dev, &sc->atse_miibus, ifp, atse_ifmedia_upd,
atse_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
if (error != 0) {
device_printf(dev, "attaching PHY failed: %d\n", error);
goto err;
}
/* Call media-independent attach routine. */
ether_ifattach(ifp, sc->atse_eth_addr);
/* Tell the upper layer(s) about vlan mtu support. */
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
err:
if (error != 0) {
atse_detach(dev);
}
if (error == 0) {
atse_sysctl_stats_attach(dev);
}
atse_rx_enqueue(sc, NUM_RX_MBUF);
xdma_queue_submit(sc->xchan_rx);
return (error);
}
static int
atse_detach(device_t dev)
{
struct atse_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
KASSERT(mtx_initialized(&sc->atse_mtx), ("%s: mutex not initialized",
device_get_nameunit(dev)));
ifp = sc->atse_ifp;
/* Only cleanup if attach succeeded. */
if (device_is_attached(dev)) {
ATSE_LOCK(sc);
atse_stop_locked(sc);
ATSE_UNLOCK(sc);
callout_drain(&sc->atse_tick);
ether_ifdetach(ifp);
}
if (sc->atse_miibus != NULL) {
device_delete_child(dev, sc->atse_miibus);
}
if (ifp != NULL) {
if_free(ifp);
}
mtx_destroy(&sc->atse_mtx);
xdma_channel_free(sc->xchan_tx);
xdma_channel_free(sc->xchan_rx);
xdma_put(sc->xdma_tx);
xdma_put(sc->xdma_rx);
return (0);
}
/* Shared between nexus and fdt implementation. */
void
atse_detach_resources(device_t dev)
{
struct atse_softc *sc;
sc = device_get_softc(dev);
if (sc->atse_mem_res != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_mem_rid,
sc->atse_mem_res);
sc->atse_mem_res = NULL;
}
}
int
atse_detach_dev(device_t dev)
{
int error;
error = atse_detach(dev);
if (error) {
/* We are basically in undefined state now. */
device_printf(dev, "atse_detach() failed: %d\n", error);
return (error);
}
atse_detach_resources(dev);
return (0);
}
int
atse_miibus_readreg(device_t dev, int phy, int reg)
{
struct atse_softc *sc;
int val;
sc = device_get_softc(dev);
/*
* We currently do not support re-mapping of MDIO space on-the-fly
* but de-facto hard-code the phy#.
*/
if (phy != sc->atse_phy_addr) {
return (0);
}
val = PHY_READ_2(sc, reg);
return (val);
}
int
atse_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct atse_softc *sc;
sc = device_get_softc(dev);
/*
* We currently do not support re-mapping of MDIO space on-the-fly
* but de-facto hard-code the phy#.
*/
if (phy != sc->atse_phy_addr) {
return (0);
}
PHY_WRITE_2(sc, reg, data);
return (0);
}
void
atse_miibus_statchg(device_t dev)
{
struct atse_softc *sc;
struct mii_data *mii;
struct ifnet *ifp;
uint32_t val4;
sc = device_get_softc(dev);
ATSE_LOCK_ASSERT(sc);
mii = device_get_softc(sc->atse_miibus);
ifp = sc->atse_ifp;
if (mii == NULL || ifp == NULL ||
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
return;
}
val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
/* Assume no link. */
sc->atse_flags &= ~ATSE_FLAGS_LINK;
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
(IFM_ACTIVE | IFM_AVALID)) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_10_T:
val4 |= BASE_CFG_COMMAND_CONFIG_ENA_10;
val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
sc->atse_flags |= ATSE_FLAGS_LINK;
break;
case IFM_100_TX:
val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
sc->atse_flags |= ATSE_FLAGS_LINK;
break;
case IFM_1000_T:
val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
sc->atse_flags |= ATSE_FLAGS_LINK;
break;
default:
break;
}
}
if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
/* Need to stop the MAC? */
return;
}
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
val4 &= ~BASE_CFG_COMMAND_CONFIG_HD_ENA;
} else {
val4 |= BASE_CFG_COMMAND_CONFIG_HD_ENA;
}
/* flow control? */
/* Make sure the MAC is activated. */
val4 |= BASE_CFG_COMMAND_CONFIG_TX_ENA;
val4 |= BASE_CFG_COMMAND_CONFIG_RX_ENA;
CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
}
MODULE_DEPEND(atse, ether, 1, 1, 1);
MODULE_DEPEND(atse, miibus, 1, 1, 1);
Index: head/sys/dev/flash/cqspi.c
===================================================================
--- head/sys/dev/flash/cqspi.c (revision 357685)
+++ head/sys/dev/flash/cqspi.c (revision 357686)
@@ -1,769 +1,769 @@
/*-
* Copyright (c) 2017-2018 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Cadence Quad SPI Flash Controller driver.
* 4B-addressing mode supported only.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>
#include <dev/fdt/simplebus.h>
#include <dev/flash/cqspi.h>
#include <dev/flash/mx25lreg.h>
#include <dev/xdma/xdma.h>
#include "qspi_if.h"
#define CQSPI_DEBUG
#undef CQSPI_DEBUG
#ifdef CQSPI_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
#define CQSPI_SECTORSIZE 512
#define TX_QUEUE_SIZE 16
#define RX_QUEUE_SIZE 16
#define READ4(_sc, _reg) bus_read_4((_sc)->res[0], _reg)
#define READ2(_sc, _reg) bus_read_2((_sc)->res[0], _reg)
#define READ1(_sc, _reg) bus_read_1((_sc)->res[0], _reg)
#define WRITE4(_sc, _reg, _val) bus_write_4((_sc)->res[0], _reg, _val)
#define WRITE2(_sc, _reg, _val) bus_write_2((_sc)->res[0], _reg, _val)
#define WRITE1(_sc, _reg, _val) bus_write_1((_sc)->res[0], _reg, _val)
#define READ_DATA_4(_sc, _reg) bus_read_4((_sc)->res[1], _reg)
#define READ_DATA_1(_sc, _reg) bus_read_1((_sc)->res[1], _reg)
#define WRITE_DATA_4(_sc, _reg, _val) bus_write_4((_sc)->res[1], _reg, _val)
#define WRITE_DATA_1(_sc, _reg, _val) bus_write_1((_sc)->res[1], _reg, _val)
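/*
 * res[0] maps the controller CSRs and res[1] the SRAM data window;
 * sram_phys (the bus address of res[1]) is what the xDMA engine is pointed
 * at for indirect reads and writes.
 */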
struct cqspi_softc {
device_t dev;
struct resource *res[3];
bus_space_tag_t bst;
bus_space_handle_t bsh;
void *ih;
uint8_t read_op_done;
uint8_t write_op_done;
uint32_t fifo_depth;
uint32_t fifo_width;
uint32_t trigger_address;
uint32_t sram_phys;
/* xDMA */
xdma_controller_t *xdma_tx;
xdma_channel_t *xchan_tx;
void *ih_tx;
xdma_controller_t *xdma_rx;
xdma_channel_t *xchan_rx;
void *ih_rx;
struct intr_config_hook config_intrhook;
struct mtx sc_mtx;
};
#define CQSPI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define CQSPI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define CQSPI_LOCK_INIT(_sc) \
mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
"cqspi", MTX_DEF)
#define CQSPI_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
#define CQSPI_ASSERT_LOCKED(_sc) \
mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define CQSPI_ASSERT_UNLOCKED(_sc) \
mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
static struct resource_spec cqspi_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_MEMORY, 1, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ -1, 0 }
};
static struct ofw_compat_data compat_data[] = {
{ "cdns,qspi-nor", 1 },
{ NULL, 0 },
};
static void
cqspi_intr(void *arg)
{
struct cqspi_softc *sc;
uint32_t pending;
sc = arg;
pending = READ4(sc, CQSPI_IRQSTAT);
dprintf("%s: IRQSTAT %x\n", __func__, pending);
if (pending & (IRQMASK_INDOPDONE | IRQMASK_INDXFRLVL |
IRQMASK_INDSRAMFULL)) {
/* TODO: PIO operation done */
}
WRITE4(sc, CQSPI_IRQSTAT, pending);
}
static int
cqspi_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
struct xdma_transfer_status st;
struct cqspi_softc *sc;
struct bio *bp;
int ret;
int deq;
sc = arg;
dprintf("%s\n", __func__);
deq = 0;
while (1) {
ret = xdma_dequeue_bio(sc->xchan_tx, &bp, &st);
if (ret != 0) {
break;
}
sc->write_op_done = 1;
deq++;
}
if (deq > 1)
device_printf(sc->dev,
"Warning: more than 1 tx bio dequeued\n");
wakeup(&sc->xdma_tx);
return (0);
}
static int
cqspi_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
struct xdma_transfer_status st;
struct cqspi_softc *sc;
struct bio *bp;
int ret;
int deq;
sc = arg;
dprintf("%s\n", __func__);
deq = 0;
while (1) {
ret = xdma_dequeue_bio(sc->xchan_rx, &bp, &st);
if (ret != 0) {
break;
}
sc->read_op_done = 1;
deq++;
}
if (deq > 1)
device_printf(sc->dev,
"Warning: more than 1 rx bio dequeued\n");
wakeup(&sc->xdma_rx);
return (0);
}
static int
cqspi_wait_for_completion(struct cqspi_softc *sc)
{
int timeout;
int i;
timeout = 10000;
for (i = timeout; i > 0; i--) {
if ((READ4(sc, CQSPI_FLASHCMD) & FLASHCMD_CMDEXECSTAT) == 0) {
break;
}
}
if (i == 0) {
device_printf(sc->dev, "%s: cmd timed out: %x\n",
__func__, READ4(sc, CQSPI_FLASHCMD));
return (-1);
}
return (0);
}
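/*
 * Software-triggered flash commands all follow one pattern: program the
 * FLASHCMD fields, write the register once, then write it again with
 * EXECCMD set and poll CMDEXECSTAT until the controller is done.
 */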
static int
cqspi_cmd_write_addr(struct cqspi_softc *sc, uint8_t cmd,
uint32_t addr, uint32_t len)
{
uint32_t reg;
int ret;
dprintf("%s: %x\n", __func__, cmd);
WRITE4(sc, CQSPI_FLASHCMDADDR, addr);
reg = (cmd << FLASHCMD_CMDOPCODE_S);
reg |= (FLASHCMD_ENCMDADDR);
reg |= ((len - 1) << FLASHCMD_NUMADDRBYTES_S);
WRITE4(sc, CQSPI_FLASHCMD, reg);
reg |= FLASHCMD_EXECCMD;
WRITE4(sc, CQSPI_FLASHCMD, reg);
ret = cqspi_wait_for_completion(sc);
return (ret);
}
static int
cqspi_cmd_write(struct cqspi_softc *sc, uint8_t cmd,
uint8_t *addr, uint32_t len)
{
uint32_t reg;
int ret;
reg = (cmd << FLASHCMD_CMDOPCODE_S);
WRITE4(sc, CQSPI_FLASHCMD, reg);
reg |= FLASHCMD_EXECCMD;
WRITE4(sc, CQSPI_FLASHCMD, reg);
ret = cqspi_wait_for_completion(sc);
return (ret);
}
static int
cqspi_cmd_read(struct cqspi_softc *sc, uint8_t cmd,
uint8_t *addr, uint32_t len)
{
uint32_t data;
uint32_t reg;
uint8_t *buf;
int ret;
int i;
if (len > 8) {
device_printf(sc->dev, "Failed to read data\n");
return (-1);
}
dprintf("%s: %x\n", __func__, cmd);
buf = (uint8_t *)addr;
reg = (cmd << FLASHCMD_CMDOPCODE_S);
reg |= ((len - 1) << FLASHCMD_NUMRDDATABYTES_S);
reg |= FLASHCMD_ENRDDATA;
WRITE4(sc, CQSPI_FLASHCMD, reg);
reg |= FLASHCMD_EXECCMD;
WRITE4(sc, CQSPI_FLASHCMD, reg);
ret = cqspi_wait_for_completion(sc);
if (ret != 0) {
device_printf(sc->dev, "%s: cmd failed: %x\n",
__func__, cmd);
return (ret);
}
data = READ4(sc, CQSPI_FLASHCMDRDDATALO);
for (i = 0; i < len; i++)
buf[i] = (data >> (i * 8)) & 0xff;
return (0);
}
static int
cqspi_wait_ready(struct cqspi_softc *sc)
{
uint8_t data;
int ret;
do {
ret = cqspi_cmd_read(sc, CMD_READ_STATUS, &data, 1);
} while (data & STATUS_WIP);
return (0);
}
static int
cqspi_write_reg(device_t dev, device_t child,
uint8_t opcode, uint8_t *addr, uint32_t len)
{
struct cqspi_softc *sc;
int ret;
sc = device_get_softc(dev);
ret = cqspi_cmd_write(sc, opcode, addr, len);
return (ret);
}
static int
cqspi_read_reg(device_t dev, device_t child,
uint8_t opcode, uint8_t *addr, uint32_t len)
{
struct cqspi_softc *sc;
int ret;
sc = device_get_softc(dev);
ret = cqspi_cmd_read(sc, opcode, addr, len);
return (ret);
}
static int
cqspi_wait_idle(struct cqspi_softc *sc)
{
uint32_t reg;
do {
reg = READ4(sc, CQSPI_CFG);
if (reg & CFG_IDLE) {
break;
}
} while (1);
return (0);
}
static int
cqspi_erase(device_t dev, device_t child, off_t offset)
{
struct cqspi_softc *sc;
int ret;
sc = device_get_softc(dev);
cqspi_wait_idle(sc);
cqspi_wait_ready(sc);
ret = cqspi_cmd_write(sc, CMD_WRITE_ENABLE, 0, 0);
cqspi_wait_idle(sc);
cqspi_wait_ready(sc);
ret = cqspi_cmd_write_addr(sc, CMD_QUAD_SECTOR_ERASE, offset, 4);
cqspi_wait_idle(sc);
return (0);
}
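/*
 * Indirect write: program the transfer count and flash start address,
 * enqueue the bio on the TX xDMA channel targeting the SRAM window, kick
 * off the operation, and sleep until the xDMA completion handler sets
 * write_op_done.
 */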
static int
cqspi_write(device_t dev, device_t child, struct bio *bp,
off_t offset, caddr_t data, off_t count)
{
struct cqspi_softc *sc;
uint32_t reg;
dprintf("%s: offset 0x%llx count %lld bytes\n",
__func__, offset, count);
sc = device_get_softc(dev);
cqspi_wait_ready(sc);
reg = cqspi_cmd_write(sc, CMD_WRITE_ENABLE, 0, 0);
cqspi_wait_idle(sc);
cqspi_wait_ready(sc);
cqspi_wait_idle(sc);
reg = DMAPER_NUMSGLREQBYTES_4;
reg |= DMAPER_NUMBURSTREQBYTES_4;
WRITE4(sc, CQSPI_DMAPER, reg);
WRITE4(sc, CQSPI_INDWRWATER, 64);
WRITE4(sc, CQSPI_INDWR, INDRD_IND_OPS_DONE_STATUS);
WRITE4(sc, CQSPI_INDWR, 0);
WRITE4(sc, CQSPI_INDWRCNT, count);
WRITE4(sc, CQSPI_INDWRSTADDR, offset);
reg = (0 << DEVWR_DUMMYWRCLKS_S);
reg |= DEVWR_DATA_WIDTH_QUAD;
reg |= DEVWR_ADDR_WIDTH_SINGLE;
reg |= (CMD_QUAD_PAGE_PROGRAM << DEVWR_WROPCODE_S);
WRITE4(sc, CQSPI_DEVWR, reg);
reg = DEVRD_DATA_WIDTH_QUAD;
reg |= DEVRD_ADDR_WIDTH_SINGLE;
reg |= DEVRD_INST_WIDTH_SINGLE;
WRITE4(sc, CQSPI_DEVRD, reg);
xdma_enqueue_bio(sc->xchan_tx, &bp,
sc->sram_phys, 4, 4, XDMA_MEM_TO_DEV);
xdma_queue_submit(sc->xchan_tx);
sc->write_op_done = 0;
WRITE4(sc, CQSPI_INDWR, INDRD_START);
while (sc->write_op_done == 0)
tsleep(&sc->xdma_tx, PCATCH | PZERO, "spi", hz/2);
cqspi_wait_idle(sc);
return (0);
}
static int
cqspi_read(device_t dev, device_t child, struct bio *bp,
off_t offset, caddr_t data, off_t count)
{
struct cqspi_softc *sc;
uint32_t reg;
sc = device_get_softc(dev);
dprintf("%s: offset 0x%llx count %lld bytes\n",
__func__, offset, count);
cqspi_wait_idle(sc);
reg = DMAPER_NUMSGLREQBYTES_4;
reg |= DMAPER_NUMBURSTREQBYTES_4;
WRITE4(sc, CQSPI_DMAPER, reg);
WRITE4(sc, CQSPI_INDRDWATER, 64);
WRITE4(sc, CQSPI_INDRD, INDRD_IND_OPS_DONE_STATUS);
WRITE4(sc, CQSPI_INDRD, 0);
WRITE4(sc, CQSPI_INDRDCNT, count);
WRITE4(sc, CQSPI_INDRDSTADDR, offset);
reg = (0 << DEVRD_DUMMYRDCLKS_S);
reg |= DEVRD_DATA_WIDTH_QUAD;
reg |= DEVRD_ADDR_WIDTH_SINGLE;
reg |= DEVRD_INST_WIDTH_SINGLE;
reg |= DEVRD_ENMODEBITS;
reg |= (CMD_READ_4B_QUAD_OUTPUT << DEVRD_RDOPCODE_S);
WRITE4(sc, CQSPI_DEVRD, reg);
WRITE4(sc, CQSPI_MODEBIT, 0xff);
WRITE4(sc, CQSPI_IRQMASK, 0);
xdma_enqueue_bio(sc->xchan_rx, &bp, sc->sram_phys, 4, 4,
XDMA_DEV_TO_MEM);
xdma_queue_submit(sc->xchan_rx);
sc->read_op_done = 0;
WRITE4(sc, CQSPI_INDRD, INDRD_START);
while (sc->read_op_done == 0)
tsleep(&sc->xdma_rx, PCATCH | PZERO, "spi", hz/2);
cqspi_wait_idle(sc);
return (0);
}
static int
cqspi_init(struct cqspi_softc *sc)
{
pcell_t dts_value[1];
phandle_t node;
uint32_t reg;
int len;
device_printf(sc->dev, "Module ID %x\n",
READ4(sc, CQSPI_MODULEID));
if ((node = ofw_bus_get_node(sc->dev)) == -1) {
return (ENXIO);
}
if ((len = OF_getproplen(node, "cdns,fifo-depth")) <= 0) {
return (ENXIO);
}
OF_getencprop(node, "cdns,fifo-depth", dts_value, len);
sc->fifo_depth = dts_value[0];
if ((len = OF_getproplen(node, "cdns,fifo-width")) <= 0) {
return (ENXIO);
}
OF_getencprop(node, "cdns,fifo-width", dts_value, len);
sc->fifo_width = dts_value[0];
if ((len = OF_getproplen(node, "cdns,trigger-address")) <= 0) {
return (ENXIO);
}
OF_getencprop(node, "cdns,trigger-address", dts_value, len);
sc->trigger_address = dts_value[0];
/* Disable controller */
reg = READ4(sc, CQSPI_CFG);
reg &= ~(CFG_EN);
WRITE4(sc, CQSPI_CFG, reg);
reg = READ4(sc, CQSPI_DEVSZ);
reg &= ~(DEVSZ_NUMADDRBYTES_M);
reg |= ((4 - 1) << DEVSZ_NUMADDRBYTES_S);
WRITE4(sc, CQSPI_DEVSZ, reg);
WRITE4(sc, CQSPI_SRAMPART, sc->fifo_depth/2);
/* TODO: calculate baud rate and delay values. */
reg = READ4(sc, CQSPI_CFG);
/* Configure baud rate */
reg &= ~(CFG_BAUD_M);
reg |= CFG_BAUD12;
reg |= CFG_ENDMA;
WRITE4(sc, CQSPI_CFG, reg);
reg = (3 << DELAY_NSS_S);
reg |= (3 << DELAY_BTWN_S);
reg |= (1 << DELAY_AFTER_S);
reg |= (1 << DELAY_INIT_S);
WRITE4(sc, CQSPI_DELAY, reg);
reg = READ4(sc, CQSPI_RDDATACAP);
reg &= ~(RDDATACAP_DELAY_M);
reg |= (1 << RDDATACAP_DELAY_S);
WRITE4(sc, CQSPI_RDDATACAP, reg);
/* Enable controller */
reg = READ4(sc, CQSPI_CFG);
reg |= (CFG_EN);
WRITE4(sc, CQSPI_CFG, reg);
return (0);
}
static int
cqspi_add_devices(device_t dev)
{
phandle_t child, node;
device_t child_dev;
int error;
node = ofw_bus_get_node(dev);
for (child = OF_child(node); child != 0; child = OF_peer(child)) {
child_dev =
simplebus_add_device(dev, child, 0, NULL, -1, NULL);
if (child_dev == NULL) {
return (ENXIO);
}
error = device_probe_and_attach(child_dev);
if (error != 0) {
printf("can't probe and attach: %d\n", error);
}
}
return (0);
}
static void
cqspi_delayed_attach(void *arg)
{
struct cqspi_softc *sc;
sc = arg;
cqspi_add_devices(sc->dev);
bus_generic_attach(sc->dev);
config_intrhook_disestablish(&sc->config_intrhook);
}
static int
cqspi_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev)) {
return (ENXIO);
}
if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
return (ENXIO);
}
device_set_desc(dev, "Cadence Quad SPI controller");
return (0);
}
static int
cqspi_attach(device_t dev)
{
struct cqspi_softc *sc;
uint32_t caps;
int error;
sc = device_get_softc(dev);
sc->dev = dev;
if (bus_alloc_resources(dev, cqspi_spec, sc->res)) {
device_printf(dev, "could not allocate resources\n");
return (ENXIO);
}
/* Memory interface */
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
sc->sram_phys = rman_get_start(sc->res[1]);
/* Setup interrupt handlers */
if (bus_setup_intr(sc->dev, sc->res[2], INTR_TYPE_BIO | INTR_MPSAFE,
NULL, cqspi_intr, sc, &sc->ih)) {
device_printf(sc->dev, "Unable to setup intr\n");
return (ENXIO);
}
CQSPI_LOCK_INIT(sc);
caps = 0;
/* Get xDMA controller. */
sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
if (sc->xdma_tx == NULL) {
device_printf(dev, "Can't find DMA controller.\n");
return (ENXIO);
}
sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
if (sc->xdma_rx == NULL) {
device_printf(dev, "Can't find DMA controller.\n");
return (ENXIO);
}
/* Alloc xDMA virtual channels. */
sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
if (sc->xchan_tx == NULL) {
device_printf(dev, "Can't alloc virtual DMA channel.\n");
return (ENXIO);
}
sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, caps);
if (sc->xchan_rx == NULL) {
device_printf(dev, "Can't alloc virtual DMA channel.\n");
return (ENXIO);
}
/* Setup xDMA interrupt handlers. */
- error = xdma_setup_intr(sc->xchan_tx, cqspi_xdma_tx_intr,
+ error = xdma_setup_intr(sc->xchan_tx, 0, cqspi_xdma_tx_intr,
sc, &sc->ih_tx);
if (error) {
device_printf(sc->dev,
"Can't setup xDMA interrupt handler.\n");
return (ENXIO);
}
- error = xdma_setup_intr(sc->xchan_rx, cqspi_xdma_rx_intr,
+ error = xdma_setup_intr(sc->xchan_rx, 0, cqspi_xdma_rx_intr,
sc, &sc->ih_rx);
if (error) {
device_printf(sc->dev,
"Can't setup xDMA interrupt handler.\n");
return (ENXIO);
}
xdma_prep_sg(sc->xchan_tx, TX_QUEUE_SIZE, MAXPHYS, 8, 16, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
xdma_prep_sg(sc->xchan_rx, TX_QUEUE_SIZE, MAXPHYS, 8, 16, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
cqspi_init(sc);
sc->config_intrhook.ich_func = cqspi_delayed_attach;
sc->config_intrhook.ich_arg = sc;
if (config_intrhook_establish(&sc->config_intrhook) != 0) {
device_printf(dev, "config_intrhook_establish failed\n");
return (ENOMEM);
}
return (0);
}
static int
cqspi_detach(device_t dev)
{
return (ENXIO);
}
static device_method_t cqspi_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, cqspi_probe),
DEVMETHOD(device_attach, cqspi_attach),
DEVMETHOD(device_detach, cqspi_detach),
/* Quad SPI Flash Interface */
DEVMETHOD(qspi_read_reg, cqspi_read_reg),
DEVMETHOD(qspi_write_reg, cqspi_write_reg),
DEVMETHOD(qspi_read, cqspi_read),
DEVMETHOD(qspi_write, cqspi_write),
DEVMETHOD(qspi_erase, cqspi_erase),
{ 0, 0 }
};
static devclass_t cqspi_devclass;
DEFINE_CLASS_1(cqspi, cqspi_driver, cqspi_methods,
sizeof(struct cqspi_softc), simplebus_driver);
DRIVER_MODULE(cqspi, simplebus, cqspi_driver, cqspi_devclass, 0, 0);
Index: head/sys/dev/xdma/xdma.c
===================================================================
--- head/sys/dev/xdma/xdma.c (revision 357685)
+++ head/sys/dev/xdma/xdma.c (revision 357686)
@@ -1,561 +1,570 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2016-2019 Ruslan Bukin
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
+#include <sys/epoch.h>
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef FDT
#include
#include
#include
#endif
#include
#include
/*
* Multiple xDMA controllers may work with a single DMA device,
* so we use a global lock for physical channel management.
*/
static struct mtx xdma_mtx;
#define XDMA_LOCK() mtx_lock(&xdma_mtx)
#define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
#define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)
#define FDT_REG_CELLS 4
#ifdef FDT
static int
xdma_get_iommu_fdt(xdma_controller_t *xdma, xdma_channel_t *xchan)
{
struct xdma_iommu *xio;
phandle_t node;
pcell_t prop;
size_t len;
node = ofw_bus_get_node(xdma->dma_dev);
if (OF_getproplen(node, "xdma,iommu") <= 0)
return (0);
len = OF_getencprop(node, "xdma,iommu", &prop, sizeof(prop));
if (len != sizeof(prop)) {
device_printf(xdma->dev,
"%s: Can't get iommu device node\n", __func__);
return (0);
}
xio = &xchan->xio;
xio->dev = OF_device_from_xref(prop);
if (xio->dev == NULL) {
device_printf(xdma->dev,
"%s: Can't get iommu device\n", __func__);
return (0);
}
/* Found */
return (1);
}
#endif
/*
* Allocate virtual xDMA channel.
*/
xdma_channel_t *
xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
{
xdma_channel_t *xchan;
int ret;
xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
xchan->xdma = xdma;
#ifdef FDT
/* Check if this DMA controller supports IOMMU. */
if (xdma_get_iommu_fdt(xdma, xchan))
caps |= XCHAN_CAP_IOMMU | XCHAN_CAP_NOSEG;
#endif
xchan->caps = caps;
XDMA_LOCK();
/* Request a real channel from hardware driver. */
ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't request hardware channel.\n", __func__);
XDMA_UNLOCK();
free(xchan, M_XDMA);
return (NULL);
}
TAILQ_INIT(&xchan->ie_handlers);
mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);
TAILQ_INIT(&xchan->bank);
TAILQ_INIT(&xchan->queue_in);
TAILQ_INIT(&xchan->queue_out);
TAILQ_INIT(&xchan->processing);
if (xchan->caps & XCHAN_CAP_IOMMU)
xdma_iommu_init(&xchan->xio);
TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
XDMA_UNLOCK();
return (xchan);
}
int
xdma_channel_free(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
int err;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
XDMA_LOCK();
/* Free the real DMA channel. */
err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
if (err != 0) {
device_printf(xdma->dev,
"%s: Can't free real hw channel.\n", __func__);
XDMA_UNLOCK();
return (-1);
}
if (xchan->flags & XCHAN_TYPE_SG)
xdma_channel_free_sg(xchan);
if (xchan->caps & XCHAN_CAP_IOMMU)
xdma_iommu_release(&xchan->xio);
xdma_teardown_all_intr(xchan);
mtx_destroy(&xchan->mtx_lock);
mtx_destroy(&xchan->mtx_qin_lock);
mtx_destroy(&xchan->mtx_qout_lock);
mtx_destroy(&xchan->mtx_bank_lock);
mtx_destroy(&xchan->mtx_proc_lock);
TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
free(xchan, M_XDMA);
XDMA_UNLOCK();
return (0);
}
int
-xdma_setup_intr(xdma_channel_t *xchan,
+xdma_setup_intr(xdma_channel_t *xchan, int flags,
int (*cb)(void *, xdma_transfer_status_t *),
void *arg, void **ihandler)
{
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
/* Sanity check. */
if (cb == NULL) {
device_printf(xdma->dev,
"%s: Can't setup interrupt handler.\n",
__func__);
return (-1);
}
ih = malloc(sizeof(struct xdma_intr_handler),
M_XDMA, M_WAITOK | M_ZERO);
+ ih->flags = flags;
ih->cb = cb;
ih->cb_user = arg;
XCHAN_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
XCHAN_UNLOCK(xchan);
if (ihandler != NULL)
*ihandler = ih;
return (0);
}
int
xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
{
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
/* Sanity check. */
if (ih == NULL) {
device_printf(xdma->dev,
"%s: Can't teardown interrupt.\n", __func__);
return (-1);
}
TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
free(ih, M_XDMA);
return (0);
}
int
xdma_teardown_all_intr(xdma_channel_t *xchan)
{
struct xdma_intr_handler *ih_tmp;
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
free(ih, M_XDMA);
}
return (0);
}
int
xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
XCHAN_LOCK(xchan);
ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't request a transfer.\n", __func__);
XCHAN_UNLOCK(xchan);
return (-1);
}
XCHAN_UNLOCK(xchan);
return (0);
}
int
xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't process command.\n", __func__);
return (-1);
}
return (0);
}
void
xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
{
struct xdma_intr_handler *ih_tmp;
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
+ struct epoch_tracker et;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
- TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp)
- if (ih->cb != NULL)
+ TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
+ if (ih->cb != NULL) {
+ if (ih->flags & XDMA_INTR_NET)
+ NET_EPOCH_ENTER(et);
ih->cb(ih->cb_user, status);
+ if (ih->flags & XDMA_INTR_NET)
+ NET_EPOCH_EXIT(et);
+ }
+ }
if (xchan->flags & XCHAN_TYPE_SG)
xdma_queue_submit(xchan);
}
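This is the heart of the commit: with the new flags field, xdma_callback() brackets a handler with NET_EPOCH_ENTER()/NET_EPOCH_EXIT() when requested. In FreeBSD head at this point, code that hands packets to the stack (e.g. via if_input()) must execute inside the network epoch, so a network driver registers its RX handler accordingly. A sketch; the bar_* names are illustrative:

	error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
	    bar_xdma_rx_intr, sc, &sc->ih_rx);

Handlers registered with flags of 0 are invoked exactly as before.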
#ifdef FDT
/*
* Notify the DMA driver we have machine-dependent data in FDT.
*/
static int
xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
{
uint32_t ret;
ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
cells, ncells, (void **)&xdma->data);
return (ret);
}
int
xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
{
pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
pcell_t *regp;
int addr_cells, size_cells;
int i, reg_len, ret, tuple_size, tuples;
u_long mem_start, mem_size;
if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
&size_cells)) != 0)
return (ret);
if (addr_cells > 2)
return (ERANGE);
tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
reg_len = OF_getproplen(memory, "reg");
if (reg_len <= 0 || reg_len > sizeof(reg))
return (ERANGE);
if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
return (ENXIO);
tuples = reg_len / tuple_size;
regp = (pcell_t *)&reg;
for (i = 0; i < tuples; i++) {
ret = fdt_data_to_res(regp, addr_cells, size_cells,
&mem_start, &mem_size);
if (ret != 0)
return (ret);
vmem_add(vmem, mem_start, mem_size, 0);
regp += addr_cells + size_cells;
}
return (0);
}
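xdma_handle_mem_node() walks the reg property of the referenced memory node one (address, size) tuple at a time and adds every range to the vmem arena. A worked example, assuming #address-cells = <2> and #size-cells = <2> in the parent:

	/*
	 *	reserved: buffer@80000000 {
	 *		reg = <0x0 0x80000000 0x0 0x100000>;
	 *	};
	 */

parses to a single tuple and results in one vmem_add(vmem, 0x80000000, 0x100000, 0) call.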
vmem_t *
xdma_get_memory(device_t dev)
{
phandle_t mem_node, node;
pcell_t mem_handle;
vmem_t *vmem;
node = ofw_bus_get_node(dev);
if (node <= 0) {
device_printf(dev,
"%s called on not ofw based device.\n", __func__);
return (NULL);
}
if (!OF_hasprop(node, "memory-region"))
return (NULL);
if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
sizeof(mem_handle)) <= 0)
return (NULL);
vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
PAGE_SIZE, M_BESTFIT | M_WAITOK);
if (vmem == NULL)
return (NULL);
mem_node = OF_node_from_xref(mem_handle);
if (xdma_handle_mem_node(vmem, mem_node) != 0) {
vmem_destroy(vmem);
return (NULL);
}
return (vmem);
}
void
xdma_put_memory(vmem_t *vmem)
{
vmem_destroy(vmem);
}
void
xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
{
xchan->vmem = vmem;
}
/*
* Allocate xdma controller.
*/
xdma_controller_t *
xdma_ofw_get(device_t dev, const char *prop)
{
phandle_t node, parent;
xdma_controller_t *xdma;
device_t dma_dev;
pcell_t *cells;
int ncells;
int error;
int ndmas;
int idx;
node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW-based device.\n", __func__);
		return (NULL);
	}
error = ofw_bus_parse_xref_list_get_length(node,
"dmas", "#dma-cells", &ndmas);
if (error) {
device_printf(dev,
"%s can't get dmas list.\n", __func__);
return (NULL);
}
if (ndmas == 0) {
device_printf(dev,
"%s dmas list is empty.\n", __func__);
return (NULL);
}
error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
if (error != 0) {
device_printf(dev,
"%s can't find string index.\n", __func__);
return (NULL);
}
error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
idx, &parent, &ncells, &cells);
if (error != 0) {
device_printf(dev,
"%s can't get dma device xref.\n", __func__);
return (NULL);
}
dma_dev = OF_device_from_xref(parent);
if (dma_dev == NULL) {
device_printf(dev,
"%s can't get dma device.\n", __func__);
return (NULL);
}
xdma = malloc(sizeof(struct xdma_controller),
M_XDMA, M_WAITOK | M_ZERO);
xdma->dev = dev;
xdma->dma_dev = dma_dev;
TAILQ_INIT(&xdma->channels);
xdma_ofw_md_data(xdma, cells, ncells);
free(cells, M_OFWPROP);
return (xdma);
}
#endif
/*
* Allocate xdma controller.
*/
xdma_controller_t *
xdma_get(device_t dev, device_t dma_dev)
{
xdma_controller_t *xdma;
xdma = malloc(sizeof(struct xdma_controller),
M_XDMA, M_WAITOK | M_ZERO);
xdma->dev = dev;
xdma->dma_dev = dma_dev;
TAILQ_INIT(&xdma->channels);
return (xdma);
}
/*
* Free xDMA controller object.
*/
int
xdma_put(xdma_controller_t *xdma)
{
XDMA_LOCK();
/* Ensure no channels allocated. */
if (!TAILQ_EMPTY(&xdma->channels)) {
		device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
		XDMA_UNLOCK();
		return (-1);
}
free(xdma->data, M_DEVBUF);
free(xdma, M_XDMA);
XDMA_UNLOCK();
return (0);
}
static void
xdma_init(void)
{
mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
}
SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
Index: head/sys/dev/xdma/xdma.h
===================================================================
--- head/sys/dev/xdma/xdma.h (revision 357685)
+++ head/sys/dev/xdma/xdma.h (revision 357686)
@@ -1,303 +1,305 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2016-2019 Ruslan Bukin
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_
#include
#include
#ifdef FDT
#include
#include
#endif
#include
#include
enum xdma_direction {
XDMA_MEM_TO_MEM,
XDMA_MEM_TO_DEV,
XDMA_DEV_TO_MEM,
XDMA_DEV_TO_DEV,
};
enum xdma_operation_type {
XDMA_MEMCPY,
XDMA_CYCLIC,
XDMA_FIFO,
XDMA_SG,
};
enum xdma_request_type {
XR_TYPE_PHYS,
XR_TYPE_VIRT,
XR_TYPE_MBUF,
XR_TYPE_BIO,
};
enum xdma_command {
XDMA_CMD_BEGIN,
XDMA_CMD_PAUSE,
XDMA_CMD_TERMINATE,
};
struct xdma_transfer_status {
uint32_t transferred;
int error;
};
typedef struct xdma_transfer_status xdma_transfer_status_t;
struct xdma_controller {
device_t dev; /* DMA consumer device_t. */
device_t dma_dev; /* A real DMA device_t. */
void *data; /* OFW MD part. */
vmem_t *vmem; /* Bounce memory. */
/* List of virtual channels allocated. */
TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
};
typedef struct xdma_controller xdma_controller_t;
struct xchan_buf {
bus_dmamap_t map;
uint32_t nsegs;
uint32_t nsegs_left;
vm_offset_t vaddr;
vm_offset_t paddr;
vm_size_t size;
};
struct xdma_request {
struct mbuf *m;
struct bio *bp;
enum xdma_operation_type operation;
enum xdma_request_type req_type;
enum xdma_direction direction;
bus_addr_t src_addr;
bus_addr_t dst_addr;
uint8_t src_width;
uint8_t dst_width;
bus_size_t block_num;
bus_size_t block_len;
xdma_transfer_status_t status;
void *user;
TAILQ_ENTRY(xdma_request) xr_next;
struct xchan_buf buf;
};
struct xdma_sglist {
bus_addr_t src_addr;
bus_addr_t dst_addr;
size_t len;
uint8_t src_width;
uint8_t dst_width;
enum xdma_direction direction;
bool first;
bool last;
};
struct xdma_iommu {
struct pmap p;
vmem_t *vmem; /* VA space */
device_t dev; /* IOMMU device */
};
struct xdma_channel {
xdma_controller_t *xdma;
vmem_t *vmem;
uint32_t flags;
#define XCHAN_BUFS_ALLOCATED (1 << 0)
#define XCHAN_SGLIST_ALLOCATED (1 << 1)
#define XCHAN_CONFIGURED (1 << 2)
#define XCHAN_TYPE_CYCLIC (1 << 3)
#define XCHAN_TYPE_MEMCPY (1 << 4)
#define XCHAN_TYPE_FIFO (1 << 5)
#define XCHAN_TYPE_SG (1 << 6)
uint32_t caps;
#define XCHAN_CAP_BUSDMA (1 << 0)
#define XCHAN_CAP_NOSEG (1 << 1)
#define XCHAN_CAP_BOUNCE (1 << 2)
#define XCHAN_CAP_IOMMU (1 << 3)
/* A real hardware driver channel. */
void *chan;
/* Interrupt handlers. */
TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
TAILQ_ENTRY(xdma_channel) xchan_next;
struct mtx mtx_lock;
struct mtx mtx_qin_lock;
struct mtx mtx_qout_lock;
struct mtx mtx_bank_lock;
struct mtx mtx_proc_lock;
/* Request queue. */
bus_dma_tag_t dma_tag_bufs;
struct xdma_request *xr_mem;
uint32_t xr_num;
/* Bus dma tag options. */
bus_size_t maxsegsize;
bus_size_t maxnsegs;
bus_size_t alignment;
bus_addr_t boundary;
bus_addr_t lowaddr;
bus_addr_t highaddr;
struct xdma_sglist *sg;
TAILQ_HEAD(, xdma_request) bank;
TAILQ_HEAD(, xdma_request) queue_in;
TAILQ_HEAD(, xdma_request) queue_out;
TAILQ_HEAD(, xdma_request) processing;
/* iommu */
struct xdma_iommu xio;
};
typedef struct xdma_channel xdma_channel_t;
struct xdma_intr_handler {
int (*cb)(void *cb_user, xdma_transfer_status_t *status);
+ int flags;
+#define XDMA_INTR_NET (1 << 0)
void *cb_user;
TAILQ_ENTRY(xdma_intr_handler) ih_next;
};
static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
#define XCHAN_LOCK(xchan) mtx_lock(&(xchan)->mtx_lock)
#define XCHAN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_lock)
#define XCHAN_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
#define QUEUE_IN_LOCK(xchan) mtx_lock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)
#define QUEUE_OUT_LOCK(xchan) mtx_lock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)
#define QUEUE_BANK_LOCK(xchan) mtx_lock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)
#define QUEUE_PROC_LOCK(xchan) mtx_lock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)
#define XDMA_SGLIST_MAXLEN 2048
#define XDMA_MAX_SEG 128
/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
xdma_controller_t *xdma_get(device_t dev, device_t dma_dev);
int xdma_put(xdma_controller_t *xdma);
vmem_t * xdma_get_memory(device_t dev);
void xdma_put_memory(vmem_t *vmem);
#ifdef FDT
int xdma_handle_mem_node(vmem_t *vmem, phandle_t memory);
#endif
/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);
/* SG interface */
int xdma_prep_sg(xdma_channel_t *, uint32_t,
bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);
/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);
/* Interrupt callback */
-int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
+int xdma_setup_intr(xdma_channel_t *xchan, int flags, int (*cb)(void *,
xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
uint32_t nsegs, struct xdma_request *xr);
/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
/* IOMMU */
void xdma_iommu_add_entry(xdma_channel_t *xchan, vm_offset_t *va,
vm_paddr_t pa, vm_size_t size, vm_prot_t prot);
void xdma_iommu_remove_entry(xdma_channel_t *xchan, vm_offset_t va);
int xdma_iommu_init(struct xdma_iommu *xio);
int xdma_iommu_release(struct xdma_iommu *xio);
#endif /* !_DEV_XDMA_XDMA_H_ */
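Putting the pieces together, an end-to-end sketch of the scatter-gather consumer flow with the new signature (hypothetical foo driver; error handling elided and constants illustrative):

	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
	xdma_setup_intr(sc->xchan_tx, 0, foo_xdma_tx_intr, sc, &sc->ih_tx);
	xdma_prep_sg(sc->xchan_tx, 64, MCLBYTES, 8, 16, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);

	/* Per packet: */
	xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
	xdma_queue_submit(sc->xchan_tx);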
Index: head/sys/dev/xdma/xdma_fdt_test.c
===================================================================
--- head/sys/dev/xdma/xdma_fdt_test.c (revision 357685)
+++ head/sys/dev/xdma/xdma_fdt_test.c (revision 357686)
@@ -1,431 +1,431 @@
/*-
* Copyright (c) 2016 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* xDMA memcpy test driver. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/*
* To use this test, add a compatible node to your dts, e.g.
*
* xdma_test {
* compatible = "freebsd,xdma-test";
*
* dmas = <&dma 0 0 0xffffffff>;
* dma-names = "test";
* };
*/
struct xdmatest_softc {
device_t dev;
xdma_controller_t *xdma;
xdma_channel_t *xchan;
void *ih;
struct intr_config_hook config_intrhook;
char *src;
char *dst;
uint32_t len;
uintptr_t src_phys;
uintptr_t dst_phys;
bus_dma_tag_t src_dma_tag;
bus_dmamap_t src_dma_map;
bus_dma_tag_t dst_dma_tag;
bus_dmamap_t dst_dma_map;
struct mtx mtx;
int done;
struct proc *newp;
struct xdma_request req;
};
static int xdmatest_probe(device_t dev);
static int xdmatest_attach(device_t dev);
static int xdmatest_detach(device_t dev);
static int
xdmatest_intr(void *arg)
{
struct xdmatest_softc *sc;
sc = arg;
	mtx_lock(&sc->mtx);
	sc->done = 1;
	wakeup(sc);
	mtx_unlock(&sc->mtx);
return (0);
}
static void
xdmatest_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
bus_addr_t *addr;
if (err)
return;
addr = (bus_addr_t*)arg;
*addr = segs[0].ds_addr;
}
static int
xdmatest_alloc_test_memory(struct xdmatest_softc *sc)
{
int err;
	sc->len = (0x1000000 - 8);	/* 16 MB */
	sc->len = 8;			/* Overridden: run a short 8-byte test. */
/* Source memory. */
err = bus_dma_tag_create(
bus_get_dma_tag(sc->dev),
1024, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sc->len, 1, /* maxsize, nsegments*/
sc->len, 0, /* maxsegsize, flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->src_dma_tag);
if (err) {
device_printf(sc->dev,
"%s: Can't create bus_dma tag.\n", __func__);
return (-1);
}
err = bus_dmamem_alloc(sc->src_dma_tag, (void **)&sc->src,
BUS_DMA_WAITOK | BUS_DMA_COHERENT, &sc->src_dma_map);
if (err) {
device_printf(sc->dev,
"%s: Can't allocate memory.\n", __func__);
return (-1);
}
err = bus_dmamap_load(sc->src_dma_tag, sc->src_dma_map, sc->src,
sc->len, xdmatest_dmamap_cb, &sc->src_phys, BUS_DMA_WAITOK);
if (err) {
device_printf(sc->dev,
"%s: Can't load DMA map.\n", __func__);
return (-1);
}
/* Destination memory. */
err = bus_dma_tag_create(
bus_get_dma_tag(sc->dev),
1024, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sc->len, 1, /* maxsize, nsegments*/
sc->len, 0, /* maxsegsize, flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->dst_dma_tag);
if (err) {
device_printf(sc->dev,
"%s: Can't create bus_dma tag.\n", __func__);
return (-1);
}
err = bus_dmamem_alloc(sc->dst_dma_tag, (void **)&sc->dst,
BUS_DMA_WAITOK | BUS_DMA_COHERENT, &sc->dst_dma_map);
if (err) {
device_printf(sc->dev,
"%s: Can't allocate memory.\n", __func__);
return (-1);
}
err = bus_dmamap_load(sc->dst_dma_tag, sc->dst_dma_map, sc->dst,
sc->len, xdmatest_dmamap_cb, &sc->dst_phys, BUS_DMA_WAITOK);
if (err) {
device_printf(sc->dev,
"%s: Can't load DMA map.\n", __func__);
return (-1);
}
return (0);
}
static int
xdmatest_test(struct xdmatest_softc *sc)
{
int err;
int i;
/* Get xDMA controller. */
sc->xdma = xdma_ofw_get(sc->dev, "test");
if (sc->xdma == NULL) {
device_printf(sc->dev, "Can't find xDMA controller.\n");
return (-1);
}
/* Alloc xDMA virtual channel. */
	sc->xchan = xdma_channel_alloc(sc->xdma, 0);
if (sc->xchan == NULL) {
device_printf(sc->dev, "Can't alloc virtual DMA channel.\n");
return (-1);
}
/* Setup callback. */
- err = xdma_setup_intr(sc->xchan, xdmatest_intr, sc, &sc->ih);
+ err = xdma_setup_intr(sc->xchan, 0, xdmatest_intr, sc, &sc->ih);
if (err) {
device_printf(sc->dev, "Can't setup xDMA interrupt handler.\n");
return (-1);
}
	/* Fill memory. */
	for (i = 0; i < sc->len; i++) {
		sc->src[i] = (i & 0xff);
		sc->dst[i] = 0;
	}
	/* Make the buffers visible to the DMA engine. */
	bus_dmamap_sync(sc->src_dma_tag, sc->src_dma_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dst_dma_tag, sc->dst_dma_map, BUS_DMASYNC_PREREAD);
	sc->req.operation = XDMA_MEMCPY;
	sc->req.req_type = XR_TYPE_PHYS;
	sc->req.direction = XDMA_MEM_TO_MEM;
sc->req.src_addr = sc->src_phys;
sc->req.dst_addr = sc->dst_phys;
sc->req.src_width = 4;
sc->req.dst_width = 4;
sc->req.block_len = sc->len;
sc->req.block_num = 1;
	err = xdma_request(sc->xchan, &sc->req);
if (err != 0) {
device_printf(sc->dev, "Can't configure virtual channel.\n");
return (-1);
}
/* Start operation. */
	xdma_control(sc->xchan, XDMA_CMD_BEGIN);
return (0);
}
static int
xdmatest_verify(struct xdmatest_softc *sc)
{
int err;
int i;
	/* The DMA controller has updated the destination memory. */
	bus_dmamap_sync(sc->src_dma_tag, sc->src_dma_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->dst_dma_tag, sc->dst_dma_map, BUS_DMASYNC_POSTREAD);
for (i = 0; i < sc->len; i++) {
if (sc->dst[i] != sc->src[i]) {
device_printf(sc->dev,
"%s: Test failed: iter %d\n", __func__, i);
return (-1);
}
}
err = xdma_channel_free(sc->xchan);
if (err != 0) {
device_printf(sc->dev,
"%s: Test failed: can't deallocate channel.\n", __func__);
return (-1);
}
err = xdma_put(sc->xdma);
if (err != 0) {
device_printf(sc->dev,
"%s: Test failed: can't deallocate xDMA.\n", __func__);
return (-1);
}
return (0);
}
static void
xdmatest_worker(void *arg)
{
struct xdmatest_softc *sc;
int timeout;
int err;
sc = arg;
device_printf(sc->dev, "Worker %d started.\n",
device_get_unit(sc->dev));
while (1) {
sc->done = 0;
mtx_lock(&sc->mtx);
if (xdmatest_test(sc) != 0) {
mtx_unlock(&sc->mtx);
device_printf(sc->dev,
"%s: Test failed.\n", __func__);
break;
}
timeout = 100;
do {
mtx_sleep(sc, &sc->mtx, 0, "xdmatest_wait", hz);
} while (timeout-- && sc->done == 0);
		if (sc->done != 0) {
err = xdmatest_verify(sc);
if (err == 0) {
/* Test succeeded. */
mtx_unlock(&sc->mtx);
continue;
}
}
mtx_unlock(&sc->mtx);
device_printf(sc->dev,
"%s: Test failed.\n", __func__);
break;
}
}
static void
xdmatest_delayed_attach(void *arg)
{
struct xdmatest_softc *sc;
sc = arg;
if (kproc_create(xdmatest_worker, (void *)sc, &sc->newp, 0, 0,
"xdmatest_worker") != 0) {
device_printf(sc->dev,
"%s: Failed to create worker thread.\n", __func__);
}
config_intrhook_disestablish(&sc->config_intrhook);
}
static int
xdmatest_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "freebsd,xdma-test"))
return (ENXIO);
device_set_desc(dev, "xDMA test driver");
return (BUS_PROBE_DEFAULT);
}
static int
xdmatest_attach(device_t dev)
{
struct xdmatest_softc *sc;
int err;
sc = device_get_softc(dev);
sc->dev = dev;
mtx_init(&sc->mtx, device_get_nameunit(dev), "xdmatest", MTX_DEF);
/* Allocate test memory */
err = xdmatest_alloc_test_memory(sc);
if (err != 0) {
device_printf(sc->dev, "Can't allocate test memory.\n");
		return (ENXIO);
}
	/* We'll run the test later, but before root (/) is mounted. */
sc->config_intrhook.ich_func = xdmatest_delayed_attach;
sc->config_intrhook.ich_arg = sc;
	if (config_intrhook_establish(&sc->config_intrhook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		return (ENXIO);
	}
return (0);
}
static int
xdmatest_detach(device_t dev)
{
struct xdmatest_softc *sc;
sc = device_get_softc(dev);
bus_dmamap_unload(sc->src_dma_tag, sc->src_dma_map);
bus_dmamem_free(sc->src_dma_tag, sc->src, sc->src_dma_map);
bus_dma_tag_destroy(sc->src_dma_tag);
bus_dmamap_unload(sc->dst_dma_tag, sc->dst_dma_map);
bus_dmamem_free(sc->dst_dma_tag, sc->dst, sc->dst_dma_map);
bus_dma_tag_destroy(sc->dst_dma_tag);
return (0);
}
static device_method_t xdmatest_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, xdmatest_probe),
DEVMETHOD(device_attach, xdmatest_attach),
DEVMETHOD(device_detach, xdmatest_detach),
DEVMETHOD_END
};
static driver_t xdmatest_driver = {
"xdmatest",
xdmatest_methods,
sizeof(struct xdmatest_softc),
};
static devclass_t xdmatest_devclass;
DRIVER_MODULE(xdmatest, simplebus, xdmatest_driver, xdmatest_devclass, 0, 0);
Index: head/sys/dev/xilinx/if_xae.c
===================================================================
--- head/sys/dev/xilinx/if_xae.c (revision 357685)
+++ head/sys/dev/xilinx/if_xae.c (revision 357686)
@@ -1,1162 +1,1162 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Ruslan Bukin
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
* Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
* DARPA SSITH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "miibus_if.h"
#define READ4(_sc, _reg) \
bus_read_4((_sc)->res[0], _reg)
#define WRITE4(_sc, _reg, _val) \
bus_write_4((_sc)->res[0], _reg, _val)
#define READ8(_sc, _reg) \
bus_read_8((_sc)->res[0], _reg)
#define WRITE8(_sc, _reg, _val) \
bus_write_8((_sc)->res[0], _reg, _val)
#define XAE_LOCK(sc) mtx_lock(&(sc)->mtx)
#define XAE_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
#define XAE_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
#define XAE_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)
#define XAE_DEBUG
#undef XAE_DEBUG
#ifdef XAE_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
#define RX_QUEUE_SIZE 64
#define TX_QUEUE_SIZE 64
#define NUM_RX_MBUF 16
#define BUFRING_SIZE 8192
#define MDIO_CLK_DIV_DEFAULT 29
#define PHY1_RD(sc, _r) \
xae_miibus_read_reg(sc->dev, 1, _r)
#define PHY1_WR(sc, _r, _v) \
xae_miibus_write_reg(sc->dev, 1, _r, _v)
#define PHY_RD(sc, _r) \
xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define PHY_WR(sc, _r, _v) \
xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)
/* Use this macro to access regs > 0x1f */
#define WRITE_TI_EREG(sc, reg, data) { \
PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK); \
PHY_WR(sc, MII_MMDAADR, reg); \
PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI); \
PHY_WR(sc, MII_MMDAADR, data); \
}
/* Not documented, Xilinx VCU118 workaround */
#define CFG4_SGMII_TMR 0x160 /* bits 8:7 MUST be '10' */
#define DP83867_SGMIICTL1 0xD3 /* not documented register */
#define SGMIICTL1_SGMII_6W (1 << 14) /* no idea what it is */
static struct resource_spec xae_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ -1, 0 }
};
static void xae_stop_locked(struct xae_softc *sc);
static void xae_setup_rxfilter(struct xae_softc *sc);
static int
xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
{
struct mbuf *m;
int i;
for (i = 0; i < n; i++) {
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->dev,
"%s: Can't alloc rx mbuf\n", __func__);
return (-1);
}
m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
}
return (0);
}
static int
xae_get_phyaddr(phandle_t node, int *phy_addr)
{
phandle_t phy_node;
pcell_t phy_handle, phy_reg;
if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
sizeof(phy_handle)) <= 0)
return (ENXIO);
phy_node = OF_node_from_xref(phy_handle);
if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
sizeof(phy_reg)) <= 0)
return (ENXIO);
*phy_addr = phy_reg;
return (0);
}
static int
xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
xdma_transfer_status_t st;
struct xae_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
int err;
sc = arg;
XAE_LOCK(sc);
ifp = sc->ifp;
for (;;) {
err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
if (err != 0) {
break;
}
if (st.error != 0) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
m_freem(m);
}
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
XAE_UNLOCK(sc);
return (0);
}
static int
xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
xdma_transfer_status_t st;
struct xae_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
int err;
uint32_t cnt_processed;
sc = arg;
dprintf("%s\n", __func__);
XAE_LOCK(sc);
ifp = sc->ifp;
cnt_processed = 0;
for (;;) {
err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
if (err != 0) {
break;
}
cnt_processed++;
if (st.error != 0) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
continue;
}
m->m_pkthdr.len = m->m_len = st.transferred;
m->m_pkthdr.rcvif = ifp;
XAE_UNLOCK(sc);
(*ifp->if_input)(ifp, m);
XAE_LOCK(sc);
}
xae_rx_enqueue(sc, cnt_processed);
XAE_UNLOCK(sc);
return (0);
}
static void
xae_qflush(struct ifnet *ifp)
{
struct xae_softc *sc;
sc = ifp->if_softc;
}
static int
xae_transmit_locked(struct ifnet *ifp)
{
struct xae_softc *sc;
struct mbuf *m;
struct buf_ring *br;
int error;
int enq;
dprintf("%s\n", __func__);
sc = ifp->if_softc;
br = sc->br;
enq = 0;
while ((m = drbr_peek(ifp, br)) != NULL) {
error = xdma_enqueue_mbuf(sc->xchan_tx,
&m, 0, 4, 4, XDMA_MEM_TO_DEV);
if (error != 0) {
/* No space in request queue available yet. */
drbr_putback(ifp, br, m);
break;
}
drbr_advance(ifp, br);
enq++;
/* If anyone is interested give them a copy. */
ETHER_BPF_MTAP(ifp, m);
}
if (enq > 0)
xdma_queue_submit(sc->xchan_tx);
return (0);
}
static int
xae_transmit(struct ifnet *ifp, struct mbuf *m)
{
struct xae_softc *sc;
int error;
dprintf("%s\n", __func__);
sc = ifp->if_softc;
XAE_LOCK(sc);
error = drbr_enqueue(ifp, sc->br, m);
if (error) {
XAE_UNLOCK(sc);
return (error);
}
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING) {
XAE_UNLOCK(sc);
return (0);
}
if (!sc->link_is_up) {
XAE_UNLOCK(sc);
return (0);
}
error = xae_transmit_locked(ifp);
XAE_UNLOCK(sc);
return (error);
}
static void
xae_stop_locked(struct xae_softc *sc)
{
struct ifnet *ifp;
uint32_t reg;
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
callout_stop(&sc->xae_callout);
/* Stop the transmitter */
reg = READ4(sc, XAE_TC);
reg &= ~TC_TX;
WRITE4(sc, XAE_TC, reg);
/* Stop the receiver. */
reg = READ4(sc, XAE_RCW1);
reg &= ~RCW1_RX;
WRITE4(sc, XAE_RCW1, reg);
}
static uint64_t
xae_stat(struct xae_softc *sc, int counter_id)
{
uint64_t new, old;
uint64_t delta;
KASSERT(counter_id < XAE_MAX_COUNTERS,
("counter %d is out of range", counter_id));
new = READ8(sc, XAE_STATCNT(counter_id));
old = sc->counters[counter_id];
	if (new >= old)
		delta = new - old;
	else {
		/*
		 * The counter wrapped; e.g. old = UINT64_MAX - 2 and
		 * new = 5 gives delta = 8.
		 */
		delta = UINT64_MAX - old + new + 1;
	}
sc->counters[counter_id] = new;
return (delta);
}
static void
xae_harvest_stats(struct xae_softc *sc)
{
struct ifnet *ifp;
ifp = sc->ifp;
if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
if_inc_counter(ifp, IFCOUNTER_IERRORS,
xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
xae_stat(sc, RX_ALIGNMENT_ERRORS));
if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
if_inc_counter(ifp, IFCOUNTER_OERRORS,
xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));
if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
xae_stat(sc, TX_LATE_COLLISIONS) +
xae_stat(sc, TX_EXCESS_COLLISIONS));
}
static void
xae_tick(void *arg)
{
struct xae_softc *sc;
struct ifnet *ifp;
int link_was_up;
sc = arg;
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
/* Gather stats from hardware counters. */
xae_harvest_stats(sc);
/* Check the media status. */
link_was_up = sc->link_is_up;
mii_tick(sc->mii_softc);
if (sc->link_is_up && !link_was_up)
xae_transmit_locked(sc->ifp);
/* Schedule another check one second from now. */
callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}
static void
xae_init_locked(struct xae_softc *sc)
{
struct ifnet *ifp;
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return;
ifp->if_drv_flags |= IFF_DRV_RUNNING;
xae_setup_rxfilter(sc);
/* Enable the transmitter */
WRITE4(sc, XAE_TC, TC_TX);
/* Enable the receiver. */
WRITE4(sc, XAE_RCW1, RCW1_RX);
/*
* Call mii_mediachg() which will call back into xae_miibus_statchg()
* to set up the remaining config registers based on current media.
*/
mii_mediachg(sc->mii_softc);
callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}
static void
xae_init(void *arg)
{
struct xae_softc *sc;
sc = arg;
XAE_LOCK(sc);
xae_init_locked(sc);
XAE_UNLOCK(sc);
}
static void
xae_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
{
struct xae_softc *sc;
struct mii_data *mii;
sc = ifp->if_softc;
mii = sc->mii_softc;
XAE_LOCK(sc);
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
XAE_UNLOCK(sc);
}
static int
xae_media_change_locked(struct xae_softc *sc)
{
return (mii_mediachg(sc->mii_softc));
}
static int
xae_media_change(struct ifnet * ifp)
{
struct xae_softc *sc;
int error;
sc = ifp->if_softc;
XAE_LOCK(sc);
error = xae_media_change_locked(sc);
XAE_UNLOCK(sc);
return (error);
}
static u_int
xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
struct xae_softc *sc = arg;
uint32_t reg;
uint8_t *ma;
if (cnt >= XAE_MULTICAST_TABLE_SIZE)
return (1);
ma = LLADDR(sdl);
reg = READ4(sc, XAE_FFC) & 0xffffff00;
reg |= cnt;
WRITE4(sc, XAE_FFC, reg);
reg = (ma[0]);
reg |= (ma[1] << 8);
reg |= (ma[2] << 16);
reg |= (ma[3] << 24);
WRITE4(sc, XAE_FFV(0), reg);
reg = ma[4];
reg |= ma[5] << 8;
WRITE4(sc, XAE_FFV(1), reg);
return (1);
}
static void
xae_setup_rxfilter(struct xae_softc *sc)
{
struct ifnet *ifp;
uint32_t reg;
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
/*
* Set the multicast (group) filter hash.
*/
if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
reg = READ4(sc, XAE_FFC);
reg |= FFC_PM;
WRITE4(sc, XAE_FFC, reg);
} else {
reg = READ4(sc, XAE_FFC);
reg &= ~FFC_PM;
WRITE4(sc, XAE_FFC, reg);
if_foreach_llmaddr(ifp, xae_write_maddr, sc);
}
/*
* Set the primary address.
*/
reg = sc->macaddr[0];
reg |= (sc->macaddr[1] << 8);
reg |= (sc->macaddr[2] << 16);
reg |= (sc->macaddr[3] << 24);
WRITE4(sc, XAE_UAW0, reg);
reg = sc->macaddr[4];
reg |= (sc->macaddr[5] << 8);
WRITE4(sc, XAE_UAW1, reg);
}
static int
xae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct xae_softc *sc;
struct mii_data *mii;
struct ifreq *ifr;
int mask, error;
sc = ifp->if_softc;
ifr = (struct ifreq *)data;
error = 0;
switch (cmd) {
case SIOCSIFFLAGS:
XAE_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if ((ifp->if_flags ^ sc->if_flags) &
(IFF_PROMISC | IFF_ALLMULTI))
xae_setup_rxfilter(sc);
} else {
if (!sc->is_detaching)
xae_init_locked(sc);
}
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
xae_stop_locked(sc);
}
sc->if_flags = ifp->if_flags;
XAE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
XAE_LOCK(sc);
xae_setup_rxfilter(sc);
XAE_UNLOCK(sc);
}
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
mii = sc->mii_softc;
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
break;
case SIOCSIFCAP:
mask = ifp->if_capenable ^ ifr->ifr_reqcap;
if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took effect. */
ifp->if_capenable ^= IFCAP_VLAN_MTU;
}
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
return (error);
}
static void
xae_intr(void *arg)
{
}
static int
xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
{
phandle_t node;
int len;
node = ofw_bus_get_node(sc->dev);
/* Check if there is property */
if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
return (EINVAL);
if (len != ETHER_ADDR_LEN)
return (EINVAL);
OF_getprop(node, "local-mac-address", hwaddr,
ETHER_ADDR_LEN);
return (0);
}
static int
mdio_wait(struct xae_softc *sc)
{
uint32_t reg;
int timeout;
timeout = 200;
	do {
		reg = READ4(sc, XAE_MDIO_CTRL);
		if (reg & MDIO_CTRL_READY)
			return (0);
		DELAY(1);
	} while (timeout--);
	device_printf(sc->dev, "Failed to get MDIO ready\n");
	return (1);
}
static int
xae_miibus_read_reg(device_t dev, int phy, int reg)
{
struct xae_softc *sc;
uint32_t mii;
int rv;
sc = device_get_softc(dev);
if (mdio_wait(sc))
return (0);
mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
mii |= (reg << MDIO_TX_REGAD_S);
mii |= (phy << MDIO_TX_PHYAD_S);
WRITE4(sc, XAE_MDIO_CTRL, mii);
if (mdio_wait(sc))
return (0);
rv = READ4(sc, XAE_MDIO_READ);
return (rv);
}
static int
xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
struct xae_softc *sc;
uint32_t mii;
sc = device_get_softc(dev);
if (mdio_wait(sc))
return (1);
mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
mii |= (reg << MDIO_TX_REGAD_S);
mii |= (phy << MDIO_TX_PHYAD_S);
WRITE4(sc, XAE_MDIO_WRITE, val);
WRITE4(sc, XAE_MDIO_CTRL, mii);
if (mdio_wait(sc))
return (1);
return (0);
}
static void
xae_phy_fixup(struct xae_softc *sc)
{
uint32_t reg;
device_t dev;
dev = sc->dev;
do {
WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);
reg = PHY_RD(sc, DP83867_CFG2);
reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
reg |= CFG2_INTERRUPT_POLARITY;
reg |= CFG2_SPEED_OPT_ENHANCED_EN;
reg |= CFG2_SPEED_OPT_10M_EN;
PHY_WR(sc, DP83867_CFG2, reg);
WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
PHY_WR(sc, MII_BMCR,
BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);
do {
PHY1_WR(sc, MII_BMCR,
BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
DELAY(40000);
} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
}
static int
get_xdma_std(struct xae_softc *sc)
{
sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
if (sc->xdma_tx == NULL)
return (ENXIO);
sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
if (sc->xdma_rx == NULL) {
xdma_put(sc->xdma_tx);
return (ENXIO);
}
return (0);
}
static int
get_xdma_axistream(struct xae_softc *sc)
{
struct axidma_fdt_data *data;
device_t dma_dev;
phandle_t node;
pcell_t prop;
size_t len;
node = ofw_bus_get_node(sc->dev);
len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
if (len != sizeof(prop)) {
device_printf(sc->dev,
"%s: Couldn't get axistream-connected prop.\n", __func__);
return (ENXIO);
}
dma_dev = OF_device_from_xref(prop);
if (dma_dev == NULL) {
device_printf(sc->dev, "Could not get DMA device by xref.\n");
return (ENXIO);
}
sc->xdma_tx = xdma_get(sc->dev, dma_dev);
if (sc->xdma_tx == NULL) {
device_printf(sc->dev, "Could not find DMA controller.\n");
return (ENXIO);
}
data = malloc(sizeof(struct axidma_fdt_data),
M_DEVBUF, (M_WAITOK | M_ZERO));
data->id = AXIDMA_TX_CHAN;
sc->xdma_tx->data = data;
sc->xdma_rx = xdma_get(sc->dev, dma_dev);
if (sc->xdma_rx == NULL) {
device_printf(sc->dev, "Could not find DMA controller.\n");
return (ENXIO);
}
data = malloc(sizeof(struct axidma_fdt_data),
M_DEVBUF, (M_WAITOK | M_ZERO));
data->id = AXIDMA_RX_CHAN;
sc->xdma_rx->data = data;
return (0);
}
static int
setup_xdma(struct xae_softc *sc)
{
device_t dev;
vmem_t *vmem;
int error;
dev = sc->dev;
/* Get xDMA controller */
error = get_xdma_std(sc);
if (error) {
device_printf(sc->dev,
"Fallback to axistream-connected property\n");
error = get_xdma_axistream(sc);
}
if (error) {
device_printf(dev, "Could not find xDMA controllers.\n");
return (ENXIO);
}
/* Alloc xDMA TX virtual channel. */
sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
if (sc->xchan_tx == NULL) {
device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
return (ENXIO);
}
/* Setup interrupt handler. */
- error = xdma_setup_intr(sc->xchan_tx,
+ error = xdma_setup_intr(sc->xchan_tx, 0,
xae_xdma_tx_intr, sc, &sc->ih_tx);
if (error) {
device_printf(sc->dev,
"Can't setup xDMA TX interrupt handler.\n");
return (ENXIO);
}
/* Alloc xDMA RX virtual channel. */
sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
if (sc->xchan_rx == NULL) {
device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
return (ENXIO);
}
/* Setup interrupt handler. */
- error = xdma_setup_intr(sc->xchan_rx,
+ error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
xae_xdma_rx_intr, sc, &sc->ih_rx);
if (error) {
device_printf(sc->dev,
"Can't setup xDMA RX interrupt handler.\n");
return (ENXIO);
}
/* Setup bounce buffer */
vmem = xdma_get_memory(dev);
if (vmem) {
xchan_set_memory(sc->xchan_tx, vmem);
xchan_set_memory(sc->xchan_rx, vmem);
}
xdma_prep_sg(sc->xchan_tx,
TX_QUEUE_SIZE, /* xchan requests queue size */
MCLBYTES, /* maxsegsize */
8, /* maxnsegs */
16, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR);
xdma_prep_sg(sc->xchan_rx,
RX_QUEUE_SIZE, /* xchan requests queue size */
MCLBYTES, /* maxsegsize */
1, /* maxnsegs */
16, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR);
return (0);
}
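Note that only the RX channel is registered with XDMA_INTR_NET: xae_xdma_rx_intr() hands received mbufs to if_input(), which must run inside the network epoch, while the TX handler merely frees completed mbufs. With the flag set, the RX path could additionally assert the invariant (a sketch, not part of this commit):

	/* xdma_callback() entered the net epoch before calling us. */
	NET_EPOCH_ASSERT();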
static int
xae_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
return (ENXIO);
device_set_desc(dev, "Xilinx AXI Ethernet");
return (BUS_PROBE_DEFAULT);
}
static int
xae_attach(device_t dev)
{
struct xae_softc *sc;
struct ifnet *ifp;
phandle_t node;
uint32_t reg;
int error;
sc = device_get_softc(dev);
sc->dev = dev;
node = ofw_bus_get_node(dev);
if (setup_xdma(sc) != 0) {
device_printf(dev, "Could not setup xDMA.\n");
return (ENXIO);
}
mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
MTX_NETWORK_LOCK, MTX_DEF);
sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
M_NOWAIT, &sc->mtx);
if (sc->br == NULL)
return (ENOMEM);
if (bus_alloc_resources(dev, xae_spec, sc->res)) {
device_printf(dev, "could not allocate resources\n");
return (ENXIO);
}
/* Memory interface */
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
device_printf(sc->dev, "Identification: %x\n",
READ4(sc, XAE_IDENT));
/* Get MAC addr */
if (xae_get_hwaddr(sc, sc->macaddr)) {
device_printf(sc->dev, "can't get mac\n");
return (ENXIO);
}
/* Enable MII clock */
reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
reg |= MDIO_SETUP_ENABLE;
WRITE4(sc, XAE_MDIO_SETUP, reg);
if (mdio_wait(sc))
return (ENXIO);
callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);
/* Setup interrupt handler. */
error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
NULL, xae_intr, sc, &sc->intr_cookie);
if (error != 0) {
device_printf(dev, "could not setup interrupt handler.\n");
return (ENXIO);
}
/* Set up the ethernet interface. */
sc->ifp = ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "could not allocate ifp.\n");
return (ENXIO);
}
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_capabilities = IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
ifp->if_transmit = xae_transmit;
ifp->if_qflush = xae_qflush;
ifp->if_ioctl = xae_ioctl;
ifp->if_init = xae_init;
IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1);
ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1;
IFQ_SET_READY(&ifp->if_snd);
if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
return (ENXIO);
/* Attach the mii driver. */
error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
MII_OFFSET_ANY, 0);
if (error != 0) {
device_printf(dev, "PHY attach failed\n");
return (ENXIO);
}
sc->mii_softc = device_get_softc(sc->miibus);
/* Apply vcu118 workaround. */
if (OF_getproplen(node, "xlnx,vcu118") >= 0)
xae_phy_fixup(sc);
/* All ready to run, attach the ethernet interface. */
ether_ifattach(ifp, sc->macaddr);
sc->is_attached = true;
xae_rx_enqueue(sc, NUM_RX_MBUF);
xdma_queue_submit(sc->xchan_rx);
return (0);
}
static int
xae_detach(device_t dev)
{
struct xae_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
device_get_nameunit(dev)));
ifp = sc->ifp;
/* Only cleanup if attach succeeded. */
if (device_is_attached(dev)) {
XAE_LOCK(sc);
xae_stop_locked(sc);
XAE_UNLOCK(sc);
callout_drain(&sc->xae_callout);
ether_ifdetach(ifp);
}
if (sc->miibus != NULL)
device_delete_child(dev, sc->miibus);
if (ifp != NULL)
if_free(ifp);
mtx_destroy(&sc->mtx);
bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
bus_release_resources(dev, xae_spec, sc->res);
xdma_channel_free(sc->xchan_tx);
xdma_channel_free(sc->xchan_rx);
xdma_put(sc->xdma_tx);
xdma_put(sc->xdma_rx);
return (0);
}
static void
xae_miibus_statchg(device_t dev)
{
struct xae_softc *sc;
struct mii_data *mii;
uint32_t reg;
/*
* Called by the MII bus driver when the PHY establishes
* link to set the MAC interface registers.
*/
sc = device_get_softc(dev);
XAE_ASSERT_LOCKED(sc);
mii = sc->mii_softc;
if (mii->mii_media_status & IFM_ACTIVE)
sc->link_is_up = true;
else
sc->link_is_up = false;
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_1000_T:
case IFM_1000_SX:
reg = SPEED_1000;
break;
case IFM_100_TX:
reg = SPEED_100;
break;
case IFM_10_T:
reg = SPEED_10;
break;
case IFM_NONE:
sc->link_is_up = false;
return;
default:
sc->link_is_up = false;
device_printf(dev, "Unsupported media %u\n",
IFM_SUBTYPE(mii->mii_media_active));
return;
}
WRITE4(sc, XAE_SPEED, reg);
}
static device_method_t xae_methods[] = {
DEVMETHOD(device_probe, xae_probe),
DEVMETHOD(device_attach, xae_attach),
DEVMETHOD(device_detach, xae_detach),
/* MII Interface */
DEVMETHOD(miibus_readreg, xae_miibus_read_reg),
DEVMETHOD(miibus_writereg, xae_miibus_write_reg),
DEVMETHOD(miibus_statchg, xae_miibus_statchg),
{ 0, 0 }
};
driver_t xae_driver = {
"xae",
xae_methods,
sizeof(struct xae_softc),
};
static devclass_t xae_devclass;
DRIVER_MODULE(xae, simplebus, xae_driver, xae_devclass, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);
Index: head/sys/mips/ingenic/jz4780_aic.c
===================================================================
--- head/sys/mips/ingenic/jz4780_aic.c (revision 357685)
+++ head/sys/mips/ingenic/jz4780_aic.c (revision 357686)
@@ -1,809 +1,809 @@
/*-
* Copyright (c) 2016-2018 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Ingenic JZ4780 Audio Interface Controller (AIC). */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define AIC_NCHANNELS 1
struct aic_softc {
device_t dev;
struct resource *res[1];
bus_space_tag_t bst;
bus_space_handle_t bsh;
struct mtx *lock;
int pos;
bus_dma_tag_t dma_tag;
bus_dmamap_t dma_map;
bus_addr_t buf_base_phys;
uint32_t *buf_base;
uintptr_t aic_fifo_paddr;
int dma_size;
clk_t clk_aic;
clk_t clk_i2s;
struct aic_rate *sr;
void *ih;
int internal_codec;
/* xDMA */
struct xdma_channel *xchan;
xdma_controller_t *xdma_tx;
struct xdma_request req;
};
/* Channel registers */
struct sc_chinfo {
struct snd_dbuf *buffer;
struct pcm_channel *channel;
struct sc_pcminfo *parent;
/* Channel information */
uint32_t dir;
uint32_t format;
/* Flags */
uint32_t run;
};
/* PCM device private data */
struct sc_pcminfo {
device_t dev;
uint32_t chnum;
struct sc_chinfo chan[AIC_NCHANNELS];
struct aic_softc *sc;
};
static struct resource_spec aic_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ -1, 0 }
};
static int aic_probe(device_t dev);
static int aic_attach(device_t dev);
static int aic_detach(device_t dev);
static int setup_xdma(struct sc_pcminfo *scp);
struct aic_rate {
uint32_t speed;
};
static struct aic_rate rate_map[] = {
{ 48000 },
	/* TODO: add more frequencies */
{ 0 },
};
/*
* Mixer interface.
*/
static int
aicmixer_init(struct snd_mixer *m)
{
struct sc_pcminfo *scp;
struct aic_softc *sc;
int mask;
scp = mix_getdevinfo(m);
sc = scp->sc;
if (sc == NULL)
		return (-1);
mask = SOUND_MASK_PCM;
snd_mtxlock(sc->lock);
pcm_setflags(scp->dev, pcm_getflags(scp->dev) | SD_F_SOFTPCMVOL);
mix_setdevs(m, mask);
snd_mtxunlock(sc->lock);
return (0);
}
static int
aicmixer_set(struct snd_mixer *m, unsigned dev,
unsigned left, unsigned right)
{
struct sc_pcminfo *scp;
scp = mix_getdevinfo(m);
/* Here we can configure hardware volume on our DAC */
return (0);
}
static kobj_method_t aicmixer_methods[] = {
KOBJMETHOD(mixer_init, aicmixer_init),
KOBJMETHOD(mixer_set, aicmixer_set),
KOBJMETHOD_END
};
MIXER_DECLARE(aicmixer);
/*
* Channel interface.
*/
static void *
aicchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
struct pcm_channel *c, int dir)
{
struct sc_pcminfo *scp;
struct sc_chinfo *ch;
struct aic_softc *sc;
scp = (struct sc_pcminfo *)devinfo;
sc = scp->sc;
snd_mtxlock(sc->lock);
ch = &scp->chan[0];
ch->dir = dir;
ch->run = 0;
ch->buffer = b;
ch->channel = c;
ch->parent = scp;
snd_mtxunlock(sc->lock);
if (sndbuf_setup(ch->buffer, sc->buf_base, sc->dma_size) != 0) {
device_printf(scp->dev, "Can't setup sndbuf.\n");
		return (NULL);
}
return (ch);
}
static int
aicchan_free(kobj_t obj, void *data)
{
struct sc_chinfo *ch = data;
struct sc_pcminfo *scp = ch->parent;
struct aic_softc *sc = scp->sc;
snd_mtxlock(sc->lock);
/* TODO: free channel buffer */
snd_mtxunlock(sc->lock);
return (0);
}
static int
aicchan_setformat(kobj_t obj, void *data, uint32_t format)
{
struct sc_pcminfo *scp;
struct sc_chinfo *ch;
ch = data;
scp = ch->parent;
ch->format = format;
return (0);
}
static uint32_t
aicchan_setspeed(kobj_t obj, void *data, uint32_t speed)
{
struct sc_pcminfo *scp;
struct sc_chinfo *ch;
struct aic_rate *sr;
struct aic_softc *sc;
int threshold;
int i;
ch = data;
scp = ch->parent;
sc = scp->sc;
sr = NULL;
/* First, look for an exact frequency match. */
for (i = 0; rate_map[i].speed != 0; i++) {
if (rate_map[i].speed == speed)
sr = &rate_map[i];
}
/* If there is no exact match, pick the nearest rate. */
if (sr == NULL) {
for (i = 0; rate_map[i].speed != 0; i++) {
sr = &rate_map[i];
threshold = sr->speed + ((rate_map[i + 1].speed != 0) ?
((rate_map[i + 1].speed - sr->speed) >> 1) : 0);
if (speed < threshold)
break;
}
}
sc->sr = sr;
/* Clocks can be reconfigured here. */
return (sr->speed);
}
static uint32_t
aicchan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
{
struct sc_pcminfo *scp;
struct sc_chinfo *ch;
struct aic_softc *sc;
ch = data;
scp = ch->parent;
sc = scp->sc;
sndbuf_resize(ch->buffer, sc->dma_size / blocksize, blocksize);
return (sndbuf_getblksz(ch->buffer));
}
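/*
 * xDMA completion callback: advance the playback position by one DMA
 * block within the circular buffer and, if the channel is running,
 * notify the PCM layer.
 */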
static int
aic_intr(void *arg, xdma_transfer_status_t *status)
{
struct sc_pcminfo *scp;
struct xdma_request *req;
xdma_channel_t *xchan;
struct sc_chinfo *ch;
struct aic_softc *sc;
int bufsize;
scp = arg;
sc = scp->sc;
ch = &scp->chan[0];
req = &sc->req;
xchan = sc->xchan;
bufsize = sndbuf_getsize(ch->buffer);
sc->pos += req->block_len;
if (sc->pos >= bufsize)
sc->pos -= bufsize;
if (ch->run)
chn_intr(ch->channel);
return (0);
}
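/*
 * Program a cyclic memory-to-device transfer from the playback buffer
 * to the AIC FIFO. The 2-byte source/destination width matches the
 * 16-bit-only sample format asserted below.
 */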
static int
setup_xdma(struct sc_pcminfo *scp)
{
struct aic_softc *sc;
struct sc_chinfo *ch;
int fmt;
int err;
ch = &scp->chan[0];
sc = scp->sc;
fmt = sndbuf_getfmt(ch->buffer);
KASSERT(fmt & AFMT_16BIT, ("Only 16-bit audio is supported."));
sc->req.operation = XDMA_CYCLIC;
sc->req.req_type = XR_TYPE_PHYS;
sc->req.direction = XDMA_MEM_TO_DEV;
sc->req.src_addr = sc->buf_base_phys;
sc->req.dst_addr = sc->aic_fifo_paddr;
sc->req.src_width = 2;
sc->req.dst_width = 2;
sc->req.block_len = sndbuf_getblksz(ch->buffer);
sc->req.block_num = sndbuf_getblkcnt(ch->buffer);
err = xdma_request(sc->xchan, &sc->req);
if (err != 0) {
device_printf(sc->dev, "Can't configure the virtual channel.\n");
return (-1);
}
xdma_control(sc->xchan, XDMA_CMD_BEGIN);
return (0);
}
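/* Enable the I2S clock, start the cyclic DMA and enable 16-bit stereo playback. */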
static int
aic_start(struct sc_pcminfo *scp)
{
struct aic_softc *sc;
int reg;
sc = scp->sc;
/* Ensure clock enabled. */
reg = READ4(sc, I2SCR);
reg |= (I2SCR_ESCLK);
WRITE4(sc, I2SCR, reg);
setup_xdma(scp);
reg = (AICCR_OSS_16 | AICCR_ISS_16);
reg |= (AICCR_CHANNEL_2);
reg |= (AICCR_TDMS);
reg |= (AICCR_ERPL);
WRITE4(sc, AICCR, reg);
return (0);
}
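/* Disable playback and transmit DMA, then terminate the xDMA transfer. */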
static int
aic_stop(struct sc_pcminfo *scp)
{
struct aic_softc *sc;
int reg;
sc = scp->sc;
reg = READ4(sc, AICCR);
reg &= ~(AICCR_TDMS | AICCR_ERPL);
WRITE4(sc, AICCR, reg);
xdma_control(sc->xchan, XDMA_CMD_TERMINATE);
return (0);
}
static int
aicchan_trigger(kobj_t obj, void *data, int go)
{
struct sc_pcminfo *scp;
struct sc_chinfo *ch;
struct aic_softc *sc;
ch = data;
scp = ch->parent;
sc = scp->sc;
snd_mtxlock(sc->lock);
switch (go) {
case PCMTRIG_START:
ch->run = 1;
sc->pos = 0;
aic_start(scp);
break;
case PCMTRIG_STOP:
case PCMTRIG_ABORT:
ch->run = 0;
aic_stop(scp);
sc->pos = 0;
bzero(sc->buf_base, sc->dma_size);
break;
}
snd_mtxunlock(sc->lock);
return (0);
}
static uint32_t
aicchan_getptr(kobj_t obj, void *data)
{
struct sc_pcminfo *scp;
struct sc_chinfo *ch;
struct aic_softc *sc;
ch = data;
scp = ch->parent;
sc = scp->sc;
return (sc->pos);
}
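/* Playback capabilities: 16-bit signed little-endian stereo at 48 kHz only. */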
static uint32_t aic_pfmt[] = {
SND_FORMAT(AFMT_S16_LE, 2, 0),
0
};
static struct pcmchan_caps aic_pcaps = {48000, 48000, aic_pfmt, 0};
static struct pcmchan_caps *
aicchan_getcaps(kobj_t obj, void *data)
{
return (&aic_pcaps);
}
static kobj_method_t aicchan_methods[] = {
KOBJMETHOD(channel_init, aicchan_init),
KOBJMETHOD(channel_free, aicchan_free),
KOBJMETHOD(channel_setformat, aicchan_setformat),
KOBJMETHOD(channel_setspeed, aicchan_setspeed),
KOBJMETHOD(channel_setblocksize, aicchan_setblocksize),
KOBJMETHOD(channel_trigger, aicchan_trigger),
KOBJMETHOD(channel_getptr, aicchan_getptr),
KOBJMETHOD(channel_getcaps, aicchan_getcaps),
KOBJMETHOD_END
};
CHANNEL_DECLARE(aicchan);
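/* bus_dmamap_load() callback: record the physical address of the buffer. */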
static void
aic_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
bus_addr_t *addr;
if (err)
return;
addr = (bus_addr_t*)arg;
*addr = segs[0].ds_addr;
}
static int
aic_dma_setup(struct aic_softc *sc)
{
device_t dev;
int err;
dev = sc->dev;
/* DMA buffer size. */
sc->dma_size = 131072;
/*
 * Use dma_size as the boundary, since the modulo feature is
 * required; it allows the buffer to be addressed circularly.
 */
err = bus_dma_tag_create(
bus_get_dma_tag(sc->dev),
4, sc->dma_size, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sc->dma_size, 1, /* maxsize, nsegments */
sc->dma_size, 0, /* maxsegsize, flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->dma_tag);
if (err) {
device_printf(dev, "cannot create bus dma tag\n");
return (-1);
}
err = bus_dmamem_alloc(sc->dma_tag, (void **)&sc->buf_base,
BUS_DMA_WAITOK | BUS_DMA_COHERENT, &sc->dma_map);
if (err) {
device_printf(dev, "cannot allocate memory\n");
return (-1);
}
err = bus_dmamap_load(sc->dma_tag, sc->dma_map, sc->buf_base,
sc->dma_size, aic_dmamap_cb, &sc->buf_base_phys, BUS_DMA_WAITOK);
if (err) {
device_printf(dev, "cannot load DMA map\n");
return (-1);
}
bzero(sc->buf_base, sc->dma_size);
return (0);
}
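/* Look up and enable the "aic" and "i2s" clocks from the FDT; the i2s clock is run at 12 MHz. */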
static int
aic_configure_clocks(struct aic_softc *sc)
{
uint64_t aic_freq;
uint64_t i2s_freq;
device_t dev;
int err;
dev = sc->dev;
err = clk_get_by_ofw_name(sc->dev, 0, "aic", &sc->clk_aic);
if (err != 0) {
device_printf(dev, "Can't find aic clock.\n");
return (-1);
}
err = clk_enable(sc->clk_aic);
if (err != 0) {
device_printf(dev, "Can't enable aic clock.\n");
return (-1);
}
err = clk_get_by_ofw_name(sc->dev, 0, "i2s", &sc->clk_i2s);
if (err != 0) {
device_printf(dev, "Can't find i2s clock.\n");
return (-1);
}
err = clk_enable(sc->clk_i2s);
if (err != 0) {
device_printf(dev, "Can't enable i2s clock.\n");
return (-1);
}
err = clk_set_freq(sc->clk_i2s, 12000000, 0);
if (err != 0) {
device_printf(dev, "Can't set i2s frequency.\n");
return (-1);
}
clk_get_freq(sc->clk_aic, &aic_freq);
clk_get_freq(sc->clk_i2s, &i2s_freq);
device_printf(dev, "Frequency aic %d i2s %d\n",
(uint32_t)aic_freq, (uint32_t)i2s_freq);
return (0);
}
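/*
 * Reset the controller, select internal or external codec mode,
 * program the I2S format and FIFO thresholds, then enable the block.
 */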
static int
aic_configure(struct aic_softc *sc)
{
int reg;
WRITE4(sc, AICFR, AICFR_RST);
/* Configure AIC */
reg = 0;
if (sc->internal_codec) {
reg |= (AICFR_ICDC);
} else {
reg |= (AICFR_SYNCD | AICFR_BCKD);
}
reg |= (AICFR_AUSEL); /* I2S/MSB-justified format. */
reg |= (AICFR_TFTH(8)); /* Transmit FIFO threshold */
reg |= (AICFR_RFTH(7)); /* Receive FIFO threshold */
WRITE4(sc, AICFR, reg);
reg = READ4(sc, AICFR);
reg |= (AICFR_ENB); /* Enable the controller. */
WRITE4(sc, AICFR, reg);
return (0);
}
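/*
 * Sysctl handler: toggle the internal codec at runtime, reconfiguring
 * the AIC and restarting the channel if it is running.
 */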
static int
sysctl_hw_pcm_internal_codec(SYSCTL_HANDLER_ARGS)
{
struct sc_pcminfo *scp;
struct sc_chinfo *ch;
struct aic_softc *sc;
int error, val;
if (arg1 == NULL)
return (EINVAL);
scp = arg1;
sc = scp->sc;
ch = &scp->chan[0];
snd_mtxlock(sc->lock);
val = sc->internal_codec;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || req->newptr == NULL) {
snd_mtxunlock(sc->lock);
return (error);
}
if (val < 0 || val > 1) {
snd_mtxunlock(sc->lock);
return (EINVAL);
}
if (sc->internal_codec != val) {
sc->internal_codec = val;
if (ch->run)
aic_stop(scp);
aic_configure(sc);
if (ch->run)
aic_start(scp);
}
snd_mtxunlock(sc->lock);
return (0);
}
static int
aic_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "ingenic,jz4780-i2s"))
return (ENXIO);
device_set_desc(dev, "Audio Interface Controller");
return (BUS_PROBE_DEFAULT);
}
static int
aic_attach(device_t dev)
{
char status[SND_STATUSLEN];
struct sc_pcminfo *scp;
struct aic_softc *sc;
int err;
sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
sc->dev = dev;
sc->pos = 0;
sc->internal_codec = 1;
/* Get xDMA controller */
sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
if (sc->xdma_tx == NULL) {
device_printf(dev, "Can't find DMA controller.\n");
return (ENXIO);
}
/* Alloc xDMA virtual channel. */
sc->xchan = xdma_channel_alloc(sc->xdma_tx, 0);
if (sc->xchan == NULL) {
device_printf(dev, "Can't alloc virtual DMA channel.\n");
return (ENXIO);
}
/* Setup sound subsystem */
sc->lock = snd_mtxcreate(device_get_nameunit(dev), "aic softc");
if (sc->lock == NULL) {
device_printf(dev, "Can't create mtx.\n");
return (ENXIO);
}
if (bus_alloc_resources(dev, aic_spec, sc->res)) {
device_printf(dev,
"could not allocate resources for device\n");
return (ENXIO);
}
/* Memory interface */
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
sc->aic_fifo_paddr = rman_get_start(sc->res[0]) + AICDR;
/* Setup PCM. */
scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
scp->sc = sc;
scp->dev = dev;
/* Setup audio buffer. */
err = aic_dma_setup(sc);
if (err != 0) {
device_printf(dev, "Can't setup sound buffer.\n");
return (ENXIO);
}
/* Setup clocks. */
err = aic_configure_clocks(sc);
if (err != 0) {
device_printf(dev, "Can't configure clocks.\n");
return (ENXIO);
}
err = aic_configure(sc);
if (err != 0) {
device_printf(dev, "Can't configure AIC.\n");
return (ENXIO);
}
pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE);
/* Setup interrupt handler. */
- err = xdma_setup_intr(sc->xchan, aic_intr, scp, &sc->ih);
+ err = xdma_setup_intr(sc->xchan, 0, aic_intr, scp, &sc->ih);
if (err) {
device_printf(sc->dev,
"Can't setup xDMA interrupt handler.\n");
return (ENXIO);
}
err = pcm_register(dev, scp, 1, 0);
if (err) {
device_printf(dev, "Can't register pcm.\n");
return (ENXIO);
}
scp->chnum = 0;
pcm_addchan(dev, PCMDIR_PLAY, &aicchan_class, scp);
scp->chnum++;
snprintf(status, SND_STATUSLEN, "at %s", ofw_bus_get_name(dev));
pcm_setstatus(dev, status);
mixer_init(dev, &aicmixer_class, scp);
/* Create device sysctl node. */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "internal_codec", CTLTYPE_INT | CTLFLAG_RW,
scp, 0, sysctl_hw_pcm_internal_codec, "I",
"use internal audio codec");
return (0);
}
static int
aic_detach(device_t dev)
{
struct aic_softc *sc;
sc = device_get_softc(dev);
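/*
 * XXX: aic_attach() allocates its softc with malloc() and passes it to
 * pcm_register() via scp, so device_get_softc() here returns the pcm
 * softc rather than that allocation; the DMA resources and the mutex
 * are not released either.
 */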
xdma_channel_free(sc->xchan);
bus_release_resources(dev, aic_spec, sc->res);
return (0);
}
static device_method_t aic_pcm_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, aic_probe),
DEVMETHOD(device_attach, aic_attach),
DEVMETHOD(device_detach, aic_detach),
DEVMETHOD_END
};
static driver_t aic_pcm_driver = {
"pcm",
aic_pcm_methods,
PCM_SOFTC_SIZE,
};
DRIVER_MODULE(aic, simplebus, aic_pcm_driver, pcm_devclass, 0, 0);
MODULE_DEPEND(aic, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
MODULE_VERSION(aic, 1);