Index: head/sys/dev/xdma/xdma.c
===================================================================
--- head/sys/dev/xdma/xdma.c (revision 357652)
+++ head/sys/dev/xdma/xdma.c (revision 357653)
@@ -1,543 +1,561 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2016-2019 Ruslan Bukin
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef FDT
#include
#include
#include
#endif
#include
#include
/*
* Multiple xDMA controllers may work with single DMA device,
* so we have global lock for physical channel management.
*/
static struct mtx xdma_mtx;
#define XDMA_LOCK() mtx_lock(&xdma_mtx)
#define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
#define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)
#define FDT_REG_CELLS 4
#ifdef FDT
static int
xdma_get_iommu_fdt(xdma_controller_t *xdma, xdma_channel_t *xchan)
{
struct xdma_iommu *xio;
phandle_t node;
pcell_t prop;
size_t len;
node = ofw_bus_get_node(xdma->dma_dev);
if (OF_getproplen(node, "xdma,iommu") <= 0)
return (0);
len = OF_getencprop(node, "xdma,iommu", &prop, sizeof(prop));
if (len != sizeof(prop)) {
device_printf(xdma->dev,
"%s: Can't get iommu device node\n", __func__);
return (0);
}
xio = &xchan->xio;
xio->dev = OF_device_from_xref(prop);
if (xio->dev == NULL) {
device_printf(xdma->dev,
"%s: Can't get iommu device\n", __func__);
return (0);
}
/* Found */
return (1);
}
#endif
/*
* Allocate virtual xDMA channel.
*/
xdma_channel_t *
xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
{
xdma_channel_t *xchan;
int ret;
xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
xchan->xdma = xdma;
#ifdef FDT
/* Check if this DMA controller supports IOMMU. */
if (xdma_get_iommu_fdt(xdma, xchan))
caps |= XCHAN_CAP_IOMMU | XCHAN_CAP_NOSEG;
#endif
xchan->caps = caps;
XDMA_LOCK();
/* Request a real channel from hardware driver. */
ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't request hardware channel.\n", __func__);
XDMA_UNLOCK();
free(xchan, M_XDMA);
return (NULL);
}
TAILQ_INIT(&xchan->ie_handlers);
mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);
TAILQ_INIT(&xchan->bank);
TAILQ_INIT(&xchan->queue_in);
TAILQ_INIT(&xchan->queue_out);
TAILQ_INIT(&xchan->processing);
if (xchan->caps & XCHAN_CAP_IOMMU)
xdma_iommu_init(&xchan->xio);
TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
XDMA_UNLOCK();
return (xchan);
}
int
xdma_channel_free(xdma_channel_t *xchan)
{
xdma_controller_t *xdma;
int err;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
XDMA_LOCK();
/* Free the real DMA channel. */
err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
if (err != 0) {
device_printf(xdma->dev,
"%s: Can't free real hw channel.\n", __func__);
XDMA_UNLOCK();
return (-1);
}
if (xchan->flags & XCHAN_TYPE_SG)
xdma_channel_free_sg(xchan);
if (xchan->caps & XCHAN_CAP_IOMMU)
xdma_iommu_release(&xchan->xio);
xdma_teardown_all_intr(xchan);
mtx_destroy(&xchan->mtx_lock);
mtx_destroy(&xchan->mtx_qin_lock);
mtx_destroy(&xchan->mtx_qout_lock);
mtx_destroy(&xchan->mtx_bank_lock);
mtx_destroy(&xchan->mtx_proc_lock);
TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
free(xchan, M_XDMA);
XDMA_UNLOCK();
return (0);
}
int
xdma_setup_intr(xdma_channel_t *xchan,
int (*cb)(void *, xdma_transfer_status_t *),
void *arg, void **ihandler)
{
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
/* Sanity check. */
if (cb == NULL) {
device_printf(xdma->dev,
"%s: Can't setup interrupt handler.\n",
__func__);
return (-1);
}
ih = malloc(sizeof(struct xdma_intr_handler),
M_XDMA, M_WAITOK | M_ZERO);
ih->cb = cb;
ih->cb_user = arg;
XCHAN_LOCK(xchan);
TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
XCHAN_UNLOCK(xchan);
if (ihandler != NULL)
*ihandler = ih;
return (0);
}
int
xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
{
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
/* Sanity check. */
if (ih == NULL) {
device_printf(xdma->dev,
"%s: Can't teardown interrupt.\n", __func__);
return (-1);
}
TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
free(ih, M_XDMA);
return (0);
}
int
xdma_teardown_all_intr(xdma_channel_t *xchan)
{
struct xdma_intr_handler *ih_tmp;
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
free(ih, M_XDMA);
}
return (0);
}
int
xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
XCHAN_LOCK(xchan);
ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't request a transfer.\n", __func__);
XCHAN_UNLOCK(xchan);
return (-1);
}
XCHAN_UNLOCK(xchan);
return (0);
}
int
xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
{
xdma_controller_t *xdma;
int ret;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
if (ret != 0) {
device_printf(xdma->dev,
"%s: Can't process command.\n", __func__);
return (-1);
}
return (0);
}
void
xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
{
struct xdma_intr_handler *ih_tmp;
struct xdma_intr_handler *ih;
xdma_controller_t *xdma;
xdma = xchan->xdma;
KASSERT(xdma != NULL, ("xdma is NULL"));
TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp)
if (ih->cb != NULL)
ih->cb(ih->cb_user, status);
if (xchan->flags & XCHAN_TYPE_SG)
xdma_queue_submit(xchan);
}
#ifdef FDT
/*
* Notify the DMA driver we have machine-dependent data in FDT.
*/
static int
xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
{
uint32_t ret;
ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
cells, ncells, (void **)&xdma->data);
return (ret);
}
int
xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
{
pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
pcell_t *regp;
int addr_cells, size_cells;
int i, reg_len, ret, tuple_size, tuples;
u_long mem_start, mem_size;
if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
&size_cells)) != 0)
return (ret);
if (addr_cells > 2)
return (ERANGE);
tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
reg_len = OF_getproplen(memory, "reg");
if (reg_len <= 0 || reg_len > sizeof(reg))
return (ERANGE);
if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
return (ENXIO);
tuples = reg_len / tuple_size;
regp = (pcell_t *)&reg;
for (i = 0; i < tuples; i++) {
ret = fdt_data_to_res(regp, addr_cells, size_cells,
&mem_start, &mem_size);
if (ret != 0)
return (ret);
vmem_add(vmem, mem_start, mem_size, 0);
regp += addr_cells + size_cells;
}
return (0);
}
vmem_t *
xdma_get_memory(device_t dev)
{
phandle_t mem_node, node;
pcell_t mem_handle;
vmem_t *vmem;
node = ofw_bus_get_node(dev);
if (node <= 0) {
device_printf(dev,
"%s called on not ofw based device.\n", __func__);
return (NULL);
}
if (!OF_hasprop(node, "memory-region"))
return (NULL);
if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
sizeof(mem_handle)) <= 0)
return (NULL);
vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
PAGE_SIZE, M_BESTFIT | M_WAITOK);
if (vmem == NULL)
return (NULL);
mem_node = OF_node_from_xref(mem_handle);
if (xdma_handle_mem_node(vmem, mem_node) != 0) {
vmem_destroy(vmem);
return (NULL);
}
return (vmem);
}
void
xdma_put_memory(vmem_t *vmem)
{
vmem_destroy(vmem);
}
void
xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
{
xchan->vmem = vmem;
}
/*
* Allocate xdma controller.
*/
xdma_controller_t *
xdma_ofw_get(device_t dev, const char *prop)
{
phandle_t node, parent;
xdma_controller_t *xdma;
device_t dma_dev;
pcell_t *cells;
int ncells;
int error;
int ndmas;
int idx;
node = ofw_bus_get_node(dev);
if (node <= 0)
device_printf(dev,
"%s called on not ofw based device.\n", __func__);
error = ofw_bus_parse_xref_list_get_length(node,
"dmas", "#dma-cells", &ndmas);
if (error) {
device_printf(dev,
"%s can't get dmas list.\n", __func__);
return (NULL);
}
if (ndmas == 0) {
device_printf(dev,
"%s dmas list is empty.\n", __func__);
return (NULL);
}
error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
if (error != 0) {
device_printf(dev,
"%s can't find string index.\n", __func__);
return (NULL);
}
error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
idx, &parent, &ncells, &cells);
if (error != 0) {
device_printf(dev,
"%s can't get dma device xref.\n", __func__);
return (NULL);
}
dma_dev = OF_device_from_xref(parent);
if (dma_dev == NULL) {
device_printf(dev,
"%s can't get dma device.\n", __func__);
return (NULL);
}
xdma = malloc(sizeof(struct xdma_controller),
M_XDMA, M_WAITOK | M_ZERO);
xdma->dev = dev;
xdma->dma_dev = dma_dev;
TAILQ_INIT(&xdma->channels);
xdma_ofw_md_data(xdma, cells, ncells);
free(cells, M_OFWPROP);
return (xdma);
}
#endif
/*
+ * Allocate xdma controller.
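+ * The caller provides the DMA engine device_t directly, so no OFW/FDT
+ * lookup is performed here.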
+ */
+xdma_controller_t *
+xdma_get(device_t dev, device_t dma_dev)
+{
+ xdma_controller_t *xdma;
+
+ xdma = malloc(sizeof(struct xdma_controller),
+ M_XDMA, M_WAITOK | M_ZERO);
+ xdma->dev = dev;
+ xdma->dma_dev = dma_dev;
+
+ TAILQ_INIT(&xdma->channels);
+
+ return (xdma);
+}
+
+/*
* Free xDMA controller object.
*/
int
xdma_put(xdma_controller_t *xdma)
{
XDMA_LOCK();
/* Ensure no channels allocated. */
if (!TAILQ_EMPTY(&xdma->channels)) {
device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
return (-1);
}
free(xdma->data, M_DEVBUF);
free(xdma, M_XDMA);
XDMA_UNLOCK();
return (0);
}
static void
xdma_init(void)
{
mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
}
SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
Index: head/sys/dev/xdma/xdma.h
===================================================================
--- head/sys/dev/xdma/xdma.h (revision 357652)
+++ head/sys/dev/xdma/xdma.h (revision 357653)
@@ -1,302 +1,303 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2016-2019 Ruslan Bukin
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_
#include
#include
#ifdef FDT
#include
#include
#endif
#include
#include
enum xdma_direction {
XDMA_MEM_TO_MEM,
XDMA_MEM_TO_DEV,
XDMA_DEV_TO_MEM,
XDMA_DEV_TO_DEV,
};
enum xdma_operation_type {
XDMA_MEMCPY,
XDMA_CYCLIC,
XDMA_FIFO,
XDMA_SG,
};
enum xdma_request_type {
XR_TYPE_PHYS,
XR_TYPE_VIRT,
XR_TYPE_MBUF,
XR_TYPE_BIO,
};
enum xdma_command {
XDMA_CMD_BEGIN,
XDMA_CMD_PAUSE,
XDMA_CMD_TERMINATE,
};
struct xdma_transfer_status {
uint32_t transferred;
int error;
};
typedef struct xdma_transfer_status xdma_transfer_status_t;
struct xdma_controller {
device_t dev; /* DMA consumer device_t. */
device_t dma_dev; /* A real DMA device_t. */
void *data; /* OFW MD part. */
vmem_t *vmem; /* Bounce memory. */
/* List of virtual channels allocated. */
TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
};
typedef struct xdma_controller xdma_controller_t;
struct xchan_buf {
bus_dmamap_t map;
uint32_t nsegs;
uint32_t nsegs_left;
vm_offset_t vaddr;
vm_offset_t paddr;
vm_size_t size;
};
struct xdma_request {
struct mbuf *m;
struct bio *bp;
enum xdma_operation_type operation;
enum xdma_request_type req_type;
enum xdma_direction direction;
bus_addr_t src_addr;
bus_addr_t dst_addr;
uint8_t src_width;
uint8_t dst_width;
bus_size_t block_num;
bus_size_t block_len;
xdma_transfer_status_t status;
void *user;
TAILQ_ENTRY(xdma_request) xr_next;
struct xchan_buf buf;
};
struct xdma_sglist {
bus_addr_t src_addr;
bus_addr_t dst_addr;
size_t len;
uint8_t src_width;
uint8_t dst_width;
enum xdma_direction direction;
bool first;
bool last;
};
struct xdma_iommu {
struct pmap p;
vmem_t *vmem; /* VA space */
device_t dev; /* IOMMU device */
};
struct xdma_channel {
xdma_controller_t *xdma;
vmem_t *vmem;
uint32_t flags;
#define XCHAN_BUFS_ALLOCATED (1 << 0)
#define XCHAN_SGLIST_ALLOCATED (1 << 1)
#define XCHAN_CONFIGURED (1 << 2)
#define XCHAN_TYPE_CYCLIC (1 << 3)
#define XCHAN_TYPE_MEMCPY (1 << 4)
#define XCHAN_TYPE_FIFO (1 << 5)
#define XCHAN_TYPE_SG (1 << 6)
uint32_t caps;
#define XCHAN_CAP_BUSDMA (1 << 0)
#define XCHAN_CAP_NOSEG (1 << 1)
#define XCHAN_CAP_BOUNCE (1 << 2)
#define XCHAN_CAP_IOMMU (1 << 3)
/* A real hardware driver channel. */
void *chan;
/* Interrupt handlers. */
TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
TAILQ_ENTRY(xdma_channel) xchan_next;
struct mtx mtx_lock;
struct mtx mtx_qin_lock;
struct mtx mtx_qout_lock;
struct mtx mtx_bank_lock;
struct mtx mtx_proc_lock;
/* Request queue. */
bus_dma_tag_t dma_tag_bufs;
struct xdma_request *xr_mem;
uint32_t xr_num;
/* Bus dma tag options. */
bus_size_t maxsegsize;
bus_size_t maxnsegs;
bus_size_t alignment;
bus_addr_t boundary;
bus_addr_t lowaddr;
bus_addr_t highaddr;
struct xdma_sglist *sg;
TAILQ_HEAD(, xdma_request) bank;
TAILQ_HEAD(, xdma_request) queue_in;
TAILQ_HEAD(, xdma_request) queue_out;
TAILQ_HEAD(, xdma_request) processing;
/* iommu */
struct xdma_iommu xio;
};
typedef struct xdma_channel xdma_channel_t;
struct xdma_intr_handler {
int (*cb)(void *cb_user, xdma_transfer_status_t *status);
void *cb_user;
TAILQ_ENTRY(xdma_intr_handler) ih_next;
};
static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
#define XCHAN_LOCK(xchan) mtx_lock(&(xchan)->mtx_lock)
#define XCHAN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_lock)
#define XCHAN_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
#define QUEUE_IN_LOCK(xchan) mtx_lock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)
#define QUEUE_OUT_LOCK(xchan) mtx_lock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)
#define QUEUE_BANK_LOCK(xchan) mtx_lock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)
#define QUEUE_PROC_LOCK(xchan) mtx_lock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_UNLOCK(xchan) mtx_unlock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_ASSERT_LOCKED(xchan) \
mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)
#define XDMA_SGLIST_MAXLEN 2048
#define XDMA_MAX_SEG 128
/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
+xdma_controller_t *xdma_get(device_t dev, device_t dma_dev);
int xdma_put(xdma_controller_t *xdma);
vmem_t * xdma_get_memory(device_t dev);
void xdma_put_memory(vmem_t *vmem);
#ifdef FDT
int xdma_handle_mem_node(vmem_t *vmem, phandle_t memory);
#endif
/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);
/* SG interface */
int xdma_prep_sg(xdma_channel_t *, uint32_t,
bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);
/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);
/* Interrupt callback */
int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
uint32_t nsegs, struct xdma_request *xr);
/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
/* IOMMU */
void xdma_iommu_add_entry(xdma_channel_t *xchan, vm_offset_t *va,
vm_paddr_t pa, vm_size_t size, vm_prot_t prot);
void xdma_iommu_remove_entry(xdma_channel_t *xchan, vm_offset_t va);
int xdma_iommu_init(struct xdma_iommu *xio);
int xdma_iommu_release(struct xdma_iommu *xio);
#endif /* !_DEV_XDMA_XDMA_H_ */
Index: head/sys/dev/xilinx/axidma.c
===================================================================
--- head/sys/dev/xilinx/axidma.c (revision 357652)
+++ head/sys/dev/xilinx/axidma.c (revision 357653)
@@ -1,649 +1,649 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Ruslan Bukin
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
* Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
* DARPA SSITH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Xilinx AXI DMA controller driver. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_platform.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef FDT
#include
#include
#include
#endif
#include
#include
#include "xdma_if.h"
+#define READ4(_sc, _reg) \
+ bus_space_read_4(_sc->bst, _sc->bsh, _reg)
+#define WRITE4(_sc, _reg, _val) \
+ bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)
+#define READ8(_sc, _reg) \
+ bus_space_read_8(_sc->bst, _sc->bsh, _reg)
+#define WRITE8(_sc, _reg, _val) \
+ bus_space_write_8(_sc->bst, _sc->bsh, _reg, _val)
+
#define AXIDMA_DEBUG
#undef AXIDMA_DEBUG
#ifdef AXIDMA_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
-#define AXIDMA_NCHANNELS 2
-#define AXIDMA_DESCS_NUM 512
-#define AXIDMA_TX_CHAN 0
-#define AXIDMA_RX_CHAN 1
-
extern struct bus_space memmap_bus;
-
-struct axidma_fdt_data {
- int id;
-};
struct axidma_channel {
struct axidma_softc *sc;
xdma_channel_t *xchan;
bool used;
int idx_head;
int idx_tail;
struct axidma_desc **descs;
vm_paddr_t *descs_phys;
uint32_t descs_num;
vm_size_t mem_size;
vm_offset_t mem_paddr;
vm_offset_t mem_vaddr;
uint32_t descs_used_count;
};
struct axidma_softc {
device_t dev;
struct resource *res[3];
bus_space_tag_t bst;
bus_space_handle_t bsh;
void *ih[2];
struct axidma_desc desc;
struct axidma_channel channels[AXIDMA_NCHANNELS];
};
static struct resource_spec axidma_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0 }
};
#define HWTYPE_NONE 0
#define HWTYPE_STD 1
static struct ofw_compat_data compat_data[] = {
{ "xlnx,eth-dma", HWTYPE_STD },
{ NULL, HWTYPE_NONE },
};
static int axidma_probe(device_t dev);
static int axidma_attach(device_t dev);
static int axidma_detach(device_t dev);
static inline uint32_t
axidma_next_desc(struct axidma_channel *chan, uint32_t curidx)
{
return ((curidx + 1) % chan->descs_num);
}
static void
axidma_intr(struct axidma_softc *sc,
struct axidma_channel *chan)
{
xdma_transfer_status_t status;
xdma_transfer_status_t st;
struct axidma_fdt_data *data;
xdma_controller_t *xdma;
struct axidma_desc *desc;
struct xdma_channel *xchan;
uint32_t tot_copied;
int pending;
int errors;
xchan = chan->xchan;
xdma = xchan->xdma;
data = xdma->data;
pending = READ4(sc, AXI_DMASR(data->id));
WRITE4(sc, AXI_DMASR(data->id), pending);
errors = (pending & (DMASR_DMAINTERR | DMASR_DMASLVERR
| DMASR_DMADECOREERR | DMASR_SGINTERR
| DMASR_SGSLVERR | DMASR_SGDECERR));
dprintf("%s: AXI_DMASR %x\n", __func__,
READ4(sc, AXI_DMASR(data->id)));
dprintf("%s: AXI_CURDESC %x\n", __func__,
READ4(sc, AXI_CURDESC(data->id)));
dprintf("%s: AXI_TAILDESC %x\n", __func__,
READ4(sc, AXI_TAILDESC(data->id)));
tot_copied = 0;
while (chan->idx_tail != chan->idx_head) {
desc = chan->descs[chan->idx_tail];
if ((desc->status & BD_STATUS_CMPLT) == 0)
break;
st.error = errors;
st.transferred = desc->status & BD_CONTROL_LEN_M;
tot_copied += st.transferred;
xchan_seg_done(xchan, &st);
chan->idx_tail = axidma_next_desc(chan, chan->idx_tail);
atomic_subtract_int(&chan->descs_used_count, 1);
}
/* Finish operation */
status.error = errors;
status.transferred = tot_copied;
xdma_callback(chan->xchan, &status);
}
static void
axidma_intr_rx(void *arg)
{
struct axidma_softc *sc;
struct axidma_channel *chan;
dprintf("%s\n", __func__);
sc = arg;
chan = &sc->channels[AXIDMA_RX_CHAN];
axidma_intr(sc, chan);
}
static void
axidma_intr_tx(void *arg)
{
struct axidma_softc *sc;
struct axidma_channel *chan;
dprintf("%s\n", __func__);
sc = arg;
chan = &sc->channels[AXIDMA_TX_CHAN];
axidma_intr(sc, chan);
}
static int
axidma_reset(struct axidma_softc *sc, int chan_id)
{
int timeout;
WRITE4(sc, AXI_DMACR(chan_id), DMACR_RESET);
timeout = 100;
do {
if ((READ4(sc, AXI_DMACR(chan_id)) & DMACR_RESET) == 0)
break;
} while (timeout--);
dprintf("timeout %d\n", timeout);
if (timeout == 0)
return (-1);
dprintf("%s: read control after reset: %x\n",
__func__, READ4(sc, AXI_DMACR(chan_id)));
return (0);
}
static int
axidma_probe(device_t dev)
{
int hwtype;
if (!ofw_bus_status_okay(dev))
return (ENXIO);
hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
if (hwtype == HWTYPE_NONE)
return (ENXIO);
device_set_desc(dev, "Xilinx AXI DMA");
return (BUS_PROBE_DEFAULT);
}
static int
axidma_attach(device_t dev)
{
struct axidma_softc *sc;
phandle_t xref, node;
int err;
sc = device_get_softc(dev);
sc->dev = dev;
if (bus_alloc_resources(dev, axidma_spec, sc->res)) {
device_printf(dev, "could not allocate resources.\n");
return (ENXIO);
}
/* CSR memory interface */
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
/* Setup interrupt handler */
err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
NULL, axidma_intr_tx, sc, &sc->ih[0]);
if (err) {
device_printf(dev, "Unable to alloc interrupt resource.\n");
return (ENXIO);
}
/* Setup interrupt handler */
err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
NULL, axidma_intr_rx, sc, &sc->ih[1]);
if (err) {
device_printf(dev, "Unable to alloc interrupt resource.\n");
return (ENXIO);
}
node = ofw_bus_get_node(dev);
xref = OF_xref_from_node(node);
OF_device_register_xref(xref, dev);
return (0);
}
static int
axidma_detach(device_t dev)
{
struct axidma_softc *sc;
sc = device_get_softc(dev);
bus_teardown_intr(dev, sc->res[1], sc->ih[0]);
bus_teardown_intr(dev, sc->res[2], sc->ih[1]);
bus_release_resources(dev, axidma_spec, sc->res);
return (0);
}
static int
axidma_desc_free(struct axidma_softc *sc, struct axidma_channel *chan)
{
struct xdma_channel *xchan;
int nsegments;
nsegments = chan->descs_num;
xchan = chan->xchan;
free(chan->descs, M_DEVBUF);
free(chan->descs_phys, M_DEVBUF);
pmap_kremove_device(chan->mem_vaddr, chan->mem_size);
kva_free(chan->mem_vaddr, chan->mem_size);
vmem_free(xchan->vmem, chan->mem_paddr, chan->mem_size);
return (0);
}
static int
axidma_desc_alloc(struct axidma_softc *sc, struct xdma_channel *xchan,
uint32_t desc_size)
{
struct axidma_channel *chan;
int nsegments;
int i;
chan = (struct axidma_channel *)xchan->chan;
nsegments = chan->descs_num;
chan->descs = malloc(nsegments * sizeof(struct axidma_desc *),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (chan->descs == NULL) {
device_printf(sc->dev,
"%s: Can't allocate memory.\n", __func__);
return (-1);
}
chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
M_DEVBUF, M_NOWAIT | M_ZERO);
chan->mem_size = desc_size * nsegments;
if (vmem_alloc(xchan->vmem, chan->mem_size, M_FIRSTFIT | M_NOWAIT,
&chan->mem_paddr)) {
device_printf(sc->dev, "Failed to allocate memory.\n");
return (-1);
}
chan->mem_vaddr = kva_alloc(chan->mem_size);
pmap_kenter_device(chan->mem_vaddr, chan->mem_size, chan->mem_paddr);
device_printf(sc->dev, "Allocated chunk %lx %d\n",
chan->mem_paddr, chan->mem_size);
for (i = 0; i < nsegments; i++) {
chan->descs[i] = (struct axidma_desc *)
((uint64_t)chan->mem_vaddr + desc_size * i);
chan->descs_phys[i] = chan->mem_paddr + desc_size * i;
}
return (0);
}
static int
axidma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
xdma_controller_t *xdma;
struct axidma_fdt_data *data;
struct axidma_channel *chan;
struct axidma_softc *sc;
sc = device_get_softc(dev);
if (xchan->caps & XCHAN_CAP_BUSDMA) {
device_printf(sc->dev,
"Error: busdma operation is not implemented.");
return (-1);
}
xdma = xchan->xdma;
data = xdma->data;
chan = &sc->channels[data->id];
if (chan->used == false) {
if (axidma_reset(sc, data->id) != 0)
return (-1);
chan->xchan = xchan;
xchan->caps |= XCHAN_CAP_BOUNCE;
xchan->chan = (void *)chan;
chan->sc = sc;
chan->used = true;
chan->idx_head = 0;
chan->idx_tail = 0;
chan->descs_used_count = 0;
chan->descs_num = AXIDMA_DESCS_NUM;
return (0);
}
return (-1);
}
static int
axidma_channel_free(device_t dev, struct xdma_channel *xchan)
{
struct axidma_channel *chan;
struct axidma_softc *sc;
sc = device_get_softc(dev);
chan = (struct axidma_channel *)xchan->chan;
axidma_desc_free(sc, chan);
chan->used = false;
return (0);
}
static int
axidma_channel_capacity(device_t dev, xdma_channel_t *xchan,
uint32_t *capacity)
{
struct axidma_channel *chan;
uint32_t c;
chan = (struct axidma_channel *)xchan->chan;
/* At least one descriptor must be left empty. */
c = (chan->descs_num - chan->descs_used_count - 1);
*capacity = c;
return (0);
}
static int
axidma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
struct xdma_sglist *sg, uint32_t sg_n)
{
xdma_controller_t *xdma;
struct axidma_fdt_data *data;
struct axidma_channel *chan;
struct axidma_desc *desc;
struct axidma_softc *sc;
uint32_t src_addr;
uint32_t dst_addr;
uint32_t addr;
uint32_t len;
uint32_t tmp;
int i;
int tail;
dprintf("%s: sg_n %d\n", __func__, sg_n);
sc = device_get_softc(dev);
chan = (struct axidma_channel *)xchan->chan;
xdma = xchan->xdma;
data = xdma->data;
if (sg_n == 0)
return (0);
tail = chan->idx_head;
tmp = 0;
for (i = 0; i < sg_n; i++) {
src_addr = (uint32_t)sg[i].src_addr;
dst_addr = (uint32_t)sg[i].dst_addr;
len = (uint32_t)sg[i].len;
dprintf("%s(%d): src %x dst %x len %d\n", __func__,
data->id, src_addr, dst_addr, len);
desc = chan->descs[chan->idx_head];
if (sg[i].direction == XDMA_MEM_TO_DEV)
desc->phys = src_addr;
else
desc->phys = dst_addr;
desc->status = 0;
desc->control = len;
if (sg[i].first == 1)
desc->control |= BD_CONTROL_TXSOF;
if (sg[i].last == 1)
desc->control |= BD_CONTROL_TXEOF;
tmp = chan->idx_head;
atomic_add_int(&chan->descs_used_count, 1);
chan->idx_head = axidma_next_desc(chan, chan->idx_head);
}
dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
READ8(sc, AXI_CURDESC(data->id)));
dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
READ8(sc, AXI_CURDESC(data->id)));
dprintf("%s(%d): status %x\n", __func__, data->id,
READ4(sc, AXI_DMASR(data->id)));
addr = chan->descs_phys[tmp];
WRITE8(sc, AXI_TAILDESC(data->id), addr);
return (0);
}
static int
axidma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
xdma_controller_t *xdma;
struct axidma_fdt_data *data;
struct axidma_channel *chan;
struct axidma_desc *desc;
struct axidma_softc *sc;
uint32_t addr;
uint32_t reg;
int ret;
int i;
sc = device_get_softc(dev);
chan = (struct axidma_channel *)xchan->chan;
xdma = xchan->xdma;
data = xdma->data;
dprintf("%s(%d)\n", __func__, data->id);
ret = axidma_desc_alloc(sc, xchan, sizeof(struct axidma_desc));
if (ret != 0) {
device_printf(sc->dev,
"%s: Can't allocate descriptors.\n", __func__);
return (-1);
}
for (i = 0; i < chan->descs_num; i++) {
desc = chan->descs[i];
bzero(desc, sizeof(struct axidma_desc));
if (i == (chan->descs_num - 1))
desc->next = chan->descs_phys[0];
else
desc->next = chan->descs_phys[i + 1];
desc->status = 0;
desc->control = 0;
dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
data->id, i, (uint64_t)desc, le32toh(desc->next));
}
addr = chan->descs_phys[0];
WRITE8(sc, AXI_CURDESC(data->id), addr);
reg = READ4(sc, AXI_DMACR(data->id));
reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
WRITE4(sc, AXI_DMACR(data->id), reg);
reg |= DMACR_RS;
WRITE4(sc, AXI_DMACR(data->id), reg);
return (0);
}
static int
axidma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
struct axidma_channel *chan;
struct axidma_softc *sc;
sc = device_get_softc(dev);
chan = (struct axidma_channel *)xchan->chan;
switch (cmd) {
case XDMA_CMD_BEGIN:
case XDMA_CMD_TERMINATE:
case XDMA_CMD_PAUSE:
/* TODO: implement me */
return (-1);
}
return (0);
}
#ifdef FDT
static int
axidma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{
struct axidma_fdt_data *data;
if (ncells != 1)
return (-1);
data = malloc(sizeof(struct axidma_fdt_data),
M_DEVBUF, (M_WAITOK | M_ZERO));
data->id = cells[0];
*ptr = data;
return (0);
}
#endif
static device_method_t axidma_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, axidma_probe),
DEVMETHOD(device_attach, axidma_attach),
DEVMETHOD(device_detach, axidma_detach),
/* xDMA Interface */
DEVMETHOD(xdma_channel_alloc, axidma_channel_alloc),
DEVMETHOD(xdma_channel_free, axidma_channel_free),
DEVMETHOD(xdma_channel_control, axidma_channel_control),
/* xDMA SG Interface */
DEVMETHOD(xdma_channel_capacity, axidma_channel_capacity),
DEVMETHOD(xdma_channel_prep_sg, axidma_channel_prep_sg),
DEVMETHOD(xdma_channel_submit_sg, axidma_channel_submit_sg),
#ifdef FDT
DEVMETHOD(xdma_ofw_md_data, axidma_ofw_md_data),
#endif
DEVMETHOD_END
};
static driver_t axidma_driver = {
"axidma",
axidma_methods,
sizeof(struct axidma_softc),
};
static devclass_t axidma_devclass;
EARLY_DRIVER_MODULE(axidma, simplebus, axidma_driver, axidma_devclass, 0, 0,
BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
Index: head/sys/dev/xilinx/axidma.h
===================================================================
--- head/sys/dev/xilinx/axidma.h (revision 357652)
+++ head/sys/dev/xilinx/axidma.h (revision 357653)
@@ -1,96 +1,96 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Ruslan Bukin
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
* Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
* DARPA SSITH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_XILINX_AXIDMA_H_
#define _DEV_XILINX_AXIDMA_H_
#define AXI_DMACR(n) (0x00 + 0x30 * (n)) /* DMA Control register */
#define DMACR_RS (1 << 0) /* Run / Stop. */
#define DMACR_RESET (1 << 2) /* Soft reset the AXI DMA core. */
#define DMACR_IOC_IRQEN (1 << 12) /* Interrupt on Complete (IOC) Interrupt Enable. */
#define DMACR_DLY_IRQEN (1 << 13) /* Interrupt on Delay Timer Interrupt Enable. */
#define DMACR_ERR_IRQEN (1 << 14) /* Interrupt on Error Interrupt Enable. */
#define AXI_DMASR(n) (0x04 + 0x30 * (n)) /* DMA Status register */
#define DMASR_HALTED (1 << 0)
#define DMASR_IDLE (1 << 1)
#define DMASR_SGINCLD (1 << 3) /* Scatter Gather Enabled */
#define DMASR_DMAINTERR (1 << 4) /* DMA Internal Error. */
#define DMASR_DMASLVERR (1 << 5) /* DMA Slave Error. */
#define DMASR_DMADECOREERR (1 << 6) /* Decode Error. */
#define DMASR_SGINTERR (1 << 8) /* Scatter Gather Internal Error. */
#define DMASR_SGSLVERR (1 << 9) /* Scatter Gather Slave Error. */
#define DMASR_SGDECERR (1 << 10) /* Scatter Gather Decode Error. */
#define DMASR_IOC_IRQ (1 << 12) /* Interrupt on Complete. */
#define DMASR_DLY_IRQ (1 << 13) /* Interrupt on Delay. */
#define DMASR_ERR_IRQ (1 << 14) /* Interrupt on Error. */
#define AXI_CURDESC(n) (0x08 + 0x30 * (n)) /* Current Descriptor Pointer. Lower 32 bits of the address. */
#define AXI_CURDESC_MSB(n) (0x0C + 0x30 * (n)) /* Current Descriptor Pointer. Upper 32 bits of address. */
#define AXI_TAILDESC(n) (0x10 + 0x30 * (n)) /* Tail Descriptor Pointer. Lower 32 bits. */
#define AXI_TAILDESC_MSB(n) (0x14 + 0x30 * (n)) /* Tail Descriptor Pointer. Upper 32 bits of address. */
#define AXI_SG_CTL 0x2C /* Scatter/Gather User and Cache */
-#define READ4(_sc, _reg) \
- bus_space_read_4(_sc->bst, _sc->bsh, _reg)
-#define WRITE4(_sc, _reg, _val) \
- bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)
-#define READ8(_sc, _reg) \
- bus_space_read_8(_sc->bst, _sc->bsh, _reg)
-#define WRITE8(_sc, _reg, _val) \
- bus_space_write_8(_sc->bst, _sc->bsh, _reg, _val)
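+/* The engine provides two channels (TX and RX), each with a descriptor ring. */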
+#define AXIDMA_NCHANNELS 2
+#define AXIDMA_DESCS_NUM 512
+#define AXIDMA_TX_CHAN 0
+#define AXIDMA_RX_CHAN 1
struct axidma_desc {
uint32_t next;
uint32_t reserved1;
uint32_t phys;
uint32_t reserved2;
uint32_t reserved3;
uint32_t reserved4;
uint32_t control;
#define BD_CONTROL_TXSOF (1 << 27) /* Start of Frame. */
#define BD_CONTROL_TXEOF (1 << 26) /* End of Frame. */
#define BD_CONTROL_LEN_S 0 /* Buffer Length. */
#define BD_CONTROL_LEN_M (0x3ffffff << BD_CONTROL_LEN_S)
uint32_t status;
#define BD_STATUS_CMPLT (1 << 31)
#define BD_STATUS_TRANSFERRED_S 0
#define BD_STATUS_TRANSFERRED_M (0x7fffff << BD_STATUS_TRANSFERRED_S)
uint32_t app0;
uint32_t app1;
uint32_t app2;
uint32_t app3;
uint32_t app4;
uint32_t reserved[3];
+};
+
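+/* Per-channel data attached to xdma_controller->data; id selects TX or RX. */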
+struct axidma_fdt_data {
+ int id;
};
#endif /* !_DEV_XILINX_AXIDMA_H_ */
Index: head/sys/dev/xilinx/if_xae.c
===================================================================
--- head/sys/dev/xilinx/if_xae.c (revision 357652)
+++ head/sys/dev/xilinx/if_xae.c (revision 357653)
@@ -1,1097 +1,1162 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Ruslan Bukin
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
* Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
* DARPA SSITH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
+#include <dev/xilinx/axidma.h>
+
#include "miibus_if.h"
#define READ4(_sc, _reg) \
bus_read_4((_sc)->res[0], _reg)
#define WRITE4(_sc, _reg, _val) \
bus_write_4((_sc)->res[0], _reg, _val)
#define READ8(_sc, _reg) \
bus_read_8((_sc)->res[0], _reg)
#define WRITE8(_sc, _reg, _val) \
bus_write_8((_sc)->res[0], _reg, _val)
#define XAE_LOCK(sc) mtx_lock(&(sc)->mtx)
#define XAE_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
#define XAE_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
#define XAE_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)
#define XAE_DEBUG
#undef XAE_DEBUG
#ifdef XAE_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
#define RX_QUEUE_SIZE 64
#define TX_QUEUE_SIZE 64
#define NUM_RX_MBUF 16
#define BUFRING_SIZE 8192
#define MDIO_CLK_DIV_DEFAULT 29
#define PHY1_RD(sc, _r) \
xae_miibus_read_reg(sc->dev, 1, _r)
#define PHY1_WR(sc, _r, _v) \
xae_miibus_write_reg(sc->dev, 1, _r, _v)
#define PHY_RD(sc, _r) \
xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define PHY_WR(sc, _r, _v) \
xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)
/* Use this macro to access regs > 0x1f */
#define WRITE_TI_EREG(sc, reg, data) { \
PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK); \
PHY_WR(sc, MII_MMDAADR, reg); \
PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI); \
PHY_WR(sc, MII_MMDAADR, data); \
}
/* Not documented, Xilinx VCU118 workaround */
#define CFG4_SGMII_TMR 0x160 /* bits 8:7 MUST be '10' */
#define DP83867_SGMIICTL1 0xD3 /* not documented register */
#define SGMIICTL1_SGMII_6W (1 << 14) /* no idea what it is */
static struct resource_spec xae_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ -1, 0 }
};
static void xae_stop_locked(struct xae_softc *sc);
static void xae_setup_rxfilter(struct xae_softc *sc);
static int
xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
{
struct mbuf *m;
int i;
for (i = 0; i < n; i++) {
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->dev,
"%s: Can't alloc rx mbuf\n", __func__);
return (-1);
}
m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
}
return (0);
}
static int
xae_get_phyaddr(phandle_t node, int *phy_addr)
{
phandle_t phy_node;
pcell_t phy_handle, phy_reg;
if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
sizeof(phy_handle)) <= 0)
return (ENXIO);
phy_node = OF_node_from_xref(phy_handle);
if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
sizeof(phy_reg)) <= 0)
return (ENXIO);
*phy_addr = phy_reg;
return (0);
}
static int
xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
xdma_transfer_status_t st;
struct xae_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
int err;
sc = arg;
XAE_LOCK(sc);
ifp = sc->ifp;
for (;;) {
err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
if (err != 0) {
break;
}
if (st.error != 0) {
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
m_freem(m);
}
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
XAE_UNLOCK(sc);
return (0);
}
static int
xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
xdma_transfer_status_t st;
struct xae_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
int err;
uint32_t cnt_processed;
sc = arg;
dprintf("%s\n", __func__);
XAE_LOCK(sc);
ifp = sc->ifp;
cnt_processed = 0;
for (;;) {
err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
if (err != 0) {
break;
}
cnt_processed++;
if (st.error != 0) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
m_freem(m);
continue;
}
m->m_pkthdr.len = m->m_len = st.transferred;
m->m_pkthdr.rcvif = ifp;
XAE_UNLOCK(sc);
(*ifp->if_input)(ifp, m);
XAE_LOCK(sc);
}
xae_rx_enqueue(sc, cnt_processed);
XAE_UNLOCK(sc);
return (0);
}
static void
xae_qflush(struct ifnet *ifp)
{
struct xae_softc *sc;
sc = ifp->if_softc;
}
static int
xae_transmit_locked(struct ifnet *ifp)
{
struct xae_softc *sc;
struct mbuf *m;
struct buf_ring *br;
int error;
int enq;
dprintf("%s\n", __func__);
sc = ifp->if_softc;
br = sc->br;
enq = 0;
while ((m = drbr_peek(ifp, br)) != NULL) {
error = xdma_enqueue_mbuf(sc->xchan_tx,
&m, 0, 4, 4, XDMA_MEM_TO_DEV);
if (error != 0) {
/* No space in request queue available yet. */
drbr_putback(ifp, br, m);
break;
}
drbr_advance(ifp, br);
enq++;
/* If anyone is interested give them a copy. */
ETHER_BPF_MTAP(ifp, m);
}
if (enq > 0)
xdma_queue_submit(sc->xchan_tx);
return (0);
}
static int
xae_transmit(struct ifnet *ifp, struct mbuf *m)
{
struct xae_softc *sc;
int error;
dprintf("%s\n", __func__);
sc = ifp->if_softc;
XAE_LOCK(sc);
error = drbr_enqueue(ifp, sc->br, m);
if (error) {
XAE_UNLOCK(sc);
return (error);
}
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING) {
XAE_UNLOCK(sc);
return (0);
}
if (!sc->link_is_up) {
XAE_UNLOCK(sc);
return (0);
}
error = xae_transmit_locked(ifp);
XAE_UNLOCK(sc);
return (error);
}
static void
xae_stop_locked(struct xae_softc *sc)
{
struct ifnet *ifp;
uint32_t reg;
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
callout_stop(&sc->xae_callout);
/* Stop the transmitter */
reg = READ4(sc, XAE_TC);
reg &= ~TC_TX;
WRITE4(sc, XAE_TC, reg);
/* Stop the receiver. */
reg = READ4(sc, XAE_RCW1);
reg &= ~RCW1_RX;
WRITE4(sc, XAE_RCW1, reg);
}
static uint64_t
xae_stat(struct xae_softc *sc, int counter_id)
{
uint64_t new, old;
uint64_t delta;
KASSERT(counter_id < XAE_MAX_COUNTERS,
("counter %d is out of range", counter_id));
new = READ8(sc, XAE_STATCNT(counter_id));
old = sc->counters[counter_id];
if (new >= old)
delta = new - old;
else
delta = UINT64_MAX - old + new;
sc->counters[counter_id] = new;
return (delta);
}
static void
xae_harvest_stats(struct xae_softc *sc)
{
struct ifnet *ifp;
ifp = sc->ifp;
if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
if_inc_counter(ifp, IFCOUNTER_IERRORS,
xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
xae_stat(sc, RX_ALIGNMENT_ERRORS));
if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
if_inc_counter(ifp, IFCOUNTER_OERRORS,
xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));
if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
xae_stat(sc, TX_LATE_COLLISIONS) +
xae_stat(sc, TX_EXCESS_COLLISIONS));
}
static void
xae_tick(void *arg)
{
struct xae_softc *sc;
struct ifnet *ifp;
int link_was_up;
sc = arg;
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
/* Gather stats from hardware counters. */
xae_harvest_stats(sc);
/* Check the media status. */
link_was_up = sc->link_is_up;
mii_tick(sc->mii_softc);
if (sc->link_is_up && !link_was_up)
xae_transmit_locked(sc->ifp);
/* Schedule another check one second from now. */
callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}
static void
xae_init_locked(struct xae_softc *sc)
{
struct ifnet *ifp;
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return;
ifp->if_drv_flags |= IFF_DRV_RUNNING;
xae_setup_rxfilter(sc);
/* Enable the transmitter */
WRITE4(sc, XAE_TC, TC_TX);
/* Enable the receiver. */
WRITE4(sc, XAE_RCW1, RCW1_RX);
/*
* Call mii_mediachg() which will call back into xae_miibus_statchg()
* to set up the remaining config registers based on current media.
*/
mii_mediachg(sc->mii_softc);
callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}
static void
xae_init(void *arg)
{
struct xae_softc *sc;
sc = arg;
XAE_LOCK(sc);
xae_init_locked(sc);
XAE_UNLOCK(sc);
}
static void
xae_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
{
struct xae_softc *sc;
struct mii_data *mii;
sc = ifp->if_softc;
mii = sc->mii_softc;
XAE_LOCK(sc);
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
XAE_UNLOCK(sc);
}
static int
xae_media_change_locked(struct xae_softc *sc)
{
return (mii_mediachg(sc->mii_softc));
}
static int
xae_media_change(struct ifnet * ifp)
{
struct xae_softc *sc;
int error;
sc = ifp->if_softc;
XAE_LOCK(sc);
error = xae_media_change_locked(sc);
XAE_UNLOCK(sc);
return (error);
}
static u_int
xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
struct xae_softc *sc = arg;
uint32_t reg;
uint8_t *ma;
if (cnt >= XAE_MULTICAST_TABLE_SIZE)
return (1);
ma = LLADDR(sdl);
reg = READ4(sc, XAE_FFC) & 0xffffff00;
reg |= cnt;
WRITE4(sc, XAE_FFC, reg);
reg = (ma[0]);
reg |= (ma[1] << 8);
reg |= (ma[2] << 16);
reg |= (ma[3] << 24);
WRITE4(sc, XAE_FFV(0), reg);
reg = ma[4];
reg |= ma[5] << 8;
WRITE4(sc, XAE_FFV(1), reg);
return (1);
}
static void
xae_setup_rxfilter(struct xae_softc *sc)
{
struct ifnet *ifp;
uint32_t reg;
XAE_ASSERT_LOCKED(sc);
ifp = sc->ifp;
/*
* Set the multicast (group) filter hash.
*/
if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
reg = READ4(sc, XAE_FFC);
reg |= FFC_PM;
WRITE4(sc, XAE_FFC, reg);
} else {
reg = READ4(sc, XAE_FFC);
reg &= ~FFC_PM;
WRITE4(sc, XAE_FFC, reg);
if_foreach_llmaddr(ifp, xae_write_maddr, sc);
}
/*
* Set the primary address.
*/
reg = sc->macaddr[0];
reg |= (sc->macaddr[1] << 8);
reg |= (sc->macaddr[2] << 16);
reg |= (sc->macaddr[3] << 24);
WRITE4(sc, XAE_UAW0, reg);
reg = sc->macaddr[4];
reg |= (sc->macaddr[5] << 8);
WRITE4(sc, XAE_UAW1, reg);
}
static int
xae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct xae_softc *sc;
struct mii_data *mii;
struct ifreq *ifr;
int mask, error;
sc = ifp->if_softc;
ifr = (struct ifreq *)data;
error = 0;
switch (cmd) {
case SIOCSIFFLAGS:
XAE_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if ((ifp->if_flags ^ sc->if_flags) &
(IFF_PROMISC | IFF_ALLMULTI))
xae_setup_rxfilter(sc);
} else {
if (!sc->is_detaching)
xae_init_locked(sc);
}
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
xae_stop_locked(sc);
}
sc->if_flags = ifp->if_flags;
XAE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
XAE_LOCK(sc);
xae_setup_rxfilter(sc);
XAE_UNLOCK(sc);
}
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
mii = sc->mii_softc;
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
break;
case SIOCSIFCAP:
mask = ifp->if_capenable ^ ifr->ifr_reqcap;
if (mask & IFCAP_VLAN_MTU) {
/* No work to do except acknowledge the change took */
ifp->if_capenable ^= IFCAP_VLAN_MTU;
}
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
return (error);
}
static void
xae_intr(void *arg)
{
}
static int
xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
{
phandle_t node;
int len;
node = ofw_bus_get_node(sc->dev);
/* Check if there is property */
if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
return (EINVAL);
if (len != ETHER_ADDR_LEN)
return (EINVAL);
OF_getprop(node, "local-mac-address", hwaddr,
ETHER_ADDR_LEN);
return (0);
}
static int
mdio_wait(struct xae_softc *sc)
{
uint32_t reg;
int timeout;
timeout = 200;
do {
reg = READ4(sc, XAE_MDIO_CTRL);
if (reg & MDIO_CTRL_READY)
break;
DELAY(1);
} while (timeout--);
if (timeout <= 0) {
printf("Failed to get MDIO ready\n");
return (1);
}
return (0);
}
static int
xae_miibus_read_reg(device_t dev, int phy, int reg)
{
struct xae_softc *sc;
uint32_t mii;
int rv;
sc = device_get_softc(dev);
if (mdio_wait(sc))
return (0);
mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
mii |= (reg << MDIO_TX_REGAD_S);
mii |= (phy << MDIO_TX_PHYAD_S);
WRITE4(sc, XAE_MDIO_CTRL, mii);
if (mdio_wait(sc))
return (0);
rv = READ4(sc, XAE_MDIO_READ);
return (rv);
}
static int
xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
struct xae_softc *sc;
uint32_t mii;
sc = device_get_softc(dev);
if (mdio_wait(sc))
return (1);
mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
mii |= (reg << MDIO_TX_REGAD_S);
mii |= (phy << MDIO_TX_PHYAD_S);
WRITE4(sc, XAE_MDIO_WRITE, val);
WRITE4(sc, XAE_MDIO_CTRL, mii);
if (mdio_wait(sc))
return (1);
return (0);
}
static void
xae_phy_fixup(struct xae_softc *sc)
{
uint32_t reg;
device_t dev;
dev = sc->dev;
do {
WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);
reg = PHY_RD(sc, DP83867_CFG2);
reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
reg |= CFG2_INTERRUPT_POLARITY;
reg |= CFG2_SPEED_OPT_ENHANCED_EN;
reg |= CFG2_SPEED_OPT_10M_EN;
PHY_WR(sc, DP83867_CFG2, reg);
WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
PHY_WR(sc, MII_BMCR,
BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);
do {
PHY1_WR(sc, MII_BMCR,
BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
DELAY(40000);
} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
}
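+/*
+ * Get the TX/RX xDMA controllers via the standard "dmas"/"dma-names"
+ * device tree properties.
+ */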
static int
+get_xdma_std(struct xae_softc *sc)
+{
+
+ sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
+ if (sc->xdma_tx == NULL)
+ return (ENXIO);
+
+ sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
+ if (sc->xdma_rx == NULL) {
+ xdma_put(sc->xdma_tx);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
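+/*
+ * Fallback for device trees without "dmas": locate the AXI DMA device through
+ * the "axistream-connected" property and set up per-channel data by hand.
+ */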
+static int
+get_xdma_axistream(struct xae_softc *sc)
+{
+ struct axidma_fdt_data *data;
+ device_t dma_dev;
+ phandle_t node;
+ pcell_t prop;
+ size_t len;
+
+ node = ofw_bus_get_node(sc->dev);
+ len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
+ if (len != sizeof(prop)) {
+ device_printf(sc->dev,
+ "%s: Couldn't get axistream-connected prop.\n", __func__);
+ return (ENXIO);
+ }
+ dma_dev = OF_device_from_xref(prop);
+ if (dma_dev == NULL) {
+ device_printf(sc->dev, "Could not get DMA device by xref.\n");
+ return (ENXIO);
+ }
+
+ sc->xdma_tx = xdma_get(sc->dev, dma_dev);
+ if (sc->xdma_tx == NULL) {
+ device_printf(sc->dev, "Could not find DMA controller.\n");
+ return (ENXIO);
+ }
+ data = malloc(sizeof(struct axidma_fdt_data),
+ M_DEVBUF, (M_WAITOK | M_ZERO));
+ data->id = AXIDMA_TX_CHAN;
+ sc->xdma_tx->data = data;
+
+ sc->xdma_rx = xdma_get(sc->dev, dma_dev);
+ if (sc->xdma_rx == NULL) {
+ device_printf(sc->dev, "Could not find DMA controller.\n");
+ return (ENXIO);
+ }
+ data = malloc(sizeof(struct axidma_fdt_data),
+ M_DEVBUF, (M_WAITOK | M_ZERO));
+ data->id = AXIDMA_RX_CHAN;
+ sc->xdma_rx->data = data;
+
+ return (0);
+}
+
+static int
setup_xdma(struct xae_softc *sc)
{
device_t dev;
vmem_t *vmem;
int error;
dev = sc->dev;
/* Get xDMA controller */
- sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
- if (sc->xdma_tx == NULL) {
- device_printf(dev, "Could not find DMA controller.\n");
- return (ENXIO);
+ error = get_xdma_std(sc);
+
+ if (error) {
+ device_printf(sc->dev,
+ "Fallback to axistream-connected property\n");
+ error = get_xdma_axistream(sc);
}
- sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
- if (sc->xdma_rx == NULL) {
- device_printf(dev, "Could not find DMA controller.\n");
+ if (error) {
+ device_printf(dev, "Could not find xDMA controllers.\n");
return (ENXIO);
}
/* Alloc xDMA TX virtual channel. */
sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
if (sc->xchan_tx == NULL) {
device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
return (ENXIO);
}
/* Setup interrupt handler. */
error = xdma_setup_intr(sc->xchan_tx,
xae_xdma_tx_intr, sc, &sc->ih_tx);
if (error) {
device_printf(sc->dev,
"Can't setup xDMA TX interrupt handler.\n");
return (ENXIO);
}
/* Alloc xDMA RX virtual channel. */
sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
if (sc->xchan_rx == NULL) {
device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
return (ENXIO);
}
/* Setup interrupt handler. */
error = xdma_setup_intr(sc->xchan_rx,
xae_xdma_rx_intr, sc, &sc->ih_rx);
if (error) {
device_printf(sc->dev,
"Can't setup xDMA RX interrupt handler.\n");
return (ENXIO);
}
/* Setup bounce buffer */
vmem = xdma_get_memory(dev);
if (vmem) {
xchan_set_memory(sc->xchan_tx, vmem);
xchan_set_memory(sc->xchan_rx, vmem);
}
xdma_prep_sg(sc->xchan_tx,
TX_QUEUE_SIZE, /* xchan requests queue size */
MCLBYTES, /* maxsegsize */
8, /* maxnsegs */
16, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR);
xdma_prep_sg(sc->xchan_rx,
RX_QUEUE_SIZE, /* xchan requests queue size */
MCLBYTES, /* maxsegsize */
1, /* maxnsegs */
16, /* alignment */
0, /* boundary */
BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR);
return (0);
}
static int
xae_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
return (ENXIO);
device_set_desc(dev, "Xilinx AXI Ethernet");
return (BUS_PROBE_DEFAULT);
}
static int
xae_attach(device_t dev)
{
struct xae_softc *sc;
struct ifnet *ifp;
phandle_t node;
uint32_t reg;
int error;
sc = device_get_softc(dev);
sc->dev = dev;
node = ofw_bus_get_node(dev);
if (setup_xdma(sc) != 0) {
device_printf(dev, "Could not setup xDMA.\n");
return (ENXIO);
}
mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
MTX_NETWORK_LOCK, MTX_DEF);
sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
M_NOWAIT, &sc->mtx);
if (sc->br == NULL)
return (ENOMEM);
if (bus_alloc_resources(dev, xae_spec, sc->res)) {
device_printf(dev, "could not allocate resources\n");
return (ENXIO);
}
/* Memory interface */
sc->bst = rman_get_bustag(sc->res[0]);
sc->bsh = rman_get_bushandle(sc->res[0]);
device_printf(sc->dev, "Identification: %x\n",
READ4(sc, XAE_IDENT));
/* Get MAC addr */
if (xae_get_hwaddr(sc, sc->macaddr)) {
device_printf(sc->dev, "can't get mac\n");
return (ENXIO);
}
/* Enable MII clock */
reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
reg |= MDIO_SETUP_ENABLE;
WRITE4(sc, XAE_MDIO_SETUP, reg);
if (mdio_wait(sc))
return (ENXIO);
callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);
/* Setup interrupt handler. */
error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
NULL, xae_intr, sc, &sc->intr_cookie);
if (error != 0) {
device_printf(dev, "could not setup interrupt handler.\n");
return (ENXIO);
}
/* Set up the ethernet interface. */
sc->ifp = ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "could not allocate ifp.\n");
return (ENXIO);
}
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_capabilities = IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
ifp->if_transmit = xae_transmit;
ifp->if_qflush = xae_qflush;
ifp->if_ioctl = xae_ioctl;
ifp->if_init = xae_init;
IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1);
ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1;
IFQ_SET_READY(&ifp->if_snd);
if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
return (ENXIO);
/* Attach the mii driver. */
error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
MII_OFFSET_ANY, 0);
if (error != 0) {
device_printf(dev, "PHY attach failed\n");
return (ENXIO);
}
sc->mii_softc = device_get_softc(sc->miibus);
/* Apply vcu118 workaround. */
if (OF_getproplen(node, "xlnx,vcu118") >= 0)
xae_phy_fixup(sc);
/* All ready to run, attach the ethernet interface. */
ether_ifattach(ifp, sc->macaddr);
sc->is_attached = true;
xae_rx_enqueue(sc, NUM_RX_MBUF);
xdma_queue_submit(sc->xchan_rx);
return (0);
}
static int
xae_detach(device_t dev)
{
struct xae_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
device_get_nameunit(dev)));
ifp = sc->ifp;
/* Only cleanup if attach succeeded. */
if (device_is_attached(dev)) {
XAE_LOCK(sc);
xae_stop_locked(sc);
XAE_UNLOCK(sc);
callout_drain(&sc->xae_callout);
ether_ifdetach(ifp);
}
if (sc->miibus != NULL)
device_delete_child(dev, sc->miibus);
if (ifp != NULL)
if_free(ifp);
mtx_destroy(&sc->mtx);
bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
bus_release_resources(dev, xae_spec, sc->res);
xdma_channel_free(sc->xchan_tx);
xdma_channel_free(sc->xchan_rx);
xdma_put(sc->xdma_tx);
xdma_put(sc->xdma_rx);
return (0);
}
static void
xae_miibus_statchg(device_t dev)
{
struct xae_softc *sc;
struct mii_data *mii;
uint32_t reg;
/*
* Called by the MII bus driver when the PHY establishes
* link to set the MAC interface registers.
*/
sc = device_get_softc(dev);
XAE_ASSERT_LOCKED(sc);
mii = sc->mii_softc;
if (mii->mii_media_status & IFM_ACTIVE)
sc->link_is_up = true;
else
sc->link_is_up = false;
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_1000_T:
case IFM_1000_SX:
reg = SPEED_1000;
break;
case IFM_100_TX:
reg = SPEED_100;
break;
case IFM_10_T:
reg = SPEED_10;
break;
case IFM_NONE:
sc->link_is_up = false;
return;
default:
sc->link_is_up = false;
device_printf(dev, "Unsupported media %u\n",
IFM_SUBTYPE(mii->mii_media_active));
return;
}
WRITE4(sc, XAE_SPEED, reg);
}
static device_method_t xae_methods[] = {
DEVMETHOD(device_probe, xae_probe),
DEVMETHOD(device_attach, xae_attach),
DEVMETHOD(device_detach, xae_detach),
/* MII Interface */
DEVMETHOD(miibus_readreg, xae_miibus_read_reg),
DEVMETHOD(miibus_writereg, xae_miibus_write_reg),
DEVMETHOD(miibus_statchg, xae_miibus_statchg),
{ 0, 0 }
};
driver_t xae_driver = {
"xae",
xae_methods,
sizeof(struct xae_softc),
};
static devclass_t xae_devclass;
DRIVER_MODULE(xae, simplebus, xae_driver, xae_devclass, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);