diff --git a/sys/dev/dpaa2/dpaa2_bp.c b/sys/dev/dpaa2/dpaa2_bp.c index 78e1ca68cdb1..51f708422257 100644 --- a/sys/dev/dpaa2/dpaa2_bp.c +++ b/sys/dev/dpaa2/dpaa2_bp.c @@ -1,205 +1,219 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * The DPAA2 Buffer Pool (DPBP) driver. * * The DPBP configures a buffer pool that can be associated with DPAA2 network * and accelerator interfaces. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_swp_if.h" #include "dpaa2_cmd_if.h" /* DPAA2 Buffer Pool resource specification. */ struct resource_spec dpaa2_bp_spec[] = { /* * DPMCP resources. * * NOTE: MC command portals (MCPs) are used to send commands to, and * receive responses from, the MC firmware. One portal per DPBP. */ #define MCP_RES_NUM (1u) #define MCP_RID_OFF (0u) #define MCP_RID(rid) ((rid) + MCP_RID_OFF) /* --- */ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* --- */ RESOURCE_SPEC_END }; static int dpaa2_bp_probe(device_t dev) { /* DPBP device will be added by the parent resource container. */ device_set_desc(dev, "DPAA2 Buffer Pool"); return (BUS_PROBE_DEFAULT); } static int dpaa2_bp_detach(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_bp_softc *sc = device_get_softc(dev); + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, bp_token; + int error; - if (sc->cmd != NULL) { - (void)DPAA2_CMD_BP_DISABLE(dev, child, sc->cmd); - (void)DPAA2_CMD_BP_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->bp_token)); - (void)DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->rc_token)); + DPAA2_CMD_INIT(&cmd); - dpaa2_mcp_free_command(sc->cmd); - sc->cmd = NULL; + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open DPRC: error=%d\n", + __func__, error); + goto err_exit; } + error = DPAA2_CMD_BP_OPEN(dev, child, &cmd, dinfo->id, &bp_token); + if (error) { + device_printf(dev, "%s: failed to open DPBP: id=%d, error=%d\n", + __func__, dinfo->id, error); + goto close_rc; + } + (void)DPAA2_CMD_BP_DISABLE(dev, child, &cmd); + 
(void)DPAA2_CMD_BP_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); dinfo->portal = NULL; bus_release_resources(sc->dev, dpaa2_bp_spec, sc->res); return (0); + +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (ENXIO); } static int dpaa2_bp_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; device_t mcp_dev; struct dpaa2_bp_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *mcp_dinfo; + struct dpaa2_cmd cmd; + uint16_t rc_token, bp_token; int error; sc->dev = dev; - sc->cmd = NULL; error = bus_alloc_resources(sc->dev, dpaa2_bp_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources: " "error=%d\n", __func__, error); - return (ENXIO); + goto err_exit; } /* Send commands to MC via allocated portal. */ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); mcp_dinfo = device_get_ivars(mcp_dev); dinfo->portal = mcp_dinfo->portal; - /* Allocate a command to send to MC hardware. */ - error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF); - if (error) { - device_printf(dev, "%s: failed to allocate dpaa2_cmd: " - "error=%d\n", __func__, error); - dpaa2_bp_detach(dev); - return (ENXIO); - } + DPAA2_CMD_INIT(&cmd); - /* Open resource container and DPBP object. 
*/ - error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id, - &sc->rc_token); + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open DPRC: error=%d\n", __func__, error); - dpaa2_bp_detach(dev); - return (ENXIO); + goto detach; } - error = DPAA2_CMD_BP_OPEN(dev, child, sc->cmd, dinfo->id, &sc->bp_token); + error = DPAA2_CMD_BP_OPEN(dev, child, &cmd, dinfo->id, &bp_token); if (error) { device_printf(dev, "%s: failed to open DPBP: id=%d, error=%d\n", __func__, dinfo->id, error); - dpaa2_bp_detach(dev); - return (ENXIO); + goto close_rc; } - /* Prepare DPBP object. */ - error = DPAA2_CMD_BP_RESET(dev, child, sc->cmd); + error = DPAA2_CMD_BP_RESET(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to reset DPBP: id=%d, error=%d\n", __func__, dinfo->id, error); - dpaa2_bp_detach(dev); - return (ENXIO); + goto close_bp; } - error = DPAA2_CMD_BP_ENABLE(dev, child, sc->cmd); + error = DPAA2_CMD_BP_ENABLE(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to enable DPBP: id=%d, " "error=%d\n", __func__, dinfo->id, error); - dpaa2_bp_detach(dev); - return (ENXIO); + goto close_bp; } - error = DPAA2_CMD_BP_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr); + error = DPAA2_CMD_BP_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); if (error) { device_printf(dev, "%s: failed to get DPBP attributes: id=%d, " "error=%d\n", __func__, dinfo->id, error); - dpaa2_bp_detach(dev); - return (ENXIO); + goto close_bp; } + (void)DPAA2_CMD_BP_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, bp_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_bp: + (void)DPAA2_CMD_BP_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, bp_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +detach: + dpaa2_bp_detach(dev); +err_exit: + return (ENXIO); } static device_method_t dpaa2_bp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_bp_probe), 
DEVMETHOD(device_attach, dpaa2_bp_attach), DEVMETHOD(device_detach, dpaa2_bp_detach), DEVMETHOD_END }; static driver_t dpaa2_bp_driver = { "dpaa2_bp", dpaa2_bp_methods, sizeof(struct dpaa2_bp_softc), }; DRIVER_MODULE(dpaa2_bp, dpaa2_rc, dpaa2_bp_driver, 0, 0); diff --git a/sys/dev/dpaa2/dpaa2_bp.h b/sys/dev/dpaa2/dpaa2_bp.h index 3ba7196eb030..d05cb399cc17 100644 --- a/sys/dev/dpaa2/dpaa2_bp.h +++ b/sys/dev/dpaa2/dpaa2_bp.h @@ -1,74 +1,68 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_BP_H #define _DPAA2_BP_H #include /* Maximum resources per DPBP: 1 DPMCP. */ #define DPAA2_BP_MAX_RESOURCES 1 /** * @brief Attributes of the DPBP object. * * id: DPBP object ID. 
* bpid: Hardware buffer pool ID; should be used as an argument in * acquire/release operations on buffers. */ struct dpaa2_bp_attr { uint32_t id; uint16_t bpid; }; /** * @brief Configuration/state of the buffer pool. */ struct dpaa2_bp_conf { uint8_t bdi; uint8_t state; /* bitmask */ uint32_t free_bufn; }; /** * @brief Software context for the DPAA2 Buffer Pool driver. */ struct dpaa2_bp_softc { device_t dev; struct dpaa2_bp_attr attr; - - /* Help to send commands to MC. */ - struct dpaa2_cmd *cmd; - uint16_t rc_token; - uint16_t bp_token; - struct resource *res[DPAA2_BP_MAX_RESOURCES]; }; extern struct resource_spec dpaa2_bp_spec[]; #endif /* _DPAA2_BP_H */ diff --git a/sys/dev/dpaa2/dpaa2_con.c b/sys/dev/dpaa2/dpaa2_con.c index 602497c2c8de..993cdb2fe29d 100644 --- a/sys/dev/dpaa2/dpaa2_con.c +++ b/sys/dev/dpaa2/dpaa2_con.c @@ -1,213 +1,196 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * The DPAA2 Concentrator (DPCON) driver. * * Supports configuration of QBMan channels for advanced scheduling of ingress * packets from one or more network interfaces. * * DPCONs are used to distribute Rx or Tx Confirmation traffic to different * cores, via affine DPIO objects. The implication is that one DPCON must be * available for each core where Rx or Tx Confirmation traffic should be * distributed to. * * QBMan channel contains several work queues. The WQs within a channel have a * priority relative to each other. Each channel consists of either eight or two * WQs, and thus, there are either eight or two possible priorities in a channel. */ #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_mc.h" #include "dpaa2_cmd_if.h" /* DPAA2 Concentrator resource specification. */ struct resource_spec dpaa2_con_spec[] = { /* * DPMCP resources. * * NOTE: MC command portals (MCPs) are used to send commands to, and * receive responses from, the MC firmware. One portal per DPCON. */ #define MCP_RES_NUM (1u) #define MCP_RID_OFF (0u) #define MCP_RID(rid) ((rid) + MCP_RID_OFF) /* --- */ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* --- */ RESOURCE_SPEC_END }; static int dpaa2_con_detach(device_t dev); /* * Device interface. 
*/ static int dpaa2_con_probe(device_t dev) { /* DPCON device will be added by a parent resource container itself. */ device_set_desc(dev, "DPAA2 Concentrator"); return (BUS_PROBE_DEFAULT); } static int dpaa2_con_detach(device_t dev) { - device_t child = dev; - struct dpaa2_con_softc *sc = device_get_softc(dev); - - DPAA2_CMD_CON_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->con_token)); - DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token)); - dpaa2_mcp_free_command(sc->cmd); - - sc->cmd = NULL; - sc->con_token = 0; - sc->rc_token = 0; - + /* TBD */ return (0); } static int dpaa2_con_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; device_t mcp_dev; struct dpaa2_con_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *mcp_dinfo; + struct dpaa2_cmd cmd; + uint16_t rc_token, con_token; int error; sc->dev = dev; error = bus_alloc_resources(sc->dev, dpaa2_con_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources: " "error=%d\n", __func__, error); - return (ENXIO); + goto err_exit; } /* Obtain MC portal. */ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); mcp_dinfo = device_get_ivars(mcp_dev); dinfo->portal = mcp_dinfo->portal; - /* Allocate a command to send to MC hardware. */ - error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF); - if (error) { - device_printf(dev, "Failed to allocate dpaa2_cmd: error=%d\n", - error); - goto err_exit; - } + DPAA2_CMD_INIT(&cmd); - /* Open resource container and DPCON object. 
*/ - error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id, - &sc->rc_token); + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { - device_printf(dev, "Failed to open DPRC: error=%d\n", error); - goto err_free_cmd; + device_printf(dev, "%s: failed to open DPRC: error=%d\n", + __func__, error); + goto err_exit; } - error = DPAA2_CMD_CON_OPEN(dev, child, sc->cmd, dinfo->id, - &sc->con_token); + error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, dinfo->id, &con_token); if (error) { - device_printf(dev, "Failed to open DPCON: id=%d, error=%d\n", - dinfo->id, error); - goto err_close_rc; + device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n", + __func__, dinfo->id, error); + goto close_rc; } - /* Prepare DPCON object. */ - error = DPAA2_CMD_CON_RESET(dev, child, sc->cmd); + error = DPAA2_CMD_CON_RESET(dev, child, &cmd); if (error) { - device_printf(dev, "Failed to reset DPCON: id=%d, error=%d\n", - dinfo->id, error); - goto err_close_con; + device_printf(dev, "%s: failed to reset DPCON: id=%d, " + "error=%d\n", __func__, dinfo->id, error); + goto close_con; } - error = DPAA2_CMD_CON_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr); + error = DPAA2_CMD_CON_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); if (error) { - device_printf(dev, "Failed to get DPCON attributes: id=%d, " - "error=%d\n", dinfo->id, error); - goto err_close_con; + device_printf(dev, "%s: failed to get DPCON attributes: id=%d, " + "error=%d\n", __func__, dinfo->id, error); + goto close_con; } - /* TODO: Enable debug output via sysctl (to reduce output). 
*/ - if (bootverbose) + if (bootverbose) { device_printf(dev, "chan_id=%d, priorities=%d\n", sc->attr.chan_id, sc->attr.prior_num); + } + (void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, con_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); - err_close_con: - DPAA2_CMD_CON_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->con_token)); - err_close_rc: - DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token)); - err_free_cmd: - dpaa2_mcp_free_command(sc->cmd); - err_exit: +close_con: + DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, con_token)); +close_rc: + DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: return (ENXIO); } static device_method_t dpaa2_con_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_con_probe), DEVMETHOD(device_attach, dpaa2_con_attach), DEVMETHOD(device_detach, dpaa2_con_detach), DEVMETHOD_END }; static driver_t dpaa2_con_driver = { "dpaa2_con", dpaa2_con_methods, sizeof(struct dpaa2_con_softc), }; DRIVER_MODULE(dpaa2_con, dpaa2_rc, dpaa2_con_driver, 0, 0); diff --git a/sys/dev/dpaa2/dpaa2_con.h b/sys/dev/dpaa2/dpaa2_con.h index 82fd50f4eaed..85a492935d46 100644 --- a/sys/dev/dpaa2/dpaa2_con.h +++ b/sys/dev/dpaa2/dpaa2_con.h @@ -1,70 +1,65 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_CON_H #define _DPAA2_CON_H #include #include #include #include "dpaa2_types.h" #include "dpaa2_mcp.h" /* Maximum resources per DPCON: 1 DPMCP. */ #define DPAA2_CON_MAX_RESOURCES 1 /** * @brief Attributes of the DPCON object. * * id: DPCON object ID. * chan_id: QBMan channel ID to be used for dequeue operations. * prior_num: Number of priorities for the DPCON channel (1-8). */ struct dpaa2_con_attr { uint32_t id; uint16_t chan_id; uint8_t prior_num; }; /** * @brief Software context for the DPAA2 Concentrator driver. */ struct dpaa2_con_softc { device_t dev; struct resource *res[DPAA2_CON_MAX_RESOURCES]; struct dpaa2_con_attr attr; - - /* Help to send commands to MC. 
*/ - struct dpaa2_cmd *cmd; - uint16_t rc_token; - uint16_t con_token; }; extern struct resource_spec dpaa2_con_spec[]; #endif /* _DPAA2_CON_H */ diff --git a/sys/dev/dpaa2/dpaa2_io.c b/sys/dev/dpaa2/dpaa2_io.c index e2b7992bfdb6..b644516308b2 100644 --- a/sys/dev/dpaa2/dpaa2_io.c +++ b/sys/dev/dpaa2/dpaa2_io.c @@ -1,570 +1,588 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * QBMan command interface and the DPAA2 I/O (DPIO) driver. * * The DPIO object allows configuration of the QBMan software portal with * optional notification capabilities. * * Software portals are used by the driver to communicate with the QBMan. 
The * DPIO object’s main purpose is to enable the driver to perform I/O – enqueue * and dequeue operations, as well as buffer release and acquire operations – * using QBMan. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_swp_if.h" #include "dpaa2_cmd_if.h" #include "dpaa2_io.h" #include "dpaa2_ni.h" #define DPIO_IRQ_INDEX 0 /* index of the only DPIO IRQ */ #define DPIO_POLL_MAX 32 /* * Memory: * 0: cache-enabled part of the QBMan software portal. * 1: cache-inhibited part of the QBMan software portal. * 2: control registers of the QBMan software portal? * * Note that MSI should be allocated separately using pseudo-PCI interface. */ struct resource_spec dpaa2_io_spec[] = { /* * System Memory resources. */ #define MEM_RES_NUM (3u) #define MEM_RID_OFF (0u) #define MEM_RID(rid) ((rid) + MEM_RID_OFF) { SYS_RES_MEMORY, MEM_RID(0), RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, MEM_RID(1), RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, MEM_RID(2), RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, /* * DPMCP resources. * * NOTE: MC command portals (MCPs) are used to send commands to, and * receive responses from, the MC firmware. One portal per DPIO. */ #define MCP_RES_NUM (1u) #define MCP_RID_OFF (MEM_RID_OFF + MEM_RES_NUM) #define MCP_RID(rid) ((rid) + MCP_RID_OFF) /* --- */ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* --- */ RESOURCE_SPEC_END }; /* Configuration routines. 
*/ static int dpaa2_io_setup_irqs(device_t dev); static int dpaa2_io_release_irqs(device_t dev); static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc); static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc); /* Interrupt handlers */ static void dpaa2_io_intr(void *arg); static int dpaa2_io_probe(device_t dev) { /* DPIO device will be added by a parent resource container itself. */ device_set_desc(dev, "DPAA2 I/O"); return (BUS_PROBE_DEFAULT); } static int dpaa2_io_detach(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_io_softc *sc = device_get_softc(dev); + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, io_token; int error; + DPAA2_CMD_INIT(&cmd); + /* Tear down interrupt handler and release IRQ resources. */ dpaa2_io_release_irqs(dev); /* Free software portal helper object. */ dpaa2_swp_free_portal(sc->swp); - /* Disable DPIO object. */ - error = DPAA2_CMD_IO_DISABLE(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->io_token)); - if (error && bootverbose) + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open DPRC: error=%d\n", + __func__, error); + goto err_exit; + } + error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token); + if (error) { + device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n", + __func__, dinfo->id, error); + goto close_rc; + } + + error = DPAA2_CMD_IO_DISABLE(dev, child, &cmd); + if (error && bootverbose) { device_printf(dev, "%s: failed to disable DPIO: id=%d, " "error=%d\n", __func__, dinfo->id, error); + } - /* Close control sessions with the DPAA2 objects. */ - DPAA2_CMD_IO_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->io_token)); - DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token)); - - /* Free pre-allocated MC command. 
*/ - dpaa2_mcp_free_command(sc->cmd); - sc->cmd = NULL; - sc->io_token = 0; - sc->rc_token = 0; + (void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); /* Unmap memory resources of the portal. */ for (int i = 0; i < MEM_RES_NUM; i++) { - if (sc->res[MEM_RID(i)] == NULL) + if (sc->res[MEM_RID(i)] == NULL) { continue; + } error = bus_unmap_resource(sc->dev, SYS_RES_MEMORY, sc->res[MEM_RID(i)], &sc->map[MEM_RID(i)]); - if (error && bootverbose) + if (error && bootverbose) { device_printf(dev, "%s: failed to unmap memory " "resource: rid=%d, error=%d\n", __func__, MEM_RID(i), error); + } } /* Release allocated resources. */ bus_release_resources(dev, dpaa2_io_spec, sc->res); return (0); + +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } static int dpaa2_io_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; device_t mcp_dev; struct dpaa2_io_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *mcp_dinfo; + struct dpaa2_cmd cmd; struct resource_map_request req; struct { vm_memattr_t memattr; char *label; } map_args[MEM_RES_NUM] = { { VM_MEMATTR_WRITE_BACK, "cache-enabled part" }, { VM_MEMATTR_DEVICE, "cache-inhibited part" }, { VM_MEMATTR_DEVICE, "control registers" } }; + uint16_t rc_token, io_token; int error; sc->dev = dev; sc->swp = NULL; - sc->cmd = NULL; sc->intr = NULL; sc->irq_resource = NULL; /* Allocate resources. */ error = bus_alloc_resources(sc->dev, dpaa2_io_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources: " "error=%d\n", __func__, error); return (ENXIO); } /* Set allocated MC portal up. */ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); mcp_dinfo = device_get_ivars(mcp_dev); dinfo->portal = mcp_dinfo->portal; /* Map memory resources of the portal. 
*/ for (int i = 0; i < MEM_RES_NUM; i++) { - if (sc->res[MEM_RID(i)] == NULL) + if (sc->res[MEM_RID(i)] == NULL) { continue; + } resource_init_map_request(&req); req.memattr = map_args[i].memattr; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[MEM_RID(i)], &req, &sc->map[MEM_RID(i)]); if (error) { device_printf(dev, "%s: failed to map %s: error=%d\n", __func__, map_args[i].label, error); goto err_exit; } } - /* Allocate a command to send to the MC hardware. */ - error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF); - if (error) { - device_printf(dev, "%s: failed to allocate dpaa2_cmd: " - "error=%d\n", __func__, error); - goto err_exit; - } + DPAA2_CMD_INIT(&cmd); - /* Prepare DPIO object. */ - error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id, - &sc->rc_token); + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open DPRC: error=%d\n", __func__, error); goto err_exit; } - error = DPAA2_CMD_IO_OPEN(dev, child, sc->cmd, dinfo->id, &sc->io_token); + error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token); if (error) { device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n", __func__, dinfo->id, error); - goto err_exit; + goto close_rc; } - error = DPAA2_CMD_IO_RESET(dev, child, sc->cmd); + error = DPAA2_CMD_IO_RESET(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to reset DPIO: id=%d, error=%d\n", __func__, dinfo->id, error); - goto err_exit; + goto close_io; } - error = DPAA2_CMD_IO_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr); + error = DPAA2_CMD_IO_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); if (error) { device_printf(dev, "%s: failed to get DPIO attributes: id=%d, " "error=%d\n", __func__, dinfo->id, error); - goto err_exit; + goto close_io; } - error = DPAA2_CMD_IO_ENABLE(dev, child, sc->cmd); + error = DPAA2_CMD_IO_ENABLE(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to enable DPIO: id=%d, " "error=%d\n", __func__, 
dinfo->id, error); - goto err_exit; + goto close_io; } /* Prepare descriptor of the QBMan software portal. */ sc->swp_desc.dpio_dev = dev; sc->swp_desc.swp_version = sc->attr.swp_version; sc->swp_desc.swp_clk = sc->attr.swp_clk; sc->swp_desc.swp_id = sc->attr.swp_id; sc->swp_desc.has_notif = sc->attr.priors_num ? true : false; sc->swp_desc.has_8prio = sc->attr.priors_num == 8u ? true : false; sc->swp_desc.cena_res = sc->res[0]; sc->swp_desc.cena_map = &sc->map[0]; sc->swp_desc.cinh_res = sc->res[1]; sc->swp_desc.cinh_map = &sc->map[1]; /* * Compute how many 256 QBMAN cycles fit into one ns. This is because * the interrupt timeout period register needs to be specified in QBMAN * clock cycles in increments of 256. */ sc->swp_desc.swp_cycles_ratio = 256000 / (sc->swp_desc.swp_clk / 1000000); /* Initialize QBMan software portal. */ error = dpaa2_swp_init_portal(&sc->swp, &sc->swp_desc, DPAA2_SWP_DEF); if (error) { device_printf(dev, "%s: failed to initialize dpaa2_swp: " "error=%d\n", __func__, error); goto err_exit; } error = dpaa2_io_setup_irqs(dev); if (error) { device_printf(dev, "%s: failed to setup IRQs: error=%d\n", __func__, error); goto err_exit; } -#if 0 - /* TODO: Enable debug output via sysctl (to reduce output). */ - if (bootverbose) + if (bootverbose) { device_printf(dev, "dpio_id=%d, swp_id=%d, chan_mode=%s, " "notif_priors=%d, swp_version=0x%x\n", sc->attr.id, sc->attr.swp_id, sc->attr.chan_mode == DPAA2_IO_LOCAL_CHANNEL ? "local_channel" : "no_channel", sc->attr.priors_num, sc->attr.swp_version); -#endif + } + + (void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); +close_io: + (void)DPAA2_CMD_IO_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, io_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: dpaa2_io_detach(dev); return (ENXIO); } /** * @brief Enqueue multiple frames to a frame queue using one FQID. 
*/ static int dpaa2_io_enq_multiple_fq(device_t iodev, uint32_t fqid, struct dpaa2_fd *fd, int frames_n) { struct dpaa2_io_softc *sc = device_get_softc(iodev); struct dpaa2_swp *swp = sc->swp; struct dpaa2_eq_desc ed; uint32_t flags = 0; memset(&ed, 0, sizeof(ed)); /* Setup enqueue descriptor. */ dpaa2_swp_set_ed_norp(&ed, false); dpaa2_swp_set_ed_fq(&ed, fqid); return (dpaa2_swp_enq_mult(swp, &ed, fd, &flags, frames_n)); } /** * @brief Configure the channel data availability notification (CDAN) * in a particular WQ channel paired with DPIO. */ static int dpaa2_io_conf_wq_channel(device_t iodev, struct dpaa2_io_notif_ctx *ctx) { struct dpaa2_io_softc *sc = device_get_softc(iodev); /* Enable generation of the CDAN notifications. */ - if (ctx->cdan_en) + if (ctx->cdan_en) { return (dpaa2_swp_conf_wq_channel(sc->swp, ctx->fq_chan_id, DPAA2_WQCHAN_WE_EN | DPAA2_WQCHAN_WE_CTX, ctx->cdan_en, ctx->qman_ctx)); + } return (0); } /** * @brief Query current configuration/state of the buffer pool. */ static int dpaa2_io_query_bp(device_t iodev, uint16_t bpid, struct dpaa2_bp_conf *conf) { struct dpaa2_io_softc *sc = device_get_softc(iodev); return (dpaa2_swp_query_bp(sc->swp, bpid, conf)); } /** * @brief Release one or more buffer pointers to the QBMan buffer pool. */ static int dpaa2_io_release_bufs(device_t iodev, uint16_t bpid, bus_addr_t *buf, uint32_t buf_num) { struct dpaa2_io_softc *sc = device_get_softc(iodev); return (dpaa2_swp_release_bufs(sc->swp, bpid, buf, buf_num)); } /** * @brief Configure DPNI object to generate interrupts. */ static int dpaa2_io_setup_irqs(device_t dev) { struct dpaa2_io_softc *sc = device_get_softc(dev); int error; /* * Setup interrupts generated by the software portal. */ dpaa2_swp_set_intr_trigger(sc->swp, DPAA2_SWP_INTR_DQRI); dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu); /* Configure IRQs. 
*/ error = dpaa2_io_setup_msi(sc); if (error) { device_printf(dev, "%s: failed to allocate MSI: error=%d\n", __func__, error); return (error); } if ((sc->irq_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { device_printf(dev, "%s: failed to allocate IRQ resource\n", __func__); return (ENXIO); } if (bus_setup_intr(dev, sc->irq_resource, INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, NULL, dpaa2_io_intr, sc, &sc->intr)) { device_printf(dev, "%s: failed to setup IRQ resource\n", __func__); return (ENXIO); } /* Wrap DPIO ID around number of CPUs. */ bus_bind_intr(dev, sc->irq_resource, sc->attr.id % mp_ncpus); /* * Setup and enable Static Dequeue Command to receive CDANs from * channel 0. */ if (sc->swp_desc.has_notif) dpaa2_swp_set_push_dequeue(sc->swp, 0, true); return (0); } static int dpaa2_io_release_irqs(device_t dev) { struct dpaa2_io_softc *sc = device_get_softc(dev); /* Disable receiving CDANs from channel 0. */ if (sc->swp_desc.has_notif) dpaa2_swp_set_push_dequeue(sc->swp, 0, false); /* Release IRQ resources. */ if (sc->intr != NULL) bus_teardown_intr(dev, sc->irq_resource, &sc->intr); if (sc->irq_resource != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[0], sc->irq_resource); (void)dpaa2_io_release_msi(device_get_softc(dev)); /* Configure software portal to stop generating interrupts. */ dpaa2_swp_set_intr_trigger(sc->swp, 0); dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu); return (0); } /** * @brief Allocate MSI interrupts for this DPAA2 I/O object. 
*/ static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc) { int val; val = pci_msi_count(sc->dev); if (val < DPAA2_IO_MSI_COUNT) device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val, DPAA2_IO_MSI_COUNT); val = MIN(val, DPAA2_IO_MSI_COUNT); if (pci_alloc_msi(sc->dev, &val) != 0) return (EINVAL); for (int i = 0; i < val; i++) sc->irq_rid[i] = i + 1; return (0); } static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc) { int error; error = pci_release_msi(sc->dev); if (error) { device_printf(sc->dev, "%s: failed to release MSI: error=%d/n", __func__, error); return (error); } return (0); } /** * @brief DPAA2 I/O interrupt handler. */ static void dpaa2_io_intr(void *arg) { struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg; struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX]; struct dpaa2_dq dq; uint32_t idx, status; uint16_t flags; int rc, cdan_n = 0; status = dpaa2_swp_read_intr_status(sc->swp); if (status == 0) { return; } DPAA2_SWP_LOCK(sc->swp, &flags); if (flags & DPAA2_SWP_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_SWP_UNLOCK(sc->swp); return; } for (int i = 0; i < DPIO_POLL_MAX; i++) { rc = dpaa2_swp_dqrr_next_locked(sc->swp, &dq, &idx); if (rc) { break; } if ((dq.common.verb & DPAA2_DQRR_RESULT_MASK) == DPAA2_DQRR_RESULT_CDAN) { ctx[cdan_n++] = (struct dpaa2_io_notif_ctx *) dq.scn.ctx; } else { /* TODO: Report unknown DQRR entry. 
*/ } dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_DCAP, idx); } DPAA2_SWP_UNLOCK(sc->swp); for (int i = 0; i < cdan_n; i++) { ctx[i]->poll(ctx[i]->channel); } /* Enable software portal interrupts back */ dpaa2_swp_clear_intr_status(sc->swp, status); dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_IIR, 0); } static device_method_t dpaa2_io_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_io_probe), DEVMETHOD(device_attach, dpaa2_io_attach), DEVMETHOD(device_detach, dpaa2_io_detach), /* QBMan software portal interface */ DEVMETHOD(dpaa2_swp_enq_multiple_fq, dpaa2_io_enq_multiple_fq), DEVMETHOD(dpaa2_swp_conf_wq_channel, dpaa2_io_conf_wq_channel), DEVMETHOD(dpaa2_swp_query_bp, dpaa2_io_query_bp), DEVMETHOD(dpaa2_swp_release_bufs, dpaa2_io_release_bufs), DEVMETHOD_END }; static driver_t dpaa2_io_driver = { "dpaa2_io", dpaa2_io_methods, sizeof(struct dpaa2_io_softc), }; DRIVER_MODULE(dpaa2_io, dpaa2_rc, dpaa2_io_driver, 0, 0); diff --git a/sys/dev/dpaa2/dpaa2_io.h b/sys/dev/dpaa2/dpaa2_io.h index d02dab8144df..13def050fffb 100644 --- a/sys/dev/dpaa2/dpaa2_io.h +++ b/sys/dev/dpaa2/dpaa2_io.h @@ -1,110 +1,105 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_IO_H #define _DPAA2_IO_H #include #include #include #include "dpaa2_types.h" #include "dpaa2_mcp.h" /* Maximum resources per DPIO: 3 SYS_MEM + 1 DPMCP. */ #define DPAA2_IO_MAX_RESOURCES 4 /* Maximum number of MSIs supported by the DPIO objects. */ #define DPAA2_IO_MSI_COUNT 1 enum dpaa2_io_chan_mode { DPAA2_IO_NO_CHANNEL, DPAA2_IO_LOCAL_CHANNEL }; /** * @brief Attributes of the DPIO object. * * swp_ce_paddr: Physical address of the cache-enabled area. * swp_ci_paddr: Physical address of the cache-inhibited area. * swp_version: Hardware IP version of the software portal. * swp_clk: QBMAN clock frequency value in Hz. * id: DPIO object ID. * swp_id: Software portal ID. * priors_num: Number of priorities for the notification channel (1-8); * relevant only if channel mode is "local channel". * chan_mode: Notification channel mode. */ struct dpaa2_io_attr { uint64_t swp_ce_paddr; uint64_t swp_ci_paddr; uint32_t swp_version; uint32_t swp_clk; uint32_t id; uint16_t swp_id; uint8_t priors_num; enum dpaa2_io_chan_mode chan_mode; }; /** * @brief Context used by DPIO to configure data availability notifications * (CDAN) on a particular WQ channel. */ struct dpaa2_io_notif_ctx { void (*poll)(void *); device_t io_dev; void *channel; uint64_t qman_ctx; uint16_t fq_chan_id; bool cdan_en; }; /** * @brief Software context for the DPAA2 I/O driver. 
*/ struct dpaa2_io_softc { device_t dev; struct dpaa2_swp_desc swp_desc; struct dpaa2_swp *swp; struct dpaa2_io_attr attr; - /* Help to send commands to MC. */ - struct dpaa2_cmd *cmd; - uint16_t rc_token; - uint16_t io_token; - struct resource *res[DPAA2_IO_MAX_RESOURCES]; struct resource_map map[DPAA2_IO_MAX_RESOURCES]; int irq_rid[DPAA2_IO_MSI_COUNT]; struct resource *irq_resource; void *intr; /* interrupt handle */ }; extern struct resource_spec dpaa2_io_spec[]; #endif /* _DPAA2_IO_H */ diff --git a/sys/dev/dpaa2/dpaa2_mac.c b/sys/dev/dpaa2/dpaa2_mac.c index d6e381c0dd15..990286e53bfa 100644 --- a/sys/dev/dpaa2/dpaa2_mac.c +++ b/sys/dev/dpaa2/dpaa2_mac.c @@ -1,376 +1,414 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * The DPAA2 MAC driver. * * For every DPAA2 MAC, there is an MC object named DPMAC, for MDIO and link * state updates. The DPMAC virtualizes the MDIO interface, so each PHY driver * may see a private interface (removing the need for synchronization in GPP on * the multiplexed MDIO hardware). */ #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" #include "dpaa2_ni.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_swp_if.h" #include "dpaa2_cmd_if.h" /* Index of the only DPMAC IRQ. */ #define DPMAC_IRQ_INDEX 0 /* DPMAC IRQ statuses. */ #define DPMAC_IRQ_LINK_CFG_REQ 0x00000001 /* change in requested link config. */ #define DPMAC_IRQ_LINK_CHANGED 0x00000002 /* link state changed */ #define DPMAC_IRQ_LINK_UP_REQ 0x00000004 /* link up request */ #define DPMAC_IRQ_LINK_DOWN_REQ 0x00000008 /* link down request */ #define DPMAC_IRQ_EP_CHANGED 0x00000010 /* DPAA2 endpoint dis/connected */ /* DPAA2 MAC resource specification. */ struct resource_spec dpaa2_mac_spec[] = { /* * DPMCP resources. * * NOTE: MC command portals (MCPs) are used to send commands to, and * receive responses from, the MC firmware. One portal per DPMAC. 
*/ #define MCP_RES_NUM (1u) #define MCP_RID_OFF (0u) #define MCP_RID(rid) ((rid) + MCP_RID_OFF) /* --- */ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* --- */ RESOURCE_SPEC_END }; /* Interrupt configuration routines. */ static int dpaa2_mac_setup_irq(device_t); static int dpaa2_mac_setup_msi(struct dpaa2_mac_softc *); /* Subroutines to get text representation. */ static const char *dpaa2_mac_ethif_to_str(enum dpaa2_mac_eth_if); static const char *dpaa2_mac_link_type_to_str(enum dpaa2_mac_link_type); /* Interrupt handlers */ static void dpaa2_mac_intr(void *arg); static int dpaa2_mac_probe(device_t dev) { /* DPIO device will be added by a parent resource container itself. */ device_set_desc(dev, "DPAA2 MAC"); return (BUS_PROBE_DEFAULT); } static int dpaa2_mac_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; device_t mcp_dev; struct dpaa2_mac_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *mcp_dinfo; + struct dpaa2_cmd cmd; + uint16_t rc_token, mac_token; int error; sc->dev = dev; memset(sc->addr, 0, ETHER_ADDR_LEN); error = bus_alloc_resources(sc->dev, dpaa2_mac_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources: " "error=%d\n", __func__, error); - return (ENXIO); + goto err_exit; } /* Obtain MC portal. */ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); mcp_dinfo = device_get_ivars(mcp_dev); dinfo->portal = mcp_dinfo->portal; - /* Allocate a command to send to MC hardware. 
*/ - error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF); + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { - device_printf(dev, "Failed to allocate dpaa2_cmd: error=%d\n", - error); + device_printf(dev, "%s: failed to open DPRC: error=%d\n", + __func__, error); goto err_exit; } - - /* Open resource container and DPMAC object. */ - error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id, - &sc->rc_token); + error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, dinfo->id, &mac_token); if (error) { - device_printf(dev, "Failed to open DPRC: error=%d\n", error); - goto err_free_cmd; + device_printf(dev, "%s: failed to open DPMAC: id=%d, error=%d\n", + __func__, dinfo->id, error); + goto close_rc; } - error = DPAA2_CMD_MAC_OPEN(dev, child, sc->cmd, dinfo->id, - &sc->mac_token); + + error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); if (error) { - device_printf(dev, "Failed to open DPMAC: id=%d, error=%d\n", - dinfo->id, error); - goto err_close_rc; + device_printf(dev, "%s: failed to get DPMAC attributes: id=%d, " + "error=%d\n", __func__, dinfo->id, error); + goto close_mac; } - - error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr); + error = DPAA2_CMD_MAC_GET_ADDR(dev, child, &cmd, sc->addr); if (error) { - device_printf(dev, "Failed to get DPMAC attributes: id=%d, " - "error=%d\n", dinfo->id, error); - goto err_close_mac; + device_printf(dev, "%s: failed to get physical address: " + "error=%d\n", __func__, error); } - error = DPAA2_CMD_MAC_GET_ADDR(dev, child, sc->cmd, sc->addr); - if (error) - device_printf(dev, "Failed to get physical address: error=%d\n", - error); - /* - * TODO: Enable debug output via sysctl. 
- */ + if (bootverbose) { device_printf(dev, "ether %6D\n", sc->addr, ":"); device_printf(dev, "max_rate=%d, eth_if=%s, link_type=%s\n", sc->attr.max_rate, dpaa2_mac_ethif_to_str(sc->attr.eth_if), dpaa2_mac_link_type_to_str(sc->attr.link_type)); } error = dpaa2_mac_setup_irq(dev); if (error) { - device_printf(dev, "Failed to setup IRQs: error=%d\n", error); - goto err_close_mac; + device_printf(dev, "%s: failed to setup IRQs: error=%d\n", + __func__, error); + goto close_mac; } + (void)DPAA2_CMD_MAC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, mac_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); -err_close_mac: - DPAA2_CMD_MAC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->mac_token)); -err_close_rc: - DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token)); -err_free_cmd: - dpaa2_mcp_free_command(sc->cmd); +close_mac: + (void)DPAA2_CMD_MAC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, mac_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (ENXIO); } static int dpaa2_mac_detach(device_t dev) { - device_t child = dev; - struct dpaa2_mac_softc *sc = device_get_softc(dev); - - DPAA2_CMD_MAC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->mac_token)); - DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token)); - dpaa2_mcp_free_command(sc->cmd); - - sc->cmd = NULL; - sc->rc_token = 0; - sc->mac_token = 0; - + /* TBD */ return (0); } /** * @brief Configure DPMAC object to generate interrupts. */ static int dpaa2_mac_setup_irq(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_mac_softc *sc = device_get_softc(dev); - struct dpaa2_cmd *cmd = sc->cmd; - uint16_t mac_token = sc->mac_token; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, mac_token; uint32_t irq_mask; int error; - /* Configure IRQs. 
*/ error = dpaa2_mac_setup_msi(sc); if (error) { - device_printf(dev, "Failed to allocate MSI\n"); - return (error); + device_printf(dev, "%s: failed to allocate MSI\n", __func__); + goto err_exit; } if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { - device_printf(dev, "Failed to allocate IRQ resource\n"); - return (ENXIO); + device_printf(dev, "%s: failed to allocate IRQ resource\n", + __func__); + error = ENXIO; + goto err_exit; } if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, dpaa2_mac_intr, sc, &sc->intr)) { - device_printf(dev, "Failed to setup IRQ resource\n"); - return (ENXIO); + device_printf(dev, "%s: failed to setup IRQ resource\n", + __func__); + error = ENXIO; + goto err_exit; + } + + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open DPRC: error=%d\n", + __func__, error); + goto err_exit; + } + error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, dinfo->id, &mac_token); + if (error) { + device_printf(dev, "%s: failed to open DPMAC: id=%d, error=%d\n", + __func__, dinfo->id, error); + goto close_rc; } - /* Configure DPNI to generate interrupts. */ irq_mask = DPMAC_IRQ_LINK_CFG_REQ | DPMAC_IRQ_LINK_CHANGED | DPMAC_IRQ_LINK_UP_REQ | DPMAC_IRQ_LINK_DOWN_REQ | DPMAC_IRQ_EP_CHANGED; - error = DPAA2_CMD_MAC_SET_IRQ_MASK(dev, child, dpaa2_mcp_tk(cmd, - mac_token), DPMAC_IRQ_INDEX, irq_mask); + error = DPAA2_CMD_MAC_SET_IRQ_MASK(dev, child, &cmd, DPMAC_IRQ_INDEX, + irq_mask); if (error) { - device_printf(dev, "Failed to set IRQ mask\n"); - return (error); + device_printf(dev, "%s: failed to set IRQ mask\n", __func__); + goto close_mac; } - - /* Enable IRQ. 
*/ - error = DPAA2_CMD_MAC_SET_IRQ_ENABLE(dev, child, cmd, DPMAC_IRQ_INDEX, + error = DPAA2_CMD_MAC_SET_IRQ_ENABLE(dev, child, &cmd, DPMAC_IRQ_INDEX, true); if (error) { - device_printf(dev, "Failed to enable IRQ\n"); - return (error); + device_printf(dev, "%s: failed to enable IRQ\n", __func__); + goto close_mac; } + (void)DPAA2_CMD_MAC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, mac_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_mac: + (void)DPAA2_CMD_MAC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, mac_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } /** * @brief Allocate MSI interrupts for DPMAC. */ static int dpaa2_mac_setup_msi(struct dpaa2_mac_softc *sc) { int val; val = pci_msi_count(sc->dev); if (val < DPAA2_MAC_MSI_COUNT) device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val, DPAA2_MAC_MSI_COUNT); val = MIN(val, DPAA2_MAC_MSI_COUNT); if (pci_alloc_msi(sc->dev, &val) != 0) return (EINVAL); for (int i = 0; i < val; i++) sc->irq_rid[i] = i + 1; return (0); } static void dpaa2_mac_intr(void *arg) { struct dpaa2_mac_softc *sc = (struct dpaa2_mac_softc *) arg; - device_t child = sc->dev; + device_t pdev = device_get_parent(sc->dev); + device_t dev = sc->dev; + device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; uint32_t status = ~0u; /* clear all IRQ status bits */ + uint16_t rc_token, mac_token; int error; - error = DPAA2_CMD_MAC_GET_IRQ_STATUS(sc->dev, child, - dpaa2_mcp_tk(sc->cmd, sc->mac_token), DPMAC_IRQ_INDEX, &status); - if (error) + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open DPRC: error=%d\n", + __func__, error); + goto err_exit; + } + error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, dinfo->id, &mac_token); + if (error) { + 
device_printf(dev, "%s: failed to open DPMAC: id=%d, error=%d\n", + __func__, dinfo->id, error); + goto close_rc; + } + error = DPAA2_CMD_MAC_GET_IRQ_STATUS(dev, child, &cmd, DPMAC_IRQ_INDEX, + &status); + if (error) { device_printf(sc->dev, "%s: failed to obtain IRQ status: " "error=%d\n", __func__, error); + } + + (void)DPAA2_CMD_MAC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, mac_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return; } static const char * dpaa2_mac_ethif_to_str(enum dpaa2_mac_eth_if eth_if) { switch (eth_if) { case DPAA2_MAC_ETH_IF_MII: return ("MII"); case DPAA2_MAC_ETH_IF_RMII: return ("RMII"); case DPAA2_MAC_ETH_IF_SMII: return ("SMII"); case DPAA2_MAC_ETH_IF_GMII: return ("GMII"); case DPAA2_MAC_ETH_IF_RGMII: return ("RGMII"); case DPAA2_MAC_ETH_IF_SGMII: return ("SGMII"); case DPAA2_MAC_ETH_IF_QSGMII: return ("QSGMII"); case DPAA2_MAC_ETH_IF_XAUI: return ("XAUI"); case DPAA2_MAC_ETH_IF_XFI: return ("XFI"); case DPAA2_MAC_ETH_IF_CAUI: return ("CAUI"); case DPAA2_MAC_ETH_IF_1000BASEX: return ("1000BASE-X"); case DPAA2_MAC_ETH_IF_USXGMII: return ("USXGMII"); default: return ("unknown"); } } static const char * dpaa2_mac_link_type_to_str(enum dpaa2_mac_link_type link_type) { switch (link_type) { case DPAA2_MAC_LINK_TYPE_NONE: return ("NONE"); case DPAA2_MAC_LINK_TYPE_FIXED: return ("FIXED"); case DPAA2_MAC_LINK_TYPE_PHY: return ("PHY"); case DPAA2_MAC_LINK_TYPE_BACKPLANE: return ("BACKPLANE"); default: return ("unknown"); } } static device_method_t dpaa2_mac_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_mac_probe), DEVMETHOD(device_attach, dpaa2_mac_attach), DEVMETHOD(device_detach, dpaa2_mac_detach), DEVMETHOD_END }; static driver_t dpaa2_mac_driver = { "dpaa2_mac", dpaa2_mac_methods, sizeof(struct dpaa2_mac_softc), }; DRIVER_MODULE(dpaa2_mac, dpaa2_rc, dpaa2_mac_driver, 0, 0); diff --git a/sys/dev/dpaa2/dpaa2_mac.h b/sys/dev/dpaa2/dpaa2_mac.h index 
cbdf2d824045..d31513e725c4 100644 --- a/sys/dev/dpaa2/dpaa2_mac.h +++ b/sys/dev/dpaa2/dpaa2_mac.h @@ -1,124 +1,118 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_MAC_H #define _DPAA2_MAC_H #include #include #include #include #include "dpaa2_types.h" #include "dpaa2_mcp.h" #define DPAA2_MAC_MAX_RESOURCES 1 /* Maximum resources per DPMAC: 1 DPMCP. */ #define DPAA2_MAC_MSI_COUNT 1 /* MSIs per DPMAC */ /* DPMAC link configuration options. 
*/ #define DPAA2_MAC_LINK_OPT_AUTONEG ((uint64_t) 0x01u) #define DPAA2_MAC_LINK_OPT_HALF_DUPLEX ((uint64_t) 0x02u) #define DPAA2_MAC_LINK_OPT_PAUSE ((uint64_t) 0x04u) #define DPAA2_MAC_LINK_OPT_ASYM_PAUSE ((uint64_t) 0x08u) enum dpaa2_mac_eth_if { DPAA2_MAC_ETH_IF_MII, DPAA2_MAC_ETH_IF_RMII, DPAA2_MAC_ETH_IF_SMII, DPAA2_MAC_ETH_IF_GMII, DPAA2_MAC_ETH_IF_RGMII, DPAA2_MAC_ETH_IF_SGMII, DPAA2_MAC_ETH_IF_QSGMII, DPAA2_MAC_ETH_IF_XAUI, DPAA2_MAC_ETH_IF_XFI, DPAA2_MAC_ETH_IF_CAUI, DPAA2_MAC_ETH_IF_1000BASEX, DPAA2_MAC_ETH_IF_USXGMII }; enum dpaa2_mac_link_type { DPAA2_MAC_LINK_TYPE_NONE, DPAA2_MAC_LINK_TYPE_FIXED, DPAA2_MAC_LINK_TYPE_PHY, DPAA2_MAC_LINK_TYPE_BACKPLANE }; /** * @brief Attributes of the DPMAC object. * * id: DPMAC object ID. * max_rate: Maximum supported rate (in Mbps). * eth_if: Type of the Ethernet interface. * link_type: Type of the link. */ struct dpaa2_mac_attr { uint32_t id; uint32_t max_rate; enum dpaa2_mac_eth_if eth_if; enum dpaa2_mac_link_type link_type; }; /** * @brief Link state of the DPMAC object. */ struct dpaa2_mac_link_state { uint64_t options; uint64_t supported; uint64_t advert; uint32_t rate; bool up; bool state_valid; }; /** * @brief Software context for the DPAA2 MAC driver. * * dev: Device associated with this software context. * addr: Physical address assigned to the DPMAC object. * attr: Attributes of the DPMAC object. */ struct dpaa2_mac_softc { device_t dev; uint8_t addr[ETHER_ADDR_LEN]; struct resource *res[DPAA2_MAC_MAX_RESOURCES]; struct dpaa2_mac_attr attr; - /* Help to send commands to MC. */ - struct dpaa2_cmd *cmd; - uint16_t rc_token; - uint16_t mac_token; - - /* Interrupts. 
*/ int irq_rid[DPAA2_MAC_MSI_COUNT]; struct resource *irq_res; void *intr; /* interrupt handle */ }; extern struct resource_spec dpaa2_mac_spec[]; #endif /* _DPAA2_MAC_H */ diff --git a/sys/dev/dpaa2/dpaa2_mcp.c b/sys/dev/dpaa2/dpaa2_mcp.c index f41d9a7d21b0..9d24463413f3 100644 --- a/sys/dev/dpaa2/dpaa2_mcp.c +++ b/sys/dev/dpaa2/dpaa2_mcp.c @@ -1,318 +1,258 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * DPAA2 MC command portal and helper routines. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mcp.h" #include "dpaa2_mc.h" #include "dpaa2_cmd_if.h" MALLOC_DEFINE(M_DPAA2_MCP, "dpaa2_mcp", "DPAA2 Management Complex Portal"); static struct resource_spec dpaa2_mcp_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, RESOURCE_SPEC_END }; int dpaa2_mcp_init_portal(struct dpaa2_mcp **mcp, struct resource *res, struct resource_map *map, uint16_t flags) { const int mflags = flags & DPAA2_PORTAL_NOWAIT_ALLOC ? (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO); struct dpaa2_mcp *p; if (!mcp || !res || !map) return (DPAA2_CMD_STAT_EINVAL); p = malloc(sizeof(struct dpaa2_mcp), M_DPAA2_MCP, mflags); if (p == NULL) return (DPAA2_CMD_STAT_NO_MEMORY); mtx_init(&p->lock, "mcp_sleep_lock", NULL, MTX_DEF); p->res = res; p->map = map; p->flags = flags; p->rc_api_major = 0; /* DPRC API version to be cached later. */ p->rc_api_minor = 0; *mcp = p; return (0); } void dpaa2_mcp_free_portal(struct dpaa2_mcp *mcp) { uint16_t flags; KASSERT(mcp != NULL, ("%s: mcp is NULL", __func__)); DPAA2_MCP_LOCK(mcp, &flags); mcp->flags |= DPAA2_PORTAL_DESTROYED; DPAA2_MCP_UNLOCK(mcp); /* Let threads stop using this portal. */ DELAY(DPAA2_PORTAL_TIMEOUT); mtx_destroy(&mcp->lock); free(mcp, M_DPAA2_MCP); } -int -dpaa2_mcp_init_command(struct dpaa2_cmd **cmd, uint16_t flags) -{ - const int mflags = flags & DPAA2_CMD_NOWAIT_ALLOC - ? 
(M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO); - struct dpaa2_cmd *c; - struct dpaa2_cmd_header *hdr; - - if (!cmd) - return (DPAA2_CMD_STAT_EINVAL); - - c = malloc(sizeof(struct dpaa2_cmd), M_DPAA2_MCP, mflags); - if (!c) - return (DPAA2_CMD_STAT_NO_MEMORY); - - hdr = (struct dpaa2_cmd_header *) &c->header; - hdr->srcid = 0; - hdr->status = DPAA2_CMD_STAT_OK; - hdr->token = 0; - hdr->cmdid = 0; - hdr->flags_hw = DPAA2_CMD_DEF; - hdr->flags_sw = DPAA2_CMD_DEF; - if (flags & DPAA2_CMD_HIGH_PRIO) - hdr->flags_hw |= DPAA2_HW_FLAG_HIGH_PRIO; - if (flags & DPAA2_CMD_INTR_DIS) - hdr->flags_sw |= DPAA2_SW_FLAG_INTR_DIS; - for (uint32_t i = 0; i < DPAA2_CMD_PARAMS_N; i++) - c->params[i] = 0; - *cmd = c; - - return (0); -} - -void -dpaa2_mcp_free_command(struct dpaa2_cmd *cmd) -{ - if (cmd != NULL) - free(cmd, M_DPAA2_MCP); -} - struct dpaa2_cmd * dpaa2_mcp_tk(struct dpaa2_cmd *cmd, uint16_t token) { struct dpaa2_cmd_header *hdr; - if (cmd != NULL) { - hdr = (struct dpaa2_cmd_header *) &cmd->header; - hdr->token = token; - } + KASSERT(cmd != NULL, ("%s: cmd is NULL", __func__)); + + hdr = (struct dpaa2_cmd_header *) &cmd->header; + hdr->token = token; return (cmd); } struct dpaa2_cmd * dpaa2_mcp_f(struct dpaa2_cmd *cmd, uint16_t flags) { struct dpaa2_cmd_header *hdr; - if (cmd) { - hdr = (struct dpaa2_cmd_header *) &cmd->header; - hdr->flags_hw = DPAA2_CMD_DEF; - hdr->flags_sw = DPAA2_CMD_DEF; - - if (flags & DPAA2_CMD_HIGH_PRIO) - hdr->flags_hw |= DPAA2_HW_FLAG_HIGH_PRIO; - if (flags & DPAA2_CMD_INTR_DIS) - hdr->flags_sw |= DPAA2_SW_FLAG_INTR_DIS; + KASSERT(cmd != NULL, ("%s: cmd is NULL", __func__)); + + hdr = (struct dpaa2_cmd_header *) &cmd->header; + hdr->flags_hw = DPAA2_CMD_DEF; + hdr->flags_sw = DPAA2_CMD_DEF; + if (flags & DPAA2_CMD_HIGH_PRIO) { + hdr->flags_hw |= DPAA2_HW_FLAG_HIGH_PRIO; + } + if (flags & DPAA2_CMD_INTR_DIS) { + hdr->flags_sw |= DPAA2_SW_FLAG_INTR_DIS; } return (cmd); } static int dpaa2_mcp_probe(device_t dev) { /* DPMCP device will be added by the 
parent resource container. */ device_set_desc(dev, "DPAA2 MC portal"); return (BUS_PROBE_DEFAULT); } static int dpaa2_mcp_detach(device_t dev) { return (0); } static int dpaa2_mcp_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_mcp_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); - struct dpaa2_cmd *cmd; + struct dpaa2_cmd cmd; struct dpaa2_mcp *portal; struct resource_map_request req; uint16_t rc_token, mcp_token; int error; sc->dev = dev; error = bus_alloc_resources(sc->dev, dpaa2_mcp_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources\n", __func__); goto err_exit; } /* At least 64 bytes of the command portal should be available. */ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { device_printf(dev, "%s: MC portal memory region too small: " "%jd\n", __func__, rman_get_size(sc->res[0])); goto err_exit; } /* Map MC portal memory resource. */ resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req, &sc->map[0]); if (error) { device_printf(dev, "%s: failed to map MC portal memory\n", __func__); goto err_exit; } /* Initialize portal to send commands to MC. */ error = dpaa2_mcp_init_portal(&portal, sc->res[0], &sc->map[0], DPAA2_PORTAL_DEF); if (error) { device_printf(dev, "%s: failed to initialize dpaa2_mcp: " "error=%d\n", __func__, error); goto err_exit; } - /* Allocate a command to send to MC hardware. */ - error = dpaa2_mcp_init_command(&cmd, DPAA2_CMD_DEF); - if (error) { - device_printf(dev, "%s: failed to allocate dpaa2_cmd: " - "error=%d\n", __func__, error); - goto err_exit; - } + DPAA2_CMD_INIT(&cmd); /* Open resource container and DPMCP object. 
*/ - error = DPAA2_CMD_RC_OPEN(dev, child, cmd, rcinfo->id, &rc_token); + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open DPRC: error=%d\n", __func__, error); - goto err_free_cmd; + goto err_exit; } - error = DPAA2_CMD_MCP_OPEN(dev, child, cmd, dinfo->id, &mcp_token); + error = DPAA2_CMD_MCP_OPEN(dev, child, &cmd, dinfo->id, &mcp_token); if (error) { device_printf(dev, "%s: failed to open DPMCP: id=%d, error=%d\n", __func__, dinfo->id, error); - goto err_close_rc; + goto close_rc; } /* Prepare DPMCP object. */ - error = DPAA2_CMD_MCP_RESET(dev, child, cmd); + error = DPAA2_CMD_MCP_RESET(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to reset DPMCP: id=%d, " "error=%d\n", __func__, dinfo->id, error); - goto err_close_mcp; - } - - /* Close the DPMCP object and the resource container. */ - error = DPAA2_CMD_MCP_CLOSE(dev, child, cmd); - if (error) { - device_printf(dev, "%s: failed to close DPMCP: id=%d, " - "error=%d\n", __func__, dinfo->id, error); - goto err_close_rc; - } - error = DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(cmd, rc_token)); - if (error) { - device_printf(dev, "%s: failed to close DPRC: error=%d\n", - __func__, error); - goto err_free_cmd; + goto close_mcp; } - dpaa2_mcp_free_command(cmd); + (void)DPAA2_CMD_MCP_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); dinfo->portal = portal; return (0); -err_close_mcp: - DPAA2_CMD_MCP_CLOSE(dev, child, dpaa2_mcp_tk(cmd, mcp_token)); -err_close_rc: - DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(cmd, rc_token)); -err_free_cmd: - dpaa2_mcp_free_command(cmd); +close_mcp: + (void)DPAA2_CMD_MCP_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, mcp_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: dpaa2_mcp_detach(dev); return (ENXIO); } static device_method_t dpaa2_mcp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_mcp_probe), 
DEVMETHOD(device_attach, dpaa2_mcp_attach), DEVMETHOD(device_detach, dpaa2_mcp_detach), DEVMETHOD_END }; static driver_t dpaa2_mcp_driver = { "dpaa2_mcp", dpaa2_mcp_methods, sizeof(struct dpaa2_mcp_softc), }; DRIVER_MODULE(dpaa2_mcp, dpaa2_rc, dpaa2_mcp_driver, 0, 0); diff --git a/sys/dev/dpaa2/dpaa2_mcp.h b/sys/dev/dpaa2/dpaa2_mcp.h index 55052ca7afb2..5e1926308b53 100644 --- a/sys/dev/dpaa2/dpaa2_mcp.h +++ b/sys/dev/dpaa2/dpaa2_mcp.h @@ -1,449 +1,473 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_MCP_H #define _DPAA2_MCP_H #include #include #include #include "dpaa2_types.h" /* * DPAA2 MC command interface helper routines. 
*/ #define DPAA2_PORTAL_TIMEOUT 100000 /* us */ #define DPAA2_MCP_MEM_WIDTH 0x40 /* Minimal size of the MC portal. */ #define DPAA2_MCP_MAX_RESOURCES 1 /* resources per DPMCP: 1 SYS_MEM */ /* * Portal flags. * * TODO: Use the same flags for both MC and software portals. */ #define DPAA2_PORTAL_DEF 0x0u #define DPAA2_PORTAL_NOWAIT_ALLOC 0x2u /* Do not sleep during init */ #define DPAA2_PORTAL_LOCKED 0x4000u /* Wait till portal's unlocked */ #define DPAA2_PORTAL_DESTROYED 0x8000u /* Terminate any operations */ /* Command flags. */ #define DPAA2_CMD_DEF 0x0u #define DPAA2_CMD_HIGH_PRIO 0x80u /* High priority command */ #define DPAA2_CMD_INTR_DIS 0x100u /* Disable cmd finished intr */ #define DPAA2_CMD_NOWAIT_ALLOC 0x8000u /* Do not sleep during init */ /* DPAA2 command return codes. */ #define DPAA2_CMD_STAT_OK 0x0 /* Set by MC on success */ #define DPAA2_CMD_STAT_READY 0x1 /* Ready to be processed */ #define DPAA2_CMD_STAT_AUTH_ERR 0x3 /* Illegal object-portal-icid */ #define DPAA2_CMD_STAT_NO_PRIVILEGE 0x4 /* No privilege */ #define DPAA2_CMD_STAT_DMA_ERR 0x5 /* DMA or I/O error */ #define DPAA2_CMD_STAT_CONFIG_ERR 0x6 /* Invalid/conflicting params */ #define DPAA2_CMD_STAT_TIMEOUT 0x7 /* Command timed out */ #define DPAA2_CMD_STAT_NO_RESOURCE 0x8 /* No DPAA2 resources */ #define DPAA2_CMD_STAT_NO_MEMORY 0x9 /* No memory available */ #define DPAA2_CMD_STAT_BUSY 0xA /* Device is busy */ #define DPAA2_CMD_STAT_UNSUPPORTED_OP 0xB /* Unsupported operation */ #define DPAA2_CMD_STAT_INVALID_STATE 0xC /* Invalid state */ /* Driver-specific return codes. */ #define DPAA2_CMD_STAT_UNKNOWN_OBJ 0xFD /* Unknown DPAA2 object. */ #define DPAA2_CMD_STAT_EINVAL 0xFE /* Invalid argument */ #define DPAA2_CMD_STAT_ERR 0xFF /* General error */ /* Object's memory region flags. 
*/ #define DPAA2_RC_REG_CACHEABLE 0x1 /* Cacheable memory mapping */ #define DPAA2_HW_FLAG_HIGH_PRIO 0x80u #define DPAA2_SW_FLAG_INTR_DIS 0x01u #define DPAA2_CMD_PARAMS_N 7u #define DPAA2_LABEL_SZ 16 /* ------------------------- MNG command IDs -------------------------------- */ #define CMD_MNG_BASE_VERSION 1 #define CMD_MNG_ID_OFFSET 4 #define CMD_MNG(id) (((id) << CMD_MNG_ID_OFFSET) | CMD_MNG_BASE_VERSION) #define CMDID_MNG_GET_VER CMD_MNG(0x831) #define CMDID_MNG_GET_SOC_VER CMD_MNG(0x832) #define CMDID_MNG_GET_CONT_ID CMD_MNG(0x830) /* ------------------------- DPRC command IDs ------------------------------- */ #define CMD_RC_BASE_VERSION 1 #define CMD_RC_2ND_VERSION 2 #define CMD_RC_3RD_VERSION 3 #define CMD_RC_ID_OFFSET 4 #define CMD_RC(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_BASE_VERSION) #define CMD_RC_V2(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_2ND_VERSION) #define CMD_RC_V3(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_3RD_VERSION) #define CMDID_RC_OPEN CMD_RC(0x805) #define CMDID_RC_CLOSE CMD_RC(0x800) #define CMDID_RC_GET_API_VERSION CMD_RC(0xA05) #define CMDID_RC_GET_ATTR CMD_RC(0x004) #define CMDID_RC_RESET_CONT CMD_RC(0x005) #define CMDID_RC_RESET_CONT_V2 CMD_RC_V2(0x005) #define CMDID_RC_SET_IRQ CMD_RC(0x010) #define CMDID_RC_SET_IRQ_ENABLE CMD_RC(0x012) #define CMDID_RC_SET_IRQ_MASK CMD_RC(0x014) #define CMDID_RC_GET_IRQ_STATUS CMD_RC(0x016) #define CMDID_RC_CLEAR_IRQ_STATUS CMD_RC(0x017) #define CMDID_RC_GET_CONT_ID CMD_RC(0x830) #define CMDID_RC_GET_OBJ_COUNT CMD_RC(0x159) #define CMDID_RC_GET_OBJ CMD_RC(0x15A) #define CMDID_RC_GET_OBJ_DESC CMD_RC(0x162) #define CMDID_RC_GET_OBJ_REG CMD_RC(0x15E) #define CMDID_RC_GET_OBJ_REG_V2 CMD_RC_V2(0x15E) #define CMDID_RC_GET_OBJ_REG_V3 CMD_RC_V3(0x15E) #define CMDID_RC_SET_OBJ_IRQ CMD_RC(0x15F) #define CMDID_RC_GET_CONN CMD_RC(0x16C) /* ------------------------- DPIO command IDs ------------------------------- */ #define CMD_IO_BASE_VERSION 1 #define CMD_IO_ID_OFFSET 4 #define CMD_IO(id) (((id) << 
CMD_IO_ID_OFFSET) | CMD_IO_BASE_VERSION) #define CMDID_IO_OPEN CMD_IO(0x803) #define CMDID_IO_CLOSE CMD_IO(0x800) #define CMDID_IO_ENABLE CMD_IO(0x002) #define CMDID_IO_DISABLE CMD_IO(0x003) #define CMDID_IO_GET_ATTR CMD_IO(0x004) #define CMDID_IO_RESET CMD_IO(0x005) #define CMDID_IO_SET_IRQ_ENABLE CMD_IO(0x012) #define CMDID_IO_SET_IRQ_MASK CMD_IO(0x014) #define CMDID_IO_GET_IRQ_STATUS CMD_IO(0x016) #define CMDID_IO_ADD_STATIC_DQ_CHAN CMD_IO(0x122) /* ------------------------- DPNI command IDs ------------------------------- */ #define CMD_NI_BASE_VERSION 1 #define CMD_NI_2ND_VERSION 2 #define CMD_NI_4TH_VERSION 4 #define CMD_NI_ID_OFFSET 4 #define CMD_NI(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_BASE_VERSION) #define CMD_NI_V2(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_2ND_VERSION) #define CMD_NI_V4(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_4TH_VERSION) #define CMDID_NI_OPEN CMD_NI(0x801) #define CMDID_NI_CLOSE CMD_NI(0x800) #define CMDID_NI_ENABLE CMD_NI(0x002) #define CMDID_NI_DISABLE CMD_NI(0x003) #define CMDID_NI_GET_API_VER CMD_NI(0xA01) #define CMDID_NI_RESET CMD_NI(0x005) #define CMDID_NI_GET_ATTR CMD_NI(0x004) #define CMDID_NI_SET_BUF_LAYOUT CMD_NI(0x265) #define CMDID_NI_GET_TX_DATA_OFF CMD_NI(0x212) #define CMDID_NI_GET_PORT_MAC_ADDR CMD_NI(0x263) #define CMDID_NI_SET_PRIM_MAC_ADDR CMD_NI(0x224) #define CMDID_NI_GET_PRIM_MAC_ADDR CMD_NI(0x225) #define CMDID_NI_SET_LINK_CFG CMD_NI(0x21A) #define CMDID_NI_GET_LINK_CFG CMD_NI(0x278) #define CMDID_NI_GET_LINK_STATE CMD_NI(0x215) #define CMDID_NI_SET_QOS_TABLE CMD_NI(0x240) #define CMDID_NI_CLEAR_QOS_TABLE CMD_NI(0x243) #define CMDID_NI_SET_POOLS CMD_NI(0x200) #define CMDID_NI_SET_ERR_BEHAVIOR CMD_NI(0x20B) #define CMDID_NI_GET_QUEUE CMD_NI(0x25F) #define CMDID_NI_SET_QUEUE CMD_NI(0x260) #define CMDID_NI_GET_QDID CMD_NI(0x210) #define CMDID_NI_ADD_MAC_ADDR CMD_NI(0x226) #define CMDID_NI_REMOVE_MAC_ADDR CMD_NI(0x227) #define CMDID_NI_CLEAR_MAC_FILTERS CMD_NI(0x228) #define CMDID_NI_SET_MFL CMD_NI(0x216) #define 
CMDID_NI_SET_OFFLOAD CMD_NI(0x26C) #define CMDID_NI_SET_IRQ_MASK CMD_NI(0x014) #define CMDID_NI_SET_IRQ_ENABLE CMD_NI(0x012) #define CMDID_NI_GET_IRQ_STATUS CMD_NI(0x016) #define CMDID_NI_SET_UNI_PROMISC CMD_NI(0x222) #define CMDID_NI_SET_MULTI_PROMISC CMD_NI(0x220) #define CMDID_NI_GET_STATISTICS CMD_NI(0x25D) #define CMDID_NI_SET_RX_TC_DIST CMD_NI(0x235) /* ------------------------- DPBP command IDs ------------------------------- */ #define CMD_BP_BASE_VERSION 1 #define CMD_BP_ID_OFFSET 4 #define CMD_BP(id) (((id) << CMD_BP_ID_OFFSET) | CMD_BP_BASE_VERSION) #define CMDID_BP_OPEN CMD_BP(0x804) #define CMDID_BP_CLOSE CMD_BP(0x800) #define CMDID_BP_ENABLE CMD_BP(0x002) #define CMDID_BP_DISABLE CMD_BP(0x003) #define CMDID_BP_GET_ATTR CMD_BP(0x004) #define CMDID_BP_RESET CMD_BP(0x005) /* ------------------------- DPMAC command IDs ------------------------------ */ #define CMD_MAC_BASE_VERSION 1 #define CMD_MAC_2ND_VERSION 2 #define CMD_MAC_ID_OFFSET 4 #define CMD_MAC(id) (((id) << CMD_MAC_ID_OFFSET) | CMD_MAC_BASE_VERSION) #define CMD_MAC_V2(id) (((id) << CMD_MAC_ID_OFFSET) | CMD_MAC_2ND_VERSION) #define CMDID_MAC_OPEN CMD_MAC(0x80C) #define CMDID_MAC_CLOSE CMD_MAC(0x800) #define CMDID_MAC_RESET CMD_MAC(0x005) #define CMDID_MAC_MDIO_READ CMD_MAC(0x0C0) #define CMDID_MAC_MDIO_WRITE CMD_MAC(0x0C1) #define CMDID_MAC_GET_ADDR CMD_MAC(0x0C5) #define CMDID_MAC_GET_ATTR CMD_MAC(0x004) #define CMDID_MAC_SET_LINK_STATE CMD_MAC_V2(0x0C3) #define CMDID_MAC_SET_IRQ_MASK CMD_MAC(0x014) #define CMDID_MAC_SET_IRQ_ENABLE CMD_MAC(0x012) #define CMDID_MAC_GET_IRQ_STATUS CMD_MAC(0x016) /* ------------------------- DPCON command IDs ------------------------------ */ #define CMD_CON_BASE_VERSION 1 #define CMD_CON_ID_OFFSET 4 #define CMD_CON(id) (((id) << CMD_CON_ID_OFFSET) | CMD_CON_BASE_VERSION) #define CMDID_CON_OPEN CMD_CON(0x808) #define CMDID_CON_CLOSE CMD_CON(0x800) #define CMDID_CON_ENABLE CMD_CON(0x002) #define CMDID_CON_DISABLE CMD_CON(0x003) #define CMDID_CON_GET_ATTR 
CMD_CON(0x004) #define CMDID_CON_RESET CMD_CON(0x005) #define CMDID_CON_SET_NOTIF CMD_CON(0x100) /* ------------------------- DPMCP command IDs ------------------------------ */ #define CMD_MCP_BASE_VERSION 1 #define CMD_MCP_2ND_VERSION 2 #define CMD_MCP_ID_OFFSET 4 #define CMD_MCP(id) (((id) << CMD_MCP_ID_OFFSET) | CMD_MCP_BASE_VERSION) #define CMD_MCP_V2(id) (((id) << CMD_MCP_ID_OFFSET) | CMD_MCP_2ND_VERSION) #define CMDID_MCP_CREATE CMD_MCP_V2(0x90B) #define CMDID_MCP_DESTROY CMD_MCP(0x98B) #define CMDID_MCP_OPEN CMD_MCP(0x80B) #define CMDID_MCP_CLOSE CMD_MCP(0x800) #define CMDID_MCP_RESET CMD_MCP(0x005) #define DPAA2_MCP_LOCK(__mcp, __flags) do { \ mtx_assert(&(__mcp)->lock, MA_NOTOWNED); \ mtx_lock(&(__mcp)->lock); \ *(__flags) = (__mcp)->flags; \ (__mcp)->flags |= DPAA2_PORTAL_LOCKED; \ } while (0) #define DPAA2_MCP_UNLOCK(__mcp) do { \ mtx_assert(&(__mcp)->lock, MA_OWNED); \ (__mcp)->flags &= ~DPAA2_PORTAL_LOCKED; \ mtx_unlock(&(__mcp)->lock); \ } while (0) enum dpaa2_rc_region_type { DPAA2_RC_REG_MC_PORTAL, DPAA2_RC_REG_QBMAN_PORTAL }; /** * @brief Helper object to interact with the MC portal. * * res: Unmapped portal's I/O memory. * map: Mapped portal's I/O memory. * lock: Lock to send a command to the portal and wait for the * result. * flags: Current state of the object. * rc_api_major: Major version of the DPRC API. * rc_api_minor: Minor version of the DPRC API. */ struct dpaa2_mcp { struct resource *res; struct resource_map *map; struct mtx lock; uint16_t flags; uint16_t rc_api_major; uint16_t rc_api_minor; }; /** * @brief Command object holds data to be written to the MC portal. * * header: 8 least significant bytes of the MC portal. * params: Parameters to pass together with the command to MC. Might keep * command execution results. * * NOTE: 64 bytes. */ struct dpaa2_cmd { uint64_t header; uint64_t params[DPAA2_CMD_PARAMS_N]; }; /** * @brief Helper object to access fields of the MC command header. 
* * srcid: The SoC architected source ID of the submitter. This field is * reserved and cannot be written by the driver. * flags_hw: Bits from 8 to 15 of the command header. Most of them are * reserved at the moment. * status: Command ready/status. This field is used as the handshake field * between MC and the driver. MC reports command completion with * success/error codes in this field. * flags_sw: ... * token: ... * cmdid: ... * * NOTE: 8 bytes. */ struct dpaa2_cmd_header { uint8_t srcid; uint8_t flags_hw; uint8_t status; uint8_t flags_sw; uint16_t token; uint16_t cmdid; } __packed; /** * @brief Information about DPAA2 object. * * id: ID of a logical object resource. * vendor: Object vendor identifier. * irq_count: Number of interrupts supported by the object. * reg_count: Number of mappable regions supported by the object. * state: Object state (combination of states). * ver_major: Major version of the object. * ver_minor: Minor version of the object. * flags: Object attributes flags. * type: ... * label: ... */ struct dpaa2_obj { uint32_t id; uint16_t vendor; uint8_t irq_count; uint8_t reg_count; uint32_t state; uint16_t ver_major; uint16_t ver_minor; uint16_t flags; uint8_t label[DPAA2_LABEL_SZ]; enum dpaa2_dev_type type; }; /** * @brief Attributes of the DPRC object. * * cont_id: Container ID. * portal_id: Container's portal ID. * options: Container's options as set at container's creation. * icid: Container's isolation context ID. */ struct dpaa2_rc_attr { uint32_t cont_id; uint32_t portal_id; uint32_t options; uint32_t icid; }; /** * @brief Description of the object's memory region. * * base_paddr: Region base physical address. * base_offset: Region base offset. * size: Region size (in bytes). * flags: Region flags (cacheable, etc.) * type: Type of a software portal this region belongs to. 
*/ struct dpaa2_rc_obj_region { uint64_t base_paddr; uint64_t base_offset; uint32_t size; uint32_t flags; enum dpaa2_rc_region_type type; }; /** * @brief DPAA2 endpoint descriptor. * * obj_id: Endpoint object ID. * if_id: Interface ID; for endpoints with multiple interfaces * (DPSW, DPDMUX), 0 - otherwise. * type: Endpoint object type, null-terminated string. */ struct dpaa2_ep_desc { uint32_t obj_id; uint32_t if_id; enum dpaa2_dev_type type; }; /** * @brief Configuration of the channel data availability notification (CDAN). * * qman_ctx: Context value provided with each CDAN message. * dpio_id: DPIO object ID configured with a notification channel. * prior: Priority selection within the DPIO channel; valid values * are 0-7, depending on the number of priorities in that channel. */ struct dpaa2_con_notif_cfg { uint64_t qman_ctx; uint32_t dpio_id; uint8_t prior; }; /** * @brief Attributes of the DPMCP object. * * id: DPMCP object ID. * options: Options of the MC portal (disabled high-prio commands, etc.). */ struct dpaa2_mcp_attr { uint32_t id; uint32_t options; }; /** * @brief Software context for the DPAA2 MC portal. 
*/ struct dpaa2_mcp_softc { device_t dev; struct dpaa2_mcp_attr attr; struct resource *res[DPAA2_MCP_MAX_RESOURCES]; struct resource_map map[DPAA2_MCP_MAX_RESOURCES]; }; int dpaa2_mcp_init_portal(struct dpaa2_mcp **mcp, struct resource *res, struct resource_map *map, uint16_t flags); -int dpaa2_mcp_init_command(struct dpaa2_cmd **cmd, uint16_t flags); void dpaa2_mcp_free_portal(struct dpaa2_mcp *mcp); -void dpaa2_mcp_free_command(struct dpaa2_cmd *cmd); /* to quickly update command token */ -struct dpaa2_cmd *dpaa2_mcp_tk(struct dpaa2_cmd *cmd, uint16_t token); +struct dpaa2_cmd *dpaa2_mcp_tk(struct dpaa2_cmd *cmd, const uint16_t token); /* to quickly update command flags */ -struct dpaa2_cmd *dpaa2_mcp_f(struct dpaa2_cmd *cmd, uint16_t flags); +struct dpaa2_cmd *dpaa2_mcp_f(struct dpaa2_cmd *cmd, const uint16_t flags); + +#define DPAA2_CMD_INIT_FLAGS(__cmd, __flags) do { \ + KASSERT((__cmd) != NULL, ("%s:%d: failed", __func__, __LINE__)); \ + struct dpaa2_cmd_header *__hdr; \ + uint32_t __dcpi; \ + \ + __hdr = (struct dpaa2_cmd_header *)&((__cmd)->header); \ + __hdr->srcid = 0; \ + __hdr->status = DPAA2_CMD_STAT_OK; \ + __hdr->token = 0; \ + __hdr->cmdid = 0; \ + __hdr->flags_hw = DPAA2_CMD_DEF; \ + __hdr->flags_sw = DPAA2_CMD_DEF; \ + if ((__flags) & DPAA2_CMD_HIGH_PRIO) { \ + __hdr->flags_hw |= DPAA2_HW_FLAG_HIGH_PRIO; \ + } \ + if ((__flags) & DPAA2_CMD_INTR_DIS) { \ + __hdr->flags_sw |= DPAA2_SW_FLAG_INTR_DIS; \ + } \ + for (__dcpi = 0; __dcpi < DPAA2_CMD_PARAMS_N; __dcpi++) { \ + (__cmd)->params[__dcpi] = 0; \ + } \ +} while (0) +#define DPAA2_CMD_INIT(c) DPAA2_CMD_INIT_FLAGS((c), DPAA2_CMD_DEF) +#define DPAA2_CMD_TK(c, t) dpaa2_mcp_tk((c), (t)) +#define DPAA2_CMD_F(c, f) dpaa2_mcp_f((c), (f)) #endif /* _DPAA2_MCP_H */ diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c index c02ed99f1173..e5b0fe59d14d 100644 --- a/sys/dev/dpaa2/dpaa2_ni.c +++ b/sys/dev/dpaa2/dpaa2_ni.c @@ -1,3669 +1,4258 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * 
Copyright © 2021-2022 Dmitry Salychev * Copyright © 2022 Mathew McBride * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * The DPAA2 Network Interface (DPNI) driver. * * The DPNI object is a network interface that is configurable to support a wide * range of features from a very basic Ethernet interface up to a * high-functioning network interface. The DPNI supports features that are * expected by standard network stacks, from basic features to offloads. * * DPNIs work with Ethernet traffic, starting with the L2 header. Additional * functions are provided for standard network protocols (L2, L3, L4, etc.). 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_acpi.h" #include "opt_platform.h" #include "pcib_if.h" #include "pci_if.h" #include "miibus_if.h" #include "memac_mdio_if.h" #include "dpaa2_types.h" #include "dpaa2_mc.h" #include "dpaa2_mc_if.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_swp_if.h" #include "dpaa2_cmd_if.h" #include "dpaa2_ni.h" #define BIT(x) (1ul << (x)) #define WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0) #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) /* Frame Dequeue Response status bits. */ #define IS_NULL_RESPONSE(stat) ((((stat) >> 4) & 1) == 0) #define ALIGN_UP(x, y) roundup2((x), (y)) #define ALIGN_DOWN(x, y) rounddown2((x), (y)) #define CACHE_LINE_ALIGN(x) ALIGN_UP((x), CACHE_LINE_SIZE) #define DPNI_LOCK(__sc) do { \ mtx_assert(&(__sc)->lock, MA_NOTOWNED); \ mtx_lock(&(__sc)->lock); \ } while (0) #define DPNI_UNLOCK(__sc) do { \ mtx_assert(&(__sc)->lock, MA_OWNED); \ mtx_unlock(&(__sc)->lock); \ } while (0) #define TX_LOCK(__tx) do { \ mtx_assert(&(__tx)->lock, MA_NOTOWNED); \ mtx_lock(&(__tx)->lock); \ } while (0) #define TX_UNLOCK(__tx) do { \ mtx_assert(&(__tx)->lock, MA_OWNED); \ mtx_unlock(&(__tx)->lock); \ } while (0) #define DPAA2_TX_RING(sc, chan, tc) \ (&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)]) #define DPNI_IRQ_INDEX 0 /* Index of the only DPNI IRQ. */ #define DPNI_IRQ_LINK_CHANGED 1 /* Link state changed */ #define DPNI_IRQ_EP_CHANGED 2 /* DPAA2 endpoint dis/connected */ /* Default maximum frame length. */ #define DPAA2_ETH_MFL (ETHER_MAX_LEN - ETHER_CRC_LEN) /* Minimally supported version of the DPNI API. */ #define DPNI_VER_MAJOR 7 #define DPNI_VER_MINOR 0 /* Rx/Tx buffers configuration. 
*/ #define BUF_ALIGN_V1 256 /* WRIOP v1.0.0 limitation */ #define BUF_ALIGN 64 #define BUF_SWA_SIZE 64 /* SW annotation size */ #define BUF_RX_HWA_SIZE 64 /* HW annotation size */ #define BUF_TX_HWA_SIZE 128 /* HW annotation size */ #define BUF_SIZE (MJUM9BYTES) #define BUF_MAXADDR_49BIT 0x1FFFFFFFFFFFFul #define BUF_MAXADDR (BUS_SPACE_MAXADDR) #define DPAA2_TX_BUFRING_SZ (4096u) #define DPAA2_TX_SEGLIMIT (16u) /* arbitrary number */ #define DPAA2_TX_SEG_SZ (4096u) #define DPAA2_TX_SEGS_MAXSZ (DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ) #define DPAA2_TX_SGT_SZ (512u) /* bytes */ /* Size of a buffer to keep a QoS table key configuration. */ #define ETH_QOS_KCFG_BUF_SIZE 256 /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */ #define DPAA2_CLASSIFIER_DMA_SIZE 256 /* Channel storage buffer configuration. */ #define ETH_STORE_FRAMES 16u #define ETH_STORE_SIZE ((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq)) #define ETH_STORE_ALIGN 64u /* Buffers layout options. */ #define BUF_LOPT_TIMESTAMP 0x1 #define BUF_LOPT_PARSER_RESULT 0x2 #define BUF_LOPT_FRAME_STATUS 0x4 #define BUF_LOPT_PRIV_DATA_SZ 0x8 #define BUF_LOPT_DATA_ALIGN 0x10 #define BUF_LOPT_DATA_HEAD_ROOM 0x20 #define BUF_LOPT_DATA_TAIL_ROOM 0x40 #define DPAA2_NI_BUF_ADDR_MASK (0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */ #define DPAA2_NI_BUF_CHAN_MASK (0xFu) #define DPAA2_NI_BUF_CHAN_SHIFT (60) #define DPAA2_NI_BUF_IDX_MASK (0x7FFFu) #define DPAA2_NI_BUF_IDX_SHIFT (49) #define DPAA2_NI_TX_IDX_MASK (0x7u) #define DPAA2_NI_TX_IDX_SHIFT (57) #define DPAA2_NI_TXBUF_IDX_MASK (0xFFu) #define DPAA2_NI_TXBUF_IDX_SHIFT (49) #define DPAA2_NI_FD_FMT_MASK (0x3u) #define DPAA2_NI_FD_FMT_SHIFT (12) #define DPAA2_NI_FD_ERR_MASK (0xFFu) #define DPAA2_NI_FD_ERR_SHIFT (0) #define DPAA2_NI_FD_SL_MASK (0x1u) #define DPAA2_NI_FD_SL_SHIFT (14) #define DPAA2_NI_FD_LEN_MASK (0x3FFFFu) #define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu) /* Enables TCAM for Flow Steering and QoS look-ups. 
*/ #define DPNI_OPT_HAS_KEY_MASKING 0x10 /* Unique IDs for the supported Rx classification header fields. */ #define DPAA2_ETH_DIST_ETHDST BIT(0) #define DPAA2_ETH_DIST_ETHSRC BIT(1) #define DPAA2_ETH_DIST_ETHTYPE BIT(2) #define DPAA2_ETH_DIST_VLAN BIT(3) #define DPAA2_ETH_DIST_IPSRC BIT(4) #define DPAA2_ETH_DIST_IPDST BIT(5) #define DPAA2_ETH_DIST_IPPROTO BIT(6) #define DPAA2_ETH_DIST_L4SRC BIT(7) #define DPAA2_ETH_DIST_L4DST BIT(8) #define DPAA2_ETH_DIST_ALL (~0ULL) /* L3-L4 network traffic flow hash options. */ #define RXH_L2DA (1 << 1) #define RXH_VLAN (1 << 2) #define RXH_L3_PROTO (1 << 3) #define RXH_IP_SRC (1 << 4) #define RXH_IP_DST (1 << 5) #define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ #define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ #define RXH_DISCARD (1 << 31) /* Default Rx hash options, set during attaching. */ #define DPAA2_RXH_DEFAULT (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface"); /* DPAA2 Network Interface resource specification. */ struct resource_spec dpaa2_ni_spec[] = { /* * DPMCP resources. * * NOTE: MC command portals (MCPs) are used to send commands to, and * receive responses from, the MC firmware. One portal per DPNI. */ #define MCP_RES_NUM (1u) #define MCP_RID_OFF (0u) #define MCP_RID(rid) ((rid) + MCP_RID_OFF) /* --- */ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* * DPIO resources (software portals). * * NOTE: One per running core. While DPIOs are the source of data * availability interrupts, the DPCONs are used to identify the * network interface that has produced ingress data to that core. 
*/ #define IO_RES_NUM (16u) #define IO_RID_OFF (MCP_RID_OFF + MCP_RES_NUM) #define IO_RID(rid) ((rid) + IO_RID_OFF) /* --- */ { DPAA2_DEV_IO, IO_RID(0), RF_ACTIVE | RF_SHAREABLE }, { DPAA2_DEV_IO, IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, { DPAA2_DEV_IO, IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* * DPBP resources (buffer pools). * * NOTE: One per network interface. */ #define BP_RES_NUM (1u) #define BP_RID_OFF (IO_RID_OFF + IO_RES_NUM) #define BP_RID(rid) ((rid) + BP_RID_OFF) /* --- */ { DPAA2_DEV_BP, BP_RID(0), RF_ACTIVE }, /* * DPCON resources (channels). * * NOTE: One DPCON per core where Rx or Tx confirmation traffic to be * distributed to. * NOTE: Since it is necessary to distinguish between traffic from * different network interfaces arriving on the same core, the * DPCONs must be private to the DPNIs. 
*/ #define CON_RES_NUM (16u) #define CON_RID_OFF (BP_RID_OFF + BP_RES_NUM) #define CON_RID(rid) ((rid) + CON_RID_OFF) /* --- */ { DPAA2_DEV_CON, CON_RID(0), RF_ACTIVE }, { DPAA2_DEV_CON, CON_RID(1), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(2), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(3), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(4), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(5), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(6), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(7), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(8), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(9), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(10), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(11), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(12), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(13), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(14), RF_ACTIVE | RF_OPTIONAL }, { DPAA2_DEV_CON, CON_RID(15), RF_ACTIVE | RF_OPTIONAL }, /* --- */ RESOURCE_SPEC_END }; /* Supported header fields for Rx hash distribution key */ static const struct dpaa2_eth_dist_fields dist_fields[] = { { /* L2 header */ .rxnfc_field = RXH_L2DA, .cls_prot = NET_PROT_ETH, .cls_field = NH_FLD_ETH_DA, .id = DPAA2_ETH_DIST_ETHDST, .size = 6, }, { .cls_prot = NET_PROT_ETH, .cls_field = NH_FLD_ETH_SA, .id = DPAA2_ETH_DIST_ETHSRC, .size = 6, }, { /* This is the last ethertype field parsed: * depending on frame format, it can be the MAC ethertype * or the VLAN etype. 
*/ .cls_prot = NET_PROT_ETH, .cls_field = NH_FLD_ETH_TYPE, .id = DPAA2_ETH_DIST_ETHTYPE, .size = 2, }, { /* VLAN header */ .rxnfc_field = RXH_VLAN, .cls_prot = NET_PROT_VLAN, .cls_field = NH_FLD_VLAN_TCI, .id = DPAA2_ETH_DIST_VLAN, .size = 2, }, { /* IP header */ .rxnfc_field = RXH_IP_SRC, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_SRC, .id = DPAA2_ETH_DIST_IPSRC, .size = 4, }, { .rxnfc_field = RXH_IP_DST, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_DST, .id = DPAA2_ETH_DIST_IPDST, .size = 4, }, { .rxnfc_field = RXH_L3_PROTO, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_PROTO, .id = DPAA2_ETH_DIST_IPPROTO, .size = 1, }, { /* Using UDP ports, this is functionally equivalent to raw * byte pairs from L4 header. */ .rxnfc_field = RXH_L4_B_0_1, .cls_prot = NET_PROT_UDP, .cls_field = NH_FLD_UDP_PORT_SRC, .id = DPAA2_ETH_DIST_L4SRC, .size = 2, }, { .rxnfc_field = RXH_L4_B_2_3, .cls_prot = NET_PROT_UDP, .cls_field = NH_FLD_UDP_PORT_DST, .id = DPAA2_ETH_DIST_L4DST, .size = 2, }, }; static struct dpni_stat { int page; int cnt; char *name; char *desc; } dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = { /* PAGE, COUNTER, NAME, DESCRIPTION */ { 0, 0, "in_all_frames", "All accepted ingress frames" }, { 0, 1, "in_all_bytes", "Bytes in all accepted ingress frames" }, { 0, 2, "in_multi_frames", "Multicast accepted ingress frames" }, { 1, 0, "eg_all_frames", "All egress frames transmitted" }, { 1, 1, "eg_all_bytes", "Bytes in all frames transmitted" }, { 1, 2, "eg_multi_frames", "Multicast egress frames transmitted" }, { 2, 0, "in_filtered_frames", "All ingress frames discarded due to " "filtering" }, { 2, 1, "in_discarded_frames", "All frames discarded due to errors" }, { 2, 2, "in_nobuf_discards", "Discards on ingress side due to buffer " "depletion in DPNI buffer pools" }, }; /* Device interface */ static int dpaa2_ni_probe(device_t); static int dpaa2_ni_attach(device_t); static int dpaa2_ni_detach(device_t); /* DPAA2 network interface setup and configuration */ static int 
dpaa2_ni_setup(device_t); static int dpaa2_ni_setup_channels(device_t); static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *, enum dpaa2_ni_queue_type); static int dpaa2_ni_bind(device_t); static int dpaa2_ni_setup_rx_dist(device_t); static int dpaa2_ni_setup_irqs(device_t); static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *); static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *); static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *); static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *); static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *); /* Tx/Rx flow configuration */ -static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_cmd *, - struct dpaa2_ni_fq *); -static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_cmd *, - struct dpaa2_ni_fq *); -static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_cmd *, - struct dpaa2_ni_fq *); +static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *); +static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *); +static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *); /* Configuration subroutines */ -static int dpaa2_ni_set_buf_layout(device_t, struct dpaa2_cmd *); -static int dpaa2_ni_set_pause_frame(device_t, struct dpaa2_cmd *); -static int dpaa2_ni_set_qos_table(device_t, struct dpaa2_cmd *); -static int dpaa2_ni_set_mac_addr(device_t, struct dpaa2_cmd *, uint16_t, - uint16_t); +static int dpaa2_ni_set_buf_layout(device_t); +static int dpaa2_ni_set_pause_frame(device_t); +static int dpaa2_ni_set_qos_table(device_t); +static int dpaa2_ni_set_mac_addr(device_t); static int dpaa2_ni_set_hash(device_t, uint64_t); static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t); /* Buffers and buffer pools */ static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t); static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int); static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int); 
static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *, struct dpaa2_ni_channel *); /* Frame descriptor routines */ static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *, struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *); static int dpaa2_ni_fd_err(struct dpaa2_fd *); static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *); static int dpaa2_ni_fd_chan_idx(struct dpaa2_fd *); static int dpaa2_ni_fd_buf_idx(struct dpaa2_fd *); static int dpaa2_ni_fd_tx_idx(struct dpaa2_fd *); static int dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *); static int dpaa2_ni_fd_format(struct dpaa2_fd *); static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *); static int dpaa2_ni_fd_offset(struct dpaa2_fd *); /* Various subroutines */ static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t); static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *); static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *, struct dpaa2_dq **); /* Network interface routines */ static void dpaa2_ni_init(void *); static int dpaa2_ni_transmit(if_t , struct mbuf *); static void dpaa2_ni_qflush(if_t ); static int dpaa2_ni_ioctl(if_t , u_long, caddr_t); static int dpaa2_ni_update_mac_filters(if_t ); static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int); /* Interrupt handlers */ static void dpaa2_ni_intr(void *); /* MII handlers */ static void dpaa2_ni_miibus_statchg(device_t); static int dpaa2_ni_media_change(if_t ); static void dpaa2_ni_media_status(if_t , struct ifmediareq *); static void dpaa2_ni_media_tick(void *); /* DMA mapping callback */ static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int); /* Tx/Rx routines. 
*/ static void dpaa2_ni_poll(void *); static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *, struct mbuf *); static void dpaa2_ni_bp_task(void *, int); /* Tx/Rx subroutines */ static int dpaa2_ni_consume_frames(struct dpaa2_ni_channel *, struct dpaa2_ni_fq **, uint32_t *); static int dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *, struct dpaa2_fd *); static int dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *, struct dpaa2_fd *); static int dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *, struct dpaa2_fd *); /* sysctl(9) */ static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS); static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS); static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS); static int dpaa2_ni_probe(device_t dev) { /* DPNI device will be added by a parent resource container itself. */ device_set_desc(dev, "DPAA2 Network Interface"); return (BUS_PROBE_DEFAULT); } static int dpaa2_ni_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; device_t mcp_dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *mcp_dinfo; + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; if_t ifp; char tq_name[32]; int error; sc->dev = dev; sc->ifp = NULL; sc->miibus = NULL; sc->mii = NULL; sc->media_status = 0; sc->if_flags = 0; sc->link_state = LINK_STATE_UNKNOWN; sc->buf_align = 0; /* For debug purposes only! 
*/ sc->rx_anomaly_frames = 0; sc->rx_single_buf_frames = 0; sc->rx_sg_buf_frames = 0; sc->rx_enq_rej_frames = 0; sc->rx_ieoi_err_frames = 0; sc->tx_single_buf_frames = 0; sc->tx_sg_frames = 0; DPAA2_ATOMIC_XCHG(&sc->buf_num, 0); DPAA2_ATOMIC_XCHG(&sc->buf_free, 0); sc->bp_dmat = NULL; sc->st_dmat = NULL; sc->rxd_dmat = NULL; sc->qos_dmat = NULL; sc->qos_kcfg.type = DPAA2_BUF_STORE; sc->qos_kcfg.store.dmap = NULL; sc->qos_kcfg.store.paddr = 0; sc->qos_kcfg.store.vaddr = NULL; sc->rxd_kcfg.type = DPAA2_BUF_STORE; sc->rxd_kcfg.store.dmap = NULL; sc->rxd_kcfg.store.paddr = 0; sc->rxd_kcfg.store.vaddr = NULL; sc->mac.dpmac_id = 0; sc->mac.phy_dev = NULL; memset(sc->mac.addr, 0, ETHER_ADDR_LEN); error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources: " "error=%d\n", __func__, error); - return (ENXIO); + goto err_exit; } /* Obtain MC portal. */ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); mcp_dinfo = device_get_ivars(mcp_dev); dinfo->portal = mcp_dinfo->portal; mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF); /* Allocate network interface */ ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "%s: failed to allocate network interface\n", __func__); - return (ENXIO); + goto err_exit; } sc->ifp = ifp; if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev)); if_setsoftc(ifp, sc); if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST); if_setinitfn(ifp, dpaa2_ni_init); if_setioctlfn(ifp, dpaa2_ni_ioctl); if_settransmitfn(ifp, dpaa2_ni_transmit); if_setqflushfn(ifp, dpaa2_ni_qflush); if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU); if_setcapenable(ifp, if_getcapabilities(ifp)); - /* Allocate a command to send to MC hardware. 
*/ - error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF); - if (error) { - device_printf(dev, "%s: failed to allocate dpaa2_cmd: " - "error=%d\n", __func__, error); - goto err_exit; - } + DPAA2_CMD_INIT(&cmd); /* Open resource container and network interface object. */ - error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id, - &sc->rc_token); + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); - goto err_free_cmd; + goto err_exit; } - error = DPAA2_CMD_NI_OPEN(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->rc_token), dinfo->id, &sc->ni_token); + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); - goto err_close_rc; + goto close_rc; } + /* + * XXX-DSL: Release new buffers on Buffer Pool State Change Notification + * (BPSCN) returned as a result to the VDQ command instead. + * It is similar to CDAN processed in dpaa2_io_intr(). + */ /* Create a taskqueue thread to release new buffers to the pool. 
*/ TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc); bzero(tq_name, sizeof (tq_name)); snprintf(tq_name, sizeof (tq_name), "%s_tqbp", device_get_nameunit(dev)); sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK, taskqueue_thread_enqueue, &sc->bp_taskq); if (sc->bp_taskq == NULL) { device_printf(dev, "%s: failed to allocate task queue: %s\n", __func__, tq_name); - goto err_close_ni; + goto close_ni; } taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name); error = dpaa2_ni_setup(dev); if (error) { device_printf(dev, "%s: failed to setup DPNI: error=%d\n", __func__, error); - goto err_close_ni; + goto close_ni; } error = dpaa2_ni_setup_channels(dev); if (error) { device_printf(dev, "%s: failed to setup QBMan channels: " "error=%d\n", __func__, error); - goto err_close_ni; + goto close_ni; } error = dpaa2_ni_bind(dev); if (error) { device_printf(dev, "%s: failed to bind DPNI: error=%d\n", __func__, error); - goto err_close_ni; + goto close_ni; } error = dpaa2_ni_setup_irqs(dev); if (error) { device_printf(dev, "%s: failed to setup IRQs: error=%d\n", __func__, error); - goto err_close_ni; + goto close_ni; } error = dpaa2_ni_setup_sysctls(sc); if (error) { device_printf(dev, "%s: failed to setup sysctls: error=%d\n", __func__, error); - goto err_close_ni; + goto close_ni; } ether_ifattach(sc->ifp, sc->mac.addr); callout_init(&sc->mii_callout, 0); return (0); -err_close_ni: - DPAA2_CMD_NI_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->ni_token)); -err_close_rc: - DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token)); -err_free_cmd: - dpaa2_mcp_free_command(sc->cmd); +close_ni: + DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (ENXIO); } static void dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq* ifmr) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); DPNI_LOCK(sc); ifmr->ifm_count = 0; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_AVALID | 
IFM_ACTIVE; ifmr->ifm_current = ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media; /* * In non-PHY usecases, we need to signal link state up, otherwise * certain things requiring a link event (e.g async DHCP client) from * devd do not happen. */ if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) { if_link_state_change(ifp, LINK_STATE_UP); } /* * TODO: Check the status of the link partner (DPMAC, DPNI or other) and * reset if down. This is different to the DPAA2_MAC_LINK_TYPE_PHY as * the MC firmware sets the status, instead of us telling the MC what * it is. */ DPNI_UNLOCK(sc); return; } static void dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc) { /* * FIXME: When the DPNI is connected to a DPMAC, we can get the * 'apparent' speed from it. */ sc->fixed_link = true; ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change, dpaa2_ni_fixed_media_status); ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T); } static int dpaa2_ni_detach(device_t dev) { - device_t child = dev; - struct dpaa2_ni_softc *sc = device_get_softc(dev); - - DPAA2_CMD_NI_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->ni_token)); - DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token)); - dpaa2_mcp_free_command(sc->cmd); - - sc->cmd = NULL; - sc->ni_token = 0; - sc->rc_token = 0; - + /* TBD */ return (0); } /** * @brief Configure DPAA2 network interface object. 
*/ static int dpaa2_ni_setup(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */ - struct dpaa2_cmd *cmd = sc->cmd; + struct dpaa2_cmd cmd; uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */ - uint16_t rc_token = sc->rc_token; - uint16_t ni_token = sc->ni_token; - uint16_t mac_token; + uint16_t rc_token, ni_token, mac_token; struct dpaa2_mac_attr attr; enum dpaa2_mac_link_type link_type; uint32_t link; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* Check if we can work with this DPNI object. */ - error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, dpaa2_mcp_tk(cmd, - ni_token), &sc->api_major, &sc->api_minor); + error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major, + &sc->api_minor); if (error) { device_printf(dev, "%s: failed to get DPNI API version\n", __func__); - return (error); + goto close_ni; } if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { device_printf(dev, "%s: DPNI API version %u.%u not supported, " "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor, DPNI_VER_MAJOR, DPNI_VER_MINOR); error = ENODEV; - return (error); + goto close_ni; } /* Reset the DPNI object. 
*/ - error = DPAA2_CMD_NI_RESET(dev, child, cmd); + error = DPAA2_CMD_NI_RESET(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to reset DPNI: id=%d\n", __func__, dinfo->id); - return (error); + goto close_ni; } /* Obtain attributes of the DPNI object. */ - error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, cmd, &sc->attr); + error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); if (error) { device_printf(dev, "%s: failed to obtain DPNI attributes: " "id=%d\n", __func__, dinfo->id); - return (error); + goto close_ni; } if (bootverbose) { - device_printf(dev, "options=0x%#x queues=%d tx_channels=%d " + device_printf(dev, "\toptions=0x%#x queues=%d tx_channels=%d " "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues, sc->attr.num.channels, sc->attr.wriop_ver); device_printf(dev, "\ttraffic classes: rx=%d tx=%d " "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs, sc->attr.num.cgs); device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d " "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan, sc->attr.entries.qos, sc->attr.entries.fs); device_printf(dev, "\tkey sizes: qos=%d fs=%d\n", sc->attr.key_size.qos, sc->attr.key_size.fs); } /* Configure buffer layouts of the DPNI queues. */ - error = dpaa2_ni_set_buf_layout(dev, cmd); + error = dpaa2_ni_set_buf_layout(dev); if (error) { device_printf(dev, "%s: failed to configure buffer layout\n", __func__); - return (error); + goto close_ni; } /* Configure DMA resources. */ error = dpaa2_ni_setup_dma(sc); if (error) { device_printf(dev, "%s: failed to setup DMA\n", __func__); - return (error); + goto close_ni; } /* Setup link between DPNI and an object it's connected to. 
*/ ep1_desc.obj_id = dinfo->id; ep1_desc.if_id = 0; /* DPNI has the only endpoint */ ep1_desc.type = dinfo->dtype; - error = DPAA2_CMD_RC_GET_CONN(dev, child, dpaa2_mcp_tk(cmd, rc_token), + error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token), &ep1_desc, &ep2_desc, &link); - if (error) + if (error) { device_printf(dev, "%s: failed to obtain an object DPNI is " "connected to: error=%d\n", __func__, error); - else { + } else { device_printf(dev, "connected to %s (id=%d)\n", dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id); - error = dpaa2_ni_set_mac_addr(dev, cmd, rc_token, ni_token); - if (error) - device_printf(dev, "%s: failed to set MAC " - "address: error=%d\n", __func__, error); + error = dpaa2_ni_set_mac_addr(dev); + if (error) { + device_printf(dev, "%s: failed to set MAC address: " + "error=%d\n", __func__, error); + } if (ep2_desc.type == DPAA2_DEV_MAC) { /* * This is the simplest case when DPNI is connected to * DPMAC directly. */ sc->mac.dpmac_id = ep2_desc.obj_id; link_type = DPAA2_MAC_LINK_TYPE_NONE; /* * Need to determine if DPMAC type is PHY (attached to * conventional MII PHY) or FIXED (usually SFP/SerDes, * link state managed by MC firmware). */ error = DPAA2_CMD_MAC_OPEN(sc->dev, child, - dpaa2_mcp_tk(sc->cmd, sc->rc_token), - sc->mac.dpmac_id, &mac_token); + DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id, + &mac_token); /* * Under VFIO, the DPMAC might be sitting in another * container (DPRC) we don't have access to. * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is * the case. 
*/ if (error) { device_printf(dev, "%s: failed to open " "connected DPMAC: %d (assuming in other DPRC)\n", __func__, sc->mac.dpmac_id); link_type = DPAA2_MAC_LINK_TYPE_FIXED; } else { error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child, - sc->cmd, &attr); - if (error) + &cmd, &attr); + if (error) { device_printf(dev, "%s: failed to get " "DPMAC attributes: id=%d, " "error=%d\n", __func__, dinfo->id, error); - else + } else { link_type = attr.link_type; + } } - DPAA2_CMD_MAC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, - mac_token)); + DPAA2_CMD_MAC_CLOSE(dev, child, &cmd); if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) { device_printf(dev, "connected DPMAC is in FIXED " "mode\n"); dpaa2_ni_setup_fixed_link(sc); } else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) { device_printf(dev, "connected DPMAC is in PHY " "mode\n"); error = DPAA2_MC_GET_PHY_DEV(dev, &sc->mac.phy_dev, sc->mac.dpmac_id); if (error == 0) { error = MEMAC_MDIO_SET_NI_DEV( sc->mac.phy_dev, dev); - if (error != 0) + if (error != 0) { device_printf(dev, "%s: failed " "to set dpni dev on memac " "mdio dev %s: error=%d\n", __func__, device_get_nameunit( sc->mac.phy_dev), error); + } } if (error == 0) { error = MEMAC_MDIO_GET_PHY_LOC( sc->mac.phy_dev, &sc->mac.phy_loc); - if (error == ENODEV) + if (error == ENODEV) { error = 0; - if (error != 0) + } + if (error != 0) { device_printf(dev, "%s: failed " "to get phy location from " "memac mdio dev %s: error=%d\n", __func__, device_get_nameunit( sc->mac.phy_dev), error); + } } if (error == 0) { error = mii_attach(sc->mac.phy_dev, &sc->miibus, sc->ifp, dpaa2_ni_media_change, dpaa2_ni_media_status, BMSR_DEFCAPMASK, sc->mac.phy_loc, MII_OFFSET_ANY, 0); - if (error != 0) + if (error != 0) { device_printf(dev, "%s: failed " "to attach to miibus: " "error=%d\n", __func__, error); + } } - if (error == 0) + if (error == 0) { sc->mii = device_get_softc(sc->miibus); + } } else { device_printf(dev, "%s: DPMAC link type is not " "supported\n", __func__); } } else if (ep2_desc.type == 
DPAA2_DEV_NI || ep2_desc.type == DPAA2_DEV_MUX || ep2_desc.type == DPAA2_DEV_SW) { dpaa2_ni_setup_fixed_link(sc); } } /* Select mode to enqueue frames. */ /* ... TBD ... */ /* * Update link configuration to enable Rx/Tx pause frames support. * * NOTE: MC may generate an interrupt to the DPMAC and request changes * in link configuration. It might be necessary to attach miibus * and PHY before this point. */ - error = dpaa2_ni_set_pause_frame(dev, dpaa2_mcp_tk(cmd, ni_token)); + error = dpaa2_ni_set_pause_frame(dev); if (error) { device_printf(dev, "%s: failed to configure Rx/Tx pause " "frames\n", __func__); - return (error); + goto close_ni; } /* Configure ingress traffic classification. */ - error = dpaa2_ni_set_qos_table(dev, dpaa2_mcp_tk(cmd, ni_token)); - if (error) + error = dpaa2_ni_set_qos_table(dev); + if (error) { device_printf(dev, "%s: failed to configure QoS table: " "error=%d\n", __func__, error); + goto close_ni; + } /* Add broadcast physical address to the MAC filtering table. */ memset(eth_bca, 0xff, ETHER_ADDR_LEN); - error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, cmd, eth_bca); + error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd, + ni_token), eth_bca); if (error) { device_printf(dev, "%s: failed to add broadcast physical " "address to the MAC filtering table\n", __func__); - return (error); + goto close_ni; } /* Set the maximum allowed length for received frames. 
*/ - error = DPAA2_CMD_NI_SET_MFL(dev, child, cmd, DPAA2_ETH_MFL); + error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL); if (error) { device_printf(dev, "%s: failed to set maximum length for " "received frames\n", __func__); - return (error); + goto close_ni; } + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } /** * @brief Сonfigure QBMan channels and register data availability notifications. */ static int dpaa2_ni_setup_channels(device_t dev) { + device_t pdev = device_get_parent(dev); + device_t child = dev; + device_t io_dev, con_dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); - struct dpaa2_con_softc *consc; - struct dpaa2_devinfo *io_info, *con_info; - device_t io_dev, con_dev, child = dev; struct dpaa2_ni_channel *channel; - struct dpaa2_io_notif_ctx *ctx; + struct dpaa2_con_softc *consc; struct dpaa2_con_notif_cfg notif_cfg; + struct dpaa2_devinfo *rc_info = device_get_ivars(pdev); + struct dpaa2_devinfo *io_info; + struct dpaa2_devinfo *con_info; + struct dpaa2_io_notif_ctx *ctx; struct dpaa2_buf *buf; - int error; + struct dpaa2_cmd cmd; struct sysctl_ctx_list *sysctl_ctx; struct sysctl_oid *node; struct sysctl_oid_list *parent; uint32_t i, num_chan; + uint16_t rc_token, con_token; + int error; /* Calculate number of the channels based on the allocated resources. */ - for (i = 0; i < IO_RES_NUM; i++) - if (!sc->res[IO_RID(i)]) + for (i = 0; i < IO_RES_NUM; i++) { + if (!sc->res[IO_RID(i)]) { break; + } + } num_chan = i; - for (i = 0; i < CON_RES_NUM; i++) - if (!sc->res[CON_RID(i)]) + for (i = 0; i < CON_RES_NUM; i++) { + if (!sc->res[CON_RID(i)]) { break; + } + } num_chan = i < num_chan ? i : num_chan; - - /* Limit maximum channels. 
*/ sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS ? DPAA2_NI_MAX_CHANNELS : num_chan; - - /* Limit channels by number of the queues. */ sc->chan_n = sc->chan_n > sc->attr.num.queues ? sc->attr.num.queues : sc->chan_n; device_printf(dev, "channels=%d\n", sc->chan_n); sysctl_ctx = device_get_sysctl_ctx(sc->dev); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); - node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels"); parent = SYSCTL_CHILDREN(node); /* Setup channels for the portal. */ for (uint32_t i = 0; i < sc->chan_n; i++) { - /* Select software portal. */ io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]); io_info = device_get_ivars(io_dev); - /* Select DPCON (channel). */ con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]); consc = device_get_softc(con_dev); con_info = device_get_ivars(con_dev); - /* Enable selected channel. */ - error = DPAA2_CMD_CON_ENABLE(dev, child, dpaa2_mcp_tk(consc->cmd, - consc->con_token)); + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_info->id, + &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource " + "container: id=%d, error=%d\n", __func__, + rc_info->id, error); + return (error); + } + error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, con_info->id, + &con_token); + if (error) { + device_printf(dev, "%s: failed to open DPCON: id=%d, " + "error=%d\n", __func__, con_info->id, error); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); + return (error); + } + + error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to enable channel: " "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id, consc->attr.chan_id); + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); return (error); } channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI, M_WAITOK | M_ZERO); if (!channel) { 
device_printf(dev, "%s: failed to allocate a channel\n", __func__); + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); return (ENOMEM); } sc->channels[i] = channel; channel->id = consc->attr.chan_id; channel->flowid = i; channel->ni_dev = dev; channel->io_dev = io_dev; channel->con_dev = con_dev; channel->recycled_n = 0; + channel->tx_frames = 0; /* for debug purposes */ + channel->tx_dropped = 0; /* for debug purposes */ + channel->rxq_n = 0; buf = &channel->store; buf->type = DPAA2_BUF_STORE; buf->store.dmat = NULL; buf->store.dmap = NULL; buf->store.paddr = 0; buf->store.vaddr = NULL; - /* For debug purposes only! */ - channel->tx_frames = 0; - channel->tx_dropped = 0; - - /* None of the frame queues for this channel configured yet. */ - channel->rxq_n = 0; - /* Setup WQ channel notification context. */ ctx = &channel->ctx; ctx->qman_ctx = (uint64_t) ctx; ctx->cdan_en = true; ctx->fq_chan_id = channel->id; ctx->io_dev = channel->io_dev; ctx->channel = channel; ctx->poll = dpaa2_ni_poll; /* Register the new notification context. */ error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx); if (error) { device_printf(dev, "%s: failed to register notification " "context\n", __func__); + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); return (error); } /* Register DPCON notification with Management Complex. 
*/ notif_cfg.dpio_id = io_info->id; notif_cfg.prior = 0; notif_cfg.qman_ctx = ctx->qman_ctx; - error = DPAA2_CMD_CON_SET_NOTIF(dev, child, dpaa2_mcp_tk( - consc->cmd, consc->con_token), ¬if_cfg); + error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, ¬if_cfg); if (error) { device_printf(dev, "%s: failed to set DPCON " "notification: dpcon_id=%d, chan_id=%d\n", __func__, con_info->id, consc->attr.chan_id); + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); return (error); } /* Allocate initial # of Rx buffers and a channel storage. */ error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT); if (error) { device_printf(dev, "%s: failed to seed buffer pool\n", __func__); + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); return (error); } error = dpaa2_ni_seed_chan_storage(sc, channel); if (error) { device_printf(dev, "%s: failed to seed channel " "storage\n", __func__); + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); return (error); } /* Prepare queues for this channel. 
*/ error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF); if (error) { device_printf(dev, "%s: failed to prepare TxConf " "queue: error=%d\n", __func__, error); + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); return (error); } error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX); if (error) { device_printf(dev, "%s: failed to prepare Rx queue: " "error=%d\n", __func__, error); + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); return (error); } - if (bootverbose) + if (bootverbose) { device_printf(dev, "channel: dpio_id=%d " "dpcon_id=%d chan_id=%d, priorities=%d\n", io_info->id, con_info->id, channel->id, consc->attr.prior_num); + } + + (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); } /* There is exactly one Rx error queue per DPNI. */ error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR); if (error) { device_printf(dev, "%s: failed to prepare RxError queue: " "error=%d\n", __func__, error); return (error); } return (0); } /** * @brief Performs an initial configuration of the frame queues. */ static int dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan, enum dpaa2_ni_queue_type queue_type) { struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_ni_fq *fq; switch (queue_type) { case DPAA2_NI_QUEUE_TX_CONF: /* One queue per channel. */ fq = &chan->txc_queue; fq->consume = dpaa2_ni_tx_conf; fq->chan = chan; fq->flowid = chan->flowid; fq->tc = 0; /* ignored */ fq->type = queue_type; break; case DPAA2_NI_QUEUE_RX: KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS, ("too many Rx traffic classes: rx_tcs=%d\n", sc->attr.num.rx_tcs)); /* One queue per Rx traffic class within a channel. 
*/ for (int i = 0; i < sc->attr.num.rx_tcs; i++) { fq = &chan->rx_queues[i]; fq->consume = dpaa2_ni_rx; fq->chan = chan; fq->flowid = chan->flowid; fq->tc = (uint8_t) i; fq->type = queue_type; chan->rxq_n++; } break; case DPAA2_NI_QUEUE_RX_ERR: /* One queue per network interface. */ fq = &sc->rxe_queue; fq->consume = dpaa2_ni_rx_err; fq->chan = chan; fq->flowid = 0; /* ignored */ fq->tc = 0; /* ignored */ fq->type = queue_type; break; default: device_printf(dev, "%s: unexpected frame queue type: %d\n", __func__, queue_type); return (EINVAL); } return (0); } /** * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels. */ static int dpaa2_ni_bind(device_t dev) { - device_t bp_dev, child = dev; + device_t pdev = device_get_parent(dev); + device_t child = dev; + device_t bp_dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *bp_info; - struct dpaa2_cmd *cmd = sc->cmd; + struct dpaa2_cmd cmd; struct dpaa2_ni_pools_cfg pools_cfg; struct dpaa2_ni_err_cfg err_cfg; struct dpaa2_ni_channel *chan; - uint16_t ni_token = sc->ni_token; + uint16_t rc_token, ni_token; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* Select buffer pool (only one available at the moment). */ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); bp_info = device_get_ivars(bp_dev); /* Configure buffers pool. 
*/ pools_cfg.pools_num = 1; pools_cfg.pools[0].bp_obj_id = bp_info->id; pools_cfg.pools[0].backup_flag = 0; pools_cfg.pools[0].buf_sz = sc->buf_sz; - error = DPAA2_CMD_NI_SET_POOLS(dev, child, dpaa2_mcp_tk(cmd, ni_token), - &pools_cfg); + error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg); if (error) { device_printf(dev, "%s: failed to set buffer pools\n", __func__); - return (error); + goto close_ni; } /* Setup ingress traffic distribution. */ error = dpaa2_ni_setup_rx_dist(dev); if (error && error != EOPNOTSUPP) { device_printf(dev, "%s: failed to setup ingress traffic " "distribution\n", __func__); - return (error); + goto close_ni; } - if (bootverbose && error == EOPNOTSUPP) + if (bootverbose && error == EOPNOTSUPP) { device_printf(dev, "Ingress traffic distribution not " "supported\n"); + } /* Configure handling of error frames. */ err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK; err_cfg.set_err_fas = false; err_cfg.action = DPAA2_NI_ERR_DISCARD; - error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, cmd, &err_cfg); + error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg); if (error) { device_printf(dev, "%s: failed to set errors behavior\n", __func__); - return (error); + goto close_ni; } /* Configure channel queues to generate CDANs. */ for (uint32_t i = 0; i < sc->chan_n; i++) { chan = sc->channels[i]; /* Setup Rx flows. */ for (uint32_t j = 0; j < chan->rxq_n; j++) { - error = dpaa2_ni_setup_rx_flow(dev, cmd, - &chan->rx_queues[j]); + error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]); if (error) { device_printf(dev, "%s: failed to setup Rx " "flow: error=%d\n", __func__, error); - return (error); + goto close_ni; } } /* Setup Tx flow. */ - error = dpaa2_ni_setup_tx_flow(dev, cmd, &chan->txc_queue); + error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue); if (error) { device_printf(dev, "%s: failed to setup Tx " "flow: error=%d\n", __func__, error); - return (error); + goto close_ni; } } /* Configure RxError queue to generate CDAN. 
*/ - error = dpaa2_ni_setup_rx_err_flow(dev, cmd, &sc->rxe_queue); + error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue); if (error) { device_printf(dev, "%s: failed to setup RxError flow: " "error=%d\n", __func__, error); - return (error); + goto close_ni; } /* * Get the Queuing Destination ID (QDID) that should be used for frame * enqueue operations. */ - error = DPAA2_CMD_NI_GET_QDID(dev, child, cmd, DPAA2_NI_QUEUE_TX, + error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX, &sc->tx_qdid); if (error) { device_printf(dev, "%s: failed to get Tx queuing destination " "ID\n", __func__); - return (error); + goto close_ni; } + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } /** * @brief Setup ingress traffic distribution. * * NOTE: Ingress traffic distribution is valid only when DPNI_OPT_NO_FS option * hasn't been set for DPNI and a number of DPNI queues > 1. */ static int dpaa2_ni_setup_rx_dist(device_t dev) { /* * Have the interface implicitly distribute traffic based on the default * hash key. 
*/ return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT)); } static int -dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_cmd *cmd, - struct dpaa2_ni_fq *fq) +dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq) { + device_t pdev = device_get_parent(dev); device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *con_info; + struct dpaa2_cmd cmd; struct dpaa2_ni_queue_cfg queue_cfg = {0}; + uint16_t rc_token, ni_token; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* Obtain DPCON associated with the FQ's channel. 
*/ con_info = device_get_ivars(fq->chan->con_dev); queue_cfg.type = DPAA2_NI_QUEUE_RX; queue_cfg.tc = fq->tc; queue_cfg.idx = fq->flowid; - error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg); + error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to obtain Rx queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); - return (error); + goto close_ni; } fq->fqid = queue_cfg.fqid; queue_cfg.dest_id = con_info->id; queue_cfg.dest_type = DPAA2_NI_DEST_DPCON; queue_cfg.priority = 1; queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq; queue_cfg.options = DPAA2_NI_QUEUE_OPT_USER_CTX | DPAA2_NI_QUEUE_OPT_DEST; - error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg); + error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to update Rx queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); - return (error); + goto close_ni; } if (bootverbose) { device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, " "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id, fq->fqid, (uint64_t) fq); } + (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } static int -dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_cmd *cmd, - struct dpaa2_ni_fq *fq) +dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq) { + device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *con_info; struct dpaa2_ni_queue_cfg queue_cfg = {0}; struct dpaa2_ni_tx_ring *tx; struct dpaa2_buf 
*buf; + struct dpaa2_cmd cmd; uint32_t tx_rings_n = 0; + uint16_t rc_token, ni_token; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* Obtain DPCON associated with the FQ's channel. */ con_info = device_get_ivars(fq->chan->con_dev); KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS, ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__, sc->attr.num.tx_tcs)); KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX, ("%s: too many Tx buffers (%d): max=%d\n", __func__, DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX)); /* Setup Tx rings. */ for (int i = 0; i < sc->attr.num.tx_tcs; i++) { queue_cfg.type = DPAA2_NI_QUEUE_TX; queue_cfg.tc = i; queue_cfg.idx = fq->flowid; queue_cfg.chan_id = fq->chan->id; - error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg); + error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to obtain Tx queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); - return (error); + goto close_ni; } tx = &fq->tx_rings[i]; tx->fq = fq; tx->fqid = queue_cfg.fqid; tx->txid = tx_rings_n; if (bootverbose) { device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, " "fqid=%d\n", fq->flowid, i, fq->chan->id, queue_cfg.fqid); } mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF); /* Allocate Tx ring buffer. 
*/ tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT, &tx->lock); if (tx->idx_br == NULL) { device_printf(dev, "%s: failed to setup Tx ring buffer" " (2) fqid=%d\n", __func__, tx->fqid); - return (ENOMEM); + goto close_ni; } /* Configure Tx buffers. */ for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) { buf = &tx->buf[j]; buf->type = DPAA2_BUF_TX; buf->tx.dmat = buf->tx.sgt_dmat = NULL; buf->tx.dmap = buf->tx.sgt_dmap = NULL; buf->tx.paddr = buf->tx.sgt_paddr = 0; buf->tx.vaddr = buf->tx.sgt_vaddr = NULL; buf->tx.m = NULL; buf->tx.idx = 0; error = dpaa2_ni_seed_txbuf(sc, buf, j); /* Add index of the Tx buffer to the ring. */ buf_ring_enqueue(tx->idx_br, (void *) j); } tx_rings_n++; } /* All Tx queues which belong to the same flowid have the same qdbin. */ fq->tx_qdbin = queue_cfg.qdbin; queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF; queue_cfg.tc = 0; /* ignored for TxConf queue */ queue_cfg.idx = fq->flowid; - error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg); + error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to obtain TxConf queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); - return (error); + goto close_ni; } fq->fqid = queue_cfg.fqid; queue_cfg.dest_id = con_info->id; queue_cfg.dest_type = DPAA2_NI_DEST_DPCON; queue_cfg.priority = 0; queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq; queue_cfg.options = DPAA2_NI_QUEUE_OPT_USER_CTX | DPAA2_NI_QUEUE_OPT_DEST; - error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg); + error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to update TxConf queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); - return (error); + goto close_ni; } + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_ni: + 
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } static int -dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_cmd *cmd, - struct dpaa2_ni_fq *fq) +dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq) { + device_t pdev = device_get_parent(dev); device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *con_info; struct dpaa2_ni_queue_cfg queue_cfg = {0}; + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* Obtain DPCON associated with the FQ's channel. 
*/ con_info = device_get_ivars(fq->chan->con_dev); queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR; queue_cfg.tc = fq->tc; /* ignored */ queue_cfg.idx = fq->flowid; /* ignored */ - error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg); + error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to obtain RxErr queue " "configuration\n", __func__); - return (error); + goto close_ni; } fq->fqid = queue_cfg.fqid; queue_cfg.dest_id = con_info->id; queue_cfg.dest_type = DPAA2_NI_DEST_DPCON; queue_cfg.priority = 1; queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq; queue_cfg.options = DPAA2_NI_QUEUE_OPT_USER_CTX | DPAA2_NI_QUEUE_OPT_DEST; - error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg); + error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to update RxErr queue " "configuration\n", __func__); - return (error); + goto close_ni; } + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } /** * @brief Configure DPNI object to generate interrupts. 
*/ static int dpaa2_ni_setup_irqs(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); - struct dpaa2_cmd *cmd = sc->cmd; - uint16_t ni_token = sc->ni_token; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* Configure IRQs. */ error = dpaa2_ni_setup_msi(sc); if (error) { device_printf(dev, "%s: failed to allocate MSI\n", __func__); - return (error); + goto close_ni; } if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { device_printf(dev, "%s: failed to allocate IRQ resource\n", __func__); - return (ENXIO); + goto close_ni; } if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, dpaa2_ni_intr, sc, &sc->intr)) { device_printf(dev, "%s: failed to setup IRQ resource\n", __func__); - return (ENXIO); + goto close_ni; } - /* Configure DPNI to generate interrupts. */ - error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, dpaa2_mcp_tk(cmd, - ni_token), DPNI_IRQ_INDEX, + error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX, DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED); if (error) { device_printf(dev, "%s: failed to set DPNI IRQ mask\n", __func__); - return (error); + goto close_ni; } - /* Enable IRQ. 
*/ - error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, cmd, DPNI_IRQ_INDEX, + error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX, true); if (error) { device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__); - return (error); + goto close_ni; } + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } /** * @brief Allocate MSI interrupts for DPNI. */ static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc) { int val; val = pci_msi_count(sc->dev); if (val < DPAA2_NI_MSI_COUNT) device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val, DPAA2_IO_MSI_COUNT); val = MIN(val, DPAA2_NI_MSI_COUNT); if (pci_alloc_msi(sc->dev, &val) != 0) return (EINVAL); for (int i = 0; i < val; i++) sc->irq_rid[i] = i + 1; return (0); } /** * @brief Update DPNI according to the updated interface capabilities. 
*/ static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc) { const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM; const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM; + device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* Setup checksums validation. */ - error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->ni_token), DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum); + error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, + DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum); if (error) { device_printf(dev, "%s: failed to %s L3 checksum validation\n", __func__, en_rxcsum ? "enable" : "disable"); - return (error); + goto close_ni; } - error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd, + error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum); if (error) { device_printf(dev, "%s: failed to %s L4 checksum validation\n", __func__, en_rxcsum ? "enable" : "disable"); - return (error); + goto close_ni; } /* Setup checksums generation. */ - error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd, + error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum); if (error) { device_printf(dev, "%s: failed to %s L3 checksum generation\n", __func__, en_txcsum ? 
"enable" : "disable"); - return (error); + goto close_ni; } - error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd, + error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum); if (error) { device_printf(dev, "%s: failed to %s L4 checksum generation\n", __func__, en_txcsum ? "enable" : "disable"); - return (error); + goto close_ni; } + (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } /** * @brief Update DPNI according to the updated interface flags. */ static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc) { const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC; const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI; + device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; - error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->ni_token), en_promisc ? true : en_allmulti); + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + + error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd, + en_promisc ? 
true : en_allmulti); if (error) { device_printf(dev, "%s: failed to %s multicast promiscuous " "mode\n", __func__, en_allmulti ? "enable" : "disable"); - return (error); + goto close_ni; } - error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, sc->cmd, en_promisc); + error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc); if (error) { device_printf(dev, "%s: failed to %s unicast promiscuous mode\n", __func__, en_promisc ? "enable" : "disable"); - return (error); + goto close_ni; } + (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); + +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *node, *node2; struct sysctl_oid_list *parent, *parent2; char cbuf[128]; int i; ctx = device_get_sysctl_ctx(sc->dev); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); /* Add DPNI statistics. 
*/ node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics"); parent = SYSCTL_CHILDREN(node); for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) { SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name, CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats, "IU", dpni_stat_sysctls[i].desc); } SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames", CTLFLAG_RD, &sc->rx_anomaly_frames, "Rx frames in the buffers outside of the buffer pools"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames", CTLFLAG_RD, &sc->rx_single_buf_frames, "Rx frames in single buffers"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames", CTLFLAG_RD, &sc->rx_sg_buf_frames, "Rx frames in scatter/gather list"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames", CTLFLAG_RD, &sc->rx_enq_rej_frames, "Enqueue rejected by QMan"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames", CTLFLAG_RD, &sc->rx_ieoi_err_frames, "QMan IEOI error"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames", CTLFLAG_RD, &sc->tx_single_buf_frames, "Tx single buffer frames"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames", CTLFLAG_RD, &sc->tx_sg_frames, "Tx S/G frames"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num", CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num, "IU", "number of Rx buffers in the buffer pool"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free", CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free, "IU", "number of free Rx buffers in the buffer pool"); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); /* Add channels statistics. 
*/ node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels"); parent = SYSCTL_CHILDREN(node); for (int i = 0; i < sc->chan_n; i++) { snprintf(cbuf, sizeof(cbuf), "%d", i); node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel"); parent2 = SYSCTL_CHILDREN(node2); SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames", CTLFLAG_RD, &sc->channels[i]->tx_frames, "Tx frames counter"); SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped", CTLFLAG_RD, &sc->channels[i]->tx_dropped, "Tx dropped counter"); } return (0); } static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc) { device_t dev = sc->dev; int error; KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1), ("unexpected buffer alignment: %d\n", sc->buf_align)); /* * DMA tag to allocate buffers for buffer pool. * * NOTE: QBMan supports DMA addresses up to 49-bits maximum. * Bits 63-49 are not used by QBMan. */ error = bus_dma_tag_create( bus_get_dma_tag(dev), sc->buf_align, 0, /* alignment, boundary */ BUF_MAXADDR_49BIT, /* low restricted addr */ BUF_MAXADDR, /* high restricted addr */ NULL, NULL, /* filter, filterarg */ BUF_SIZE, 1, /* maxsize, nsegments */ BUF_SIZE, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bp_dmat); if (error) { device_printf(dev, "%s: failed to create DMA tag for buffer " "pool\n", __func__); return (error); } /* DMA tag to map Tx mbufs. 
*/ error = bus_dma_tag_create( bus_get_dma_tag(dev), sc->buf_align, 0, /* alignment, boundary */ BUF_MAXADDR_49BIT, /* low restricted addr */ BUF_MAXADDR, /* high restricted addr */ NULL, NULL, /* filter, filterarg */ DPAA2_TX_SEGS_MAXSZ, /* maxsize */ DPAA2_TX_SEGLIMIT, /* nsegments */ DPAA2_TX_SEG_SZ, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->tx_dmat); if (error) { device_printf(dev, "%s: failed to create DMA tag for Tx " "buffers\n", __func__); return (error); } /* DMA tag to allocate channel storage. */ error = bus_dma_tag_create( bus_get_dma_tag(dev), ETH_STORE_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* low restricted addr */ BUS_SPACE_MAXADDR, /* high restricted addr */ NULL, NULL, /* filter, filterarg */ ETH_STORE_SIZE, 1, /* maxsize, nsegments */ ETH_STORE_SIZE, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->st_dmat); if (error) { device_printf(dev, "%s: failed to create DMA tag for channel " "storage\n", __func__); return (error); } /* DMA tag for Rx distribution key. 
*/ error = bus_dma_tag_create( bus_get_dma_tag(dev), PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* low restricted addr */ BUS_SPACE_MAXADDR, /* high restricted addr */ NULL, NULL, /* filter, filterarg */ DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */ DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rxd_dmat); if (error) { device_printf(dev, "%s: failed to create DMA tag for Rx " "distribution key\n", __func__); return (error); } error = bus_dma_tag_create( bus_get_dma_tag(dev), PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* low restricted addr */ BUS_SPACE_MAXADDR, /* high restricted addr */ NULL, NULL, /* filter, filterarg */ ETH_QOS_KCFG_BUF_SIZE, 1, /* maxsize, nsegments */ ETH_QOS_KCFG_BUF_SIZE, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->qos_dmat); if (error) { device_printf(dev, "%s: failed to create DMA tag for QoS key\n", __func__); return (error); } error = bus_dma_tag_create( bus_get_dma_tag(dev), PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* low restricted addr */ BUS_SPACE_MAXADDR, /* high restricted addr */ NULL, NULL, /* filter, filterarg */ DPAA2_TX_SGT_SZ, 1, /* maxsize, nsegments */ DPAA2_TX_SGT_SZ, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sgt_dmat); if (error) { device_printf(dev, "%s: failed to create DMA tag for S/G " "tables\n", __func__); return (error); } return (0); } /** * @brief Configure buffer layouts of the different DPNI queues. 
*/ static int -dpaa2_ni_set_buf_layout(device_t dev, struct dpaa2_cmd *cmd) +dpaa2_ni_set_buf_layout(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_ni_buf_layout buf_layout = {0}; + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, " + "error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* * Select Rx/Tx buffer alignment. It's necessary to ensure that the * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending * on the WRIOP version. */ sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) || sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0)) ? BUF_ALIGN_V1 : BUF_ALIGN; /* * We need to ensure that the buffer size seen by WRIOP is a multiple * of 64 or 256 bytes depending on the WRIOP version. */ sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align); - if (bootverbose) + if (bootverbose) { device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n", sc->buf_sz, sc->buf_align); + } /* * Frame Descriptor Tx buffer layout * * ADDR -> |---------------------| * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes * |---------------------| * | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes * |---------------------| * | DATA HEADROOM | * ADDR + OFFSET -> |---------------------| * | | * | | * | FRAME DATA | * | | * | | * |---------------------| * | DATA TAILROOM | * |---------------------| * * NOTE: It's for a single buffer frame only. 
*/ buf_layout.queue_type = DPAA2_NI_QUEUE_TX; buf_layout.pd_size = BUF_SWA_SIZE; buf_layout.pass_timestamp = true; buf_layout.pass_frame_status = true; buf_layout.options = BUF_LOPT_PRIV_DATA_SZ | BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */ BUF_LOPT_FRAME_STATUS; - error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout); + error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout); if (error) { device_printf(dev, "%s: failed to set Tx buffer layout\n", __func__); - return (error); + goto close_ni; } /* Tx-confirmation buffer layout */ buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF; buf_layout.options = BUF_LOPT_TIMESTAMP | BUF_LOPT_FRAME_STATUS; - error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout); + error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout); if (error) { device_printf(dev, "%s: failed to set TxConf buffer layout\n", __func__); - return (error); + goto close_ni; } /* * Driver should reserve the amount of space indicated by this command * as headroom in all Tx frames. 
*/ - error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, cmd, &sc->tx_data_off); + error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off); if (error) { device_printf(dev, "%s: failed to obtain Tx data offset\n", __func__); - return (error); + goto close_ni; } - if (bootverbose) + if (bootverbose) { device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off); - if ((sc->tx_data_off % 64) != 0) + } + if ((sc->tx_data_off % 64) != 0) { device_printf(dev, "Tx data offset (%d) is not a multiplication " "of 64 bytes\n", sc->tx_data_off); + } /* * Frame Descriptor Rx buffer layout * * ADDR -> |---------------------| * | SW FRAME ANNOTATION | 0 bytes * |---------------------| * | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes * |---------------------| * | DATA HEADROOM | OFFSET-BUF_RX_HWA_SIZE * ADDR + OFFSET -> |---------------------| * | | * | | * | FRAME DATA | * | | * | | * |---------------------| * | DATA TAILROOM | 0 bytes * |---------------------| * * NOTE: It's for a single buffer frame only. 
*/ buf_layout.queue_type = DPAA2_NI_QUEUE_RX; buf_layout.pd_size = 0; buf_layout.fd_align = sc->buf_align; buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE; buf_layout.tail_size = 0; buf_layout.pass_frame_status = true; buf_layout.pass_parser_result = true; buf_layout.pass_timestamp = true; buf_layout.options = BUF_LOPT_PRIV_DATA_SZ | BUF_LOPT_DATA_ALIGN | BUF_LOPT_DATA_HEAD_ROOM | BUF_LOPT_DATA_TAIL_ROOM | BUF_LOPT_FRAME_STATUS | BUF_LOPT_PARSER_RESULT | BUF_LOPT_TIMESTAMP; - error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout); + error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout); if (error) { device_printf(dev, "%s: failed to set Rx buffer layout\n", __func__); - return (error); + goto close_ni; } - return (0); + error = 0; +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } /** * @brief Enable Rx/Tx pause frames. * * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI * itself generates pause frames (Tx frame). 
*/ static int -dpaa2_ni_set_pause_frame(device_t dev, struct dpaa2_cmd *cmd) +dpaa2_ni_set_pause_frame(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_ni_link_cfg link_cfg = {0}; + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; - error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, cmd, &link_cfg); + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, " + "error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + + error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg); if (error) { device_printf(dev, "%s: failed to obtain link configuration: " "error=%d\n", __func__, error); - return (error); + goto close_ni; } /* Enable both Rx and Tx pause frames by default. */ link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE; link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE; - error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, cmd, &link_cfg); + error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg); if (error) { device_printf(dev, "%s: failed to set link configuration: " "error=%d\n", __func__, error); - return (error); + goto close_ni; } sc->link_options = link_cfg.options; - - return (0); + error = 0; +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } /** * @brief Configure QoS table to determine the traffic class for the received * frame. 
*/ static int -dpaa2_ni_set_qos_table(device_t dev, struct dpaa2_cmd *cmd) +dpaa2_ni_set_qos_table(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_ni_qos_table tbl; struct dpaa2_buf *buf = &sc->qos_kcfg; + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; if (sc->attr.num.rx_tcs == 1 || !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) { - if (bootverbose) + if (bootverbose) { device_printf(dev, "Ingress traffic classification is " "not supported\n"); + } return (0); } /* * Allocate a buffer visible to the device to hold the QoS table key * configuration. */ KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer", __func__)); - if (__predict_true(buf->store.dmat == NULL)) + if (__predict_true(buf->store.dmat == NULL)) { buf->store.dmat = sc->qos_dmat; + } error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap); if (error) { device_printf(dev, "%s: failed to allocate a buffer for QoS key " "configuration\n", __func__); - return (error); + goto err_exit; } error = bus_dmamap_load(buf->store.dmat, buf->store.dmap, buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb, &buf->store.paddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "%s: failed to map QoS key configuration " "buffer into bus space\n", __func__); - return (error); + goto err_exit; + } + + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, " + "error=%d\n", __func__, dinfo->id, error); 
+ goto close_rc; } tbl.default_tc = 0; tbl.discard_on_miss = false; tbl.keep_entries = false; tbl.kcfg_busaddr = buf->store.paddr; - error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, cmd, &tbl); + error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl); if (error) { device_printf(dev, "%s: failed to set QoS table\n", __func__); - return (error); + goto close_ni; } - error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, cmd); + error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to clear QoS table\n", __func__); - return (error); + goto close_ni; } - return (0); + error = 0; +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } static int -dpaa2_ni_set_mac_addr(device_t dev, struct dpaa2_cmd *cmd, uint16_t rc_token, - uint16_t ni_token) +dpaa2_ni_set_mac_addr(device_t dev) { + device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); if_t ifp = sc->ifp; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; struct ether_addr rnd_mac_addr; + uint16_t rc_token, ni_token; uint8_t mac_addr[ETHER_ADDR_LEN]; uint8_t dpni_mac_addr[ETHER_ADDR_LEN]; int error; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, " + "error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + /* * Get the MAC address associated with the physical port, if the DPNI is * connected to a DPMAC directly associated with one of the physical * ports. 
*/ - error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, dpaa2_mcp_tk(cmd, - ni_token), mac_addr); + error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr); if (error) { device_printf(dev, "%s: failed to obtain the MAC address " "associated with the physical port\n", __func__); - return (error); + goto close_ni; } /* Get primary MAC address from the DPNI attributes. */ - error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, cmd, dpni_mac_addr); + error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr); if (error) { device_printf(dev, "%s: failed to obtain primary MAC address\n", __func__); - return (error); + goto close_ni; } if (!ETHER_IS_ZERO(mac_addr)) { /* Set MAC address of the physical port as DPNI's primary one. */ - error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, cmd, + error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd, mac_addr); if (error) { device_printf(dev, "%s: failed to set primary MAC " "address\n", __func__); - return (error); + goto close_ni; } - for (int i = 0; i < ETHER_ADDR_LEN; i++) + for (int i = 0; i < ETHER_ADDR_LEN; i++) { sc->mac.addr[i] = mac_addr[i]; + } } else if (ETHER_IS_ZERO(dpni_mac_addr)) { /* Generate random MAC address as DPNI's primary one. 
*/ ether_gen_addr(ifp, &rnd_mac_addr); - for (int i = 0; i < ETHER_ADDR_LEN; i++) + for (int i = 0; i < ETHER_ADDR_LEN; i++) { mac_addr[i] = rnd_mac_addr.octet[i]; + } - error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, cmd, + error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd, mac_addr); if (error) { device_printf(dev, "%s: failed to set random primary " "MAC address\n", __func__); - return (error); + goto close_ni; } - for (int i = 0; i < ETHER_ADDR_LEN; i++) + for (int i = 0; i < ETHER_ADDR_LEN; i++) { sc->mac.addr[i] = mac_addr[i]; + } } else { - for (int i = 0; i < ETHER_ADDR_LEN; i++) + for (int i = 0; i < ETHER_ADDR_LEN; i++) { sc->mac.addr[i] = dpni_mac_addr[i]; + } } - return (0); + error = 0; +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } static void dpaa2_ni_miibus_statchg(device_t dev) { - struct dpaa2_ni_softc *sc; - device_t child; + device_t pdev = device_get_parent(dev); + device_t child = dev; + struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_mac_link_state mac_link = { 0 }; - uint16_t mac_token; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_cmd cmd; + uint16_t rc_token, mac_token; int error, link_state; - sc = device_get_softc(dev); - if (sc->fixed_link || sc->mii == NULL) + if (sc->fixed_link || sc->mii == NULL) { return; + } /* * Note: ifp link state will only be changed AFTER we are called so we * cannot rely on ifp->if_linkstate here. 
*/ if (sc->mii->mii_media_status & IFM_AVALID) { - if (sc->mii->mii_media_status & IFM_ACTIVE) + if (sc->mii->mii_media_status & IFM_ACTIVE) { link_state = LINK_STATE_UP; - else + } else { link_state = LINK_STATE_DOWN; - } else + } + } else { link_state = LINK_STATE_UNKNOWN; + } if (link_state != sc->link_state) { - sc->link_state = link_state; - child = sc->dev; - error = DPAA2_CMD_MAC_OPEN(sc->dev, child, dpaa2_mcp_tk(sc->cmd, - sc->rc_token), sc->mac.dpmac_id, &mac_token); + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, + &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource " + "container: id=%d, error=%d\n", __func__, rcinfo->id, + error); + goto err_exit; + } + error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id, + &mac_token); if (error) { device_printf(sc->dev, "%s: failed to open DPMAC: " "id=%d, error=%d\n", __func__, sc->mac.dpmac_id, error); - return; + goto close_rc; } if (link_state == LINK_STATE_UP || link_state == LINK_STATE_DOWN) { /* Update DPMAC link state. */ mac_link.supported = sc->mii->mii_media.ifm_media; mac_link.advert = sc->mii->mii_media.ifm_media; mac_link.rate = 1000; /* TODO: Where to get from? */ /* ifmedia_baudrate? */ mac_link.options = DPAA2_MAC_LINK_OPT_AUTONEG | DPAA2_MAC_LINK_OPT_PAUSE; mac_link.up = (link_state == LINK_STATE_UP) ? true : false; mac_link.state_valid = true; /* Inform DPMAC about link state. 
*/ - error = DPAA2_CMD_MAC_SET_LINK_STATE(sc->dev, child, - sc->cmd, &mac_link); - if (error) + error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd, + &mac_link); + if (error) { device_printf(sc->dev, "%s: failed to set DPMAC " "link state: id=%d, error=%d\n", __func__, sc->mac.dpmac_id, error); + } } - DPAA2_CMD_MAC_CLOSE(sc->dev, child, dpaa2_mcp_tk(sc->cmd, - mac_token)); + (void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); } + + return; + +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return; } /** * @brief Callback function to process media change request. */ static int dpaa2_ni_media_change(if_t ifp) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); DPNI_LOCK(sc); if (sc->mii) { mii_mediachg(sc->mii); sc->media_status = sc->mii->mii_media.ifm_media; } else if (sc->fixed_link) { if_printf(ifp, "%s: can't change media in fixed mode\n", __func__); } DPNI_UNLOCK(sc); return (0); } /** * @brief Callback function to process media status request. */ static void dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); DPNI_LOCK(sc); if (sc->mii) { mii_pollstat(sc->mii); ifmr->ifm_active = sc->mii->mii_media_active; ifmr->ifm_status = sc->mii->mii_media_status; } DPNI_UNLOCK(sc); } /** * @brief Callout function to check and update media status. 
*/ static void dpaa2_ni_media_tick(void *arg) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; /* Check for media type change */ if (sc->mii) { mii_tick(sc->mii); if (sc->media_status != sc->mii->mii_media.ifm_media) { printf("%s: media type changed (ifm_media=%x)\n", __func__, sc->mii->mii_media.ifm_media); dpaa2_ni_media_change(sc->ifp); } } /* Schedule another timeout one second from now */ callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc); } static void dpaa2_ni_init(void *arg) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; if_t ifp = sc->ifp; + device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; DPNI_LOCK(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { DPNI_UNLOCK(sc); return; } DPNI_UNLOCK(sc); - error = DPAA2_CMD_NI_ENABLE(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->ni_token)); - if (error) + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + + error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd); + if (error) { device_printf(dev, "%s: failed to enable DPNI: error=%d\n", __func__, error); + } DPNI_LOCK(sc); - if (sc->mii) + if (sc->mii) { mii_mediachg(sc->mii); + } callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); DPNI_UNLOCK(sc); /* Force link-state update to initilize things. 
*/ dpaa2_ni_miibus_statchg(dev); + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); + return; + +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: return; } static int dpaa2_ni_transmit(if_t ifp, struct mbuf *m) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); struct dpaa2_ni_channel *chan; struct dpaa2_ni_tx_ring *tx; uint32_t fqid; boolean_t found = false; int chan_n = 0; if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) return (0); if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { fqid = m->m_pkthdr.flowid; for (int i = 0; i < sc->chan_n; i++) { chan = sc->channels[i]; for (int j = 0; j < chan->rxq_n; j++) { if (fqid == chan->rx_queues[j].fqid) { chan_n = chan->flowid; found = true; break; } } if (found) { break; } } } tx = DPAA2_TX_RING(sc, chan_n, 0); TX_LOCK(tx); dpaa2_ni_tx_locked(sc, tx, m); TX_UNLOCK(tx); return (0); } static void dpaa2_ni_qflush(if_t ifp) { /* TODO: Find a way to drain Tx queues in QBMan. 
*/ if_qflush(ifp); } static int -dpaa2_ni_ioctl(if_t ifp, u_long cmd, caddr_t data) +dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *) data; - device_t dev, child; + device_t pdev = device_get_parent(sc->dev); + device_t dev = sc->dev; + device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; uint32_t changed = 0; + uint16_t rc_token, ni_token; int mtu, error, rc = 0; - dev = child = sc->dev; + DPAA2_CMD_INIT(&cmd); - switch (cmd) { + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + + switch (c) { case SIOCSIFMTU: DPNI_LOCK(sc); mtu = ifr->ifr_mtu; if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) { DPNI_UNLOCK(sc); - return (EINVAL); + error = EINVAL; + goto close_ni; } if_setmtu(ifp, mtu); DPNI_UNLOCK(sc); /* Update maximum frame length. 
*/ - error = DPAA2_CMD_NI_SET_MFL(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->ni_token), mtu + ETHER_HDR_LEN); + error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, + mtu + ETHER_HDR_LEN); if (error) { device_printf(dev, "%s: failed to update maximum frame " "length: error=%d\n", __func__, error); - return (error); + goto close_ni; } break; case SIOCSIFCAP: changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if (changed & IFCAP_HWCSUM) { - if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) + if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) { if_setcapenablebit(ifp, IFCAP_HWCSUM, 0); - else + } else { if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); + } } rc = dpaa2_ni_setup_if_caps(sc); if (rc) { printf("%s: failed to update iface capabilities: " "error=%d\n", __func__, rc); rc = ENXIO; } break; case SIOCSIFFLAGS: DPNI_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { changed = if_getflags(ifp) ^ sc->if_flags; if (changed & IFF_PROMISC || changed & IFF_ALLMULTI) { rc = dpaa2_ni_setup_if_flags(sc); } } else { DPNI_UNLOCK(sc); dpaa2_ni_init(sc); DPNI_LOCK(sc); } } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { - /* dpni_if_stop(sc); */ + /* FIXME: Disable DPNI. See dpaa2_ni_init(). 
*/ } sc->if_flags = if_getflags(ifp); DPNI_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: DPNI_LOCK(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { DPNI_UNLOCK(sc); rc = dpaa2_ni_update_mac_filters(ifp); - if (rc) + if (rc) { device_printf(dev, "%s: failed to update MAC " "filters: error=%d\n", __func__, rc); + } DPNI_LOCK(sc); } DPNI_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->mii) - rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, cmd); + rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c); else if(sc->fixed_link) { - rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, cmd); + rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c); } break; default: - rc = ether_ioctl(ifp, cmd, data); + rc = ether_ioctl(ifp, c, data); + break; } + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (rc); + +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } static int dpaa2_ni_update_mac_filters(if_t ifp) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); struct dpaa2_ni_mcaddr_ctx ctx; - device_t dev, child; + device_t pdev = device_get_parent(sc->dev); + device_t dev = sc->dev; + device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int error; - dev = child = sc->dev; + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, 
dinfo->id, error); + goto close_rc; + } /* Remove all multicast MAC filters. */ - error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, dpaa2_mcp_tk(sc->cmd, - sc->ni_token), false, true); + error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true); if (error) { device_printf(dev, "%s: failed to clear multicast MAC filters: " "error=%d\n", __func__, error); - return (error); + goto close_ni; } ctx.ifp = ifp; ctx.error = 0; ctx.nent = 0; if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx); - return (ctx.error); + error = ctx.error; +close_ni: + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +err_exit: + return (error); } static u_int dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct dpaa2_ni_mcaddr_ctx *ctx = arg; struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp); - device_t dev, child; - - dev = child = sc->dev; + device_t pdev = device_get_parent(sc->dev); + device_t dev = sc->dev; + device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; + int error; - if (ctx->error != 0) + if (ctx->error != 0) { return (0); + } if (ETHER_IS_MULTICAST(LLADDR(sdl))) { - ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, dpaa2_mcp_tk( - sc->cmd, sc->ni_token), LLADDR(sdl)); + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, + &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource " + "container: id=%d, error=%d\n", __func__, rcinfo->id, + error); + return (0); + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, + &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); + return (0); + } + + ctx->error = 
DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd, + LLADDR(sdl)); + + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + ni_token)); + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); + if (ctx->error != 0) { device_printf(dev, "%s: can't add more then %d MAC " "addresses, switching to the multicast promiscuous " "mode\n", __func__, ctx->nent); /* Enable multicast promiscuous mode. */ DPNI_LOCK(sc); if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0); sc->if_flags |= IFF_ALLMULTI; ctx->error = dpaa2_ni_setup_if_flags(sc); DPNI_UNLOCK(sc); return (0); } ctx->nent++; } return (1); } static void dpaa2_ni_intr(void *arg) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; - device_t child = sc->dev; + device_t pdev = device_get_parent(sc->dev); + device_t dev = sc->dev; + device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; uint32_t status = ~0u; /* clear all IRQ status bits */ + uint16_t rc_token, ni_token; int error; - error = DPAA2_CMD_NI_GET_IRQ_STATUS(sc->dev, child, dpaa2_mcp_tk(sc->cmd, - sc->ni_token), DPNI_IRQ_INDEX, &status); - if (error) + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + + error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX, + &status); + if (error) { device_printf(sc->dev, "%s: failed to obtain IRQ status: " "error=%d\n", __func__, error); + } + + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, 
rc_token)); +err_exit: + return; } /** * @brief Callback to obtain a physical address of the only DMA segment mapped. */ static void dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error == 0) { KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg)); *(bus_addr_t *) arg = segs[0].ds_addr; } } /** * @brief Release new buffers to the buffer pool if necessary. */ static void dpaa2_ni_bp_task(void *arg, int count) { device_t bp_dev; struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; struct dpaa2_bp_softc *bpsc; struct dpaa2_bp_conf bp_conf; const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num); int error; /* There's only one buffer pool for now. */ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); bpsc = device_get_softc(bp_dev); /* Get state of the buffer pool. */ error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid, &bp_conf); if (error) { device_printf(sc->dev, "%s: failed to query buffer pool " "configuration: error=%d\n", __func__, error); return; } /* Double allocated buffers number if free buffers < 25%. */ if (bp_conf.free_bufn < (buf_num >> 2)) { (void)dpaa2_ni_seed_buf_pool(sc, buf_num); DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn); } } /** * @brief Poll frames from a specific channel when CDAN is received. * * NOTE: To be called from the DPIO interrupt handler. 
*/ static void dpaa2_ni_poll(void *arg) { struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg; struct dpaa2_io_softc *iosc; struct dpaa2_swp *swp; struct dpaa2_ni_fq *fq; int error, consumed = 0; KASSERT(chan != NULL, ("%s: channel is NULL", __func__)); iosc = device_get_softc(chan->io_dev); swp = iosc->swp; do { error = dpaa2_swp_pull(swp, chan->id, &chan->store, ETH_STORE_FRAMES); if (error) { device_printf(chan->ni_dev, "%s: failed to pull frames: " "chan_id=%d, error=%d\n", __func__, chan->id, error); break; } /* * TODO: Combine frames from the same Rx queue returned as * a result to the current VDQ command into a chain (linked * with m_nextpkt) to ammortize the FQ lock. */ error = dpaa2_ni_consume_frames(chan, &fq, &consumed); if (error == ENOENT) { break; } if (error == ETIMEDOUT) { device_printf(chan->ni_dev, "%s: timeout to consume " "frames: chan_id=%d\n", __func__, chan->id); } } while (true); /* Re-arm channel to generate CDAN. */ error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx); if (error) { device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, " "error=%d\n", __func__, chan->id, error); } } /** * @brief Transmit mbufs. */ static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx, struct mbuf *m) { struct dpaa2_ni_fq *fq = tx->fq; struct dpaa2_buf *buf; struct dpaa2_fd fd; struct mbuf *m_d; bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT]; uint64_t idx; void *pidx; int error, rc, txnsegs; /* Obtain an index of a Tx buffer. */ pidx = buf_ring_dequeue_sc(tx->idx_br); if (__predict_false(pidx == NULL)) { /* TODO: Do not give up easily. */ m_freem(m); return; } else { idx = (uint64_t) pidx; buf = &tx->buf[idx]; buf->tx.m = m; buf->tx.idx = idx; buf->tx.sgt_paddr = 0; } /* Load mbuf to transmit. */ error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT); if (__predict_false(error != 0)) { /* Too many fragments, trying to defragment... 
*/ m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT); if (m_d == NULL) { device_printf(sc->dev, "%s: mbuf " "defragmentation failed\n", __func__); fq->chan->tx_dropped++; goto err; } buf->tx.m = m = m_d; error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT); if (__predict_false(error != 0)) { device_printf(sc->dev, "%s: failed to load " "mbuf: error=%d\n", __func__, error); fq->chan->tx_dropped++; goto err; } } /* Build frame descriptor. */ error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd); if (__predict_false(error != 0)) { device_printf(sc->dev, "%s: failed to build frame " "descriptor: error=%d\n", __func__, error); fq->chan->tx_dropped++; goto err_unload; } /* TODO: Enqueue several frames in a single command. */ for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) { /* TODO: Return error codes instead of # of frames. */ rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1); if (rc == 1) { break; } } bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_PREWRITE); if (rc != 1) { fq->chan->tx_dropped++; goto err_unload; } else { fq->chan->tx_frames++; } return; err_unload: bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap); if (buf->tx.sgt_paddr != 0) { bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap); } err: m_freem(buf->tx.m); buf_ring_enqueue(tx->idx_br, pidx); } static int dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src, uint32_t *consumed) { struct dpaa2_ni_fq *fq = NULL; struct dpaa2_dq *dq; struct dpaa2_fd *fd; int rc, frames = 0; do { rc = dpaa2_ni_chan_storage_next(chan, &dq); if (rc == EINPROGRESS) { if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) { fd = &dq->fdr.fd; fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx; fq->consume(chan, fq, fd); frames++; } } else if (rc == EALREADY || rc == ENOENT) { if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) { fd = &dq->fdr.fd; fq 
= (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx; fq->consume(chan, fq, fd); frames++; } break; } else { KASSERT(1 == 0, ("%s: should not reach here", __func__)); } } while (true); KASSERT(chan->store_idx < chan->store_sz, ("channel store idx >= size: store_idx=%d, store_sz=%d", chan->store_idx, chan->store_sz)); /* * A dequeue operation pulls frames from a single queue into the store. * Return the frame queue and a number of consumed frames as an output. */ if (src != NULL) *src = fq; if (consumed != NULL) *consumed = frames; return (rc); } /** * @brief Receive frames. */ static int dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd) { struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev); struct dpaa2_bp_softc *bpsc; struct dpaa2_buf *buf; if_t ifp = sc->ifp; struct mbuf *m; device_t bp_dev; bus_addr_t paddr = (bus_addr_t) fd->addr; bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD]; void *buf_data; int buf_idx, buf_len; int error, released_n = 0; /* * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the * physical address. */ buf_idx = dpaa2_ni_fd_buf_idx(fd); buf = &sc->buf[buf_idx]; KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__)); if (paddr != buf->rx.paddr) { panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", __func__, paddr, buf->rx.paddr); } /* Update statistics. 
*/ switch (dpaa2_ni_fd_err(fd)) { case 1: /* Enqueue rejected by QMan */ sc->rx_enq_rej_frames++; break; case 2: /* QMan IEOI error */ sc->rx_ieoi_err_frames++; break; default: break; } switch (dpaa2_ni_fd_format(fd)) { case DPAA2_FD_SINGLE: sc->rx_single_buf_frames++; break; case DPAA2_FD_SG: sc->rx_sg_buf_frames++; break; default: break; } m = buf->rx.m; buf->rx.m = NULL; bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap); buf_len = dpaa2_ni_fd_data_len(fd); buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd); /* Prefetch mbuf data. */ __builtin_prefetch(buf_data); /* Write value to mbuf (avoid reading). */ m->m_flags |= M_PKTHDR; m->m_data = buf_data; m->m_len = buf_len; m->m_pkthdr.len = buf_len; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.flowid = fq->fqid; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); if_input(ifp, m); /* Keep the buffer to be recycled. */ chan->recycled[chan->recycled_n++] = paddr; KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD, ("%s: too many buffers to recycle", __func__)); /* Re-seed and release recycled buffers back to the pool. */ if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) { /* Release new buffers to the pool if needed. */ taskqueue_enqueue(sc->bp_taskq, &sc->bp_task); for (int i = 0; i < chan->recycled_n; i++) { paddr = chan->recycled[i]; /* Parse ADDR_TOK of the recycled buffer. */ buf_idx = (paddr >> DPAA2_NI_BUF_IDX_SHIFT) & DPAA2_NI_BUF_IDX_MASK; buf = &sc->buf[buf_idx]; /* Seed recycled buffer. */ error = dpaa2_ni_seed_rxbuf(sc, buf, buf_idx); KASSERT(error == 0, ("%s: failed to seed recycled " "buffer: error=%d", __func__, error)); if (__predict_false(error != 0)) { device_printf(sc->dev, "%s: failed to seed " "recycled buffer: error=%d\n", __func__, error); continue; } /* Prepare buffer to be released in a single command. */ released[released_n++] = buf->rx.paddr; } /* There's only one buffer pool for now. 
*/ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); bpsc = device_get_softc(bp_dev); error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, released, released_n); if (__predict_false(error != 0)) { device_printf(sc->dev, "%s: failed to release buffers " "to the pool: error=%d\n", __func__, error); return (error); } /* Be ready to recycle the next portion of the buffers. */ chan->recycled_n = 0; } return (0); } /** * @brief Receive Rx error frames. */ static int dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd) { device_t bp_dev; struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev); struct dpaa2_bp_softc *bpsc; struct dpaa2_buf *buf; bus_addr_t paddr = (bus_addr_t) fd->addr; int buf_idx, error; /* * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the * physical address. */ buf_idx = dpaa2_ni_fd_buf_idx(fd); buf = &sc->buf[buf_idx]; KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__)); if (paddr != buf->rx.paddr) { panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", __func__, paddr, buf->rx.paddr); } /* There's only one buffer pool for now. */ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); bpsc = device_get_softc(bp_dev); /* Release buffer to QBMan buffer pool. */ error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1); if (error != 0) { device_printf(sc->dev, "%s: failed to release frame buffer to " "the pool: error=%d\n", __func__, error); return (error); } return (0); } /** * @brief Receive Tx confirmation frames. 
*/ static int dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd) { struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev); struct dpaa2_ni_channel *buf_chan; struct dpaa2_ni_tx_ring *tx; struct dpaa2_buf *buf; bus_addr_t paddr = (bus_addr_t) (fd->addr & BUF_MAXADDR_49BIT); uint64_t buf_idx; int chan_idx, tx_idx; /* * Get channel, Tx ring and buffer indexes from the ADDR_TOK bits * (not used by QBMan) of the physical address. */ chan_idx = dpaa2_ni_fd_chan_idx(fd); tx_idx = dpaa2_ni_fd_tx_idx(fd); buf_idx = (uint64_t) dpaa2_ni_fd_txbuf_idx(fd); KASSERT(tx_idx < DPAA2_NI_MAX_TCS, ("%s: invalid Tx ring index", __func__)); KASSERT(buf_idx < DPAA2_NI_BUFS_PER_TX, ("%s: invalid Tx buffer index", __func__)); buf_chan = sc->channels[chan_idx]; tx = &buf_chan->txc_queue.tx_rings[tx_idx]; buf = &tx->buf[buf_idx]; KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__)); if (paddr != buf->tx.paddr) { panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", __func__, paddr, buf->tx.paddr); } bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap); if (buf->tx.sgt_paddr != 0) bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap); m_freem(buf->tx.m); /* Return Tx buffer index back to the ring. */ buf_ring_enqueue(tx->idx_br, (void *) buf_idx); return (0); } /** * @brief Compare versions of the DPAA2 network interface API. */ static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major, uint16_t minor) { if (sc->api_major == major) return sc->api_minor - minor; return sc->api_major - major; } /** * @brief Allocate Rx buffers visible to QBMan and release them to the pool. 
*/ static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn) { device_t bp_dev; struct dpaa2_bp_softc *bpsc; struct dpaa2_buf *buf; bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD]; const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num); int i, error, bufn = 0; KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not " "created?", __func__)); /* There's only one buffer pool for now. */ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); bpsc = device_get_softc(bp_dev); /* Limit # of buffers released to the pool. */ if (allocated + seedn > DPAA2_NI_BUFS_MAX) seedn = DPAA2_NI_BUFS_MAX - allocated; /* Release "seedn" buffers to the pool. */ for (i = allocated; i < (allocated + seedn); i++) { /* Enough buffers were allocated for a single command. */ if (bufn == DPAA2_SWP_BUFS_PER_CMD) { error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev, bpsc->attr.bpid, paddr, bufn); if (error) { device_printf(sc->dev, "%s: failed to release " "buffers to the pool (1)\n", __func__); return (error); } DPAA2_ATOMIC_ADD(&sc->buf_num, bufn); bufn = 0; } buf = &sc->buf[i]; buf->type = DPAA2_BUF_RX; buf->rx.m = NULL; buf->rx.dmap = NULL; buf->rx.paddr = 0; buf->rx.vaddr = NULL; error = dpaa2_ni_seed_rxbuf(sc, buf, i); if (error) break; paddr[bufn] = buf->rx.paddr; bufn++; } /* Release if there are buffers left. */ if (bufn > 0) { error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev, bpsc->attr.bpid, paddr, bufn); if (error) { device_printf(sc->dev, "%s: failed to release " "buffers to the pool (2)\n", __func__); return (error); } DPAA2_ATOMIC_ADD(&sc->buf_num, bufn); } return (0); } /** * @brief Prepare Rx buffer to be released to the buffer pool. 
*/ static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx) { struct mbuf *m; bus_dmamap_t dmap; bus_dma_segment_t segs; int error, nsegs; KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not " "allocated?", __func__)); KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__)); /* Keep DMA tag for this buffer. */ if (__predict_false(buf->rx.dmat == NULL)) buf->rx.dmat = sc->bp_dmat; /* Create a DMA map for the giving buffer if it doesn't exist yet. */ if (__predict_false(buf->rx.dmap == NULL)) { error = bus_dmamap_create(buf->rx.dmat, 0, &dmap); if (error) { device_printf(sc->dev, "%s: failed to create DMA map " "for buffer: buf_idx=%d, error=%d\n", __func__, idx, error); return (error); } buf->rx.dmap = dmap; } /* Allocate mbuf if needed. */ if (__predict_false(buf->rx.m == NULL)) { m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE); if (__predict_false(m == NULL)) { device_printf(sc->dev, "%s: failed to allocate mbuf for " "buffer\n", __func__); return (ENOMEM); } m->m_len = m->m_ext.ext_size; m->m_pkthdr.len = m->m_ext.ext_size; buf->rx.m = m; } else m = buf->rx.m; error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap, m, &segs, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs)); KASSERT(error == 0, ("failed to map mbuf: error=%d", error)); if (__predict_false(error != 0 || nsegs != 1)) { device_printf(sc->dev, "%s: failed to map mbuf: error=%d, " "nsegs=%d\n", __func__, error, nsegs); bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap); m_freem(m); return (error); } buf->rx.paddr = segs.ds_addr; buf->rx.vaddr = m->m_data; /* * Write buffer index to the ADDR_TOK (bits 63-49) which is not used by * QBMan and is supposed to assist in physical to virtual address * translation. * * NOTE: "lowaddr" and "highaddr" of the window which cannot be accessed * by QBMan must be configured in the DMA tag accordingly. 
*/ buf->rx.paddr = ((uint64_t)(idx & DPAA2_NI_BUF_IDX_MASK) << DPAA2_NI_BUF_IDX_SHIFT) | (buf->rx.paddr & DPAA2_NI_BUF_ADDR_MASK); return (0); } /** * @brief Prepare Tx buffer to be added to the Tx ring. */ static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx) { bus_dmamap_t dmap; int error; KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?", __func__)); KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?", __func__)); KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__)); /* Keep DMA tags for this buffer. */ if (__predict_true(buf->tx.dmat == NULL)) buf->tx.dmat = sc->tx_dmat; if (__predict_true(buf->tx.sgt_dmat == NULL)) buf->tx.sgt_dmat = sc->sgt_dmat; /* Create a DMA map for the giving buffer if it doesn't exist yet. */ if (__predict_true(buf->tx.dmap == NULL)) { error = bus_dmamap_create(buf->tx.dmat, 0, &dmap); if (error != 0) { device_printf(sc->dev, "%s: failed to create " "Tx DMA map: error=%d\n", __func__, error); return (error); } buf->tx.dmap = dmap; } /* Allocate a buffer to store scatter/gather table. */ if (__predict_true(buf->tx.sgt_vaddr == NULL)) { error = bus_dmamem_alloc(buf->tx.sgt_dmat, &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->tx.sgt_dmap); if (error != 0) { device_printf(sc->dev, "%s: failed to allocate " "S/G table: error=%d\n", __func__, error); return (error); } } return (0); } /** * @brief Allocate channel storage visible to QBMan. */ static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc, struct dpaa2_ni_channel *chan) { struct dpaa2_buf *buf = &chan->store; int error; KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not " "allocated?", __func__)); KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer", __func__)); /* Keep DMA tag for this buffer. 
*/
	if (__predict_false(buf->store.dmat == NULL)) {
		buf->store.dmat = sc->st_dmat;
	}

	/* Allocate the backing memory once (zeroed, coherent). */
	if (__predict_false(buf->store.vaddr == NULL)) {
		error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
		if (error) {
			device_printf(sc->dev, "%s: failed to allocate channel "
			    "storage\n", __func__);
			return (error);
		}
	}

	/* Obtain the bus address QBMan will write dequeue responses to. */
	if (__predict_false(buf->store.paddr == 0)) {
		error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
		    buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
		    &buf->store.paddr, BUS_DMA_NOWAIT);
		if (error) {
			device_printf(sc->dev, "%s: failed to map channel "
			    "storage\n", __func__);
			return (error);
		}
	}

	chan->store_sz = ETH_STORE_FRAMES;
	chan->store_idx = 0;

	return (0);
}

/**
 * @brief Build a DPAA2 frame descriptor.
 */
static int
dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
    struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
    struct dpaa2_fd *fd)
{
	struct dpaa2_ni_channel *chan = tx->fq->chan;
	struct dpaa2_sg_entry *sgt;
	int i, error;

	KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
	    "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
	KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
	    __func__));

	/* Reset frame descriptor fields. */
	memset(fd, 0, sizeof(*fd));

	if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) {
		/*
		 * Populate S/G table.
		 *
		 * NOTE(review): the offset added below is scaled by
		 * sizeof(struct dpaa2_sg_entry) because of the pointer
		 * arithmetic on "struct dpaa2_sg_entry *"; confirm whether a
		 * byte offset of tx_data_off was intended instead.
		 */
		sgt = (struct dpaa2_sg_entry *) buf->tx.sgt_vaddr +
		    sc->tx_data_off;
		for (i = 0; i < txnsegs; i++) {
			sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
			sgt[i].len = (uint32_t) txsegs[i].ds_len;
			sgt[i].offset_fmt = 0u;
		}
		/*
		 * NOTE(review): with txnsegs == 0 this indexes sgt[-1];
		 * presumably callers never pass zero segments — confirm.
		 */
		sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */

		KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
		    __func__, buf->tx.sgt_paddr));

		/* Load S/G table. */
		error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
		    buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
		    &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
		if (__predict_false(error != 0)) {
			device_printf(sc->dev, "%s: failed to map S/G table: "
			    "error=%d\n", __func__, error);
			return (error);
		}

		buf->tx.paddr = buf->tx.sgt_paddr;
		buf->tx.vaddr = buf->tx.sgt_vaddr;
		sc->tx_sg_frames++; /* for sysctl(9) */
	} else {
		return (EINVAL);
	}

	/*
	 * Pack channel, Tx ring and Tx buffer indexes into the otherwise
	 * unused upper bits (ADDR_TOK) of the frame descriptor address.
	 */
	fd->addr =
	    ((uint64_t)(chan->flowid & DPAA2_NI_BUF_CHAN_MASK) <<
		DPAA2_NI_BUF_CHAN_SHIFT) |
	    ((uint64_t)(tx->txid & DPAA2_NI_TX_IDX_MASK) <<
		DPAA2_NI_TX_IDX_SHIFT) |
	    ((uint64_t)(buf->tx.idx & DPAA2_NI_TXBUF_IDX_MASK) <<
		DPAA2_NI_TXBUF_IDX_SHIFT) |
	    (buf->tx.paddr & DPAA2_NI_BUF_ADDR_MASK);
	fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
	fd->bpid_ivp_bmt = 0;
	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
	fd->ctrl = 0x00800000u;

	return (0);
}

/* Extract the error bits from the frame descriptor control word. */
static int
dpaa2_ni_fd_err(struct dpaa2_fd *fd)
{
	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
}

/* Frame data length, honouring the short-length encoding when set. */
static uint32_t
dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
{
	if (dpaa2_ni_fd_short_len(fd))
		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);

	return (fd->data_length);
}

/* Channel index stored in the ADDR_TOK bits of the FD address. */
static int
dpaa2_ni_fd_chan_idx(struct dpaa2_fd *fd)
{
	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_CHAN_SHIFT) &
	    DPAA2_NI_BUF_CHAN_MASK);
}

/* Rx buffer index stored in the ADDR_TOK bits of the FD address. */
static int
dpaa2_ni_fd_buf_idx(struct dpaa2_fd *fd)
{
	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_IDX_SHIFT) &
	    DPAA2_NI_BUF_IDX_MASK);
}

/* Tx ring index stored in the ADDR_TOK bits of the FD address. */
static int
dpaa2_ni_fd_tx_idx(struct dpaa2_fd *fd)
{
	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TX_IDX_SHIFT) &
	    DPAA2_NI_TX_IDX_MASK);
}

/* Tx buffer index stored in the ADDR_TOK bits of the FD address. */
static int
dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *fd)
{
	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TXBUF_IDX_SHIFT) &
	    DPAA2_NI_TXBUF_IDX_MASK);
}

/* Frame descriptor format field. */
static int
dpaa2_ni_fd_format(struct dpaa2_fd *fd)
{
	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
}

/* True if the FD uses the short-length encoding of data_length. */
static bool
dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
{
	return (((fd->offset_fmt_sl >>
DPAA2_NI_FD_SL_SHIFT) & DPAA2_NI_FD_SL_MASK) == 1); } static int dpaa2_ni_fd_offset(struct dpaa2_fd *fd) { return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK); } /** * @brief Collect statistics of the network interface. */ static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1; struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number]; - device_t child = sc->dev; + device_t pdev = device_get_parent(sc->dev); + device_t dev = sc->dev; + device_t child = dev; + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); + struct dpaa2_cmd cmd; uint64_t cnt[DPAA2_NI_STAT_COUNTERS]; uint64_t result = 0; + uint16_t rc_token, ni_token; int error; - error = DPAA2_CMD_NI_GET_STATISTICS(sc->dev, child, - dpaa2_mcp_tk(sc->cmd, sc->ni_token), stat->page, 0, cnt); - if (!error) + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource container: " + "id=%d, error=%d\n", __func__, rcinfo->id, error); + goto exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network interface: " + "id=%d, error=%d\n", __func__, dinfo->id, error); + goto close_rc; + } + + error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt); + if (!error) { result = cnt[stat->cnt]; + } + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); +exit: return (sysctl_handle_64(oidp, &result, 0, req)); } static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1; uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num); return (sysctl_handle_32(oidp, &buf_num, 0, req)); } static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS) { struct dpaa2_ni_softc *sc 
= (struct dpaa2_ni_softc *) arg1; uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free); return (sysctl_handle_32(oidp, &buf_free, 0, req)); } static int dpaa2_ni_set_hash(device_t dev, uint64_t flags) { struct dpaa2_ni_softc *sc = device_get_softc(dev); uint64_t key = 0; int i; if (!(sc->attr.num.queues > 1)) { return (EOPNOTSUPP); } for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { if (dist_fields[i].rxnfc_field & flags) { key |= dist_fields[i].id; } } return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key)); } /** * @brief Set Rx distribution (hash or flow classification) key flags is a * combination of RXH_ bits. */ static int dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags) { + device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); + struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); + struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpkg_profile_cfg cls_cfg; struct dpkg_extract *key; struct dpaa2_buf *buf = &sc->rxd_kcfg; + struct dpaa2_cmd cmd; + uint16_t rc_token, ni_token; int i, error = 0; KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer", __func__)); - if (__predict_true(buf->store.dmat == NULL)) + if (__predict_true(buf->store.dmat == NULL)) { buf->store.dmat = sc->rxd_dmat; + } memset(&cls_cfg, 0, sizeof(cls_cfg)); /* Configure extracts according to the given flags. 
*/ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { key = &cls_cfg.extracts[cls_cfg.num_extracts]; - if (!(flags & dist_fields[i].id)) + if (!(flags & dist_fields[i].id)) { continue; + } if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { device_printf(dev, "%s: failed to add key extraction " "rule\n", __func__); return (E2BIG); } key->type = DPKG_EXTRACT_FROM_HDR; key->extract.from_hdr.prot = dist_fields[i].cls_prot; key->extract.from_hdr.type = DPKG_FULL_FIELD; key->extract.from_hdr.field = dist_fields[i].cls_field; cls_cfg.num_extracts++; } error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap); if (error != 0) { device_printf(dev, "%s: failed to allocate a buffer for Rx " "traffic distribution key configuration\n", __func__); return (error); } error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr); if (error != 0) { device_printf(dev, "%s: failed to prepare key configuration: " "error=%d\n", __func__, error); return (error); } /* Prepare for setting the Rx dist. 
*/ error = bus_dmamap_load(buf->store.dmat, buf->store.dmap, buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb, &buf->store.paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->dev, "%s: failed to map a buffer for Rx " "traffic distribution key configuration\n", __func__); return (error); } if (type == DPAA2_NI_DIST_MODE_HASH) { - error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, dpaa2_mcp_tk( - sc->cmd, sc->ni_token), sc->attr.num.queues, 0, - DPAA2_NI_DIST_MODE_HASH, buf->store.paddr); - if (error != 0) + DPAA2_CMD_INIT(&cmd); + + error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, + &rc_token); + if (error) { + device_printf(dev, "%s: failed to open resource " + "container: id=%d, error=%d\n", __func__, rcinfo->id, + error); + goto err_exit; + } + error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, + &ni_token); + if (error) { + device_printf(dev, "%s: failed to open network " + "interface: id=%d, error=%d\n", __func__, dinfo->id, + error); + goto close_rc; + } + + error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd, + sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, + buf->store.paddr); + if (error != 0) { device_printf(dev, "%s: failed to set distribution mode " "and size for the traffic class\n", __func__); + } + + (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + ni_token)); +close_rc: + (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, + rc_token)); } +err_exit: return (error); } /** * @brief Prepares extract parameters. * * cfg: Defining a full Key Generation profile. * key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA. 
*/
/*
 * Serialize the software key-generation profile "cfg" into the binary
 * layout (struct dpni_ext_set_rx_tc_dist) expected by the MC firmware in
 * "key_cfg_buf".  Returns 0 on success or EINVAL on a malformed profile.
 */
static int
dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
{
	struct dpni_ext_set_rx_tc_dist *ext;
	struct dpni_dist_extract *dst;
	struct dpkg_extract *src;
	int idx, msk;

	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
		return (EINVAL);

	ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
	ext->num_extracts = cfg->num_extracts;

	for (idx = 0; idx < cfg->num_extracts; idx++) {
		src = &cfg->extracts[idx];
		dst = &ext->extracts[idx];

		switch (src->type) {
		case DPKG_EXTRACT_FROM_HDR:
			dst->prot = src->extract.from_hdr.prot;
			/* EFH type is carried in the 4 LSBs. */
			dst->efh_type = src->extract.from_hdr.type & 0x0Fu;
			dst->size = src->extract.from_hdr.size;
			dst->offset = src->extract.from_hdr.offset;
			dst->field = src->extract.from_hdr.field;
			dst->hdr_index = src->extract.from_hdr.hdr_index;
			break;
		case DPKG_EXTRACT_FROM_DATA:
			dst->size = src->extract.from_data.size;
			dst->offset = src->extract.from_data.offset;
			break;
		case DPKG_EXTRACT_FROM_PARSE:
			dst->size = src->extract.from_parse.size;
			dst->offset = src->extract.from_parse.offset;
			break;
		default:
			return (EINVAL);
		}

		dst->num_of_byte_masks = src->num_of_byte_masks;
		/* Extraction type is carried in the 4 LSBs as well. */
		dst->extract_type = src->type & 0x0Fu;

		for (msk = 0; msk < DPKG_NUM_OF_MASKS; msk++) {
			dst->masks[msk].mask = src->masks[msk].mask;
			dst->masks[msk].offset = src->masks[msk].offset;
		}
	}

	return (0);
}

/**
 * @brief Obtain the next dequeue response from the channel storage.
*/ static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq) { struct dpaa2_buf *buf = &chan->store; struct dpaa2_dq *msgs = buf->store.vaddr; struct dpaa2_dq *msg = &msgs[chan->store_idx]; int rc = EINPROGRESS; chan->store_idx++; if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) { rc = EALREADY; /* VDQ command is expired */ chan->store_idx = 0; if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) msg = NULL; /* Null response, FD is invalid */ } if (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY) { rc = ENOENT; /* FQ is empty */ chan->store_idx = 0; } if (dq != NULL) *dq = msg; return (rc); } static device_method_t dpaa2_ni_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_ni_probe), DEVMETHOD(device_attach, dpaa2_ni_attach), DEVMETHOD(device_detach, dpaa2_ni_detach), /* mii via memac_mdio */ DEVMETHOD(miibus_statchg, dpaa2_ni_miibus_statchg), DEVMETHOD_END }; static driver_t dpaa2_ni_driver = { "dpaa2_ni", dpaa2_ni_methods, sizeof(struct dpaa2_ni_softc), }; DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0); DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0); MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1); #ifdef DEV_ACPI MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1); #endif #ifdef FDT MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1); #endif diff --git a/sys/dev/dpaa2/dpaa2_ni.h b/sys/dev/dpaa2/dpaa2_ni.h index 929a4d0d4966..03528420d5d7 100644 --- a/sys/dev/dpaa2/dpaa2_ni.h +++ b/sys/dev/dpaa2/dpaa2_ni.h @@ -1,607 +1,602 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * Copyright © 2022 Mathew McBride * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_NI_H #define _DPAA2_NI_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include "dpaa2_types.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_io.h" #include "dpaa2_mac.h" #include "dpaa2_ni_dpkg.h" /* Name of the DPAA2 network interface. */ #define DPAA2_NI_IFNAME "dpni" /* Maximum resources per DPNI: 16 DPIOs + 16 DPCONs + 1 DPBP + 1 DPMCP. */ #define DPAA2_NI_MAX_RESOURCES 34 #define DPAA2_NI_MSI_COUNT 1 /* MSIs per DPNI */ #define DPAA2_NI_MAX_CHANNELS 16 /* to distribute ingress traffic to cores */ #define DPAA2_NI_MAX_TCS 8 /* traffic classes per DPNI */ #define DPAA2_NI_MAX_POOLS 8 /* buffer pools per DPNI */ /* Maximum number of Rx buffers. */ #define DPAA2_NI_BUFS_INIT (50u * DPAA2_SWP_BUFS_PER_CMD) #define DPAA2_NI_BUFS_MAX (1 << 15) /* 15 bits for buffer index max. */ /* Maximum number of buffers allocated per Tx ring. 
*/ #define DPAA2_NI_BUFS_PER_TX (1 << 7) #define DPAA2_NI_MAX_BPTX (1 << 8) /* 8 bits for buffer index max. */ /* Number of the DPNI statistics counters. */ #define DPAA2_NI_STAT_COUNTERS 7u #define DPAA2_NI_STAT_SYSCTLS 9u /* Error and status bits in the frame annotation status word. */ #define DPAA2_NI_FAS_DISC 0x80000000 /* debug frame */ #define DPAA2_NI_FAS_MS 0x40000000 /* MACSEC frame */ #define DPAA2_NI_FAS_PTP 0x08000000 #define DPAA2_NI_FAS_MC 0x04000000 /* Ethernet multicast frame */ #define DPAA2_NI_FAS_BC 0x02000000 /* Ethernet broadcast frame */ #define DPAA2_NI_FAS_KSE 0x00040000 #define DPAA2_NI_FAS_EOFHE 0x00020000 #define DPAA2_NI_FAS_MNLE 0x00010000 #define DPAA2_NI_FAS_TIDE 0x00008000 #define DPAA2_NI_FAS_PIEE 0x00004000 #define DPAA2_NI_FAS_FLE 0x00002000 /* Frame length error */ #define DPAA2_NI_FAS_FPE 0x00001000 /* Frame physical error */ #define DPAA2_NI_FAS_PTE 0x00000080 #define DPAA2_NI_FAS_ISP 0x00000040 #define DPAA2_NI_FAS_PHE 0x00000020 #define DPAA2_NI_FAS_BLE 0x00000010 #define DPAA2_NI_FAS_L3CV 0x00000008 /* L3 csum validation performed */ #define DPAA2_NI_FAS_L3CE 0x00000004 /* L3 csum error */ #define DPAA2_NI_FAS_L4CV 0x00000002 /* L4 csum validation performed */ #define DPAA2_NI_FAS_L4CE 0x00000001 /* L4 csum error */ /* Mask for errors on the ingress path. */ #define DPAA2_NI_FAS_RX_ERR_MASK (DPAA2_NI_FAS_KSE | \ DPAA2_NI_FAS_EOFHE | \ DPAA2_NI_FAS_MNLE | \ DPAA2_NI_FAS_TIDE | \ DPAA2_NI_FAS_PIEE | \ DPAA2_NI_FAS_FLE | \ DPAA2_NI_FAS_FPE | \ DPAA2_NI_FAS_PTE | \ DPAA2_NI_FAS_ISP | \ DPAA2_NI_FAS_PHE | \ DPAA2_NI_FAS_BLE | \ DPAA2_NI_FAS_L3CE | \ DPAA2_NI_FAS_L4CE \ ) /* Option bits to select specific queue configuration options to apply. 
*/ #define DPAA2_NI_QUEUE_OPT_USER_CTX 0x00000001 #define DPAA2_NI_QUEUE_OPT_DEST 0x00000002 #define DPAA2_NI_QUEUE_OPT_FLC 0x00000004 #define DPAA2_NI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 #define DPAA2_NI_QUEUE_OPT_SET_CGID 0x00000040 #define DPAA2_NI_QUEUE_OPT_CLEAR_CGID 0x00000080 /* DPNI link configuration options. */ #define DPAA2_NI_LINK_OPT_AUTONEG ((uint64_t) 0x01u) #define DPAA2_NI_LINK_OPT_HALF_DUPLEX ((uint64_t) 0x02u) #define DPAA2_NI_LINK_OPT_PAUSE ((uint64_t) 0x04u) #define DPAA2_NI_LINK_OPT_ASYM_PAUSE ((uint64_t) 0x08u) #define DPAA2_NI_LINK_OPT_PFC_PAUSE ((uint64_t) 0x10u) /* * Number of times to retry a frame enqueue before giving up. Value determined * empirically, in order to minimize the number of frames dropped on Tx. */ #define DPAA2_NI_ENQUEUE_RETRIES 10 enum dpaa2_ni_queue_type { DPAA2_NI_QUEUE_RX = 0, DPAA2_NI_QUEUE_TX, DPAA2_NI_QUEUE_TX_CONF, DPAA2_NI_QUEUE_RX_ERR }; enum dpaa2_ni_dest_type { DPAA2_NI_DEST_NONE = 0, DPAA2_NI_DEST_DPIO, DPAA2_NI_DEST_DPCON }; enum dpaa2_ni_ofl_type { DPAA2_NI_OFL_RX_L3_CSUM = 0, DPAA2_NI_OFL_RX_L4_CSUM, DPAA2_NI_OFL_TX_L3_CSUM, DPAA2_NI_OFL_TX_L4_CSUM, DPAA2_NI_OFL_FLCTYPE_HASH /* FD flow context for AIOP/CTLU */ }; /** * @brief DPNI ingress traffic distribution mode. */ enum dpaa2_ni_dist_mode { DPAA2_NI_DIST_MODE_NONE = 0, DPAA2_NI_DIST_MODE_HASH, DPAA2_NI_DIST_MODE_FS }; /** * @brief DPNI behavior in case of errors. */ enum dpaa2_ni_err_action { DPAA2_NI_ERR_DISCARD = 0, DPAA2_NI_ERR_CONTINUE, DPAA2_NI_ERR_SEND_TO_ERROR_QUEUE }; struct dpaa2_ni_channel; struct dpaa2_ni_fq; /** * @brief Attributes of the DPNI object. * * options: ... * wriop_ver: Revision of the underlying WRIOP hardware block. */ struct dpaa2_ni_attr { uint32_t options; uint16_t wriop_ver; struct { uint16_t fs; uint8_t mac; uint8_t vlan; uint8_t qos; } entries; struct { uint8_t queues; uint8_t rx_tcs; uint8_t tx_tcs; uint8_t channels; uint8_t cgs; } num; struct { uint8_t fs; uint8_t qos; } key_size; }; /** * @brief Tx ring. 
* * fq: Parent (TxConf) frame queue. * fqid: ID of the logical Tx queue. * mbuf_br: Ring buffer for mbufs to transmit. * mbuf_lock: Lock for the ring buffer. */ struct dpaa2_ni_tx_ring { struct dpaa2_ni_fq *fq; uint32_t fqid; uint32_t txid; /* Tx ring index */ /* Ring buffer for indexes in "buf" array. */ struct buf_ring *idx_br; struct mtx lock; /* Buffers to DMA load/unload Tx mbufs. */ struct dpaa2_buf buf[DPAA2_NI_BUFS_PER_TX]; }; /** * @brief A Frame Queue is the basic queuing structure used by the QMan. * * It comprises a list of frame descriptors (FDs), so it can be thought of * as a queue of frames. * * NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued * onto a work queue (WQ). * * fqid: Frame queue ID, can be used to enqueue/dequeue or execute other * commands on the queue through DPIO. * txq_n: Number of configured Tx queues. * tx_fqid: Frame queue IDs of the Tx queues which belong to the same flowid. * Note that Tx queues are logical queues and not all management * commands are available on these queue types. * qdbin: Queue destination bin. Can be used with the DPIO enqueue * operation based on QDID, QDBIN and QPRI. Note that all Tx queues * with the same flowid have the same destination bin. */ struct dpaa2_ni_fq { int (*consume)(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *, struct dpaa2_fd *); struct dpaa2_ni_channel *chan; uint32_t fqid; uint16_t flowid; uint8_t tc; enum dpaa2_ni_queue_type type; /* Optional fields (for TxConf queue). */ struct dpaa2_ni_tx_ring tx_rings[DPAA2_NI_MAX_TCS]; uint32_t tx_qdbin; } __aligned(CACHE_LINE_SIZE); /** * @brief QBMan channel to process ingress traffic (Rx, Tx conf). * * NOTE: Several WQs are organized into a single WQ Channel. */ struct dpaa2_ni_channel { device_t ni_dev; device_t io_dev; device_t con_dev; uint16_t id; uint16_t flowid; /* For debug purposes only! */ uint64_t tx_frames; uint64_t tx_dropped; /* Context to configure CDAN. 
*/ struct dpaa2_io_notif_ctx ctx; /* Channel storage (to keep responses from VDQ command). */ struct dpaa2_buf store; uint32_t store_sz; /* in frames */ uint32_t store_idx; /* frame index */ /* Recycled buffers to release back to the pool. */ uint32_t recycled_n; bus_addr_t recycled[DPAA2_SWP_BUFS_PER_CMD]; /* Frame queues */ uint32_t rxq_n; struct dpaa2_ni_fq rx_queues[DPAA2_NI_MAX_TCS]; struct dpaa2_ni_fq txc_queue; }; /** * @brief Configuration of the network interface queue. * * NOTE: This configuration is used to obtain information of a queue by * DPNI_GET_QUEUE command and update it by DPNI_SET_QUEUE one. * * It includes binding of the queue to a DPIO or DPCON object to receive * notifications and traffic on the CPU. * * user_ctx: (r/w) User defined data, presented along with the frames * being dequeued from this queue. * flow_ctx: (r/w) Set default FLC value for traffic dequeued from this queue. * Please check description of FD structure for more information. * Note that FLC values set using DPNI_ADD_FS_ENTRY, if any, take * precedence over values per queue. * dest_id: (r/w) The ID of a DPIO or DPCON object, depending on * DEST_TYPE (in flags) value. This field is ignored for DEST_TYPE * set to 0 (DPNI_DEST_NONE). * fqid: (r) Frame queue ID, can be used to enqueue/dequeue or execute * other commands on the queue through DPIO. Note that Tx queues * are logical queues and not all management commands are available * on these queue types. * qdbin: (r) Queue destination bin. Can be used with the DPIO enqueue * operation based on QDID, QDBIN and QPRI. * type: Type of the queue to set configuration to. * tc: Traffic class. Ignored for QUEUE_TYPE 2 and 3 (Tx confirmation * and Rx error queues). * idx: Selects a specific queue out of the set of queues in a TC. * Accepted values are in range 0 to NUM_QUEUES–1. This field is * ignored for QUEUE_TYPE 3 (Rx error queue). 
For access to the * shared Tx confirmation queue (for Tx confirmation mode 1), this * field must be set to 0xff. * cgid: (r/w) Congestion group ID. * chan_id: (w) Channel index to be configured. Used only when QUEUE_TYPE is * set to DPNI_QUEUE_TX. * priority: (r/w) Sets the priority in the destination DPCON or DPIO for * dequeued traffic. Supported values are 0 to # of priorities in * destination DPCON or DPIO - 1. This field is ignored for * DEST_TYPE set to 0 (DPNI_DEST_NONE), except if this DPNI is in * AIOP context. In that case the DPNI_SET_QUEUE can be used to * override the default assigned priority of the FQ from the TC. * options: Option bits selecting specific configuration options to apply. * See DPAA2_NI_QUEUE_OPT_* for details. * dest_type: Type of destination for dequeued traffic. * cgid_valid: (r) Congestion group ID is valid. * stash_control: (r/w) If true, lowest 6 bits of FLC are used for stash control. * Please check description of FD structure for more information. * hold_active: (r/w) If true, this flag prevents the queue from being * rescheduled between DPIOs while it carries traffic and is active * on one DPIO. Can help reduce reordering if one queue is services * on multiple CPUs, but the queue is also more likely to be trapped * in one DPIO, especially when congested. */ struct dpaa2_ni_queue_cfg { uint64_t user_ctx; uint64_t flow_ctx; uint32_t dest_id; uint32_t fqid; uint16_t qdbin; enum dpaa2_ni_queue_type type; uint8_t tc; uint8_t idx; uint8_t cgid; uint8_t chan_id; uint8_t priority; uint8_t options; enum dpaa2_ni_dest_type dest_type; bool cgid_valid; bool stash_control; bool hold_active; }; /** * @brief Buffer layout attributes. * * pd_size: Size kept for private data (in bytes). * fd_align: Frame data alignment. * head_size: Data head room. * tail_size: Data tail room. * options: ... * pass_timestamp: Timestamp is included in the buffer layout. * pass_parser_result: Parsing results are included in the buffer layout. 
* pass_frame_status: Frame status is included in the buffer layout. * pass_sw_opaque: SW annotation is activated. * queue_type: Type of a queue this configuration applies to. */ struct dpaa2_ni_buf_layout { uint16_t pd_size; uint16_t fd_align; uint16_t head_size; uint16_t tail_size; uint16_t options; bool pass_timestamp; bool pass_parser_result; bool pass_frame_status; bool pass_sw_opaque; enum dpaa2_ni_queue_type queue_type; }; /** * @brief Buffer pools configuration for a network interface. */ struct dpaa2_ni_pools_cfg { uint8_t pools_num; struct { uint32_t bp_obj_id; uint16_t buf_sz; int backup_flag; /* 0 - regular pool, 1 - backup pool */ } pools[DPAA2_NI_MAX_POOLS]; }; /** * @brief Errors behavior configuration for a network interface. * * err_mask: The errors mask to configure. * action: Desired action for the errors selected in the mask. * set_err_fas: Set to true to mark the errors in frame annotation * status (FAS); relevant for non-discard actions only. */ struct dpaa2_ni_err_cfg { uint32_t err_mask; enum dpaa2_ni_err_action action; bool set_err_fas; }; /** * @brief Link configuration. * * options: Mask of available options. * adv_speeds: Speeds that are advertised for autoneg. * rate: Rate in Mbps. */ struct dpaa2_ni_link_cfg { uint64_t options; uint64_t adv_speeds; uint32_t rate; }; /** * @brief Link state. * * options: Mask of available options. * adv_speeds: Speeds that are advertised for autoneg. * sup_speeds: Speeds capability of the PHY. * rate: Rate in Mbps. * link_up: Link state (true if link is up, false otherwise). * state_valid: Ignore/Update the state of the link. */ struct dpaa2_ni_link_state { uint64_t options; uint64_t adv_speeds; uint64_t sup_speeds; uint32_t rate; bool link_up; bool state_valid; }; /** * @brief QoS table configuration. * * kcfg_busaddr: Address of the buffer in I/O virtual address space which * holds the QoS table key configuration. * default_tc: Default traffic class to use in case of a lookup miss in * the QoS table. 
* discard_on_miss: Set to true to discard frames in case of no match. * Default traffic class will be used otherwise. * keep_entries: Set to true to keep existing QoS table entries. This * option will work properly only for DPNI objects created * with DPNI_OPT_HAS_KEY_MASKING option. */ struct dpaa2_ni_qos_table { uint64_t kcfg_busaddr; uint8_t default_tc; bool discard_on_miss; bool keep_entries; }; /** * @brief Context to add multicast physical addresses to the filter table. * * ifp: Network interface associated with the context. * error: Result of the last MC command. * nent: Number of entries added. */ struct dpaa2_ni_mcaddr_ctx { struct ifnet *ifp; int error; int nent; }; struct dpaa2_eth_dist_fields { uint64_t rxnfc_field; enum net_prot cls_prot; int cls_field; int size; uint64_t id; }; struct dpni_mask_cfg { uint8_t mask; uint8_t offset; } __packed; struct dpni_dist_extract { uint8_t prot; uint8_t efh_type; /* EFH type is in the 4 LSBs. */ uint8_t size; uint8_t offset; uint32_t field; uint8_t hdr_index; uint8_t constant; uint8_t num_of_repeats; uint8_t num_of_byte_masks; uint8_t extract_type; /* Extraction type is in the 4 LSBs */ uint8_t _reserved[3]; struct dpni_mask_cfg masks[4]; } __packed; struct dpni_ext_set_rx_tc_dist { uint8_t num_extracts; uint8_t _reserved[7]; struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; } __packed; /** * @brief Software context for the DPAA2 Network Interface driver. */ struct dpaa2_ni_softc { device_t dev; struct resource *res[DPAA2_NI_MAX_RESOURCES]; uint16_t api_major; uint16_t api_minor; uint64_t rx_hash_fields; uint16_t tx_data_off; uint16_t tx_qdid; uint32_t link_options; int link_state; uint16_t buf_align; uint16_t buf_sz; /* For debug purposes only! */ uint64_t rx_anomaly_frames; uint64_t rx_single_buf_frames; uint64_t rx_sg_buf_frames; uint64_t rx_enq_rej_frames; uint64_t rx_ieoi_err_frames; uint64_t tx_single_buf_frames; uint64_t tx_sg_frames; /* Attributes of the DPAA2 network interface. 
*/ struct dpaa2_ni_attr attr; - /* Helps to send commands to MC. */ - struct dpaa2_cmd *cmd; - uint16_t rc_token; - uint16_t ni_token; - /* For network interface and miibus. */ struct ifnet *ifp; uint32_t if_flags; struct mtx lock; device_t miibus; struct mii_data *mii; boolean_t fixed_link; struct ifmedia fixed_ifmedia; int media_status; /* DMA resources */ bus_dma_tag_t bp_dmat; /* for buffer pool */ bus_dma_tag_t tx_dmat; /* for Tx buffers */ bus_dma_tag_t st_dmat; /* for channel storage */ bus_dma_tag_t rxd_dmat; /* for Rx distribution key */ bus_dma_tag_t qos_dmat; /* for QoS table key */ bus_dma_tag_t sgt_dmat; /* for scatter/gather tables */ struct dpaa2_buf qos_kcfg; /* QoS table key config. */ struct dpaa2_buf rxd_kcfg; /* Rx distribution key config. */ /* Channels and RxError frame queue */ uint32_t chan_n; struct dpaa2_ni_channel *channels[DPAA2_NI_MAX_CHANNELS]; struct dpaa2_ni_fq rxe_queue; /* one per network interface */ /* Rx buffers for buffer pool. */ struct dpaa2_atomic buf_num; struct dpaa2_atomic buf_free; /* for sysctl(9) only */ struct dpaa2_buf buf[DPAA2_NI_BUFS_MAX]; /* Interrupts */ int irq_rid[DPAA2_NI_MSI_COUNT]; struct resource *irq_res; void *intr; /* interrupt handle */ /* Tasks */ struct taskqueue *bp_taskq; struct task bp_task; /* Callouts */ struct callout mii_callout; struct { uint32_t dpmac_id; uint8_t addr[ETHER_ADDR_LEN]; device_t phy_dev; int phy_loc; } mac; /* Info about connected DPMAC (if exists). */ }; extern struct resource_spec dpaa2_ni_spec[]; #endif /* _DPAA2_NI_H */ diff --git a/sys/dev/dpaa2/dpaa2_rc.c b/sys/dev/dpaa2/dpaa2_rc.c index f5d7bae92e04..6828eb35d833 100644 --- a/sys/dev/dpaa2/dpaa2_rc.c +++ b/sys/dev/dpaa2/dpaa2_rc.c @@ -1,3585 +1,3562 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * The DPAA2 Resource Container (DPRC) bus driver. * * DPRC holds all the resources and object information that a software context * (kernel, virtual machine, etc.) can access or use. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mcp.h" #include "dpaa2_mc.h" #include "dpaa2_ni.h" #include "dpaa2_mc_if.h" #include "dpaa2_cmd_if.h" /* Timeouts to wait for a command response from MC. */ #define CMD_SPIN_TIMEOUT 100u /* us */ #define CMD_SPIN_ATTEMPTS 2000u /* max. 200 ms */ #define TYPE_LEN_MAX 16u #define LABEL_LEN_MAX 16u MALLOC_DEFINE(M_DPAA2_RC, "dpaa2_rc", "DPAA2 Resource Container"); /* Discover and add devices to the resource container. 
*/ static int dpaa2_rc_discover(struct dpaa2_rc_softc *); static int dpaa2_rc_add_child(struct dpaa2_rc_softc *, struct dpaa2_cmd *, struct dpaa2_obj *); static int dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *, struct dpaa2_cmd *, struct dpaa2_obj *); /* Helper routines. */ static int dpaa2_rc_enable_irq(struct dpaa2_mcp *, struct dpaa2_cmd *, uint8_t, bool, uint16_t); static int dpaa2_rc_configure_irq(device_t, device_t, int, uint64_t, uint32_t); static int dpaa2_rc_add_res(device_t, device_t, enum dpaa2_dev_type, int *, int); static int dpaa2_rc_print_type(struct resource_list *, enum dpaa2_dev_type); static struct dpaa2_mcp *dpaa2_rc_select_portal(device_t, device_t); /* Routines to send commands to MC. */ static int dpaa2_rc_exec_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *, uint16_t); static int dpaa2_rc_send_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *); static int dpaa2_rc_wait_for_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *); static int dpaa2_rc_reset_cmd_params(struct dpaa2_cmd *); static int dpaa2_rc_probe(device_t dev) { /* DPRC device will be added by the parent DPRC or MC bus itself. */ device_set_desc(dev, "DPAA2 Resource Container"); return (BUS_PROBE_DEFAULT); } static int dpaa2_rc_detach(device_t dev) { struct dpaa2_devinfo *dinfo; int error; error = bus_generic_detach(dev); if (error) return (error); dinfo = device_get_ivars(dev); if (dinfo->portal) dpaa2_mcp_free_portal(dinfo->portal); if (dinfo) free(dinfo, M_DPAA2_RC); return (device_delete_children(dev)); } static int dpaa2_rc_attach(device_t dev) { device_t pdev; struct dpaa2_mc_softc *mcsc; struct dpaa2_rc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; sc = device_get_softc(dev); sc->dev = dev; sc->unit = device_get_unit(dev); if (sc->unit == 0) { /* Root DPRC should be attached directly to the MC bus. 
		 */
		pdev = device_get_parent(dev);
		mcsc = device_get_softc(pdev);

		KASSERT(strcmp(device_get_name(pdev), "dpaa2_mc") == 0,
		    ("root DPRC should be attached to the MC bus"));

		/*
		 * Allocate devinfo to let the parent MC bus access ICID of the
		 * DPRC object.
		 */
		dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC,
		    M_WAITOK | M_ZERO);
		if (!dinfo) {
			/* NOTE(review): dead branch — M_WAITOK never fails. */
			device_printf(dev, "%s: failed to allocate "
			    "dpaa2_devinfo\n", __func__);
			dpaa2_rc_detach(dev);
			return (ENXIO);
		}
		device_set_ivars(dev, dinfo);

		dinfo->pdev = pdev;
		dinfo->dev = dev;
		dinfo->dtype = DPAA2_DEV_RC;
		dinfo->portal = NULL;

		/* Prepare helper portal object to send commands to MC. */
		error = dpaa2_mcp_init_portal(&dinfo->portal, mcsc->res[0],
		    &mcsc->map[0], DPAA2_PORTAL_DEF);
		if (error) {
			device_printf(dev, "%s: failed to initialize dpaa2_mcp: "
			    "error=%d\n", __func__, error);
			dpaa2_rc_detach(dev);
			return (ENXIO);
		}
	} else {
		/* TODO: Child DPRCs aren't supported yet. */
		return (ENXIO);
	}

	/* Create DPAA2 devices for objects in this container. */
	error = dpaa2_rc_discover(sc);
	if (error) {
		device_printf(dev, "%s: failed to discover objects in "
		    "container: error=%d\n", __func__, error);
		dpaa2_rc_detach(dev);
		return (error);
	}

	return (0);
}

/*
 * Bus interface.
 */

static struct resource_list *
dpaa2_rc_get_resource_list(device_t rcdev, device_t child)
{
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);

	return (&dinfo->resources);
}

/*
 * Remove a resource from a direct child's resource list, unreserving it first
 * when it is reserved but no longer active or busy.
 */
static void
dpaa2_rc_delete_resource(device_t rcdev, device_t child, int type, int rid)
{
	struct resource_list *rl;
	struct resource_list_entry *rle;
	struct dpaa2_devinfo *dinfo;

	/* Only resources of direct children are managed here. */
	if (device_get_parent(child) != rcdev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle == NULL)
		return;

	if (rle->res) {
		/* Refuse to delete a resource the child is still using. */
		if (rman_get_flags(rle->res) & RF_ACTIVE ||
		    resource_list_busy(rl, type, rid)) {
			device_printf(rcdev, "%s: resource still owned by "
			    "child: type=%d, rid=%d, start=%jx\n", __func__,
			    type, rid, rman_get_start(rle->res));
			return;
		}
		resource_list_unreserve(rl, rcdev, child, type, rid);
	}
	resource_list_delete(rl, type, rid);
}

static struct resource *
dpaa2_rc_alloc_multi_resource(device_t rcdev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource_list *rl;
	struct dpaa2_devinfo *dinfo;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	/*
	 * By default, software portal interrupts are message-based, that is,
	 * they are issued from QMan using a 4 byte write.
	 *
	 * TODO: However this default behavior can be changed by programming one
	 *       or more software portals to issue their interrupts via a
	 *       dedicated software portal interrupt wire.
	 *       See registers SWP_INTW0_CFG to SWP_INTW3_CFG for details.
	 */
	if (type == SYS_RES_IRQ && *rid == 0)
		return (NULL);

	return (resource_list_alloc(rl, rcdev, child, type, rid,
	    start, end, count, flags));
}

static struct resource *
dpaa2_rc_alloc_resource(device_t rcdev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	/* Indirect children get their resources from their own parent. */
	if (device_get_parent(child) != rcdev)
		return (BUS_ALLOC_RESOURCE(device_get_parent(rcdev), child,
		    type, rid, start, end, count, flags));

	return (dpaa2_rc_alloc_multi_resource(rcdev, child, type, rid, start,
	    end, count, flags));
}

static int
dpaa2_rc_release_resource(device_t rcdev, device_t child, int type, int rid,
    struct resource *r)
{
	struct resource_list *rl;
	struct dpaa2_devinfo *dinfo;

	if (device_get_parent(child) != rcdev)
		return (BUS_RELEASE_RESOURCE(device_get_parent(rcdev), child,
		    type, rid, r));

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	return (resource_list_release(rl, rcdev, child, type, rid, r));
}

/* Called when a child device is deleted: reclaim everything it held. */
static void
dpaa2_rc_child_deleted(device_t rcdev, device_t child)
{
	struct dpaa2_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	/* Free all allocated resources */
	STAILQ_FOREACH(rle, rl, link) {
		if (rle->res) {
			/* Forcibly release anything still active or busy. */
			if (rman_get_flags(rle->res) & RF_ACTIVE ||
			    resource_list_busy(rl, rle->type, rle->rid)) {
				device_printf(child, "%s: resource still owned: "
				    "type=%d, rid=%d, addr=%lx\n", __func__,
				    rle->type, rle->rid,
				    rman_get_start(rle->res));
				bus_release_resource(child, rle->type,
				    rle->rid, rle->res);
			}
			resource_list_unreserve(rl, rcdev, child, rle->type,
			    rle->rid);
		}
	}
	resource_list_free(rl);

	if (dinfo)
		free(dinfo, M_DPAA2_RC);
}

/* Called on child detach: report and release any leaked IRQ/MSI/memory. */
static void
dpaa2_rc_child_detached(device_t rcdev, device_t child)
{
	struct dpaa2_devinfo *dinfo;
	struct resource_list *rl;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	if (resource_list_release_active(rl, rcdev, child, SYS_RES_IRQ) != 0)
		device_printf(child, "%s: leaked IRQ resources!\n", __func__);
	if
	    (dinfo->msi.msi_alloc != 0) {
		device_printf(child, "%s: leaked %d MSI vectors!\n", __func__,
		    dinfo->msi.msi_alloc);
		PCI_RELEASE_MSI(rcdev, child);
	}
	if (resource_list_release_active(rl, rcdev, child, SYS_RES_MEMORY) != 0)
		device_printf(child, "%s: leaked memory resources!\n", __func__);
}

/*
 * Set up an interrupt handler for a direct child. Only MSI (rid > 0) is
 * supported; the MSI address/data pair obtained from the parent PCIB is
 * programmed into the DPAA2 object via the MC firmware.
 */
static int
dpaa2_rc_setup_intr(device_t rcdev, device_t child, struct resource *irq,
    int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
    void **cookiep)
{
	struct dpaa2_devinfo *dinfo;
	uint64_t addr;
	uint32_t data;
	void *cookie;
	int error, rid;

	error = bus_generic_setup_intr(rcdev, child, irq, flags, filter, intr,
	    arg, &cookie);
	if (error) {
		device_printf(rcdev, "%s: bus_generic_setup_intr() failed: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	/* If this is not a direct child, just bail out. */
	if (device_get_parent(child) != rcdev) {
		*cookiep = cookie;
		return (0);
	}

	rid = rman_get_rid(irq);
	if (rid == 0) {
		if (bootverbose)
			device_printf(rcdev, "%s: cannot setup interrupt with "
			    "rid=0: INTx are not supported by DPAA2 objects "
			    "yet\n", __func__);
		return (EINVAL);
	} else {
		dinfo = device_get_ivars(child);
		KASSERT(dinfo->msi.msi_alloc > 0,
		    ("No MSI interrupts allocated"));

		/*
		 * Ask our parent to map the MSI and give us the address and
		 * data register values. If we fail for some reason, teardown
		 * the interrupt handler.
		 */
		error = PCIB_MAP_MSI(device_get_parent(rcdev), child,
		    rman_get_start(irq), &addr, &data);
		if (error) {
			device_printf(rcdev, "%s: PCIB_MAP_MSI failed: "
			    "error=%d\n", __func__, error);
			(void)bus_generic_teardown_intr(rcdev, child, irq,
			    cookie);
			return (error);
		}

		/* Configure MSI for this DPAA2 object.
		 */
		error = dpaa2_rc_configure_irq(rcdev, child, rid, addr, data);
		if (error) {
			device_printf(rcdev, "%s: failed to configure IRQ for "
			    "DPAA2 object: rid=%d, type=%s, unit=%d\n", __func__,
			    rid, dpaa2_ttos(dinfo->dtype),
			    device_get_unit(child));
			return (error);
		}
		dinfo->msi.msi_handlers++;
	}
	*cookiep = cookie;
	return (0);
}

static int
dpaa2_rc_teardown_intr(device_t rcdev, device_t child, struct resource *irq,
    void *cookie)
{
	struct resource_list_entry *rle;
	struct dpaa2_devinfo *dinfo;
	int error, rid;

	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
		return (EINVAL);

	/* If this isn't a direct child, just bail out */
	if (device_get_parent(child) != rcdev)
		return (bus_generic_teardown_intr(rcdev, child, irq, cookie));

	rid = rman_get_rid(irq);
	if (rid == 0) {
		if (bootverbose)
			device_printf(rcdev, "%s: cannot teardown interrupt "
			    "with rid=0: INTx are not supported by DPAA2 "
			    "objects yet\n", __func__);
		return (EINVAL);
	} else {
		dinfo = device_get_ivars(child);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
		if (rle->res != irq)
			return (EINVAL);
		dinfo->msi.msi_handlers--;
	}

	error = bus_generic_teardown_intr(rcdev, child, irq, cookie);
	if (rid > 0)
		KASSERT(error == 0,
		    ("%s: generic teardown failed for MSI", __func__));
	return (error);
}

static int
dpaa2_rc_print_child(device_t rcdev, device_t child)
{
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	int retval = 0;

	retval += bus_print_child_header(rcdev, child);
	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
	retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx");
	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");

	/* Print DPAA2-specific resources.
	 */
	retval += dpaa2_rc_print_type(rl, DPAA2_DEV_IO);
	retval += dpaa2_rc_print_type(rl, DPAA2_DEV_BP);
	retval += dpaa2_rc_print_type(rl, DPAA2_DEV_CON);
	retval += dpaa2_rc_print_type(rl, DPAA2_DEV_MCP);

	retval += printf(" at %s (id=%u)", dpaa2_ttos(dinfo->dtype), dinfo->id);

	retval += bus_print_child_domain(rcdev, child);
	retval += bus_print_child_footer(rcdev, child);

	return (retval);
}

/*
 * Pseudo-PCI interface.
 */

/*
 * Attempt to allocate *count MSI messages. The actual number allocated is
 * returned in *count. After this function returns, each message will be
 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
 *
 * NOTE: Implementation is similar to sys/dev/pci/pci.c.
 */
static int
dpaa2_rc_alloc_msi(device_t rcdev, device_t child, int *count)
{
	struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
	int error, actual, i, run, irqs[32];

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* MSI should be allocated by the resource container. */
	if (rcinfo->dtype != DPAA2_DEV_RC)
		return (ENODEV);

	/* Already have allocated messages? */
	if (dinfo->msi.msi_alloc != 0)
		return (ENXIO);

	/* Don't ask for more than the device supports. */
	actual = min(*count, dinfo->msi.msi_msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(rcdev), child, actual,
		    actual, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);
		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ resources in
	 * the irqs[] array, so add new resources starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1) {
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		} else {
			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < actual; i++) {
				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}
				/* Finish previous range. */
				if (run) {
					printf("-%d", irqs[i - 1]);
					run = 0;
				}
				/* Start new range. */
				printf(",%d", irqs[i]);
			}
			/* Unfinished range? */
			if (run)
				printf("-%d", irqs[actual - 1]);
			printf(" for MSI\n");
		}
	}

	/* Update counts of alloc'd messages. */
	dinfo->msi.msi_alloc = actual;
	dinfo->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}

/*
 * Release the MSI messages associated with this DPAA2 device.
 *
 * NOTE: Implementation is similar to sys/dev/pci/pci.c.
 */
static int
dpaa2_rc_release_msi(device_t rcdev, device_t child)
{
	struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
	struct resource_list_entry *rle;
	int i, irqs[32];

	/* MSI should be released by the resource container. */
	if (rcinfo->dtype != DPAA2_DEV_RC)
		return (ENODEV);

	/* Do we have any messages to release? */
	if (dinfo->msi.msi_alloc == 0)
		return (ENODEV);
	KASSERT(dinfo->msi.msi_alloc <= 32,
	    ("more than 32 alloc'd MSI messages"));

	/* Make sure none of the resources are allocated. */
	if (dinfo->msi.msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < dinfo->msi.msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		irqs[i] = rle->start;
	}

	/* Release the messages.
	 */
	PCIB_RELEASE_MSI(device_get_parent(rcdev), child, dinfo->msi.msi_alloc,
	    irqs);
	for (i = 0; i < dinfo->msi.msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	dinfo->msi.msi_alloc = 0;
	return (0);
}

/**
 * @brief Return the maximum number of the MSI supported by this DPAA2 device.
 */
static int
dpaa2_rc_msi_count(device_t rcdev, device_t child)
{
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);

	return (dinfo->msi.msi_msgnum);
}

static int
dpaa2_rc_get_id(device_t rcdev, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev);

	if (rcinfo->dtype != DPAA2_DEV_RC)
		return (ENODEV);

	return (PCIB_GET_ID(device_get_parent(rcdev), child, type, id));
}

/*
 * DPAA2 MC command interface.
 */

/* Query the MC firmware version (major/minor/revision). */
static int
dpaa2_rc_mng_get_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t *major, uint32_t *minor, uint32_t *rev)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || major == NULL || minor == NULL ||
	    rev == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_VER);
	if (!error) {
		/* Major in the high word of params[0], revision in the low. */
		*major = cmd->params[0] >> 32;
		*minor = cmd->params[1] & 0xFFFFFFFF;
		*rev = cmd->params[0] & 0xFFFFFFFF;
	}

	return (error);
}

/* Query the SoC version (PVR/SVR registers) via the MC firmware. */
static int
dpaa2_rc_mng_get_soc_version(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, uint32_t *pvr, uint32_t *svr)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || pvr == NULL || svr == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_SOC_VER);
	if (!error) {
		*pvr = cmd->params[0] >> 32;
		*svr = cmd->params[0] & 0xFFFFFFFF;
	}

	return (error);
}

static int
dpaa2_rc_mng_get_container_id(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, uint32_t *cont_id)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd ==
NULL || cont_id == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_CONT_ID); if (!error) *cont_id = cmd->params[0] & 0xFFFFFFFF; return (error); } static int dpaa2_rc_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t cont_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = cont_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_CLOSE)); } static int dpaa2_rc_get_obj_count(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *obj_count) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || obj_count == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ_COUNT); if (!error) *obj_count = (uint32_t)(cmd->params[0] >> 32); return (error); } static int dpaa2_rc_get_obj(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_idx, struct dpaa2_obj *obj) { struct __packed dpaa2_obj_resp { uint32_t _reserved1; uint32_t id; uint16_t vendor; uint8_t irq_count; uint8_t reg_count; uint32_t state; uint16_t ver_major; uint16_t ver_minor; uint16_t flags; uint16_t _reserved2; uint8_t type[16]; uint8_t label[16]; } *pobj; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || obj == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = obj_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ); if (!error) { pobj = (struct 
dpaa2_obj_resp *) &cmd->params[0]; obj->id = pobj->id; obj->vendor = pobj->vendor; obj->irq_count = pobj->irq_count; obj->reg_count = pobj->reg_count; obj->state = pobj->state; obj->ver_major = pobj->ver_major; obj->ver_minor = pobj->ver_minor; obj->flags = pobj->flags; obj->type = dpaa2_stot((const char *) pobj->type); memcpy(obj->label, pobj->label, sizeof(pobj->label)); } /* Some DPAA2 objects might not be supported by the driver yet. */ if (obj->type == DPAA2_DEV_NOTYPE) error = DPAA2_CMD_STAT_UNKNOWN_OBJ; return (error); } static int dpaa2_rc_get_obj_descriptor(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_id, enum dpaa2_dev_type dtype, struct dpaa2_obj *obj) { struct __packed get_obj_desc_args { uint32_t obj_id; uint32_t _reserved1; uint8_t type[16]; } *args; struct __packed dpaa2_obj_resp { uint32_t _reserved1; uint32_t id; uint16_t vendor; uint8_t irq_count; uint8_t reg_count; uint32_t state; uint16_t ver_major; uint16_t ver_minor; uint16_t flags; uint16_t _reserved2; uint8_t type[16]; uint8_t label[16]; } *pobj; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); const char *type = dpaa2_ttos(dtype); int error; if (portal == NULL || cmd == NULL || obj == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct get_obj_desc_args *) &cmd->params[0]; args->obj_id = obj_id; memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX)); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ_DESC); if (!error) { pobj = (struct dpaa2_obj_resp *) &cmd->params[0]; obj->id = pobj->id; obj->vendor = pobj->vendor; obj->irq_count = pobj->irq_count; obj->reg_count = pobj->reg_count; obj->state = pobj->state; obj->ver_major = pobj->ver_major; obj->ver_minor = pobj->ver_minor; obj->flags = pobj->flags; obj->type = dpaa2_stot((const char *) pobj->type); memcpy(obj->label, pobj->label, sizeof(pobj->label)); } /* Some DPAA2 objects might not be supported by the driver yet. 
*/ if (obj->type == DPAA2_DEV_NOTYPE) error = DPAA2_CMD_STAT_UNKNOWN_OBJ; return (error); } static int dpaa2_rc_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_rc_attr *attr) { struct __packed dpaa2_rc_attr { uint32_t cont_id; uint32_t icid; uint32_t options; uint32_t portal_id; } *pattr; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_ATTR); if (!error) { pattr = (struct dpaa2_rc_attr *) &cmd->params[0]; attr->cont_id = pattr->cont_id; attr->portal_id = pattr->portal_id; attr->options = pattr->options; attr->icid = pattr->icid; } return (error); } static int dpaa2_rc_get_obj_region(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_id, uint8_t reg_idx, enum dpaa2_dev_type dtype, struct dpaa2_rc_obj_region *reg) { struct __packed obj_region_args { uint32_t obj_id; uint16_t _reserved1; uint8_t reg_idx; uint8_t _reserved2; uint64_t _reserved3; uint64_t _reserved4; uint8_t type[16]; } *args; struct __packed obj_region { uint64_t _reserved1; uint64_t base_offset; uint32_t size; uint32_t type; uint32_t flags; uint32_t _reserved2; uint64_t base_paddr; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); uint16_t cmdid, api_major, api_minor; const char *type = dpaa2_ttos(dtype); int error; if (portal == NULL || cmd == NULL || reg == NULL) return (DPAA2_CMD_STAT_ERR); /* * If the DPRC object version was not yet cached, cache it now. * Otherwise use the already cached value. */ if (!portal->rc_api_major && !portal->rc_api_minor) { error = DPAA2_CMD_RC_GET_API_VERSION(dev, child, cmd, &api_major, &api_minor); if (error) return (error); portal->rc_api_major = api_major; portal->rc_api_minor = api_minor; } else { api_major = portal->rc_api_major; api_minor = portal->rc_api_minor; } /* TODO: Remove magic numbers. 
*/ if (api_major > 6u || (api_major == 6u && api_minor >= 6u)) /* * MC API version 6.6 changed the size of the MC portals and * software portals to 64K (as implemented by hardware). */ cmdid = CMDID_RC_GET_OBJ_REG_V3; else if (api_major == 6u && api_minor >= 3u) /* * MC API version 6.3 introduced a new field to the region * descriptor: base_address. */ cmdid = CMDID_RC_GET_OBJ_REG_V2; else cmdid = CMDID_RC_GET_OBJ_REG; args = (struct obj_region_args *) &cmd->params[0]; args->obj_id = obj_id; args->reg_idx = reg_idx; memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX)); error = dpaa2_rc_exec_cmd(portal, cmd, cmdid); if (!error) { resp = (struct obj_region *) &cmd->params[0]; reg->base_paddr = resp->base_paddr; reg->base_offset = resp->base_offset; reg->size = resp->size; reg->flags = resp->flags; reg->type = resp->type & 0xFu; } return (error); } static int dpaa2_rc_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t *major, uint16_t *minor) { struct __packed rc_api_version { uint16_t major; uint16_t minor; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || major == NULL || minor == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_API_VERSION); if (!error) { resp = (struct rc_api_version *) &cmd->params[0]; *major = resp->major; *minor = resp->minor; } return (error); } static int dpaa2_rc_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint8_t enable) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_enable_irq(portal, cmd, irq_idx, enable, CMDID_RC_SET_IRQ_ENABLE)); } static int dpaa2_rc_set_obj_irq(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint64_t addr, uint32_t data, uint32_t irq_usr, uint32_t obj_id, enum dpaa2_dev_type dtype) { struct __packed set_obj_irq_args { 
		uint32_t	data;
		uint8_t		irq_idx;
		uint8_t		_reserved1[3];
		uint64_t	addr;
		uint32_t	irq_usr;
		uint32_t	obj_id;
		uint8_t		type[16];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	const char *type = dpaa2_ttos(dtype);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	args = (struct set_obj_irq_args *) &cmd->params[0];
	args->irq_idx = irq_idx;
	args->addr = addr;
	args->data = data;
	args->irq_usr = irq_usr;
	args->obj_id = obj_id;
	memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX));

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_SET_OBJ_IRQ));
}

/* Resolve the endpoint connected to ep1 and, optionally, the link status. */
static int
dpaa2_rc_get_conn(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ep_desc *ep1_desc, struct dpaa2_ep_desc *ep2_desc,
    uint32_t *link_stat)
{
	struct __packed get_conn_args {
		uint32_t	ep1_id;
		uint32_t	ep1_ifid;
		uint8_t		ep1_type[16];
		uint64_t	_reserved[4];
	} *args;
	struct __packed get_conn_resp {
		uint64_t	_reserved1[3];
		uint32_t	ep2_id;
		uint32_t	ep2_ifid;
		uint8_t		ep2_type[16];
		uint32_t	link_stat;
		uint32_t	_reserved2;
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || ep1_desc == NULL ||
	    ep2_desc == NULL)
		return (DPAA2_CMD_STAT_ERR);

	args = (struct get_conn_args *) &cmd->params[0];
	args->ep1_id = ep1_desc->obj_id;
	args->ep1_ifid = ep1_desc->if_id;
	/* TODO: Remove magic number.
	 */
	strncpy(args->ep1_type, dpaa2_ttos(ep1_desc->type), 16);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_CONN);
	if (!error) {
		resp = (struct get_conn_resp *) &cmd->params[0];
		ep2_desc->obj_id = resp->ep2_id;
		ep2_desc->if_id = resp->ep2_ifid;
		ep2_desc->type = dpaa2_stot((const char *) resp->ep2_type);
		if (link_stat != NULL)
			*link_stat = resp->link_stat;
	}

	return (error);
}

/* Open the DPNI object and return the MC token for it. */
static int
dpaa2_rc_ni_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t dpni_id, uint16_t *token)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	struct dpaa2_cmd_header *hdr;
	int error;

	if (portal == NULL || cmd == NULL || token == NULL)
		return (DPAA2_CMD_STAT_ERR);

	cmd->params[0] = dpni_id;
	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_OPEN);
	if (!error) {
		/* Token is returned in the response command header. */
		hdr = (struct dpaa2_cmd_header *) &cmd->header;
		*token = hdr->token;
	}

	return (error);
}

static int
dpaa2_rc_ni_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLOSE));
}

static int
dpaa2_rc_ni_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_ENABLE));
}

static int
dpaa2_rc_ni_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_DISABLE));
}

/* Fetch the DPNI API version implemented by the MC firmware. */
static int
dpaa2_rc_ni_get_api_version(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, uint16_t *major, uint16_t *minor)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || major == NULL || minor == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error =
	    dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_API_VER);
	if (!error) {
		/* Major version in the low 16 bits, minor in the next 16. */
		*major = cmd->params[0] & 0xFFFFU;
		*minor = (cmd->params[0] >> 16) & 0xFFFFU;
	}

	return (error);
}

static int
dpaa2_rc_ni_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_RESET));
}

/* Fetch the attributes of the DPNI object. */
static int
dpaa2_rc_ni_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_attr *attr)
{
	/* Response layout of the "get DPNI attributes" MC command. */
	struct __packed ni_attr {
		uint32_t	options;
		uint8_t		num_queues;
		uint8_t		num_rx_tcs;
		uint8_t		mac_entries;
		uint8_t		num_tx_tcs;
		uint8_t		vlan_entries;
		uint8_t		num_channels;
		uint8_t		qos_entries;
		uint8_t		_reserved1;
		uint16_t	fs_entries;
		uint16_t	_reserved2;
		uint8_t		qos_key_size;
		uint8_t		fs_key_size;
		uint16_t	wriop_ver;
		uint8_t		num_cgs;
		uint8_t		_reserved3;
		uint16_t	_reserved4;
		uint64_t	_reserved5[4];
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || attr == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_ATTR);
	if (!error) {
		resp = (struct ni_attr *) &cmd->params[0];
		attr->options = resp->options;
		attr->wriop_ver = resp->wriop_ver;
		attr->entries.fs = resp->fs_entries;
		attr->entries.mac = resp->mac_entries;
		attr->entries.vlan = resp->vlan_entries;
		attr->entries.qos = resp->qos_entries;
		attr->num.queues = resp->num_queues;
		attr->num.rx_tcs = resp->num_rx_tcs;
		attr->num.tx_tcs = resp->num_tx_tcs;
		attr->num.channels = resp->num_channels;
		attr->num.cgs = resp->num_cgs;
		attr->key_size.fs = resp->fs_key_size;
		attr->key_size.qos = resp->qos_key_size;
	}

	return (error);
}

/* Configure the buffer layout of the DPNI frame queues. */
static int
dpaa2_rc_ni_set_buf_layout(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_buf_layout *bl)
{
	/* Argument layout of the "set buffer layout" MC command. */
	struct __packed set_buf_layout_args {
		uint8_t		queue_type;
		uint8_t		_reserved1;
		uint16_t	_reserved2;
		uint16_t	options;
		uint8_t		params;
		uint8_t		_reserved3;
		uint16_t	priv_data_size;
		uint16_t	data_align;
		uint16_t	head_room;
		uint16_t	tail_room;
		uint64_t	_reserved4[5];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || bl == NULL)
		return (DPAA2_CMD_STAT_ERR);

	args = (struct set_buf_layout_args *) &cmd->params[0];
	args->queue_type = (uint8_t) bl->queue_type;
	args->options = bl->options;
	args->params = 0;
	args->priv_data_size = bl->pd_size;
	args->data_align = bl->fd_align;
	args->head_room = bl->head_size;
	args->tail_room = bl->tail_size;
	/* Pack the pass-through flags into the params bit field. */
	args->params |= bl->pass_timestamp	? 1U : 0U;
	args->params |= bl->pass_parser_result	? 2U : 0U;
	args->params |= bl->pass_frame_status	? 4U : 0U;
	args->params |= bl->pass_sw_opaque	? 8U : 0U;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_BUF_LAYOUT));
}

/* Fetch the Tx data offset configured for the DPNI. */
static int
dpaa2_rc_ni_get_tx_data_offset(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, uint16_t *offset)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || offset == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_TX_DATA_OFF);
	if (!error)
		*offset = cmd->params[0] & 0xFFFFU;

	return (error);
}

/* Fetch the MAC address of the physical port backing the DPNI. */
static int
dpaa2_rc_ni_get_port_mac_addr(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, uint8_t *mac)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || mac == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_PORT_MAC_ADDR);
	if (!error) {
		/* MAC bytes are packed big-end first into params[0]. */
		mac[0] = (cmd->params[0] >> 56) & 0xFFU;
		mac[1] = (cmd->params[0] >> 48) & 0xFFU;
		mac[2] = (cmd->params[0] >> 40) & 0xFFU;
		mac[3] = (cmd->params[0] >> 32) & 0xFFU;
		mac[4] = (cmd->params[0] >> 24) & 0xFFU;
		mac[5] = (cmd->params[0] >> 16) & 0xFFU;
	}

	return (error);
}

/* Set the primary MAC address of the DPNI. */
static int
dpaa2_rc_ni_set_prim_mac_addr(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, uint8_t *mac)
{
	struct __packed set_prim_mac_args {
		uint8_t		_reserved[2];
		uint8_t		mac[ETHER_ADDR_LEN];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || mac == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	args = (struct set_prim_mac_args *) &cmd->params[0];
	/* The MC command expects the MAC address in reversed byte order. */
	for (int i = 1; i <= ETHER_ADDR_LEN; i++)
		args->mac[i - 1] = mac[ETHER_ADDR_LEN - i];

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_PRIM_MAC_ADDR));
}

/* Fetch the primary MAC address of the DPNI. */
static int
dpaa2_rc_ni_get_prim_mac_addr(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, uint8_t *mac)
{
	struct __packed get_prim_mac_resp {
		uint8_t		_reserved[2];
		uint8_t		mac[ETHER_ADDR_LEN];
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || mac == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_PRIM_MAC_ADDR);
	if (!error) {
		resp = (struct get_prim_mac_resp *) &cmd->params[0];
		/* The response carries the MAC in reversed byte order. */
		for (int i = 1; i <= ETHER_ADDR_LEN; i++)
			mac[ETHER_ADDR_LEN - i] = resp->mac[i - 1];
	}

	return (error);
}

/* Configure the link (rate, options, advertised speeds) of the DPNI. */
static int
dpaa2_rc_ni_set_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_link_cfg *cfg)
{
	struct __packed link_cfg_args {
		uint64_t	_reserved1;
		uint32_t	rate;
		uint32_t	_reserved2;
		uint64_t	options;
		uint64_t	adv_speeds;
		uint64_t	_reserved3[3];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || cfg == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	args = (struct link_cfg_args *) &cmd->params[0];
	args->rate = cfg->rate;
	args->options = cfg->options;
	args->adv_speeds = cfg->adv_speeds;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_LINK_CFG));
}

/* Fetch the link configuration of the DPNI. */
static int
dpaa2_rc_ni_get_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_link_cfg *cfg)
{
	struct __packed link_cfg_resp {
		uint64_t	_reserved1;
		uint32_t	rate;
		uint32_t	_reserved2;
		uint64_t	options;
		uint64_t	adv_speeds;
		uint64_t	_reserved3[3];
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;
if (portal == NULL || cmd == NULL || cfg == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_LINK_CFG);
	if (!error) {
		resp = (struct link_cfg_resp *) &cmd->params[0];
		cfg->rate = resp->rate;
		cfg->options = resp->options;
		cfg->adv_speeds = resp->adv_speeds;
	}
	return (error);
}

/* Query the current link state (rate, flags, speeds) of the DPNI. */
static int
dpaa2_rc_ni_get_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_link_state *state)
{
	struct __packed link_state_resp {
		uint32_t	_reserved1;
		uint32_t	flags;
		uint32_t	rate;
		uint32_t	_reserved2;
		uint64_t	options;
		uint64_t	supported;
		uint64_t	advert;
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || state == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_LINK_STATE);
	if (!error) {
		resp = (struct link_state_resp *) &cmd->params[0];
		state->options = resp->options;
		state->adv_speeds = resp->advert;
		state->sup_speeds = resp->supported;
		state->rate = resp->rate;
		/* Bit 0: link up; bit 1: state valid. */
		state->link_up = resp->flags & 0x1u ? true : false;
		state->state_valid = resp->flags & 0x2u ? true : false;
	}
	return (error);
}

/* Install the QoS table (traffic-class mapping) of the DPNI. */
static int
dpaa2_rc_ni_set_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_qos_table *tbl)
{
	struct __packed qos_table_args {
		uint32_t	_reserved1;
		uint8_t		default_tc;
		uint8_t		options;
		uint16_t	_reserved2;
		uint64_t	_reserved[5];
		uint64_t	kcfg_busaddr;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || tbl == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct qos_table_args *) &cmd->params[0];
	args->default_tc = tbl->default_tc;
	args->kcfg_busaddr = tbl->kcfg_busaddr;
	/* Bit 0: discard on miss; bit 1: keep existing entries. */
	args->options |= tbl->discard_on_miss ? 1U : 0U;
	args->options |= tbl->keep_entries ? 2U : 0U;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_QOS_TABLE));
}

/* Remove all entries from the QoS table of the DPNI. */
static int
dpaa2_rc_ni_clear_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLEAR_QOS_TABLE));
}

/* Assign DPBP-backed buffer pools to the DPNI. */
static int
dpaa2_rc_ni_set_pools(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_pools_cfg *cfg)
{
	struct __packed set_pools_args {
		uint8_t		pools_num;
		uint8_t		backup_pool_mask;
		uint8_t		_reserved1;
		uint8_t		pool_as; /* assigning: 0 - QPRI, 1 - QDBIN */
		uint32_t	bp_obj_id[DPAA2_NI_MAX_POOLS];
		uint16_t	buf_sz[DPAA2_NI_MAX_POOLS];
		uint32_t	_reserved2;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || cfg == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_pools_args *) &cmd->params[0];
	/* Clamp to the maximum number of pools supported by the command. */
	args->pools_num = cfg->pools_num < DPAA2_NI_MAX_POOLS
	    ? cfg->pools_num : DPAA2_NI_MAX_POOLS;
	for (uint32_t i = 0; i < args->pools_num; i++) {
		args->bp_obj_id[i] = cfg->pools[i].bp_obj_id;
		args->buf_sz[i] = cfg->pools[i].buf_sz;
		args->backup_pool_mask |= (cfg->pools[i].backup_flag & 1) << i;
	}

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_POOLS));
}

/* Configure how the DPNI treats frames with errors. */
static int
dpaa2_rc_ni_set_err_behavior(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, struct dpaa2_ni_err_cfg *cfg)
{
	struct __packed err_behavior_args {
		uint32_t	err_mask;
		uint8_t		flags;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || cfg == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct err_behavior_args *) &cmd->params[0];
	args->err_mask = cfg->err_mask;
	/* Bit 4: report errors in FAS; bits 3:0: error action. */
	args->flags |= cfg->set_err_fas ? 0x10u : 0u;
	args->flags |= ((uint8_t) cfg->action) & 0x0Fu;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_ERR_BEHAVIOR));
}

/* Read the configuration of one DPNI frame queue into *cfg. */
static int
dpaa2_rc_ni_get_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_queue_cfg *cfg)
{
	struct __packed get_queue_args {
		uint8_t		queue_type;
		uint8_t		tc;
		uint8_t		idx;
		uint8_t		chan_id;
	} *args;
	struct __packed get_queue_resp {
		uint64_t	_reserved1;
		uint32_t	dest_id;
		uint16_t	_reserved2;
		uint8_t		priority;
		uint8_t		flags;
		uint64_t	flc;
		uint64_t	user_ctx;
		uint32_t	fqid;
		uint16_t	qdbin;
		uint16_t	_reserved3;
		uint8_t		cgid;
		uint8_t		_reserved[15];
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || cfg == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct get_queue_args *) &cmd->params[0];
	args->queue_type = (uint8_t) cfg->type;
	args->tc = cfg->tc;
	args->idx = cfg->idx;
	args->chan_id = cfg->chan_id;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_QUEUE);
	if (!error) {
		resp = (struct get_queue_resp *) &cmd->params[0];
		cfg->dest_id = resp->dest_id;
		cfg->priority = resp->priority;
		cfg->flow_ctx = resp->flc;
		cfg->user_ctx = resp->user_ctx;
		cfg->fqid = resp->fqid;
		cfg->qdbin = resp->qdbin;
		cfg->cgid = resp->cgid;
		/*
		 * NOTE(review): the cast binds tighter than "&", so this is
		 * ((enum) resp->flags) & 0x0Fu — numerically the same result,
		 * but (resp->flags & 0x0Fu) would express the intent better.
		 */
		cfg->dest_type = (enum dpaa2_ni_dest_type) resp->flags & 0x0Fu;
		/* Bit 5: CGID valid; bit 6: stashing; bit 7: hold active. */
		cfg->cgid_valid = (resp->flags & 0x20u) > 0u ? true : false;
		cfg->stash_control = (resp->flags & 0x40u) > 0u ? true : false;
		cfg->hold_active = (resp->flags & 0x80u) > 0u ?
true : false;
	}
	return (error);
}

/* Configure one DPNI frame queue. */
static int
dpaa2_rc_ni_set_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_queue_cfg *cfg)
{
	struct __packed set_queue_args {
		uint8_t		queue_type;
		uint8_t		tc;
		uint8_t		idx;
		uint8_t		options;
		uint32_t	_reserved1;
		uint32_t	dest_id;
		uint16_t	_reserved2;
		uint8_t		priority;
		uint8_t		flags;
		uint64_t	flc;
		uint64_t	user_ctx;
		uint8_t		cgid;
		uint8_t		chan_id;
		uint8_t		_reserved[23];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || cfg == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_queue_args *) &cmd->params[0];
	args->queue_type = (uint8_t) cfg->type;
	args->tc = cfg->tc;
	args->idx = cfg->idx;
	args->options = cfg->options;
	args->dest_id = cfg->dest_id;
	args->priority = cfg->priority;
	args->flc = cfg->flow_ctx;
	args->user_ctx = cfg->user_ctx;
	args->cgid = cfg->cgid;
	args->chan_id = cfg->chan_id;
	/* Bits 3:0: destination type; bit 6: stashing; bit 7: hold active. */
	args->flags |= (uint8_t)(cfg->dest_type & 0x0Fu);
	args->flags |= cfg->stash_control ? 0x40u : 0u;
	args->flags |= cfg->hold_active ? 0x80u : 0u;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_QUEUE));
}

/* Obtain the queuing destination ID (QDID) of the DPNI. */
static int
dpaa2_rc_ni_get_qdid(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    enum dpaa2_ni_queue_type type, uint16_t *qdid)
{
	struct __packed get_qdid_args {
		uint8_t		queue_type;
	} *args;
	struct __packed get_qdid_resp {
		uint16_t	qdid;
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || qdid == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct get_qdid_args *) &cmd->params[0];
	args->queue_type = (uint8_t) type;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_QDID);
	if (!error) {
		resp = (struct get_qdid_resp *) &cmd->params[0];
		*qdid = resp->qdid;
	}
	return (error);
}

/* Add a unicast/multicast MAC address to the DPNI filter table. */
static int
dpaa2_rc_ni_add_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t *mac)
{
	struct __packed add_mac_args {
		uint8_t		flags;
		uint8_t		_reserved;
		uint8_t		mac[ETHER_ADDR_LEN];
		uint8_t		tc_id;
		uint8_t		fq_id;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || mac == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct add_mac_args *) &cmd->params[0];
	/* MC expects the MAC address in reversed byte order. */
	for (int i = 1; i <= ETHER_ADDR_LEN; i++)
		args->mac[i - 1] = mac[ETHER_ADDR_LEN - i];

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_ADD_MAC_ADDR));
}

/* Remove a MAC address from the DPNI filter table. */
static int
dpaa2_rc_ni_remove_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t *mac)
{
	struct __packed rem_mac_args {
		uint16_t	_reserved;
		uint8_t		mac[ETHER_ADDR_LEN];
		uint64_t	_reserved1[6];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || mac == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct rem_mac_args *) &cmd->params[0];
	/* MC expects the MAC address in reversed byte order. */
	for (int i = 1; i <= ETHER_ADDR_LEN; i++)
		args->mac[i - 1] = mac[ETHER_ADDR_LEN - i];

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_REMOVE_MAC_ADDR));
}

/* Clear unicast and/or multicast filters of the DPNI. */
static int
dpaa2_rc_ni_clear_mac_filters(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, bool rm_uni, bool rm_multi)
{
	struct __packed clear_mac_filters_args {
		uint8_t		flags;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct clear_mac_filters_args *) &cmd->params[0];
	/* Bit 0: drop unicast filters; bit 1: drop multicast filters. */
	args->flags |= rm_uni ? 0x1 : 0x0;
	args->flags |= rm_multi ? 0x2 : 0x0;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLEAR_MAC_FILTERS));
}

/* Set the maximum frame length (MFL) accepted by the DPNI. */
static int
dpaa2_rc_ni_set_mfl(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint16_t length)
{
	struct __packed set_mfl_args {
		uint16_t	length;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_mfl_args *) &cmd->params[0];
	args->length = length;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_MFL));
}

/* Enable or disable a hardware offload (e.g. checksum) on the DPNI. */
static int
dpaa2_rc_ni_set_offload(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    enum dpaa2_ni_ofl_type ofl_type, bool en)
{
	struct __packed set_ofl_args {
		uint8_t		_reserved[3];
		uint8_t		ofl_type;
		uint32_t	config;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_ofl_args *) &cmd->params[0];
	args->ofl_type = (uint8_t) ofl_type;
	args->config = en ?
1u : 0u;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_OFFLOAD));
}

/* Set the interrupt mask of a DPNI interrupt line. */
static int
dpaa2_rc_ni_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, uint32_t mask)
{
	struct __packed set_irq_mask_args {
		uint32_t	mask;
		uint8_t		irq_idx;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_irq_mask_args *) &cmd->params[0];
	args->mask = mask;
	args->irq_idx = irq_idx;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_IRQ_MASK));
}

/* Enable or disable a DPNI interrupt line. */
static int
dpaa2_rc_ni_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, bool en)
{
	struct __packed set_irq_enable_args {
		uint32_t	en;
		uint8_t		irq_idx;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_irq_enable_args *) &cmd->params[0];
	args->en = en ? 1u : 0u;
	args->irq_idx = irq_idx;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_IRQ_ENABLE));
}

/* Read (and acknowledge) the status of a DPNI interrupt line. */
static int
dpaa2_rc_ni_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, uint32_t *status)
{
	struct __packed get_irq_stat_args {
		uint32_t	status;
		uint8_t		irq_idx;
	} *args;
	struct __packed get_irq_stat_resp {
		uint32_t	status;
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || status == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct get_irq_stat_args *) &cmd->params[0];
	/* *status acts as a mask of the bits the caller is interested in. */
	args->status = *status;
	args->irq_idx = irq_idx;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_IRQ_STATUS);
	if (!error) {
		resp = (struct get_irq_stat_resp *) &cmd->params[0];
		*status = resp->status;
	}
	return (error);
}

/* Enable or disable unicast promiscuous mode on the DPNI. */
static int
dpaa2_rc_ni_set_uni_promisc(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    bool en)
{
	struct __packed set_uni_promisc_args {
		uint8_t		en;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_uni_promisc_args *) &cmd->params[0];
	args->en = en ? 1u : 0u;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_UNI_PROMISC));
}

/* Enable or disable multicast promiscuous mode on the DPNI. */
static int
dpaa2_rc_ni_set_multi_promisc(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, bool en)
{
	/* TODO: Implementation is the same as for ni_set_uni_promisc(). */
	struct __packed set_multi_promisc_args {
		uint8_t		en;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_multi_promisc_args *) &cmd->params[0];
	args->en = en ? 1u : 0u;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_MULTI_PROMISC));
}

/* Read one page of DPNI statistics counters into cnt[]. */
static int
dpaa2_rc_ni_get_statistics(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t page, uint16_t param, uint64_t *cnt)
{
	struct __packed get_statistics_args {
		uint8_t		page;
		uint16_t	param;
	} *args;
	struct __packed get_statistics_resp {
		uint64_t	cnt[7];
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || cnt == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct get_statistics_args *) &cmd->params[0];
	args->page = page;
	args->param = param;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_STATISTICS);
	if (!error) {
		resp = (struct get_statistics_resp *) &cmd->params[0];
		/* NOTE(review): assumes DPAA2_NI_STAT_COUNTERS <= 7 — verify. */
		for (int i = 0; i < DPAA2_NI_STAT_COUNTERS; i++)
			cnt[i] = resp->cnt[i];
	}
	return (error);
}

/* Configure Rx traffic-class distribution (hashing / flow steering). */
static int
dpaa2_rc_ni_set_rx_tc_dist(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint16_t dist_size, uint8_t tc, enum dpaa2_ni_dist_mode dist_mode,
    bus_addr_t key_cfg_buf)
{
	struct __packed set_rx_tc_dist_args {
		uint16_t	dist_size;
		uint8_t		tc;
		uint8_t		ma_dm; /* miss action + dist. mode */
		uint32_t	_reserved1;
		uint64_t	_reserved2[5];
		uint64_t	key_cfg_iova;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_rx_tc_dist_args *) &cmd->params[0];
	args->dist_size = dist_size;
	args->tc = tc;
	args->ma_dm = ((uint8_t) dist_mode) & 0x0Fu;
	args->key_cfg_iova = key_cfg_buf;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_RX_TC_DIST));
}

/* Open the DPIO object and return its authentication token. */
static int
dpaa2_rc_io_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t dpio_id, uint16_t *token)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	struct dpaa2_cmd_header *hdr;
	int error;

	if (portal == NULL || cmd == NULL || token == NULL)
		return (DPAA2_CMD_STAT_ERR);

	cmd->params[0] = dpio_id;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_OPEN);
	if (!error) {
		hdr = (struct dpaa2_cmd_header *) &cmd->header;
		*token = hdr->token;
	}
	return (error);
}

/* Close the DPIO object. */
static int
dpaa2_rc_io_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_CLOSE));
}

/* Enable the DPIO object. */
static int
dpaa2_rc_io_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_ENABLE));
}

/* Disable the DPIO object. */
static int
dpaa2_rc_io_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_DISABLE));
}

/* Reset the DPIO object. */
static int
dpaa2_rc_io_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return
(DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_RESET));
}

/* Fetch the attributes of the DPIO (software portal geometry etc.). */
static int
dpaa2_rc_io_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_io_attr *attr)
{
	struct __packed dpaa2_io_attr {
		uint32_t	id;
		uint16_t	swp_id;
		uint8_t		priors_num;
		uint8_t		chan_mode;
		uint64_t	swp_ce_paddr;
		uint64_t	swp_ci_paddr;
		uint32_t	swp_version;
		uint32_t	_reserved1;
		uint32_t	swp_clk;
		uint32_t	_reserved2[5];
	} *pattr;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || attr == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_GET_ATTR);
	if (!error) {
		pattr = (struct dpaa2_io_attr *) &cmd->params[0];
		attr->swp_ce_paddr = pattr->swp_ce_paddr;
		attr->swp_ci_paddr = pattr->swp_ci_paddr;
		attr->swp_version = pattr->swp_version;
		attr->swp_clk = pattr->swp_clk;
		attr->id = pattr->id;
		attr->swp_id = pattr->swp_id;
		attr->priors_num = pattr->priors_num;
		attr->chan_mode = (enum dpaa2_io_chan_mode) pattr->chan_mode;
	}
	return (error);
}

/* Set the interrupt mask of a DPIO interrupt line. */
static int
dpaa2_rc_io_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, uint32_t mask)
{
	/* TODO: Extract similar *_set_irq_mask() into one function. */
	struct __packed set_irq_mask_args {
		uint32_t	mask;
		uint8_t		irq_idx;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_irq_mask_args *) &cmd->params[0];
	args->mask = mask;
	args->irq_idx = irq_idx;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_SET_IRQ_MASK));
}

/* Read (and acknowledge) the status of a DPIO interrupt line. */
static int
dpaa2_rc_io_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, uint32_t *status)
{
	/* TODO: Extract similar *_get_irq_status() into one function. */
	struct __packed get_irq_stat_args {
		uint32_t	status;
		uint8_t		irq_idx;
	} *args;
	struct __packed get_irq_stat_resp {
		uint32_t	status;
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || status == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct get_irq_stat_args *) &cmd->params[0];
	/* *status acts as a mask of the bits the caller is interested in. */
	args->status = *status;
	args->irq_idx = irq_idx;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_GET_IRQ_STATUS);
	if (!error) {
		resp = (struct get_irq_stat_resp *) &cmd->params[0];
		*status = resp->status;
	}
	return (error);
}

/* Enable or disable a DPIO interrupt line. */
static int
dpaa2_rc_io_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, bool en)
{
	/* TODO: Extract similar *_set_irq_enable() into one function. */
	struct __packed set_irq_enable_args {
		uint32_t	en;
		uint8_t		irq_idx;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_irq_enable_args *) &cmd->params[0];
	args->en = en ? 1u : 0u;
	args->irq_idx = irq_idx;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_SET_IRQ_ENABLE));
}

/* Attach a DPCON to the DPIO as a static dequeue channel. */
static int
dpaa2_rc_io_add_static_dq_chan(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, uint32_t dpcon_id, uint8_t *chan_idx)
{
	struct __packed add_static_dq_chan_args {
		uint32_t	dpcon_id;
	} *args;
	struct __packed add_static_dq_chan_resp {
		uint8_t		chan_idx;
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || chan_idx == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct add_static_dq_chan_args *) &cmd->params[0];
	args->dpcon_id = dpcon_id;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_ADD_STATIC_DQ_CHAN);
	if (!error) {
		resp = (struct add_static_dq_chan_resp *) &cmd->params[0];
		*chan_idx = resp->chan_idx;
	}
	return (error);
}

/* Open the DPBP object and return its authentication token. */
static int
dpaa2_rc_bp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t dpbp_id, uint16_t *token)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	struct dpaa2_cmd_header *hdr;
	int error;

	if (portal == NULL || cmd == NULL || token == NULL)
		return (DPAA2_CMD_STAT_ERR);

	cmd->params[0] = dpbp_id;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_OPEN);
	if (!error) {
		hdr = (struct dpaa2_cmd_header *) &cmd->header;
		*token = hdr->token;
	}
	return (error);
}

/* Close the DPBP object. */
static int
dpaa2_rc_bp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_CLOSE));
}

/* Enable the DPBP object (make the buffer pool usable). */
static int
dpaa2_rc_bp_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_ENABLE));
}

/* Disable the DPBP object. */
static int
dpaa2_rc_bp_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_DISABLE));
}

/* Reset the DPBP object. */
static int
dpaa2_rc_bp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_RESET));
}

/* Fetch the attributes (object ID and hardware BPID) of the DPBP. */
static int
dpaa2_rc_bp_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_bp_attr *attr)
{
	struct __packed dpaa2_bp_attr {
		uint16_t	_reserved1;
		uint16_t	bpid;
		uint32_t	id;
	} *pattr;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || attr == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_GET_ATTR);
	if (!error) {
		pattr = (struct dpaa2_bp_attr *) &cmd->params[0];
		attr->id = pattr->id;
		attr->bpid = pattr->bpid;
	}
	return (error);
}

/* Open the DPMAC object and return its authentication token. */
static int
dpaa2_rc_mac_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t dpmac_id, uint16_t *token)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	struct dpaa2_cmd_header *hdr;
	int error;

	if (portal == NULL || cmd == NULL || token == NULL)
		return (DPAA2_CMD_STAT_ERR);

	cmd->params[0] = dpmac_id;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_OPEN);
	if (!error) {
		hdr = (struct dpaa2_cmd_header *) &cmd->header;
		*token = hdr->token;
	}
	return (error);
}

/* Close the DPMAC object. */
static int
dpaa2_rc_mac_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_CLOSE));
}

/* Reset the DPMAC object. */
static int
dpaa2_rc_mac_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal,
cmd, CMDID_MAC_RESET));
}

/* Read a PHY register over the DPMAC's MDIO bus. */
static int
dpaa2_rc_mac_mdio_read(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t phy, uint16_t reg, uint16_t *val)
{
	struct __packed mdio_read_args {
		uint8_t		clause; /* set to 0 by default */
		uint8_t		phy;
		uint16_t	reg;
		uint32_t	_reserved1;
		uint64_t	_reserved2[6];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || val == NULL)
		return (DPAA2_CMD_STAT_ERR);

	args = (struct mdio_read_args *) &cmd->params[0];
	args->phy = phy;
	args->reg = reg;
	args->clause = 0;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_MDIO_READ);
	if (!error)
		*val = cmd->params[0] & 0xFFFF;
	return (error);
}

/* Write a PHY register over the DPMAC's MDIO bus. */
static int
dpaa2_rc_mac_mdio_write(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t phy, uint16_t reg, uint16_t val)
{
	struct __packed mdio_write_args {
		uint8_t		clause; /* set to 0 by default */
		uint8_t		phy;
		uint16_t	reg;
		uint16_t	val;
		uint16_t	_reserved1;
		uint64_t	_reserved2[6];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	args = (struct mdio_write_args *) &cmd->params[0];
	args->phy = phy;
	args->reg = reg;
	args->val = val;
	args->clause = 0;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_MDIO_WRITE));
}

/* Read the MAC address associated with the DPMAC. */
static int
dpaa2_rc_mac_get_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t *mac)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || mac == NULL)
		return (DPAA2_CMD_STAT_ERR);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_ADDR);
	if (!error) {
		/* MAC bytes are packed into the upper bytes of params[0]. */
		mac[0] = (cmd->params[0] >> 56) & 0xFFU;
		mac[1] = (cmd->params[0] >> 48) & 0xFFU;
		mac[2] = (cmd->params[0] >> 40) & 0xFFU;
		mac[3] = (cmd->params[0] >> 32) & 0xFFU;
		mac[4] = (cmd->params[0] >> 24) & 0xFFU;
		mac[5] = (cmd->params[0] >> 16) & 0xFFU;
	}
	return (error);
}

/* Fetch the attributes of the DPMAC. */
static int
dpaa2_rc_mac_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_mac_attr *attr)
{
	struct __packed mac_attr_resp {
		uint8_t		eth_if;
		uint8_t		link_type;
		uint16_t	id;
		uint32_t	max_rate;
		uint8_t		fec_mode;
		uint8_t		ifg_mode;
		uint8_t		ifg_len;
		uint8_t		_reserved1;
		uint32_t	_reserved2;
		uint8_t		sgn_post_pre;
		uint8_t		serdes_cfg_mode;
		uint8_t		eq_amp_red;
		uint8_t		eq_post1q;
		uint8_t		eq_preq;
		uint8_t		eq_type;
		uint16_t	_reserved3;
		uint64_t	_reserved[4];
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || attr == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_ATTR);
	if (!error) {
		resp = (struct mac_attr_resp *) &cmd->params[0];
		attr->id = resp->id;
		attr->max_rate = resp->max_rate;
		attr->eth_if = resp->eth_if;
		attr->link_type = resp->link_type;
	}
	return (error);
}

/* Push the link state (rate, options, flags) down to the DPMAC. */
static int
dpaa2_rc_mac_set_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_mac_link_state *state)
{
	struct __packed mac_set_link_args {
		uint64_t	options;
		uint32_t	rate;
		uint32_t	_reserved1;
		uint32_t	flags;
		uint32_t	_reserved2;
		uint64_t	supported;
		uint64_t	advert;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || state == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct mac_set_link_args *) &cmd->params[0];
	args->options = state->options;
	args->rate = state->rate;
	args->supported = state->supported;
	args->advert = state->advert;
	/* Bit 0: link up; bit 1: state valid. */
	args->flags |= state->up ? 0x1u : 0u;
	args->flags |= state->state_valid ? 0x2u : 0u;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_LINK_STATE));
}

/* Set the interrupt mask of a DPMAC interrupt line. */
static int
dpaa2_rc_mac_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, uint32_t mask)
{
	/* TODO: Implementation is the same as for ni_set_irq_mask(). */
	struct __packed set_irq_mask_args {
		uint32_t	mask;
		uint8_t		irq_idx;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_irq_mask_args *) &cmd->params[0];
	args->mask = mask;
	args->irq_idx = irq_idx;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_IRQ_MASK));
}

/* Enable or disable a DPMAC interrupt line. */
static int
dpaa2_rc_mac_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, bool en)
{
	/* TODO: Implementation is the same as for ni_set_irq_enable(). */
	struct __packed set_irq_enable_args {
		uint32_t	en;
		uint8_t		irq_idx;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct set_irq_enable_args *) &cmd->params[0];
	args->en = en ? 1u : 0u;
	args->irq_idx = irq_idx;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_IRQ_ENABLE));
}

/* Read (and acknowledge) the status of a DPMAC interrupt line. */
static int
dpaa2_rc_mac_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint8_t irq_idx, uint32_t *status)
{
	/* TODO: Implementation is the same as ni_get_irq_status().
	 */
	struct __packed get_irq_stat_args {
		uint32_t	status;
		uint8_t		irq_idx;
	} *args;
	struct __packed get_irq_stat_resp {
		uint32_t	status;
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || status == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	dpaa2_rc_reset_cmd_params(cmd);

	args = (struct get_irq_stat_args *) &cmd->params[0];
	/* *status acts as a mask of the bits the caller is interested in. */
	args->status = *status;
	args->irq_idx = irq_idx;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_IRQ_STATUS);
	if (!error) {
		resp = (struct get_irq_stat_resp *) &cmd->params[0];
		*status = resp->status;
	}
	return (error);
}

/* Open the DPCON object and return its authentication token. */
static int
dpaa2_rc_con_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t dpcon_id, uint16_t *token)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	struct dpaa2_cmd_header *hdr;
	int error;

	if (portal == NULL || cmd == NULL || token == NULL)
		return (DPAA2_CMD_STAT_ERR);

	cmd->params[0] = dpcon_id;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_OPEN);
	if (!error) {
		hdr = (struct dpaa2_cmd_header *) &cmd->header;
		*token = hdr->token;
	}
	return (error);
}

/* Close the DPCON object. */
static int
dpaa2_rc_con_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_CLOSE));
}

/* Reset the DPCON object. */
static int
dpaa2_rc_con_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_RESET));
}

/* Enable the DPCON object. */
static int
dpaa2_rc_con_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_ENABLE));
}

/* Disable the DPCON object. */
static int
dpaa2_rc_con_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_DISABLE));
}

/* Fetch the attributes of the DPCON. */
static int
dpaa2_rc_con_get_attributes(device_t dev, device_t child,
    struct dpaa2_cmd *cmd, struct dpaa2_con_attr *attr)
{
	struct __packed con_attr_resp {
		uint32_t	id;
		uint16_t	chan_id;
		uint8_t		prior_num;
		uint8_t		_reserved1;
		uint64_t	_reserved2[6];
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || attr == NULL)
		return (DPAA2_CMD_STAT_EINVAL);

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_GET_ATTR);
	if (!error) {
		resp = (struct con_attr_resp *) &cmd->params[0];
		attr->id = resp->id;
		attr->chan_id = resp->chan_id;
		attr->prior_num = resp->prior_num;
	}
	return (error);
}

/* Configure the DPCON notification destination (DPIO and QMan context). */
static int
dpaa2_rc_con_set_notif(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    struct dpaa2_con_notif_cfg *cfg)
{
	struct __packed set_notif_args {
		uint32_t	dpio_id;
		uint8_t		prior;
		uint8_t		_reserved1;
		uint16_t	_reserved2;
		uint64_t	ctx;
		uint64_t	_reserved3[5];
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL || cfg == NULL)
		return (DPAA2_CMD_STAT_ERR);

	args = (struct set_notif_args *) &cmd->params[0];
	args->dpio_id = cfg->dpio_id;
	args->prior = cfg->prior;
	args->ctx = cfg->qman_ctx;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_SET_NOTIF));
}

/* Create a DPMCP object and return its object ID. */
static int
dpaa2_rc_mcp_create(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t portal_id, uint32_t options, uint32_t *dpmcp_id)
{
	struct __packed mcp_create_args {
		uint32_t	portal_id;
		uint32_t	options;
		uint64_t	_reserved[6];
	} *args;
	struct __packed mcp_create_resp {
		uint32_t	dpmcp_id;
	} *resp;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	int error;

	if (portal == NULL || cmd == NULL || dpmcp_id == NULL)
		return (DPAA2_CMD_STAT_ERR);

	args = (struct mcp_create_args *) &cmd->params[0];
	args->portal_id = portal_id;
	args->options = options;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_CREATE);
	if (!error) {
		resp = (struct mcp_create_resp *) &cmd->params[0];
		*dpmcp_id = resp->dpmcp_id;
	}
	return (error);
}

/* Destroy a DPMCP object by its object ID. */
static int
dpaa2_rc_mcp_destroy(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t dpmcp_id)
{
	struct __packed mcp_destroy_args {
		uint32_t	dpmcp_id;
	} *args;
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	args = (struct mcp_destroy_args *) &cmd->params[0];
	args->dpmcp_id = dpmcp_id;

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_DESTROY));
}

/* Open the DPMCP object and return its authentication token. */
static int
dpaa2_rc_mcp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
    uint32_t dpmcp_id, uint16_t *token)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
	struct dpaa2_cmd_header *hdr;
	int error;

	if (portal == NULL || cmd == NULL || token == NULL)
		return (DPAA2_CMD_STAT_ERR);

	cmd->params[0] = dpmcp_id;

	error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_OPEN);
	if (!error) {
		hdr = (struct dpaa2_cmd_header *) &cmd->header;
		*token = hdr->token;
	}
	return (error);
}

/* Close the DPMCP object. */
static int
dpaa2_rc_mcp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_CLOSE));
}

/* Reset the DPMCP object. */
static int
dpaa2_rc_mcp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
{
	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);

	if (portal == NULL || cmd == NULL)
		return (DPAA2_CMD_STAT_ERR);

	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_RESET));
}

/**
 * @brief Create and add devices for DPAA2 objects in this resource container.
*/ static int dpaa2_rc_discover(struct dpaa2_rc_softc *sc) { device_t rcdev = sc->dev; device_t child = sc->dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); - struct dpaa2_cmd *cmd = NULL; + struct dpaa2_cmd cmd; struct dpaa2_rc_attr dprc_attr; struct dpaa2_obj obj; uint32_t major, minor, rev, obj_count; uint16_t rc_token; int rc; - /* Allocate a command to send to MC hardware. */ - rc = dpaa2_mcp_init_command(&cmd, DPAA2_CMD_DEF); - if (rc) { - device_printf(rcdev, "%s: failed to allocate dpaa2_cmd: " - "error=%d\n", __func__, rc); - return (ENXIO); - } + DPAA2_CMD_INIT(&cmd); /* Print MC firmware version. */ - rc = DPAA2_CMD_MNG_GET_VERSION(rcdev, child, cmd, &major, &minor, &rev); + rc = DPAA2_CMD_MNG_GET_VERSION(rcdev, child, &cmd, &major, &minor, &rev); if (rc) { device_printf(rcdev, "%s: failed to get MC firmware version: " "error=%d\n", __func__, rc); - dpaa2_mcp_free_command(cmd); return (ENXIO); } device_printf(rcdev, "MC firmware version: %u.%u.%u\n", major, minor, rev); /* Obtain container ID associated with a given MC portal. */ - rc = DPAA2_CMD_MNG_GET_CONTAINER_ID(rcdev, child, cmd, &sc->cont_id); + rc = DPAA2_CMD_MNG_GET_CONTAINER_ID(rcdev, child, &cmd, &sc->cont_id); if (rc) { device_printf(rcdev, "%s: failed to get container id: " "error=%d\n", __func__, rc); - dpaa2_mcp_free_command(cmd); return (ENXIO); } - if (bootverbose) + if (bootverbose) { device_printf(rcdev, "Resource container ID: %u\n", sc->cont_id); + } /* Open the resource container. */ - rc = DPAA2_CMD_RC_OPEN(rcdev, child, cmd, sc->cont_id, &rc_token); + rc = DPAA2_CMD_RC_OPEN(rcdev, child, &cmd, sc->cont_id, &rc_token); if (rc) { device_printf(rcdev, "%s: failed to open container: cont_id=%u, " "error=%d\n", __func__, sc->cont_id, rc); - dpaa2_mcp_free_command(cmd); return (ENXIO); } /* Obtain a number of objects in this container. 
*/ - rc = DPAA2_CMD_RC_GET_OBJ_COUNT(rcdev, child, cmd, &obj_count); + rc = DPAA2_CMD_RC_GET_OBJ_COUNT(rcdev, child, &cmd, &obj_count); if (rc) { device_printf(rcdev, "%s: failed to count objects in container: " "cont_id=%u, error=%d\n", __func__, sc->cont_id, rc); - DPAA2_CMD_RC_CLOSE(rcdev, child, cmd); - dpaa2_mcp_free_command(cmd); + (void)DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (ENXIO); } - if (bootverbose) + if (bootverbose) { device_printf(rcdev, "Objects in container: %u\n", obj_count); + } - /* Obtain container attributes (including ICID). */ - rc = DPAA2_CMD_RC_GET_ATTRIBUTES(rcdev, child, cmd, &dprc_attr); + rc = DPAA2_CMD_RC_GET_ATTRIBUTES(rcdev, child, &cmd, &dprc_attr); if (rc) { device_printf(rcdev, "%s: failed to get attributes of the " "container: cont_id=%u, error=%d\n", __func__, sc->cont_id, rc); - DPAA2_CMD_RC_CLOSE(rcdev, child, cmd); - dpaa2_mcp_free_command(cmd); + DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (ENXIO); } - if (bootverbose) + if (bootverbose) { device_printf(rcdev, "Isolation context ID: %u\n", dprc_attr.icid); + } if (rcinfo) { rcinfo->id = dprc_attr.cont_id; rcinfo->portal_id = dprc_attr.portal_id; rcinfo->icid = dprc_attr.icid; } /* * Add MC portals before everything else. * TODO: Discover DPAA2 objects on-demand. */ for (uint32_t i = 0; i < obj_count; i++) { - rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, cmd, i, &obj); - if (rc) + rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); + if (rc) { continue; /* Skip silently for now. */ - if (obj.type != DPAA2_DEV_MCP) + } + if (obj.type != DPAA2_DEV_MCP) { continue; - - dpaa2_rc_add_managed_child(sc, cmd, &obj); + } + dpaa2_rc_add_managed_child(sc, &cmd, &obj); } /* Probe and attach MC portals. */ bus_generic_probe(rcdev); rc = bus_generic_attach(rcdev); if (rc) { - DPAA2_CMD_RC_CLOSE(rcdev, child, cmd); - dpaa2_mcp_free_command(cmd); + DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (rc); } /* Add managed devices (except DPMCPs) to the resource container. 
*/ for (uint32_t i = 0; i < obj_count; i++) { - rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, cmd, i, &obj); + rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); if (rc && bootverbose) { if (rc == DPAA2_CMD_STAT_UNKNOWN_OBJ) { device_printf(rcdev, "%s: skip unsupported " "DPAA2 object: idx=%u\n", __func__, i); continue; } else { device_printf(rcdev, "%s: failed to get " "information about DPAA2 object: idx=%u, " "error=%d\n", __func__, i, rc); continue; } } - if (obj.type == DPAA2_DEV_MCP) + if (obj.type == DPAA2_DEV_MCP) { continue; /* Already added. */ - - dpaa2_rc_add_managed_child(sc, cmd, &obj); + } + dpaa2_rc_add_managed_child(sc, &cmd, &obj); } /* Probe and attach managed devices properly. */ bus_generic_probe(rcdev); rc = bus_generic_attach(rcdev); if (rc) { - DPAA2_CMD_RC_CLOSE(rcdev, child, cmd); - dpaa2_mcp_free_command(cmd); + DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (rc); } /* Add other devices to the resource container. */ for (uint32_t i = 0; i < obj_count; i++) { - rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, cmd, i, &obj); + rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); if (rc == DPAA2_CMD_STAT_UNKNOWN_OBJ && bootverbose) { device_printf(rcdev, "%s: skip unsupported DPAA2 " "object: idx=%u\n", __func__, i); continue; } else if (rc) { device_printf(rcdev, "%s: failed to get object: " "idx=%u, error=%d\n", __func__, i, rc); continue; } - dpaa2_rc_add_child(sc, cmd, &obj); + dpaa2_rc_add_child(sc, &cmd, &obj); } - DPAA2_CMD_RC_CLOSE(rcdev, child, cmd); - dpaa2_mcp_free_command(cmd); + DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); /* Probe and attach the rest of devices. */ bus_generic_probe(rcdev); return (bus_generic_attach(rcdev)); } /** * @brief Add a new DPAA2 device to the resource container bus. 
*/
static int
dpaa2_rc_add_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd,
    struct dpaa2_obj *obj)
{
	device_t rcdev, dev;
	struct dpaa2_devinfo *rcinfo;
	struct dpaa2_devinfo *dinfo;
	struct resource_spec *res_spec;
	const char *devclass;
	int dpio_n = 0; /* to limit DPIOs by # of CPUs */
	int dpcon_n = 0; /* to limit DPCONs by # of CPUs */
	int rid, error;

	rcdev = sc->dev;
	rcinfo = device_get_ivars(rcdev);

	/* Select a device class and a resource spec for the object type. */
	switch (obj->type) {
	case DPAA2_DEV_NI:
		devclass = "dpaa2_ni";
		res_spec = dpaa2_ni_spec;
		break;
	default:
		return (ENXIO);
	}

	/* Add a device for the DPAA2 object. */
	dev = device_add_child(rcdev, devclass, -1);
	if (dev == NULL) {
		device_printf(rcdev, "%s: failed to add a device for DPAA2 "
		    "object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type),
		    obj->id);
		return (ENXIO);
	}

	/* Allocate devinfo for a child. */
	dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC,
	    M_WAITOK | M_ZERO);
	if (!dinfo) {
		device_printf(rcdev, "%s: failed to allocate dpaa2_devinfo "
		    "for: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type),
		    obj->id);
		return (ENXIO);
	}
	device_set_ivars(dev, dinfo);

	dinfo->pdev = rcdev;
	dinfo->dev = dev;
	dinfo->id = obj->id;
	dinfo->dtype = obj->type;
	dinfo->portal = NULL;
	/* Children share their parent container's ICID and portal ID. */
	dinfo->icid = rcinfo->icid;
	dinfo->portal_id = rcinfo->portal_id;
	/* MSI configuration */
	dinfo->msi.msi_msgnum = obj->irq_count;
	dinfo->msi.msi_alloc = 0;
	dinfo->msi.msi_handlers = 0;

	/* Initialize a resource list for the child. */
	resource_list_init(&dinfo->resources);

	/* Add DPAA2-specific resources to the resource list. */
	for (; res_spec && res_spec->type != -1; res_spec++) {
		if (res_spec->type < DPAA2_DEV_MC)
			continue; /* Skip non-DPAA2 resource. */
		rid = res_spec->rid;

		/* Limit DPIOs and DPCONs by number of CPUs. */
		if (res_spec->type == DPAA2_DEV_IO && dpio_n >= mp_ncpus) {
			dpio_n++;
			continue;
		}
		if (res_spec->type == DPAA2_DEV_CON && dpcon_n >= mp_ncpus) {
			dpcon_n++;
			continue;
		}

		error = dpaa2_rc_add_res(rcdev, dev, res_spec->type, &rid,
		    res_spec->flags);
		if (error)
			device_printf(rcdev, "%s: dpaa2_rc_add_res() failed: "
			    "error=%d\n", __func__, error);

		if (res_spec->type == DPAA2_DEV_IO)
			dpio_n++;
		if (res_spec->type == DPAA2_DEV_CON)
			dpcon_n++;
	}

	return (0);
}

/**
 * @brief Add a new managed DPAA2 device to the resource container bus.
 *
 * There are DPAA2 objects (DPIO, DPBP) which have their own drivers and can be
 * allocated as resources or associated with the other DPAA2 objects. This
 * function is supposed to discover such managed objects in the resource
 * container and add them as children to perform a proper initialization.
 *
 * NOTE: It must be called together with bus_generic_probe() and
 *       bus_generic_attach() before dpaa2_rc_add_child().
 */
static int
dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd,
    struct dpaa2_obj *obj)
{
	device_t rcdev, dev, child;
	struct dpaa2_devinfo *rcinfo, *dinfo;
	struct dpaa2_rc_obj_region reg;
	struct resource_spec *res_spec;
	const char *devclass;
	uint64_t start, end, count;
	uint32_t flags = 0;
	int rid, error;

	rcdev = sc->dev;
	child = sc->dev;
	rcinfo = device_get_ivars(rcdev);

	/* Select a device class, resource spec and MC flags for the object. */
	switch (obj->type) {
	case DPAA2_DEV_IO:
		devclass = "dpaa2_io";
		res_spec = dpaa2_io_spec;
		flags = DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE;
		break;
	case DPAA2_DEV_BP:
		devclass = "dpaa2_bp";
		res_spec = dpaa2_bp_spec;
		flags = DPAA2_MC_DEV_ALLOCATABLE;
		break;
	case DPAA2_DEV_CON:
		devclass = "dpaa2_con";
		res_spec = dpaa2_con_spec;
		flags = DPAA2_MC_DEV_ALLOCATABLE;
		break;
	case DPAA2_DEV_MAC:
		devclass = "dpaa2_mac";
		res_spec = dpaa2_mac_spec;
		flags = DPAA2_MC_DEV_ASSOCIATED;
		break;
	case DPAA2_DEV_MCP:
		devclass = "dpaa2_mcp";
		res_spec = NULL;
		flags = DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE;
		break;
	default:
		/* Only managed devices
above are supported. */ return (EINVAL); } /* Add a device for the DPAA2 object. */ dev = device_add_child(rcdev, devclass, -1); if (dev == NULL) { device_printf(rcdev, "%s: failed to add a device for DPAA2 " "object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } /* Allocate devinfo for the child. */ dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC, M_WAITOK | M_ZERO); if (!dinfo) { device_printf(rcdev, "%s: failed to allocate dpaa2_devinfo " "for: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } device_set_ivars(dev, dinfo); dinfo->pdev = rcdev; dinfo->dev = dev; dinfo->id = obj->id; dinfo->dtype = obj->type; dinfo->portal = NULL; /* Children share their parent container's ICID and portal ID. */ dinfo->icid = rcinfo->icid; dinfo->portal_id = rcinfo->portal_id; /* MSI configuration */ dinfo->msi.msi_msgnum = obj->irq_count; dinfo->msi.msi_alloc = 0; dinfo->msi.msi_handlers = 0; /* Initialize a resource list for the child. */ resource_list_init(&dinfo->resources); /* Add memory regions to the resource list. */ for (uint8_t i = 0; i < obj->reg_count; i++) { error = DPAA2_CMD_RC_GET_OBJ_REGION(rcdev, child, cmd, obj->id, i, obj->type, ®); if (error) { device_printf(rcdev, "%s: failed to obtain memory " "region for type=%s, id=%u, reg_idx=%u: error=%d\n", __func__, dpaa2_ttos(obj->type), obj->id, i, error); continue; } count = reg.size; start = reg.base_paddr + reg.base_offset; end = reg.base_paddr + reg.base_offset + reg.size - 1; resource_list_add(&dinfo->resources, SYS_RES_MEMORY, i, start, end, count); } /* Add DPAA2-specific resources to the resource list. */ for (; res_spec && res_spec->type != -1; res_spec++) { if (res_spec->type < DPAA2_DEV_MC) continue; /* Skip non-DPAA2 resource. 
*/ rid = res_spec->rid; error = dpaa2_rc_add_res(rcdev, dev, res_spec->type, &rid, res_spec->flags); if (error) device_printf(rcdev, "%s: dpaa2_rc_add_res() failed: " "error=%d\n", __func__, error); } /* Inform MC about a new managed device. */ error = DPAA2_MC_MANAGE_DEV(rcdev, dev, flags); if (error) { device_printf(rcdev, "%s: failed to add a managed DPAA2 device: " "type=%s, id=%u, error=%d\n", __func__, dpaa2_ttos(obj->type), obj->id, error); return (ENXIO); } return (0); } /** * @brief Configure given IRQ using MC command interface. */ static int dpaa2_rc_configure_irq(device_t rcdev, device_t child, int rid, uint64_t addr, uint32_t data) { struct dpaa2_devinfo *rcinfo; struct dpaa2_devinfo *dinfo; - struct dpaa2_cmd *cmd; + struct dpaa2_cmd cmd; uint16_t rc_token; int rc = EINVAL; + DPAA2_CMD_INIT(&cmd); + if (device_get_parent(child) == rcdev && rid >= 1) { rcinfo = device_get_ivars(rcdev); dinfo = device_get_ivars(child); - /* Allocate a command to send to MC hardware. */ - rc = dpaa2_mcp_init_command(&cmd, DPAA2_CMD_DEF); + rc = DPAA2_CMD_RC_OPEN(rcdev, child, &cmd, rcinfo->id, + &rc_token); if (rc) { - device_printf(rcdev, "%s: failed to allocate dpaa2_cmd: " - "error=%d\n", __func__, rc); - return (ENODEV); - } - - /* Open resource container. */ - rc = DPAA2_CMD_RC_OPEN(rcdev, child, cmd, rcinfo->id, &rc_token); - if (rc) { - dpaa2_mcp_free_command(cmd); device_printf(rcdev, "%s: failed to open DPRC: " "error=%d\n", __func__, rc); return (ENODEV); } /* Set MSI address and value. */ - rc = DPAA2_CMD_RC_SET_OBJ_IRQ(rcdev, child, cmd, rid - 1, addr, + rc = DPAA2_CMD_RC_SET_OBJ_IRQ(rcdev, child, &cmd, rid - 1, addr, data, rid, dinfo->id, dinfo->dtype); if (rc) { - dpaa2_mcp_free_command(cmd); device_printf(rcdev, "%s: failed to setup IRQ: " "rid=%d, addr=%jx, data=%x, error=%d\n", __func__, rid, addr, data, rc); return (ENODEV); } - /* Close resource container. 
*/ - rc = DPAA2_CMD_RC_CLOSE(rcdev, child, cmd); + rc = DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); if (rc) { - dpaa2_mcp_free_command(cmd); device_printf(rcdev, "%s: failed to close DPRC: " "error=%d\n", __func__, rc); return (ENODEV); } - - dpaa2_mcp_free_command(cmd); rc = 0; } return (rc); } /** * @brief General implementation of the MC command to enable IRQ. */ static int dpaa2_rc_enable_irq(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool enable, uint16_t cmdid) { struct __packed enable_irq_args { uint8_t enable; uint8_t _reserved1; uint16_t _reserved2; uint8_t irq_idx; uint8_t _reserved3; uint16_t _reserved4; uint64_t _reserved5[6]; } *args; if (!mcp || !cmd) return (DPAA2_CMD_STAT_ERR); args = (struct enable_irq_args *) &cmd->params[0]; args->irq_idx = irq_idx; args->enable = enable == 0u ? 0u : 1u; return (dpaa2_rc_exec_cmd(mcp, cmd, cmdid)); } /** * @brief Sends a command to MC and waits for response. */ static int dpaa2_rc_exec_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd, uint16_t cmdid) { struct dpaa2_cmd_header *hdr; uint16_t flags; int error; if (!mcp || !cmd) return (DPAA2_CMD_STAT_ERR); /* Prepare a command for the MC hardware. */ hdr = (struct dpaa2_cmd_header *) &cmd->header; hdr->cmdid = cmdid; hdr->status = DPAA2_CMD_STAT_READY; DPAA2_MCP_LOCK(mcp, &flags); if (flags & DPAA2_PORTAL_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_INVALID_STATE); } /* Send a command to MC and wait for the result. */ dpaa2_rc_send_cmd(mcp, cmd); error = dpaa2_rc_wait_for_cmd(mcp, cmd); if (error) { DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_ERR); } if (hdr->status != DPAA2_CMD_STAT_OK) { DPAA2_MCP_UNLOCK(mcp); return (int)(hdr->status); } DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_OK); } /** * @brief Writes a command to the MC command portal. */ static int dpaa2_rc_send_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd) { /* Write command parameters. 
*/
	for (uint32_t i = 1; i <= DPAA2_CMD_PARAMS_N; i++)
		bus_write_8(mcp->map, sizeof(uint64_t) * i, cmd->params[i-1]);
	bus_barrier(mcp->map, 0, sizeof(struct dpaa2_cmd),
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/* Write command header to trigger execution. */
	bus_write_8(mcp->map, 0, cmd->header);

	return (0);
}

/**
 * @brief Polls the MC command portal in order to receive a result of the
 * command execution.
 */
static int
dpaa2_rc_wait_for_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd)
{
	struct dpaa2_cmd_header *hdr;
	uint64_t val;
	uint32_t i;

	/* Wait for a command execution result from the MC hardware. */
	for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
		val = bus_read_8(mcp->map, 0);
		hdr = (struct dpaa2_cmd_header *) &val;
		if (hdr->status != DPAA2_CMD_STAT_READY) {
			break;
		}
		DELAY(CMD_SPIN_TIMEOUT);
	}

	if (i > CMD_SPIN_ATTEMPTS) {
		/* Return an error on expired timeout. */
		return (DPAA2_CMD_STAT_TIMEOUT);
	} else {
		/* Read command response. */
		cmd->header = val;
		for (i = 1; i <= DPAA2_CMD_PARAMS_N; i++) {
			cmd->params[i-1] = bus_read_8(mcp->map,
			    i * sizeof(uint64_t));
		}
	}

	return (DPAA2_CMD_STAT_OK);
}

/**
 * @brief Reserve a DPAA2-specific device of the given devtype for the child.
 */
static int
dpaa2_rc_add_res(device_t rcdev, device_t child, enum dpaa2_dev_type devtype,
    int *rid, int flags)
{
	device_t dpaa2_dev;
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
	struct resource *res;
	bool shared = false;
	int error;

	/* Request a free DPAA2 device of the given type from MC. */
	error = DPAA2_MC_GET_FREE_DEV(rcdev, &dpaa2_dev, devtype);
	if (error && !(flags & RF_SHAREABLE)) {
		device_printf(rcdev, "%s: failed to obtain a free %s (rid=%d) "
		    "for: %s (id=%u)\n", __func__, dpaa2_ttos(devtype), *rid,
		    dpaa2_ttos(dinfo->dtype), dinfo->id);
		return (error);
	}

	/* Request a shared DPAA2 device of the given type from MC. */
	if (error) {
		error = DPAA2_MC_GET_SHARED_DEV(rcdev, &dpaa2_dev, devtype);
		if (error) {
			device_printf(rcdev, "%s: failed to obtain a shared "
			    "%s (rid=%d) for: %s (id=%u)\n", __func__,
			    dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype),
			    dinfo->id);
			return (error);
		}
		shared = true;
	}

	/* Add DPAA2 device to the resource list of the child device. */
	resource_list_add(&dinfo->resources, devtype, *rid,
	    (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev, 1);

	/* Reserve a newly added DPAA2 resource. */
	res = resource_list_reserve(&dinfo->resources, rcdev, child, devtype,
	    rid, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev, 1,
	    flags & ~RF_ACTIVE);
	if (!res) {
		device_printf(rcdev, "%s: failed to reserve %s (rid=%d) for: %s "
		    "(id=%u)\n", __func__, dpaa2_ttos(devtype), *rid,
		    dpaa2_ttos(dinfo->dtype), dinfo->id);
		return (EBUSY);
	}

	/* Reserve a shared DPAA2 device of the given type. */
	if (shared) {
		error = DPAA2_MC_RESERVE_DEV(rcdev, dpaa2_dev, devtype);
		if (error) {
			device_printf(rcdev, "%s: failed to reserve a shared "
			    "%s (rid=%d) for: %s (id=%u)\n", __func__,
			    dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype),
			    dinfo->id);
			return (error);
		}
	}

	return (0);
}

/*
 * Print IDs of all devices of the given DPAA2 type found in the resource
 * list; consecutive IDs are collapsed into "first-last" ranges. Returns the
 * number of characters printed.
 */
static int
dpaa2_rc_print_type(struct resource_list *rl, enum dpaa2_dev_type type)
{
	struct dpaa2_devinfo *dinfo;
	struct resource_list_entry *rle;
	uint32_t prev_id;
	int printed = 0, series = 0;
	int retval = 0;

	STAILQ_FOREACH(rle, rl, link) {
		if (rle->type == type) {
			dinfo = device_get_ivars((device_t) rle->start);

			if (printed == 0) {
				retval += printf(" %s (id=",
				    dpaa2_ttos(dinfo->dtype));
			} else {
				if (dinfo->id == prev_id + 1) {
					/* Continuation of a running range. */
					if (series == 0) {
						series = 1;
						retval += printf("-");
					}
				} else {
					if (series == 1) {
						retval += printf("%u", prev_id);
						series = 0;
					}
					retval += printf(",");
				}
			}
			printed++;

			if (series == 0)
				retval += printf("%u", dinfo->id);
			prev_id = dinfo->id;
		}
	}
	if (printed) {
		if (series == 1)
			retval += printf("%u", prev_id);
		retval += printf(")");
	}

	return (retval);
}

static int
dpaa2_rc_reset_cmd_params(struct
dpaa2_cmd *cmd)
{
	/* Clear the command parameters, leaving the header intact. */
	if (cmd != NULL) {
		memset(cmd->params, 0,
		    sizeof(cmd->params[0]) * DPAA2_CMD_PARAMS_N);
	}
	return (0);
}

/*
 * Select an MC portal to send commands through: the child's own portal if it
 * has one, the resource container's portal otherwise. "dev" is expected to be
 * the resource container device (DPAA2_DEV_RC).
 */
static struct dpaa2_mcp *
dpaa2_rc_select_portal(device_t dev, device_t child)
{
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *cinfo = device_get_ivars(child);

	if (cinfo == NULL || dinfo == NULL || dinfo->dtype != DPAA2_DEV_RC)
		return (NULL);
	return (cinfo->portal != NULL ? cinfo->portal : dinfo->portal);
}

static device_method_t dpaa2_rc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_rc_probe),
	DEVMETHOD(device_attach,	dpaa2_rc_attach),
	DEVMETHOD(device_detach,	dpaa2_rc_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_resource_list, dpaa2_rc_get_resource_list),
	DEVMETHOD(bus_delete_resource,	dpaa2_rc_delete_resource),
	DEVMETHOD(bus_alloc_resource,	dpaa2_rc_alloc_resource),
	DEVMETHOD(bus_release_resource,	dpaa2_rc_release_resource),
	DEVMETHOD(bus_child_deleted,	dpaa2_rc_child_deleted),
	DEVMETHOD(bus_child_detached,	dpaa2_rc_child_detached),
	DEVMETHOD(bus_setup_intr,	dpaa2_rc_setup_intr),
	DEVMETHOD(bus_teardown_intr,	dpaa2_rc_teardown_intr),
	DEVMETHOD(bus_print_child,	dpaa2_rc_print_child),
	DEVMETHOD(bus_add_child,	device_add_child_ordered),
	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),

	/* Pseudo-PCI interface */
	DEVMETHOD(pci_alloc_msi,	dpaa2_rc_alloc_msi),
	DEVMETHOD(pci_release_msi,	dpaa2_rc_release_msi),
	DEVMETHOD(pci_msi_count,	dpaa2_rc_msi_count),
	DEVMETHOD(pci_get_id,		dpaa2_rc_get_id),

	/* DPAA2 MC command interface */
	DEVMETHOD(dpaa2_cmd_mng_get_version, dpaa2_rc_mng_get_version),
	DEVMETHOD(dpaa2_cmd_mng_get_soc_version, dpaa2_rc_mng_get_soc_version),
	DEVMETHOD(dpaa2_cmd_mng_get_container_id, dpaa2_rc_mng_get_container_id),

	/* DPRC commands */
	DEVMETHOD(dpaa2_cmd_rc_open,	dpaa2_rc_open),
	DEVMETHOD(dpaa2_cmd_rc_close,	dpaa2_rc_close),
	DEVMETHOD(dpaa2_cmd_rc_get_obj_count, dpaa2_rc_get_obj_count),
	DEVMETHOD(dpaa2_cmd_rc_get_obj,	dpaa2_rc_get_obj),
	DEVMETHOD(dpaa2_cmd_rc_get_obj_descriptor, dpaa2_rc_get_obj_descriptor),
	DEVMETHOD(dpaa2_cmd_rc_get_attributes, dpaa2_rc_get_attributes),
	DEVMETHOD(dpaa2_cmd_rc_get_obj_region, dpaa2_rc_get_obj_region),
	DEVMETHOD(dpaa2_cmd_rc_get_api_version, dpaa2_rc_get_api_version),
	DEVMETHOD(dpaa2_cmd_rc_set_irq_enable, dpaa2_rc_set_irq_enable),
	DEVMETHOD(dpaa2_cmd_rc_set_obj_irq, dpaa2_rc_set_obj_irq),
	DEVMETHOD(dpaa2_cmd_rc_get_conn, dpaa2_rc_get_conn),

	/* DPNI commands */
	DEVMETHOD(dpaa2_cmd_ni_open,	dpaa2_rc_ni_open),
	DEVMETHOD(dpaa2_cmd_ni_close,	dpaa2_rc_ni_close),
	DEVMETHOD(dpaa2_cmd_ni_enable,	dpaa2_rc_ni_enable),
	DEVMETHOD(dpaa2_cmd_ni_disable,	dpaa2_rc_ni_disable),
	DEVMETHOD(dpaa2_cmd_ni_get_api_version, dpaa2_rc_ni_get_api_version),
	DEVMETHOD(dpaa2_cmd_ni_reset,	dpaa2_rc_ni_reset),
	DEVMETHOD(dpaa2_cmd_ni_get_attributes, dpaa2_rc_ni_get_attributes),
	DEVMETHOD(dpaa2_cmd_ni_set_buf_layout, dpaa2_rc_ni_set_buf_layout),
	DEVMETHOD(dpaa2_cmd_ni_get_tx_data_off, dpaa2_rc_ni_get_tx_data_offset),
	DEVMETHOD(dpaa2_cmd_ni_get_port_mac_addr, dpaa2_rc_ni_get_port_mac_addr),
	DEVMETHOD(dpaa2_cmd_ni_set_prim_mac_addr, dpaa2_rc_ni_set_prim_mac_addr),
	DEVMETHOD(dpaa2_cmd_ni_get_prim_mac_addr, dpaa2_rc_ni_get_prim_mac_addr),
	DEVMETHOD(dpaa2_cmd_ni_set_link_cfg, dpaa2_rc_ni_set_link_cfg),
	DEVMETHOD(dpaa2_cmd_ni_get_link_cfg, dpaa2_rc_ni_get_link_cfg),
	DEVMETHOD(dpaa2_cmd_ni_get_link_state, dpaa2_rc_ni_get_link_state),
	DEVMETHOD(dpaa2_cmd_ni_set_qos_table, dpaa2_rc_ni_set_qos_table),
	DEVMETHOD(dpaa2_cmd_ni_clear_qos_table, dpaa2_rc_ni_clear_qos_table),
	DEVMETHOD(dpaa2_cmd_ni_set_pools, dpaa2_rc_ni_set_pools),
	DEVMETHOD(dpaa2_cmd_ni_set_err_behavior,dpaa2_rc_ni_set_err_behavior),
	DEVMETHOD(dpaa2_cmd_ni_get_queue, dpaa2_rc_ni_get_queue),
	DEVMETHOD(dpaa2_cmd_ni_set_queue, dpaa2_rc_ni_set_queue),
	DEVMETHOD(dpaa2_cmd_ni_get_qdid, dpaa2_rc_ni_get_qdid),
	DEVMETHOD(dpaa2_cmd_ni_add_mac_addr, dpaa2_rc_ni_add_mac_addr),
	DEVMETHOD(dpaa2_cmd_ni_remove_mac_addr, dpaa2_rc_ni_remove_mac_addr),
	DEVMETHOD(dpaa2_cmd_ni_clear_mac_filters, dpaa2_rc_ni_clear_mac_filters),
	DEVMETHOD(dpaa2_cmd_ni_set_mfl,	dpaa2_rc_ni_set_mfl),
	DEVMETHOD(dpaa2_cmd_ni_set_offload, dpaa2_rc_ni_set_offload),
	DEVMETHOD(dpaa2_cmd_ni_set_irq_mask, dpaa2_rc_ni_set_irq_mask),
	DEVMETHOD(dpaa2_cmd_ni_set_irq_enable, dpaa2_rc_ni_set_irq_enable),
	DEVMETHOD(dpaa2_cmd_ni_get_irq_status, dpaa2_rc_ni_get_irq_status),
	DEVMETHOD(dpaa2_cmd_ni_set_uni_promisc, dpaa2_rc_ni_set_uni_promisc),
	DEVMETHOD(dpaa2_cmd_ni_set_multi_promisc, dpaa2_rc_ni_set_multi_promisc),
	DEVMETHOD(dpaa2_cmd_ni_get_statistics, dpaa2_rc_ni_get_statistics),
	DEVMETHOD(dpaa2_cmd_ni_set_rx_tc_dist, dpaa2_rc_ni_set_rx_tc_dist),

	/* DPIO commands */
	DEVMETHOD(dpaa2_cmd_io_open,	dpaa2_rc_io_open),
	DEVMETHOD(dpaa2_cmd_io_close,	dpaa2_rc_io_close),
	DEVMETHOD(dpaa2_cmd_io_enable,	dpaa2_rc_io_enable),
	DEVMETHOD(dpaa2_cmd_io_disable,	dpaa2_rc_io_disable),
	DEVMETHOD(dpaa2_cmd_io_reset,	dpaa2_rc_io_reset),
	DEVMETHOD(dpaa2_cmd_io_get_attributes, dpaa2_rc_io_get_attributes),
	DEVMETHOD(dpaa2_cmd_io_set_irq_mask, dpaa2_rc_io_set_irq_mask),
	DEVMETHOD(dpaa2_cmd_io_get_irq_status, dpaa2_rc_io_get_irq_status),
	DEVMETHOD(dpaa2_cmd_io_set_irq_enable, dpaa2_rc_io_set_irq_enable),
	DEVMETHOD(dpaa2_cmd_io_add_static_dq_chan, dpaa2_rc_io_add_static_dq_chan),

	/* DPBP commands */
	DEVMETHOD(dpaa2_cmd_bp_open,	dpaa2_rc_bp_open),
	DEVMETHOD(dpaa2_cmd_bp_close,	dpaa2_rc_bp_close),
	DEVMETHOD(dpaa2_cmd_bp_enable,	dpaa2_rc_bp_enable),
	DEVMETHOD(dpaa2_cmd_bp_disable,	dpaa2_rc_bp_disable),
	DEVMETHOD(dpaa2_cmd_bp_reset,	dpaa2_rc_bp_reset),
	DEVMETHOD(dpaa2_cmd_bp_get_attributes, dpaa2_rc_bp_get_attributes),

	/* DPMAC commands */
	DEVMETHOD(dpaa2_cmd_mac_open,	dpaa2_rc_mac_open),
	DEVMETHOD(dpaa2_cmd_mac_close,	dpaa2_rc_mac_close),
	DEVMETHOD(dpaa2_cmd_mac_reset,	dpaa2_rc_mac_reset),
	DEVMETHOD(dpaa2_cmd_mac_mdio_read, dpaa2_rc_mac_mdio_read),
	DEVMETHOD(dpaa2_cmd_mac_mdio_write, dpaa2_rc_mac_mdio_write),
	DEVMETHOD(dpaa2_cmd_mac_get_addr, dpaa2_rc_mac_get_addr),
	DEVMETHOD(dpaa2_cmd_mac_get_attributes, dpaa2_rc_mac_get_attributes),
	DEVMETHOD(dpaa2_cmd_mac_set_link_state, dpaa2_rc_mac_set_link_state),
	DEVMETHOD(dpaa2_cmd_mac_set_irq_mask, dpaa2_rc_mac_set_irq_mask),
	DEVMETHOD(dpaa2_cmd_mac_set_irq_enable, dpaa2_rc_mac_set_irq_enable),
	DEVMETHOD(dpaa2_cmd_mac_get_irq_status, dpaa2_rc_mac_get_irq_status),

	/* DPCON commands */
	DEVMETHOD(dpaa2_cmd_con_open,	dpaa2_rc_con_open),
	DEVMETHOD(dpaa2_cmd_con_close,	dpaa2_rc_con_close),
	DEVMETHOD(dpaa2_cmd_con_reset,	dpaa2_rc_con_reset),
	DEVMETHOD(dpaa2_cmd_con_enable,	dpaa2_rc_con_enable),
	DEVMETHOD(dpaa2_cmd_con_disable, dpaa2_rc_con_disable),
	DEVMETHOD(dpaa2_cmd_con_get_attributes, dpaa2_rc_con_get_attributes),
	DEVMETHOD(dpaa2_cmd_con_set_notif, dpaa2_rc_con_set_notif),

	/* DPMCP commands */
	DEVMETHOD(dpaa2_cmd_mcp_create,	dpaa2_rc_mcp_create),
	DEVMETHOD(dpaa2_cmd_mcp_destroy, dpaa2_rc_mcp_destroy),
	DEVMETHOD(dpaa2_cmd_mcp_open,	dpaa2_rc_mcp_open),
	DEVMETHOD(dpaa2_cmd_mcp_close,	dpaa2_rc_mcp_close),
	DEVMETHOD(dpaa2_cmd_mcp_reset,	dpaa2_rc_mcp_reset),

	DEVMETHOD_END
};

static driver_t dpaa2_rc_driver = {
	"dpaa2_rc",
	dpaa2_rc_methods,
	sizeof(struct dpaa2_rc_softc),
};

/* For root container */
DRIVER_MODULE(dpaa2_rc, dpaa2_mc, dpaa2_rc_driver, 0, 0);
/* For child containers */
DRIVER_MODULE(dpaa2_rc, dpaa2_rc, dpaa2_rc_driver, 0, 0);