diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c
index d4e4de216736..1fd109a6edf3 100644
--- a/sys/dev/dpaa2/dpaa2_ni.c
+++ b/sys/dev/dpaa2/dpaa2_ni.c
@@ -1,4258 +1,4184 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause
  *
- * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2021-2023 Dmitry Salychev
  * Copyright © 2022 Mathew McBride
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 /*
  * The DPAA2 Network Interface (DPNI) driver.
  *
  * The DPNI object is a network interface that is configurable to support a wide
  * range of features from a very basic Ethernet interface up to a
  * high-functioning network interface. The DPNI supports features that are
  * expected by standard network stacks, from basic features to offloads.
  *
  * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
  * functions are provided for standard network protocols (L2, L3, L4, etc.).
  */
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/bus.h>
 #include <sys/rman.h>
 #include <sys/module.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/socket.h>
 #include <sys/sockio.h>
 #include <sys/sysctl.h>
 #include <sys/mbuf.h>
 #include <sys/taskqueue.h>
 #include <sys/sysctl.h>
 #include <sys/buf_ring.h>
 #include <sys/smp.h>
 #include <sys/proc.h>
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
 
 #include <machine/bus.h>
 #include <machine/resource.h>
 #include <machine/atomic.h>
+#include <machine/vmparam.h>
 
 #include <net/ethernet.h>
 #include <net/bpf.h>
 #include <net/if.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
 #include <net/if_types.h>
 #include <net/if_var.h>
 
 #include <dev/pci/pcivar.h>
 #include <dev/mii/mii.h>
 #include <dev/mii/miivar.h>
 #include <dev/mdio/mdio.h>
 
 #include "opt_acpi.h"
 #include "opt_platform.h"
 
 #include "pcib_if.h"
 #include "pci_if.h"
 #include "miibus_if.h"
 #include "memac_mdio_if.h"
 
 #include "dpaa2_types.h"
 #include "dpaa2_mc.h"
 #include "dpaa2_mc_if.h"
 #include "dpaa2_mcp.h"
 #include "dpaa2_swp.h"
 #include "dpaa2_swp_if.h"
 #include "dpaa2_cmd_if.h"
 #include "dpaa2_ni.h"
 
 #define BIT(x)			(1ul << (x))
 #define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
 #define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
 
 /* Frame Dequeue Response status bits. */
 #define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)
 
 #define	ALIGN_UP(x, y)		roundup2((x), (y))
 #define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
 #define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
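 
 /*
  * A worked example of the helpers above, assuming a 64-byte cache line:
  * ALIGN_UP(100, 64) == 128, ALIGN_DOWN(100, 64) == 64 and
  * CACHE_LINE_ALIGN(100) == 128. The alignment must be a power of two,
  * since roundup2()/rounddown2() operate with bit masks.
  */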
 
 #define DPNI_LOCK(__sc) do {			\
 	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
 	mtx_lock(&(__sc)->lock);		\
 } while (0)
 #define	DPNI_UNLOCK(__sc) do {			\
 	mtx_assert(&(__sc)->lock, MA_OWNED);	\
 	mtx_unlock(&(__sc)->lock);		\
 } while (0)
 
 #define TX_LOCK(__tx) do {			\
 	mtx_assert(&(__tx)->lock, MA_NOTOWNED);	\
 	mtx_lock(&(__tx)->lock);		\
 } while (0)
 #define	TX_UNLOCK(__tx) do {			\
 	mtx_assert(&(__tx)->lock, MA_OWNED);	\
 	mtx_unlock(&(__tx)->lock);		\
 } while (0)
 
 #define DPAA2_TX_RING(sc, chan, tc)				\
 	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
 
 #define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
 #define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
 #define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */
 
 /* Default maximum frame length. */
 #define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)
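 /* NOTE: 1518 - 4 = 1514 bytes with the standard <net/ethernet.h> values. */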
 
 /* Minimally supported version of the DPNI API. */
 #define DPNI_VER_MAJOR		7
 #define DPNI_VER_MINOR		0
 
 /* Rx/Tx buffers configuration. */
 #define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
 #define BUF_ALIGN		64
 #define BUF_SWA_SIZE		64  /* SW annotation size */
 #define BUF_RX_HWA_SIZE		64  /* HW annotation size */
 #define BUF_TX_HWA_SIZE		128 /* HW annotation size */
 #define BUF_SIZE		(MJUM9BYTES)
-#define	BUF_MAXADDR_49BIT	0x1FFFFFFFFFFFFul
-#define	BUF_MAXADDR		(BUS_SPACE_MAXADDR)
 
 #define DPAA2_TX_BUFRING_SZ	(4096u)
 #define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
 #define DPAA2_TX_SEG_SZ		(4096u)
 #define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
-#define DPAA2_TX_SGT_SZ		(512u) /* bytes */
+#define DPAA2_TX_SGT_SZ		(PAGE_SIZE) /* bytes */
 
 /* Size of a buffer to keep a QoS table key configuration. */
 #define ETH_QOS_KCFG_BUF_SIZE	256
 
 /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
 #define DPAA2_CLASSIFIER_DMA_SIZE 256
 
 /* Channel storage buffer configuration. */
 #define ETH_STORE_FRAMES	16u
 #define ETH_STORE_SIZE		((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
 #define ETH_STORE_ALIGN		64u
 
 /* Buffers layout options. */
 #define BUF_LOPT_TIMESTAMP	0x1
 #define BUF_LOPT_PARSER_RESULT	0x2
 #define BUF_LOPT_FRAME_STATUS	0x4
 #define BUF_LOPT_PRIV_DATA_SZ	0x8
 #define BUF_LOPT_DATA_ALIGN	0x10
 #define BUF_LOPT_DATA_HEAD_ROOM	0x20
 #define BUF_LOPT_DATA_TAIL_ROOM	0x40
 
 #define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
 #define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
 #define DPAA2_NI_BUF_CHAN_SHIFT	(60)
 #define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
 #define DPAA2_NI_BUF_IDX_SHIFT	(49)
 #define DPAA2_NI_TX_IDX_MASK	(0x7u)
 #define DPAA2_NI_TX_IDX_SHIFT	(57)
 #define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
 #define DPAA2_NI_TXBUF_IDX_SHIFT (49)
 
 #define DPAA2_NI_FD_FMT_MASK	(0x3u)
 #define DPAA2_NI_FD_FMT_SHIFT	(12)
 #define DPAA2_NI_FD_ERR_MASK	(0xFFu)
 #define DPAA2_NI_FD_ERR_SHIFT	(0)
 #define DPAA2_NI_FD_SL_MASK	(0x1u)
 #define DPAA2_NI_FD_SL_SHIFT	(14)
 #define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
 #define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
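 
 /*
  * A minimal sketch of how the masks and shifts above carve up a 64-bit frame
  * descriptor address: bits 0-48 carry the 49-bit buffer address, while the
  * spare upper bits are used as software cookies (e.g. a Tx ring index and a
  * Tx buffer index). The helper below is purely illustrative and is not part
  * of the driver.
  */
 static __unused inline uint64_t
 dpaa2_ni_fd_addr_sketch(uint64_t paddr, uint64_t txi, uint64_t bufi)
 {
 	/* Physical address in the low 49 bits, indices in the spare bits. */
 	return ((paddr & DPAA2_NI_BUF_ADDR_MASK) |
 	    ((txi & DPAA2_NI_TX_IDX_MASK) << DPAA2_NI_TX_IDX_SHIFT) |
 	    ((bufi & DPAA2_NI_TXBUF_IDX_MASK) << DPAA2_NI_TXBUF_IDX_SHIFT));
 }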
 
 /* Enables TCAM for Flow Steering and QoS look-ups. */
 #define DPNI_OPT_HAS_KEY_MASKING 0x10
 
 /* Unique IDs for the supported Rx classification header fields. */
 #define DPAA2_ETH_DIST_ETHDST	BIT(0)
 #define DPAA2_ETH_DIST_ETHSRC	BIT(1)
 #define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
 #define DPAA2_ETH_DIST_VLAN	BIT(3)
 #define DPAA2_ETH_DIST_IPSRC	BIT(4)
 #define DPAA2_ETH_DIST_IPDST	BIT(5)
 #define DPAA2_ETH_DIST_IPPROTO	BIT(6)
 #define DPAA2_ETH_DIST_L4SRC	BIT(7)
 #define DPAA2_ETH_DIST_L4DST	BIT(8)
 #define DPAA2_ETH_DIST_ALL	(~0ULL)
 
 /* L3-L4 network traffic flow hash options. */
 #define	RXH_L2DA		(1 << 1)
 #define	RXH_VLAN		(1 << 2)
 #define	RXH_L3_PROTO		(1 << 3)
 #define	RXH_IP_SRC		(1 << 4)
 #define	RXH_IP_DST		(1 << 5)
 #define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
 #define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
 #define	RXH_DISCARD		(1 << 31)
 
 /* Default Rx hash options, set during attaching. */
 #define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
 
 MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
 
 /* DPAA2 Network Interface resource specification. */
 struct resource_spec dpaa2_ni_spec[] = {
 	/*
 	 * DPMCP resources.
 	 *
 	 * NOTE: MC command portals (MCPs) are used to send commands to, and
 	 *	 receive responses from, the MC firmware. One portal per DPNI.
 	 */
 #define MCP_RES_NUM	(1u)
 #define MCP_RID_OFF	(0u)
 #define MCP_RID(rid)	((rid) + MCP_RID_OFF)
 	/* --- */
 	{ DPAA2_DEV_MCP, MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	/*
 	 * DPIO resources (software portals).
 	 *
 	 * NOTE: One per running core. While DPIOs are the source of data
 	 *	 availability interrupts, the DPCONs are used to identify the
 	 *	 network interface that has produced ingress data to that core.
 	 */
 #define IO_RES_NUM	(16u)
 #define IO_RID_OFF	(MCP_RID_OFF + MCP_RES_NUM)
 #define IO_RID(rid)	((rid) + IO_RID_OFF)
 	/* --- */
 	{ DPAA2_DEV_IO,  IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
 	{ DPAA2_DEV_IO,  IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	{ DPAA2_DEV_IO,  IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
 	/*
 	 * DPBP resources (buffer pools).
 	 *
 	 * NOTE: One per network interface.
 	 */
 #define BP_RES_NUM	(1u)
 #define BP_RID_OFF	(IO_RID_OFF + IO_RES_NUM)
 #define BP_RID(rid)	((rid) + BP_RID_OFF)
 	/* --- */
 	{ DPAA2_DEV_BP,  BP_RID(0),   RF_ACTIVE },
 	/*
 	 * DPCON resources (channels).
 	 *
 	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic
 	 *	 is distributed.
 	 * NOTE: Since it is necessary to distinguish between traffic from
 	 *	 different network interfaces arriving on the same core, the
 	 *	 DPCONs must be private to the DPNIs.
 	 */
 #define CON_RES_NUM	(16u)
 #define CON_RID_OFF	(BP_RID_OFF + BP_RES_NUM)
 #define CON_RID(rid)	((rid) + CON_RID_OFF)
 	/* --- */
 	{ DPAA2_DEV_CON, CON_RID(0),   RF_ACTIVE },
 	{ DPAA2_DEV_CON, CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
 	{ DPAA2_DEV_CON, CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },
 	/* --- */
 	RESOURCE_SPEC_END
 };
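 
 /*
  * For reference, the rid offsets above unfold as follows: the DPMCP occupies
  * rid 0 (MCP_RID_OFF = 0), DPIOs occupy rids 1..16 (IO_RID_OFF = 1), the DPBP
  * occupies rid 17 (BP_RID_OFF = 17) and DPCONs occupy rids 18..33
  * (CON_RID_OFF = 18).
  */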
 
 /* Supported header fields for Rx hash distribution key */
 static const struct dpaa2_eth_dist_fields dist_fields[] = {
 	{
 		/* L2 header */
 		.rxnfc_field = RXH_L2DA,
 		.cls_prot = NET_PROT_ETH,
 		.cls_field = NH_FLD_ETH_DA,
 		.id = DPAA2_ETH_DIST_ETHDST,
 		.size = 6,
 	}, {
 		.cls_prot = NET_PROT_ETH,
 		.cls_field = NH_FLD_ETH_SA,
 		.id = DPAA2_ETH_DIST_ETHSRC,
 		.size = 6,
 	}, {
 		/* This is the last ethertype field parsed:
 		 * depending on frame format, it can be the MAC ethertype
 		 * or the VLAN etype.
 		 */
 		.cls_prot = NET_PROT_ETH,
 		.cls_field = NH_FLD_ETH_TYPE,
 		.id = DPAA2_ETH_DIST_ETHTYPE,
 		.size = 2,
 	}, {
 		/* VLAN header */
 		.rxnfc_field = RXH_VLAN,
 		.cls_prot = NET_PROT_VLAN,
 		.cls_field = NH_FLD_VLAN_TCI,
 		.id = DPAA2_ETH_DIST_VLAN,
 		.size = 2,
 	}, {
 		/* IP header */
 		.rxnfc_field = RXH_IP_SRC,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_SRC,
 		.id = DPAA2_ETH_DIST_IPSRC,
 		.size = 4,
 	}, {
 		.rxnfc_field = RXH_IP_DST,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_DST,
 		.id = DPAA2_ETH_DIST_IPDST,
 		.size = 4,
 	}, {
 		.rxnfc_field = RXH_L3_PROTO,
 		.cls_prot = NET_PROT_IP,
 		.cls_field = NH_FLD_IP_PROTO,
 		.id = DPAA2_ETH_DIST_IPPROTO,
 		.size = 1,
 	}, {
 		/* Using UDP ports, this is functionally equivalent to raw
 		 * byte pairs from L4 header.
 		 */
 		.rxnfc_field = RXH_L4_B_0_1,
 		.cls_prot = NET_PROT_UDP,
 		.cls_field = NH_FLD_UDP_PORT_SRC,
 		.id = DPAA2_ETH_DIST_L4SRC,
 		.size = 2,
 	}, {
 		.rxnfc_field = RXH_L4_B_2_3,
 		.cls_prot = NET_PROT_UDP,
 		.cls_field = NH_FLD_UDP_PORT_DST,
 		.id = DPAA2_ETH_DIST_L4DST,
 		.size = 2,
 	},
 };
 
 static struct dpni_stat {
 	int	 page;
 	int	 cnt;
 	char	*name;
 	char	*desc;
 } dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
 	/* PAGE, COUNTER, NAME, DESCRIPTION */
 	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
 	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
 	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
 	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
 	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
 	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
 	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
 	   				"filtering" },
 	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
 	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
 	   				"depletion in DPNI buffer pools" },
 };
 
 /* Device interface */
 static int dpaa2_ni_probe(device_t);
 static int dpaa2_ni_attach(device_t);
 static int dpaa2_ni_detach(device_t);
 
 /* DPAA2 network interface setup and configuration */
 static int dpaa2_ni_setup(device_t);
 static int dpaa2_ni_setup_channels(device_t);
 static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *,
     enum dpaa2_ni_queue_type);
 static int dpaa2_ni_bind(device_t);
 static int dpaa2_ni_setup_rx_dist(device_t);
 static int dpaa2_ni_setup_irqs(device_t);
 static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
 static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
 static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
 static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
 static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);
 
 /* Tx/Rx flow configuration */
 static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
 static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
 static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);
 
 /* Configuration subroutines */
 static int dpaa2_ni_set_buf_layout(device_t);
 static int dpaa2_ni_set_pause_frame(device_t);
 static int dpaa2_ni_set_qos_table(device_t);
 static int dpaa2_ni_set_mac_addr(device_t);
 static int dpaa2_ni_set_hash(device_t, uint64_t);
 static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
 
 /* Buffers and buffer pools */
 static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t);
-static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
-static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
+static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *);
+static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *);
 static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *,
     struct dpaa2_ni_channel *);
 
 /* Frame descriptor routines */
 static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
     struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
 static int dpaa2_ni_fd_err(struct dpaa2_fd *);
 static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
-static int dpaa2_ni_fd_chan_idx(struct dpaa2_fd *);
-static int dpaa2_ni_fd_buf_idx(struct dpaa2_fd *);
-static int dpaa2_ni_fd_tx_idx(struct dpaa2_fd *);
-static int dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *);
 static int dpaa2_ni_fd_format(struct dpaa2_fd *);
 static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
 static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
 
 /* Various subroutines */
 static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
 static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
 static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *,
     struct dpaa2_dq **);
 
 /* Network interface routines */
 static void dpaa2_ni_init(void *);
 static int  dpaa2_ni_transmit(if_t, struct mbuf *);
 static void dpaa2_ni_qflush(if_t);
 static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
 static int  dpaa2_ni_update_mac_filters(if_t);
 static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);
 
 /* Interrupt handlers */
 static void dpaa2_ni_intr(void *);
 
 /* MII handlers */
 static void dpaa2_ni_miibus_statchg(device_t);
 static int  dpaa2_ni_media_change(if_t);
 static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
 static void dpaa2_ni_media_tick(void *);
 
 /* DMA mapping callback */
 static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int);
 
 /* Tx/Rx routines. */
 static void dpaa2_ni_poll(void *);
 static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *,
     struct dpaa2_ni_tx_ring *, struct mbuf *);
 static void dpaa2_ni_bp_task(void *, int);
 
 /* Tx/Rx subroutines */
 static int  dpaa2_ni_consume_frames(struct dpaa2_ni_channel *,
     struct dpaa2_ni_fq **, uint32_t *);
 static int  dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
     struct dpaa2_fd *);
 static int  dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
     struct dpaa2_fd *);
 static int  dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
     struct dpaa2_fd *);
 
 /* sysctl(9) */
 static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
 static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
 static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);
 
 static int
 dpaa2_ni_probe(device_t dev)
 {
 	/* The DPNI device is added by the parent resource container itself. */
 	device_set_desc(dev, "DPAA2 Network Interface");
 	return (BUS_PROBE_DEFAULT);
 }
 
 static int
 dpaa2_ni_attach(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	device_t mcp_dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_devinfo *mcp_dinfo;
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	if_t ifp;
 	char tq_name[32];
 	int error;
 
 	sc->dev = dev;
 	sc->ifp = NULL;
 	sc->miibus = NULL;
 	sc->mii = NULL;
 	sc->media_status = 0;
 	sc->if_flags = 0;
 	sc->link_state = LINK_STATE_UNKNOWN;
 	sc->buf_align = 0;
 
 	/* For debug purposes only! */
 	sc->rx_anomaly_frames = 0;
 	sc->rx_single_buf_frames = 0;
 	sc->rx_sg_buf_frames = 0;
 	sc->rx_enq_rej_frames = 0;
 	sc->rx_ieoi_err_frames = 0;
 	sc->tx_single_buf_frames = 0;
 	sc->tx_sg_frames = 0;
 
 	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
 	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);
 
 	sc->bp_dmat = NULL;
 	sc->st_dmat = NULL;
 	sc->rxd_dmat = NULL;
 	sc->qos_dmat = NULL;
 
 	sc->qos_kcfg.type = DPAA2_BUF_STORE;
 	sc->qos_kcfg.store.dmap = NULL;
 	sc->qos_kcfg.store.paddr = 0;
 	sc->qos_kcfg.store.vaddr = NULL;
 
 	sc->rxd_kcfg.type = DPAA2_BUF_STORE;
 	sc->rxd_kcfg.store.dmap = NULL;
 	sc->rxd_kcfg.store.paddr = 0;
 	sc->rxd_kcfg.store.vaddr = NULL;
 
 	sc->mac.dpmac_id = 0;
 	sc->mac.phy_dev = NULL;
 	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);
 
 	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
 	if (error) {
 		device_printf(dev, "%s: failed to allocate resources: "
 		    "error=%d\n", __func__, error);
 		goto err_exit;
 	}
 
 	/* Obtain MC portal. */
 	mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
 	mcp_dinfo = device_get_ivars(mcp_dev);
 	dinfo->portal = mcp_dinfo->portal;
 
 	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);
 
 	/* Allocate network interface */
 	ifp = if_alloc(IFT_ETHER);
 	if (ifp == NULL) {
 		device_printf(dev, "%s: failed to allocate network interface\n",
 		    __func__);
 		goto err_exit;
 	}
 	sc->ifp = ifp;
 	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));
 
 	if_setsoftc(ifp, sc);
 	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
 	if_setinitfn(ifp, dpaa2_ni_init);
 	if_setioctlfn(ifp, dpaa2_ni_ioctl);
 	if_settransmitfn(ifp, dpaa2_ni_transmit);
 	if_setqflushfn(ifp, dpaa2_ni_qflush);
 
 	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
 	if_setcapenable(ifp, if_getcapabilities(ifp));
 
 	DPAA2_CMD_INIT(&cmd);
 
 	/* Open resource container and network interface object. */
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/*
 	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
 	 *          (BPSCN) returned as a result of the VDQ command instead.
 	 *          It is similar to CDAN processed in dpaa2_io_intr().
 	 */
 	/* Create a taskqueue thread to release new buffers to the pool. */
 	TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc);
 	bzero(tq_name, sizeof (tq_name));
 	snprintf(tq_name, sizeof (tq_name), "%s_tqbp",
 	    device_get_nameunit(dev));
 	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
 	    taskqueue_thread_enqueue, &sc->bp_taskq);
 	if (sc->bp_taskq == NULL) {
 		device_printf(dev, "%s: failed to allocate task queue: %s\n",
 		    __func__, tq_name);
 		goto close_ni;
 	}
 	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);
 
 	error = dpaa2_ni_setup(dev);
 	if (error) {
 		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
 		    __func__, error);
 		goto close_ni;
 	}
 	error = dpaa2_ni_setup_channels(dev);
 	if (error) {
 		device_printf(dev, "%s: failed to setup QBMan channels: "
 		    "error=%d\n", __func__, error);
 		goto close_ni;
 	}
 
 	error = dpaa2_ni_bind(dev);
 	if (error) {
 		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
 		    __func__, error);
 		goto close_ni;
 	}
 	error = dpaa2_ni_setup_irqs(dev);
 	if (error) {
 		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
 		    __func__, error);
 		goto close_ni;
 	}
 	error = dpaa2_ni_setup_sysctls(sc);
 	if (error) {
 		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
 		    __func__, error);
 		goto close_ni;
 	}
 
 	ether_ifattach(sc->ifp, sc->mac.addr);
 	callout_init(&sc->mii_callout, 0);
 
 	return (0);
 
 close_ni:
 	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (ENXIO);
 }
 
 static void
 dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
 {
 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
 
 	DPNI_LOCK(sc);
 	ifmr->ifm_count = 0;
 	ifmr->ifm_mask = 0;
 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
 	ifmr->ifm_current = ifmr->ifm_active =
 	    sc->fixed_ifmedia.ifm_cur->ifm_media;
 
 	/*
 	 * In non-PHY use cases we need to signal link state up; otherwise
 	 * certain things requiring a link event (e.g. an async DHCP client
 	 * run from devd) do not happen.
 	 */
 	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
 		if_link_state_change(ifp, LINK_STATE_UP);
 	}
 
 	/*
 	 * TODO: Check the status of the link partner (DPMAC, DPNI or other) and
 	 * reset if down. This is different from DPAA2_MAC_LINK_TYPE_PHY, as
 	 * the MC firmware sets the status here instead of us telling the MC
 	 * what it is.
 	 */
 	DPNI_UNLOCK(sc);
 
 	return;
 }
 
 static void
 dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
 {
 	/*
 	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
 	 * 'apparent' speed from it.
 	 */
 	sc->fixed_link = true;
 
 	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
 		     dpaa2_ni_fixed_media_status);
 	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
 	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
 }
 
 static int
 dpaa2_ni_detach(device_t dev)
 {
 	/* TBD */
 	return (0);
 }
 
 /**
  * @brief Configure DPAA2 network interface object.
  */
 static int
 dpaa2_ni_setup(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
 	struct dpaa2_cmd cmd;
 	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
 	uint16_t rc_token, ni_token, mac_token;
 	struct dpaa2_mac_attr attr;
 	enum dpaa2_mac_link_type link_type;
 	uint32_t link;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/* Check if we can work with this DPNI object. */
 	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
 	    &sc->api_minor);
 	if (error) {
 		device_printf(dev, "%s: failed to get DPNI API version\n",
 		    __func__);
 		goto close_ni;
 	}
 	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
 		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
 		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
 		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
 		error = ENODEV;
 		goto close_ni;
 	}
 
 	/* Reset the DPNI object. */
 	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
 	if (error) {
 		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
 		    __func__, dinfo->id);
 		goto close_ni;
 	}
 
 	/* Obtain attributes of the DPNI object. */
 	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain DPNI attributes: "
 		    "id=%d\n", __func__, dinfo->id);
 		goto close_ni;
 	}
 	if (bootverbose) {
 		device_printf(dev, "\toptions=0x%#x queues=%d tx_channels=%d "
 		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
 		    sc->attr.num.channels, sc->attr.wriop_ver);
 		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
 		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
 		    sc->attr.num.cgs);
 		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
 		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
 		    sc->attr.entries.qos, sc->attr.entries.fs);
 		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
 		    sc->attr.key_size.qos, sc->attr.key_size.fs);
 	}
 
 	/* Configure buffer layouts of the DPNI queues. */
 	error = dpaa2_ni_set_buf_layout(dev);
 	if (error) {
 		device_printf(dev, "%s: failed to configure buffer layout\n",
 		    __func__);
 		goto close_ni;
 	}
 
 	/* Configure DMA resources. */
 	error = dpaa2_ni_setup_dma(sc);
 	if (error) {
 		device_printf(dev, "%s: failed to setup DMA\n", __func__);
 		goto close_ni;
 	}
 
 	/* Setup link between DPNI and an object it's connected to. */
 	ep1_desc.obj_id = dinfo->id;
 	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
 	ep1_desc.type = dinfo->dtype;
 
 	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
 	    &ep1_desc, &ep2_desc, &link);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain an object DPNI is "
 		    "connected to: error=%d\n", __func__, error);
 	} else {
 		device_printf(dev, "connected to %s (id=%d)\n",
 		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);
 
 		error = dpaa2_ni_set_mac_addr(dev);
 		if (error) {
 			device_printf(dev, "%s: failed to set MAC address: "
 			    "error=%d\n", __func__, error);
 		}
 
 		if (ep2_desc.type == DPAA2_DEV_MAC) {
 			/*
 			 * This is the simplest case when DPNI is connected to
 			 * DPMAC directly.
 			 */
 			sc->mac.dpmac_id = ep2_desc.obj_id;
 
 			link_type = DPAA2_MAC_LINK_TYPE_NONE;
 
 			/*
 			 * Need to determine if DPMAC type is PHY (attached to
 			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
 			 * link state managed by MC firmware).
 			 */
 			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
 			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
 			    &mac_token);
 			/*
 			 * Under VFIO, the DPMAC might be sitting in another
 			 * container (DPRC) we don't have access to.
 			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
 			 * the case.
 			 */
 			if (error) {
 				device_printf(dev, "%s: failed to open "
 				    "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
 				    sc->mac.dpmac_id);
 				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
 			} else {
 				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
 				    &cmd, &attr);
 				if (error) {
 					device_printf(dev, "%s: failed to get "
 					    "DPMAC attributes: id=%d, "
 					    "error=%d\n", __func__, dinfo->id,
 					    error);
 				} else {
 					link_type = attr.link_type;
 				}
 			}
 			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
 
 			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
 				device_printf(dev, "connected DPMAC is in FIXED "
 				    "mode\n");
 				dpaa2_ni_setup_fixed_link(sc);
 			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
 				device_printf(dev, "connected DPMAC is in PHY "
 				    "mode\n");
 				error = DPAA2_MC_GET_PHY_DEV(dev,
 				    &sc->mac.phy_dev, sc->mac.dpmac_id);
 				if (error == 0) {
 					error = MEMAC_MDIO_SET_NI_DEV(
 					    sc->mac.phy_dev, dev);
 					if (error != 0) {
 						device_printf(dev, "%s: failed "
 						    "to set dpni dev on memac "
 						    "mdio dev %s: error=%d\n",
 						    __func__,
 						    device_get_nameunit(
 						    sc->mac.phy_dev), error);
 					}
 				}
 				if (error == 0) {
 					error = MEMAC_MDIO_GET_PHY_LOC(
 					    sc->mac.phy_dev, &sc->mac.phy_loc);
 					if (error == ENODEV) {
 						error = 0;
 					}
 					if (error != 0) {
 						device_printf(dev, "%s: failed "
 						    "to get phy location from "
 						    "memac mdio dev %s: error=%d\n",
 						    __func__, device_get_nameunit(
 						    sc->mac.phy_dev), error);
 					}
 				}
 				if (error == 0) {
 					error = mii_attach(sc->mac.phy_dev,
 					    &sc->miibus, sc->ifp,
 					    dpaa2_ni_media_change,
 					    dpaa2_ni_media_status,
 					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
 					    MII_OFFSET_ANY, 0);
 					if (error != 0) {
 						device_printf(dev, "%s: failed "
 						    "to attach to miibus: "
 						    "error=%d\n",
 						    __func__, error);
 					}
 				}
 				if (error == 0) {
 					sc->mii = device_get_softc(sc->miibus);
 				}
 			} else {
 				device_printf(dev, "%s: DPMAC link type is not "
 				    "supported\n", __func__);
 			}
 		} else if (ep2_desc.type == DPAA2_DEV_NI ||
 			   ep2_desc.type == DPAA2_DEV_MUX ||
 			   ep2_desc.type == DPAA2_DEV_SW) {
 			dpaa2_ni_setup_fixed_link(sc);
 		}
 	}
 
 	/* Select mode to enqueue frames. */
 	/* ... TBD ... */
 
 	/*
 	 * Update link configuration to enable Rx/Tx pause frames support.
 	 *
 	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
 	 *       in link configuration. It might be necessary to attach miibus
 	 *       and PHY before this point.
 	 */
 	error = dpaa2_ni_set_pause_frame(dev);
 	if (error) {
 		device_printf(dev, "%s: failed to configure Rx/Tx pause "
 		    "frames\n", __func__);
 		goto close_ni;
 	}
 
 	/* Configure ingress traffic classification. */
 	error = dpaa2_ni_set_qos_table(dev);
 	if (error) {
 		device_printf(dev, "%s: failed to configure QoS table: "
 		    "error=%d\n", __func__, error);
 		goto close_ni;
 	}
 
 	/* Add broadcast physical address to the MAC filtering table. */
 	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
 	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
 	    ni_token), eth_bca);
 	if (error) {
 		device_printf(dev, "%s: failed to add broadcast physical "
 		    "address to the MAC filtering table\n", __func__);
 		goto close_ni;
 	}
 
 	/* Set the maximum allowed length for received frames. */
 	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
 	if (error) {
 		device_printf(dev, "%s: failed to set maximum length for "
 		    "received frames\n", __func__);
 		goto close_ni;
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (0);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 /**
  * @brief Configure QBMan channels and register data availability notifications.
  */
 static int
 dpaa2_ni_setup_channels(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	device_t io_dev, con_dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_ni_channel *channel;
 	struct dpaa2_con_softc *consc;
 	struct dpaa2_con_notif_cfg notif_cfg;
 	struct dpaa2_devinfo *rc_info = device_get_ivars(pdev);
 	struct dpaa2_devinfo *io_info;
 	struct dpaa2_devinfo *con_info;
 	struct dpaa2_io_notif_ctx *ctx;
 	struct dpaa2_buf *buf;
 	struct dpaa2_cmd cmd;
 	struct sysctl_ctx_list *sysctl_ctx;
 	struct sysctl_oid *node;
 	struct sysctl_oid_list *parent;
 	uint32_t i, num_chan;
 	uint16_t rc_token, con_token;
 	int error;
 
 	/* Calculate number of the channels based on the allocated resources. */
 	for (i = 0; i < IO_RES_NUM; i++) {
 		if (!sc->res[IO_RID(i)]) {
 			break;
 		}
 	}
 	num_chan = i;
 	for (i = 0; i < CON_RES_NUM; i++) {
 		if (!sc->res[CON_RID(i)]) {
 			break;
 		}
 	}
 	num_chan = i < num_chan ? i : num_chan;
 	sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS
 	    ? DPAA2_NI_MAX_CHANNELS : num_chan;
 	sc->chan_n = sc->chan_n > sc->attr.num.queues
 	    ? sc->attr.num.queues : sc->chan_n;
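 	/*
 	 * In other words, the number of channels is limited by the smallest
 	 * of: allocated DPIOs, allocated DPCONs, DPAA2_NI_MAX_CHANNELS and
 	 * sc->attr.num.queues.
 	 */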
 
 	device_printf(dev, "channels=%d\n", sc->chan_n);
 
 	sysctl_ctx = device_get_sysctl_ctx(sc->dev);
 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 	node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels",
 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
 	parent = SYSCTL_CHILDREN(node);
 
 	/* Setup channels for the portal. */
 	for (uint32_t i = 0; i < sc->chan_n; i++) {
 		io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]);
 		io_info = device_get_ivars(io_dev);
 
 		con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]);
 		consc = device_get_softc(con_dev);
 		con_info = device_get_ivars(con_dev);
 
 		DPAA2_CMD_INIT(&cmd);
 
 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_info->id,
 		    &rc_token);
 		if (error) {
 			device_printf(dev, "%s: failed to open resource "
 			    "container: id=%d, error=%d\n", __func__,
 			    rc_info->id, error);
 			return (error);
 		}
 		error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, con_info->id,
 		    &con_token);
 		if (error) {
 			device_printf(dev, "%s: failed to open DPCON: id=%d, "
 			    "error=%d\n", __func__, con_info->id, error);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (error);
 		}
 
 		error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
 		if (error) {
 			device_printf(dev, "%s: failed to enable channel: "
 			    "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id,
 			    consc->attr.chan_id);
 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (error);
 		}
 
 		channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI,
 		    M_WAITOK | M_ZERO);
 		if (!channel) {
 			device_printf(dev, "%s: failed to allocate a channel\n",
 			    __func__);
 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (ENOMEM);
 		}
 
 		sc->channels[i] = channel;
 
 		channel->id = consc->attr.chan_id;
 		channel->flowid = i;
 		channel->ni_dev = dev;
 		channel->io_dev = io_dev;
 		channel->con_dev = con_dev;
 		channel->recycled_n = 0;
 		channel->tx_frames = 0; /* for debug purposes */
 		channel->tx_dropped = 0; /* for debug purposes */
 		channel->rxq_n = 0;
 
 		buf = &channel->store;
 		buf->type = DPAA2_BUF_STORE;
 		buf->store.dmat = NULL;
 		buf->store.dmap = NULL;
 		buf->store.paddr = 0;
 		buf->store.vaddr = NULL;
 
 		/* Setup WQ channel notification context. */
 		ctx = &channel->ctx;
 		ctx->qman_ctx = (uint64_t) ctx;
 		ctx->cdan_en = true;
 		ctx->fq_chan_id = channel->id;
 		ctx->io_dev = channel->io_dev;
 		ctx->channel = channel;
 		ctx->poll = dpaa2_ni_poll;
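 		/*
 		 * NOTE: The address of the context itself is registered as its
 		 *	 64-bit QMan context, so that the CDAN handler (see
 		 *	 dpaa2_io_intr()) can recover this channel and its
 		 *	 dpaa2_ni_poll() callback from the notification.
 		 */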
 
 		/* Register the new notification context. */
 		error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx);
 		if (error) {
 			device_printf(dev, "%s: failed to register notification "
 			    "context\n", __func__);
 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (error);
 		}
 
 		/* Register DPCON notification with Management Complex. */
 		notif_cfg.dpio_id = io_info->id;
 		notif_cfg.prior = 0;
 		notif_cfg.qman_ctx = ctx->qman_ctx;
 		error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
 		if (error) {
 			device_printf(dev, "%s: failed to set DPCON "
 			    "notification: dpcon_id=%d, chan_id=%d\n", __func__,
 			    con_info->id, consc->attr.chan_id);
 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (error);
 		}
 
 		/* Allocate an initial number of Rx buffers and channel storage. */
 		error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT);
 		if (error) {
 			device_printf(dev, "%s: failed to seed buffer pool\n",
 			    __func__);
 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (error);
 		}
 		error = dpaa2_ni_seed_chan_storage(sc, channel);
 		if (error) {
 			device_printf(dev, "%s: failed to seed channel "
 			    "storage\n", __func__);
 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (error);
 		}
 
 		/* Prepare queues for this channel. */
 		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF);
 		if (error) {
 			device_printf(dev, "%s: failed to prepare TxConf "
 			    "queue: error=%d\n", __func__, error);
 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (error);
 		}
 		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX);
 		if (error) {
 			device_printf(dev, "%s: failed to prepare Rx queue: "
 			    "error=%d\n", __func__, error);
 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (error);
 		}
 
 		if (bootverbose) {
 			device_printf(dev, "channel: dpio_id=%d "
 			    "dpcon_id=%d chan_id=%d, priorities=%d\n",
 			    io_info->id, con_info->id, channel->id,
 			    consc->attr.prior_num);
 		}
 
 		(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 		    rc_token));
 	}
 
 	/* There is exactly one Rx error queue per DPNI. */
 	error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
 	if (error) {
 		device_printf(dev, "%s: failed to prepare RxError queue: "
 		    "error=%d\n", __func__, error);
 		return (error);
 	}
 
 	return (0);
 }
 
 /**
  * @brief Performs an initial configuration of the frame queues.
  */
 static int
 dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan,
     enum dpaa2_ni_queue_type queue_type)
 {
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_ni_fq *fq;
 
 	switch (queue_type) {
 	case DPAA2_NI_QUEUE_TX_CONF:
 		/* One queue per channel. */
 		fq = &chan->txc_queue;
 
 		fq->consume = dpaa2_ni_tx_conf;
 		fq->chan = chan;
 		fq->flowid = chan->flowid;
 		fq->tc = 0; /* ignored */
 		fq->type = queue_type;
 
 		break;
 	case DPAA2_NI_QUEUE_RX:
 		KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS,
 		    ("too many Rx traffic classes: rx_tcs=%d\n",
 		    sc->attr.num.rx_tcs));
 
 		/* One queue per Rx traffic class within a channel. */
 		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
 			fq = &chan->rx_queues[i];
 
 			fq->consume = dpaa2_ni_rx;
 			fq->chan = chan;
 			fq->flowid = chan->flowid;
 			fq->tc = (uint8_t) i;
 			fq->type = queue_type;
 
 			chan->rxq_n++;
 		}
 		break;
 	case DPAA2_NI_QUEUE_RX_ERR:
 		/* One queue per network interface. */
 		fq = &sc->rxe_queue;
 
 		fq->consume = dpaa2_ni_rx_err;
 		fq->chan = chan;
 		fq->flowid = 0; /* ignored */
 		fq->tc = 0; /* ignored */
 		fq->type = queue_type;
 		break;
 	default:
 		device_printf(dev, "%s: unexpected frame queue type: %d\n",
 		    __func__, queue_type);
 		return (EINVAL);
 	}
 
 	return (0);
 }
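 
 /*
  * To summarize the fan-out above: each channel owns one TxConf queue and one
  * Rx queue per Rx traffic class, while a single RxError queue is shared by
  * the whole DPNI.
  */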
 
 /**
  * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
  */
 static int
 dpaa2_ni_bind(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	device_t bp_dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_devinfo *bp_info;
 	struct dpaa2_cmd cmd;
 	struct dpaa2_ni_pools_cfg pools_cfg;
 	struct dpaa2_ni_err_cfg err_cfg;
 	struct dpaa2_ni_channel *chan;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/* Select buffer pool (only one available at the moment). */
 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
 	bp_info = device_get_ivars(bp_dev);
 
 	/* Configure the buffer pool. */
 	pools_cfg.pools_num = 1;
 	pools_cfg.pools[0].bp_obj_id = bp_info->id;
 	pools_cfg.pools[0].backup_flag = 0;
 	pools_cfg.pools[0].buf_sz = sc->buf_sz;
 	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
 		goto close_ni;
 	}
 
 	/* Setup ingress traffic distribution. */
 	error = dpaa2_ni_setup_rx_dist(dev);
 	if (error && error != EOPNOTSUPP) {
 		device_printf(dev, "%s: failed to setup ingress traffic "
 		    "distribution\n", __func__);
 		goto close_ni;
 	}
 	if (bootverbose && error == EOPNOTSUPP) {
 		device_printf(dev, "Ingress traffic distribution not "
 		    "supported\n");
 	}
 
 	/* Configure handling of error frames. */
 	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
 	err_cfg.set_err_fas = false;
 	err_cfg.action = DPAA2_NI_ERR_DISCARD;
 	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to set errors behavior\n",
 		    __func__);
 		goto close_ni;
 	}
 
 	/* Configure channel queues to generate CDANs. */
 	for (uint32_t i = 0; i < sc->chan_n; i++) {
 		chan = sc->channels[i];
 
 		/* Setup Rx flows. */
 		for (uint32_t j = 0; j < chan->rxq_n; j++) {
 			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
 			if (error) {
 				device_printf(dev, "%s: failed to setup Rx "
 				    "flow: error=%d\n", __func__, error);
 				goto close_ni;
 			}
 		}
 
 		/* Setup Tx flow. */
 		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
 		if (error) {
 			device_printf(dev, "%s: failed to setup Tx "
 			    "flow: error=%d\n", __func__, error);
 			goto close_ni;
 		}
 	}
 
 	/* Configure RxError queue to generate CDAN. */
 	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
 	if (error) {
 		device_printf(dev, "%s: failed to setup RxError flow: "
 		    "error=%d\n", __func__, error);
 		goto close_ni;
 	}
 
 	/*
 	 * Get the Queuing Destination ID (QDID) that should be used for frame
 	 * enqueue operations.
 	 */
 	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
 	    &sc->tx_qdid);
 	if (error) {
 		device_printf(dev, "%s: failed to get Tx queuing destination "
 		    "ID\n", __func__);
 		goto close_ni;
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (0);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 /**
  * @brief Setup ingress traffic distribution.
  *
  * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
  *	 option hasn't been set for the DPNI and the number of DPNI queues is
  *	 greater than 1.
  */
 static int
 dpaa2_ni_setup_rx_dist(device_t dev)
 {
 	/*
 	 * Have the interface implicitly distribute traffic based on the default
 	 * hash key.
 	 */
 	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
 }
 
 static int
 dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_devinfo *con_info;
 	struct dpaa2_cmd cmd;
 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/* Obtain DPCON associated with the FQ's channel. */
 	con_info = device_get_ivars(fq->chan->con_dev);
 
 	queue_cfg.type = DPAA2_NI_QUEUE_RX;
 	queue_cfg.tc = fq->tc;
 	queue_cfg.idx = fq->flowid;
 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain Rx queue "
 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
 		    queue_cfg.idx);
 		goto close_ni;
 	}
 
 	fq->fqid = queue_cfg.fqid;
 
 	queue_cfg.dest_id = con_info->id;
 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
 	queue_cfg.priority = 1;
 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
 	queue_cfg.options =
 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
 	    DPAA2_NI_QUEUE_OPT_DEST;
 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to update Rx queue "
 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
 		    queue_cfg.idx);
 		goto close_ni;
 	}
 
 	if (bootverbose) {
 		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
 		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
 		    fq->fqid, (uint64_t) fq);
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (0);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 static int
 dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_devinfo *con_info;
 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
 	struct dpaa2_ni_tx_ring *tx;
 	struct dpaa2_buf *buf;
 	struct dpaa2_cmd cmd;
 	uint32_t tx_rings_n = 0;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/* Obtain DPCON associated with the FQ's channel. */
 	con_info = device_get_ivars(fq->chan->con_dev);
 
 	KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS,
 	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
 	    sc->attr.num.tx_tcs));
 	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
 	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
 	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));
 
 	/* Setup Tx rings. */
 	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
 		queue_cfg.type = DPAA2_NI_QUEUE_TX;
 		queue_cfg.tc = i;
 		queue_cfg.idx = fq->flowid;
 		queue_cfg.chan_id = fq->chan->id;
 
 		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
 		if (error) {
 			device_printf(dev, "%s: failed to obtain Tx queue "
 			    "configuration: tc=%d, flowid=%d\n", __func__,
 			    queue_cfg.tc, queue_cfg.idx);
 			goto close_ni;
 		}
 
 		tx = &fq->tx_rings[i];
 		tx->fq = fq;
 		tx->fqid = queue_cfg.fqid;
 		tx->txid = tx_rings_n;
 
 		if (bootverbose) {
 			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
 			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
 			    queue_cfg.fqid);
 		}
 
 		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);
 
 		/* Allocate Tx ring buffer. */
 		tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF,
 		    M_NOWAIT, &tx->lock);
 		if (tx->idx_br == NULL) {
 			device_printf(dev, "%s: failed to setup Tx ring buffer"
 			    " (2) fqid=%d\n", __func__, tx->fqid);
 			goto close_ni;
 		}
 
 		/* Configure Tx buffers. */
 		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
 			buf = &tx->buf[j];
 			buf->type = DPAA2_BUF_TX;
 			buf->tx.dmat = buf->tx.sgt_dmat = NULL;
 			buf->tx.dmap = buf->tx.sgt_dmap = NULL;
 			buf->tx.paddr = buf->tx.sgt_paddr = 0;
 			buf->tx.vaddr = buf->tx.sgt_vaddr = NULL;
 			buf->tx.m = NULL;
-			buf->tx.idx = 0;
+			buf->tx.idx = j;
 
-			error = dpaa2_ni_seed_txbuf(sc, buf, j);
+			error = dpaa2_ni_seed_txbuf(sc, buf);
 
 			/* Add index of the Tx buffer to the ring. */
 			buf_ring_enqueue(tx->idx_br, (void *) j);
 		}
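 
 		/*
 		 * NOTE: tx->idx_br now acts as a free-list: it has been
 		 *	 pre-filled with every Tx buffer index, and the
 		 *	 transmit path is expected to dequeue an index per
 		 *	 frame while the Tx confirmation path returns it.
 		 */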
 
 		tx_rings_n++;
 	}
 
 	/* All Tx queues which belong to the same flowid have the same qdbin. */
 	fq->tx_qdbin = queue_cfg.qdbin;
 
 	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
 	queue_cfg.tc = 0; /* ignored for TxConf queue */
 	queue_cfg.idx = fq->flowid;
 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain TxConf queue "
 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
 		    queue_cfg.idx);
 		goto close_ni;
 	}
 
 	fq->fqid = queue_cfg.fqid;
 
 	queue_cfg.dest_id = con_info->id;
 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
 	queue_cfg.priority = 0;
 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
 	queue_cfg.options =
 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
 	    DPAA2_NI_QUEUE_OPT_DEST;
 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to update TxConf queue "
 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
 		    queue_cfg.idx);
 		goto close_ni;
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (0);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 static int
 dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_devinfo *con_info;
 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/* Obtain DPCON associated with the FQ's channel. */
 	con_info = device_get_ivars(fq->chan->con_dev);
 
 	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
 	queue_cfg.tc = fq->tc; /* ignored */
 	queue_cfg.idx = fq->flowid; /* ignored */
 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain RxErr queue "
 		    "configuration\n", __func__);
 		goto close_ni;
 	}
 
 	fq->fqid = queue_cfg.fqid;
 
 	queue_cfg.dest_id = con_info->id;
 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
 	queue_cfg.priority = 1;
 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
 	queue_cfg.options =
 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
 	    DPAA2_NI_QUEUE_OPT_DEST;
 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to update RxErr queue "
 		    "configuration\n", __func__);
 		goto close_ni;
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (0);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 /**
  * @brief Configure DPNI object to generate interrupts.
  */
 static int
 dpaa2_ni_setup_irqs(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/* Configure IRQs. */
 	error = dpaa2_ni_setup_msi(sc);
 	if (error) {
 		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
 		goto close_ni;
 	}
 	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
 	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
 		device_printf(dev, "%s: failed to allocate IRQ resource\n",
 		    __func__);
+		error = ENXIO;
 		goto close_ni;
 	}
 	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
 		device_printf(dev, "%s: failed to setup IRQ resource\n",
 		    __func__);
+		error = ENXIO;
 		goto close_ni;
 	}
 
 	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
 	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
 	if (error) {
 		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
 		    __func__);
 		goto close_ni;
 	}
 
 	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
 	    true);
 	if (error) {
 		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
 		goto close_ni;
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (0);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 /**
  * @brief Allocate MSI interrupts for DPNI.
  */
 static int
 dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
 {
 	int val;
 
 	val = pci_msi_count(sc->dev);
 	if (val < DPAA2_NI_MSI_COUNT)
 		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
 		    DPAA2_IO_MSI_COUNT);
 	val = MIN(val, DPAA2_NI_MSI_COUNT);
 
 	if (pci_alloc_msi(sc->dev, &val) != 0)
 		return (EINVAL);
 
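+	/*
+	 * NOTE: Resource IDs for the MSI vectors start at 1; rid 0 is
+	 *	 reserved for the legacy INTx interrupt line.
+	 */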
 	for (int i = 0; i < val; i++)
 		sc->irq_rid[i] = i + 1;
 
 	return (0);
 }
 
 /**
  * @brief Update DPNI according to the updated interface capabilities.
  */
 static int
 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
 {
 	const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
 	const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
 	device_t pdev = device_get_parent(sc->dev);
 	device_t dev = sc->dev;
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/* Set up checksum validation. */
 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
 	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
 	if (error) {
 		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
 		    __func__, en_rxcsum ? "enable" : "disable");
 		goto close_ni;
 	}
 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
 	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
 	if (error) {
 		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
 		    __func__, en_rxcsum ? "enable" : "disable");
 		goto close_ni;
 	}
 
 	/* Set up checksum generation. */
 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
 	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
 	if (error) {
 		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
 		    __func__, en_txcsum ? "enable" : "disable");
 		goto close_ni;
 	}
 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
 	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
 	if (error) {
 		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
 		    __func__, en_txcsum ? "enable" : "disable");
 		goto close_ni;
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (0);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 /**
  * @brief Update DPNI according to the updated interface flags.
  */
 static int
 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
 {
 	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
 	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
 	device_t pdev = device_get_parent(sc->dev);
 	device_t dev = sc->dev;
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
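+	/*
+	 * Multicast promiscuous mode is turned on if either IFF_PROMISC or
+	 * IFF_ALLMULTI is set on the interface.
+	 */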
 	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
 	    en_promisc || en_allmulti);
 	if (error) {
 		device_printf(dev, "%s: failed to %s multicast promiscuous "
 		    "mode\n", __func__, (en_promisc || en_allmulti)
 		    ? "enable" : "disable");
 		goto close_ni;
 	}
 
 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
 	if (error) {
 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
 		    __func__, en_promisc ? "enable" : "disable");
 		goto close_ni;
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (0);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 static int
 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
 {
 	struct sysctl_ctx_list *ctx;
 	struct sysctl_oid *node, *node2;
 	struct sysctl_oid_list *parent, *parent2;
 	char cbuf[128];
 	int i;
 
 	ctx = device_get_sysctl_ctx(sc->dev);
 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 
 	/* Add DPNI statistics. */
 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
 	parent = SYSCTL_CHILDREN(node);
 	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
 		    "IU", dpni_stat_sysctls[i].desc);
 	}
 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
 	    "Rx frames in the buffers outside of the buffer pools");
 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
 	    "Rx frames in single buffers");
 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
 	    "Rx frames in scatter/gather list");
 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
 	    "Enqueue rejected by QMan");
 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
 	    "QMan IEOI error");
 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
 	    "Tx single buffer frames");
 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
 	    CTLFLAG_RD, &sc->tx_sg_frames,
 	    "Tx S/G frames");
 
 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
 	    "IU", "number of Rx buffers in the buffer pool");
 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
 	    "IU", "number of free Rx buffers in the buffer pool");
 
 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 
 	/* Add channels statistics. */
 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
 	parent = SYSCTL_CHILDREN(node);
 	for (int i = 0; i < sc->chan_n; i++) {
 		snprintf(cbuf, sizeof(cbuf), "%d", i);
 
 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
 		parent2 = SYSCTL_CHILDREN(node2);
 
 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
 		    "Tx frames counter");
 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
 		    "Tx dropped counter");
 	}
 
 	return (0);
 }
 
 static int
 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
 {
 	device_t dev = sc->dev;
 	int error;
 
 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
 
-	/*
-	 * DMA tag to allocate buffers for buffer pool.
-	 *
-	 * NOTE: QBMan supports DMA addresses up to 49-bits maximum.
-	 *	 Bits 63-49 are not used by QBMan.
-	 */
+	/* DMA tag to allocate buffers for Rx buffer pool. */
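+	/*
+	 * NOTE: The 49-bit QBMan address restriction is no longer enforced
+	 *	 here: buffers are now resolved via the frame annotation kept
+	 *	 in the buffer itself rather than via the ADDR_TOK bits of the
+	 *	 physical address.
+	 */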
 	error = bus_dma_tag_create(
 	    bus_get_dma_tag(dev),
 	    sc->buf_align, 0,		/* alignment, boundary */
-	    BUF_MAXADDR_49BIT,		/* low restricted addr */
-	    BUF_MAXADDR,		/* high restricted addr */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* high restricted addr */
 	    NULL, NULL,			/* filter, filterarg */
 	    BUF_SIZE, 1,		/* maxsize, nsegments */
 	    BUF_SIZE, 0,		/* maxsegsize, flags */
 	    NULL, NULL,			/* lockfunc, lockarg */
 	    &sc->bp_dmat);
 	if (error) {
 		device_printf(dev, "%s: failed to create DMA tag for buffer "
 		    "pool\n", __func__);
 		return (error);
 	}
 
 	/* DMA tag to map Tx mbufs. */
 	error = bus_dma_tag_create(
 	    bus_get_dma_tag(dev),
 	    sc->buf_align, 0,		/* alignment, boundary */
-	    BUF_MAXADDR_49BIT,		/* low restricted addr */
-	    BUF_MAXADDR,		/* high restricted addr */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* high restricted addr */
 	    NULL, NULL,			/* filter, filterarg */
 	    DPAA2_TX_SEGS_MAXSZ,	/* maxsize */
 	    DPAA2_TX_SEGLIMIT,		/* nsegments */
 	    DPAA2_TX_SEG_SZ, 0,		/* maxsegsize, flags */
 	    NULL, NULL,			/* lockfunc, lockarg */
 	    &sc->tx_dmat);
 	if (error) {
 		device_printf(dev, "%s: failed to create DMA tag for Tx "
 		    "buffers\n", __func__);
 		return (error);
 	}
 
 	/* DMA tag to allocate channel storage. */
 	error = bus_dma_tag_create(
 	    bus_get_dma_tag(dev),
 	    ETH_STORE_ALIGN, 0,		/* alignment, boundary */
-	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
 	    NULL, NULL,			/* filter, filterarg */
 	    ETH_STORE_SIZE, 1,		/* maxsize, nsegments */
 	    ETH_STORE_SIZE, 0,		/* maxsegsize, flags */
 	    NULL, NULL,			/* lockfunc, lockarg */
 	    &sc->st_dmat);
 	if (error) {
 		device_printf(dev, "%s: failed to create DMA tag for channel "
 		    "storage\n", __func__);
 		return (error);
 	}
 
 	/* DMA tag for Rx distribution key. */
 	error = bus_dma_tag_create(
 	    bus_get_dma_tag(dev),
 	    PAGE_SIZE, 0,		/* alignment, boundary */
-	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
 	    NULL, NULL,			/* filter, filterarg */
 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
 	    NULL, NULL,			/* lockfunc, lockarg */
 	    &sc->rxd_dmat);
 	if (error) {
 		device_printf(dev, "%s: failed to create DMA tag for Rx "
 		    "distribution key\n", __func__);
 		return (error);
 	}
 
 	error = bus_dma_tag_create(
 	    bus_get_dma_tag(dev),
 	    PAGE_SIZE, 0,		/* alignment, boundary */
-	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
 	    NULL, NULL,			/* filter, filterarg */
 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
 	    NULL, NULL,			/* lockfunc, lockarg */
 	    &sc->qos_dmat);
 	if (error) {
 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
 		    __func__);
 		return (error);
 	}
 
 	error = bus_dma_tag_create(
 	    bus_get_dma_tag(dev),
 	    PAGE_SIZE, 0,		/* alignment, boundary */
-	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
+	    BUS_SPACE_MAXADDR,		/* low restricted addr */
 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
 	    NULL, NULL,			/* filter, filterarg */
 	    DPAA2_TX_SGT_SZ, 1,		/* maxsize, nsegments */
 	    DPAA2_TX_SGT_SZ, 0,		/* maxsegsize, flags */
 	    NULL, NULL,			/* lockfunc, lockarg */
 	    &sc->sgt_dmat);
 	if (error) {
 		device_printf(dev, "%s: failed to create DMA tag for S/G "
 		    "tables\n", __func__);
 		return (error);
 	}
 
 	return (0);
 }
 
 /**
  * @brief Configure buffer layouts of the different DPNI queues.
  */
 static int
 dpaa2_ni_set_buf_layout(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_ni_buf_layout buf_layout = {0};
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
 		    "error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/*
 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
 	 * on the WRIOP version.
 	 */
 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
 
 	/*
 	 * Trim the buffer size down so that the size seen by WRIOP is a
 	 * multiple of the alignment selected above.
 	 */
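+	/*
+	 * For example (hypothetical numbers), a 9000-byte BUF_SIZE with a
+	 * 256-byte alignment would be trimmed down to 8960 bytes; a size
+	 * which is already a multiple of the alignment is kept as is.
+	 */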
 	sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align);
 
 	if (bootverbose) {
 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
 		    sc->buf_sz, sc->buf_align);
 	}
 
 	/*
 	 *    Frame Descriptor       Tx buffer layout
 	 *
 	 *                ADDR -> |---------------------|
 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
 	 *                        |---------------------|
 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
 	 *                        |---------------------|
 	 *                        |    DATA HEADROOM    |
 	 *       ADDR + OFFSET -> |---------------------|
 	 *                        |                     |
 	 *                        |                     |
 	 *                        |     FRAME DATA      |
 	 *                        |                     |
 	 *                        |                     |
 	 *                        |---------------------|
 	 *                        |    DATA TAILROOM    |
 	 *                        |---------------------|
 	 *
 	 * NOTE: This layout applies to single-buffer frames only.
 	 */
 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
 	buf_layout.pd_size = BUF_SWA_SIZE;
 	buf_layout.pass_timestamp = true;
 	buf_layout.pass_frame_status = true;
 	buf_layout.options =
 	    BUF_LOPT_PRIV_DATA_SZ |
 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
 	    BUF_LOPT_FRAME_STATUS;
 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
 	if (error) {
 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
 		    __func__);
 		goto close_ni;
 	}
 
 	/* Tx-confirmation buffer layout */
 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
 	buf_layout.options =
 	    BUF_LOPT_TIMESTAMP |
 	    BUF_LOPT_FRAME_STATUS;
 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
 	if (error) {
 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
 		    __func__);
 		goto close_ni;
 	}
 
 	/*
 	 * Driver should reserve the amount of space indicated by this command
 	 * as headroom in all Tx frames.
 	 */
 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
 		    __func__);
 		goto close_ni;
 	}
 
 	if (bootverbose) {
 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
 	}
 	if ((sc->tx_data_off % 64) != 0) {
 		device_printf(dev, "Tx data offset (%d) is not a multiplication "
 		    "of 64 bytes\n", sc->tx_data_off);
 	}
 
 	/*
 	 *    Frame Descriptor       Rx buffer layout
 	 *
 	 *                ADDR -> |---------------------|
-	 *                        | SW FRAME ANNOTATION | 0 bytes
+	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
 	 *                        |---------------------|
 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
 	 *                        |---------------------|
 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
 	 *       ADDR + OFFSET -> |---------------------|
 	 *                        |                     |
 	 *                        |                     |
 	 *                        |     FRAME DATA      |
 	 *                        |                     |
 	 *                        |                     |
 	 *                        |---------------------|
 	 *                        |    DATA TAILROOM    | 0 bytes
 	 *                        |---------------------|
 	 *
 	 * NOTE: This layout applies to single-buffer frames only.
 	 */
 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
-	buf_layout.pd_size = 0;
+	buf_layout.pd_size = BUF_SWA_SIZE;
 	buf_layout.fd_align = sc->buf_align;
-	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE;
+	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
 	buf_layout.tail_size = 0;
 	buf_layout.pass_frame_status = true;
 	buf_layout.pass_parser_result = true;
 	buf_layout.pass_timestamp = true;
 	buf_layout.options =
 	    BUF_LOPT_PRIV_DATA_SZ |
 	    BUF_LOPT_DATA_ALIGN |
 	    BUF_LOPT_DATA_HEAD_ROOM |
 	    BUF_LOPT_DATA_TAIL_ROOM |
 	    BUF_LOPT_FRAME_STATUS |
 	    BUF_LOPT_PARSER_RESULT |
 	    BUF_LOPT_TIMESTAMP;
 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
 	if (error) {
 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
 		    __func__);
 		goto close_ni;
 	}
 
 	error = 0;
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 /**
  * @brief Enable Rx/Tx pause frames.
  *
  * NOTE: With Rx pause enabled, the DPNI stops sending when a pause frame is
  *       received; with Tx pause enabled, the DPNI generates pause frames
  *       itself.
  */
 static int
 dpaa2_ni_set_pause_frame(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_ni_link_cfg link_cfg = {0};
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
 		    "error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain link configuration: "
 		    "error=%d\n", __func__, error);
 		goto close_ni;
 	}
 
 	/* Enable both Rx and Tx pause frames by default. */
 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
 
 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
 	if (error) {
 		device_printf(dev, "%s: failed to set link configuration: "
 		    "error=%d\n", __func__, error);
 		goto close_ni;
 	}
 
 	sc->link_options = link_cfg.options;
 	error = 0;
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 /**
  * @brief Configure QoS table to determine the traffic class for the received
  * frame.
  */
 static int
 dpaa2_ni_set_qos_table(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_ni_qos_table tbl;
 	struct dpaa2_buf *buf = &sc->qos_kcfg;
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	if (sc->attr.num.rx_tcs == 1 ||
 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
 		if (bootverbose) {
 			device_printf(dev, "Ingress traffic classification is "
 			    "not supported\n");
 		}
 		return (0);
 	}
 
 	/*
 	 * Allocate a buffer visible to the device to hold the QoS table key
 	 * configuration.
 	 */
 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
 	    __func__));
 	if (__predict_true(buf->store.dmat == NULL)) {
 		buf->store.dmat = sc->qos_dmat;
 	}
 
 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
 	if (error) {
 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
 		    "configuration\n", __func__);
 		goto err_exit;
 	}
 
 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
 	    buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb,
 	    &buf->store.paddr, BUS_DMA_NOWAIT);
 	if (error) {
 		device_printf(dev, "%s: failed to map QoS key configuration "
 		    "buffer into bus space\n", __func__);
 		goto err_exit;
 	}
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
 		    "error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	tbl.default_tc = 0;
 	tbl.discard_on_miss = false;
 	tbl.keep_entries = false;
 	tbl.kcfg_busaddr = buf->store.paddr;
 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
 	if (error) {
 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
 		goto close_ni;
 	}
 
 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
 	if (error) {
 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
 		goto close_ni;
 	}
 
 	error = 0;
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 static int
 dpaa2_ni_set_mac_addr(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	if_t ifp = sc->ifp;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	struct ether_addr rnd_mac_addr;
 	uint16_t rc_token, ni_token;
 	uint8_t mac_addr[ETHER_ADDR_LEN];
 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
 		    "error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/*
 	 * Get the MAC address associated with the physical port, if the DPNI is
 	 * connected to a DPMAC directly associated with one of the physical
 	 * ports.
 	 */
 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain the MAC address "
 		    "associated with the physical port\n", __func__);
 		goto close_ni;
 	}
 
 	/* Get primary MAC address from the DPNI attributes. */
 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
 	if (error) {
 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
 		    __func__);
 		goto close_ni;
 	}
 
 	if (!ETHER_IS_ZERO(mac_addr)) {
 		/* Set MAC address of the physical port as DPNI's primary one. */
 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
 		    mac_addr);
 		if (error) {
 			device_printf(dev, "%s: failed to set primary MAC "
 			    "address\n", __func__);
 			goto close_ni;
 		}
 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
 			sc->mac.addr[i] = mac_addr[i];
 		}
 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
 		/* Generate random MAC address as DPNI's primary one. */
 		ether_gen_addr(ifp, &rnd_mac_addr);
 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
 			mac_addr[i] = rnd_mac_addr.octet[i];
 		}
 
 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
 		    mac_addr);
 		if (error) {
 			device_printf(dev, "%s: failed to set random primary "
 			    "MAC address\n", __func__);
 			goto close_ni;
 		}
 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
 			sc->mac.addr[i] = mac_addr[i];
 		}
 	} else {
 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
 			sc->mac.addr[i] = dpni_mac_addr[i];
 		}
 	}
 
 	error = 0;
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 static void
 dpaa2_ni_miibus_statchg(device_t dev)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_mac_link_state mac_link = { 0 };
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, mac_token;
 	int error, link_state;
 
 	if (sc->fixed_link || sc->mii == NULL) {
 		return;
 	}
 
 	/*
 	 * Note: ifp link state will only be changed AFTER we are called so we
 	 * cannot rely on ifp->if_linkstate here.
 	 */
 	if (sc->mii->mii_media_status & IFM_AVALID) {
 		if (sc->mii->mii_media_status & IFM_ACTIVE) {
 			link_state = LINK_STATE_UP;
 		} else {
 			link_state = LINK_STATE_DOWN;
 		}
 	} else {
 		link_state = LINK_STATE_UNKNOWN;
 	}
 
 	if (link_state != sc->link_state) {
 		sc->link_state = link_state;
 
 		DPAA2_CMD_INIT(&cmd);
 
 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
 		    &rc_token);
 		if (error) {
 			device_printf(dev, "%s: failed to open resource "
 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
 			    error);
 			goto err_exit;
 		}
 		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
 		    &mac_token);
 		if (error) {
 			device_printf(sc->dev, "%s: failed to open DPMAC: "
 			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
 			    error);
 			goto close_rc;
 		}
 
 		if (link_state == LINK_STATE_UP ||
 		    link_state == LINK_STATE_DOWN) {
 			/* Update DPMAC link state. */
 			mac_link.supported = sc->mii->mii_media.ifm_media;
 			mac_link.advert = sc->mii->mii_media.ifm_media;
 			/* TODO: Obtain the rate from ifmedia_baudrate()? */
 			mac_link.rate = 1000;
 			mac_link.options =
 			    DPAA2_MAC_LINK_OPT_AUTONEG |
 			    DPAA2_MAC_LINK_OPT_PAUSE;
 			mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
 			mac_link.state_valid = true;
 
 			/* Inform DPMAC about link state. */
 			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
 			    &mac_link);
 			if (error) {
 				device_printf(sc->dev, "%s: failed to set DPMAC "
 				    "link state: id=%d, error=%d\n", __func__,
 				    sc->mac.dpmac_id, error);
 			}
 		}
 		(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 		    rc_token));
 	}
 
 	return;
 
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return;
 }
 
 /**
  * @brief Callback function to process media change request.
  */
 static int
 dpaa2_ni_media_change(if_t ifp)
 {
 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
 
 	DPNI_LOCK(sc);
 	if (sc->mii) {
 		mii_mediachg(sc->mii);
 		sc->media_status = sc->mii->mii_media.ifm_media;
 	} else if (sc->fixed_link) {
 		if_printf(ifp, "%s: can't change media in fixed mode\n",
 		    __func__);
 	}
 	DPNI_UNLOCK(sc);
 
 	return (0);
 }
 
 /**
  * @brief Callback function to process media status request.
  */
 static void
 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
 {
 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
 
 	DPNI_LOCK(sc);
 	if (sc->mii) {
 		mii_pollstat(sc->mii);
 		ifmr->ifm_active = sc->mii->mii_media_active;
 		ifmr->ifm_status = sc->mii->mii_media_status;
 	}
 	DPNI_UNLOCK(sc);
 }
 
 /**
  * @brief Callout function to check and update media status.
  */
 static void
 dpaa2_ni_media_tick(void *arg)
 {
 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
 
 	/* Check for media type change */
 	if (sc->mii) {
 		mii_tick(sc->mii);
 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
 			printf("%s: media type changed (ifm_media=%x)\n",
 			    __func__, sc->mii->mii_media.ifm_media);
 			dpaa2_ni_media_change(sc->ifp);
 		}
 	}
 
 	/* Schedule another timeout one second from now */
 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
 }
 
 static void
 dpaa2_ni_init(void *arg)
 {
 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
 	if_t ifp = sc->ifp;
 	device_t pdev = device_get_parent(sc->dev);
 	device_t dev = sc->dev;
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPNI_LOCK(sc);
 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
 		DPNI_UNLOCK(sc);
 		return;
 	}
 	DPNI_UNLOCK(sc);
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
 	if (error) {
 		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
 		    __func__, error);
 	}
 
 	DPNI_LOCK(sc);
 	if (sc->mii) {
 		mii_mediachg(sc->mii);
 	}
 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
 
 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
 	DPNI_UNLOCK(sc);
 
 	/* Force link-state update to initialize things. */
 	dpaa2_ni_miibus_statchg(dev);
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return;
 
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return;
 }
 
 static int
 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
 {
 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
 	struct dpaa2_ni_channel	*chan;
 	struct dpaa2_ni_tx_ring *tx;
 	uint32_t fqid;
 	bool found = false;
 	int chan_n = 0;
 
 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
 		return (0);
 
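+	/*
+	 * Look up the channel which received this flow, so that Tx for the
+	 * flow is presumably handled by the same channel (and CPU) as its
+	 * Rx traffic.
+	 */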
 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
 		fqid = m->m_pkthdr.flowid;
 		for (int i = 0; i < sc->chan_n; i++) {
 			chan = sc->channels[i];
 			for (int j = 0; j < chan->rxq_n; j++) {
 				if (fqid == chan->rx_queues[j].fqid) {
 					chan_n = chan->flowid;
 					found = true;
 					break;
 				}
 			}
 			if (found) {
 				break;
 			}
 		}
 	}
 	tx = DPAA2_TX_RING(sc, chan_n, 0);
 
 	TX_LOCK(tx);
 	dpaa2_ni_tx_locked(sc, tx, m);
 	TX_UNLOCK(tx);
 
 	return (0);
 }
 
 static void
 dpaa2_ni_qflush(if_t ifp)
 {
 	/* TODO: Find a way to drain Tx queues in QBMan. */
 	if_qflush(ifp);
 }
 
 static int
 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
 {
 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
 	struct ifreq *ifr = (struct ifreq *) data;
 	device_t pdev = device_get_parent(sc->dev);
 	device_t dev = sc->dev;
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint32_t changed = 0;
 	uint16_t rc_token, ni_token;
 	int mtu, error, rc = 0;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	switch (c) {
 	case SIOCSIFMTU:
 		DPNI_LOCK(sc);
 		mtu = ifr->ifr_mtu;
 		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
 			DPNI_UNLOCK(sc);
 			error = EINVAL;
 			goto close_ni;
 		}
 		if_setmtu(ifp, mtu);
 		DPNI_UNLOCK(sc);
 
 		/* Update maximum frame length. */
 		error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
 		    mtu + ETHER_HDR_LEN);
 		if (error) {
 			device_printf(dev, "%s: failed to update maximum frame "
 			    "length: error=%d\n", __func__, error);
 			goto close_ni;
 		}
 		break;
 	case SIOCSIFCAP:
 		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
 		if (changed & IFCAP_HWCSUM) {
 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
 				if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
 			} else {
 				if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
 			}
 		}
 		rc = dpaa2_ni_setup_if_caps(sc);
 		if (rc) {
 			device_printf(dev, "%s: failed to update interface "
 			    "capabilities: error=%d\n", __func__, rc);
 			rc = ENXIO;
 		}
 		break;
 	case SIOCSIFFLAGS:
 		DPNI_LOCK(sc);
 		if (if_getflags(ifp) & IFF_UP) {
 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 				changed = if_getflags(ifp) ^ sc->if_flags;
 				if (changed & IFF_PROMISC ||
 				    changed & IFF_ALLMULTI) {
 					rc = dpaa2_ni_setup_if_flags(sc);
 				}
 			} else {
 				DPNI_UNLOCK(sc);
 				dpaa2_ni_init(sc);
 				DPNI_LOCK(sc);
 			}
 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 			/* FIXME: Disable DPNI. See dpaa2_ni_init(). */
 		}
 
 		sc->if_flags = if_getflags(ifp);
 		DPNI_UNLOCK(sc);
 		break;
 	case SIOCADDMULTI:
 	case SIOCDELMULTI:
 		DPNI_LOCK(sc);
 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 			DPNI_UNLOCK(sc);
 			rc = dpaa2_ni_update_mac_filters(ifp);
 			if (rc) {
 				device_printf(dev, "%s: failed to update MAC "
 				    "filters: error=%d\n", __func__, rc);
 			}
 			DPNI_LOCK(sc);
 		}
 		DPNI_UNLOCK(sc);
 		break;
 	case SIOCGIFMEDIA:
 	case SIOCSIFMEDIA:
 		if (sc->mii) {
 			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
 		} else if (sc->fixed_link) {
 			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
 		}
 		break;
 	default:
 		rc = ether_ioctl(ifp, c, data);
 		break;
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 	return (rc);
 
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 static int
 dpaa2_ni_update_mac_filters(if_t ifp)
 {
 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
 	struct dpaa2_ni_mcaddr_ctx ctx;
 	device_t pdev = device_get_parent(sc->dev);
 	device_t dev = sc->dev;
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	/* Remove all multicast MAC filters. */
 	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
 	if (error) {
 		device_printf(dev, "%s: failed to clear multicast MAC filters: "
 		    "error=%d\n", __func__, error);
 		goto close_ni;
 	}
 
 	ctx.ifp = ifp;
 	ctx.error = 0;
 	ctx.nent = 0;
 
 	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
 
 	error = ctx.error;
 close_ni:
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return (error);
 }
 
 static u_int
 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
 {
 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
 	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
 	device_t pdev = device_get_parent(sc->dev);
 	device_t dev = sc->dev;
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	if (ctx->error != 0) {
 		return (0);
 	}
 
 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
 		DPAA2_CMD_INIT(&cmd);
 
 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
 		    &rc_token);
 		if (error) {
 			device_printf(dev, "%s: failed to open resource "
 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
 			    error);
 			return (0);
 		}
 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
 		    &ni_token);
 		if (error) {
 			device_printf(dev, "%s: failed to open network interface: "
 			    "id=%d, error=%d\n", __func__, dinfo->id, error);
 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 			    rc_token));
 			return (0);
 		}
 
 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
 		    LLADDR(sdl));
 
 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 		    ni_token));
 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 		    rc_token));
 
 		if (ctx->error != 0) {
 			device_printf(dev, "%s: can't add more then %d MAC "
 			    "addresses, switching to the multicast promiscuous "
 			    "mode\n", __func__, ctx->nent);
 
 			/* Enable multicast promiscuous mode. */
 			DPNI_LOCK(sc);
 			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
 			sc->if_flags |= IFF_ALLMULTI;
 			ctx->error = dpaa2_ni_setup_if_flags(sc);
 			DPNI_UNLOCK(sc);
 
 			return (0);
 		}
 		ctx->nent++;
 	}
 
 	return (1);
 }
 
 static void
 dpaa2_ni_intr(void *arg)
 {
 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
 	device_t pdev = device_get_parent(sc->dev);
 	device_t dev = sc->dev;
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint32_t status = ~0u; /* clear all IRQ status bits */
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto err_exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
 	    &status);
 	if (error) {
 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
 		    "error=%d\n", __func__, error);
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 err_exit:
 	return;
 }
 
 /**
  * @brief Callback to obtain a physical address of the only DMA segment mapped.
  */
 static void
 dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 {
 	if (error == 0) {
 		KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg));
 		*(bus_addr_t *) arg = segs[0].ds_addr;
 	}
 }
 
 /**
  * @brief Release new buffers to the buffer pool if necessary.
  */
 static void
 dpaa2_ni_bp_task(void *arg, int count)
 {
 	device_t bp_dev;
 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
 	struct dpaa2_bp_softc *bpsc;
 	struct dpaa2_bp_conf bp_conf;
 	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
 	int error;
 
 	/* There's only one buffer pool for now. */
 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
 	bpsc = device_get_softc(bp_dev);
 
 	/* Get state of the buffer pool. */
 	error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid,
 	    &bp_conf);
 	if (error) {
 		device_printf(sc->dev, "%s: failed to query buffer pool "
 		    "configuration: error=%d\n", __func__, error);
 		return;
 	}
 
 	/* Double the allocated number of buffers if less than 25% are free. */
 	if (bp_conf.free_bufn < (buf_num >> 2)) {
 		(void)dpaa2_ni_seed_buf_pool(sc, buf_num);
 		DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn);
 	}
 }
 
 /**
  * @brief Poll frames from a specific channel when CDAN is received.
  *
  * NOTE: To be called from the DPIO interrupt handler.
  */
 static void
 dpaa2_ni_poll(void *arg)
 {
 	struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg;
 	struct dpaa2_io_softc *iosc;
 	struct dpaa2_swp *swp;
 	struct dpaa2_ni_fq *fq;
 	int error, consumed = 0;
 
 	KASSERT(chan != NULL, ("%s: channel is NULL", __func__));
 
 	iosc = device_get_softc(chan->io_dev);
 	swp = iosc->swp;
 
 	do {
 		error = dpaa2_swp_pull(swp, chan->id, &chan->store,
 		    ETH_STORE_FRAMES);
 		if (error) {
 			device_printf(chan->ni_dev, "%s: failed to pull frames: "
 			    "chan_id=%d, error=%d\n", __func__, chan->id, error);
 			break;
 		}
 
 		/*
 		 * TODO: Combine frames from the same Rx queue returned as
 		 * a result of the current VDQ command into a chain (linked
 		 * with m_nextpkt) to amortize the FQ lock.
 		 */
 		error = dpaa2_ni_consume_frames(chan, &fq, &consumed);
 		if (error == ENOENT) {
 			break;
 		}
 		if (error == ETIMEDOUT) {
 			device_printf(chan->ni_dev, "%s: timeout to consume "
 			    "frames: chan_id=%d\n", __func__, chan->id);
 		}
 	} while (true);
 
 	/* Re-arm channel to generate CDAN. */
 	error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx);
 	if (error) {
 		device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, "
 		    "error=%d\n", __func__, chan->id, error);
 	}
 }
 
 /**
  * @brief Transmit mbufs.
  */
 static void
 dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
     struct mbuf *m)
 {
 	struct dpaa2_ni_fq *fq = tx->fq;
 	struct dpaa2_buf *buf;
 	struct dpaa2_fd fd;
 	struct mbuf *m_d;
 	bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT];
 	uint64_t idx;
 	void *pidx;
 	int error, rc, txnsegs;
 
 	/* Obtain an index of a Tx buffer. */
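+	/*
+	 * NOTE: Indexes of free Tx buffers are kept in a single-consumer
+	 *	 buf_ring and are passed through it as pointer-sized values.
+	 */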
 	pidx = buf_ring_dequeue_sc(tx->idx_br);
 	if (__predict_false(pidx == NULL)) {
 		/* TODO: Do not give up easily. */
 		m_freem(m);
 		return;
 	} else {
 		idx = (uint64_t) pidx;
 		buf = &tx->buf[idx];
 		buf->tx.m = m;
-		buf->tx.idx = idx;
 		buf->tx.sgt_paddr = 0;
 	}
 
 	/* Load mbuf to transmit. */
 	error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m,
 	    txsegs, &txnsegs, BUS_DMA_NOWAIT);
 	if (__predict_false(error != 0)) {
 		/* Too many fragments, trying to defragment... */
 		m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
 		if (m_d == NULL) {
 			device_printf(sc->dev, "%s: mbuf "
 			    "defragmentation failed\n", __func__);
 			fq->chan->tx_dropped++;
 			goto err;
 		}
 
 		buf->tx.m = m = m_d;
 		error = bus_dmamap_load_mbuf_sg(buf->tx.dmat,
 		    buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT);
 		if (__predict_false(error != 0)) {
 			device_printf(sc->dev, "%s: failed to load "
 			    "mbuf: error=%d\n", __func__, error);
 			fq->chan->tx_dropped++;
 			goto err;
 		}
 	}
 
 	/* Build frame descriptor. */
 	error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd);
 	if (__predict_false(error != 0)) {
 		device_printf(sc->dev, "%s: failed to build frame "
 		    "descriptor: error=%d\n", __func__, error);
 		fq->chan->tx_dropped++;
 		goto err_unload;
 	}
 
+	/* Make the frame visible to the hardware before enqueueing it. */
+	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_PREWRITE);
+
 	/* TODO: Enqueue several frames in a single command. */
 	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
 		/* TODO: Return error codes instead of # of frames. */
 		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid,
 		    &fd, 1);
 		if (rc == 1) {
 			break;
 		}
 	}
-
-	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap,
-	    BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
-	    BUS_DMASYNC_PREWRITE);
 
 	if (rc != 1) {
 		fq->chan->tx_dropped++;
 		goto err_unload;
 	} else {
 		fq->chan->tx_frames++;
 	}
 	return;
 
 err_unload:
 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
 	if (buf->tx.sgt_paddr != 0) {
 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
 	}
 err:
 	m_freem(buf->tx.m);
 	buf_ring_enqueue(tx->idx_br, pidx);
 }
 
 static int
 dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
     uint32_t *consumed)
 {
 	struct dpaa2_ni_fq *fq = NULL;
 	struct dpaa2_dq *dq;
 	struct dpaa2_fd *fd;
 	int rc, frames = 0;
 
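+	/*
+	 * NOTE: Judging by the loop below, dpaa2_ni_chan_storage_next() is
+	 *	 expected to return EINPROGRESS while more dequeue entries
+	 *	 remain in the store, and EALREADY or ENOENT once the last
+	 *	 (or an empty) entry has been reached.
+	 */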
 	do {
 		rc = dpaa2_ni_chan_storage_next(chan, &dq);
 		if (rc == EINPROGRESS) {
 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
 				fd = &dq->fdr.fd;
 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
 				fq->consume(chan, fq, fd);
 				frames++;
 			}
 		} else if (rc == EALREADY || rc == ENOENT) {
 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
 				fd = &dq->fdr.fd;
 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
 				fq->consume(chan, fq, fd);
 				frames++;
 			}
 			break;
 		} else {
 			KASSERT(1 == 0, ("%s: should not reach here", __func__));
 		}
 	} while (true);
 
 	KASSERT(chan->store_idx < chan->store_sz,
 	    ("channel store idx >= size: store_idx=%d, store_sz=%d",
 	    chan->store_idx, chan->store_sz));
 
 	/*
 	 * A dequeue operation pulls frames from a single queue into the store.
 	 * Return the frame queue and a number of consumed frames as an output.
 	 */
 	if (src != NULL)
 		*src = fq;
 	if (consumed != NULL)
 		*consumed = frames;
 
 	return (rc);
 }
 
 /**
  * @brief Receive frames.
  */
 static int
 dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
     struct dpaa2_fd *fd)
 {
 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
 	struct dpaa2_bp_softc *bpsc;
 	struct dpaa2_buf *buf;
+	struct dpaa2_fa *fa;
 	if_t ifp = sc->ifp;
 	struct mbuf *m;
 	device_t bp_dev;
 	bus_addr_t paddr = (bus_addr_t) fd->addr;
 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
 	void *buf_data;
-	int buf_idx, buf_len;
-	int error, released_n = 0;
+	int buf_len, error, released_n = 0;
 
-	/*
-	 * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
-	 * physical address.
-	 */
-	buf_idx = dpaa2_ni_fd_buf_idx(fd);
-	buf = &sc->buf[buf_idx];
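+	/*
+	 * NOTE: The frame annotation (struct dpaa2_fa) was written to the
+	 *	 beginning of the buffer when it was seeded, so the original
+	 *	 dpaa2_buf can be recovered from the frame's physical address
+	 *	 via the direct map.
+	 */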
+	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
+	buf = fa->buf;
 
+	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
-	if (paddr != buf->rx.paddr) {
+	if (__predict_false(paddr != buf->rx.paddr)) {
 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
 		    __func__, paddr, buf->rx.paddr);
 	}
 
 	/* Update statistics. */
 	switch (dpaa2_ni_fd_err(fd)) {
 	case 1: /* Enqueue rejected by QMan */
 		sc->rx_enq_rej_frames++;
 		break;
 	case 2: /* QMan IEOI error */
 		sc->rx_ieoi_err_frames++;
 		break;
 	default:
 		break;
 	}
 	switch (dpaa2_ni_fd_format(fd)) {
 	case DPAA2_FD_SINGLE:
 		sc->rx_single_buf_frames++;
 		break;
 	case DPAA2_FD_SG:
 		sc->rx_sg_buf_frames++;
 		break;
 	default:
 		break;
 	}
 
 	m = buf->rx.m;
 	buf->rx.m = NULL;
-	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap,
-	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_POSTREAD);
 	bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
 
 	buf_len = dpaa2_ni_fd_data_len(fd);
 	buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd);
 
 	/* Prefetch mbuf data. */
 	__builtin_prefetch(buf_data);
 
 	/* Write value to mbuf (avoid reading). */
 	m->m_flags |= M_PKTHDR;
 	m->m_data = buf_data;
 	m->m_len = buf_len;
 	m->m_pkthdr.len = buf_len;
 	m->m_pkthdr.rcvif = ifp;
 	m->m_pkthdr.flowid = fq->fqid;
 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
 
 	if_input(ifp, m);
 
 	/* Keep the buffer to be recycled. */
-	chan->recycled[chan->recycled_n++] = paddr;
+	chan->recycled[chan->recycled_n++] = buf;
 	KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD,
 	    ("%s: too many buffers to recycle", __func__));
 
 	/* Re-seed and release recycled buffers back to the pool. */
 	if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
 		/* Release new buffers to the pool if needed. */
 		taskqueue_enqueue(sc->bp_taskq, &sc->bp_task);
 
 		for (int i = 0; i < chan->recycled_n; i++) {
-			paddr = chan->recycled[i];
-
-			/* Parse ADDR_TOK of the recycled buffer. */
-			buf_idx = (paddr >> DPAA2_NI_BUF_IDX_SHIFT)
-			    & DPAA2_NI_BUF_IDX_MASK;
-			buf = &sc->buf[buf_idx];
+			buf = chan->recycled[i];
 
 			/* Seed recycled buffer. */
-			error = dpaa2_ni_seed_rxbuf(sc, buf, buf_idx);
+			error = dpaa2_ni_seed_rxbuf(sc, buf);
 			KASSERT(error == 0, ("%s: failed to seed recycled "
 			    "buffer: error=%d", __func__, error));
 			if (__predict_false(error != 0)) {
 				device_printf(sc->dev, "%s: failed to seed "
 				    "recycled buffer: error=%d\n", __func__,
 				    error);
 				continue;
 			}
 
 			/* Prepare buffer to be released in a single command. */
 			released[released_n++] = buf->rx.paddr;
 		}
 
 		/* There's only one buffer pool for now. */
 		bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
 		bpsc = device_get_softc(bp_dev);
 
 		error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid,
 		    released, released_n);
 		if (__predict_false(error != 0)) {
 			device_printf(sc->dev, "%s: failed to release buffers "
 			    "to the pool: error=%d\n", __func__, error);
 			return (error);
 		}
 
 		/* Be ready to recycle the next portion of the buffers. */
 		chan->recycled_n = 0;
 	}
 
 	return (0);
 }
 
 /**
  * @brief Receive Rx error frames.
  */
 static int
 dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
     struct dpaa2_fd *fd)
 {
 	device_t bp_dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
 	struct dpaa2_bp_softc *bpsc;
 	struct dpaa2_buf *buf;
+	struct dpaa2_fa *fa;
 	bus_addr_t paddr = (bus_addr_t) fd->addr;
-	int buf_idx, error;
-	
-	/*
-	 * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
-	 * physical address.
-	 */
-	buf_idx = dpaa2_ni_fd_buf_idx(fd);
-	buf = &sc->buf[buf_idx];
+	int error;
+
+	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
+	buf = fa->buf;
 
+	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
-	if (paddr != buf->rx.paddr) {
+	if (__predict_false(paddr != buf->rx.paddr)) {
 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
 		    __func__, paddr, buf->rx.paddr);
 	}
 
 	/* There's only one buffer pool for now. */
 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
 	bpsc = device_get_softc(bp_dev);
 
 	/* Release buffer to QBMan buffer pool. */
 	error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1);
 	if (error != 0) {
 		device_printf(sc->dev, "%s: failed to release frame buffer to "
 		    "the pool: error=%d\n", __func__, error);
 		return (error);
 	}
 
 	return (0);
 }
 
 /**
  * @brief Receive Tx confirmation frames.
  */
 static int
 dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
     struct dpaa2_fd *fd)
 {
-	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
-	struct dpaa2_ni_channel	*buf_chan;
 	struct dpaa2_ni_tx_ring *tx;
 	struct dpaa2_buf *buf;
-	bus_addr_t paddr = (bus_addr_t) (fd->addr & BUF_MAXADDR_49BIT);
-	uint64_t buf_idx;
-	int chan_idx, tx_idx;
-
-	/*
-	 * Get channel, Tx ring and buffer indexes from the ADDR_TOK bits
-	 * (not used by QBMan) of the physical address.
-	 */
-	chan_idx = dpaa2_ni_fd_chan_idx(fd);
-	tx_idx = dpaa2_ni_fd_tx_idx(fd);
-	buf_idx = (uint64_t) dpaa2_ni_fd_txbuf_idx(fd);
-
-	KASSERT(tx_idx < DPAA2_NI_MAX_TCS, ("%s: invalid Tx ring index",
-	    __func__));
-	KASSERT(buf_idx < DPAA2_NI_BUFS_PER_TX, ("%s: invalid Tx buffer index",
-	    __func__));
+	struct dpaa2_fa *fa;
+	bus_addr_t paddr = (bus_addr_t) fd->addr;
 
-	buf_chan = sc->channels[chan_idx];
-	tx = &buf_chan->txc_queue.tx_rings[tx_idx];
-	buf = &tx->buf[buf_idx];
+	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
+	buf = fa->buf;
+	tx = fa->tx;
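+	/*
+	 * NOTE: fa->tx and fa->buf are presumably populated together with
+	 *	 fa->magic when the Tx frame descriptor is built for this
+	 *	 buffer.
+	 */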
 
+	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
 	if (paddr != buf->tx.paddr) {
 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
 		    __func__, paddr, buf->tx.paddr);
 	}
 
-
+	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_POSTWRITE);
 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
-	if (buf->tx.sgt_paddr != 0)
-		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
+	bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
 	m_freem(buf->tx.m);
 
 	/* Return Tx buffer index back to the ring. */
-	buf_ring_enqueue(tx->idx_br, (void *) buf_idx);
+	buf_ring_enqueue(tx->idx_br, (void *) buf->tx.idx);
 
 	return (0);
 }
 
 /**
  * @brief Compare versions of the DPAA2 network interface API.
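+ *
+ * Returns a positive value if the DPNI API version is newer than the given
+ * one, zero if the versions are equal and a negative value otherwise.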
  */
 static int
 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
     uint16_t minor)
 {
 	if (sc->api_major == major)
 		return sc->api_minor - minor;
 	return sc->api_major - major;
 }
 
 /**
  * @brief Allocate Rx buffers visible to QBMan and release them to the pool.
  */
 static int
 dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn)
 {
 	device_t bp_dev;
 	struct dpaa2_bp_softc *bpsc;
 	struct dpaa2_buf *buf;
 	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
 	const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num);
 	int i, error, bufn = 0;
 
 	KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not "
 	    "created?", __func__));
 
 	/* There's only one buffer pool for now. */
 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
 	bpsc = device_get_softc(bp_dev);
 
 	/* Limit # of buffers released to the pool. */
 	if (allocated + seedn > DPAA2_NI_BUFS_MAX)
 		seedn = DPAA2_NI_BUFS_MAX - allocated;
 
 	/* Release "seedn" buffers to the pool. */
 	for (i = allocated; i < (allocated + seedn); i++) {
 		/* Release a full batch of buffers to the pool at once. */
 		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
 			error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
 			    bpsc->attr.bpid, paddr, bufn);
 			if (error) {
 				device_printf(sc->dev, "%s: failed to release "
 				    "buffers to the pool (1)\n", __func__);
 				return (error);
 			}
 			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
 			bufn = 0;
 		}
 
 		buf = &sc->buf[i];
 		buf->type = DPAA2_BUF_RX;
 		buf->rx.m = NULL;
 		buf->rx.dmap = NULL;
 		buf->rx.paddr = 0;
 		buf->rx.vaddr = NULL;
-		error = dpaa2_ni_seed_rxbuf(sc, buf, i);
-		if (error)
+		error = dpaa2_ni_seed_rxbuf(sc, buf);
+		if (error != 0) {
 			break;
+		}
 		paddr[bufn] = buf->rx.paddr;
 		bufn++;
 	}
 
 	/* Release if there are buffers left. */
 	if (bufn > 0) {
 		error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
 		    bpsc->attr.bpid, paddr, bufn);
 		if (error) {
 			device_printf(sc->dev, "%s: failed to release "
 			    "buffers to the pool (2)\n", __func__);
 			return (error);
 		}
 		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
 	}
 
 	return (0);
 }
 
 /**
  * @brief Prepare Rx buffer to be released to the buffer pool.
  */
 static int
-dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
+dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf)
 {
 	struct mbuf *m;
+	struct dpaa2_fa *fa;
 	bus_dmamap_t dmap;
 	bus_dma_segment_t segs;
 	int error, nsegs;
 
 	KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not "
 	    "allocated?", __func__));
 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
 
 	/* Keep DMA tag for this buffer. */
 	if (__predict_false(buf->rx.dmat == NULL))
 		buf->rx.dmat = sc->bp_dmat;
 
 	/* Create a DMA map for the given buffer if it doesn't exist yet. */
 	if (__predict_false(buf->rx.dmap == NULL)) {
 		error = bus_dmamap_create(buf->rx.dmat, 0, &dmap);
 		if (error) {
 			device_printf(sc->dev, "%s: failed to create DMA map "
-			    "for buffer: buf_idx=%d, error=%d\n", __func__,
-			    idx, error);
+			    "for buffer: error=%d\n", __func__, error);
 			return (error);
 		}
 		buf->rx.dmap = dmap;
 	}
 
 	/* Allocate mbuf if needed. */
 	if (__predict_false(buf->rx.m == NULL)) {
 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE);
 		if (__predict_false(m == NULL)) {
 			device_printf(sc->dev, "%s: failed to allocate mbuf for "
 			    "buffer\n", __func__);
 			return (ENOMEM);
 		}
 		m->m_len = m->m_ext.ext_size;
 		m->m_pkthdr.len = m->m_ext.ext_size;
 		buf->rx.m = m;
 	} else
 		m = buf->rx.m;
 
 	error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap,
 	    m, &segs, &nsegs, BUS_DMA_NOWAIT);
 	KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs));
 	KASSERT(error == 0, ("failed to map mbuf: error=%d", error));
 	if (__predict_false(error != 0 || nsegs != 1)) {
 		device_printf(sc->dev, "%s: failed to map mbuf: error=%d, "
 		    "nsegs=%d\n", __func__, error, nsegs);
 		bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
 		m_freem(m);
 		buf->rx.m = NULL; /* don't leave a dangling mbuf pointer */
 		return (error != 0 ? error : EFAULT);
 	}
 	buf->rx.paddr = segs.ds_addr;
 	buf->rx.vaddr = m->m_data;
 
-	/*
-	 * Write buffer index to the ADDR_TOK (bits 63-49) which is not used by
-	 * QBMan and is supposed to assist in physical to virtual address
-	 * translation.
-	 *
-	 * NOTE: "lowaddr" and "highaddr" of the window which cannot be accessed
-	 * 	 by QBMan must be configured in the DMA tag accordingly.
-	 */
-	buf->rx.paddr =
-	    ((uint64_t)(idx & DPAA2_NI_BUF_IDX_MASK) <<
-		DPAA2_NI_BUF_IDX_SHIFT) |
-	    (buf->rx.paddr & DPAA2_NI_BUF_ADDR_MASK);
+	/* Populate frame annotation for future use. */
+	fa = (struct dpaa2_fa *) m->m_data;
+	fa->magic = DPAA2_MAGIC;
+	fa->buf = buf;
+
+	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_PREREAD);
 
 	return (0);
 }
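On the Rx side the annotation is written at m->m_data, i.e. into the headroom which the buffer layout reserves in front of the received frame. The matching recovery happens on the Rx consume path, which is not part of this hunk; a hedged sketch, assuming the hardware leaves the software annotation area intact:

	struct dpaa2_fa *fa;
	struct dpaa2_buf *buf;

	fa = (struct dpaa2_fa *)PHYS_TO_DMAP((bus_addr_t)fd->addr);
	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
	buf = fa->buf;	/* frame data starts at the FD's offset field */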
 
 /**
  * @brief Prepare Tx buffer to be added to the Tx ring.
  */
 static int
-dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
+dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf)
 {
 	bus_dmamap_t dmap;
 	int error;
 
 	KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?",
 	    __func__));
 	KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?",
 	    __func__));
 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
 
 	/* Keep DMA tags for this buffer. */
 	if (__predict_true(buf->tx.dmat == NULL))
 		buf->tx.dmat = sc->tx_dmat;
 	if (__predict_true(buf->tx.sgt_dmat == NULL))
 		buf->tx.sgt_dmat = sc->sgt_dmat;
 
 	/* Create a DMA map for the given buffer if it doesn't exist yet. */
 	if (__predict_true(buf->tx.dmap == NULL)) {
 		error = bus_dmamap_create(buf->tx.dmat, 0, &dmap);
 		if (error != 0) {
 			device_printf(sc->dev, "%s: failed to create "
 			    "Tx DMA map: error=%d\n", __func__, error);
 			return (error);
 		}
 		buf->tx.dmap = dmap;
 	}
 
 	/* Allocate a buffer to store the scatter/gather table. */
 	if (__predict_true(buf->tx.sgt_vaddr == NULL)) {
 		error = bus_dmamem_alloc(buf->tx.sgt_dmat,
 		    &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT,
 		    &buf->tx.sgt_dmap);
 		if (error != 0) {
 			device_printf(sc->dev, "%s: failed to allocate "
 			    "S/G table: error=%d\n", __func__, error);
 			return (error);
 		}
 	}
 
 	return (0);
 }
 
 /**
  * @brief Allocate channel storage visible to QBMan.
  */
 static int
 dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
     struct dpaa2_ni_channel *chan)
 {
 	struct dpaa2_buf *buf = &chan->store;
 	int error;
 
 	KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not "
 	    "allocated?", __func__));
 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer",
 	    __func__));
 
 	/* Keep DMA tag for this buffer. */
 	if (__predict_false(buf->store.dmat == NULL)) {
 		buf->store.dmat = sc->st_dmat;
 	}
 
 	if (__predict_false(buf->store.vaddr == NULL)) {
 		error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
 		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
 		if (error) {
 			device_printf(sc->dev, "%s: failed to allocate channel "
 			    "storage\n", __func__);
 			return (error);
 		}
 	}
 
 	if (__predict_false(buf->store.paddr == 0)) {
 		error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
 		    buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
 		    &buf->store.paddr, BUS_DMA_NOWAIT);
 		if (error) {
 			device_printf(sc->dev, "%s: failed to map channel "
 			    "storage\n", __func__);
 			return (error);
 		}
 	}
 
 	chan->store_sz = ETH_STORE_FRAMES;
 	chan->store_idx = 0;
 
 	return (0);
 }
 
 /**
  * @brief Build a DPAA2 frame descriptor.
  */
 static int
 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
     struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
     struct dpaa2_fd *fd)
 {
-	struct dpaa2_ni_channel	*chan = tx->fq->chan;
 	struct dpaa2_sg_entry *sgt;
+	struct dpaa2_fa *fa;
 	int i, error;
 
 	KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
 	    "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
 	KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
 	    __func__));
 
 	/* Reset frame descriptor fields. */
 	memset(fd, 0, sizeof(*fd));
 
 	if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) {
 		/* Populate S/G table. */
 		sgt = (struct dpaa2_sg_entry *)((uint8_t *)buf->tx.sgt_vaddr +
 		    sc->tx_data_off); /* byte offset, to match FD[OFFSET] */
 		for (i = 0; i < txnsegs; i++) {
 			sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
 			sgt[i].len = (uint32_t) txsegs[i].ds_len;
 			sgt[i].offset_fmt = 0u;
 		}
 		sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
 
 		KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
 		    __func__, buf->tx.sgt_paddr));
 
 		/* Load S/G table. */
 		error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
 		    buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
 		    &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
 		if (__predict_false(error != 0)) {
 			device_printf(sc->dev, "%s: failed to map S/G table: "
 			    "error=%d\n", __func__, error);
 			return (error);
 		}
+
 		buf->tx.paddr = buf->tx.sgt_paddr;
 		buf->tx.vaddr = buf->tx.sgt_vaddr;
 		sc->tx_sg_frames++; /* for sysctl(9) */
 	} else {
 		return (EINVAL);
 	}
 
-	fd->addr =
-	    ((uint64_t)(chan->flowid & DPAA2_NI_BUF_CHAN_MASK) <<
-		DPAA2_NI_BUF_CHAN_SHIFT) |
-	    ((uint64_t)(tx->txid & DPAA2_NI_TX_IDX_MASK) <<
-		DPAA2_NI_TX_IDX_SHIFT) |
-	    ((uint64_t)(buf->tx.idx & DPAA2_NI_TXBUF_IDX_MASK) <<
-		DPAA2_NI_TXBUF_IDX_SHIFT) |
-	    (buf->tx.paddr & DPAA2_NI_BUF_ADDR_MASK);
+	fa = (struct dpaa2_fa *) buf->tx.sgt_vaddr;
+	fa->magic = DPAA2_MAGIC;
+	fa->buf = buf;
+	fa->tx = tx;
 
+	fd->addr = buf->tx.paddr;
 	fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
 	fd->bpid_ivp_bmt = 0;
 	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off; /* S/G format + offset */
 	fd->ctrl = 0x00800000u; /* PTA (pass-through annotation) bit */
 
 	return (0);
 }
 
 static int
 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
 {
 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
 }
 
 static uint32_t
 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
 {
 	if (dpaa2_ni_fd_short_len(fd))
 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
 
 	return (fd->data_length);
 }
 
-static int
-dpaa2_ni_fd_chan_idx(struct dpaa2_fd *fd)
-{
-	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_CHAN_SHIFT) &
-	    DPAA2_NI_BUF_CHAN_MASK);
-}
-
-static int
-dpaa2_ni_fd_buf_idx(struct dpaa2_fd *fd)
-{
-	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_IDX_SHIFT) &
-	    DPAA2_NI_BUF_IDX_MASK);
-}
-
-static int
-dpaa2_ni_fd_tx_idx(struct dpaa2_fd *fd)
-{
-	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TX_IDX_SHIFT) &
-	    DPAA2_NI_TX_IDX_MASK);
-}
-
-static int
-dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *fd)
-{
-	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TXBUF_IDX_SHIFT) &
-	    DPAA2_NI_TXBUF_IDX_MASK);
-}
-
 static int
 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
 {
 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
 }
 
 static bool
 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
 {
 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
 	    & DPAA2_NI_FD_SL_MASK) == 1);
 }
 
 static int
 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
 {
 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
 }
 
 /**
  * @brief Collect statistics of the network interface.
  */
 static int
 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
 {
 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
 	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
 	device_t pdev = device_get_parent(sc->dev);
 	device_t dev = sc->dev;
 	device_t child = dev;
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpaa2_cmd cmd;
 	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
 	uint64_t result = 0;
 	uint16_t rc_token, ni_token;
 	int error;
 
 	DPAA2_CMD_INIT(&cmd);
 
 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open resource container: "
 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
 		goto exit;
 	}
 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
 	if (error) {
 		device_printf(dev, "%s: failed to open network interface: "
 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
 		goto close_rc;
 	}
 
 	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
 	if (!error) {
 		result = cnt[stat->cnt];
 	}
 
 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
 close_rc:
 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
 exit:
 	return (sysctl_handle_64(oidp, &result, 0, req));
 }
 
 static int
 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
 {
 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
 
 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
 }
 
 static int
 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
 {
 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
 
 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
 }
 
 static int
 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
 {
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	uint64_t key = 0;
 	int i;
 
 	if (sc->attr.num.queues <= 1) {
 		return (EOPNOTSUPP);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
 		if (dist_fields[i].rxnfc_field & flags) {
 			key |= dist_fields[i].id;
 		}
 	}
 
 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
 }
 
 /**
  * @brief Set the Rx distribution (hash or flow classification) key.
  *
  * "flags" is a combination of RXH_ bits.
  */
 static int
 dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
 {
 	device_t pdev = device_get_parent(dev);
 	device_t child = dev;
 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
 	struct dpkg_profile_cfg cls_cfg;
 	struct dpkg_extract *key;
 	struct dpaa2_buf *buf = &sc->rxd_kcfg;
 	struct dpaa2_cmd cmd;
 	uint16_t rc_token, ni_token;
 	int i, error = 0;
 
 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
 	    __func__));
 	if (__predict_true(buf->store.dmat == NULL)) {
 		buf->store.dmat = sc->rxd_dmat;
 	}
 
 	memset(&cls_cfg, 0, sizeof(cls_cfg));
 
 	/* Configure extracts according to the given flags. */
 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
 		key = &cls_cfg.extracts[cls_cfg.num_extracts];
 
 		if (!(flags & dist_fields[i].id)) {
 			continue;
 		}
 
 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
 			device_printf(dev, "%s: failed to add key extraction "
 			    "rule\n", __func__);
 			return (E2BIG);
 		}
 
 		key->type = DPKG_EXTRACT_FROM_HDR;
 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
 		key->extract.from_hdr.field = dist_fields[i].cls_field;
 		cls_cfg.num_extracts++;
 	}
 
 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
 	if (error != 0) {
 		device_printf(dev, "%s: failed to allocate a buffer for Rx "
 		    "traffic distribution key configuration\n", __func__);
 		return (error);
 	}
 
 	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr);
 	if (error != 0) {
 		device_printf(dev, "%s: failed to prepare key configuration: "
 		    "error=%d\n", __func__, error);
 		return (error);
 	}
 
 	/* Prepare for setting the Rx dist. */
 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
 	    buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb,
 	    &buf->store.paddr, BUS_DMA_NOWAIT);
 	if (error != 0) {
 		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
 		    "traffic distribution key configuration\n", __func__);
 		return (error);
 	}
 
 	if (type == DPAA2_NI_DIST_MODE_HASH) {
 		DPAA2_CMD_INIT(&cmd);
 
 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
 		    &rc_token);
 		if (error) {
 			device_printf(dev, "%s: failed to open resource "
 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
 			    error);
 			goto err_exit;
 		}
 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
 		    &ni_token);
 		if (error) {
 			device_printf(dev, "%s: failed to open network "
 			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
 			    error);
 			goto close_rc;
 		}
 
 		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
 		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH,
 		    buf->store.paddr);
 		if (error != 0) {
 			device_printf(dev, "%s: failed to set distribution mode "
 			    "and size for the traffic class\n", __func__);
 		}
 
 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 		    ni_token));
 close_rc:
 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 		    rc_token));
 	}
 
 err_exit:
 	return (error);
 }
 
 /**
  * @brief Prepares extract parameters.
  *
  * cfg:		Key Generation profile to prepare the configuration from.
  * key_cfg_buf:	Zeroed 256-byte buffer to write the prepared key
  *		configuration to before mapping it for DMA.
  */
 static int
 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
 {
 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
 	struct dpni_dist_extract *extr;
 	int i, j;
 
 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
 		return (EINVAL);
 
 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
 	dpni_ext->num_extracts = cfg->num_extracts;
 
 	for (i = 0; i < cfg->num_extracts; i++) {
 		extr = &dpni_ext->extracts[i];
 
 		switch (cfg->extracts[i].type) {
 		case DPKG_EXTRACT_FROM_HDR:
 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
 			extr->efh_type =
 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
 			extr->size = cfg->extracts[i].extract.from_hdr.size;
 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
 			extr->field = cfg->extracts[i].extract.from_hdr.field;
 			extr->hdr_index =
 				cfg->extracts[i].extract.from_hdr.hdr_index;
 			break;
 		case DPKG_EXTRACT_FROM_DATA:
 			extr->size = cfg->extracts[i].extract.from_data.size;
 			extr->offset =
 				cfg->extracts[i].extract.from_data.offset;
 			break;
 		case DPKG_EXTRACT_FROM_PARSE:
 			extr->size = cfg->extracts[i].extract.from_parse.size;
 			extr->offset =
 				cfg->extracts[i].extract.from_parse.offset;
 			break;
 		default:
 			return (EINVAL);
 		}
 
 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
 
 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
 			extr->masks[j].offset =
 				cfg->extracts[i].masks[j].offset;
 		}
 	}
 
 	return (0);
 }
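For illustration, a hedged sketch of building a two-extract profile which hashes on IPv4 source and destination addresses; NET_PROT_IP, NH_FLD_IP_SRC and NH_FLD_IP_DST are assumed to be provided by dpaa2_ni_dpkg.h:

	struct dpkg_profile_cfg cfg;
	uint8_t blob[256];	/* the zeroed 256 bytes mentioned above */
	int error;

	memset(&cfg, 0, sizeof(cfg));
	memset(blob, 0, sizeof(blob));

	cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;

	cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
	cfg.extracts[1].extract.from_hdr.prot = NET_PROT_IP;
	cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
	cfg.extracts[1].extract.from_hdr.field = NH_FLD_IP_DST;

	cfg.num_extracts = 2;
	error = dpaa2_ni_prepare_key_cfg(&cfg, blob);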
 
 /**
  * @brief Obtain the next dequeue response from the channel storage.
  */
 static int
 dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq)
 {
 	struct dpaa2_buf *buf = &chan->store;
 	struct dpaa2_dq *msgs = buf->store.vaddr;
 	struct dpaa2_dq *msg = &msgs[chan->store_idx];
 	int rc = EINPROGRESS;
 
 	chan->store_idx++;
 
 	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
 		rc = EALREADY; /* VDQ command is expired */
 		chan->store_idx = 0;
 		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME))
 			msg = NULL; /* Null response, FD is invalid */
 	}
 	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
 		rc = ENOENT; /* FQ is empty */
 		chan->store_idx = 0;
 	}
 
 	if (dq != NULL)
 		*dq = msg;
 
 	return (rc);
 }
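The return codes encode the state of the volatile dequeue: EINPROGRESS means more responses may follow, EALREADY signals an expired VDQ command (with a NULL message if the final response carries no valid frame), and ENOENT reports an empty FQ. A hedged sketch of the polling loop a caller might run, assuming the FQ pointer was planted into the FQ context word (fqd_ctx) beforehand:

	struct dpaa2_dq *dq;
	struct dpaa2_ni_fq *fq;
	int rc;

	do {
		rc = dpaa2_ni_chan_storage_next(chan, &dq);
		if (dq != NULL &&
		    (dq->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME) != 0) {
			fq = (struct dpaa2_ni_fq *)(uintptr_t)
			    dq->fdr.desc.fqd_ctx;
			fq->consume(chan, fq, &dq->fdr.fd);
		}
	} while (rc == EINPROGRESS);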
 
 static device_method_t dpaa2_ni_methods[] = {
 	/* Device interface */
 	DEVMETHOD(device_probe,		dpaa2_ni_probe),
 	DEVMETHOD(device_attach,	dpaa2_ni_attach),
 	DEVMETHOD(device_detach,	dpaa2_ni_detach),
 
 	/* mii via memac_mdio */
 	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),
 
 	DEVMETHOD_END
 };
 
 static driver_t dpaa2_ni_driver = {
 	"dpaa2_ni",
 	dpaa2_ni_methods,
 	sizeof(struct dpaa2_ni_softc),
 };
 
 DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
 DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);
 
 MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
 #ifdef DEV_ACPI
 MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
 #endif
 #ifdef FDT
 MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
 #endif
diff --git a/sys/dev/dpaa2/dpaa2_ni.h b/sys/dev/dpaa2/dpaa2_ni.h
index 00215fdcefe5..523775a9365c 100644
--- a/sys/dev/dpaa2/dpaa2_ni.h
+++ b/sys/dev/dpaa2/dpaa2_ni.h
@@ -1,602 +1,602 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause
  *
  * Copyright © 2021-2022 Dmitry Salychev
  * Copyright © 2022 Mathew McBride
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #ifndef	_DPAA2_NI_H
 #define	_DPAA2_NI_H
 
 #include <sys/rman.h>
 #include <sys/bus.h>
 #include <sys/queue.h>
 #include <sys/taskqueue.h>
 #include <sys/mbuf.h>
 #include <sys/param.h>
 #include <sys/socket.h>
 #include <sys/buf_ring.h>
 #include <sys/proc.h>
 #include <sys/mutex.h>
 
 #include <net/if.h>
 #include <net/ethernet.h>
 #include <net/if_media.h>
 
 #include "dpaa2_types.h"
 #include "dpaa2_mcp.h"
 #include "dpaa2_swp.h"
 #include "dpaa2_io.h"
 #include "dpaa2_mac.h"
 #include "dpaa2_ni_dpkg.h"
 
 /* Name of the DPAA2 network interface. */
 #define DPAA2_NI_IFNAME		"dpni"
 
 /* Maximum resources per DPNI: 16 DPIOs + 16 DPCONs + 1 DPBP + 1 DPMCP. */
 #define DPAA2_NI_MAX_RESOURCES	34
 
 #define DPAA2_NI_MSI_COUNT	1  /* MSIs per DPNI */
 #define DPAA2_NI_MAX_CHANNELS	16 /* to distribute ingress traffic to cores */
 #define DPAA2_NI_MAX_TCS	8  /* traffic classes per DPNI */
 #define DPAA2_NI_MAX_POOLS	8  /* buffer pools per DPNI */
 
 /* Maximum number of Rx buffers. */
 #define DPAA2_NI_BUFS_INIT	(50u * DPAA2_SWP_BUFS_PER_CMD)
 #define DPAA2_NI_BUFS_MAX	(1 << 15) /* 15 bits for buffer index max. */
 
 /* Maximum number of buffers allocated per Tx ring. */
 #define DPAA2_NI_BUFS_PER_TX	(1 << 7)
 #define DPAA2_NI_MAX_BPTX	(1 << 8) /* 8 bits for buffer index max. */
 
 /* Number of the DPNI statistics counters. */
 #define DPAA2_NI_STAT_COUNTERS	7u
 #define	DPAA2_NI_STAT_SYSCTLS	9u
 
 /* Error and status bits in the frame annotation status word. */
 #define DPAA2_NI_FAS_DISC	0x80000000 /* debug frame */
 #define DPAA2_NI_FAS_MS		0x40000000 /* MACSEC frame */
 #define DPAA2_NI_FAS_PTP	0x08000000
 #define DPAA2_NI_FAS_MC		0x04000000 /* Ethernet multicast frame */
 #define DPAA2_NI_FAS_BC		0x02000000 /* Ethernet broadcast frame */
 #define DPAA2_NI_FAS_KSE	0x00040000
 #define DPAA2_NI_FAS_EOFHE	0x00020000
 #define DPAA2_NI_FAS_MNLE	0x00010000
 #define DPAA2_NI_FAS_TIDE	0x00008000
 #define DPAA2_NI_FAS_PIEE	0x00004000
 #define DPAA2_NI_FAS_FLE	0x00002000 /* Frame length error */
 #define DPAA2_NI_FAS_FPE	0x00001000 /* Frame physical error */
 #define DPAA2_NI_FAS_PTE	0x00000080
 #define DPAA2_NI_FAS_ISP	0x00000040
 #define DPAA2_NI_FAS_PHE	0x00000020
 #define DPAA2_NI_FAS_BLE	0x00000010
 #define DPAA2_NI_FAS_L3CV	0x00000008 /* L3 csum validation performed */
 #define DPAA2_NI_FAS_L3CE	0x00000004 /* L3 csum error */
 #define DPAA2_NI_FAS_L4CV	0x00000002 /* L4 csum validation performed */
 #define DPAA2_NI_FAS_L4CE	0x00000001 /* L4 csum error */
 
 /* Mask for errors on the ingress path. */
 #define DPAA2_NI_FAS_RX_ERR_MASK (DPAA2_NI_FAS_KSE |	\
     DPAA2_NI_FAS_EOFHE |				\
     DPAA2_NI_FAS_MNLE |					\
     DPAA2_NI_FAS_TIDE |					\
     DPAA2_NI_FAS_PIEE |					\
     DPAA2_NI_FAS_FLE |					\
     DPAA2_NI_FAS_FPE |					\
     DPAA2_NI_FAS_PTE |					\
     DPAA2_NI_FAS_ISP |					\
     DPAA2_NI_FAS_PHE |					\
     DPAA2_NI_FAS_BLE |					\
     DPAA2_NI_FAS_L3CE |					\
     DPAA2_NI_FAS_L4CE					\
 )
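These bits describe the frame annotation status (FAS) word kept in the hardware annotation area of a frame buffer. A hedged sketch of how an Rx path might act on them, assuming "fas" was already read from that area and "m" is the received mbuf:

	if (__predict_false(fas & DPAA2_NI_FAS_RX_ERR_MASK)) {
		m_freem(m);	/* drop the errored frame */
	} else {
		if ((fas & DPAA2_NI_FAS_L3CV) && !(fas & DPAA2_NI_FAS_L3CE))
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
			    CSUM_IP_VALID;
		if ((fas & DPAA2_NI_FAS_L4CV) && !(fas & DPAA2_NI_FAS_L4CE)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}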
 
 /* Option bits to select specific queue configuration options to apply. */
 #define DPAA2_NI_QUEUE_OPT_USER_CTX	0x00000001
 #define DPAA2_NI_QUEUE_OPT_DEST		0x00000002
 #define DPAA2_NI_QUEUE_OPT_FLC		0x00000004
 #define DPAA2_NI_QUEUE_OPT_HOLD_ACTIVE	0x00000008
 #define DPAA2_NI_QUEUE_OPT_SET_CGID	0x00000040
 #define DPAA2_NI_QUEUE_OPT_CLEAR_CGID	0x00000080
 
 /* DPNI link configuration options. */
 #define DPAA2_NI_LINK_OPT_AUTONEG	((uint64_t) 0x01u)
 #define DPAA2_NI_LINK_OPT_HALF_DUPLEX	((uint64_t) 0x02u)
 #define DPAA2_NI_LINK_OPT_PAUSE		((uint64_t) 0x04u)
 #define DPAA2_NI_LINK_OPT_ASYM_PAUSE	((uint64_t) 0x08u)
 #define DPAA2_NI_LINK_OPT_PFC_PAUSE	((uint64_t) 0x10u)
 
 /*
  * Number of times to retry a frame enqueue before giving up. Value determined
  * empirically, in order to minimize the number of frames dropped on Tx.
  */
 #define DPAA2_NI_ENQUEUE_RETRIES	10
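A hedged sketch of the bounded retry loop this constant is meant for, assuming "swp", "ed", "fd" and "chan" are in scope and that dpaa2_swp_enq() (declared in dpaa2_swp.h below) returns 0 once the frame has been accepted:

	int rc = EBUSY;

	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
		rc = dpaa2_swp_enq(swp, &ed, &fd);
		if (rc == 0)
			break;
	}
	if (rc != 0)
		chan->tx_dropped++;	/* debug counter in dpaa2_ni_channel */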
 
 enum dpaa2_ni_queue_type {
 	DPAA2_NI_QUEUE_RX = 0,
 	DPAA2_NI_QUEUE_TX,
 	DPAA2_NI_QUEUE_TX_CONF,
 	DPAA2_NI_QUEUE_RX_ERR
 };
 
 enum dpaa2_ni_dest_type {
 	DPAA2_NI_DEST_NONE = 0,
 	DPAA2_NI_DEST_DPIO,
 	DPAA2_NI_DEST_DPCON
 };
 
 enum dpaa2_ni_ofl_type {
 	DPAA2_NI_OFL_RX_L3_CSUM = 0,
 	DPAA2_NI_OFL_RX_L4_CSUM,
 	DPAA2_NI_OFL_TX_L3_CSUM,
 	DPAA2_NI_OFL_TX_L4_CSUM,
 	DPAA2_NI_OFL_FLCTYPE_HASH /* FD flow context for AIOP/CTLU */
 };
 
 /**
  * @brief DPNI ingress traffic distribution mode.
  */
 enum dpaa2_ni_dist_mode {
 	DPAA2_NI_DIST_MODE_NONE = 0,
 	DPAA2_NI_DIST_MODE_HASH,
 	DPAA2_NI_DIST_MODE_FS
 };
 
 /**
  * @brief DPNI behavior in case of errors.
  */
 enum dpaa2_ni_err_action {
 	DPAA2_NI_ERR_DISCARD = 0,
 	DPAA2_NI_ERR_CONTINUE,
 	DPAA2_NI_ERR_SEND_TO_ERROR_QUEUE
 };
 
 struct dpaa2_ni_channel;
 struct dpaa2_ni_fq;
 
 /**
  * @brief Attributes of the DPNI object.
  *
  * options:	 ...
  * wriop_ver:	 Revision of the underlying WRIOP hardware block.
  */
 struct dpaa2_ni_attr {
 	uint32_t		 options;
 	uint16_t		 wriop_ver;
 	struct {
 		uint16_t	 fs;
 		uint8_t		 mac;
 		uint8_t		 vlan;
 		uint8_t		 qos;
 	} entries;
 	struct {
 		uint8_t		 queues;
 		uint8_t		 rx_tcs;
 		uint8_t		 tx_tcs;
 		uint8_t		 channels;
 		uint8_t		 cgs;
 	} num;
 	struct {
 		uint8_t		 fs;
 		uint8_t		 qos;
 	} key_size;
 };
 
 /**
  * @brief Tx ring.
  *
  * fq:		Parent (TxConf) frame queue.
  * fqid:	ID of the logical Tx queue.
  * mbuf_br:	Ring buffer for mbufs to transmit.
  * mbuf_lock:	Lock for the ring buffer.
  */
 struct dpaa2_ni_tx_ring {
 	struct dpaa2_ni_fq	*fq;
 	uint32_t		 fqid;
 	uint32_t		 txid; /* Tx ring index */
 
 	/* Ring buffer for indexes in "buf" array. */
 	struct buf_ring		*idx_br;
 	struct mtx		 lock;
 
 	/* Buffers to DMA load/unload Tx mbufs. */
 	struct dpaa2_buf	 buf[DPAA2_NI_BUFS_PER_TX];
 };
 
 /**
  * @brief A Frame Queue is the basic queuing structure used by the QMan.
  *
  * It comprises a list of frame descriptors (FDs), so it can be thought of
  * as a queue of frames.
  *
  * NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued
  *	 onto a work queue (WQ).
  *
  * fqid:	Frame queue ID, can be used to enqueue/dequeue or execute other
  *		commands on the queue through DPIO.
  * tx_rings:	Tx rings (one per traffic class) of the Tx queues which belong
  *		to the same flowid. Note that Tx queues are logical queues and
  *		not all management commands are available on these queue types.
  * tx_qdbin:	Queue destination bin. Can be used with the DPIO enqueue
  *		operation based on QDID, QDBIN and QPRI. Note that all Tx queues
  *		with the same flowid have the same destination bin.
  */
 struct dpaa2_ni_fq {
 	int (*consume)(struct dpaa2_ni_channel *,
 	    struct dpaa2_ni_fq *, struct dpaa2_fd *);
 
 	struct dpaa2_ni_channel	*chan;
 	uint32_t		 fqid;
 	uint16_t		 flowid;
 	uint8_t			 tc;
 	enum dpaa2_ni_queue_type type;
 
 	/* Optional fields (for TxConf queue). */
 	struct dpaa2_ni_tx_ring	 tx_rings[DPAA2_NI_MAX_TCS];
 	uint32_t		 tx_qdbin;
 } __aligned(CACHE_LINE_SIZE);
 
 /**
  * @brief QBMan channel to process ingress traffic (Rx, Tx conf).
  *
  * NOTE: Several WQs are organized into a single WQ Channel.
  */
 struct dpaa2_ni_channel {
 	device_t		 ni_dev;
 	device_t		 io_dev;
 	device_t		 con_dev;
 	uint16_t		 id;
 	uint16_t		 flowid;
 
 	/* For debug purposes only! */
 	uint64_t		 tx_frames;
 	uint64_t		 tx_dropped;
 
 	/* Context to configure CDAN. */
 	struct dpaa2_io_notif_ctx ctx;
 
 	/* Channel storage (to keep responses from VDQ command). */
 	struct dpaa2_buf	 store;
 	uint32_t		 store_sz; /* in frames */
 	uint32_t		 store_idx; /* frame index */
 
 	/* Recycled buffers to release back to the pool. */
 	uint32_t		 recycled_n;
-	bus_addr_t		 recycled[DPAA2_SWP_BUFS_PER_CMD];
+	struct dpaa2_buf	*recycled[DPAA2_SWP_BUFS_PER_CMD];
 
 	/* Frame queues */
 	uint32_t		 rxq_n;
 	struct dpaa2_ni_fq	 rx_queues[DPAA2_NI_MAX_TCS];
 	struct dpaa2_ni_fq	 txc_queue;
 };
 
 /**
  * @brief Configuration of the network interface queue.
  *
  * NOTE: This configuration is used to obtain information of a queue by
  *	 DPNI_GET_QUEUE command and update it by DPNI_SET_QUEUE one.
  *
  * It includes binding of the queue to a DPIO or DPCON object to receive
  * notifications and traffic on the CPU.
  *
  * user_ctx:	(r/w) User defined data, presented along with the frames
  *		being dequeued from this queue.
  * flow_ctx:	(r/w) Set default FLC value for traffic dequeued from this queue.
  *		Please check description of FD structure for more information.
  *		Note that FLC values set using DPNI_ADD_FS_ENTRY, if any, take
  *		precedence over values per queue.
  * dest_id:	(r/w) The ID of a DPIO or DPCON object, depending on
  *		DEST_TYPE (in flags) value. This field is ignored for DEST_TYPE
  *		set to 0 (DPNI_DEST_NONE).
  * fqid:	(r) Frame queue ID, can be used to enqueue/dequeue or execute
  *		other commands on the queue through DPIO. Note that Tx queues
  *		are logical queues and not all management commands are available
  *		on these queue types.
  * qdbin:	(r) Queue destination bin. Can be used with the DPIO enqueue
  *		operation based on QDID, QDBIN and QPRI.
  * type:	Type of the queue to set configuration to.
  * tc:		Traffic class. Ignored for QUEUE_TYPE 2 and 3 (Tx confirmation
  *		and Rx error queues).
  * idx:		Selects a specific queue out of the set of queues in a TC.
  *		Accepted values are in range 0 to NUM_QUEUES - 1. This field is
  *		ignored for QUEUE_TYPE 3 (Rx error queue). For access to the
  *		shared Tx confirmation queue (for Tx confirmation mode 1), this
  *		field must be set to 0xff.
  * cgid:	(r/w) Congestion group ID.
  * chan_id:	(w) Channel index to be configured. Used only when QUEUE_TYPE is
  *		set to DPNI_QUEUE_TX.
  * priority:	(r/w) Sets the priority in the destination DPCON or DPIO for
  *		dequeued traffic. Supported values are 0 to # of priorities in
  *		destination DPCON or DPIO - 1. This field is ignored for
  *		DEST_TYPE set to 0 (DPNI_DEST_NONE), except if this DPNI is in
  *		AIOP context. In that case the DPNI_SET_QUEUE can be used to
  *		override the default assigned priority of the FQ from the TC.
  * options:	Option bits selecting specific configuration options to apply.
  *		See DPAA2_NI_QUEUE_OPT_* for details.
  * dest_type:	Type of destination for dequeued traffic.
  * cgid_valid:	(r) Congestion group ID is valid.
  * stash_control: (r/w) If true, lowest 6 bits of FLC are used for stash control.
  *		Please check description of FD structure for more information.
  * hold_active:	(r/w) If true, this flag prevents the queue from being
  *		rescheduled between DPIOs while it carries traffic and is active
  *		on one DPIO. Can help reduce reordering if one queue is services
  *		on multiple CPUs, but the queue is also more likely to be trapped
  *		in one DPIO, especially when congested.
  */
 struct dpaa2_ni_queue_cfg {
 	uint64_t		 user_ctx;
 	uint64_t		 flow_ctx;
 	uint32_t		 dest_id;
 	uint32_t		 fqid;
 	uint16_t		 qdbin;
 	enum dpaa2_ni_queue_type type;
 	uint8_t			 tc;
 	uint8_t			 idx;
 	uint8_t			 cgid;
 	uint8_t			 chan_id;
 	uint8_t			 priority;
 	uint8_t			 options;
 
 	enum dpaa2_ni_dest_type	 dest_type;
 	bool			 cgid_valid;
 	bool			 stash_control;
 	bool			 hold_active;
 };
 
 /**
  * @brief Buffer layout attributes.
  *
  * pd_size:		Size kept for private data (in bytes).
  * fd_align:		Frame data alignment.
  * head_size:		Data head room.
  * tail_size:		Data tail room.
  * options:		...
  * pass_timestamp:	Timestamp is included in the buffer layout.
  * pass_parser_result:	Parsing results are included in the buffer layout.
  * pass_frame_status:	Frame status is included in the buffer layout.
  * pass_sw_opaque:	SW annotation is activated.
  * queue_type:		Type of a queue this configuration applies to.
  */
 struct dpaa2_ni_buf_layout {
 	uint16_t	pd_size;
 	uint16_t	fd_align;
 	uint16_t	head_size;
 	uint16_t	tail_size;
 	uint16_t	options;
 	bool		pass_timestamp;
 	bool		pass_parser_result;
 	bool		pass_frame_status;
 	bool		pass_sw_opaque;
 	enum dpaa2_ni_queue_type queue_type;
 };
 
 /**
  * @brief Buffer pools configuration for a network interface.
  */
 struct dpaa2_ni_pools_cfg {
 	uint8_t		pools_num;
 	struct {
 		uint32_t bp_obj_id;
 		uint16_t buf_sz;
 		int	 backup_flag; /* 0 - regular pool, 1 - backup pool */
 	} pools[DPAA2_NI_MAX_POOLS];
 };
 
 /**
  * @brief Errors behavior configuration for a network interface.
  *
  * err_mask:		The errors mask to configure.
  * action:		Desired action for the errors selected in the mask.
  * set_err_fas:		Set to true to mark the errors in frame annotation
  * 			status (FAS); relevant for non-discard actions only.
  */
 struct dpaa2_ni_err_cfg {
 	uint32_t	err_mask;
 	enum dpaa2_ni_err_action action;
 	bool		set_err_fas;
 };
 
 /**
  * @brief Link configuration.
  *
  * options:	Mask of available options.
  * adv_speeds:	Speeds that are advertised for autoneg.
  * rate:	Rate in Mbps.
  */
 struct dpaa2_ni_link_cfg {
 	uint64_t	options;
 	uint64_t	adv_speeds;
 	uint32_t	rate;
 };
 
 /**
  * @brief Link state.
  *
  * options:	Mask of available options.
  * adv_speeds:	Speeds that are advertised for autoneg.
  * sup_speeds:	Speeds capability of the PHY.
  * rate:	Rate in Mbps.
  * link_up:	Link state (true if link is up, false otherwise).
  * state_valid:	Ignore/Update the state of the link.
  */
 struct dpaa2_ni_link_state {
 	uint64_t	options;
 	uint64_t	adv_speeds;
 	uint64_t	sup_speeds;
 	uint32_t	rate;
 	bool		link_up;
 	bool		state_valid;
 };
 
 /**
  * @brief QoS table configuration.
  *
  * kcfg_busaddr:	Address of the buffer in I/O virtual address space which
  *			holds the QoS table key configuration.
  * default_tc:		Default traffic class to use in case of a lookup miss in
  *			the QoS table.
  * discard_on_miss:	Set to true to discard frames in case of no match.
  *			Default traffic class will be used otherwise.
  * keep_entries:	Set to true to keep existing QoS table entries. This
  *			option will work properly only for DPNI objects created
  *			with DPNI_OPT_HAS_KEY_MASKING option.
  */
 struct dpaa2_ni_qos_table {
 	uint64_t	kcfg_busaddr;
 	uint8_t		default_tc;
 	bool		discard_on_miss;
 	bool		keep_entries;
 };
 
 /**
  * @brief Context to add multicast physical addresses to the filter table.
  *
  * ifp:		Network interface associated with the context.
  * error:	Result of the last MC command.
  * nent:	Number of entries added.
  */
 struct dpaa2_ni_mcaddr_ctx {
 	struct ifnet	*ifp;
 	int		 error;
 	int		 nent;
 };
 
 struct dpaa2_eth_dist_fields {
 	uint64_t	rxnfc_field;
 	enum net_prot	cls_prot;
 	int		cls_field;
 	int		size;
 	uint64_t	id;
 };
 
 struct dpni_mask_cfg {
 	uint8_t		mask;
 	uint8_t		offset;
 } __packed;
 
 struct dpni_dist_extract {
 	uint8_t		prot;
 	uint8_t		efh_type; /* EFH type is in the 4 LSBs. */
 	uint8_t		size;
 	uint8_t		offset;
 	uint32_t	field;
 	uint8_t		hdr_index;
 	uint8_t		constant;
 	uint8_t		num_of_repeats;
 	uint8_t		num_of_byte_masks;
 	uint8_t		extract_type; /* Extraction type is in the 4 LSBs */
 	uint8_t		_reserved[3];
 	struct dpni_mask_cfg masks[4];
 } __packed;
 
 struct dpni_ext_set_rx_tc_dist {
 	uint8_t		num_extracts;
 	uint8_t		_reserved[7];
 	struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
 } __packed;
 
 /**
  * @brief Software context for the DPAA2 Network Interface driver.
  */
 struct dpaa2_ni_softc {
 	device_t		 dev;
 	struct resource 	*res[DPAA2_NI_MAX_RESOURCES];
 	uint16_t		 api_major;
 	uint16_t		 api_minor;
 	uint64_t		 rx_hash_fields;
 	uint16_t		 tx_data_off;
 	uint16_t		 tx_qdid;
 	uint32_t		 link_options;
 	int			 link_state;
 
 	uint16_t		 buf_align;
 	uint16_t		 buf_sz;
 
 	/* For debug purposes only! */
 	uint64_t		 rx_anomaly_frames;
 	uint64_t		 rx_single_buf_frames;
 	uint64_t		 rx_sg_buf_frames;
 	uint64_t		 rx_enq_rej_frames;
 	uint64_t		 rx_ieoi_err_frames;
 	uint64_t		 tx_single_buf_frames;
 	uint64_t		 tx_sg_frames;
 
 	/* Attributes of the DPAA2 network interface. */
 	struct dpaa2_ni_attr	 attr;
 
 	/* For network interface and miibus. */
 	struct ifnet		*ifp;
 	uint32_t		 if_flags;
 	struct mtx		 lock;
 	device_t		 miibus;
 	struct mii_data		*mii;
 	bool			 fixed_link;
 	struct ifmedia		 fixed_ifmedia;
 	int			 media_status;
 
 	/* DMA resources */
 	bus_dma_tag_t		 bp_dmat;  /* for buffer pool */
 	bus_dma_tag_t		 tx_dmat;  /* for Tx buffers */
 	bus_dma_tag_t		 st_dmat;  /* for channel storage */
 	bus_dma_tag_t		 rxd_dmat; /* for Rx distribution key */
 	bus_dma_tag_t		 qos_dmat; /* for QoS table key */
 	bus_dma_tag_t		 sgt_dmat; /* for scatter/gather tables */
 
 	struct dpaa2_buf	 qos_kcfg; /* QoS table key config. */
 	struct dpaa2_buf	 rxd_kcfg; /* Rx distribution key config. */
 
 	/* Channels and RxError frame queue */
 	uint32_t		 chan_n;
 	struct dpaa2_ni_channel	*channels[DPAA2_NI_MAX_CHANNELS];
 	struct dpaa2_ni_fq	 rxe_queue; /* one per network interface */
 
 	/* Rx buffers for buffer pool. */
 	struct dpaa2_atomic	 buf_num;
 	struct dpaa2_atomic	 buf_free; /* for sysctl(9) only */
 	struct dpaa2_buf	 buf[DPAA2_NI_BUFS_MAX];
 
 	/* Interrupts */
 	int			 irq_rid[DPAA2_NI_MSI_COUNT];
 	struct resource		*irq_res;
 	void			*intr; /* interrupt handle */
 
 	/* Tasks */
 	struct taskqueue	*bp_taskq;
 	struct task		 bp_task;
 
 	/* Callouts */
 	struct callout		 mii_callout;
 
 	struct {
 		uint32_t	 dpmac_id;
 		uint8_t		 addr[ETHER_ADDR_LEN];
 		device_t	 phy_dev;
 		int		 phy_loc;
 	} mac; /* Info about connected DPMAC (if exists). */
 };
 
 extern struct resource_spec dpaa2_ni_spec[];
 
 #endif /* _DPAA2_NI_H */
diff --git a/sys/dev/dpaa2/dpaa2_swp.h b/sys/dev/dpaa2/dpaa2_swp.h
index 53a8b32ef185..986ade601149 100644
--- a/sys/dev/dpaa2/dpaa2_swp.h
+++ b/sys/dev/dpaa2/dpaa2_swp.h
@@ -1,504 +1,531 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause
  *
- * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2021-2023 Dmitry Salychev
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #ifndef	_DPAA2_SWP_H
 #define	_DPAA2_SWP_H
 
+#include <sys/param.h>
+#include <sys/systm.h>
 #include <sys/bus.h>
 
 #include "dpaa2_types.h"
 #include "dpaa2_bp.h"
 
 /*
  * DPAA2 QBMan software portal.
  */
 
 /* All QBMan commands and result structures use this "valid bit" encoding. */
 #define DPAA2_SWP_VALID_BIT		((uint32_t) 0x80)
 
 #define DPAA2_SWP_TIMEOUT		100000	/* in us */
 #define DPAA2_SWP_CMD_PARAMS_N		8u
 #define DPAA2_SWP_RSP_PARAMS_N		8u
 
 /*
  * Maximum number of buffers that can be acquired/released through a single
  * QBMan command.
  */
 #define DPAA2_SWP_BUFS_PER_CMD		7u
 
 /*
  * Number of times to retry DPIO portal operations while waiting for portal to
  * finish executing current command and become available.
  *
  * We want to avoid being stuck in a while loop in case hardware becomes
  * unresponsive, but not give up too easily if the portal really is busy for
  * valid reasons.
  */
 #define DPAA2_SWP_BUSY_RETRIES		1000
 
 /* Versions of the QBMan software portals. */
 #define DPAA2_SWP_REV_4000		0x04000000
 #define DPAA2_SWP_REV_4100		0x04010000
 #define DPAA2_SWP_REV_4101		0x04010001
 #define DPAA2_SWP_REV_5000		0x05000000
 
 #define DPAA2_SWP_REV_MASK		0xFFFF0000
 
 /* Registers in the cache-inhibited area of the software portal. */
 #define DPAA2_SWP_CINH_CR		0x600 /* Management Command reg.*/
 #define DPAA2_SWP_CINH_EQCR_PI		0x800 /* Enqueue Ring, Producer Index */
 #define DPAA2_SWP_CINH_EQCR_CI		0x840 /* Enqueue Ring, Consumer Index */
 #define DPAA2_SWP_CINH_CR_RT		0x900 /* CR Read Trigger */
 #define DPAA2_SWP_CINH_VDQCR_RT		0x940 /* VDQCR Read Trigger */
 #define DPAA2_SWP_CINH_EQCR_AM_RT	0x980
 #define DPAA2_SWP_CINH_RCR_AM_RT	0x9C0
 #define DPAA2_SWP_CINH_DQPI		0xA00 /* DQRR Producer Index reg. */
 #define DPAA2_SWP_CINH_DQRR_ITR		0xA80 /* DQRR interrupt timeout reg. */
 #define DPAA2_SWP_CINH_DCAP		0xAC0 /* DQRR Consumption Ack. reg. */
 #define DPAA2_SWP_CINH_SDQCR		0xB00 /* Static Dequeue Command reg. */
 #define DPAA2_SWP_CINH_EQCR_AM_RT2	0xB40
 #define DPAA2_SWP_CINH_RCR_PI		0xC00 /* Release Ring, Producer Index */
 #define DPAA2_SWP_CINH_RAR		0xCC0 /* Release Array Allocation reg. */
 #define DPAA2_SWP_CINH_CFG		0xD00
 #define DPAA2_SWP_CINH_ISR		0xE00
 #define DPAA2_SWP_CINH_IER		0xE40
 #define DPAA2_SWP_CINH_ISDR		0xE80
 #define DPAA2_SWP_CINH_IIR		0xEC0
 #define DPAA2_SWP_CINH_ITPR		0xF40
 
 /* Registers in the cache-enabled area of the software portal. */
 #define DPAA2_SWP_CENA_EQCR(n)		(0x000 + ((uint32_t)(n) << 6))
 #define DPAA2_SWP_CENA_DQRR(n)		(0x200 + ((uint32_t)(n) << 6))
 #define DPAA2_SWP_CENA_RCR(n)		(0x400 + ((uint32_t)(n) << 6))
 #define DPAA2_SWP_CENA_CR		(0x600) /* Management Command reg. */
 #define DPAA2_SWP_CENA_RR(vb)		(0x700 + ((uint32_t)(vb) >> 1))
 #define DPAA2_SWP_CENA_VDQCR		(0x780)
 #define DPAA2_SWP_CENA_EQCR_CI		(0x840)
 
 /* Registers in the cache-enabled area of the software portal (memory-backed). */
 #define DPAA2_SWP_CENA_DQRR_MEM(n)	(0x0800 + ((uint32_t)(n) << 6))
 #define DPAA2_SWP_CENA_RCR_MEM(n)	(0x1400 + ((uint32_t)(n) << 6))
 #define DPAA2_SWP_CENA_CR_MEM		(0x1600) /* Management Command reg. */
 #define DPAA2_SWP_CENA_RR_MEM		(0x1680) /* Management Response reg. */
 #define DPAA2_SWP_CENA_VDQCR_MEM	(0x1780)
 #define DPAA2_SWP_CENA_EQCR_CI_MEMBACK	(0x1840)
 
 /* Shifts in the portal's configuration register. */
 #define DPAA2_SWP_CFG_DQRR_MF_SHIFT	20
 #define DPAA2_SWP_CFG_EST_SHIFT		16
 #define DPAA2_SWP_CFG_CPBS_SHIFT	15
 #define DPAA2_SWP_CFG_WN_SHIFT		14
 #define DPAA2_SWP_CFG_RPM_SHIFT		12
 #define DPAA2_SWP_CFG_DCM_SHIFT		10
 #define DPAA2_SWP_CFG_EPM_SHIFT		8
 #define DPAA2_SWP_CFG_VPM_SHIFT		7
 #define DPAA2_SWP_CFG_CPM_SHIFT		6
 #define DPAA2_SWP_CFG_SD_SHIFT		5
 #define DPAA2_SWP_CFG_SP_SHIFT		4
 #define DPAA2_SWP_CFG_SE_SHIFT		3
 #define DPAA2_SWP_CFG_DP_SHIFT		2
 #define DPAA2_SWP_CFG_DE_SHIFT		1
 #define DPAA2_SWP_CFG_EP_SHIFT		0
 
 /* Static Dequeue Command Register attribute codes */
 #define DPAA2_SDQCR_FC_SHIFT		29 /* Dequeue Command Frame Count */
 #define DPAA2_SDQCR_FC_MASK		0x1
 #define DPAA2_SDQCR_DCT_SHIFT		24 /* Dequeue Command Type */
 #define DPAA2_SDQCR_DCT_MASK		0x3
 #define DPAA2_SDQCR_TOK_SHIFT		16 /* Dequeue Command Token */
 #define DPAA2_SDQCR_TOK_MASK		0xff
 #define DPAA2_SDQCR_SRC_SHIFT		0  /* Dequeue Source */
 #define DPAA2_SDQCR_SRC_MASK		0xffff
 
 /*
  * Read trigger bit is used to trigger QMan to read a command from memory,
  * without having software perform a cache flush to force a write of the command
  * to QMan.
  *
  * NOTE: Implemented in QBMan 5.0 or above.
  */
 #define DPAA2_SWP_RT_MODE		((uint32_t)0x100)
 
 /* Interrupt Enable Register bits. */
 #define DPAA2_SWP_INTR_EQRI		0x01
 #define DPAA2_SWP_INTR_EQDI		0x02
 #define DPAA2_SWP_INTR_DQRI		0x04
 #define DPAA2_SWP_INTR_RCRI		0x08
 #define DPAA2_SWP_INTR_RCDI		0x10
 #define DPAA2_SWP_INTR_VDCI		0x20
 
 /* "Write Enable" bitmask for a command to configure SWP WQ Channel.*/
 #define DPAA2_WQCHAN_WE_EN		(0x1u) /* Enable CDAN generation */
 #define DPAA2_WQCHAN_WE_ICD		(0x2u) /* Interrupt Coalescing Disable */
 #define DPAA2_WQCHAN_WE_CTX		(0x4u)
 
 /* Definitions for parsing DQRR entries. */
 #define DPAA2_DQRR_RESULT_MASK		(0x7Fu)
 #define DPAA2_DQRR_RESULT_DQ		(0x60u)
 #define DPAA2_DQRR_RESULT_FQRN		(0x21u)
 #define DPAA2_DQRR_RESULT_FQRNI		(0x22u)
 #define DPAA2_DQRR_RESULT_FQPN		(0x24u)
 #define DPAA2_DQRR_RESULT_FQDAN		(0x25u)
 #define DPAA2_DQRR_RESULT_CDAN		(0x26u)
 #define DPAA2_DQRR_RESULT_CSCN_MEM	(0x27u)
 #define DPAA2_DQRR_RESULT_CGCU		(0x28u)
 #define DPAA2_DQRR_RESULT_BPSCN		(0x29u)
 #define DPAA2_DQRR_RESULT_CSCN_WQ	(0x2au)
 
 /* Frame dequeue statuses */
 #define DPAA2_DQ_STAT_FQEMPTY		(0x80u) /* FQ is empty */
 #define DPAA2_DQ_STAT_HELDACTIVE	(0x40u) /* FQ is held active */
 #define DPAA2_DQ_STAT_FORCEELIGIBLE	(0x20u) /* FQ force eligible */
 #define DPAA2_DQ_STAT_VALIDFRAME	(0x10u) /* valid frame */
 #define DPAA2_DQ_STAT_ODPVALID		(0x04u) /* FQ ODP enable */
 #define DPAA2_DQ_STAT_VOLATILE		(0x02u) /* volatile dequeue (VDC) */
 #define DPAA2_DQ_STAT_EXPIRED		(0x01u) /* VDC is expired */
 
 /*
  * Portal flags.
  *
  * TODO: Use the same flags for both MC and software portals.
  */
 #define DPAA2_SWP_DEF			0x0u
 #define DPAA2_SWP_NOWAIT_ALLOC		0x2u	/* Do not sleep during init */
 #define DPAA2_SWP_LOCKED		0x4000u	/* Wait till portal's unlocked */
 #define DPAA2_SWP_DESTROYED		0x8000u /* Terminate any operations */
 
 /* Command return codes. */
 #define DPAA2_SWP_STAT_OK		0x0
 #define DPAA2_SWP_STAT_NO_MEMORY	0x9	/* No memory available */
 #define DPAA2_SWP_STAT_PORTAL_DISABLED	0xFD	/* QBMan portal disabled */
 #define DPAA2_SWP_STAT_EINVAL		0xFE	/* Invalid argument */
 #define DPAA2_SWP_STAT_ERR		0xFF	/* General error */
 
+#define DPAA2_EQ_DESC_SIZE		32u	/* Enqueue Command Descriptor */
+#define DPAA2_FDR_DESC_SIZE		32u	/* Descriptor of the FDR */
+#define DPAA2_FD_SIZE			32u	/* Frame Descriptor */
+#define DPAA2_FDR_SIZE			64u	/* Frame Dequeue Response */
+#define DPAA2_SCN_SIZE			16u	/* State Change Notification */
+#define DPAA2_FA_SIZE			64u	/* SW Frame Annotation */
+#define DPAA2_SGE_SIZE			16u	/* S/G table entry */
+#define DPAA2_DQ_SIZE			64u	/* Dequeue Response */
+#define DPAA2_SWP_CMD_SIZE		64u	/* SWP Command */
+#define DPAA2_SWP_RSP_SIZE		64u	/* SWP Command Response */
+
 /* Opaque token for static dequeues. */
 #define DPAA2_SWP_SDQCR_TOKEN		0xBBu
 /* Opaque token for volatile dequeues. */
 #define DPAA2_SWP_VDQCR_TOKEN		0xCCu
 
 #define DPAA2_SWP_LOCK(__swp, __flags) do {		\
 	mtx_assert(&(__swp)->lock, MA_NOTOWNED);	\
 	mtx_lock(&(__swp)->lock);			\
 	*(__flags) = (__swp)->flags;			\
 	(__swp)->flags |= DPAA2_SWP_LOCKED;		\
 } while (0)
 
 #define DPAA2_SWP_UNLOCK(__swp) do {		\
 	mtx_assert(&(__swp)->lock, MA_OWNED);	\
 	(__swp)->flags &= ~DPAA2_SWP_LOCKED;	\
 	mtx_unlock(&(__swp)->lock);		\
 } while (0)
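A short usage sketch: callers take the portal lock, inspect the returned flags snapshot for DPAA2_SWP_DESTROYED before touching registers, and unlock on every path; "swp" is assumed to be a valid struct dpaa2_swp pointer:

	uint16_t flags;

	DPAA2_SWP_LOCK(swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if the portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}
	/* ... access the portal ... */
	DPAA2_SWP_UNLOCK(swp);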
 
 enum dpaa2_fd_format {
 	DPAA2_FD_SINGLE = 0,
 	DPAA2_FD_LIST,
 	DPAA2_FD_SG
 };
 
 /**
  * @brief Enqueue command descriptor.
- *
- * NOTE: 32 bytes.
  */
 struct dpaa2_eq_desc {
 	uint8_t		verb;
 	uint8_t		dca;
 	uint16_t	seqnum;
 	uint16_t	orpid;
 	uint16_t	_reserved;
 	uint32_t	tgtid;
 	uint32_t	tag;
 	uint16_t	qdbin;
 	uint8_t		qpri;
 	uint8_t		_reserved1[3];
 	uint8_t		wae;
 	uint8_t		rspid;
 	uint64_t	rsp_addr;
 } __packed;
+CTASSERT(sizeof(struct dpaa2_eq_desc) == DPAA2_EQ_DESC_SIZE);
 
 /**
  * @brief Frame Dequeue Response (FDR) descriptor.
- *
- * NOTE: 32 bytes.
  */
 struct dpaa2_fdr_desc {
 	uint8_t		verb;
 	uint8_t		stat;
 	uint16_t	seqnum;
 	uint16_t	oprid;
 	uint8_t		_reserved;
 	uint8_t		tok;
 	uint32_t	fqid;
 	uint32_t	_reserved1;
 	uint32_t	fq_byte_cnt;
 	uint32_t	fq_frm_cnt;
 	uint64_t	fqd_ctx;
 } __packed;
+CTASSERT(sizeof(struct dpaa2_fdr_desc) == DPAA2_FDR_DESC_SIZE);
 
 /**
  * @brief State Change Notification Message (SCNM).
- *
- * NOTE: 16 bytes.
  */
 struct dpaa2_scn {
 	uint8_t		verb;
 	uint8_t		stat;
 	uint8_t		state;
 	uint8_t		_reserved;
 	uint32_t	rid_tok;
 	uint64_t	ctx;
 } __packed;
+CTASSERT(sizeof(struct dpaa2_scn) == DPAA2_SCN_SIZE);
 
 /**
  * @brief DPAA2 frame descriptor.
  *
  * addr:		Memory address of the start of the buffer holding the
  *			frame data or the buffer containing the scatter/gather
  *			list.
  * data_length:		Length of the frame data (in bytes).
  * bpid_ivp_bmt:	Buffer pool ID (14 bit + BMT bit + IVP bit)
  * offset_fmt_sl:	Frame data offset, frame format and short-length fields.
  * frame_ctx:		Frame context. This field allows the sender of a frame
  *			to communicate some out-of-band information to the
  *			receiver of the frame.
  * ctrl:		Control bits (ERR, CBMT, ASAL, PTAC, DROPP, SC, DD).
  * flow_ctx:		Frame flow context. Associates the frame with a flow
  *			structure. QMan may use the FLC field for 3 purposes:
  *			stashing control, order definition point identification,
  *			and enqueue replication control.
- *
- * NOTE: 32 bytes.
  */
 struct dpaa2_fd {
 	uint64_t	addr;
 	uint32_t	data_length;
 	uint16_t	bpid_ivp_bmt;
 	uint16_t	offset_fmt_sl;
 	uint32_t	frame_ctx;
 	uint32_t	ctrl;
 	uint64_t	flow_ctx;
 } __packed;
+CTASSERT(sizeof(struct dpaa2_fd) == DPAA2_FD_SIZE);
+
+/**
+ * @brief DPAA2 frame annotation.
+ */
+struct dpaa2_fa {
+	uint32_t		 magic;
+	struct dpaa2_buf	*buf;
+	union {
+		struct { /* Tx frame annotation */
+			struct dpaa2_ni_tx_ring *tx;
+		};
+#ifdef __notyet__
+		struct { /* Rx frame annotation */
+			uint64_t		 _notused;
+		};
+#endif
+	};
+} __packed;
+CTASSERT(sizeof(struct dpaa2_fa) <= DPAA2_FA_SIZE);
 
 /**
  * @brief DPAA2 scatter/gather entry.
- *
- * NOTE: 16 bytes.
  */
 struct dpaa2_sg_entry {
 	uint64_t	addr;
 	uint32_t	len;
 	uint16_t	bpid;
 	uint16_t	offset_fmt;
 } __packed;
+CTASSERT(sizeof(struct dpaa2_sg_entry) == DPAA2_SGE_SIZE);
 
 /**
  * @brief Frame Dequeue Response (FDR).
- *
- * NOTE: 64 bytes.
  */
 struct dpaa2_fdr {
 	struct dpaa2_fdr_desc	 desc;
 	struct dpaa2_fd		 fd;
 } __packed;
+CTASSERT(sizeof(struct dpaa2_fdr) == DPAA2_FDR_SIZE);
 
 /**
  * @brief Dequeue Response Message.
- *
- * NOTE: 64 bytes.
  */
 struct dpaa2_dq {
 	union {
 		struct {
 			uint8_t	 verb;
 			uint8_t	 _reserved[63];
 		} common;
 		struct dpaa2_fdr fdr; /* Frame Dequeue Response */
 		struct dpaa2_scn scn; /* State Change Notification */
 	};
 } __packed;
+CTASSERT(sizeof(struct dpaa2_dq) == DPAA2_DQ_SIZE);
 
 /**
  * @brief Descriptor of the QBMan software portal.
  *
  * cena_res:		Unmapped cache-enabled part of the portal's I/O memory.
  * cena_map:		Mapped cache-enabled part of the portal's I/O memory.
  * cinh_res:		Unmapped cache-inhibited part of the portal's I/O memory.
  * cinh_map:		Mapped cache-inhibited part of the portal's I/O memory.
  *
  * dpio_dev:		Device associated with the DPIO object to manage this
  *			portal.
  * swp_version:		Hardware IP version of the software portal.
  * swp_clk:		QBMAN clock frequency value in Hz.
  * swp_cycles_ratio:	How many 256 QBMAN cycles fit into one ns.
  * swp_id:		Software portal ID.
  *
  * has_notif:		True if the notification mode is used.
  * has_8prio:		True for a channel with 8 priority WQs. Ignored unless
  *			"has_notif" is true.
  */
 struct dpaa2_swp_desc {
 	struct resource		*cena_res;
 	struct resource_map	*cena_map;
 	struct resource		*cinh_res;
 	struct resource_map	*cinh_map;
 
 	device_t		 dpio_dev;
 	uint32_t		 swp_version;
 	uint32_t		 swp_clk;
 	uint32_t		 swp_cycles_ratio;
 	uint16_t		 swp_id;
 
 	bool			 has_notif;
 	bool			 has_8prio;
 };
 
 /**
  * @brief Command holds data to be written to the software portal.
  */
 struct dpaa2_swp_cmd {
 	uint64_t	params[DPAA2_SWP_CMD_PARAMS_N];
 };
+CTASSERT(sizeof(struct dpaa2_swp_cmd) == DPAA2_SWP_CMD_SIZE);
 
 /**
  * @brief Command response holds data received from the software portal.
  */
 struct dpaa2_swp_rsp {
 	uint64_t	params[DPAA2_SWP_RSP_PARAMS_N];
 };
+CTASSERT(sizeof(struct dpaa2_swp_rsp) == DPAA2_SWP_RSP_SIZE);
 
 /**
  * @brief QBMan software portal.
  *
  * res:		Unmapped cache-enabled and cache-inhibited parts of the portal.
  * map:		Mapped cache-enabled and cache-inhibited parts of the portal.
  * desc:	Descriptor of the QBMan software portal.
  * lock:	Lock to guard an access to the portal.
  * cv:		Condition variable used to wait for the object's state change.
  * flags:	Current state of the object.
  * sdq:		Push dequeues status.
  * mc:		Management commands data.
  * mr:		Management response data.
  * dqrr:	Dequeue Response Ring is used to issue frame dequeue responses
  * 		from the QBMan to the driver.
  * eqcr:	Enqueue Command Ring is used to issue frame enqueue commands
  *		from the driver to the QBMan.
  */
 struct dpaa2_swp {
 	struct resource		*cena_res;
 	struct resource_map	*cena_map;
 	struct resource		*cinh_res;
 	struct resource_map	*cinh_map;
 
 	struct mtx		 lock;
 	struct dpaa2_swp_desc	*desc;
 	uint16_t		 flags;
 
 	/* Static Dequeue Command Register value (to obtain CDANs). */
 	uint32_t		 sdq;
 
 	/* Volatile Dequeue Command (to obtain frames). */
 	struct {
 		uint32_t	 valid_bit; /* 0x00 or 0x80 */
 	} vdq;
 
 	struct {
 		bool		 atomic;
 		bool		 writes_cinh;
 		bool		 mem_backed;
 	} cfg; /* Software portal configuration. */
 
 	struct {
 		uint32_t	 valid_bit; /* 0x00 or 0x80 */
 	} mc;
 
 	struct {
 		uint32_t	 valid_bit; /* 0x00 or 0x80 */
 	} mr;
 
 	struct {
 		uint32_t	 next_idx;
 		uint32_t	 valid_bit;
 		uint8_t		 ring_size;
 		bool		 reset_bug; /* dqrr reset workaround */
 		uint32_t	 irq_threshold;
 		uint32_t	 irq_itp;
 	} dqrr;
 
 	struct {
 		uint32_t	 pi; /* producer index */
 		uint32_t	 pi_vb; /* PI valid bits */
 		uint32_t	 pi_ring_size;
 		uint32_t	 pi_ci_mask;
 		uint32_t	 ci;
 		int		 available;
 		uint32_t	 pend;
 		uint32_t	 no_pfdr;
 	} eqcr;
 };
 
 /* Management routines. */
 int dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc,
     uint16_t flags);
 void dpaa2_swp_free_portal(struct dpaa2_swp *swp);
 uint32_t dpaa2_swp_set_cfg(uint8_t max_fill, uint8_t wn, uint8_t est,
     uint8_t rpm, uint8_t dcm, uint8_t epm, int sd, int sp, int se, int dp,
     int de, int ep);
 
 /* Read/write registers of a software portal. */
 void dpaa2_swp_write_reg(struct dpaa2_swp *swp, uint32_t o, uint32_t v);
 uint32_t dpaa2_swp_read_reg(struct dpaa2_swp *swp, uint32_t o);
 
 /* Helper routines. */
 void dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc *ed, bool resp_always);
 void dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc *ed, uint32_t fqid);
 void dpaa2_swp_set_intr_trigger(struct dpaa2_swp *swp, uint32_t mask);
 uint32_t dpaa2_swp_get_intr_trigger(struct dpaa2_swp *swp);
 uint32_t dpaa2_swp_read_intr_status(struct dpaa2_swp *swp);
 void dpaa2_swp_clear_intr_status(struct dpaa2_swp *swp, uint32_t mask);
 void dpaa2_swp_set_push_dequeue(struct dpaa2_swp *swp, uint8_t chan_idx,
     bool en);
 int dpaa2_swp_set_irq_coalescing(struct dpaa2_swp *swp, uint32_t threshold,
     uint32_t holdoff);
 
 /* Software portal commands. */
 int dpaa2_swp_conf_wq_channel(struct dpaa2_swp *swp, uint16_t chan_id,
     uint8_t we_mask, bool cdan_en, uint64_t ctx);
 int dpaa2_swp_query_bp(struct dpaa2_swp *swp, uint16_t bpid,
     struct dpaa2_bp_conf *conf);
 int dpaa2_swp_release_bufs(struct dpaa2_swp *swp, uint16_t bpid, bus_addr_t *buf,
     uint32_t buf_num);
 int dpaa2_swp_dqrr_next_locked(struct dpaa2_swp *swp, struct dpaa2_dq *dq,
     uint32_t *idx);
 int dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id,
     struct dpaa2_buf *buf, uint32_t frames_n);
 int dpaa2_swp_enq(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
     struct dpaa2_fd *fd);
 int dpaa2_swp_enq_mult(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
     struct dpaa2_fd *fd, uint32_t *flags, int frames_n);
 
 #endif /* _DPAA2_SWP_H */
diff --git a/sys/dev/dpaa2/dpaa2_types.h b/sys/dev/dpaa2/dpaa2_types.h
index b54afa358a28..5e7ccad15e1c 100644
--- a/sys/dev/dpaa2/dpaa2_types.h
+++ b/sys/dev/dpaa2/dpaa2_types.h
@@ -1,114 +1,116 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause
  *
  * Copyright © 2021-2022 Dmitry Salychev
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #ifndef	_DPAA2_TYPES_H
 #define	_DPAA2_TYPES_H
 
 #include <machine/atomic.h>
 
+#define DPAA2_MAGIC	((uint32_t) 0xD4AA2C0Du)
+
 /**
  * @brief Types of the DPAA2 devices.
  */
 enum dpaa2_dev_type {
 	DPAA2_DEV_MC = 7500,	/* Management Complex (firmware bus) */
 	DPAA2_DEV_RC,		/* Resource Container (firmware bus) */
 	DPAA2_DEV_IO,		/* I/O object (to work with QBMan portal) */
 	DPAA2_DEV_NI,		/* Network Interface */
 	DPAA2_DEV_MCP,		/* MC portal */
 	DPAA2_DEV_BP,		/* Buffer Pool */
 	DPAA2_DEV_CON,		/* Concentrator */
 	DPAA2_DEV_MAC,		/* MAC object */
 	DPAA2_DEV_MUX,		/* MUX (Datacenter bridge) object */
 	DPAA2_DEV_SW,		/* Ethernet Switch */
 
 	DPAA2_DEV_NOTYPE	/* Shouldn't be assigned to any DPAA2 device. */
 };
 
 /**
  * @brief Types of the DPAA2 buffers.
  */
 enum dpaa2_buf_type {
 	DPAA2_BUF_RX = 75,	/* Rx buffer */
 	DPAA2_BUF_TX,		/* Tx buffer */
 	DPAA2_BUF_STORE		/* Channel storage, key configuration */
 };
 
 /**
  * @brief DMA-mapped buffer (for Rx/Tx buffers, channel storage, etc.).
  */
 struct dpaa2_buf {
 	enum dpaa2_buf_type		 type;
 	union {
 		struct {
 			bus_dma_tag_t	 dmat; /* DMA tag for this buffer */
 			bus_dmamap_t	 dmap;
 			bus_addr_t	 paddr;
 			void		*vaddr;
 
 			struct mbuf	*m; /* associated mbuf */
 		} rx;
 		struct {
 			bus_dma_tag_t	 dmat; /* DMA tag for this buffer */
 			bus_dmamap_t	 dmap;
 			bus_addr_t	 paddr;
 			void		*vaddr;
 
 			struct mbuf	*m; /* associated mbuf */
 			uint64_t	 idx;
 
 			/* for scatter/gather table */
 			bus_dma_tag_t	 sgt_dmat;
 			bus_dmamap_t	 sgt_dmap;
 			bus_addr_t	 sgt_paddr;
 			void		*sgt_vaddr;
 		} tx;
 		struct {
 			bus_dma_tag_t	 dmat; /* DMA tag for this buffer */
 			bus_dmamap_t	 dmap;
 			bus_addr_t	 paddr;
 			void		*vaddr;
 		} store;
 	};
 };
 
 struct dpaa2_atomic {
 	volatile int counter;
 };
 
 /* Handy wrappers over atomic operations. */
 #define DPAA2_ATOMIC_XCHG(a, val) \
 	(atomic_swap_int(&(a)->counter, (val)))
 #define DPAA2_ATOMIC_READ(a) \
 	(atomic_load_acq_int(&(a)->counter))
 #define DPAA2_ATOMIC_ADD(a, val) \
 	(atomic_add_acq_int(&(a)->counter, (val)))
 
 /* Convert DPAA2 type to/from string. */
 const char		*dpaa2_ttos(enum dpaa2_dev_type type);
 enum dpaa2_dev_type	 dpaa2_stot(const char *str);
 
 #endif /* _DPAA2_TYPES_H */