Index: stable/12/usr.sbin/bhyve/net_backends.c
===================================================================
--- stable/12/usr.sbin/bhyve/net_backends.c	(revision 358184)
+++ stable/12/usr.sbin/bhyve/net_backends.c	(revision 358185)
@@ -1,873 +1,814 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2019 Vincenzo Maffione <vmaffione@FreeBSD.org>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * This file implements multiple network backends (tap, netmap, ...),
  * to be used by network frontends such as virtio-net and e1000.
  * The API to access the backend (e.g. send/receive packets, negotiate
  * features) is exported by net_backends.h.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <sys/types.h>		/* u_short etc */
 #ifndef WITHOUT_CAPSICUM
 #include <sys/capsicum.h>
 #endif
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 #include <sys/uio.h>
 
 #include <net/if.h>
 #include <net/netmap.h>
 #include <net/netmap_virt.h>
 #define NETMAP_WITH_LIBS
 #include <net/netmap_user.h>
 
 #ifndef WITHOUT_CAPSICUM
 #include <capsicum_helpers.h>
 #endif
 #include <err.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <string.h>
 #include <unistd.h>
 #include <sysexits.h>
 #include <assert.h>
 #include <pthread.h>
 #include <pthread_np.h>
 #include <poll.h>
 
 
 #include "debug.h"
 #include "iov.h"
 #include "mevent.h"
 #include "net_backends.h"
 
 #include <sys/linker_set.h>
 
 /*
  * Each network backend registers a set of function pointers that are
  * used to implement the net backends API.
  * This might need to be exposed if we implement backends in separate files.
  */
 struct net_backend {
 	const char *prefix;	/* prefix matching this backend */
 
 	/*
 	 * Routines used to initialize and cleanup the resources needed
 	 * by a backend. The cleanup function is used internally,
 	 * and should not be called by the frontend.
 	 */
 	int (*init)(struct net_backend *be, const char *devname,
 	    net_be_rxeof_t cb, void *param);
 	void (*cleanup)(struct net_backend *be);
 
 	/*
 	 * Called to serve a guest transmit request. The scatter-gather
 	 * vector provided by the caller has 'iovcnt' elements and contains
 	 * the packet to send.
 	 */
-	ssize_t (*send)(struct net_backend *be, struct iovec *iov, int iovcnt);
+	ssize_t (*send)(struct net_backend *be, const struct iovec *iov,
+	    int iovcnt);
 
 	/*
 	 * Called to receive a packet from the backend. When the function
 	 * returns a positive value 'len', the scatter-gather vector
 	 * provided by the caller contains a packet of that length.
 	 * The function returns 0 if the backend doesn't have a new packet to
 	 * receive.
 	 */
-	ssize_t (*recv)(struct net_backend *be, struct iovec *iov, int iovcnt);
+	ssize_t (*recv)(struct net_backend *be, const struct iovec *iov,
+	    int iovcnt);
 
 	/*
 	 * Ask the backend to enable or disable receive operation in the
 	 * backend. On return from a disable operation, it is guaranteed
 	 * that the receive callback won't be called until receive is
 	 * enabled again. Note however that it is up to the caller to make
 	 * sure that netbe_recv() is not currently being executed by another
 	 * thread.
 	 */
 	void (*recv_enable)(struct net_backend *be);
 	void (*recv_disable)(struct net_backend *be);
 
 	/*
 	 * Ask the backend for the virtio-net features it is able to
 	 * support. Possible features are TSO, UFO and checksum offloading
 	 * in both rx and tx direction and for both IPv4 and IPv6.
 	 */
 	uint64_t (*get_cap)(struct net_backend *be);
 
 	/*
 	 * Tell the backend to enable/disable the specified virtio-net
 	 * features (capabilities).
 	 */
 	int (*set_cap)(struct net_backend *be, uint64_t features,
 	    unsigned int vnet_hdr_len);
 
 	struct pci_vtnet_softc *sc;
 	int fd;
 
 	/*
 	 * Length of the virtio-net header used by the backend and the
 	 * frontend, respectively. A zero value means that the header
 	 * is not used.
 	 */
 	unsigned int be_vnet_hdr_len;
 	unsigned int fe_vnet_hdr_len;
 
 	/* Size of backend-specific private data. */
 	size_t priv_size;
 
 	/* Room for backend-specific data. */
 	char opaque[0];
 };
 
 SET_DECLARE(net_backend_set, struct net_backend);
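 /*
  * All backend templates defined below are collected into net_backend_set
  * via DATA_SET(); netbe_init() scans the set with SET_FOREACH() to find
  * the backend whose prefix matches the user-provided device name.
  */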
 
 #define VNET_HDR_LEN	sizeof(struct virtio_net_rxhdr)
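 /* 12 bytes; the 10-byte legacy variant omits the trailing vrh_bufs field. */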
 
 #define WPRINTF(params) PRINTLN params
 
 /*
  * The tap backend
  */
 
 struct tap_priv {
 	struct mevent *mevp;
 };
 
 static void
 tap_cleanup(struct net_backend *be)
 {
 	struct tap_priv *priv = (struct tap_priv *)be->opaque;
 
 	if (priv->mevp) {
 		mevent_delete(priv->mevp);
 	}
 	if (be->fd != -1) {
 		close(be->fd);
 		be->fd = -1;
 	}
 }
 
 static int
 tap_init(struct net_backend *be, const char *devname,
 	 net_be_rxeof_t cb, void *param)
 {
 	struct tap_priv *priv = (struct tap_priv *)be->opaque;
 	char tbuf[80];
 	int opt = 1;
 #ifndef WITHOUT_CAPSICUM
 	cap_rights_t rights;
 #endif
 
 	if (cb == NULL) {
 		WPRINTF(("TAP backend requires non-NULL callback"));
 		return (-1);
 	}
 
 	strcpy(tbuf, "/dev/");
 	strlcat(tbuf, devname, sizeof(tbuf));
 
 	be->fd = open(tbuf, O_RDWR);
 	if (be->fd == -1) {
 		WPRINTF(("open of tap device %s failed", tbuf));
 		goto error;
 	}
 
 	/*
 	 * Set non-blocking and register for read
 	 * notifications with the event loop
 	 */
 	if (ioctl(be->fd, FIONBIO, &opt) < 0) {
 		WPRINTF(("tap device O_NONBLOCK failed"));
 		goto error;
 	}
 
 #ifndef WITHOUT_CAPSICUM
 	cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
 	if (caph_rights_limit(be->fd, &rights) == -1)
 		errx(EX_OSERR, "Unable to apply rights for sandbox");
 #endif
 
 	priv->mevp = mevent_add_disabled(be->fd, EVF_READ, cb, param);
 	if (priv->mevp == NULL) {
 		WPRINTF(("Could not register event"));
 		goto error;
 	}
 
 	return (0);
 
 error:
 	tap_cleanup(be);
 	return (-1);
 }
 
 /*
  * Called to send a buffer chain out to the tap device
  */
 static ssize_t
-tap_send(struct net_backend *be, struct iovec *iov, int iovcnt)
+tap_send(struct net_backend *be, const struct iovec *iov, int iovcnt)
 {
 	return (writev(be->fd, iov, iovcnt));
 }
 
 static ssize_t
-tap_recv(struct net_backend *be, struct iovec *iov, int iovcnt)
+tap_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
 {
 	ssize_t ret;
 
 	/* Should never be called without a valid tap fd */
 	assert(be->fd != -1);
 
 	ret = readv(be->fd, iov, iovcnt);
 
 	if (ret < 0 && errno == EWOULDBLOCK) {
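 		/* No packet available: the recv contract reports this as 0. */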
 		return (0);
 	}
 
 	return (ret);
 }
 
 static void
 tap_recv_enable(struct net_backend *be)
 {
 	struct tap_priv *priv = (struct tap_priv *)be->opaque;
 
 	mevent_enable(priv->mevp);
 }
 
 static void
 tap_recv_disable(struct net_backend *be)
 {
 	struct tap_priv *priv = (struct tap_priv *)be->opaque;
 
 	mevent_disable(priv->mevp);
 }
 
 static uint64_t
 tap_get_cap(struct net_backend *be)
 {
 
 	return (0); /* no capabilities for now */
 }
 
 static int
 tap_set_cap(struct net_backend *be, uint64_t features,
 		unsigned vnet_hdr_len)
 {
 
 	return ((features || vnet_hdr_len) ? -1 : 0);
 }
 
 static struct net_backend tap_backend = {
 	.prefix = "tap",
 	.priv_size = sizeof(struct tap_priv),
 	.init = tap_init,
 	.cleanup = tap_cleanup,
 	.send = tap_send,
 	.recv = tap_recv,
 	.recv_enable = tap_recv_enable,
 	.recv_disable = tap_recv_disable,
 	.get_cap = tap_get_cap,
 	.set_cap = tap_set_cap,
 };
 
 /* A clone of the tap backend, with a different prefix. */
 static struct net_backend vmnet_backend = {
 	.prefix = "vmnet",
 	.priv_size = sizeof(struct tap_priv),
 	.init = tap_init,
 	.cleanup = tap_cleanup,
 	.send = tap_send,
 	.recv = tap_recv,
 	.recv_enable = tap_recv_enable,
 	.recv_disable = tap_recv_disable,
 	.get_cap = tap_get_cap,
 	.set_cap = tap_set_cap,
 };
 
 DATA_SET(net_backend_set, tap_backend);
 DATA_SET(net_backend_set, vmnet_backend);
 
 /*
  * The netmap backend
  */
 
 /* The virtio-net features supported by netmap. */
 #define NETMAP_FEATURES (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_TSO4 | \
 		VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_UFO | \
 		VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | \
 		VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_UFO | \
 		VIRTIO_NET_F_MRG_RXBUF)
 
 struct netmap_priv {
 	char ifname[IFNAMSIZ];
 	struct nm_desc *nmd;
 	uint16_t memid;
 	struct netmap_ring *rx;
 	struct netmap_ring *tx;
 	struct mevent *mevp;
 	net_be_rxeof_t cb;
 	void *cb_param;
 };
 
 static void
 nmreq_init(struct nmreq *req, char *ifname)
 {
 
 	memset(req, 0, sizeof(*req));
 	strlcpy(req->nr_name, ifname, sizeof(req->nr_name));
 	req->nr_version = NETMAP_API;
 }
 
 static int
 netmap_set_vnet_hdr_len(struct net_backend *be, int vnet_hdr_len)
 {
 	int err;
 	struct nmreq req;
 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
 
 	nmreq_init(&req, priv->ifname);
 	req.nr_cmd = NETMAP_BDG_VNET_HDR;
 	req.nr_arg1 = vnet_hdr_len;
 	err = ioctl(be->fd, NIOCREGIF, &req);
 	if (err) {
 		WPRINTF(("Unable to set vnet header length %d",
 				vnet_hdr_len));
 		return (err);
 	}
 
 	be->be_vnet_hdr_len = vnet_hdr_len;
 
 	return (0);
 }
 
 static int
 netmap_has_vnet_hdr_len(struct net_backend *be, unsigned vnet_hdr_len)
 {
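 	/*
 	 * Probe support by temporarily setting the requested length, then
 	 * restoring the previous value if the ioctl succeeds.
 	 */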
 	int prev_hdr_len = be->be_vnet_hdr_len;
 	int ret;
 
 	if (vnet_hdr_len == prev_hdr_len) {
 		return (1);
 	}
 
 	ret = netmap_set_vnet_hdr_len(be, vnet_hdr_len);
 	if (ret) {
 		return (0);
 	}
 
 	netmap_set_vnet_hdr_len(be, prev_hdr_len);
 
 	return (1);
 }
 
 static uint64_t
 netmap_get_cap(struct net_backend *be)
 {
 
 	return (netmap_has_vnet_hdr_len(be, VNET_HDR_LEN) ?
 	    NETMAP_FEATURES : 0);
 }
 
 static int
 netmap_set_cap(struct net_backend *be, uint64_t features,
 	       unsigned vnet_hdr_len)
 {
 
 	return (netmap_set_vnet_hdr_len(be, vnet_hdr_len));
 }
 
 static int
 netmap_init(struct net_backend *be, const char *devname,
 	    net_be_rxeof_t cb, void *param)
 {
 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
 
 	strlcpy(priv->ifname, devname, sizeof(priv->ifname));
 	priv->ifname[sizeof(priv->ifname) - 1] = '\0';
 
 	priv->nmd = nm_open(priv->ifname, NULL, NETMAP_NO_TX_POLL, NULL);
 	if (priv->nmd == NULL) {
 		WPRINTF(("Unable to nm_open(): interface '%s', errno (%s)",
 			devname, strerror(errno)));
 		return (-1);
 	}
 
 	priv->memid = priv->nmd->req.nr_arg2;
 	priv->tx = NETMAP_TXRING(priv->nmd->nifp, 0);
 	priv->rx = NETMAP_RXRING(priv->nmd->nifp, 0);
 	priv->cb = cb;
 	priv->cb_param = param;
 	be->fd = priv->nmd->fd;
 
 	priv->mevp = mevent_add_disabled(be->fd, EVF_READ, cb, param);
 	if (priv->mevp == NULL) {
 		WPRINTF(("Could not register event"));
 		return (-1);
 	}
 
 	return (0);
 }
 
 static void
 netmap_cleanup(struct net_backend *be)
 {
 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
 
 	if (priv->mevp) {
 		mevent_delete(priv->mevp);
 	}
 	if (priv->nmd) {
 		nm_close(priv->nmd);
 	}
 	be->fd = -1;
 }
 
 static ssize_t
-netmap_send(struct net_backend *be, struct iovec *iov,
+netmap_send(struct net_backend *be, const struct iovec *iov,
 	    int iovcnt)
 {
 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
 	struct netmap_ring *ring;
 	ssize_t totlen = 0;
 	int nm_buf_size;
 	int nm_buf_len;
 	uint32_t head;
 	void *nm_buf;
 	int j;
 
 	ring = priv->tx;
 	head = ring->head;
 	if (head == ring->tail) {
 		WPRINTF(("No space, drop %zu bytes", count_iov(iov, iovcnt)));
 		goto txsync;
 	}
 	nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
 	nm_buf_size = ring->nr_buf_size;
 	nm_buf_len = 0;
 
 	for (j = 0; j < iovcnt; j++) {
 		int iov_frag_size = iov[j].iov_len;
 		void *iov_frag_buf = iov[j].iov_base;
 
 		totlen += iov_frag_size;
 
 		/*
 		 * Split each iovec fragment over more netmap slots, if
 		 * necessary.
 		 */
 		for (;;) {
 			int copylen;
 
 			copylen = iov_frag_size < nm_buf_size ? iov_frag_size : nm_buf_size;
 			memcpy(nm_buf, iov_frag_buf, copylen);
 
 			iov_frag_buf += copylen;
 			iov_frag_size -= copylen;
 			nm_buf += copylen;
 			nm_buf_size -= copylen;
 			nm_buf_len += copylen;
 
 			if (iov_frag_size == 0) {
 				break;
 			}
 
 			ring->slot[head].len = nm_buf_len;
 			ring->slot[head].flags = NS_MOREFRAG;
 			head = nm_ring_next(ring, head);
 			if (head == ring->tail) {
 				/*
 				 * We ran out of netmap slots while
 				 * splitting the iovec fragments.
 				 */
 				WPRINTF(("No space, drop %zu bytes",
 				   count_iov(iov, iovcnt)));
 				goto txsync;
 			}
 			nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
 			nm_buf_size = ring->nr_buf_size;
 			nm_buf_len = 0;
 		}
 	}
 
 	/* Complete the last slot, which must not have NS_MOREFRAG set. */
 	ring->slot[head].len = nm_buf_len;
 	ring->slot[head].flags = 0;
 	head = nm_ring_next(ring, head);
 
 	/* Now update ring->head and ring->cur. */
 	ring->head = ring->cur = head;
 txsync:
 	ioctl(be->fd, NIOCTXSYNC, NULL);
 
 	return (totlen);
 }
 
 static ssize_t
-netmap_recv(struct net_backend *be, struct iovec *iov, int iovcnt)
+netmap_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
 {
 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
 	struct netmap_slot *slot = NULL;
 	struct netmap_ring *ring;
 	void *iov_frag_buf;
 	int iov_frag_size;
 	ssize_t totlen = 0;
 	uint32_t head;
 
 	assert(iovcnt);
 
 	ring = priv->rx;
 	head = ring->head;
 	iov_frag_buf = iov->iov_base;
 	iov_frag_size = iov->iov_len;
 
 	do {
 		int nm_buf_len;
 		void *nm_buf;
 
 		if (head == ring->tail) {
 			return (0);
 		}
 
 		slot = ring->slot + head;
 		nm_buf = NETMAP_BUF(ring, slot->buf_idx);
 		nm_buf_len = slot->len;
 
 		for (;;) {
 			int copylen = nm_buf_len < iov_frag_size ?
 			    nm_buf_len : iov_frag_size;
 
 			memcpy(iov_frag_buf, nm_buf, copylen);
 			nm_buf += copylen;
 			nm_buf_len -= copylen;
 			iov_frag_buf += copylen;
 			iov_frag_size -= copylen;
 			totlen += copylen;
 
 			if (nm_buf_len == 0) {
 				break;
 			}
 
 			iov++;
 			iovcnt--;
 			if (iovcnt == 0) {
 				/* No space to receive. */
 				WPRINTF(("Short iov, drop %zd bytes",
 				    totlen));
 				return (-ENOSPC);
 			}
 			iov_frag_buf = iov->iov_base;
 			iov_frag_size = iov->iov_len;
 		}
 
 		head = nm_ring_next(ring, head);
 
 	} while (slot->flags & NS_MOREFRAG);
 
 	/* Release slots to netmap. */
 	ring->head = ring->cur = head;
 
 	return (totlen);
 }
 
 static void
 netmap_recv_enable(struct net_backend *be)
 {
 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
 
 	mevent_enable(priv->mevp);
 }
 
 static void
 netmap_recv_disable(struct net_backend *be)
 {
 	struct netmap_priv *priv = (struct netmap_priv *)be->opaque;
 
 	mevent_disable(priv->mevp);
 }
 
 static struct net_backend netmap_backend = {
 	.prefix = "netmap",
 	.priv_size = sizeof(struct netmap_priv),
 	.init = netmap_init,
 	.cleanup = netmap_cleanup,
 	.send = netmap_send,
 	.recv = netmap_recv,
 	.recv_enable = netmap_recv_enable,
 	.recv_disable = netmap_recv_disable,
 	.get_cap = netmap_get_cap,
 	.set_cap = netmap_set_cap,
 };
 
 /* A clone of the netmap backend, with a different prefix. */
 static struct net_backend vale_backend = {
 	.prefix = "vale",
 	.priv_size = sizeof(struct netmap_priv),
 	.init = netmap_init,
 	.cleanup = netmap_cleanup,
 	.send = netmap_send,
 	.recv = netmap_recv,
 	.recv_enable = netmap_recv_enable,
 	.recv_disable = netmap_recv_disable,
 	.get_cap = netmap_get_cap,
 	.set_cap = netmap_set_cap,
 };
 
 DATA_SET(net_backend_set, netmap_backend);
 DATA_SET(net_backend_set, vale_backend);
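 
 /*
  * A hypothetical sketch (names are illustrative only) of how a further
  * backend would plug in: provide the handlers, fill in a template with
  * a unique prefix, and register it in the linker set:
  *
  *	static struct net_backend foo_backend = {
  *		.prefix = "foo",
  *		.priv_size = sizeof(struct foo_priv),
  *		.init = foo_init,
  *		.cleanup = foo_cleanup,
  *		.send = foo_send,
  *		.recv = foo_recv,
  *		.recv_enable = foo_recv_enable,
  *		.recv_disable = foo_recv_disable,
  *		.get_cap = foo_get_cap,
  *		.set_cap = foo_set_cap,
  *	};
  *	DATA_SET(net_backend_set, foo_backend);
  */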
 
 /*
  * Initialize a backend and attach to the frontend.
  * This is called during frontend initialization.
  *  @pbe is a pointer to the backend to be initialized
  *  @devname is the backend-name as supplied on the command line,
  * 	e.g. -s 2:0,frontend-name,backend-name[,other-args]
  *  @cb is the receive callback supplied by the frontend,
  *	and it is invoked in the event loop when a receive
  *	event is generated in the hypervisor,
  *  @param is a pointer to the frontend, and normally used as
  *	the argument for the callback.
  */
 int
 netbe_init(struct net_backend **ret, const char *devname, net_be_rxeof_t cb,
     void *param)
 {
 	struct net_backend **pbe, *nbe, *tbe = NULL;
 	int err;
 
 	/*
 	 * Find the network backend that matches the user-provided
 	 * device name. net_backend_set is built using a linker set.
 	 */
 	SET_FOREACH(pbe, net_backend_set) {
 		if (strncmp(devname, (*pbe)->prefix,
 		    strlen((*pbe)->prefix)) == 0) {
 			tbe = *pbe;
 			assert(tbe->init != NULL);
 			assert(tbe->cleanup != NULL);
 			assert(tbe->send != NULL);
 			assert(tbe->recv != NULL);
 			assert(tbe->get_cap != NULL);
 			assert(tbe->set_cap != NULL);
 			break;
 		}
 	}
 
 	*ret = NULL;
 	if (tbe == NULL)
 		return (EINVAL);
 	nbe = calloc(1, sizeof(*nbe) + tbe->priv_size);
 	*nbe = *tbe;	/* copy the template */
 	nbe->fd = -1;
 	nbe->sc = param;
 	nbe->be_vnet_hdr_len = 0;
 	nbe->fe_vnet_hdr_len = 0;
 
 	/* Initialize the backend. */
 	err = nbe->init(nbe, devname, cb, param);
 	if (err) {
 		free(nbe);
 		return (err);
 	}
 
 	*ret = nbe;
 
 	return (0);
 }
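 
 /*
  * A sketch of typical frontend usage (illustrative only, error handling
  * omitted):
  *
  *	net_backend_t *be;
  *
  *	netbe_init(&be, devname, rx_callback, sc);
  *	features = netbe_get_cap(be);
  *	netbe_set_cap(be, negotiated_features, vnet_hdr_len);
  *	netbe_rx_enable(be);
  *	...
  *	netbe_send(be, iov, iovcnt);
  *	...
  *	netbe_cleanup(be);
  */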
 
 void
 netbe_cleanup(struct net_backend *be)
 {
 
 	if (be != NULL) {
 		be->cleanup(be);
 		free(be);
 	}
 }
 
 uint64_t
 netbe_get_cap(struct net_backend *be)
 {
 
 	assert(be != NULL);
 	return (be->get_cap(be));
 }
 
 int
 netbe_set_cap(struct net_backend *be, uint64_t features,
 	      unsigned vnet_hdr_len)
 {
 	int ret;
 
 	assert(be != NULL);
 
 	/* There are only three valid lengths, i.e., 0, 10 and 12. */
 	if (vnet_hdr_len && vnet_hdr_len != VNET_HDR_LEN
 		&& vnet_hdr_len != (VNET_HDR_LEN - sizeof(uint16_t)))
 		return (-1);
 
 	be->fe_vnet_hdr_len = vnet_hdr_len;
 
 	ret = be->set_cap(be, features, vnet_hdr_len);
 	assert(be->be_vnet_hdr_len == 0 ||
 	       be->be_vnet_hdr_len == be->fe_vnet_hdr_len);
 
 	return (ret);
 }
 
-static __inline struct iovec *
-iov_trim(struct iovec *iov, int *iovcnt, unsigned int tlen)
-{
-	struct iovec *riov;
-
-	/* XXX short-cut: assume first segment is >= tlen */
-	assert(iov[0].iov_len >= tlen);
-
-	iov[0].iov_len -= tlen;
-	if (iov[0].iov_len == 0) {
-		assert(*iovcnt > 1);
-		*iovcnt -= 1;
-		riov = &iov[1];
-	} else {
-		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + tlen);
-		riov = &iov[0];
-	}
-
-	return (riov);
-}
-
 ssize_t
-netbe_send(struct net_backend *be, struct iovec *iov, int iovcnt)
+netbe_send(struct net_backend *be, const struct iovec *iov, int iovcnt)
 {
 
-	assert(be != NULL);
-	if (be->be_vnet_hdr_len != be->fe_vnet_hdr_len) {
-		/*
-		 * The frontend uses a virtio-net header, but the backend
-		 * does not. We ignore it (as it must be all zeroes) and
-		 * strip it.
-		 */
-		assert(be->be_vnet_hdr_len == 0);
-		iov = iov_trim(iov, &iovcnt, be->fe_vnet_hdr_len);
-	}
-
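+	/*
+	 * Virtio-net header handling is now up to the frontend, which can
+	 * query the backend header length via netbe_get_vnet_hdr_len().
+	 */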
 	return (be->send(be, iov, iovcnt));
 }
 
 /*
  * Try to read a packet from the backend, without blocking.
  * If no packets are available, return 0. In case of success, return
  * the length of the packet just read. Return -1 in case of errors.
  */
 ssize_t
-netbe_recv(struct net_backend *be, struct iovec *iov, int iovcnt)
+netbe_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
 {
-	/* Length of prepended virtio-net header. */
-	unsigned int hlen = be->fe_vnet_hdr_len;
-	int ret;
 
-	assert(be != NULL);
-
-	if (hlen && hlen != be->be_vnet_hdr_len) {
-		/*
-		 * The frontend uses a virtio-net header, but the backend
-		 * does not. We need to prepend a zeroed header.
-		 */
-		struct virtio_net_rxhdr *vh;
-
-		assert(be->be_vnet_hdr_len == 0);
-
-		/*
-		 * Get a pointer to the rx header, and use the
-		 * data immediately following it for the packet buffer.
-		 */
-		vh = iov[0].iov_base;
-		iov = iov_trim(iov, &iovcnt, hlen);
-
-		/*
-		 * The only valid field in the rx packet header is the
-		 * number of buffers if merged rx bufs were negotiated.
-		 */
-		memset(vh, 0, hlen);
-		if (hlen == VNET_HDR_LEN) {
-			vh->vrh_bufs = 1;
-		}
-	}
-
-	ret = be->recv(be, iov, iovcnt);
-	if (ret > 0) {
-		ret += hlen;
-	}
-
-	return (ret);
+	return (be->recv(be, iov, iovcnt));
 }
 
 /*
  * Read a packet from the backend and discard it.
  * Returns the size of the discarded packet or zero if no packet was available.
  * A negative error code is returned in case of read error.
  */
 ssize_t
 netbe_rx_discard(struct net_backend *be)
 {
 	/*
 	 * MP note: the dummybuf is only used to discard frames,
 	 * so there is no need for it to be per-vtnet or locked.
 	 * We only make it large enough for a TSO-sized segment.
 	 */
 	static uint8_t dummybuf[65536 + 64];
 	struct iovec iov;
 
 	iov.iov_base = dummybuf;
 	iov.iov_len = sizeof(dummybuf);
 
 	return (netbe_recv(be, &iov, 1));
 }
 
 void
 netbe_rx_disable(struct net_backend *be)
 {
 
 	be->recv_disable(be);
 }
 
 void
 netbe_rx_enable(struct net_backend *be)
 {
 
 	be->recv_enable(be);
+}
+
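+/* Return the virtio-net header length used by the backend (0 if none). */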
+size_t
+netbe_get_vnet_hdr_len(struct net_backend *be)
+{
+
+	return (be->be_vnet_hdr_len);
 }
Index: stable/12/usr.sbin/bhyve/net_backends.h
===================================================================
--- stable/12/usr.sbin/bhyve/net_backends.h	(revision 358184)
+++ stable/12/usr.sbin/bhyve/net_backends.h	(revision 358185)
@@ -1,91 +1,92 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2019 Vincenzo Maffione <vmaffione@FreeBSD.org>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef __NET_BACKENDS_H__
 #define __NET_BACKENDS_H__
 
 #include <stdint.h>
 
 /* Opaque type representing a network backend. */
 typedef struct net_backend net_backend_t;
 
 /* Interface between network frontends and the network backends. */
 typedef void (*net_be_rxeof_t)(int, enum ev_type, void *param);
 int	netbe_init(net_backend_t **be, const char *devname, net_be_rxeof_t cb,
             void *param);
 void	netbe_cleanup(net_backend_t *be);
 uint64_t netbe_get_cap(net_backend_t *be);
 int	 netbe_set_cap(net_backend_t *be, uint64_t cap,
              unsigned vnet_hdr_len);
-ssize_t	netbe_send(net_backend_t *be, struct iovec *iov, int iovcnt);
-ssize_t	netbe_recv(net_backend_t *be, struct iovec *iov, int iovcnt);
+size_t	netbe_get_vnet_hdr_len(net_backend_t *be);
+ssize_t	netbe_send(net_backend_t *be, const struct iovec *iov, int iovcnt);
+ssize_t	netbe_recv(net_backend_t *be, const struct iovec *iov, int iovcnt);
 ssize_t	netbe_rx_discard(net_backend_t *be);
 void	netbe_rx_disable(net_backend_t *be);
 void	netbe_rx_enable(net_backend_t *be);
 
 
 /*
  * Network device capabilities taken from the VirtIO standard.
  * Despite the name, these capabilities can be used by different frontends
  * (virtio-net, ptnet) and supported by different backends (netmap, tap, ...).
  */
 #define	VIRTIO_NET_F_CSUM	(1 <<  0) /* host handles partial cksum */
 #define	VIRTIO_NET_F_GUEST_CSUM	(1 <<  1) /* guest handles partial cksum */
 #define	VIRTIO_NET_F_MAC	(1 <<  5) /* host supplies MAC */
 #define	VIRTIO_NET_F_GSO_DEPREC	(1 <<  6) /* deprecated: host handles GSO */
 #define	VIRTIO_NET_F_GUEST_TSO4	(1 <<  7) /* guest can rcv TSOv4 */
 #define	VIRTIO_NET_F_GUEST_TSO6	(1 <<  8) /* guest can rcv TSOv6 */
 #define	VIRTIO_NET_F_GUEST_ECN	(1 <<  9) /* guest can rcv TSO with ECN */
 #define	VIRTIO_NET_F_GUEST_UFO	(1 << 10) /* guest can rcv UFO */
 #define	VIRTIO_NET_F_HOST_TSO4	(1 << 11) /* host can rcv TSOv4 */
 #define	VIRTIO_NET_F_HOST_TSO6	(1 << 12) /* host can rcv TSOv6 */
 #define	VIRTIO_NET_F_HOST_ECN	(1 << 13) /* host can rcv TSO with ECN */
 #define	VIRTIO_NET_F_HOST_UFO	(1 << 14) /* host can rcv UFO */
 #define	VIRTIO_NET_F_MRG_RXBUF	(1 << 15) /* host can merge RX buffers */
 #define	VIRTIO_NET_F_STATUS	(1 << 16) /* config status field available */
 #define	VIRTIO_NET_F_CTRL_VQ	(1 << 17) /* control channel available */
 #define	VIRTIO_NET_F_CTRL_RX	(1 << 18) /* control channel RX mode support */
 #define	VIRTIO_NET_F_CTRL_VLAN	(1 << 19) /* control channel VLAN filtering */
 #define	VIRTIO_NET_F_GUEST_ANNOUNCE \
 				(1 << 21) /* guest can send gratuitous pkts */
 
 /*
  * Fixed network header size
  */
 struct virtio_net_rxhdr {
 	uint8_t		vrh_flags;
 	uint8_t		vrh_gso_type;
 	uint16_t	vrh_hdr_len;
 	uint16_t	vrh_gso_size;
 	uint16_t	vrh_csum_start;
 	uint16_t	vrh_csum_offset;
 	uint16_t	vrh_bufs;
 } __packed;
 
 #endif /* __NET_BACKENDS_H__ */
Index: stable/12/usr.sbin/bhyve/net_utils.c
===================================================================
--- stable/12/usr.sbin/bhyve/net_utils.c	(revision 358184)
+++ stable/12/usr.sbin/bhyve/net_utils.c	(revision 358185)
@@ -1,89 +1,87 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2011 NetApp, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #include "net_utils.h"
 #include "bhyverun.h"
 #include <md5.h>
 #include <net/ethernet.h>
 #include <string.h>
 #include <stdio.h>
 #include <errno.h>
 
 #include "bhyverun.h"
 #include "debug.h"
 #include "net_utils.h"
 
 int
 net_parsemac(char *mac_str, uint8_t *mac_addr)
 {
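 	/*
 	 * mac_str is expected to contain just the MAC address itself; the
 	 * "mac=" key that this function used to strip is now handled by
 	 * the callers.
 	 */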
         struct ether_addr *ea;
-        char *tmpstr;
         char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
 
-        tmpstr = strsep(&mac_str,"=");
+	if (mac_str == NULL)
+		return (EINVAL);
 
-        if ((mac_str != NULL) && (!strcmp(tmpstr,"mac"))) {
-                ea = ether_aton(mac_str);
+	ea = ether_aton(mac_str);
 
-                if (ea == NULL || ETHER_IS_MULTICAST(ea->octet) ||
-                    memcmp(ea->octet, zero_addr, ETHER_ADDR_LEN) == 0) {
-			EPRINTLN("Invalid MAC %s", mac_str);
-                        return (EINVAL);
-                } else
-                        memcpy(mac_addr, ea->octet, ETHER_ADDR_LEN);
-        }
+	if (ea == NULL || ETHER_IS_MULTICAST(ea->octet) ||
+	    memcmp(ea->octet, zero_addr, ETHER_ADDR_LEN) == 0) {
+		EPRINTLN("Invalid MAC %s", mac_str);
+		return (EINVAL);
+	} else
+		memcpy(mac_addr, ea->octet, ETHER_ADDR_LEN);
 
         return (0);
 }
 
 void
 net_genmac(struct pci_devinst *pi, uint8_t *macaddr)
 {
 	/*
 	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
 	 * followed by an MD5 of the PCI slot/func number and dev name
 	 */
 	MD5_CTX mdctx;
 	unsigned char digest[16];
 	char nstr[80];
 
 	snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
 	    pi->pi_func, vmname);
 
 	MD5Init(&mdctx);
 	MD5Update(&mdctx, nstr, (unsigned int)strlen(nstr));
 	MD5Final(digest, &mdctx);
 
 	macaddr[0] = 0x00;
 	macaddr[1] = 0xa0;
 	macaddr[2] = 0x98;
 	macaddr[3] = digest[0];
 	macaddr[4] = digest[1];
 	macaddr[5] = digest[2];
 }
Index: stable/12/usr.sbin/bhyve/pci_e82545.c
===================================================================
--- stable/12/usr.sbin/bhyve/pci_e82545.c	(revision 358184)
+++ stable/12/usr.sbin/bhyve/pci_e82545.c	(revision 358185)
@@ -1,2370 +1,2388 @@
 /*
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
  * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
  * Copyright (c) 2013 Jeremiah Lott, Avere Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer
  *    in this position and unchanged.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <sys/types.h>
 #ifndef WITHOUT_CAPSICUM
 #include <sys/capsicum.h>
 #endif
 #include <sys/limits.h>
 #include <sys/ioctl.h>
 #include <sys/uio.h>
 #include <net/ethernet.h>
 #include <netinet/in.h>
 #include <netinet/tcp.h>
 
 #ifndef WITHOUT_CAPSICUM
 #include <capsicum_helpers.h>
 #endif
 #include <err.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <md5.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sysexits.h>
 #include <unistd.h>
 #include <pthread.h>
 #include <pthread_np.h>
 
 #include "e1000_regs.h"
 #include "e1000_defines.h"
 #include "mii.h"
 
 #include "bhyverun.h"
 #include "debug.h"
 #include "pci_emul.h"
 #include "mevent.h"
 #include "net_utils.h"
 #include "net_backends.h"
 
 /* Hardware/register definitions XXX: move some to common code. */
 #define E82545_VENDOR_ID_INTEL			0x8086
 #define E82545_DEV_ID_82545EM_COPPER		0x100F
 #define E82545_SUBDEV_ID			0x1008
 
 #define E82545_REVISION_4			4
 
 #define E82545_MDIC_DATA_MASK			0x0000FFFF
 #define E82545_MDIC_OP_MASK			0x0c000000
 #define E82545_MDIC_IE				0x20000000
 
 #define E82545_EECD_FWE_DIS	0x00000010 /* Flash writes disabled */
 #define E82545_EECD_FWE_EN	0x00000020 /* Flash writes enabled */
 #define E82545_EECD_FWE_MASK	0x00000030 /* Flash writes mask */
 
 #define E82545_BAR_REGISTER			0
 #define E82545_BAR_REGISTER_LEN			(128*1024)
 #define E82545_BAR_FLASH			1
 #define E82545_BAR_FLASH_LEN			(64*1024)
 #define E82545_BAR_IO				2
 #define E82545_BAR_IO_LEN			8
 
 #define E82545_IOADDR				0x00000000
 #define E82545_IODATA				0x00000004
 #define E82545_IO_REGISTER_MAX			0x0001FFFF
 #define E82545_IO_FLASH_BASE			0x00080000
 #define E82545_IO_FLASH_MAX			0x000FFFFF
 
 #define E82545_ARRAY_ENTRY(reg, offset)		(reg + (offset<<2))
 #define E82545_RAR_MAX				15
 #define E82545_MTA_MAX				127
 #define E82545_VFTA_MAX				127
 
 /* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
  * followed by 6 address bits.
  * TODO: make opcode bits and addr bits configurable?
  * NVM Commands - Microwire */
 #define E82545_NVM_OPCODE_BITS	3
 #define E82545_NVM_ADDR_BITS	6
 #define E82545_NVM_DATA_BITS	16
 #define E82545_NVM_OPADDR_BITS	(E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
 #define E82545_NVM_ADDR_MASK	((1 << E82545_NVM_ADDR_BITS)-1)
 #define E82545_NVM_OPCODE_MASK	\
     (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
 #define E82545_NVM_OPCODE_READ	(0x6 << E82545_NVM_ADDR_BITS)	/* read */
 #define E82545_NVM_OPCODE_WRITE	(0x5 << E82545_NVM_ADDR_BITS)	/* write */
 #define E82545_NVM_OPCODE_ERASE	(0x7 << E82545_NVM_ADDR_BITS)	/* erase */
 #define	E82545_NVM_OPCODE_EWEN	(0x4 << E82545_NVM_ADDR_BITS)	/* wr-enable */
 
 #define	E82545_NVM_EEPROM_SIZE	64 /* 64 * 16-bit values == 128 bytes */
 
 #define E1000_ICR_SRPD		0x00010000
 
 /* This is an arbitrary number.  There is no hard limit on the chip. */
 #define I82545_MAX_TXSEGS	64
 
 /* Legacy receive descriptor */
 struct e1000_rx_desc {
 	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
 	uint16_t length;	/* Length of data DMAed into data buffer */
 	uint16_t csum;		/* Packet checksum */
 	uint8_t	 status;       	/* Descriptor status */
 	uint8_t  errors;	/* Descriptor Errors */
 	uint16_t special;
 };
 
 /* Transmit descriptor types */
 #define	E1000_TXD_MASK		(E1000_TXD_CMD_DEXT | 0x00F00000)
 #define E1000_TXD_TYP_L		(0)
 #define E1000_TXD_TYP_C		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)
 #define E1000_TXD_TYP_D		(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)
 
 /* Legacy transmit descriptor */
 struct e1000_tx_desc {
 	uint64_t buffer_addr;   /* Address of the descriptor's data buffer */
 	union {
 		uint32_t data;
 		struct {
 			uint16_t length;  /* Data buffer length */
 			uint8_t  cso;  /* Checksum offset */
 			uint8_t  cmd;  /* Descriptor control */
 		} flags;
 	} lower;
 	union {
 		uint32_t data;
 		struct {
 			uint8_t status; /* Descriptor status */
 			uint8_t css;  /* Checksum start */
 			uint16_t special;
 		} fields;
 	} upper;
 };
 
 /* Context descriptor */
 struct e1000_context_desc {
 	union {
 		uint32_t ip_config;
 		struct {
 			uint8_t ipcss;  /* IP checksum start */
 			uint8_t ipcso;  /* IP checksum offset */
 			uint16_t ipcse;  /* IP checksum end */
 		} ip_fields;
 	} lower_setup;
 	union {
 		uint32_t tcp_config;
 		struct {
 			uint8_t tucss;  /* TCP checksum start */
 			uint8_t tucso;  /* TCP checksum offset */
 			uint16_t tucse;  /* TCP checksum end */
 		} tcp_fields;
 	} upper_setup;
 	uint32_t cmd_and_length;
 	union {
 		uint32_t data;
 		struct {
 			uint8_t status;  /* Descriptor status */
 			uint8_t hdr_len;  /* Header length */
 			uint16_t mss;  /* Maximum segment size */
 		} fields;
 	} tcp_seg_setup;
 };
 
 /* Data descriptor */
 struct e1000_data_desc {
 	uint64_t buffer_addr;  /* Address of the descriptor's data buffer */
 	union {
 		uint32_t data;
 		struct {
 			uint16_t length;  /* Data buffer length */
 			uint8_t typ_len_ext;
 			uint8_t cmd;
 		} flags;
 	} lower;
 	union {
 		uint32_t data;
 		struct {
 			uint8_t status;  /* Descriptor status */
 			uint8_t popts;  /* Packet Options */
 			uint16_t special;
 		} fields;
 	} upper;
 };
 
 union e1000_tx_udesc {
 	struct e1000_tx_desc td;
 	struct e1000_context_desc cd;
 	struct e1000_data_desc dd;
 };
 
 /* Tx checksum info for a packet. */
 struct ck_info {
 	int	ck_valid;	/* ck_info is valid */
 	uint8_t	ck_start;	/* start byte of cksum calculation */
 	uint8_t	ck_off;		/* offset of cksum insertion */
 	uint16_t ck_len;	/* length of cksum calc: 0 is to packet-end */
 };
 
 /*
  * Debug printf
  */
 static int e82545_debug = 0;
 #define WPRINTF(msg,params...) PRINTLN("e82545: " msg, params)
 #define DPRINTF(msg,params...) if (e82545_debug) WPRINTF(msg, params)
 
 #define	MIN(a,b) (((a)<(b))?(a):(b))
 #define	MAX(a,b) (((a)>(b))?(a):(b))
 
 /* s/w representation of the RAL/RAH regs */
 struct  eth_uni {
 	int		eu_valid;
 	int		eu_addrsel;
 	struct ether_addr eu_eth;
 };
 
 
 struct e82545_softc {
 	struct pci_devinst *esc_pi;
 	struct vmctx	*esc_ctx;
 	struct mevent   *esc_mevpitr;
 	pthread_mutex_t	esc_mtx;
 	struct ether_addr esc_mac;
 	net_backend_t	*esc_be;
 
 	/* General */
 	uint32_t	esc_CTRL;	/* x0000 device ctl */
 	uint32_t	esc_FCAL;	/* x0028 flow ctl addr lo */
 	uint32_t	esc_FCAH;	/* x002C flow ctl addr hi */
 	uint32_t	esc_FCT;	/* x0030 flow ctl type */
 	uint32_t	esc_VET;	/* x0038 VLAN eth type */
 	uint32_t	esc_FCTTV;	/* x0170 flow ctl tx timer */
 	uint32_t	esc_LEDCTL;	/* x0E00 LED control */
 	uint32_t	esc_PBA;	/* x1000 pkt buffer allocation */
 	
 	/* Interrupt control */
 	int		esc_irq_asserted;
 	uint32_t	esc_ICR;	/* x00C0 cause read/clear */
 	uint32_t	esc_ITR;	/* x00C4 intr throttling */
 	uint32_t	esc_ICS;	/* x00C8 cause set */
 	uint32_t	esc_IMS;	/* x00D0 mask set/read */
 	uint32_t	esc_IMC;	/* x00D8 mask clear */
 
 	/* Transmit */
 	union e1000_tx_udesc *esc_txdesc;
 	struct e1000_context_desc esc_txctx;
 	pthread_t	esc_tx_tid;
 	pthread_cond_t	esc_tx_cond;
 	int		esc_tx_enabled;
 	int		esc_tx_active;
 	uint32_t	esc_TXCW;	/* x0178 transmit config */
 	uint32_t	esc_TCTL;	/* x0400 transmit ctl */
 	uint32_t	esc_TIPG;	/* x0410 inter-packet gap */
 	uint16_t	esc_AIT;	/* x0458 Adaptive Interframe Throttle */
 	uint64_t	esc_tdba;      	/* verified 64-bit desc table addr */
 	uint32_t	esc_TDBAL;	/* x3800 desc table addr, low bits */
 	uint32_t	esc_TDBAH;	/* x3804 desc table addr, hi 32-bits */
 	uint32_t	esc_TDLEN;	/* x3808 # descriptors in bytes */
 	uint16_t	esc_TDH;	/* x3810 desc table head idx */
 	uint16_t	esc_TDHr;	/* internal read version of TDH */
 	uint16_t	esc_TDT;	/* x3818 desc table tail idx */
 	uint32_t	esc_TIDV;	/* x3820 intr delay */
 	uint32_t	esc_TXDCTL;	/* x3828 desc control */
 	uint32_t	esc_TADV;	/* x382C intr absolute delay */
 	
 	/* L2 frame acceptance */
 	struct eth_uni	esc_uni[16];	/* 16 x unicast MAC addresses */
 	uint32_t	esc_fmcast[128]; /* Multicast filter bit-match */
 	uint32_t	esc_fvlan[128]; /* VLAN 4096-bit filter */
 	
 	/* Receive */
 	struct e1000_rx_desc *esc_rxdesc;
 	pthread_cond_t	esc_rx_cond;
 	int		esc_rx_enabled;
 	int		esc_rx_active;
 	int		esc_rx_loopback;
 	uint32_t	esc_RCTL;	/* x0100 receive ctl */
 	uint32_t	esc_FCRTL;	/* x2160 flow cntl thresh, low */
 	uint32_t	esc_FCRTH;	/* x2168 flow cntl thresh, hi */
 	uint64_t	esc_rdba;	/* verified 64-bit desc table addr */
 	uint32_t	esc_RDBAL;	/* x2800 desc table addr, low bits */
 	uint32_t	esc_RDBAH;	/* x2804 desc table addr, hi 32-bits*/
 	uint32_t	esc_RDLEN;	/* x2808 #descriptors */
 	uint16_t	esc_RDH;	/* x2810 desc table head idx */
 	uint16_t	esc_RDT;	/* x2818 desc table tail idx */
 	uint32_t	esc_RDTR;	/* x2820 intr delay */
 	uint32_t	esc_RXDCTL;	/* x2828 desc control */
 	uint32_t	esc_RADV;	/* x282C intr absolute delay */
 	uint32_t	esc_RSRPD;	/* x2C00 recv small packet detect */
 	uint32_t	esc_RXCSUM;     /* x5000 receive cksum ctl */
 	
 	/* IO Port register access */
 	uint32_t io_addr;
 
 	/* Shadow copy of MDIC */
 	uint32_t mdi_control;
 	/* Shadow copy of EECD */
 	uint32_t eeprom_control;
 	/* Latest NVM in/out */
 	uint16_t nvm_data;
 	uint16_t nvm_opaddr;
 	/* stats */
 	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
 	uint32_t pkt_rx_by_size[6];
 	uint32_t pkt_tx_by_size[6];
 	uint32_t good_pkt_rx_count;
 	uint32_t bcast_pkt_rx_count;
 	uint32_t mcast_pkt_rx_count;
 	uint32_t good_pkt_tx_count;
 	uint32_t bcast_pkt_tx_count;
 	uint32_t mcast_pkt_tx_count;
 	uint32_t oversize_rx_count;
 	uint32_t tso_tx_count;
 	uint64_t good_octets_rx;
 	uint64_t good_octets_tx;
 	uint64_t missed_octets; /* counts missed and oversized */
 
 	uint8_t nvm_bits:6; /* number of bits remaining in/out */
 	uint8_t nvm_mode:2;
 #define E82545_NVM_MODE_OPADDR  0x0
 #define E82545_NVM_MODE_DATAIN  0x1
 #define E82545_NVM_MODE_DATAOUT 0x2
 	/* EEPROM data */
 	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
 };
 
 static void e82545_reset(struct e82545_softc *sc, int dev);
 static void e82545_rx_enable(struct e82545_softc *sc);
 static void e82545_rx_disable(struct e82545_softc *sc);
 static void e82545_rx_callback(int fd, enum ev_type type, void *param);
 static void e82545_tx_start(struct e82545_softc *sc);
 static void e82545_tx_enable(struct e82545_softc *sc);
 static void e82545_tx_disable(struct e82545_softc *sc);
 
 static inline int
 e82545_size_stat_index(uint32_t size)
 {
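 	/*
 	 * Six buckets: index 0 is <=64 bytes, 5 is >=1024, and 1-4 cover
 	 * 65-127, 128-255, 256-511 and 512-1023 respectively.
 	 */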
 	if (size <= 64) {
 		return 0;
 	} else if (size >= 1024) {
 		return 5;
 	} else {
 		/* should be 1-4 */
 		return (fls(size) - 6);
 	}
 }
 
 static void
 e82545_init_eeprom(struct e82545_softc *sc)
 {
 	uint16_t checksum, i;
 
         /* mac addr */
 	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
 		(((uint16_t)sc->esc_mac.octet[1]) << 8);
 	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
 		(((uint16_t)sc->esc_mac.octet[3]) << 8);
 	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
 		(((uint16_t)sc->esc_mac.octet[5]) << 8);
 
 	/* pci ids */
 	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
 	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
 	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
 	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;
 
 	/* fill in the checksum */
         checksum = 0;
 	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
 		checksum += sc->eeprom_data[i];
 	}
 	checksum = NVM_SUM - checksum;
 	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
 	DPRINTF("eeprom checksum: 0x%x", checksum);
 }
 
 static void
 e82545_write_mdi(struct e82545_softc *sc, uint8_t reg_addr,
 			uint8_t phy_addr, uint32_t data)
 {
 	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x", reg_addr, phy_addr, data);
 }
 
 static uint32_t
 e82545_read_mdi(struct e82545_softc *sc, uint8_t reg_addr,
 			uint8_t phy_addr)
 {
 	//DPRINTF("Read mdi reg:0x%x phy:0x%x", reg_addr, phy_addr);
 	switch (reg_addr) {
 	case PHY_STATUS:
 		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
 			MII_SR_AUTONEG_COMPLETE);
 	case PHY_AUTONEG_ADV:
 		return NWAY_AR_SELECTOR_FIELD;
 	case PHY_LP_ABILITY:
 		return 0;
 	case PHY_1000T_STATUS:
 		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
 			SR_1000T_LOCAL_RX_STATUS);
 	case PHY_ID1:
 		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
 	case PHY_ID2:
 		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
 	default:
 		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x", reg_addr, phy_addr);
 		return 0;
 	}
 	/* not reached */
 }
 
 static void
 e82545_eecd_strobe(struct e82545_softc *sc)
 {
 	/* Microwire state machine */
 	/*
 	DPRINTF("eeprom state machine srtobe "
 		"0x%x 0x%x 0x%x 0x%x",
 		sc->nvm_mode, sc->nvm_bits,
 		sc->nvm_opaddr, sc->nvm_data);*/
 
 	if (sc->nvm_bits == 0) {
 		DPRINTF("eeprom state machine not expecting data! "
 			"0x%x 0x%x 0x%x 0x%x",
 			sc->nvm_mode, sc->nvm_bits,
 			sc->nvm_opaddr, sc->nvm_data);
 		return;
 	}
 	sc->nvm_bits--;
 	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
 		/* shifting out */
 		if (sc->nvm_data & 0x8000) {
 			sc->eeprom_control |= E1000_EECD_DO;
 		} else {
 			sc->eeprom_control &= ~E1000_EECD_DO;
 		}
 		sc->nvm_data <<= 1;
 		if (sc->nvm_bits == 0) {
 			/* read done, back to opcode mode. */
 			sc->nvm_opaddr = 0;
 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
 		}
 	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
 		/* shifting in */
 		sc->nvm_data <<= 1;
 		if (sc->eeprom_control & E1000_EECD_DI) {
 			sc->nvm_data |= 1;
 		}
 		if (sc->nvm_bits == 0) {
 			/* eeprom write */
 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
 			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
 			if (op != E82545_NVM_OPCODE_WRITE) {
 				DPRINTF("Illegal eeprom write op 0x%x",
 					sc->nvm_opaddr);
 			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
 				DPRINTF("Illegal eeprom write addr 0x%x",
 					sc->nvm_opaddr);
 			} else {
 				DPRINTF("eeprom write eeprom[0x%x] = 0x%x",
 				addr, sc->nvm_data);
 				sc->eeprom_data[addr] = sc->nvm_data;
 			}
 			/* back to opcode mode */
 			sc->nvm_opaddr = 0;
 			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
 			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
 		}
 	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
 		sc->nvm_opaddr <<= 1;
 		if (sc->eeprom_control & E1000_EECD_DI) {
 			sc->nvm_opaddr |= 1;
 		}
 		if (sc->nvm_bits == 0) {
 			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
 			switch (op) {
 			case E82545_NVM_OPCODE_EWEN:
 				DPRINTF("eeprom write enable: 0x%x",
 					sc->nvm_opaddr);
 				/* back to opcode mode */
 				sc->nvm_opaddr = 0;
 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
 				break;
 			case E82545_NVM_OPCODE_READ:
 			{
 				uint16_t addr = sc->nvm_opaddr &
 					E82545_NVM_ADDR_MASK;
 				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
 				sc->nvm_bits = E82545_NVM_DATA_BITS;
 				if (addr < E82545_NVM_EEPROM_SIZE) {
 					sc->nvm_data = sc->eeprom_data[addr];
 					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x",
 						addr, sc->nvm_data);
 				} else {
 					DPRINTF("eeprom illegal read: 0x%x",
 						sc->nvm_opaddr);
 					sc->nvm_data = 0;
 				}
 				break;
 			}
 			case E82545_NVM_OPCODE_WRITE:
 				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
 				sc->nvm_bits = E82545_NVM_DATA_BITS;
 				sc->nvm_data = 0;
 				break;
 			default:
 				DPRINTF("eeprom unknown op: 0x%x",
 					sc->nvm_opaddr);
 				/* back to opcode mode */
 				sc->nvm_opaddr = 0;
 				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
 				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
 			}
 		}
 	} else {
 		DPRINTF("eeprom state machine wrong state! "
 			"0x%x 0x%x 0x%x 0x%x",
 			sc->nvm_mode, sc->nvm_bits,
 			sc->nvm_opaddr, sc->nvm_data);
 	}
 }
 
 static void
 e82545_itr_callback(int fd, enum ev_type type, void *param)
 {
 	uint32_t new;
 	struct e82545_softc *sc = param;
 
 	pthread_mutex_lock(&sc->esc_mtx);
 	new = sc->esc_ICR & sc->esc_IMS;
 	if (new && !sc->esc_irq_asserted) {
 		DPRINTF("itr callback: lintr assert %x", new);
 		sc->esc_irq_asserted = 1;
 		pci_lintr_assert(sc->esc_pi);
 	} else {
 		mevent_delete(sc->esc_mevpitr);
 		sc->esc_mevpitr = NULL;
 	}
 	pthread_mutex_unlock(&sc->esc_mtx);
 }
 
 static void
 e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
 {
 	uint32_t new;
 
 	DPRINTF("icr assert: 0x%x", bits);
 	
 	/*
 	 * An interrupt is only generated if bits are set that
 	 * aren't already in the ICR, these bits are unmasked,
 	 * and there isn't an interrupt already pending.
 	 */
 	new = bits & ~sc->esc_ICR & sc->esc_IMS;
 	sc->esc_ICR |= bits;
 
 	if (new == 0) {
 		DPRINTF("icr assert: masked %x, ims %x", new, sc->esc_IMS);
 	} else if (sc->esc_mevpitr != NULL) {
 		DPRINTF("icr assert: throttled %x, ims %x", new, sc->esc_IMS);
 	} else if (!sc->esc_irq_asserted) {
 		DPRINTF("icr assert: lintr assert %x", new);
 		sc->esc_irq_asserted = 1;
 		pci_lintr_assert(sc->esc_pi);
 		if (sc->esc_ITR != 0) {
 			sc->esc_mevpitr = mevent_add(
 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
 			    EVF_TIMER, e82545_itr_callback, sc);
 		}
 	}
 }
 
 static void
 e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
 {
 	uint32_t new;
 
 	/*
 	 * Changing the mask may allow previously asserted
 	 * but masked interrupt requests to generate an interrupt.
 	 */
 	new = bits & sc->esc_ICR & ~sc->esc_IMS;
 	sc->esc_IMS |= bits;
 
 	if (new == 0) {
 		DPRINTF("ims change: masked %x, ims %x", new, sc->esc_IMS);
 	} else if (sc->esc_mevpitr != NULL) {
 		DPRINTF("ims change: throttled %x, ims %x", new, sc->esc_IMS);
 	} else if (!sc->esc_irq_asserted) {
 		DPRINTF("ims change: lintr assert %x", new);
 		sc->esc_irq_asserted = 1;
 		pci_lintr_assert(sc->esc_pi);
 		if (sc->esc_ITR != 0) {
 			sc->esc_mevpitr = mevent_add(
 			    (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
 			    EVF_TIMER, e82545_itr_callback, sc);
 		}
 	}
 }
 
 static void
 e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
 {
 
 	DPRINTF("icr deassert: 0x%x", bits);
 	sc->esc_ICR &= ~bits;
 
 	/*
 	 * If there are no longer any interrupt sources and there
 	 * was an asserted interrupt, clear it
 	 */
 	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
 		DPRINTF("icr deassert: lintr deassert %x", bits);
 		pci_lintr_deassert(sc->esc_pi);
 		sc->esc_irq_asserted = 0;
 	}
 }
 
 static void
 e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
 {
 
 	DPRINTF("intr_write: off %x, val %x", offset, value);
 	
 	switch (offset) {
 	case E1000_ICR:
 		e82545_icr_deassert(sc, value);
 		break;
 	case E1000_ITR:
 		sc->esc_ITR = value;
 		break;
 	case E1000_ICS:
 		sc->esc_ICS = value;	/* not used: store for debug */
 		e82545_icr_assert(sc, value);
 		break;
 	case E1000_IMS:
 		e82545_ims_change(sc, value);
 		break;
 	case E1000_IMC:
 		sc->esc_IMC = value;	/* for debug */
 		sc->esc_IMS &= ~value;
 		// XXX clear interrupts if all ICR bits now masked
 		// and interrupt was pending ?
 		break;
 	default:
 		break;
 	}
 }
 
 static uint32_t
 e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
 {
 	uint32_t retval;
 
 	retval = 0;
 
 	DPRINTF("intr_read: off %x", offset);
 	
 	switch (offset) {
 	case E1000_ICR:
 		retval = sc->esc_ICR;
 		sc->esc_ICR = 0;
 		e82545_icr_deassert(sc, ~0);
 		break;
 	case E1000_ITR:
 		retval = sc->esc_ITR;
 		break;
 	case E1000_ICS:
 		/* write-only register */
 		break;
 	case E1000_IMS:
 		retval = sc->esc_IMS;
 		break;
 	case E1000_IMC:
 		/* write-only register */
 		break;
 	default:
 		break;
 	}
 
 	return (retval);
 }
 
 static void
 e82545_devctl(struct e82545_softc *sc, uint32_t val)
 {
 
 	sc->esc_CTRL = val & ~E1000_CTRL_RST;
 
 	if (val & E1000_CTRL_RST) {
 		DPRINTF("e1k: s/w reset, ctl %x", val);
 		e82545_reset(sc, 1);
 	}
 	/* XXX check for phy reset ? */
 }
 
 static void
 e82545_rx_update_rdba(struct e82545_softc *sc)
 {
 
 	/* XXX verify desc base/len within phys mem range */
 	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |
 	    sc->esc_RDBAL;
 	
 	/* Cache host mapping of guest descriptor array */
 	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
 	    sc->esc_rdba, sc->esc_RDLEN);	
 }
 
 static void
 e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
 {
 	int on;
 
 	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);
 
 	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
 	sc->esc_RCTL = val & ~0xF9204c01;
 
 	DPRINTF("rx_ctl - %s RCTL %x, val %x",
 		on ? "on" : "off", sc->esc_RCTL, val);
 
 	/* state change requested */
 	if (on != sc->esc_rx_enabled) {
 		if (on) {
 			/* Catch disallowed/unimplemented settings */
 			//assert(!(val & E1000_RCTL_LBM_TCVR));
 
 			if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
 				sc->esc_rx_loopback = 1;
 			} else {
 				sc->esc_rx_loopback = 0;
 			}
 
 			e82545_rx_update_rdba(sc);
 			e82545_rx_enable(sc);
 		} else {
 			e82545_rx_disable(sc);
 			sc->esc_rx_loopback = 0;
 			sc->esc_rdba = 0;
 			sc->esc_rxdesc = NULL;
 		}
 	}
 }
 
 static void
 e82545_tx_update_tdba(struct e82545_softc *sc)
 {
 
 	/* XXX verify desc base/len within phys mem range */
 	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;
 
 	/* Cache host mapping of guest descriptor array */
 	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
             sc->esc_TDLEN);
 }
 
 static void
 e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
 {
 	int on;
 	
 	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);
 
 	/* ignore TCTL_EN settings that don't change state */
 	if (on == sc->esc_tx_enabled)
 		return;
 
 	if (on) {
 		e82545_tx_update_tdba(sc);
 		e82545_tx_enable(sc);
 	} else {
 		e82545_tx_disable(sc);
 		sc->esc_tdba = 0;
 		sc->esc_txdesc = NULL;
 	}
 
 	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
 	sc->esc_TCTL = val & ~0xFE800005;
 }
 
 int
 e82545_bufsz(uint32_t rctl)
 {
 
 	switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
 	case (E1000_RCTL_SZ_2048): return (2048);
 	case (E1000_RCTL_SZ_1024): return (1024);
 	case (E1000_RCTL_SZ_512): return (512);
 	case (E1000_RCTL_SZ_256): return (256);
 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
 	case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
 	}
 	return (256);	/* Forbidden value. */
 }
 
 /* XXX one packet at a time until this is debugged */
 static void
 e82545_rx_callback(int fd, enum ev_type type, void *param)
 {
 	struct e82545_softc *sc = param;
 	struct e1000_rx_desc *rxd;
 	struct iovec vec[64];
 	int left, len, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
 	uint32_t cause = 0;
 	uint16_t *tp, tag, head;
 
 	pthread_mutex_lock(&sc->esc_mtx);
 	DPRINTF("rx_run: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
 
 	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
 		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped",
 		    sc->esc_rx_enabled, sc->esc_rx_loopback);
 		while (netbe_rx_discard(sc->esc_be) > 0) {
 		}
 		goto done1;
 	}
 	bufsz = e82545_bufsz(sc->esc_RCTL);
 	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
 	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
 	size = sc->esc_RDLEN / 16;
 	head = sc->esc_RDH;
 	left = (size + sc->esc_RDT - head) % size;
 	if (left < maxpktdesc) {
 		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped",
 		    left, maxpktdesc);
 		while (netbe_rx_discard(sc->esc_be) > 0) {
 		}
 		goto done1;
 	}
 
 	sc->esc_rx_active = 1;
 	pthread_mutex_unlock(&sc->esc_mtx);
 
 	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {
 
 		/* Grab rx descriptor pointed to by the head pointer */
 		for (i = 0; i < maxpktdesc; i++) {
 			rxd = &sc->esc_rxdesc[(head + i) % size];
 			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
 			    rxd->buffer_addr, bufsz);
 			vec[i].iov_len = bufsz;
 		}
 		len = netbe_recv(sc->esc_be, vec, maxpktdesc);
 		if (len <= 0) {
 			DPRINTF("netbe_recv() returned %d", len);
 			goto done;
 		}
 
 		/*
 		 * Adjust the packet length based on whether the CRC needs
 		 * to be stripped or if the packet is less than the minimum
 		 * eth packet size.
 		 */
 		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
 			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
 		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
 			len += ETHER_CRC_LEN;
 		n = (len + bufsz - 1) / bufsz;
 
 		DPRINTF("packet read %d bytes, %d segs, head %d",
 		    len, n, head);
 
 		/* Apply VLAN filter. */
 		tp = (uint16_t *)vec[0].iov_base + 6;
 		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
 		    (ntohs(tp[0]) == sc->esc_VET)) {
 			tag = ntohs(tp[1]) & 0x0fff;
 			if ((sc->esc_fvlan[tag >> 5] &
 			    (1 << (tag & 0x1f))) != 0) {
 				DPRINTF("known VLAN %d", tag);
 			} else {
 				DPRINTF("unknown VLAN %d", tag);
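 				/*
 				 * Drop the frame: head is left in place so
 				 * these descriptors are reused for the next
 				 * packet.
 				 */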
 				n = 0;
 				continue;
 			}
 		}
 
 		/* Update all consumed descriptors. */
 		for (i = 0; i < n - 1; i++) {
 			rxd = &sc->esc_rxdesc[(head + i) % size];
 			rxd->length = bufsz;
 			rxd->csum = 0;
 			rxd->errors = 0;
 			rxd->special = 0;
 			rxd->status = E1000_RXD_STAT_DD;
 		}
 		rxd = &sc->esc_rxdesc[(head + i) % size];
 		rxd->length = len % bufsz;
 		rxd->csum = 0;
 		rxd->errors = 0;
 		rxd->special = 0;
 		/* XXX signal no checksum for now */
 		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
 		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;
 
 		/* Schedule receive interrupts. */
 		if (len <= sc->esc_RSRPD) {
 			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
 		} else {
 			/* XXX: RDRT and RADV timers should be here. */
 			cause |= E1000_ICR_RXT0;
 		}
 
 		head = (head + n) % size;
 		left -= n;
 	}
 
 done:
 	pthread_mutex_lock(&sc->esc_mtx);
 	sc->esc_rx_active = 0;
 	if (sc->esc_rx_enabled == 0)
 		pthread_cond_signal(&sc->esc_rx_cond);
 
 	sc->esc_RDH = head;
 	/*
 	 * Respect E1000_RCTL_RDMTS: assert RXDMT0 when the number of free
 	 * descriptors falls below 1/2, 1/4 or 1/8 of the ring size.
 	 */
 	left = (size + sc->esc_RDT - head) % size;
 	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
 		cause |= E1000_ICR_RXDMT0;
 	/* Assert all accumulated interrupts. */
 	if (cause != 0)
 		e82545_icr_assert(sc, cause);
 done1:
 	DPRINTF("rx_run done: head %x, tail %x", sc->esc_RDH, sc->esc_RDT);
 	pthread_mutex_unlock(&sc->esc_mtx);
 }
 
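 /*
  * Fold the carries of a 32-bit partial sum into 16 bits, as required by
  * the one's-complement Internet checksum (e.g. 0x1FFFE folds to 0xFFFF).
  */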
 static uint16_t
 e82545_carry(uint32_t sum)
 {
 
 	sum = (sum & 0xFFFF) + (sum >> 16);
 	if (sum > 0xFFFF)
 		sum -= 0xFFFF;
 	return (sum);
 }
 
 static uint16_t
 e82545_buf_checksum(uint8_t *buf, int len)
 {
 	int i;
 	uint32_t sum = 0;
 
 	/* Checksum all the pairs of bytes first... */
 	for (i = 0; i < (len & ~1U); i += 2)
 		sum += *((u_int16_t *)(buf + i));
 
 	/*
 	 * If there's a single byte left over, checksum it, too.
 	 * Network byte order is big-endian, so the remaining byte is
 	 * the high byte.
 	 */
 	if (i < len)
 		sum += htons(buf[i] << 8);
 
 	return (e82545_carry(sum));
 }
 
 static uint16_t
 e82545_iov_checksum(struct iovec *iov, int iovcnt, int off, int len)
 {
 	int now, odd;
 	uint32_t sum = 0, s;
 
 	/* Skip completely unneeded vectors. */
 	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {
 		off -= iov->iov_len;
 		iov++;
 		iovcnt--;
 	}
 
 	/* Calculate checksum of requested range. */
 	odd = 0;
 	while (len > 0 && iovcnt > 0) {
 		now = MIN(len, iov->iov_len - off);
 		s = e82545_buf_checksum(iov->iov_base + off, now);
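 		/*
 		 * If the previous chunk ended at an odd offset, byte-swap
 		 * this chunk's sum before adding it: the left shift by 8
 		 * plus the final carry fold is a 16-bit byte swap in
 		 * one's-complement arithmetic.
 		 */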
 		sum += odd ? (s << 8) : s;
 		odd ^= (now & 1);
 		len -= now;
 		off = 0;
 		iov++;
 		iovcnt--;
 	}
 
 	return (e82545_carry(sum));
 }
 
 /*
  * Return the transmit descriptor type.
  */
 int
 e82545_txdesc_type(uint32_t lower)
 {
 	int type;
 
 	type = 0;
 	
 	if (lower & E1000_TXD_CMD_DEXT)
 		type = lower & E1000_TXD_MASK;
 
 	return (type);
 }
 
 static void
 e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
 {
 	uint16_t cksum;
 	int cklen;
 
 	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d",
 	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
 	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1 : INT_MAX;
 	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
 	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
 }
 
 static void
 e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
 {
 
 	if (sc->esc_be == NULL)
 		return;
 
 	(void) netbe_send(sc->esc_be, iov, iovcnt);
 }
 
 static void
 e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
     uint16_t dsize, int *tdwb)
 {
 	union e1000_tx_udesc *dsc;
 
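 	/*
 	 * Walk the completed descriptors and set the DD (descriptor done)
 	 * bit in those that requested status write-back via RS.
 	 */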
 	for ( ; head != tail; head = (head + 1) % dsize) {
 		dsc = &sc->esc_txdesc[head];
 		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
 			dsc->td.upper.data |= E1000_TXD_STAT_DD;
 			*tdwb = 1;
 		}
 	}
 }
 
 static int
 e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
     uint16_t dsize, uint16_t *rhead, int *tdwb)
 {
 	uint8_t *hdr, *hdrp;
 	struct iovec iovb[I82545_MAX_TXSEGS + 2];
 	struct iovec tiov[I82545_MAX_TXSEGS + 2];
 	struct e1000_context_desc *cd;
 	struct ck_info ckinfo[2];
 	struct iovec *iov;
 	union  e1000_tx_udesc *dsc;
 	int desc, dtype, len, ntype, iovcnt, tlen, tcp, tso;
 	int mss, paylen, seg, tiovcnt, left, now, nleft, nnow, pv, pvoff;
 	unsigned hdrlen, vlen;
 	uint32_t tcpsum, tcpseq;
 	uint16_t ipcs, tcpcs, ipid, ohead;
 
 	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
 	iovcnt = 0;
 	tlen = 0;
 	ntype = 0;
 	tso = 0;
 	ohead = head;
 
 	/* iovb[0] and iovb[1] may be used for a writable copy of headers. */
 	iov = &iovb[2];
 
 	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
 		if (head == tail) {
 			*rhead = head;
 			return (0);
 		}
 		dsc = &sc->esc_txdesc[head];
 		dtype = e82545_txdesc_type(dsc->td.lower.data);
 
 		if (desc == 0) {
 			switch (dtype) {
 			case E1000_TXD_TYP_C:
 				DPRINTF("tx ctxt desc idx %d: %016jx "
 				    "%08x%08x",
 				    head, dsc->td.buffer_addr,
 				    dsc->td.upper.data, dsc->td.lower.data);
 				/* Save context and return */
 				sc->esc_txctx = dsc->cd;
 				goto done;
 			case E1000_TXD_TYP_L:
 				DPRINTF("tx legacy desc idx %d: %08x%08x",
 				    head, dsc->td.upper.data, dsc->td.lower.data);
 				/*
 				 * legacy cksum start valid in first descriptor
 				 */
 				ntype = dtype;
 				ckinfo[0].ck_start = dsc->td.upper.fields.css;
 				break;
 			case E1000_TXD_TYP_D:
 				DPRINTF("tx data desc idx %d: %08x%08x",
 				    head, dsc->td.upper.data, dsc->td.lower.data);
 				ntype = dtype;
 				break;
 			default:
 				break;
 			}
 		} else {
 			/* Descriptor type must be consistent */
 			assert(dtype == ntype);
 			DPRINTF("tx next desc idx %d: %08x%08x",
 			    head, dsc->td.upper.data, dsc->td.lower.data);
 		}
 
 		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
 		    dsc->dd.lower.data & 0xFFFFF;
 
 		if (len > 0) {
 			/* Strip checksum supplied by guest. */
 			if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
 			    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0)
 				len -= 2;
 			tlen += len;
 			if (iovcnt < I82545_MAX_TXSEGS) {
 				iov[iovcnt].iov_base = paddr_guest2host(
 				    sc->esc_ctx, dsc->td.buffer_addr, len);
 				iov[iovcnt].iov_len = len;
 			}
 			iovcnt++;
 		}
 
 		/*
 		 * Pull out info that is valid in the final descriptor
 		 * and exit descriptor loop.
 		 */
 		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
 			if (dtype == E1000_TXD_TYP_L) {
 				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
 					ckinfo[0].ck_valid = 1;
 					ckinfo[0].ck_off =
 					    dsc->td.lower.flags.cso;
 					ckinfo[0].ck_len = 0;
 				}
 			} else {
 				cd = &sc->esc_txctx;
 				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
 					tso = 1;
 				if (dsc->dd.upper.fields.popts &
 				    E1000_TXD_POPTS_IXSM)
 					ckinfo[0].ck_valid = 1;
 				if (dsc->dd.upper.fields.popts &
 				    E1000_TXD_POPTS_IXSM || tso) {
 					ckinfo[0].ck_start =
 					    cd->lower_setup.ip_fields.ipcss;
 					ckinfo[0].ck_off =
 					    cd->lower_setup.ip_fields.ipcso;
 					ckinfo[0].ck_len =
 					    cd->lower_setup.ip_fields.ipcse;
 				}
 				if (dsc->dd.upper.fields.popts &
 				    E1000_TXD_POPTS_TXSM)
 					ckinfo[1].ck_valid = 1;
 				if (dsc->dd.upper.fields.popts &
 				    E1000_TXD_POPTS_TXSM || tso) {
 					ckinfo[1].ck_start =
 					    cd->upper_setup.tcp_fields.tucss;
 					ckinfo[1].ck_off =
 					    cd->upper_setup.tcp_fields.tucso;
 					ckinfo[1].ck_len =
 					    cd->upper_setup.tcp_fields.tucse;
 				}
 			}
 			break;
 		}
 	}
 
 	if (iovcnt > I82545_MAX_TXSEGS) {
 		WPRINTF("tx too many descriptors (%d > %d) -- dropped",
 		    iovcnt, I82545_MAX_TXSEGS);
 		goto done;
 	}
 
 	hdrlen = vlen = 0;
 	/* Estimate writable space for VLAN header insertion. */
 	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
 	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
 		hdrlen = ETHER_ADDR_LEN*2;
 		vlen = ETHER_VLAN_ENCAP_LEN;
 	}
 	if (!tso) {
 		/* Estimate required writable space for checksums. */
 		if (ckinfo[0].ck_valid)
 			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2);
 		if (ckinfo[1].ck_valid)
 			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2);
 		/*
 		 * If the first vector is only slightly larger than the
 		 * required header, extend the writable header to cover the
 		 * whole vector.
 		 */
 		if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
 		    iov[0].iov_len < hdrlen + 100)
 			hdrlen = iov[0].iov_len;
 	} else {
 		/* In the TSO case, the header length is provided by software. */
 		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
 
 		/*
 		 * Cap the header length at 240 based on 7.2.4.5 of
 		 * the Intel 82576EB (Rev 2.63) datasheet.
 		 */
 		if (hdrlen > 240) {
 			WPRINTF("TSO hdrlen too large: %d", hdrlen);
 			goto done;
 		}
 
 		/*
 		 * If VLAN insertion is requested, ensure the header
 		 * at least holds the amount of data copied during
 		 * VLAN insertion below.
 		 *
 		 * XXX: Realistic packets will include a full Ethernet
 		 * header before the IP header at ckinfo[0].ck_start,
 		 * but this check is sufficient to prevent
 		 * out-of-bounds access below.
 		 */
 		if (vlen != 0 && hdrlen < ETHER_ADDR_LEN*2) {
 			WPRINTF("TSO hdrlen too small for vlan insertion "
 			    "(%d vs %d) -- dropped", hdrlen,
 			    ETHER_ADDR_LEN*2);
 			goto done;
 		}
 
 		/*
 		 * Ensure that the header length covers the used fields
 		 * in the IP and TCP headers as well as the IP and TCP
 		 * checksums.  The following fields are accessed below:
 		 *
 		 * Header | Field | Offset | Length
 		 * -------+-------+--------+-------
 		 * IPv4   | len   | 2      | 2
 		 * IPv4   | ID    | 4      | 2
 		 * IPv6   | len   | 4      | 2
 		 * TCP    | seq # | 4      | 4
 		 * TCP    | flags | 13     | 1
 		 * UDP    | len   | 4      | 4
 		 */
 		if (hdrlen < ckinfo[0].ck_start + 6 ||
 		    hdrlen < ckinfo[0].ck_off + 2) {
 			WPRINTF("TSO hdrlen too small for IP fields (%d) "
 			    "-- dropped", hdrlen);
 			goto done;
 		}
 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) {
 			if (hdrlen < ckinfo[1].ck_start + 14 ||
 			    (ckinfo[1].ck_valid &&
 			    hdrlen < ckinfo[1].ck_off + 2)) {
 				WPRINTF("TSO hdrlen too small for TCP fields "
 				    "(%d) -- dropped", hdrlen);
 				goto done;
 			}
 		} else {
 			if (hdrlen < ckinfo[1].ck_start + 8) {
 				WPRINTF("TSO hdrlen too small for UDP fields "
 				    "(%d) -- dropped", hdrlen);
 				goto done;
 			}
 		}
 	}
 
 	/* Allocate, fill and prepend writable header vector. */
 	if (hdrlen != 0) {
 		hdr = __builtin_alloca(hdrlen + vlen);
 		hdr += vlen;
 		for (left = hdrlen, hdrp = hdr; left > 0;
 		    left -= now, hdrp += now) {
 			now = MIN(left, iov->iov_len);
 			memcpy(hdrp, iov->iov_base, now);
 			iov->iov_base += now;
 			iov->iov_len -= now;
 			if (iov->iov_len == 0) {
 				iov++;
 				iovcnt--;
 			}
 		}
 		iov--;
 		iovcnt++;
 		iov->iov_base = hdr;
 		iov->iov_len = hdrlen;
 	} else
 		hdr = NULL;
 
 	/* Insert VLAN tag. */
 	if (vlen != 0) {
 		hdr -= ETHER_VLAN_ENCAP_LEN;
 		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
 		hdrlen += ETHER_VLAN_ENCAP_LEN;
 		hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
 		hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
 		hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
 		hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
 		iov->iov_base = hdr;
 		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
 		/* Correct checksum offsets after VLAN tag insertion. */
 		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
 		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
 		if (ckinfo[0].ck_len != 0)
 			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
 		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
 		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
 		if (ckinfo[1].ck_len != 0)
 			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
 	}
 
 	/* Simple non-TSO case. */
 	if (!tso) {
 		/* Calculate checksums and transmit. */
 		if (ckinfo[0].ck_valid)
 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
 		if (ckinfo[1].ck_valid)
 			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
 		e82545_transmit_backend(sc, iov, iovcnt);
 		goto done;
 	}
 
 	/* Doing TSO. */
 	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
 	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
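 	/* The context descriptor's low 20 bits hold the total payload length. */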
 	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
 	DPRINTF("tx %s segmentation offload %d+%d/%d bytes %d iovs",
 	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
 	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
 	tcpseq = 0;
 	if (tcp)
 		tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
 	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
 	tcpcs = 0;
 	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
 		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];
 	pv = 1;
 	pvoff = 0;
 	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
 		now = MIN(left, mss);
 
 		/* Construct IOVs for the segment. */
 		/* Include whole original header. */
 		tiov[0].iov_base = hdr;
 		tiov[0].iov_len = hdrlen;
 		tiovcnt = 1;
 		/* Include respective part of payload IOV. */
 		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
 			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
 			tiov[tiovcnt].iov_base = iov[pv].iov_base + pvoff;
 			tiov[tiovcnt++].iov_len = nnow;
 			if (pvoff + nnow == iov[pv].iov_len) {
 				pv++;
 				pvoff = 0;
 			} else
 				pvoff += nnow;
 		}
 		DPRINTF("tx segment %d %d+%d bytes %d iovs",
 		    seg, hdrlen, now, tiovcnt);
 
 		/* Update IP header. */
 		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
 			/* IPv4 -- set length and ID */
 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
 			    htons(hdrlen - ckinfo[0].ck_start + now);
 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
 			    htons(ipid + seg);
 		} else {
 			/* IPv6 -- set payload length (excludes 40-byte header) */
 			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
 			    htons(hdrlen - ckinfo[0].ck_start - 40 +
 				  now);
 		}
 
 		/*
 		 * Update the pseudo-header checksum: add this segment's L4
 		 * length to the partial sum provided by the guest.
 		 */
 		tcpsum = tcpcs;
 		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);
 
 		/* Update TCP/UDP headers. */
 		if (tcp) {
 			/* Update sequence number and FIN/PUSH flags. */
 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
 			    htonl(tcpseq + paylen - left);
 			if (now < left) {
 				hdr[ckinfo[1].ck_start + 13] &=
 				    ~(TH_FIN | TH_PUSH);
 			}
 		} else {
 			/* Update payload length. */
 			*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
 			    hdrlen - ckinfo[1].ck_start + now;
 		}
 
 		/* Calculate checksums and transmit. */
 		if (ckinfo[0].ck_valid) {
 			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
 		}
 		if (ckinfo[1].ck_valid) {
 			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
 			    e82545_carry(tcpsum);
 			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
 		}
 		e82545_transmit_backend(sc, tiov, tiovcnt);
 	}
 
 done:
 	head = (head + 1) % dsize;
 	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
 
 	*rhead = head;
 	return (desc + 1);
 }
 
 static void
 e82545_tx_run(struct e82545_softc *sc)
 {
 	uint32_t cause;
 	uint16_t head, rhead, tail, size;
 	int lim, tdwb, sent;
 
 	head = sc->esc_TDH;
 	tail = sc->esc_TDT;
 	size = sc->esc_TDLEN / 16;
 	DPRINTF("tx_run: head %x, rhead %x, tail %x",
 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
 
 	pthread_mutex_unlock(&sc->esc_mtx);
 	rhead = head;
 	tdwb = 0;
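 	/* Process at most a quarter of the ring per invocation. */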
 	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
 		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
 		if (sent == 0)
 			break;
 		head = rhead;
 	}
 	pthread_mutex_lock(&sc->esc_mtx);
 
 	sc->esc_TDH = head;
 	sc->esc_TDHr = rhead;
 	cause = 0;
 	if (tdwb)
 		cause |= E1000_ICR_TXDW;
 	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
 		cause |= E1000_ICR_TXQE;
 	if (cause)
 		e82545_icr_assert(sc, cause);
 
 	DPRINTF("tx_run done: head %x, rhead %x, tail %x",
 	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
 }
 
 static _Noreturn void *
 e82545_tx_thread(void *param)
 {
 	struct e82545_softc *sc = param;
 
 	pthread_mutex_lock(&sc->esc_mtx);
 	for (;;) {
 		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
 			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
 				break;
 			sc->esc_tx_active = 0;
 			if (sc->esc_tx_enabled == 0)
 				pthread_cond_signal(&sc->esc_tx_cond);
 			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
 		}
 		sc->esc_tx_active = 1;
 
 		/* Process some tx descriptors.  Lock dropped inside. */
 		e82545_tx_run(sc);
 	}
 }
 
 static void
 e82545_tx_start(struct e82545_softc *sc)
 {
 
 	if (sc->esc_tx_active == 0)
 		pthread_cond_signal(&sc->esc_tx_cond);
 }
 
 static void
 e82545_tx_enable(struct e82545_softc *sc)
 {
 
 	sc->esc_tx_enabled = 1;
 }
 
 static void
 e82545_tx_disable(struct e82545_softc *sc)
 {
 
 	sc->esc_tx_enabled = 0;
 	while (sc->esc_tx_active)
 		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
 }
 
 static void
 e82545_rx_enable(struct e82545_softc *sc)
 {
 
 	sc->esc_rx_enabled = 1;
 }
 
 static void
 e82545_rx_disable(struct e82545_softc *sc)
 {
 
 	sc->esc_rx_enabled = 0;
 	while (sc->esc_rx_active)
 		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
 }
 
 static void
 e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
 {
 	struct eth_uni *eu;
 	int idx;
 
 	idx = reg >> 1;
 	assert(idx < 15);
 
 	eu = &sc->esc_uni[idx];
 
 	if (reg & 0x1) {
 		/* RAH */
 		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
 		eu->eu_addrsel = (wval >> 16) & 0x3;
 		eu->eu_eth.octet[5] = wval >> 8;
 		eu->eu_eth.octet[4] = wval;
 	} else {
 		/* RAL */
 		eu->eu_eth.octet[3] = wval >> 24;
 		eu->eu_eth.octet[2] = wval >> 16;
 		eu->eu_eth.octet[1] = wval >> 8;
 		eu->eu_eth.octet[0] = wval;
 	}
 }
 
 static uint32_t
 e82545_read_ra(struct e82545_softc *sc, int reg)
 {
 	struct eth_uni *eu;
 	uint32_t retval;
 	int idx;
 
 	idx = reg >> 1;
 	assert(idx < 15);
 
 	eu = &sc->esc_uni[idx];
 
 	if (reg & 0x1) {
 		/* RAH */
 		retval = (eu->eu_valid << 31) |
 			 (eu->eu_addrsel << 16) |
 			 (eu->eu_eth.octet[5] << 8) |
 			 eu->eu_eth.octet[4];
 	} else {
 		/* RAL */
 		retval = (eu->eu_eth.octet[3] << 24) |
 			 (eu->eu_eth.octet[2] << 16) |
 			 (eu->eu_eth.octet[1] << 8) |
 			 eu->eu_eth.octet[0];
 	}
 
 	return (retval);	
 }
 
 static void
 e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
 {
 	int ridx;
 	
 	if (offset & 0x3) {
 		DPRINTF("Unaligned register write offset:0x%x value:0x%x", offset, value);
 		return;
 	}
 	DPRINTF("Register write: 0x%x value: 0x%x", offset, value);
 
 	switch (offset) {
 	case E1000_CTRL:
 	case E1000_CTRL_DUP:
 		e82545_devctl(sc, value);
 		break;
 	case E1000_FCAL:
 		sc->esc_FCAL = value;
 		break;
 	case E1000_FCAH:
 		sc->esc_FCAH = value & ~0xFFFF0000;
 		break;
 	case E1000_FCT:
 		sc->esc_FCT = value & ~0xFFFF0000;
 		break;
 	case E1000_VET:
 		sc->esc_VET = value & ~0xFFFF0000;
 		break;
 	case E1000_FCTTV:
 		sc->esc_FCTTV = value & ~0xFFFF0000;
 		break;
 	case E1000_LEDCTL:
 		sc->esc_LEDCTL = value & ~0x30303000;
 		break;
 	case E1000_PBA:
 		sc->esc_PBA = value & 0x0000FF80;
 		break;
 	case E1000_ICR:
 	case E1000_ITR:
 	case E1000_ICS:
 	case E1000_IMS:
 	case E1000_IMC:
 		e82545_intr_write(sc, offset, value);
 		break;
 	case E1000_RCTL:
 		e82545_rx_ctl(sc, value);
 		break;
 	case E1000_FCRTL:
 		sc->esc_FCRTL = value & ~0xFFFF0007;
 		break;
 	case E1000_FCRTH:
 		sc->esc_FCRTH = value & ~0xFFFF0007;
 		break;
 	case E1000_RDBAL(0):
 		sc->esc_RDBAL = value & ~0xF;
 		if (sc->esc_rx_enabled) {
 			/* Apparently legal: update cached address */
 			e82545_rx_update_rdba(sc);
 		}
 		break;
 	case E1000_RDBAH(0):
 		assert(!sc->esc_rx_enabled);
 		sc->esc_RDBAH = value;
 		break;
 	case E1000_RDLEN(0):
 		assert(!sc->esc_rx_enabled);
 		sc->esc_RDLEN = value & ~0xFFF0007F;
 		break;
 	case E1000_RDH(0):
 		/* XXX should only ever be zero ? Range check ? */
 		sc->esc_RDH = value;
 		break;
 	case E1000_RDT(0):
 		/* XXX if this opens up the rx ring, do something ? */
 		sc->esc_RDT = value;
 		break;
 	case E1000_RDTR:
 		/* ignore FPD bit 31 */
 		sc->esc_RDTR = value & ~0xFFFF0000;
 		break;
 	case E1000_RXDCTL(0):
 		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
 		break;
 	case E1000_RADV:
 		sc->esc_RADV = value & ~0xFFFF0000;
 		break;
 	case E1000_RSRPD:
 		sc->esc_RSRPD = value & ~0xFFFFF000;
 		break;
 	case E1000_RXCSUM:
 		sc->esc_RXCSUM = value & ~0xFFFFF800;
 		break;
 	case E1000_TXCW:
 		sc->esc_TXCW = value & ~0x3FFF0000;
 		break;
 	case E1000_TCTL:
 		e82545_tx_ctl(sc, value);
 		break;
 	case E1000_TIPG:
 		sc->esc_TIPG = value;
 		break;
 	case E1000_AIT:
 		sc->esc_AIT = value;
 		break;
 	case E1000_TDBAL(0):
 		sc->esc_TDBAL = value & ~0xF;
 		if (sc->esc_tx_enabled) {
 			/* Apparently legal */
 			e82545_tx_update_tdba(sc);
 		}
 		break;
 	case E1000_TDBAH(0):
 		//assert(!sc->esc_tx_enabled);		
 		sc->esc_TDBAH = value;
 		break;
 	case E1000_TDLEN(0):
 		//assert(!sc->esc_tx_enabled);
 		sc->esc_TDLEN = value & ~0xFFF0007F;
 		break;
 	case E1000_TDH(0):
 		//assert(!sc->esc_tx_enabled);
 		/* XXX should only ever be zero ? Range check ? */
 		sc->esc_TDHr = sc->esc_TDH = value;
 		break;
 	case E1000_TDT(0):
 		/* XXX range check ? */
 		sc->esc_TDT = value;
 		if (sc->esc_tx_enabled)
 			e82545_tx_start(sc);
 		break;
 	case E1000_TIDV:
 		sc->esc_TIDV = value & ~0xFFFF0000;
 		break;
 	case E1000_TXDCTL(0):
 		//assert(!sc->esc_tx_enabled);
 		sc->esc_TXDCTL = value & ~0xC0C0C0;
 		break;
 	case E1000_TADV:
 		sc->esc_TADV = value & ~0xFFFF0000;
 		break;
 	case E1000_RAL(0) ... E1000_RAH(15):
 		/* convert to u32 offset */
 		ridx = (offset - E1000_RAL(0)) >> 2;
 		e82545_write_ra(sc, ridx, value);
 		break;
 	case E1000_MTA ... (E1000_MTA + (127*4)):
 		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
 		break;
 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
 		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
 		break;		
 	case E1000_EECD:
 	{
 		//DPRINTF("EECD write 0x%x -> 0x%x", sc->eeprom_control, value);
 		/* edge triggered low->high */
 		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
 			0 : (value & E1000_EECD_SK));
 		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
 					E1000_EECD_DI|E1000_EECD_REQ);
 		sc->eeprom_control &= ~eecd_mask;
 		sc->eeprom_control |= (value & eecd_mask);
 		/* grant/revoke immediately */
 		if (value & E1000_EECD_REQ) {
 			sc->eeprom_control |= E1000_EECD_GNT;
 		} else {
                         sc->eeprom_control &= ~E1000_EECD_GNT;
 		}
 		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
 			e82545_eecd_strobe(sc);
 		}
 		return;
 	}
 	case E1000_MDIC:
 	{
 		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
 						E1000_MDIC_REG_SHIFT);
 		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
 						E1000_MDIC_PHY_SHIFT);
 		sc->mdi_control =
 			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
 		if ((value & E1000_MDIC_READY) != 0) {
 			DPRINTF("Incorrect MDIC ready bit: 0x%x", value);
 			return;
 		}
 		switch (value & E82545_MDIC_OP_MASK) {
 		case E1000_MDIC_OP_READ:
 			sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
 			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
 			break;
 		case E1000_MDIC_OP_WRITE:
 			e82545_write_mdi(sc, reg_addr, phy_addr,
 				value & E82545_MDIC_DATA_MASK);
 			break;
 		default:
 			DPRINTF("Unknown MDIC op: 0x%x", value);
 			return;
 		}
 		/* TODO: barrier? */
 		sc->mdi_control |= E1000_MDIC_READY;
 		if (value & E82545_MDIC_IE) {
 			// TODO: generate interrupt
 		}
 		return;
 	}
 	case E1000_MANC:
 	case E1000_STATUS: 
 		return;
 	default:
 		DPRINTF("Unknown write register: 0x%x value:%x", offset, value);
 		return;
 	}
 }
 
 static uint32_t
 e82545_read_register(struct e82545_softc *sc, uint32_t offset)
 {
 	uint32_t retval;
 	int ridx;
 
 	if (offset & 0x3) {
 		DPRINTF("Unaligned register read offset:0x%x", offset);
 		return (0);
 	}
 
 	DPRINTF("Register read: 0x%x", offset);
 
 	switch (offset) {
 	case E1000_CTRL:
 		retval = sc->esc_CTRL;
 		break;
 	case E1000_STATUS:
 		retval = E1000_STATUS_FD | E1000_STATUS_LU |
 		    E1000_STATUS_SPEED_1000;
 		break;
 	case E1000_FCAL:
 		retval = sc->esc_FCAL;
 		break;
 	case E1000_FCAH:
 		retval = sc->esc_FCAH;
 		break;
 	case E1000_FCT:
 		retval = sc->esc_FCT;
 		break;
 	case E1000_VET:
 		retval = sc->esc_VET;
 		break;
 	case E1000_FCTTV:
 		retval = sc->esc_FCTTV;
 		break;
 	case E1000_LEDCTL:
 		retval = sc->esc_LEDCTL;
 		break;
 	case E1000_PBA:
 		retval = sc->esc_PBA;
 		break;
 	case E1000_ICR:
 	case E1000_ITR:
 	case E1000_ICS:
 	case E1000_IMS:
 	case E1000_IMC:
 		retval = e82545_intr_read(sc, offset);
 		break;
 	case E1000_RCTL:
 		retval = sc->esc_RCTL;
 		break;
 	case E1000_FCRTL:
 		retval = sc->esc_FCRTL;
 		break;
 	case E1000_FCRTH:
 		retval = sc->esc_FCRTH;
 		break;
 	case E1000_RDBAL(0):
 		retval = sc->esc_RDBAL;
 		break;
 	case E1000_RDBAH(0):
 		retval = sc->esc_RDBAH;
 		break;
 	case E1000_RDLEN(0):
 		retval = sc->esc_RDLEN;
 		break;
 	case E1000_RDH(0):
 		retval = sc->esc_RDH;
 		break;
 	case E1000_RDT(0):
 		retval = sc->esc_RDT;
 		break;
 	case E1000_RDTR:
 		retval = sc->esc_RDTR;
 		break;
 	case E1000_RXDCTL(0):
 		retval = sc->esc_RXDCTL;
 		break;
 	case E1000_RADV:
 		retval = sc->esc_RADV;
 		break;
 	case E1000_RSRPD:
 		retval = sc->esc_RSRPD;
 		break;
 	case E1000_RXCSUM:	       
 		retval = sc->esc_RXCSUM;
 		break;
 	case E1000_TXCW:
 		retval = sc->esc_TXCW;
 		break;
 	case E1000_TCTL:
 		retval = sc->esc_TCTL;
 		break;
 	case E1000_TIPG:
 		retval = sc->esc_TIPG;
 		break;
 	case E1000_AIT:
 		retval = sc->esc_AIT;
 		break;
 	case E1000_TDBAL(0):
 		retval = sc->esc_TDBAL;
 		break;
 	case E1000_TDBAH(0):
 		retval = sc->esc_TDBAH;
 		break;
 	case E1000_TDLEN(0):
 		retval = sc->esc_TDLEN;
 		break;
 	case E1000_TDH(0):
 		retval = sc->esc_TDH;
 		break;
 	case E1000_TDT(0):
 		retval = sc->esc_TDT;
 		break;
 	case E1000_TIDV:
 		retval = sc->esc_TIDV;
 		break;
 	case E1000_TXDCTL(0):
 		retval = sc->esc_TXDCTL;
 		break;
 	case E1000_TADV:
 		retval = sc->esc_TADV;
 		break;
 	case E1000_RAL(0) ... E1000_RAH(15):
 		/* convert to u32 offset */
 		ridx = (offset - E1000_RAL(0)) >> 2;
 		retval = e82545_read_ra(sc, ridx);
 		break;
 	case E1000_MTA ... (E1000_MTA + (127*4)):
 		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
 		break;
 	case E1000_VFTA ... (E1000_VFTA + (127*4)):
 		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
 		break;		
 	case E1000_EECD:
 		//DPRINTF("EECD read %x", sc->eeprom_control);
 		retval = sc->eeprom_control;
 		break;
 	case E1000_MDIC:
 		retval = sc->mdi_control;
 		break;
 	case E1000_MANC:
 		retval = 0;
 		break;
 	/* stats that we emulate. */
 	case E1000_MPC:
 		retval = sc->missed_pkt_count;
 		break;
 	case E1000_PRC64:
 		retval = sc->pkt_rx_by_size[0];
 		break;
 	case E1000_PRC127:
 		retval = sc->pkt_rx_by_size[1];
 		break;
 	case E1000_PRC255:
 		retval = sc->pkt_rx_by_size[2];
 		break;
 	case E1000_PRC511:
 		retval = sc->pkt_rx_by_size[3];
 		break;
 	case E1000_PRC1023:
 		retval = sc->pkt_rx_by_size[4];
 		break;
 	case E1000_PRC1522:
 		retval = sc->pkt_rx_by_size[5];
 		break;
 	case E1000_GPRC:
 		retval = sc->good_pkt_rx_count;
 		break;
 	case E1000_BPRC:
 		retval = sc->bcast_pkt_rx_count;
 		break;
 	case E1000_MPRC:
 		retval = sc->mcast_pkt_rx_count;
 		break;
 	case E1000_GPTC:
 	case E1000_TPT:
 		retval = sc->good_pkt_tx_count;
 		break;
 	case E1000_GORCL:
 		retval = (uint32_t)sc->good_octets_rx;
 		break;
 	case E1000_GORCH:
 		retval = (uint32_t)(sc->good_octets_rx >> 32);
 		break;
 	case E1000_TOTL:
 	case E1000_GOTCL:
 		retval = (uint32_t)sc->good_octets_tx;
 		break;
 	case E1000_TOTH:
 	case E1000_GOTCH:
 		retval = (uint32_t)(sc->good_octets_tx >> 32);
 		break;
 	case E1000_ROC:
 		retval = sc->oversize_rx_count;
 		break;
 	case E1000_TORL:
 		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
 		break;
 	case E1000_TORH:
 		retval = (uint32_t)((sc->good_octets_rx +
 		    sc->missed_octets) >> 32);
 		break;
 	case E1000_TPR:
 		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
 		    sc->oversize_rx_count;
 		break;
 	case E1000_PTC64:
 		retval = sc->pkt_tx_by_size[0];
 		break;
 	case E1000_PTC127:
 		retval = sc->pkt_tx_by_size[1];
 		break;
 	case E1000_PTC255:
 		retval = sc->pkt_tx_by_size[2];
 		break;
 	case E1000_PTC511:
 		retval = sc->pkt_tx_by_size[3];
 		break;
 	case E1000_PTC1023:
 		retval = sc->pkt_tx_by_size[4];
 		break;
 	case E1000_PTC1522:
 		retval = sc->pkt_tx_by_size[5];
 		break;
 	case E1000_MPTC:
 		retval = sc->mcast_pkt_tx_count;
 		break;
 	case E1000_BPTC:
 		retval = sc->bcast_pkt_tx_count;
 		break;
 	case E1000_TSCTC:
 		retval = sc->tso_tx_count;
 		break;
 	/* stats that are always 0. */
 	case E1000_CRCERRS:
 	case E1000_ALGNERRC:
 	case E1000_SYMERRS:
 	case E1000_RXERRC:
 	case E1000_SCC:
 	case E1000_ECOL:
 	case E1000_MCC:
 	case E1000_LATECOL:
 	case E1000_COLC:
 	case E1000_DC:
 	case E1000_TNCRS:
 	case E1000_SEC:
 	case E1000_CEXTERR:
 	case E1000_RLEC:
 	case E1000_XONRXC:
 	case E1000_XONTXC:
 	case E1000_XOFFRXC:
 	case E1000_XOFFTXC:
 	case E1000_FCRUC:
 	case E1000_RNBC:
 	case E1000_RUC:
 	case E1000_RFC:
 	case E1000_RJC:
 	case E1000_MGTPRC:
 	case E1000_MGTPDC:
 	case E1000_MGTPTC:
 	case E1000_TSCTFC:
 		retval = 0;
 		break;
 	default:
 		DPRINTF("Unknown read register: 0x%x", offset);
 		retval = 0;
 		break;
 	}
 
 	return (retval);
 }
 
 static void
 e82545_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
 	     uint64_t offset, int size, uint64_t value)
 {
 	struct e82545_softc *sc;
 
 	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d", baridx, offset, value, size);
 
 	sc = pi->pi_arg;
 
 	pthread_mutex_lock(&sc->esc_mtx);
 
 	switch (baridx) {
 	case E82545_BAR_IO:
 		switch (offset) {
 		case E82545_IOADDR:
 			if (size != 4) {
 				DPRINTF("Wrong io addr write sz:%d value:0x%lx", size, value);
 			} else
 				sc->io_addr = (uint32_t)value;
 			break;
 		case E82545_IODATA:
 			if (size != 4) {
 				DPRINTF("Wrong io data write size:%d value:0x%lx", size, value);
 			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
 				DPRINTF("Non-register io write addr:0x%x value:0x%lx", sc->io_addr, value);
 			} else
 				e82545_write_register(sc, sc->io_addr,
 						      (uint32_t)value);
 			break;
 		default:
 			DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d", offset, value, size);
 			break;
 		}
 		break;
 	case E82545_BAR_REGISTER:
 		if (size != 4) {
 			DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx", size, offset, value);
 		} else
 			e82545_write_register(sc, (uint32_t)offset,
 					      (uint32_t)value);
 		break;
 	default:
 		DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d",
 			baridx, offset, value, size);
 	}
 
 	pthread_mutex_unlock(&sc->esc_mtx);
 }
 
 static uint64_t
 e82545_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
 	    uint64_t offset, int size)
 {
 	struct e82545_softc *sc;
 	uint64_t retval;
 	
 	//DPRINTF("Read  bar:%d offset:0x%lx size:%d", baridx, offset, size);
 	sc = pi->pi_arg;
 	retval = 0;
 
 	pthread_mutex_lock(&sc->esc_mtx);
 
 	switch (baridx) {
 	case E82545_BAR_IO:
 		switch (offset) {
 		case E82545_IOADDR:
 			if (size != 4) {
 				DPRINTF("Wrong io addr read sz:%d", size);
 			} else
 				retval = sc->io_addr;
 			break;
 		case E82545_IODATA:
 			if (size != 4) {
 				DPRINTF("Wrong io data read sz:%d", size);
 			}
 			if (sc->io_addr > E82545_IO_REGISTER_MAX) {
 				DPRINTF("Non-register io read addr:0x%x",
 					sc->io_addr);
 			} else
 				retval = e82545_read_register(sc, sc->io_addr);
 			break;
 		default:
 			DPRINTF("Unknown io bar read offset:0x%lx size:%d",
 				offset, size);
 			break;
 		}
 		break;
 	case E82545_BAR_REGISTER:
 		if (size != 4) {
 			DPRINTF("Wrong register read size:%d offset:0x%lx",
 				size, offset);
 		} else
 			retval = e82545_read_register(sc, (uint32_t)offset);
 		break;
 	default:
 		DPRINTF("Unknown read bar:%d offset:0x%lx size:%d",
 			baridx, offset, size);
 		break;
 	}
 
 	pthread_mutex_unlock(&sc->esc_mtx);
 
 	return (retval);
 }
 
 static void
 e82545_reset(struct e82545_softc *sc, int drvr)
 {
 	int i;
 
 	e82545_rx_disable(sc);
 	e82545_tx_disable(sc);
 
 	/* clear outstanding interrupts */
 	if (sc->esc_irq_asserted)
 		pci_lintr_deassert(sc->esc_pi);
 
 	/* misc */
 	if (!drvr) {
 		sc->esc_FCAL = 0;
 		sc->esc_FCAH = 0;
 		sc->esc_FCT = 0;
 		sc->esc_VET = 0;
 		sc->esc_FCTTV = 0;
 	}
 	sc->esc_LEDCTL = 0x07061302;
 	sc->esc_PBA = 0x00100030;
 	
 	/* start nvm in opcode mode. */
 	sc->nvm_opaddr = 0;
 	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
 	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
 	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
 	e82545_init_eeprom(sc);
 
 	/* interrupt */
 	sc->esc_ICR = 0;
 	sc->esc_ITR = 250;
 	sc->esc_ICS = 0;
 	sc->esc_IMS = 0;
 	sc->esc_IMC = 0;
 		
 	/* L2 filters */
 	if (!drvr) {
 		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
 		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
 		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));
 
 		/* XXX not necessary on 82545 ?? */
 		sc->esc_uni[0].eu_valid = 1;
 		memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,
 		    ETHER_ADDR_LEN);
 	} else {
 		/* Clear RAH valid bits */
 		for (i = 0; i < 16; i++)
 			sc->esc_uni[i].eu_valid = 0;
 	}
 	
 	/* receive */
 	if (!drvr) {
 		sc->esc_RDBAL = 0;
 		sc->esc_RDBAH = 0;
 	}
 	sc->esc_RCTL = 0;
 	sc->esc_FCRTL = 0;
 	sc->esc_FCRTH = 0;
 	sc->esc_RDLEN = 0;
 	sc->esc_RDH = 0;
 	sc->esc_RDT = 0;
 	sc->esc_RDTR = 0;
 	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
 	sc->esc_RADV = 0;
 	sc->esc_RXCSUM = 0;
 
 	/* transmit */
 	if (!drvr) {
 		sc->esc_TDBAL = 0;
 		sc->esc_TDBAH = 0;
 		sc->esc_TIPG = 0;
 		sc->esc_AIT = 0;
 		sc->esc_TIDV = 0;
 		sc->esc_TADV = 0;
 	}
 	sc->esc_tdba = 0;
 	sc->esc_txdesc = NULL;
 	sc->esc_TXCW = 0;
 	sc->esc_TCTL = 0;
 	sc->esc_TDLEN = 0;
 	sc->esc_TDT = 0;
 	sc->esc_TDHr = sc->esc_TDH = 0;
 	sc->esc_TXDCTL = 0;
 }
 
 static int
 e82545_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 {
 	char nstr[80];
 	struct e82545_softc *sc;
 	char *devname;
 	char *vtopts;
 	int mac_provided;
 
 	DPRINTF("Loading with options: %s", opts);
 
 	/* Setup our softc */
 	sc = calloc(1, sizeof(*sc));
 
 	pi->pi_arg = sc;
 	sc->esc_pi = pi;
 	sc->esc_ctx = ctx;
 
 	pthread_mutex_init(&sc->esc_mtx, NULL);
 	pthread_cond_init(&sc->esc_rx_cond, NULL);
 	pthread_cond_init(&sc->esc_tx_cond, NULL);
 	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
 	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
 	    pi->pi_func);
         pthread_set_name_np(sc->esc_tx_tid, nstr);
 
 	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
 	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
 	pci_set_cfgdata8(pi,  PCIR_CLASS, PCIC_NETWORK);
 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
 	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
 
 	pci_set_cfgdata8(pi,  PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
 	pci_set_cfgdata8(pi,  PCIR_INTPIN, 0x1);
 	
 	/* TODO: this card also supports MSI, but the FreeBSD driver for
 	 * it does not, so MSI has not been implemented. */
 	pci_lintr_request(pi);
 
 	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
 		E82545_BAR_REGISTER_LEN);
 	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
 		E82545_BAR_FLASH_LEN);
 	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
 		E82545_BAR_IO_LEN);
 
 	/*
 	 * Attempt to open the net backend and read the MAC address
 	 * if specified.  Copied from virtio-net, slightly modified.
 	 */
 	mac_provided = 0;
 	sc->esc_be = NULL;
 	if (opts != NULL) {
-		int err;
+		int err = 0;
 
 		devname = vtopts = strdup(opts);
 		(void) strsep(&vtopts, ",");
 
-		if (vtopts != NULL) {
-			err = net_parsemac(vtopts, sc->esc_mac.octet);
-			if (err != 0) {
-				free(devname);
-				return (err);
+		/*
+		 * Parse the list of options in the form
+		 *     key1=value1,...,keyN=valueN.
+		 */
+		while (vtopts != NULL) {
+			char *value = vtopts;
+			char *key;
+
+			key = strsep(&value, "=");
+			if (value == NULL)
+				break;
+			vtopts = value;
+			(void) strsep(&vtopts, ",");
+
+			if (strcmp(key, "mac") == 0) {
+				err = net_parsemac(value, sc->esc_mac.octet);
+				if (err)
+					break;
+				mac_provided = 1;
 			}
-			mac_provided = 1;
+		}
+
+		if (err) {
+			free(devname);
+			return (err);
 		}
 
 		err = netbe_init(&sc->esc_be, devname, e82545_rx_callback, sc);
 		free(devname);
 		if (err)
 			return (err);
 	}
 
 	if (!mac_provided) {
 		net_genmac(pi, sc->esc_mac.octet);
 	}
 
 	netbe_rx_enable(sc->esc_be);
 
 	/* H/w initiated reset */
 	e82545_reset(sc, 0);
 
 	return (0);
 }
 
 struct pci_devemu pci_de_e82545 = {
 	.pe_emu = 	"e1000",
 	.pe_init =	e82545_init,
 	.pe_barwrite =	e82545_write,
 	.pe_barread =	e82545_read
 };
 PCI_EMUL_SET(pci_de_e82545);
 
Index: stable/12/usr.sbin/bhyve/pci_virtio_net.c
===================================================================
--- stable/12/usr.sbin/bhyve/pci_virtio_net.c	(revision 358184)
+++ stable/12/usr.sbin/bhyve/pci_virtio_net.c	(revision 358185)
@@ -1,603 +1,709 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2011 NetApp, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/linker_set.h>
 #include <sys/select.h>
 #include <sys/uio.h>
 #include <sys/ioctl.h>
 #include <net/ethernet.h>
 #include <net/if.h> /* IFNAMSIZ */
 
 #include <err.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <string.h>
 #include <strings.h>
 #include <unistd.h>
 #include <assert.h>
 #include <pthread.h>
 #include <pthread_np.h>
 
 #include "bhyverun.h"
 #include "debug.h"
 #include "pci_emul.h"
 #include "mevent.h"
 #include "virtio.h"
 #include "net_utils.h"
 #include "net_backends.h"
 #include "iov.h"
 
 #define VTNET_RINGSZ	1024
 
 #define VTNET_MAXSEGS	256
 
 #define VTNET_MAX_PKT_LEN	(65536 + 64)
 
 #define VTNET_S_HOSTCAPS      \
   ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | \
     VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)
 
 /*
  * PCI config-space "registers"
  */
 struct virtio_net_config {
 	uint8_t  mac[6];
 	uint16_t status;
 } __packed;
 
 /*
  * Queue definitions.
  */
 #define VTNET_RXQ	0
 #define VTNET_TXQ	1
 #define VTNET_CTLQ	2	/* NB: not yet supported */
 
 #define VTNET_MAXQ	3
 
 /*
  * Debug printf
  */
 static int pci_vtnet_debug;
 #define DPRINTF(params) if (pci_vtnet_debug) PRINTLN params
 #define WPRINTF(params) PRINTLN params
 
 /*
  * Per-device softc
  */
 struct pci_vtnet_softc {
 	struct virtio_softc vsc_vs;
 	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
 	pthread_mutex_t vsc_mtx;
 
 	net_backend_t	*vsc_be;
 
 	int		resetting;	/* protected by tx_mtx */
 
 	uint64_t	vsc_features;	/* negotiated features */
 	
 	pthread_mutex_t	rx_mtx;
 	int		rx_merge;	/* merged rx bufs in use */
 
 	pthread_t 	tx_tid;
 	pthread_mutex_t	tx_mtx;
 	pthread_cond_t	tx_cond;
 	int		tx_in_progress;
 
+	size_t		vhdrlen;	/* virtio-net header length */
+	size_t		be_vhdrlen;	/* backend's vnet header length */
+
 	struct virtio_net_config vsc_config;
 	struct virtio_consts vsc_consts;
 };
 
 static void pci_vtnet_reset(void *);
 /* static void pci_vtnet_notify(void *, struct vqueue_info *); */
 static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
 static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
 static void pci_vtnet_neg_features(void *, uint64_t);
 
 static struct virtio_consts vtnet_vi_consts = {
 	"vtnet",		/* our name */
 	VTNET_MAXQ - 1,		/* we currently support 2 virtqueues */
 	sizeof(struct virtio_net_config), /* config reg size */
 	pci_vtnet_reset,	/* reset */
 	NULL,			/* device-wide qnotify -- not used */
 	pci_vtnet_cfgread,	/* read PCI config */
 	pci_vtnet_cfgwrite,	/* write PCI config */
 	pci_vtnet_neg_features,	/* apply negotiated features */
 	VTNET_S_HOSTCAPS,	/* our capabilities */
 };
 
 static void
 pci_vtnet_reset(void *vsc)
 {
 	struct pci_vtnet_softc *sc = vsc;
 
 	DPRINTF(("vtnet: device reset requested !"));
 
 	/* Acquire the RX lock to block RX processing. */
 	pthread_mutex_lock(&sc->rx_mtx);
 
 	/*
 	 * Make sure receive operation is disabled at least until we
 	 * re-negotiate the features, since receive operation depends
 	 * on the value of sc->rx_merge and the header length, which
 	 * are both set in pci_vtnet_neg_features().
 	 * Receive operation will be enabled again once the guest adds
 	 * the first receive buffers and kicks us.
 	 */
 	netbe_rx_disable(sc->vsc_be);
 
 	/* Set sc->resetting and give a chance to the TX thread to stop. */
 	pthread_mutex_lock(&sc->tx_mtx);
 	sc->resetting = 1;
 	while (sc->tx_in_progress) {
 		pthread_mutex_unlock(&sc->tx_mtx);
 		usleep(10000);
 		pthread_mutex_lock(&sc->tx_mtx);
 	}
 
 	/*
 	 * Now reset rings, MSI-X vectors, and negotiated capabilities.
 	 * Do that with the TX lock held, since we need to reset
 	 * sc->resetting.
 	 */
 	vi_reset_dev(&sc->vsc_vs);
 
 	sc->resetting = 0;
 	pthread_mutex_unlock(&sc->tx_mtx);
 	pthread_mutex_unlock(&sc->rx_mtx);
 }
 
+static __inline struct iovec *
+iov_trim_hdr(struct iovec *iov, int *iovcnt, unsigned int hlen)
+{
+	struct iovec *riov;
+
+	if (iov[0].iov_len < hlen) {
+		/*
+		 * Not enough header space in the first fragment.
+		 * That's not ok for us.
+		 */
+		return NULL;
+	}
+
+	iov[0].iov_len -= hlen;
+	if (iov[0].iov_len == 0) {
+		*iovcnt -= 1;
+		if (*iovcnt == 0) {
+			/*
+			 * Only space for the header. That's not
+			 * enough for us.
+			 */
+			return NULL;
+		}
+		riov = &iov[1];
+	} else {
+		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + hlen);
+		riov = &iov[0];
+	}
+
+	return (riov);
+}
+
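 /*
  * Book-keeping for one guest descriptor chain: the chain's head index
  * and its total buffer capacity in bytes.
  */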
 struct virtio_mrg_rxbuf_info {
 	uint16_t idx;
 	uint16_t pad;
 	uint32_t len;
 };
 
 static void
 pci_vtnet_rx(struct pci_vtnet_softc *sc)
 {
+	int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen;
 	struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS];
 	struct iovec iov[VTNET_MAXSEGS + 1];
 	struct vqueue_info *vq;
-	uint32_t cur_iov_bytes;
-	struct iovec *cur_iov;
-	uint16_t cur_iov_len;
+	uint32_t riov_bytes;
+	struct iovec *riov;
+	int riov_len;
 	uint32_t ulen;
 	int n_chains;
 	int len;
 
 	vq = &sc->vsc_queues[VTNET_RXQ];
 	for (;;) {
+		struct virtio_net_rxhdr *hdr;
+
 		/*
 		 * Get a descriptor chain to store the next ingress
 		 * packet. In case of mergeable rx buffers, get as
 		 * many chains as necessary in order to make room
 		 * for a maximum sized LRO packet.
 		 */
-		cur_iov_bytes = 0;
-		cur_iov_len = 0;
-		cur_iov = iov;
+		riov_bytes = 0;
+		riov_len = 0;
+		riov = iov;
 		n_chains = 0;
 		do {
-			int n = vq_getchain(vq, &info[n_chains].idx, cur_iov,
-			    VTNET_MAXSEGS - cur_iov_len, NULL);
+			int n = vq_getchain(vq, &info[n_chains].idx, riov,
+			    VTNET_MAXSEGS - riov_len, NULL);
 
 			if (n == 0) {
 				/*
 				 * No rx buffers. Enable RX kicks and double
 				 * check.
 				 */
 				vq_kick_enable(vq);
 				if (!vq_has_descs(vq)) {
 					/*
 					 * Still no buffers. Return the unused
 					 * chains (if any), interrupt if needed
 					 * (including for NOTIFY_ON_EMPTY), and
 					 * disable the backend until the next
 					 * kick.
 					 */
 					vq_retchains(vq, n_chains);
 					vq_endchains(vq, /*used_all_avail=*/1);
 					netbe_rx_disable(sc->vsc_be);
 					return;
 				}
 
 				/* More rx buffers found, so keep going. */
 				vq_kick_disable(vq);
 				continue;
 			}
-			assert(n >= 1 && cur_iov_len + n <= VTNET_MAXSEGS);
-			cur_iov_len += n;
+			assert(n >= 1 && riov_len + n <= VTNET_MAXSEGS);
+			riov_len += n;
 			if (!sc->rx_merge) {
 				n_chains = 1;
 				break;
 			}
-			info[n_chains].len = (uint32_t)count_iov(cur_iov, n);
-			cur_iov_bytes += info[n_chains].len;
-			cur_iov += n;
+			info[n_chains].len = (uint32_t)count_iov(riov, n);
+			riov_bytes += info[n_chains].len;
+			riov += n;
 			n_chains++;
-		} while (cur_iov_bytes < VTNET_MAX_PKT_LEN &&
-			    cur_iov_len < VTNET_MAXSEGS);
+		} while (riov_bytes < VTNET_MAX_PKT_LEN &&
+			    riov_len < VTNET_MAXSEGS);
 
-		len = netbe_recv(sc->vsc_be, iov, cur_iov_len);
+		riov = iov;
+		hdr = riov[0].iov_base;
+		if (prepend_hdr_len > 0) {
+			/*
+			 * The frontend uses a virtio-net header, but the
+			 * backend does not. We need to prepend a zeroed
+			 * header.
+			 */
+			riov = iov_trim_hdr(riov, &riov_len, prepend_hdr_len);
+			if (riov == NULL) {
+				/*
+				 * The first collected chain is nonsensical,
+				 * as it is not even enough to store the
+				 * virtio-net header. Just drop it.
+				 */
+				vq_relchain(vq, info[0].idx, 0);
+				vq_retchains(vq, n_chains - 1);
+				continue;
+			}
+			memset(hdr, 0, prepend_hdr_len);
+		}
 
+		len = netbe_recv(sc->vsc_be, riov, riov_len);
+
 		if (len <= 0) {
 			/*
 			 * No more packets (len == 0), or backend errored
 			 * (err < 0). Return unused available buffers
 			 * and stop.
 			 */
 			vq_retchains(vq, n_chains);
 			/* Interrupt if needed/appropriate and stop. */
 			vq_endchains(vq, /*used_all_avail=*/0);
 			return;
 		}
 
-		ulen = (uint32_t)len; /* avoid too many casts below */
+		ulen = (uint32_t)(len + prepend_hdr_len);
 
-		/* Publish the used buffers to the guest. */
+		/*
+		 * Publish the used buffers to the guest, reporting the
+		 * number of bytes that we wrote.
+		 */
 		if (!sc->rx_merge) {
 			vq_relchain(vq, info[0].idx, ulen);
 		} else {
-			struct virtio_net_rxhdr *hdr = iov[0].iov_base;
 			uint32_t iolen;
 			int i = 0;
 
-			assert(iov[0].iov_len >= sizeof(*hdr));
-
 			do {
 				iolen = info[i].len;
 				if (iolen > ulen) {
 					iolen = ulen;
 				}
 				vq_relchain_prepare(vq, info[i].idx, iolen);
 				ulen -= iolen;
 				i++;
 				assert(i <= n_chains);
 			} while (ulen > 0);
 
 			hdr->vrh_bufs = i;
 			vq_relchain_publish(vq);
 			vq_retchains(vq, n_chains - i);
 		}
 	}
 
 }
 
 /*
  * Called when there is read activity on the backend file descriptor.
  * Each buffer posted by the guest is assumed to be able to contain
  * an entire ethernet frame + rx header.
  */
 static void
 pci_vtnet_rx_callback(int fd, enum ev_type type, void *param)
 {
 	struct pci_vtnet_softc *sc = param;
 
 	pthread_mutex_lock(&sc->rx_mtx);
 	pci_vtnet_rx(sc);
 	pthread_mutex_unlock(&sc->rx_mtx);
 
 }
 
 /* Called on RX kick. */
 static void
 pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
 {
 	struct pci_vtnet_softc *sc = vsc;
 
 	/*
 	 * A qnotify means that the rx process can now begin.
 	 */
 	pthread_mutex_lock(&sc->rx_mtx);
 	vq_kick_disable(vq);
 	netbe_rx_enable(sc->vsc_be);
 	pthread_mutex_unlock(&sc->rx_mtx);
 }
 
 /* TX virtqueue processing, called by the TX thread. */
 static void
 pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
 {
 	struct iovec iov[VTNET_MAXSEGS + 1];
+	struct iovec *siov = iov;
 	uint16_t idx;
 	ssize_t len;
 	int n;
 
 	/*
 	 * Obtain chain of descriptors. The first descriptor also
 	 * contains the virtio-net header.
 	 */
 	n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
 	assert(n >= 1 && n <= VTNET_MAXSEGS);
 
-	len = netbe_send(sc->vsc_be, iov, n);
+	if (sc->vhdrlen != sc->be_vhdrlen) {
+		/*
+		 * The frontend uses a virtio-net header, but the backend
+		 * does not. We simply strip the header and ignore it, as
+		 * it should be zero-filled.
+		 */
+		siov = iov_trim_hdr(siov, &n, sc->vhdrlen);
+	}
 
-	/* chain is processed, release it and set len */
-	vq_relchain(vq, idx, len > 0 ? len : 0);
+	if (siov == NULL) {
+		/* The chain is nonsensical. Just drop it. */
+		len = 0;
+	} else {
+		len = netbe_send(sc->vsc_be, siov, n);
+		if (len < 0) {
+			/*
+			 * If send failed, report that 0 bytes
+			 * were read.
+			 */
+			len = 0;
+		}
+	}
+
+	/*
+	 * Return the processed chain to the guest, reporting
+	 * the number of bytes that we read.
+	 */
+	vq_relchain(vq, idx, len);
 }
 
 /* Called on TX kick. */
 static void
 pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
 {
 	struct pci_vtnet_softc *sc = vsc;
 
 	/*
 	 * Any ring entries to process?
 	 */
 	if (!vq_has_descs(vq))
 		return;
 
 	/* Signal the tx thread for processing */
 	pthread_mutex_lock(&sc->tx_mtx);
 	vq_kick_disable(vq);
 	if (sc->tx_in_progress == 0)
 		pthread_cond_signal(&sc->tx_cond);
 	pthread_mutex_unlock(&sc->tx_mtx);
 }
 
 /*
  * Thread which will handle processing of TX desc
  */
 static void *
 pci_vtnet_tx_thread(void *param)
 {
 	struct pci_vtnet_softc *sc = param;
 	struct vqueue_info *vq;
 	int error;
 
 	vq = &sc->vsc_queues[VTNET_TXQ];
 
 	/*
 	 * Wait until the tx queue pointers have been initialized and the
 	 * first tx has been signaled.
 	 */
 	pthread_mutex_lock(&sc->tx_mtx);
 	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
 	assert(error == 0);
 
 	for (;;) {
 		/* note - tx mutex is locked here */
 		while (sc->resetting || !vq_has_descs(vq)) {
 			vq_kick_enable(vq);
 			if (!sc->resetting && vq_has_descs(vq))
 				break;
 
 			sc->tx_in_progress = 0;
 			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
 			assert(error == 0);
 		}
 		vq_kick_disable(vq);
 		sc->tx_in_progress = 1;
 		pthread_mutex_unlock(&sc->tx_mtx);
 
 		do {
 			/*
 			 * Run through entries, placing them into
 			 * iovecs and sending when an end-of-packet
 			 * is found
 			 */
 			pci_vtnet_proctx(sc, vq);
 		} while (vq_has_descs(vq));
 
 		/*
 		 * Generate an interrupt if needed.
 		 */
 		vq_endchains(vq, /*used_all_avail=*/1);
 
 		pthread_mutex_lock(&sc->tx_mtx);
 	}
 }
 
 #ifdef notyet
 static void
 pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
 {
 
 	DPRINTF(("vtnet: control qnotify!"));
 }
 #endif
 
 static int
 pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 {
 	struct pci_vtnet_softc *sc;
 	char tname[MAXCOMLEN + 1];
 	int mac_provided;
 
 	/*
 	 * Allocate data structures for further virtio initializations.
 	 * sc also contains a copy of vtnet_vi_consts, since capabilities
 	 * change depending on the backend.
 	 */
 	sc = calloc(1, sizeof(struct pci_vtnet_softc));
 
 	sc->vsc_consts = vtnet_vi_consts;
 	pthread_mutex_init(&sc->vsc_mtx, NULL);
 
 	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
 	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
 	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
 	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
 #ifdef notyet
 	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
         sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
 #endif
  
 	/*
 	 * Attempt to open the backend device and read the MAC address
 	 * if specified.
 	 */
 	mac_provided = 0;
 	if (opts != NULL) {
 		char *devname;
 		char *vtopts;
-		int err;
+		int err = 0;
 
+		/* Get the device name. */
 		devname = vtopts = strdup(opts);
 		(void) strsep(&vtopts, ",");
 
-		if (vtopts != NULL) {
-			err = net_parsemac(vtopts, sc->vsc_config.mac);
-			if (err != 0) {
-				free(devname);
-				free(sc);
-				return (err);
+		/*
+		 * Parse the list of options in the form
+		 *     key1=value1,...,keyN=valueN.
+		 */
+		while (vtopts != NULL) {
+			char *value = vtopts;
+			char *key;
+
+			key = strsep(&value, "=");
+			if (value == NULL)
+				break;
+			vtopts = value;
+			(void) strsep(&vtopts, ",");
+
+			if (strcmp(key, "mac") == 0) {
+				err = net_parsemac(value, sc->vsc_config.mac);
+				if (err)
+					break;
+				mac_provided = 1;
 			}
-			mac_provided = 1;
 		}
 
+		if (err) {
+			free(devname);
+			free(sc);
+			return (err);
+		}
+
 		err = netbe_init(&sc->vsc_be, devname, pci_vtnet_rx_callback,
 		          sc);
 		free(devname);
 		if (err) {
 			free(sc);
 			return (err);
 		}
 		sc->vsc_consts.vc_hv_caps |= netbe_get_cap(sc->vsc_be);
 	}
 
 	if (!mac_provided) {
 		net_genmac(pi, sc->vsc_config.mac);
 	}
 
 	/* initialize config space */
 	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
 	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
 	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
 
 	/* Link is up if we managed to open backend device. */
 	sc->vsc_config.status = (opts == NULL || sc->vsc_be);
 	
 	vi_softc_linkup(&sc->vsc_vs, &sc->vsc_consts, sc, pi, sc->vsc_queues);
 	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;
 
 	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
 	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) {
 		free(sc);
 		return (1);
 	}
 
 	/* use BAR 0 to map config regs in IO space */
 	vi_set_io_bar(&sc->vsc_vs, 0);
 
 	sc->resetting = 0;
 
 	sc->rx_merge = 0;
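+	/* Default: header length without mergeable rx buffers. */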
+	sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
 	pthread_mutex_init(&sc->rx_mtx, NULL); 
 
 	/* 
 	 * Initialize tx semaphore & spawn TX processing thread.
 	 * As of now, only one thread for TX desc processing is
 	 * spawned. 
 	 */
 	sc->tx_in_progress = 0;
 	pthread_mutex_init(&sc->tx_mtx, NULL);
 	pthread_cond_init(&sc->tx_cond, NULL);
 	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
 	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
 	    pi->pi_func);
 	pthread_set_name_np(sc->tx_tid, tname);
 
 	return (0);
 }
 
 static int
 pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value)
 {
 	struct pci_vtnet_softc *sc = vsc;
 	void *ptr;
 
 	if (offset < (int)sizeof(sc->vsc_config.mac)) {
 		assert(offset + size <= (int)sizeof(sc->vsc_config.mac));
 		/*
 		 * The driver is allowed to change the MAC address
 		 */
 		ptr = &sc->vsc_config.mac[offset];
 		memcpy(ptr, &value, size);
 	} else {
 		/* silently ignore other writes */
 		DPRINTF(("vtnet: write to readonly reg %d", offset));
 	}
 
 	return (0);
 }
 
 static int
 pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval)
 {
 	struct pci_vtnet_softc *sc = vsc;
 	void *ptr;
 
 	ptr = (uint8_t *)&sc->vsc_config + offset;
 	memcpy(retval, ptr, size);
 	return (0);
 }
 
 static void
 pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
 {
 	struct pci_vtnet_softc *sc = vsc;
-	unsigned int rx_vhdrlen;
 
 	sc->vsc_features = negotiated_features;
 
 	if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) {
-		rx_vhdrlen = sizeof(struct virtio_net_rxhdr);
+		sc->vhdrlen = sizeof(struct virtio_net_rxhdr);
 		sc->rx_merge = 1;
 	} else {
 		/*
 		 * Without mergeable rx buffers, virtio-net header is 2
 		 * bytes shorter than sizeof(struct virtio_net_rxhdr).
 		 */
-		rx_vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
+		sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
 		sc->rx_merge = 0;
 	}
 
 	/* Tell the backend to enable some capabilities it has advertised. */
-	netbe_set_cap(sc->vsc_be, negotiated_features, rx_vhdrlen);
+	netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen);
+	sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be);
+	assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen);
 }
 
 static struct pci_devemu pci_de_vnet = {
 	.pe_emu = 	"virtio-net",
 	.pe_init =	pci_vtnet_init,
 	.pe_barwrite =	vi_pci_write,
 	.pe_barread =	vi_pci_read
 };
 PCI_EMUL_SET(pci_de_vnet);