Page MenuHomeFreeBSD

D3425.diff
No OneTemporary

D3425.diff

Index: tools/tools/mq-testing/vme/Makefile
===================================================================
--- /dev/null
+++ tools/tools/mq-testing/vme/Makefile
@@ -0,0 +1,15 @@
+# Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
+#
+# $FreeBSD$
+
+.PATH: ${.CURDIR}
+
+KMOD= if_vme
+SRCS= if_vme.c opt_compat.h opt_inet.h opt_rss.h vnode_if.h
+
+# Standalone (no KERNBUILDDIR) builds only synthesize opt_compat.h here;
+# opt_inet.h and opt_rss.h then come out empty, and if_vme.c #errors
+# unless RSS is defined.  NOTE(review): presumably this module is meant
+# to be built with KERNBUILDDIR pointing at an RSS-enabled kernel -- confirm.
+.if !defined(KERNBUILDDIR)
+opt_compat.h:
+ echo "#define COMPAT_FREEBSD6 1" > ${.TARGET}
+.endif
+
+.include <bsd.kmod.mk>
Index: tools/tools/mq-testing/vme/if_vme.h
===================================================================
--- /dev/null
+++ tools/tools/mq-testing/vme/if_vme.h
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * BASED ON:
+ * -------------------------------------------------------------------------
+ *
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_vme.h,v 0.7 2000/07/12 04:12:51 max Exp $
+ */
+
+#ifndef _NET_IF_VME_H_
+#define _NET_IF_VME_H_
+
+/* refer to if_vmevar.h for the softc stuff */
+
+/* maximum receive packet size (hard limit) */
+#define VMEMRU 16384
+
+/*
+ * Interface parameters exchanged over the VMESIFINFO/VMEGIFINFO ioctls.
+ */
+struct vmeinfo {
+ int baudrate; /* linespeed */
+ short mtu; /* maximum transmission unit */
+ u_char type; /* ethernet, tokenring, etc. */
+ u_char dummy; /* place holder */
+};
+
+/* ioctl's for get/set debug */
+#define VMESDEBUG _IOW('t', 90, int)
+#define VMEGDEBUG _IOR('t', 89, int)
+#define VMESIFINFO _IOW('t', 91, struct vmeinfo)
+#define VMEGIFINFO _IOR('t', 92, struct vmeinfo)
+#define VMEGIFNAME _IOR('t', 93, struct ifreq)
+
+#endif /* !_NET_IF_VME_H_ */
Index: tools/tools/mq-testing/vme/if_vme.c
===================================================================
--- /dev/null
+++ tools/tools/mq-testing/vme/if_vme.c
@@ -0,0 +1,1217 @@
+/*-
+ * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * BASED ON:
+ * -------------------------------------------------------------------------
+ *
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_vme.c,v 0.21 2000/07/23 21:46:02 max Exp $
+ */
+
+#include "opt_compat.h"
+#include "opt_rss.h"
+
+#ifndef RSS
+#error "RSS must be enabled"
+#endif
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/poll.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/selinfo.h>
+#include <sys/signalvar.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/ttycom.h>
+#include <sys/uio.h>
+#include <sys/queue.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_clone.h>
+#include <net/rss_config.h>
+#include <net/vnet.h>
+
+#include <netinet/in.h>
+#include <netinet/in_rss.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+
+#include <netinet6/in6_rss.h>
+
+#include "if_vme.h"
+
+/*
+ * XXXGL: to avoid inclusion of if_var.h define if_purgeaddrs.
+ * This should be fixed by destroying interface clone on close(2).
+ */
+void if_purgeaddrs(if_t);
+
+#define CDEV_NAME "vme"
+#define VMEDEBUG if (vmedebug) printf
+
+static const char vmename[] = "vme";
+#define VMEMAXUNIT 0x7fff
+#define VMEMAXQUEUES 128
+
+#define PRIV_NET_VME PRIV_NET_TAP
+
+#define VME_CSUM_SET (CSUM_IP_CHECKED | CSUM_IP_VALID | \
+ CSUM_DATA_VALID | CSUM_DATA_VALID_IPV6 | \
+ CSUM_PSEUDO_HDR)
+
+/* module */
+static int vmemodevent(module_t, int, void *);
+
+/* device */
+static void vmeclone(void *, struct ucred *, char *, int,
+ struct cdev **);
+static void vmecreate(struct cdev *);
+
+/* network interface */
+static int vmeifioctl(if_t, u_long, void *, struct thread *);
+static int vmeiftransmit(if_t, struct mbuf *);
+
+static int vme_clone_create(struct if_clone *, int, caddr_t);
+static void vme_clone_destroy(if_t);
+static struct if_clone *vme_cloner;
+
+/* character device */
+static d_open_t vmeopen;
+static d_close_t vmeclose;
+static d_read_t vmeread;
+static d_write_t vmewrite;
+static d_ioctl_t vmeioctl;
+static d_poll_t vmepoll;
+static d_kqfilter_t vmekqfilter;
+
+/* kqueue(2) */
+static int vmekqread(struct knote *, long);
+static int vmekqwrite(struct knote *, long);
+static void vmekqdetach(struct knote *);
+
+/* multiqueue rss */
+static void vmersshash(struct mbuf *);
+
+/* kqueue filter for EVFILT_READ on /dev/vme */
+static struct filterops vme_read_filterops = {
+ .f_isfd = 1,
+ .f_attach = NULL,
+ .f_detach = vmekqdetach,
+ .f_event = vmekqread,
+};
+
+/* kqueue filter for EVFILT_WRITE on /dev/vme */
+static struct filterops vme_write_filterops = {
+ .f_isfd = 1,
+ .f_attach = NULL,
+ .f_detach = vmekqdetach,
+ .f_event = vmekqwrite,
+};
+
+/* character-device entry points for /dev/vmeN */
+static struct cdevsw vme_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = D_NEEDMINOR,
+ .d_open = vmeopen,
+ .d_close = vmeclose,
+ .d_read = vmeread,
+ .d_write = vmewrite,
+ .d_ioctl = vmeioctl,
+ .d_poll = vmepoll,
+ .d_name = CDEV_NAME,
+ .d_kqfilter = vmekqfilter,
+};
+
+/* network-interface driver ops for the vmeN ifnet side */
+static struct ifdriver vme_ifdrv = {
+ .ifdrv_ops = {
+ .ifop_ioctl = vmeifioctl,
+ .ifop_transmit = vmeiftransmit,
+ },
+ .ifdrv_name = vmename,
+ .ifdrv_type = IFT_ETHER,
+};
+
+/*
+ * vme_mtx locks vme_flags, vme_pid. vme_next locked with global vmemtx.
+ * Other fields locked by owning subsystems.
+ */
+struct vme_softc {
+ struct mtx vme_mtx; /* per-softc mutex */
+ struct cdev *vme_dev; /* backing /dev/vmeN node */
+ struct ifnet *vme_ifp; /* attached network interface */
+ struct mbufq vme_queue; /* packets queued for read(2) */
+ uint32_t vme_ifflags; /* cached IFF_* from SIOCSIFFLAGS */
+ uint32_t vme_mtu;
+ uint64_t vme_baudrate;
+ uint16_t vme_flags; /* misc flags */
+#define VME_OPEN (1 << 0)
+#define VME_INITED (1 << 1)
+#define VME_RWAIT (1 << 2)
+#define VME_ASYNC (1 << 3)
+#define VME_READY (VME_OPEN|VME_INITED)
+
+ /* snapshot of the interface lladdr taken at open(2) time */
+ u_int8_t ether_addr[ETHER_ADDR_LEN]; /* remote address */
+ pid_t vme_pid; /* PID of process to open */
+ struct sigio *vme_sigio; /* information for async I/O */
+ struct selinfo vme_rsel; /* read select */
+
+ SLIST_ENTRY(vme_softc) vme_next; /* next device in chain */
+};
+
+/*
+ * All global variables in if_vme.c are locked with vmemtx, with the
+ * exception of vmedebug, which is accessed unlocked; vmeclones is
+ * static at runtime.
+ */
+static struct mtx vmemtx;
+static int vmedebug = 0; /* debug flag */
+static int vmeuopen = 0; /* allow user open() */
+static int vmeuponopen = 0; /* IFF_UP on open() */
+static int vmedclone = 1; /* enable devfs cloning */
+static int vmenumqueues = 0; /* Number of queues */
+static SLIST_HEAD(, vme_softc) vmehead; /* first device */
+static struct clonedevs *vmeclones;
+
+MALLOC_DECLARE(M_VME);
+MALLOC_DEFINE(M_VME, CDEV_NAME, "Ethernet tunnel interface");
+/* legacy knob; mirrors net.link.vme.debug below */
+SYSCTL_INT(_debug, OID_AUTO, if_vme_debug, CTLFLAG_RW, &vmedebug, 0, "");
+
+SYSCTL_DECL(_net_link);
+static SYSCTL_NODE(_net_link, OID_AUTO, vme, CTLFLAG_RW, 0,
+ "Ethernet tunnel software network interface");
+SYSCTL_INT(_net_link_vme, OID_AUTO, user_open, CTLFLAG_RW, &vmeuopen, 0,
+ "Allow user to open /dev/vme (based on node permissions)");
+SYSCTL_INT(_net_link_vme, OID_AUTO, up_on_open, CTLFLAG_RW, &vmeuponopen, 0,
+ "Bring interface up when /dev/vme is opened");
+SYSCTL_INT(_net_link_vme, OID_AUTO, devfs_cloning, CTLFLAG_RWTUN, &vmedclone, 0,
+ "Enable legacy devfs interface creation");
+SYSCTL_INT(_net_link_vme, OID_AUTO, debug, CTLFLAG_RW, &vmedebug, 0, "");
+SYSCTL_INT(_net_link_vme, OID_AUTO, num_queues, CTLFLAG_RDTUN, &vmenumqueues, 0,
+ "Number of queues to configure, 0 indicates autoconfigure");
+
+/*
+ * if_clone create callback: make sure a /dev/vmeN node exists for the
+ * requested unit, then build the softc and attach the interface.
+ */
+static int
+vme_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct cdev *dev;
+ int i;
+
+ /* Find any existing device, or allocate new unit number. */
+ i = clone_create(&vmeclones, &vme_cdevsw, &unit, &dev, 0);
+ if (i) {
+ /* nonzero means the node did not exist yet; create it */
+ dev = make_dev(&vme_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600,
+ "%s%d", vmename, unit);
+ }
+
+ vmecreate(dev);
+ return (0);
+}
+
+/*
+ * Tear down one softc: destroy the device node, drain selecters and
+ * kqueue notes, detach the ifnet and free the softc.  Caller must have
+ * already unlinked tp from vmehead.
+ */
+static void
+vme_destroy(struct vme_softc *tp)
+{
+ if_t ifp = tp->vme_ifp;
+
+ CURVNET_SET(ifp->if_vnet);
+ destroy_dev(tp->vme_dev);
+ seldrain(&tp->vme_rsel);
+ knlist_clear(&tp->vme_rsel.si_note, 0);
+ knlist_destroy(&tp->vme_rsel.si_note);
+ if_detach(ifp);
+
+ mtx_destroy(&tp->vme_mtx);
+ free(tp, M_VME);
+ CURVNET_RESTORE();
+}
+
+/*
+ * if_clone destroy callback: unlink the softc from the global list and
+ * destroy it.
+ */
+static void
+vme_clone_destroy(if_t ifp)
+{
+ struct vme_softc *tp;
+
+ tp = if_getsoftc(ifp, IF_DRIVER_SOFTC);
+
+ mtx_lock(&vmemtx);
+ SLIST_REMOVE(&vmehead, tp, vme_softc, vme_next);
+ mtx_unlock(&vmemtx);
+ vme_destroy(tp);
+}
+
+/*
+ * vmemodevent
+ *
+ * module event handler
+ */
+static int
+vmemodevent(module_t mod, int type, void *data)
+{
+ static eventhandler_tag eh_tag = NULL;
+ struct vme_softc *tp = NULL;
+ if_t ifp = NULL;
+
+ switch (type) {
+ case MOD_LOAD:
+
+ /* initialize device */
+
+ mtx_init(&vmemtx, "vmemtx", NULL, MTX_DEF);
+ SLIST_INIT(&vmehead);
+
+ clone_setup(&vmeclones);
+ eh_tag = EVENTHANDLER_REGISTER(dev_clone, vmeclone, 0, 1000);
+ if (eh_tag == NULL) {
+ clone_cleanup(&vmeclones);
+ mtx_destroy(&vmemtx);
+ return (ENOMEM);
+ }
+ vme_cloner = if_clone_simple(vmename, vme_clone_create,
+ vme_clone_destroy, 0);
+
+ /* 0 or out-of-range tunable means one queue per CPU */
+ if (vmenumqueues <= 0 || vmenumqueues >= VMEMAXQUEUES)
+ vmenumqueues = mp_ncpus;
+
+ return (0);
+
+ case MOD_UNLOAD:
+ /*
+ * The EBUSY algorithm here can't quite atomically
+ * guarantee that this is race-free since we have to
+ * release the vme mtx to deregister the clone handler.
+ */
+ mtx_lock(&vmemtx);
+ SLIST_FOREACH(tp, &vmehead, vme_next) {
+ mtx_lock(&tp->vme_mtx);
+ if (tp->vme_flags & VME_OPEN) {
+ mtx_unlock(&tp->vme_mtx);
+ mtx_unlock(&vmemtx);
+ return (EBUSY);
+ }
+ mtx_unlock(&tp->vme_mtx);
+ }
+ mtx_unlock(&vmemtx);
+
+ EVENTHANDLER_DEREGISTER(dev_clone, eh_tag);
+ if_clone_detach(vme_cloner);
+ drain_dev_clone_events();
+
+ /* destroy all remaining interfaces; drop the list lock
+ * around each vme_destroy() since it can sleep */
+ mtx_lock(&vmemtx);
+ while ((tp = SLIST_FIRST(&vmehead)) != NULL) {
+ SLIST_REMOVE_HEAD(&vmehead, vme_next);
+ mtx_unlock(&vmemtx);
+
+ ifp = tp->vme_ifp;
+
+ VMEDEBUG("detaching %s\n", if_name(ifp));
+
+ vme_destroy(tp);
+ mtx_lock(&vmemtx);
+ }
+ mtx_unlock(&vmemtx);
+ clone_cleanup(&vmeclones);
+
+ mtx_destroy(&vmemtx);
+
+ break;
+
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+/*
+ * DEVFS handler
+ *
+ * Creates /dev/vme (next free unit) or /dev/vmeN on first lookup,
+ * then instantiates the matching network interface.
+ */
+static void
+vmeclone(void *arg, struct ucred *cred, char *name, int namelen,
+ struct cdev **dev)
+{
+ char devname[SPECNAMELEN + 1];
+ int i, unit, append_unit;
+
+ if (*dev != NULL)
+ return;
+
+ /* honor the devfs_cloning and user_open sysctls */
+ if (!vmedclone ||
+ (!vmeuopen && priv_check_cred(cred, PRIV_NET_IFCREATE, 0) != 0))
+ return;
+
+ unit = 0;
+ append_unit = 0;
+
+ /* We're interested in only vme devices. */
+ if (strcmp(name, vmename) == 0) {
+ unit = -1;
+ } else if (dev_stdclone(name, NULL, vmename, &unit) != 1) {
+ return;
+ }
+
+ if (unit == -1)
+ append_unit = 1;
+
+ CURVNET_SET(CRED_TO_VNET(cred));
+ /* find any existing device, or allocate new unit number */
+ i = clone_create(&vmeclones, &vme_cdevsw, &unit, dev, 0);
+ if (i) {
+ if (append_unit) {
+ /*
+ * We were passed 'vme' with no unit specified,
+ * so we'll need to append it now.
+ */
+ namelen = snprintf(devname, sizeof(devname), "%s%d", name,
+ unit);
+ name = devname;
+ }
+
+ *dev = make_dev_credf(MAKEDEV_REF, &vme_cdevsw, unit,
+ cred, UID_ROOT, GID_WHEEL, 0600, "%s", name);
+ }
+
+ if_clone_create(name, namelen, NULL);
+ CURVNET_RESTORE();
+}
+
+
+/*
+ * vmecreate
+ *
+ * Allocate the softc for 'dev', generate a fake MAC address and attach
+ * the vmeN network interface with vmenumqueues receive rings.
+ */
+static void
+vmecreate(struct cdev *dev)
+{
+ struct if_attach_args ifat = {
+ .ifat_version = IF_ATTACH_VERSION,
+ .ifat_mtu = ETHERMTU,
+ .ifat_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST,
+ .ifat_capabilities = IFCAP_LINKSTATE,
+ .ifat_capenable = IFCAP_LINKSTATE,
+ .ifat_nrings = vmenumqueues,
+ };
+ if_t ifp = NULL;
+ struct vme_softc *tp = NULL;
+ unsigned short macaddr_hi;
+ uint32_t macaddr_mid;
+ int unit;
+ const char *name = NULL;
+ u_char eaddr[6];
+
+ /* allocate driver storage and create device */
+ tp = malloc(sizeof(*tp), M_VME, M_WAITOK | M_ZERO);
+ mtx_init(&tp->vme_mtx, "vme_mtx", NULL, MTX_DEF);
+ mbufq_init(&tp->vme_queue, IFQ_MAXLEN);
+ mtx_lock(&vmemtx);
+ SLIST_INSERT_HEAD(&vmehead, tp, vme_next);
+ mtx_unlock(&vmemtx);
+
+ unit = dev2unit(dev);
+
+ name = vmename;
+ ifat.ifat_drv = &vme_ifdrv;
+
+ unit &= VMEMAXUNIT;
+
+ VMEDEBUG("vmecreate(%s%d). minor = %#x\n", name, unit, dev2unit(dev));
+
+ /* generate fake MAC address: 00 bd xx xx xx unit_no */
+ macaddr_hi = htons(0x00bd);
+ macaddr_mid = (uint32_t) ticks; /* pseudo-random middle bytes */
+ bcopy(&macaddr_hi, eaddr, sizeof(short));
+ bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
+ eaddr[5] = (u_char)unit;
+
+ /* fill the rest and attach interface */
+ ifat.ifat_softc = tp;
+ ifat.ifat_dunit = unit;
+ ifat.ifat_lla = eaddr;
+ ifp = tp->vme_ifp = if_attach(&ifat);
+ if (ifp == NULL)
+ panic("%s%d: can not if_attach()", name, unit);
+
+ dev->si_drv1 = tp;
+ tp->vme_dev = dev;
+
+ mtx_lock(&tp->vme_mtx);
+ tp->vme_flags |= VME_INITED;
+ mtx_unlock(&tp->vme_mtx);
+
+ knlist_init_mtx(&tp->vme_rsel.si_note, &tp->vme_mtx);
+
+ VMEDEBUG("interface %s is created. minor = %#x\n",
+ if_name(ifp), dev2unit(dev));
+}
+
+/*
+ * vmeopen
+ *
+ * to open tunnel. must be superuser unless the user_open sysctl is set
+ */
+static int
+vmeopen(struct cdev *dev, int flag, int mode, struct thread *td)
+{
+ struct vme_softc *tp = NULL;
+ if_t ifp = NULL;
+ int error;
+
+ if (vmeuopen == 0) {
+ error = priv_check(td, PRIV_NET_VME);
+ if (error)
+ return (error);
+ }
+
+ if ((dev2unit(dev) & CLONE_UNITMASK) > VMEMAXUNIT)
+ return (ENXIO);
+
+ tp = dev->si_drv1;
+
+ /* only one opener at a time */
+ mtx_lock(&tp->vme_mtx);
+ if (tp->vme_flags & VME_OPEN) {
+ mtx_unlock(&tp->vme_mtx);
+ return (EBUSY);
+ }
+
+ /* snapshot the interface lladdr for SIOCGIFADDR on the cdev */
+ bcopy(if_lladdr(tp->vme_ifp), tp->ether_addr, sizeof(tp->ether_addr));
+ tp->vme_pid = td->td_proc->p_pid;
+ tp->vme_flags |= VME_OPEN;
+ ifp = tp->vme_ifp;
+
+ if_link_state_change(ifp, LINK_STATE_UP);
+ mtx_unlock(&tp->vme_mtx);
+
+ /* optionally mark the interface up (up_on_open sysctl) */
+ if (vmeuponopen) {
+ struct ifreq ifr;
+
+ if_drvioctl(ifp, SIOCGIFFLAGS, &ifr, td);
+ ifr.ifr_flags |= IFF_UP;
+ if_drvioctl(ifp, SIOCSIFFLAGS, &ifr, td);
+ }
+
+ VMEDEBUG("%s is open. minor = %#x\n", if_name(ifp), dev2unit(dev));
+
+ return (0);
+}
+
+/*
+ * vmeclose
+ *
+ * close the device - mark i/f down & delete routing info
+ */
+static int
+vmeclose(struct cdev *dev, int foo, int bar, struct thread *td)
+{
+ struct vme_softc *tp = dev->si_drv1;
+ if_t ifp = tp->vme_ifp;
+
+ /* junk all pending output */
+ mtx_lock(&tp->vme_mtx);
+ CURVNET_SET(ifp->if_vnet);
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ mbufq_drain(&tp->vme_queue);
+
+ /*
+ * Bring the interface down, unless IFF_LINK0 asks us to keep it
+ * configured across close.  The ioctls can sleep, so drop the
+ * softc lock around them.
+ */
+ if ((tp->vme_ifflags & (IFF_UP | IFF_LINK0)) == IFF_UP) {
+ struct ifreq ifr;
+
+ mtx_unlock(&tp->vme_mtx);
+ if_drvioctl(ifp, SIOCGIFFLAGS, &ifr, td);
+ ifr.ifr_flags &= ~IFF_UP;
+ if_drvioctl(ifp, SIOCSIFFLAGS, &ifr, td);
+ if_purgeaddrs(ifp);
+ mtx_lock(&tp->vme_mtx);
+ }
+
+ CURVNET_RESTORE();
+
+ /* wake anyone still waiting on the descriptor */
+ funsetown(&tp->vme_sigio);
+ selwakeuppri(&tp->vme_rsel, PZERO+1);
+ KNOTE_LOCKED(&tp->vme_rsel.si_note, 0);
+
+ tp->vme_flags &= ~VME_OPEN;
+ tp->vme_pid = 0;
+ mtx_unlock(&tp->vme_mtx);
+
+ VMEDEBUG("%s is closed. minor = %#x\n", if_name(ifp), dev2unit(dev));
+
+ return (0);
+}
+
+/*
+ * vmeifioctl
+ *
+ * Process an ioctl request on network interface
+ */
+static int
+vmeifioctl(if_t ifp, u_long cmd, void *data, struct thread *td)
+{
+ struct vme_softc *tp;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ifstat *ifs = NULL;
+ struct ifmediareq *ifmr = NULL;
+ int dummy, error = 0;
+
+ tp = if_getsoftc(ifp, IF_DRIVER_SOFTC);
+
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ /* cache flags; vmeclose() consults IFF_UP/IFF_LINK0 */
+ tp->vme_ifflags = ifr->ifr_flags;
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+ case SIOCGIFMEDIA:
+ /* report a single fake Ethernet medium; active while open */
+ ifmr = (struct ifmediareq *)data;
+ dummy = ifmr->ifm_count;
+ ifmr->ifm_count = 1;
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+ if (tp->vme_flags & VME_OPEN)
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_current = ifmr->ifm_active;
+ if (dummy >= 1) {
+ int media = IFM_ETHER;
+ error = copyout(&media, ifmr->ifm_ulist,
+ sizeof(int));
+ }
+ break;
+
+ case SIOCSIFMTU:
+ tp->vme_mtu = ifr->ifr_mtu;
+ break;
+
+ case SIOCGIFSTATUS:
+ ifs = (struct ifstat *)data;
+ mtx_lock(&tp->vme_mtx);
+ if (tp->vme_pid != 0)
+ snprintf(ifs->ascii, sizeof(ifs->ascii),
+ "\tOpened by PID %d\n", tp->vme_pid);
+ else
+ ifs->ascii[0] = '\0';
+ mtx_unlock(&tp->vme_mtx);
+ break;
+
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ return (error);
+}
+
+/*
+ * vmeiftransmit
+ *
+ * queue packets from higher level ready to put out; the opener drains
+ * them via read(2).  Wakes sleepers, SIGIO listeners and selecters.
+ */
+static int
+vmeiftransmit(if_t ifp, struct mbuf *m)
+{
+ struct vme_softc *tp;
+ int error;
+
+ VMEDEBUG("%s starting\n", if_name(ifp));
+
+ tp = if_getsoftc(ifp, IF_DRIVER_SOFTC);
+
+ mtx_lock(&tp->vme_mtx);
+ if ((tp->vme_flags & VME_READY) != VME_READY) {
+
+ /* nobody has the device open; count and drop */
+ VMEDEBUG("%s not ready, vme_flags = 0x%x\n", if_name(ifp),
+ tp->vme_flags);
+ m_freem(m);
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ mtx_unlock(&tp->vme_mtx);
+ return (0);
+ }
+
+ if ((error = mbufq_enqueue(&tp->vme_queue, m)) != 0) {
+ mtx_unlock(&tp->vme_mtx);
+ return (error);
+ }
+
+ if (tp->vme_flags & VME_RWAIT) {
+ tp->vme_flags &= ~VME_RWAIT;
+ wakeup(tp);
+ }
+
+ /* pgsigio() may sleep; drop the softc lock around it */
+ if ((tp->vme_flags & VME_ASYNC) && (tp->vme_sigio != NULL)) {
+ mtx_unlock(&tp->vme_mtx);
+ pgsigio(&tp->vme_sigio, SIGIO, 0);
+ mtx_lock(&tp->vme_mtx);
+ }
+
+ selwakeuppri(&tp->vme_rsel, PZERO+1);
+ KNOTE_LOCKED(&tp->vme_rsel.si_note, 0);
+
+ mtx_unlock(&tp->vme_mtx);
+
+ return (0);
+}
+
+/*
+ * vmeioctl
+ *
+ * the cdevsw interface is now pretty minimal
+ */
+static int
+vmeioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
+ struct thread *td)
+{
+ struct vme_softc *tp = dev->si_drv1;
+ if_t ifp = tp->vme_ifp;
+ struct vmeinfo *vmep = NULL;
+ int error = 0;
+#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
+ defined(COMPAT_FREEBSD4)
+ int ival;
+#endif
+
+ switch (cmd) {
+ case VMESIFINFO:
+ {
+ struct ifreq ifr;
+
+ /* set MTU via the interface ioctl path, then baudrate */
+ vmep = (struct vmeinfo *)data;
+ ifr.ifr_mtu = vmep->mtu;
+ error = if_drvioctl(ifp, SIOCSIFMTU, &ifr, td);
+ if (error)
+ break;
+ tp->vme_baudrate = vmep->baudrate;
+ if_setbaudrate(ifp, vmep->baudrate);
+ break;
+ }
+
+ case VMEGIFINFO:
+ vmep = (struct vmeinfo *)data;
+ mtx_lock(&tp->vme_mtx);
+ vmep->mtu = tp->vme_mtu;
+ vmep->type = IFT_ETHER;
+ vmep->baudrate = tp->vme_baudrate;
+ mtx_unlock(&tp->vme_mtx);
+ break;
+
+ case VMESDEBUG:
+ vmedebug = *(int *)data;
+ break;
+
+ case VMEGDEBUG:
+ *(int *)data = vmedebug;
+ break;
+
+ case VMEGIFNAME:
+ {
+ struct ifreq *ifr = (struct ifreq *) data;
+
+ strlcpy(ifr->ifr_name, if_name(ifp), IFNAMSIZ);
+ break;
+ }
+
+ case FIONBIO:
+ break;
+
+ case FIOASYNC:
+ mtx_lock(&tp->vme_mtx);
+ if (*(int *)data)
+ tp->vme_flags |= VME_ASYNC;
+ else
+ tp->vme_flags &= ~VME_ASYNC;
+ mtx_unlock(&tp->vme_mtx);
+ break;
+
+ case FIONREAD:
+ {
+ struct mbuf *m;
+
+ /* NOTE(review): queue peeked without vme_mtx -- confirm
+ * this racy read is acceptable here */
+ m = mbufq_first(&tp->vme_queue);
+ if (m != NULL)
+ *(int *)data = m->m_pkthdr.len;
+ else
+ *(int *)data = 0;
+ break;
+ }
+
+ case FIOSETOWN:
+ return (fsetown(*(int *)data, &tp->vme_sigio));
+
+ case FIOGETOWN:
+ *(int *)data = fgetown(&tp->vme_sigio);
+ return (0);
+
+ /* this is deprecated, FIOSETOWN should be used instead */
+ case TIOCSPGRP:
+ return (fsetown(-(*(int *)data), &tp->vme_sigio));
+
+ /* this is deprecated, FIOGETOWN should be used instead */
+ case TIOCGPGRP:
+ *(int *)data = -fgetown(&tp->vme_sigio);
+ return (0);
+
+#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
+ defined(COMPAT_FREEBSD4)
+ case _IO('V', 0):
+ ival = IOCPARM_IVAL(data);
+ data = (caddr_t)&ival;
+ /* FALLTHROUGH */
+#endif
+ case SIOCGIFADDR: /* get MAC address of the remote side */
+ mtx_lock(&tp->vme_mtx);
+ bcopy(tp->ether_addr, data, sizeof(tp->ether_addr));
+ mtx_unlock(&tp->vme_mtx);
+ break;
+
+ case SIOCSIFADDR: /* set MAC address of the remote side */
+ mtx_lock(&tp->vme_mtx);
+ bcopy(data, tp->ether_addr, sizeof(tp->ether_addr));
+ mtx_unlock(&tp->vme_mtx);
+ break;
+
+ default:
+ return (ENOTTY);
+ }
+
+ return (error);
+}
+
+/*
+ * vmeread
+ *
+ * the cdevsw read interface - reads a packet at a time, or at
+ * least as much of a packet as can be read
+ */
+static int
+vmeread(struct cdev *dev, struct uio *uio, int flag)
+{
+ struct vme_softc *tp = dev->si_drv1;
+ if_t ifp = tp->vme_ifp;
+ struct mbuf *m = NULL;
+ int error = 0, len;
+
+ VMEDEBUG("%s reading, minor = %#x\n", if_name(ifp), dev2unit(dev));
+
+ mtx_lock(&tp->vme_mtx);
+ if ((tp->vme_flags & VME_READY) != VME_READY) {
+ mtx_unlock(&tp->vme_mtx);
+
+ /* Unlocked read. */
+ VMEDEBUG("%s not ready. minor = %#x, vme_flags = 0x%x\n",
+ if_name(ifp), dev2unit(dev), tp->vme_flags);
+
+ return (EHOSTDOWN);
+ }
+
+ tp->vme_flags &= ~VME_RWAIT;
+
+ /* sleep until we get a packet */
+ while ((m = mbufq_dequeue(&tp->vme_queue)) == NULL) {
+ if (flag & O_NONBLOCK) {
+ mtx_unlock(&tp->vme_mtx);
+ return (EWOULDBLOCK);
+ }
+ tp->vme_flags |= VME_RWAIT;
+ error = mtx_sleep(tp, &tp->vme_mtx, PCATCH | (PZERO + 1),
+ "vmerd", 0);
+ if (error) {
+ mtx_unlock(&tp->vme_mtx);
+ return (error);
+ }
+ }
+ mtx_unlock(&tp->vme_mtx);
+
+ /* feed packet to bpf */
+ if_mtap(ifp, m, NULL, 0);
+
+ /* xfer packet to user space, one mbuf at a time */
+ while ((m != NULL) && (uio->uio_resid > 0) && (error == 0)) {
+ len = min(uio->uio_resid, m->m_len);
+ if (len == 0)
+ break;
+
+ error = uiomove(mtod(m, void *), len, uio);
+ if (error != 0)
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ else if ((m->m_flags & M_PKTHDR) != 0)
+ if_inc_txcounters(ifp, m);
+ m = m_free(m);
+ }
+
+ /* userland buffer was smaller than the packet; drop the tail */
+ if (m != NULL) {
+ VMEDEBUG("%s dropping mbuf, minor = %#x\n", if_name(ifp),
+ dev2unit(dev));
+ m_freem(m);
+ }
+
+ return (error);
+}
+
+/*
+ * vmewrite
+ *
+ * the cdevsw write interface - an atomic write is a packet - or else!
+ */
+static int
+vmewrite(struct cdev *dev, struct uio *uio, int flag)
+{
+ struct ether_header *eh;
+ struct vme_softc *tp = dev->si_drv1;
+ if_t ifp = tp->vme_ifp;
+ struct mbuf *m;
+ ifring_t *ifrs;
+ int rid;
+
+ VMEDEBUG("%s writing, minor = %#x\n", if_name(ifp), dev2unit(dev));
+
+ if (uio->uio_resid == 0)
+ return (0);
+
+ if ((uio->uio_resid < 0) || (uio->uio_resid > VMEMRU)) {
+ VMEDEBUG("%s invalid packet len = %zd, minor = %#x\n",
+ if_name(ifp), uio->uio_resid, dev2unit(dev));
+
+ return (EIO);
+ }
+
+ if ((m = m_uiotombuf(uio, M_NOWAIT, 0, ETHER_ALIGN,
+ M_PKTHDR)) == NULL) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ return (ENOBUFS);
+ }
+
+ /* pretend checksums were verified in "hardware" */
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.csum_data = 0xffff;
+ m->m_pkthdr.csum_flags = VME_CSUM_SET;
+
+ /*
+ * Only pass a unicast frame to ether_input(), if it would actually
+ * have been received by non-virtual hardware.
+ */
+ if (m->m_len < sizeof(struct ether_header)) {
+ m_freem(m);
+ return (0);
+ }
+ eh = mtod(m, struct ether_header *);
+
+ if (eh && (tp->vme_ifflags & IFF_PROMISC) == 0 &&
+ !ETHER_IS_MULTICAST(eh->ether_dhost) &&
+ bcmp(eh->ether_dhost, if_lladdr(ifp), ETHER_ADDR_LEN) != 0) {
+ m_freem(m);
+ return (0);
+ }
+
+ /*
+ * NOTE(review): vmersshash() may m_pullup() the chain internally;
+ * if that replaced (or on failure freed) the head mbuf, 'm' here
+ * would be stale.  Presumably m_uiotombuf() yields a contiguous
+ * header so that path is never taken -- confirm.
+ */
+ vmersshash(m);
+ ifrs = if_getsoftc(ifp, IF_RING);
+
+ /* steer the packet to a ring by its RSS flowid */
+ rid = m->m_pkthdr.flowid % vmenumqueues;
+ m->m_pkthdr.ifring = ifrs[rid];
+
+ /* Pass packet up to parent. */
+ CURVNET_SET(ifp->if_vnet);
+ if_input(ifp, m);
+ CURVNET_RESTORE();
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* ibytes are counted in parent */
+
+ return (0);
+}
+
+/*
+ * vmepoll
+ *
+ * the poll interface, this is only useful on reads
+ * really. the write detect always returns true, write never blocks
+ * anyway, it either accepts the packet or drops it
+ */
+static int
+vmepoll(struct cdev *dev, int events, struct thread *td)
+{
+ struct vme_softc *tp = dev->si_drv1;
+ if_t ifp = tp->vme_ifp;
+ int revents = 0;
+
+ VMEDEBUG("%s polling, minor = %#x\n", if_name(ifp), dev2unit(dev));
+
+ mtx_lock(&tp->vme_mtx);
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (mbufq_len(&tp->vme_queue) > 0) {
+ /* NOTE(review): debug output reports if_snd_len()
+ * while the readiness check uses mbufq_len() */
+ VMEDEBUG("%s have data in queue. len = %d, " \
+ "minor = %#x\n", if_name(ifp),
+ if_snd_len(ifp), dev2unit(dev));
+
+ revents |= (events & (POLLIN | POLLRDNORM));
+ } else {
+ VMEDEBUG("%s waiting for data, minor = %#x\n",
+ if_name(ifp), dev2unit(dev));
+
+ selrecord(td, &tp->vme_rsel);
+ }
+ }
+ mtx_unlock(&tp->vme_mtx);
+
+ /* writes never block; always report writable */
+ if (events & (POLLOUT | POLLWRNORM))
+ revents |= (events & (POLLOUT | POLLWRNORM));
+
+ return (revents);
+}
+
+/*
+ * vme_kqfilter
+ *
+ * support for kevent() system call
+ */
+static int
+vmekqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct vme_softc *tp = dev->si_drv1;
+ if_t ifp = tp->vme_ifp;
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ VMEDEBUG("%s kqfilter: EVFILT_READ, minor = %#x\n",
+ if_name(ifp), dev2unit(dev));
+ kn->kn_fop = &vme_read_filterops;
+ break;
+
+ case EVFILT_WRITE:
+ VMEDEBUG("%s kqfilter: EVFILT_WRITE, minor = %#x\n",
+ if_name(ifp), dev2unit(dev));
+ kn->kn_fop = &vme_write_filterops;
+ break;
+
+ default:
+ VMEDEBUG("%s kqfilter: invalid filter, minor = %#x\n",
+ if_name(ifp), dev2unit(dev));
+ return (EINVAL);
+ /* NOT REACHED */
+ }
+
+ /* both filters share the read knlist, locked by vme_mtx */
+ kn->kn_hook = tp;
+ knlist_add(&tp->vme_rsel.si_note, kn, 0);
+
+ return (0);
+}
+
+/*
+ * vme_kqread
+ *
+ * Return true if there is data in the interface queue
+ * (queue length is reported via kn->kn_data)
+ */
+static int
+vmekqread(struct knote *kn, long hint)
+{
+ int ret;
+ struct vme_softc *tp = kn->kn_hook;
+ struct cdev *dev = tp->vme_dev;
+ if_t ifp = tp->vme_ifp;
+
+ mtx_lock(&tp->vme_mtx);
+ if ((kn->kn_data = mbufq_len(&tp->vme_queue)) > 0) {
+ VMEDEBUG("%s have data in queue. len = %d, minor = %#x\n",
+ if_name(ifp), if_snd_len(ifp), dev2unit(dev));
+ ret = 1;
+ } else {
+ VMEDEBUG("%s waiting for data, minor = %#x\n",
+ if_name(ifp), dev2unit(dev));
+ ret = 0;
+ }
+ mtx_unlock(&tp->vme_mtx);
+
+ return (ret);
+}
+
+/*
+ * vme_kqwrite
+ *
+ * Always can write. Return the MTU in kn->data
+ */
+static int
+vmekqwrite(struct knote *kn, long hint)
+{
+ struct vme_softc *tp = kn->kn_hook;
+
+ kn->kn_data = tp->vme_mtu;
+ return (1);
+}
+
+/* detach a knote from the softc's shared knlist */
+static void
+vmekqdetach(struct knote *kn)
+{
+ struct vme_softc *tp = kn->kn_hook;
+
+ knlist_remove(&tp->vme_rsel.si_note, kn, 0);
+}
+
+/*
+ * Compute a software RSS hash for an IPv4 frame.  Falls back from
+ * UDP/TCP 4-tuple hashing to plain 2-tuple IPv4 hashing depending on
+ * the configured hash types; fragments only get the 2-tuple hash.
+ * Leaves *hashval/*hashtype untouched when the headers don't fit in
+ * the first mbuf.
+ */
+static void
+vmersshash_v4(struct mbuf *m, uint32_t *hashval, uint32_t *hashtype)
+{
+ const struct ether_header *eh;
+ const struct ip *ip;
+ const struct tcphdr *th;
+ const struct udphdr *uh;
+ int hdrlen;
+ int iphlen;
+ uint8_t proto;
+ int is_frag = 0;
+
+ hdrlen = sizeof(*eh) + sizeof(*ip);
+
+ if (m->m_len < hdrlen)
+ return;
+
+ eh = mtod(m, struct ether_header *);
+ ip = (const struct ip *)(eh + 1);
+ proto = ip->ip_p;
+ iphlen = ip->ip_hl << 2;
+
+ /* fragments can't be hashed on ports */
+ if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
+ is_frag = 1;
+
+ if ((rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4) &&
+ (proto == IPPROTO_UDP) && (is_frag == 0)) {
+ hdrlen += iphlen - sizeof(*ip) + sizeof(*uh);
+ if (m->m_len < hdrlen)
+ return;
+ uh = (const struct udphdr *)((c_caddr_t)ip + iphlen);
+ rss_proto_software_hash_v4(ip->ip_src, ip->ip_dst,
+ uh->uh_sport, uh->uh_dport, proto, hashval,
+ hashtype);
+ } else if ((rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4) &&
+ (proto == IPPROTO_TCP) && (is_frag == 0)) {
+ hdrlen += iphlen - sizeof(*ip) + sizeof(*th);
+ if (m->m_len < hdrlen)
+ return;
+ th = (const struct tcphdr *)((c_caddr_t)ip + iphlen);
+ rss_proto_software_hash_v4(ip->ip_src, ip->ip_dst,
+ th->th_sport, th->th_dport, proto, hashval,
+ hashtype);
+ } else if (rss_gethashconfig() & RSS_HASHTYPE_RSS_IPV4) {
+ rss_proto_software_hash_v4(ip->ip_src, ip->ip_dst,
+ 0 /* src port */, 0 /* dst port */, 0 /* proto */,
+ hashval, hashtype);
+ } else {
+ printf("%s: no available hashtypes!\n", __func__);
+ }
+}
+
+/*
+ * Compute a software RSS hash for an IPv6 frame, mirroring the v4
+ * variant.  NOTE(review): extension headers are not walked -- the L4
+ * header is assumed to follow the fixed ip6_hdr directly; frames whose
+ * ip6_nxt is an extension header fall through to the 2-tuple case.
+ */
+static void
+vmersshash_v6(struct mbuf *m, uint32_t *hashval, uint32_t *hashtype)
+{
+ const struct ether_header *eh;
+ const struct ip6_hdr *ip6;
+ const struct tcphdr *th;
+ const struct udphdr *uh;
+ int hdrlen;
+ uint8_t proto;
+
+ hdrlen = sizeof(*eh) + sizeof(*ip6);
+
+ if (m->m_len < hdrlen)
+ return;
+
+ eh = mtod(m, struct ether_header *);
+ ip6 = (const struct ip6_hdr *)(eh + 1);
+ proto = ip6->ip6_nxt;
+
+ if ((rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6) &&
+ (proto == IPPROTO_UDP)) {
+ hdrlen += sizeof(*uh);
+ if (m->m_len < hdrlen)
+ return;
+ uh = (const struct udphdr *)(ip6 + 1);
+ rss_proto_software_hash_v6(&ip6->ip6_src, &ip6->ip6_dst,
+ uh->uh_sport, uh->uh_dport, proto, hashval,
+ hashtype);
+ } else if ((rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6) &&
+ (proto == IPPROTO_TCP)) {
+ hdrlen += sizeof(*th);
+ if (m->m_len < hdrlen)
+ return;
+ th = (const struct tcphdr *)(ip6 + 1);
+ rss_proto_software_hash_v6(&ip6->ip6_src, &ip6->ip6_dst,
+ th->th_sport, th->th_dport, proto, hashval,
+ hashtype);
+ } else if (rss_gethashconfig() & RSS_HASHTYPE_RSS_IPV6) {
+ rss_proto_software_hash_v6(&ip6->ip6_src, &ip6->ip6_dst,
+ 0 /* src port */, 0 /* dst port */, 0 /* proto */,
+ hashval, hashtype);
+ } else {
+ printf("%s: no available hashtypes!\n", __func__);
+ }
+}
+
+/*
+ * Dispatch on Ethertype and stamp the mbuf with a software RSS flowid
+ * and hash type.  Unknown Ethertypes get flowid 0 / M_HASHTYPE_NONE.
+ *
+ * NOTE(review): if m_pullup() fails it frees the chain and returns
+ * NULL; the caller still holds its old pointer.  Confirm the pullup
+ * path is unreachable for mbufs built by m_uiotombuf().
+ */
+static void
+vmersshash(struct mbuf *m)
+{
+ struct ether_header *eh;
+ int hdrlen;
+ uint32_t hashval, hashtype;
+
+ hdrlen = sizeof(*eh);
+
+ if (m->m_pkthdr.len < hdrlen || (m->m_len < hdrlen &&
+ (m = m_pullup(m, hdrlen)) == NULL))
+ return;
+
+ eh = mtod(m, struct ether_header *);
+
+ hashval = 0;
+ hashtype = M_HASHTYPE_NONE;
+
+ switch (ntohs(eh->ether_type)) {
+ case ETHERTYPE_IP:
+ vmersshash_v4(m, &hashval, &hashtype);
+ break;
+ case ETHERTYPE_IPV6:
+ vmersshash_v6(m, &hashval, &hashtype);
+ break;
+ default:
+ /* Not supported */
+ break;
+ }
+
+ m->m_pkthdr.flowid = hashval;
+ M_HASHTYPE_SET(m, hashtype);
+}

File Metadata

Mime Type
text/plain
Expires
Sun, Oct 26, 7:16 AM (11 h, 4 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
24218707
Default Alt Text
D3425.diff (32 KB)

Event Timeline