Index: head/sys/coda/coda_fbsd.c =================================================================== --- head/sys/coda/coda_fbsd.c (revision 129879) +++ head/sys/coda/coda_fbsd.c (revision 129880) @@ -1,221 +1,222 @@ /* * Coda: an Experimental Distributed File System * Release 3.1 * * Copyright (c) 1987-1998 Carnegie Mellon University * All Rights Reserved * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation, and * that credit is given to Carnegie Mellon University in all documents * and publicity pertaining to direct or indirect use of this code or its * derivatives. * * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF * ANY DERIVATIVE WORK. * * Carnegie Mellon encourages users of this software to return any * improvements or extensions that they make, and to grant Carnegie * Mellon the rights to redistribute these changes without encumbrance. * * @(#) src/sys/coda/coda_fbsd.cr,v 1.1.1.1 1998/08/29 21:14:52 rvb Exp $ */ #include __FBSDID("$FreeBSD$"); #include "vcoda.h" #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include /* From: "Jordan K. Hubbard" Subject: Re: New 3.0 SNAPshot CDROM about ready for production.. To: "Robert.V.Baron" Date: Fri, 20 Feb 1998 15:57:01 -0800 > Also I need a character device major number. (and might want to reserve > a block of 10 syscalls.) Just one char device number? 
No block devices? Very well, cdev 93 is yours! */ #define VC_DEV_NO 93 static struct cdevsw codadevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = vc_nb_open, .d_close = vc_nb_close, .d_read = vc_nb_read, .d_write = vc_nb_write, .d_ioctl = vc_nb_ioctl, .d_poll = vc_nb_poll, .d_name = "Coda", .d_maj = VC_DEV_NO, }; int vcdebug = 1; #define VCDEBUG if (vcdebug) printf static int codadev_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: break; case MOD_UNLOAD: break; default: break; } return 0; } static moduledata_t codadev_mod = { "codadev", codadev_modevent, NULL }; DECLARE_MODULE(codadev, codadev_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+VC_DEV_NO); int coda_fbsd_getpages(v) void *v; { struct vop_getpages_args *ap = v; #if 1 return vop_stdgetpages(ap); #else { struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct vnode *cfvp = cp->c_ovp; int opened_internally = 0; struct ucred *cred = (struct ucred *) 0; struct proc *p = curproc; int error = 0; if (IS_CTL_VP(vp)) { return(EINVAL); } /* Redirect the request to UFS. */ if (cfvp == NULL) { opened_internally = 1; error = VOP_OPEN(vp, FREAD, cred, p, -1); printf("coda_getp: Internally Opening %p\n", vp); if (error) { printf("coda_getpage: VOP_OPEN on container failed %d\n", error); return (error); } if (vp->v_type == VREG) { error = vfs_object_create(vp, p, cred); if (error != 0) { printf("coda_getpage: vfs_object_create() returns %d\n", error); vput(vp); return(error); } } cfvp = cp->c_ovp; } else { printf("coda_getp: has container %p\n", cfvp); } printf("coda_fbsd_getpages: using container "); /* error = vnode_pager_generic_getpages(cfvp, ap->a_m, ap->a_count, ap->a_reqpage); */ error = VOP_GETPAGES(cfvp, ap->a_m, ap->a_count, ap->a_reqpage, ap->a_offset); printf("error = %d\n", error); /* Do an internal close if necessary. 
*/ if (opened_internally) { (void)VOP_CLOSE(vp, FREAD, cred, p); } return(error); } #endif } /* for DEVFS, using bpf & tun drivers as examples*/ static void coda_fbsd_drvinit(void *unused); static void coda_fbsd_drvuninit(void *unused); static void coda_fbsd_clone(void *arg, char *name, int namelen, dev_t *dev); static eventhandler_tag clonetag; static void coda_fbsd_clone(arg, name, namelen, dev) void *arg; char *name; int namelen; dev_t *dev; { int u; if (*dev != NODEV) return; if (dev_stdclone(name,NULL,"cfs",&u) != 1) return; *dev = make_dev(&codadevsw,unit2minor(u),UID_ROOT,GID_WHEEL,0600,"cfs%d",u); coda_mnttbl[unit2minor(u)].dev = *dev; } static void coda_fbsd_drvinit(unused) void *unused; { int i; clonetag = EVENTHANDLER_REGISTER(dev_clone,coda_fbsd_clone,0,1000); for(i=0;i * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_kbd.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include typedef struct { struct resource *intr; void *ih; } atkbd_softc_t; static devclass_t atkbd_devclass; static void atkbdidentify(driver_t *driver, device_t dev); static int atkbdprobe(device_t dev); static int atkbdattach(device_t dev); static int atkbdresume(device_t dev); static void atkbd_isa_intr(void *arg); static device_method_t atkbd_methods[] = { DEVMETHOD(device_identify, atkbdidentify), DEVMETHOD(device_probe, atkbdprobe), DEVMETHOD(device_attach, atkbdattach), DEVMETHOD(device_resume, atkbdresume), { 0, 0 } }; static driver_t atkbd_driver = { ATKBD_DRIVER_NAME, atkbd_methods, sizeof(atkbd_softc_t), }; static void atkbdidentify(driver_t *driver, device_t parent) { /* always add at least one child */ BUS_ADD_CHILD(parent, KBDC_RID_KBD, driver->name, device_get_unit(parent)); } static int atkbdprobe(device_t dev) { struct resource *res; u_long irq; int flags; int rid; device_set_desc(dev, "AT Keyboard"); /* obtain parameters */ flags = device_get_flags(dev); /* see if IRQ is available */ rid = KBDC_RID_KBD; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (res == NULL) { if (bootverbose) device_printf(dev, "unable to allocate IRQ\n"); return ENXIO; } irq = rman_get_start(res); bus_release_resource(dev, SYS_RES_IRQ, rid, res); /* probe the device */ 
return atkbd_probe_unit(device_get_unit(dev), device_get_unit(device_get_parent(dev)), irq, flags); } static int atkbdattach(device_t dev) { atkbd_softc_t *sc; keyboard_t *kbd; u_long irq; int flags; int rid; int error; sc = device_get_softc(dev); rid = KBDC_RID_KBD; irq = bus_get_resource_start(dev, SYS_RES_IRQ, rid); flags = device_get_flags(dev); error = atkbd_attach_unit(device_get_unit(dev), &kbd, device_get_unit(device_get_parent(dev)), irq, flags); if (error) return error; /* declare our interrupt handler */ sc->intr = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->intr == NULL) return ENXIO; error = bus_setup_intr(dev, sc->intr, INTR_TYPE_TTY, atkbd_isa_intr, kbd, &sc->ih); if (error) bus_release_resource(dev, SYS_RES_IRQ, rid, sc->intr); return error; } static int atkbdresume(device_t dev) { atkbd_softc_t *sc; keyboard_t *kbd; int args[2]; sc = device_get_softc(dev); kbd = kbd_get_keyboard(kbd_find_keyboard(ATKBD_DRIVER_NAME, device_get_unit(dev))); if (kbd) { kbd->kb_flags &= ~KB_INITIALIZED; args[0] = device_get_unit(device_get_parent(dev)); args[1] = rman_get_start(sc->intr); (*kbdsw[kbd->kb_index]->init)(device_get_unit(dev), &kbd, args, device_get_flags(dev)); (*kbdsw[kbd->kb_index]->clear_state)(kbd); } return 0; } static void atkbd_isa_intr(void *arg) { keyboard_t *kbd; kbd = (keyboard_t *)arg; (*kbdsw[kbd->kb_index]->intr)(kbd, NULL); } DRIVER_MODULE(atkbd, atkbdc, atkbd_driver, atkbd_devclass, 0, 0); Index: head/sys/dev/atkbdc/atkbd_isa.c =================================================================== --- head/sys/dev/atkbdc/atkbd_isa.c (revision 129879) +++ head/sys/dev/atkbdc/atkbd_isa.c (revision 129880) @@ -1,179 +1,180 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_kbd.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include typedef struct { struct resource *intr; void *ih; } atkbd_softc_t; static devclass_t atkbd_devclass; static void atkbdidentify(driver_t *driver, device_t dev); static int atkbdprobe(device_t dev); static int atkbdattach(device_t dev); static int atkbdresume(device_t dev); static void atkbd_isa_intr(void *arg); static device_method_t atkbd_methods[] = { DEVMETHOD(device_identify, atkbdidentify), DEVMETHOD(device_probe, atkbdprobe), DEVMETHOD(device_attach, atkbdattach), DEVMETHOD(device_resume, atkbdresume), { 0, 0 } }; static driver_t atkbd_driver = { ATKBD_DRIVER_NAME, atkbd_methods, sizeof(atkbd_softc_t), }; static void atkbdidentify(driver_t *driver, device_t parent) { /* always add at least one child */ BUS_ADD_CHILD(parent, KBDC_RID_KBD, driver->name, device_get_unit(parent)); } static int atkbdprobe(device_t dev) { struct resource *res; u_long irq; int flags; int rid; device_set_desc(dev, "AT Keyboard"); /* obtain parameters */ flags = device_get_flags(dev); /* see if IRQ is available */ rid = KBDC_RID_KBD; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (res == NULL) { if (bootverbose) device_printf(dev, "unable to allocate IRQ\n"); return ENXIO; } irq = rman_get_start(res); bus_release_resource(dev, SYS_RES_IRQ, rid, res); /* probe the device */ return atkbd_probe_unit(device_get_unit(dev), device_get_unit(device_get_parent(dev)), irq, flags); } static int atkbdattach(device_t dev) { atkbd_softc_t *sc; keyboard_t *kbd; u_long irq; int flags; int rid; int error; sc = device_get_softc(dev); rid = KBDC_RID_KBD; irq = bus_get_resource_start(dev, SYS_RES_IRQ, rid); flags = device_get_flags(dev); error = atkbd_attach_unit(device_get_unit(dev), &kbd, device_get_unit(device_get_parent(dev)), irq, flags); if (error) return error; /* declare our 
interrupt handler */ sc->intr = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->intr == NULL) return ENXIO; error = bus_setup_intr(dev, sc->intr, INTR_TYPE_TTY, atkbd_isa_intr, kbd, &sc->ih); if (error) bus_release_resource(dev, SYS_RES_IRQ, rid, sc->intr); return error; } static int atkbdresume(device_t dev) { atkbd_softc_t *sc; keyboard_t *kbd; int args[2]; sc = device_get_softc(dev); kbd = kbd_get_keyboard(kbd_find_keyboard(ATKBD_DRIVER_NAME, device_get_unit(dev))); if (kbd) { kbd->kb_flags &= ~KB_INITIALIZED; args[0] = device_get_unit(device_get_parent(dev)); args[1] = rman_get_start(sc->intr); (*kbdsw[kbd->kb_index]->init)(device_get_unit(dev), &kbd, args, device_get_flags(dev)); (*kbdsw[kbd->kb_index]->clear_state)(kbd); } return 0; } static void atkbd_isa_intr(void *arg) { keyboard_t *kbd; kbd = (keyboard_t *)arg; (*kbdsw[kbd->kb_index]->intr)(kbd, NULL); } DRIVER_MODULE(atkbd, atkbdc, atkbd_driver, atkbd_devclass, 0, 0); Index: head/sys/dev/atkbdc/atkbdc_isa.c =================================================================== --- head/sys/dev/atkbdc/atkbdc_isa.c (revision 129879) +++ head/sys/dev/atkbdc/atkbdc_isa.c (revision 129880) @@ -1,368 +1,369 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_kbd.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ATKBDDEV, "atkbddev", "AT Keyboard device"); /* children */ typedef struct atkbdc_device { struct resource_list resources; int rid; u_int32_t vendorid; u_int32_t serial; u_int32_t logicalid; u_int32_t compatid; } atkbdc_device_t; /* kbdc */ static devclass_t atkbdc_devclass; static int atkbdc_probe(device_t dev); static int atkbdc_attach(device_t dev); static device_t atkbdc_add_child(device_t bus, int order, char *name, int unit); static int atkbdc_print_child(device_t bus, device_t dev); static int atkbdc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val); static int atkbdc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val); static struct resource_list *atkbdc_get_resource_list (device_t bus, device_t dev); static struct resource *atkbdc_alloc_resource(device_t bus, device_t dev, int type, int *rid, u_long start, u_long end, u_long count, u_int flags); static int atkbdc_release_resource(device_t bus, device_t dev, int type, int rid, struct resource *res); static device_method_t atkbdc_methods[] = { DEVMETHOD(device_probe, 
atkbdc_probe), DEVMETHOD(device_attach, atkbdc_attach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(bus_add_child, atkbdc_add_child), DEVMETHOD(bus_print_child, atkbdc_print_child), DEVMETHOD(bus_read_ivar, atkbdc_read_ivar), DEVMETHOD(bus_write_ivar, atkbdc_write_ivar), DEVMETHOD(bus_get_resource_list,atkbdc_get_resource_list), DEVMETHOD(bus_alloc_resource, atkbdc_alloc_resource), DEVMETHOD(bus_release_resource, atkbdc_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static driver_t atkbdc_driver = { ATKBDC_DRIVER_NAME, atkbdc_methods, sizeof(atkbdc_softc_t *), }; static struct isa_pnp_id atkbdc_ids[] = { { 0x0303d041, "Keyboard controller (i8042)" }, /* PNP0303 */ { 0 } }; static int atkbdc_probe(device_t dev) { struct resource *port0; struct resource *port1; u_long start; u_long count; int error; int rid; /* check PnP IDs */ if (ISA_PNP_PROBE(device_get_parent(dev), dev, atkbdc_ids) == ENXIO) return ENXIO; device_set_desc(dev, "Keyboard controller (i8042)"); /* * Adjust I/O port resources. * The AT keyboard controller uses two ports (a command/data port * 0x60 and a status port 0x64), which may be given to us in * one resource (0x60 through 0x64) or as two separate resources * (0x60 and 0x64). Furthermore, /boot/device.hints may contain * just one port, 0x60. We shall adjust resource settings * so that these two ports are available as two separate resources. 
*/ device_quiet(dev); rid = 0; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, &start, &count) != 0) return ENXIO; if (count > 1) /* adjust the count */ bus_set_resource(dev, SYS_RES_IOPORT, rid, start, 1); port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port0 == NULL) return ENXIO; rid = 1; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, NULL, NULL) != 0) bus_set_resource(dev, SYS_RES_IOPORT, 1, start + KBD_STATUS_PORT, 1); port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); return ENXIO; } device_verbose(dev); error = atkbdc_probe_unit(device_get_unit(dev), port0, port1); if (error == 0) bus_generic_probe(dev); bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, port1); return error; } static int atkbdc_attach(device_t dev) { atkbdc_softc_t *sc; int unit; int error; int rid; unit = device_get_unit(dev); sc = *(atkbdc_softc_t **)device_get_softc(dev); if (sc == NULL) { /* * We have to maintain two copies of the kbdc_softc struct, * as the low-level console needs to have access to the * keyboard controller before kbdc is probed and attached. * kbdc_soft[] contains the default entry for that purpose. * See atkbdc.c. 
XXX */ sc = atkbdc_get_softc(unit); if (sc == NULL) return ENOMEM; } rid = 0; sc->port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port0 == NULL) return ENXIO; rid = 1; sc->port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); return ENXIO; } error = atkbdc_attach_unit(unit, sc, sc->port0, sc->port1); if (error) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, sc->port1); return error; } *(atkbdc_softc_t **)device_get_softc(dev) = sc; bus_generic_attach(dev); return 0; } static device_t atkbdc_add_child(device_t bus, int order, char *name, int unit) { atkbdc_device_t *ivar; device_t child; int t; ivar = malloc(sizeof(struct atkbdc_device), M_ATKBDDEV, M_NOWAIT | M_ZERO); if (!ivar) return NULL; child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { free(ivar, M_ATKBDDEV); return child; } resource_list_init(&ivar->resources); ivar->rid = order; /* * If the device is not created by the PnP BIOS or ACPI, * refer to device hints for IRQ. 
*/ if (ISA_PNP_PROBE(device_get_parent(bus), bus, atkbdc_ids) != 0) { if (resource_int_value(name, unit, "irq", &t) != 0) t = -1; } else { t = bus_get_resource_start(bus, SYS_RES_IRQ, ivar->rid); } if (t > 0) resource_list_add(&ivar->resources, SYS_RES_IRQ, ivar->rid, t, t, 1); if (resource_int_value(name, unit, "flags", &t) == 0) device_set_flags(child, t); if (resource_disabled(name, unit)) device_disable(child); device_set_ivars(child, ivar); return child; } static int atkbdc_print_child(device_t bus, device_t dev) { atkbdc_device_t *kbdcdev; u_long irq; int flags; int retval = 0; kbdcdev = (atkbdc_device_t *)device_get_ivars(dev); retval += bus_print_child_header(bus, dev); flags = device_get_flags(dev); if (flags != 0) retval += printf(" flags 0x%x", flags); irq = bus_get_resource_start(dev, SYS_RES_IRQ, kbdcdev->rid); if (irq != 0) retval += printf(" irq %ld", irq); retval += bus_print_child_footer(bus, dev); return (retval); } static int atkbdc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); switch (index) { case KBDC_IVAR_VENDORID: *val = (u_long)ivar->vendorid; break; case KBDC_IVAR_SERIAL: *val = (u_long)ivar->serial; break; case KBDC_IVAR_LOGICALID: *val = (u_long)ivar->logicalid; break; case KBDC_IVAR_COMPATID: *val = (u_long)ivar->compatid; break; default: return ENOENT; } return 0; } static int atkbdc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); switch (index) { case KBDC_IVAR_VENDORID: ivar->vendorid = (u_int32_t)val; break; case KBDC_IVAR_SERIAL: ivar->serial = (u_int32_t)val; break; case KBDC_IVAR_LOGICALID: ivar->logicalid = (u_int32_t)val; break; case KBDC_IVAR_COMPATID: ivar->compatid = (u_int32_t)val; break; default: return ENOENT; } return 0; } static struct resource_list *atkbdc_get_resource_list (device_t bus, device_t dev) { atkbdc_device_t *ivar; ivar = 
(atkbdc_device_t *)device_get_ivars(dev); return &ivar->resources; } static struct resource *atkbdc_alloc_resource(device_t bus, device_t dev, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); return resource_list_alloc(&ivar->resources, bus, dev, type, rid, start, end, count, flags); } static int atkbdc_release_resource(device_t bus, device_t dev, int type, int rid, struct resource *res) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); return resource_list_release(&ivar->resources, bus, dev, type, rid, res); } DRIVER_MODULE(atkbdc, isa, atkbdc_driver, atkbdc_devclass, 0, 0); DRIVER_MODULE(atkbdc, acpi, atkbdc_driver, atkbdc_devclass, 0, 0); Index: head/sys/dev/atkbdc/atkbdc_subr.c =================================================================== --- head/sys/dev/atkbdc/atkbdc_subr.c (revision 129879) +++ head/sys/dev/atkbdc/atkbdc_subr.c (revision 129880) @@ -1,368 +1,369 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_kbd.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ATKBDDEV, "atkbddev", "AT Keyboard device"); /* children */ typedef struct atkbdc_device { struct resource_list resources; int rid; u_int32_t vendorid; u_int32_t serial; u_int32_t logicalid; u_int32_t compatid; } atkbdc_device_t; /* kbdc */ static devclass_t atkbdc_devclass; static int atkbdc_probe(device_t dev); static int atkbdc_attach(device_t dev); static device_t atkbdc_add_child(device_t bus, int order, char *name, int unit); static int atkbdc_print_child(device_t bus, device_t dev); static int atkbdc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val); static int atkbdc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val); static struct resource_list *atkbdc_get_resource_list (device_t bus, device_t dev); static struct resource *atkbdc_alloc_resource(device_t bus, device_t dev, int type, int *rid, u_long start, u_long end, u_long count, u_int flags); static int atkbdc_release_resource(device_t bus, device_t dev, int type, int rid, struct resource *res); static device_method_t atkbdc_methods[] = { DEVMETHOD(device_probe, atkbdc_probe), DEVMETHOD(device_attach, atkbdc_attach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(bus_add_child, atkbdc_add_child), DEVMETHOD(bus_print_child, 
atkbdc_print_child), DEVMETHOD(bus_read_ivar, atkbdc_read_ivar), DEVMETHOD(bus_write_ivar, atkbdc_write_ivar), DEVMETHOD(bus_get_resource_list,atkbdc_get_resource_list), DEVMETHOD(bus_alloc_resource, atkbdc_alloc_resource), DEVMETHOD(bus_release_resource, atkbdc_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static driver_t atkbdc_driver = { ATKBDC_DRIVER_NAME, atkbdc_methods, sizeof(atkbdc_softc_t *), }; static struct isa_pnp_id atkbdc_ids[] = { { 0x0303d041, "Keyboard controller (i8042)" }, /* PNP0303 */ { 0 } }; static int atkbdc_probe(device_t dev) { struct resource *port0; struct resource *port1; u_long start; u_long count; int error; int rid; /* check PnP IDs */ if (ISA_PNP_PROBE(device_get_parent(dev), dev, atkbdc_ids) == ENXIO) return ENXIO; device_set_desc(dev, "Keyboard controller (i8042)"); /* * Adjust I/O port resources. * The AT keyboard controller uses two ports (a command/data port * 0x60 and a status port 0x64), which may be given to us in * one resource (0x60 through 0x64) or as two separate resources * (0x60 and 0x64). Furthermore, /boot/device.hints may contain * just one port, 0x60. We shall adjust resource settings * so that these two ports are available as two separate resources. 
*/ device_quiet(dev); rid = 0; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, &start, &count) != 0) return ENXIO; if (count > 1) /* adjust the count */ bus_set_resource(dev, SYS_RES_IOPORT, rid, start, 1); port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port0 == NULL) return ENXIO; rid = 1; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, NULL, NULL) != 0) bus_set_resource(dev, SYS_RES_IOPORT, 1, start + KBD_STATUS_PORT, 1); port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); return ENXIO; } device_verbose(dev); error = atkbdc_probe_unit(device_get_unit(dev), port0, port1); if (error == 0) bus_generic_probe(dev); bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, port1); return error; } static int atkbdc_attach(device_t dev) { atkbdc_softc_t *sc; int unit; int error; int rid; unit = device_get_unit(dev); sc = *(atkbdc_softc_t **)device_get_softc(dev); if (sc == NULL) { /* * We have to maintain two copies of the kbdc_softc struct, * as the low-level console needs to have access to the * keyboard controller before kbdc is probed and attached. * kbdc_soft[] contains the default entry for that purpose. * See atkbdc.c. 
XXX */ sc = atkbdc_get_softc(unit); if (sc == NULL) return ENOMEM; } rid = 0; sc->port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port0 == NULL) return ENXIO; rid = 1; sc->port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); return ENXIO; } error = atkbdc_attach_unit(unit, sc, sc->port0, sc->port1); if (error) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, sc->port1); return error; } *(atkbdc_softc_t **)device_get_softc(dev) = sc; bus_generic_attach(dev); return 0; } static device_t atkbdc_add_child(device_t bus, int order, char *name, int unit) { atkbdc_device_t *ivar; device_t child; int t; ivar = malloc(sizeof(struct atkbdc_device), M_ATKBDDEV, M_NOWAIT | M_ZERO); if (!ivar) return NULL; child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { free(ivar, M_ATKBDDEV); return child; } resource_list_init(&ivar->resources); ivar->rid = order; /* * If the device is not created by the PnP BIOS or ACPI, * refer to device hints for IRQ. 
*/ if (ISA_PNP_PROBE(device_get_parent(bus), bus, atkbdc_ids) != 0) { if (resource_int_value(name, unit, "irq", &t) != 0) t = -1; } else { t = bus_get_resource_start(bus, SYS_RES_IRQ, ivar->rid); } if (t > 0) resource_list_add(&ivar->resources, SYS_RES_IRQ, ivar->rid, t, t, 1); if (resource_int_value(name, unit, "flags", &t) == 0) device_set_flags(child, t); if (resource_disabled(name, unit)) device_disable(child); device_set_ivars(child, ivar); return child; } static int atkbdc_print_child(device_t bus, device_t dev) { atkbdc_device_t *kbdcdev; u_long irq; int flags; int retval = 0; kbdcdev = (atkbdc_device_t *)device_get_ivars(dev); retval += bus_print_child_header(bus, dev); flags = device_get_flags(dev); if (flags != 0) retval += printf(" flags 0x%x", flags); irq = bus_get_resource_start(dev, SYS_RES_IRQ, kbdcdev->rid); if (irq != 0) retval += printf(" irq %ld", irq); retval += bus_print_child_footer(bus, dev); return (retval); } static int atkbdc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); switch (index) { case KBDC_IVAR_VENDORID: *val = (u_long)ivar->vendorid; break; case KBDC_IVAR_SERIAL: *val = (u_long)ivar->serial; break; case KBDC_IVAR_LOGICALID: *val = (u_long)ivar->logicalid; break; case KBDC_IVAR_COMPATID: *val = (u_long)ivar->compatid; break; default: return ENOENT; } return 0; } static int atkbdc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); switch (index) { case KBDC_IVAR_VENDORID: ivar->vendorid = (u_int32_t)val; break; case KBDC_IVAR_SERIAL: ivar->serial = (u_int32_t)val; break; case KBDC_IVAR_LOGICALID: ivar->logicalid = (u_int32_t)val; break; case KBDC_IVAR_COMPATID: ivar->compatid = (u_int32_t)val; break; default: return ENOENT; } return 0; } static struct resource_list *atkbdc_get_resource_list (device_t bus, device_t dev) { atkbdc_device_t *ivar; ivar = 
(atkbdc_device_t *)device_get_ivars(dev); return &ivar->resources; } static struct resource *atkbdc_alloc_resource(device_t bus, device_t dev, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); return resource_list_alloc(&ivar->resources, bus, dev, type, rid, start, end, count, flags); } static int atkbdc_release_resource(device_t bus, device_t dev, int type, int rid, struct resource *res) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); return resource_list_release(&ivar->resources, bus, dev, type, rid, res); } DRIVER_MODULE(atkbdc, isa, atkbdc_driver, atkbdc_devclass, 0, 0); DRIVER_MODULE(atkbdc, acpi, atkbdc_driver, atkbdc_devclass, 0, 0); Index: head/sys/dev/fb/splash_bmp.c =================================================================== --- head/sys/dev/fb/splash_bmp.c (revision 129879) +++ head/sys/dev/fb/splash_bmp.c (revision 129880) @@ -1,642 +1,643 @@ /*- * Copyright (c) 1999 Michael Smith * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include +#include #include #include #include #include #ifndef PC98 #include #include #endif #define FADE_TIMEOUT 15 /* sec */ #define FADE_LEVELS 10 static int splash_mode = -1; static int splash_on = FALSE; static int bmp_start(video_adapter_t *adp); static int bmp_end(video_adapter_t *adp); static int bmp_splash(video_adapter_t *adp, int on); static int bmp_Init(char *data, int swidth, int sheight, int sdepth); static int bmp_Draw(video_adapter_t *adp); static splash_decoder_t bmp_decoder = { "splash_bmp", bmp_start, bmp_end, bmp_splash, SPLASH_IMAGE, }; SPLASH_DECODER(splash_bmp, bmp_decoder); static int bmp_start(video_adapter_t *adp) { /* currently only 256-color modes are supported XXX */ static int modes[] = { #ifdef PC98 /* * As 640x400 doesn't generally look great, * it's least preferred here. */ M_PC98_PEGC640x400, M_PC98_PEGC640x480, M_PC98_EGC640x400, #else M_VESA_CG640x480, M_VESA_CG800x600, M_VESA_CG1024x768, M_CG640x480, /* * As 320x200 doesn't generally look great, * it's least preferred here. 
*/ M_VGA_CG320, #endif -1, }; video_info_t info; int i; if ((bmp_decoder.data == NULL) || (bmp_decoder.data_size <= 0)) { printf("splash_bmp: No bitmap file found\n"); return ENODEV; } for (i = 0; modes[i] >= 0; ++i) { if (((*vidsw[adp->va_index]->get_info)(adp, modes[i], &info) == 0) && (bmp_Init((u_char *)bmp_decoder.data, info.vi_width, info.vi_height, info.vi_depth) == 0)) break; } splash_mode = modes[i]; if (splash_mode < 0) printf("splash_bmp: No appropriate video mode found\n"); if (bootverbose) printf("bmp_start(): splash_mode:%d\n", splash_mode); return ((splash_mode < 0) ? ENODEV : 0); } static int bmp_end(video_adapter_t *adp) { /* nothing to do */ return 0; } static int bmp_splash(video_adapter_t *adp, int on) { static u_char pal[256*3]; static long time_stamp; u_char tpal[256*3]; static int fading = TRUE, brightness = FADE_LEVELS; struct timeval tv; int i; if (on) { if (!splash_on) { /* set up the video mode and draw something */ if ((*vidsw[adp->va_index]->set_mode)(adp, splash_mode)) return 1; if (bmp_Draw(adp)) return 1; (*vidsw[adp->va_index]->save_palette)(adp, pal); time_stamp = 0; splash_on = TRUE; } /* * This is a kludge to fade the image away. This section of the * code takes effect only after the system is completely up. * FADE_TIMEOUT should be configurable. */ if (!cold) { getmicrotime(&tv); if (time_stamp == 0) time_stamp = tv.tv_sec; if (tv.tv_sec > time_stamp + FADE_TIMEOUT) { if (fading) if (brightness == 0) { fading = FALSE; brightness++; } else brightness--; else if (brightness == FADE_LEVELS) { fading = TRUE; brightness--; } else brightness++; for (i = 0; i < sizeof(pal); ++i) { tpal[i] = pal[i] * brightness / FADE_LEVELS; } (*vidsw[adp->va_index]->load_palette)(adp, tpal); time_stamp = tv.tv_sec; } } return 0; } else { /* the video mode will be restored by the caller */ splash_on = FALSE; return 0; } } /* ** Code to handle Microsoft DIB (".BMP") format images. ** ** Blame me (msmith@freebsd.org) if this is broken, not Soren. 
*/ typedef struct tagBITMAPFILEHEADER { /* bmfh */ u_short bfType __packed; int bfSize __packed; u_short bfReserved1 __packed; u_short bfReserved2 __packed; int bfOffBits __packed; } BITMAPFILEHEADER; typedef struct tagBITMAPINFOHEADER { /* bmih */ int biSize __packed; int biWidth __packed; int biHeight __packed; short biPlanes __packed; short biBitCount __packed; int biCompression __packed; int biSizeImage __packed; int biXPelsPerMeter __packed; int biYPelsPerMeter __packed; int biClrUsed __packed; int biClrImportant __packed; } BITMAPINFOHEADER; typedef struct tagRGBQUAD { /* rgbq */ u_char rgbBlue __packed; u_char rgbGreen __packed; u_char rgbRed __packed; u_char rgbReserved __packed; } RGBQUAD; typedef struct tagBITMAPINFO { /* bmi */ BITMAPINFOHEADER bmiHeader __packed; RGBQUAD bmiColors[256] __packed; } BITMAPINFO; typedef struct tagBITMAPF { BITMAPFILEHEADER bmfh __packed; BITMAPINFO bmfi __packed; } BITMAPF; #define BI_RGB 0 #define BI_RLE8 1 #define BI_RLE4 2 /* ** all we actually care about the image */ typedef struct { int width,height; /* image dimensions */ int swidth,sheight; /* screen dimensions for the current mode */ u_char depth; /* image depth (1, 4, 8, 24 bits) */ u_char sdepth; /* screen depth (1, 4, 8 bpp) */ int ncols; /* number of colours */ u_char palette[256][3]; /* raw palette data */ u_char format; /* one of the BI_* constants above */ u_char *data; /* pointer to the raw data */ u_char *index; /* running pointer to the data while drawing */ u_char *vidmem; /* video memory allocated for drawing */ video_adapter_t *adp; int bank; #ifdef PC98 u_char prev_val; #endif } BMP_INFO; static BMP_INFO bmp_info; /* ** bmp_SetPix ** ** Given (info), set the pixel at (x),(y) to (val) ** */ static void bmp_SetPix(BMP_INFO *info, int x, int y, u_char val) { int sofs, bofs; int newbank; /* * range check to avoid explosions */ if ((x < 0) || (x >= info->swidth) || (y < 0) || (y >= info->sheight)) return; /* * calculate offset into video memory; * because 
0,0 is bottom-left for DIB, we have to convert. */ sofs = ((info->height - (y+1) + (info->sheight - info->height) / 2) * info->adp->va_line_width); x += (info->swidth - info->width) / 2; switch(info->sdepth) { #ifdef PC98 case 4: sofs += (x >> 3); bofs = x & 0x7; /* offset within byte */ outb(0x7c, 0x80 | 0x40); /* GRCG on & RMW mode */ if (val != info->prev_val) { outb(0x7e, (val & 1) ? 0xff : 0); /* tile B */ outb(0x7e, (val & 2) ? 0xff : 0); /* tile R */ outb(0x7e, (val & 4) ? 0xff : 0); /* tile G */ outb(0x7e, (val & 8) ? 0xff : 0); /* tile I */ info->prev_val = val; } *(info->vidmem+sofs) = (0x80 >> bofs); /* write new bit */ outb(0x7c, 0); /* GRCG off */ break; #else case 4: case 1: /* EGA/VGA planar modes */ sofs += (x >> 3); newbank = sofs/info->adp->va_window_size; if (info->bank != newbank) { (*vidsw[info->adp->va_index]->set_win_org)(info->adp, newbank*info->adp->va_window_size); info->bank = newbank; } sofs %= info->adp->va_window_size; bofs = x & 0x7; /* offset within byte */ outw(GDCIDX, (0x8000 >> bofs) | 0x08); /* bit mask */ outw(GDCIDX, (val << 8) | 0x00); /* set/reset */ *(info->vidmem + sofs) ^= 0xff; /* read-modify-write */ break; #endif case 8: sofs += x; newbank = sofs/info->adp->va_window_size; if (info->bank != newbank) { (*vidsw[info->adp->va_index]->set_win_org)(info->adp, newbank*info->adp->va_window_size); info->bank = newbank; } sofs %= info->adp->va_window_size; *(info->vidmem+sofs) = val; break; } } /* ** bmp_DecodeRLE4 ** ** Given (data) pointing to a line of RLE4-format data and (line) being the starting ** line onscreen, decode the line. 
*/
static void
bmp_DecodeRLE4(BMP_INFO *info, int line)
{
    int		count;		/* run count */
    u_char	val;
    int		x,y;		/* screen position */

    x = 0;			/* starting position */
    y = line;

    /* loop reading data */
    for (;;) {
	/*
	 * encoded mode starts with a run length, and then a byte with
	 * two colour indexes to alternate between for the run
	 */
	if (*info->index) {
	    for (count = 0; count < *info->index; count++, x++) {
		if (count & 1) {
		    /* odd count, low nybble */
		    bmp_SetPix(info, x, y, *(info->index+1) & 0x0f);
		} else {
		    /* even count, high nybble */
		    bmp_SetPix(info, x, y, (*(info->index+1) >>4) & 0x0f);
		}
	    }
	    info->index += 2;
	/*
	 * A leading zero is an escape; it may signal the end of the
	 * bitmap, a cursor move, or some absolute data.
	 */
	} else {		/* zero tag may be absolute mode or an escape */
	    switch (*(info->index+1)) {
	    case 0:		/* end of line */
		info->index += 2;
		return;
	    case 1:		/* end of bitmap */
		/* NULL index tells the caller's draw loop to stop */
		info->index = NULL;
		return;
	    case 2:		/* move */
		x += *(info->index + 2);	/* new coords */
		y += *(info->index + 3);
		info->index += 4;
		break;
	    default:		/* literal bitmap data */
		for (count = 0; count < *(info->index + 1); count++, x++) {
		    val = *(info->index + 2 + (count / 2));	/* byte with nybbles */
		    if (count & 1) {
			val &= 0xf;	/* get low nybble */
		    } else {
			val = (val >> 4);	/* get high nybble */
		    }
		    bmp_SetPix(info, x, y, val);
		}
		/* warning, this depends on integer truncation, do not hand-optimise! */
		/* skip the two escape bytes plus nybble data padded to a 16-bit boundary */
		info->index += 2 + ((count + 3) / 4) * 2;
		break;
	    }
	}
    }
}

/*
** bmp_DecodeRLE8
** Given (data) pointing to a line of RLE8-format data and (line) being the starting
** line onscreen, decode the line.
*/
static void
bmp_DecodeRLE8(BMP_INFO *info, int line)
{
    int		count;		/* run count */
    int		x,y;		/* screen position */

    x = 0;			/* starting position */
    y = line;

    /* loop reading data */
    for(;;) {
	/*
	 * encoded mode starts with a run length, and then a byte with
	 * two colour indexes to alternate between for the run
	 */
	if (*info->index) {
	    for (count = 0; count < *info->index; count++, x++)
		bmp_SetPix(info, x, y, *(info->index+1));
	    info->index += 2;
	/*
	 * A leading zero is an escape; it may signal the end of the
	 * bitmap, a cursor move, or some absolute data.
	 */
	} else {		/* zero tag may be absolute mode or an escape */
	    switch(*(info->index+1)) {
	    case 0:		/* end of line */
		info->index += 2;
		return;
	    case 1:		/* end of bitmap */
		/* NULL index tells the caller's draw loop to stop */
		info->index = NULL;
		return;
	    case 2:		/* move */
		x += *(info->index + 2);	/* new coords */
		y += *(info->index + 3);
		info->index += 4;
		break;
	    default:		/* literal bitmap data */
		for (count = 0; count < *(info->index + 1); count++, x++)
		    bmp_SetPix(info, x, y, *(info->index + 2 + count));
		/* must be an even count */
		info->index += 2 + count + (count & 1);
		break;
	    }
	}
    }
}

/*
** bmp_DecodeLine
**
** Given (info) pointing to an image being decoded, (line) being the line currently
** being displayed, decode a line of data.
*/
static void
bmp_DecodeLine(BMP_INFO *info, int line)
{
    int		x;
    u_char	val, mask, *p;

    switch(info->format) {
    case BI_RGB:
	switch(info->depth) {
	case 8:
	    for (x = 0; x < info->width; x++, info->index++)
		bmp_SetPix(info, x, line, *info->index);
	    /* skip any padding up to a 32-bit scanline boundary */
	    info->index += 3 - (--x % 4);
	    break;
	case 4:
	    p = info->index;
	    for (x = 0; x < info->width; x++) {
		if (x & 1) {
		    val = *p & 0xf;	/* get low nybble */
		    p++;
		} else {
		    val = *p >> 4;	/* get high nybble */
		}
		bmp_SetPix(info, x, line, val);
	    }
	    /* warning, this depends on integer truncation, do not hand-optimise! */
	    info->index += ((x + 7) / 8) * 4;
	    break;
	case 1:
	    p = info->index;
	    mask = 0x80;
	    for (x = 0; x < info->width; x++) {
		val = (*p & mask) ?
1 : 0; mask >>= 1; if (mask == 0) { mask = 0x80; p++; } bmp_SetPix(info, x, line, val); } /* warning, this depends on integer truncation, do not hand-optimise! */ info->index += ((x + 31) / 32) * 4; break; } break; case BI_RLE4: bmp_DecodeRLE4(info, line); break; case BI_RLE8: bmp_DecodeRLE8(info, line); break; } } /* ** bmp_Init ** ** Given a pointer (data) to the image of a BMP file, fill in bmp_info with what ** can be learnt from it. Return nonzero if the file isn't usable. ** ** Take screen dimensions (swidth), (sheight) and (sdepth) and make sure we ** can work with these. */ static int bmp_Init(char *data, int swidth, int sheight, int sdepth) { BITMAPF *bmf = (BITMAPF *)data; int pind; bmp_info.data = NULL; /* assume setup failed */ /* check file ID */ if (bmf->bmfh.bfType != 0x4d42) { printf("splash_bmp: not a BMP file\n"); return(1); /* XXX check word ordering for big-endian ports? */ } /* do we understand this bitmap format? */ if (bmf->bmfi.bmiHeader.biSize > sizeof(bmf->bmfi.bmiHeader)) { printf("splash_bmp: unsupported BMP format (size=%d)\n", bmf->bmfi.bmiHeader.biSize); return(1); } /* save what we know about the screen */ bmp_info.swidth = swidth; bmp_info.sheight = sheight; bmp_info.sdepth = sdepth; /* where's the data? 
*/ bmp_info.data = (u_char *)data + bmf->bmfh.bfOffBits; /* image parameters */ bmp_info.width = bmf->bmfi.bmiHeader.biWidth; bmp_info.height = bmf->bmfi.bmiHeader.biHeight; bmp_info.depth = bmf->bmfi.bmiHeader.biBitCount; bmp_info.format = bmf->bmfi.bmiHeader.biCompression; switch(bmp_info.format) { /* check compression format */ case BI_RGB: case BI_RLE4: case BI_RLE8: break; default: printf("splash_bmp: unsupported compression format\n"); return(1); /* unsupported compression format */ } /* palette details */ bmp_info.ncols = (bmf->bmfi.bmiHeader.biClrUsed); bzero(bmp_info.palette,sizeof(bmp_info.palette)); if (bmp_info.ncols == 0) { /* uses all of them */ bmp_info.ncols = 1 << bmf->bmfi.bmiHeader.biBitCount; } if ((bmp_info.height > bmp_info.sheight) || (bmp_info.width > bmp_info.swidth) || (bmp_info.ncols > (1 << sdepth))) { if (bootverbose) printf("splash_bmp: beyond screen capacity (%dx%d, %d colors)\n", bmp_info.width, bmp_info.height, bmp_info.ncols); return(1); } /* read palette */ for (pind = 0; pind < bmp_info.ncols; pind++) { bmp_info.palette[pind][0] = bmf->bmfi.bmiColors[pind].rgbRed; bmp_info.palette[pind][1] = bmf->bmfi.bmiColors[pind].rgbGreen; bmp_info.palette[pind][2] = bmf->bmfi.bmiColors[pind].rgbBlue; } return(0); } /* ** bmp_Draw ** ** Render the image. Return nonzero if that's not possible. 
** */ static int bmp_Draw(video_adapter_t *adp) { int line; #if 0 #ifndef PC98 int i; #endif #endif if (bmp_info.data == NULL) { /* init failed, do nothing */ return(1); } /* clear the screen */ bmp_info.vidmem = (u_char *)adp->va_window; bmp_info.adp = adp; (*vidsw[adp->va_index]->clear)(adp); (*vidsw[adp->va_index]->set_win_org)(adp, 0); bmp_info.bank = 0; /* initialise the info structure for drawing */ bmp_info.index = bmp_info.data; #ifdef PC98 bmp_info.prev_val = 255; #endif /* set the palette for our image */ (*vidsw[adp->va_index]->load_palette)(adp, (u_char *)&bmp_info.palette); #if 0 #ifndef PC98 /* XXX: this is ugly, but necessary for EGA/VGA 1bpp/4bpp modes */ if ((adp->va_type == KD_EGA) || (adp->va_type == KD_VGA)) { inb(adp->va_crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x14); outb(ATC, 0); for (i = 0; i < 16; ++i) { outb(ATC, i); outb(ATC, i); } inb(adp->va_crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x20); /* enable palette */ outw(GDCIDX, 0x0f01); /* set/reset enable */ if (bmp_info.sdepth == 1) outw(TSIDX, 0x0102); /* unmask plane #0 */ } #endif #endif for (line = 0; (line < bmp_info.height) && bmp_info.index; line++) { bmp_DecodeLine(&bmp_info, line); } return(0); } Index: head/sys/dev/fb/splash_pcx.c =================================================================== --- head/sys/dev/fb/splash_pcx.c (revision 129879) +++ head/sys/dev/fb/splash_pcx.c (revision 129880) @@ -1,262 +1,263 @@ /*- * Copyright (c) 1999 Michael Smith * Copyright (c) 1999 Kazutaka YOKOTA * Copyright (c) 1999 Dag-Erling Coïdan Smørgrav * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include +#include #include #include #include #define FADE_TIMEOUT 300 /* sec */ static int splash_mode = -1; static int splash_on = FALSE; static int pcx_start(video_adapter_t *adp); static int pcx_end(video_adapter_t *adp); static int pcx_splash(video_adapter_t *adp, int on); static int pcx_init(char *data, int sdepth); static int pcx_draw(video_adapter_t *adp); static splash_decoder_t pcx_decoder = { "splash_pcx", pcx_start, pcx_end, pcx_splash, SPLASH_IMAGE, }; SPLASH_DECODER(splash_pcx, pcx_decoder); static struct { int width, height, bpsl; int bpp, planes, zlen; const u_char *zdata; u_char *palette; } pcx_info; static int pcx_start(video_adapter_t *adp) { static int modes[] = { M_VGA_CG320, M_VESA_CG640x480, M_VESA_CG800x600, M_VESA_CG1024x768, -1, }; video_info_t info; int i; if (pcx_decoder.data == NULL || pcx_decoder.data_size <= 0 || pcx_init((u_char *)pcx_decoder.data, pcx_decoder.data_size)) return ENODEV; if (bootverbose) printf("splash_pcx: image good:\n" " width = %d\n" " height = %d\n" " depth = %d\n" " planes = %d\n", pcx_info.width, pcx_info.height, pcx_info.bpp, pcx_info.planes); for (i = 0; modes[i] >= 0; ++i) { if (get_mode_info(adp, modes[i], &info) != 0) continue; if (bootverbose) printf("splash_pcx: considering mode %d:\n" " vi_width = %d\n" " vi_height = %d\n" " vi_depth = %d\n" " vi_planes = %d\n", modes[i], info.vi_width, info.vi_height, info.vi_depth, info.vi_planes); if (info.vi_width >= pcx_info.width && info.vi_height >= pcx_info.height && info.vi_depth == pcx_info.bpp && info.vi_planes == pcx_info.planes) break; } splash_mode = modes[i]; if (splash_mode == -1) return ENODEV; if (bootverbose) printf("pcx_splash: selecting mode %d\n", splash_mode); return 0; } static int pcx_end(video_adapter_t *adp) { /* nothing to do */ return 0; } static int pcx_splash(video_adapter_t *adp, int on) { if (on) { if (!splash_on) { if (set_video_mode(adp, splash_mode) || pcx_draw(adp)) return 1; splash_on = 
TRUE; } return 0; } else { splash_on = FALSE; return 0; } } struct pcxheader { u_char manufactor; u_char version; u_char encoding; u_char bpp; u_short xmin, ymin, xmax, ymax; u_short hres, vres; u_char colormap[48]; u_char rsvd; u_char nplanes; u_short bpsl; u_short palinfo; u_short hsize, vsize; }; #define MAXSCANLINE 1024 static int pcx_init(char *data, int size) { const struct pcxheader *hdr; hdr = (const struct pcxheader *)data; if (size < 128 + 1 + 1 + 768 || hdr->manufactor != 10 || hdr->version != 5 || hdr->encoding != 1 || hdr->nplanes != 1 || hdr->bpp != 8 || hdr->bpsl > MAXSCANLINE || data[size-769] != 12) { printf("splash_pcx: invalid PCX image\n"); return 1; } pcx_info.width = hdr->xmax - hdr->xmin + 1; pcx_info.height = hdr->ymax - hdr->ymin + 1; pcx_info.bpsl = hdr->bpsl; pcx_info.bpp = hdr->bpp; pcx_info.planes = hdr->nplanes; pcx_info.zlen = size - (128 + 1 + 768); pcx_info.zdata = data + 128; pcx_info.palette = data + size - 768; return 0; } static int pcx_draw(video_adapter_t *adp) { u_char *vidmem; int swidth, sheight, sbpsl, sdepth, splanes; int banksize, origin; int c, i, j, pos, scan, x, y; u_char line[MAXSCANLINE]; if (pcx_info.zlen < 1) return 1; load_palette(adp, pcx_info.palette); vidmem = (u_char *)adp->va_window; swidth = adp->va_info.vi_width; sheight = adp->va_info.vi_height; sbpsl = adp->va_line_width; sdepth = adp->va_info.vi_depth; splanes = adp->va_info.vi_planes; banksize = adp->va_window_size; for (origin = 0; origin < sheight*sbpsl; origin += banksize) { set_origin(adp, origin); bzero(vidmem, banksize); } x = (swidth - pcx_info.width) / 2; y = (sheight - pcx_info.height) / 2; origin = 0; pos = y * sbpsl + x; while (pos > banksize) { pos -= banksize; origin += banksize; } set_origin(adp, origin); for (scan = i = 0; scan < pcx_info.height; ++scan, ++y, pos += sbpsl) { for (j = 0; j < pcx_info.bpsl && i < pcx_info.zlen; ++i) { if ((pcx_info.zdata[i] & 0xc0) == 0xc0) { c = pcx_info.zdata[i++] & 0x3f; if (i >= pcx_info.zlen) return 
1; } else { c = 1; } if (j + c > pcx_info.bpsl) return 1; while (c--) line[j++] = pcx_info.zdata[i]; } if (pos > banksize) { origin += banksize; pos -= banksize; set_origin(adp, origin); } if (pos + pcx_info.width > banksize) { /* scanline crosses bank boundary */ j = banksize - pos; bcopy(line, vidmem + pos, j); origin += banksize; pos -= banksize; set_origin(adp, origin); bcopy(line + j, vidmem, pcx_info.width - j); } else { bcopy(line, vidmem + pos, pcx_info.width); } } return 0; } Index: head/sys/fs/coda/coda_fbsd.c =================================================================== --- head/sys/fs/coda/coda_fbsd.c (revision 129879) +++ head/sys/fs/coda/coda_fbsd.c (revision 129880) @@ -1,221 +1,222 @@ /* * Coda: an Experimental Distributed File System * Release 3.1 * * Copyright (c) 1987-1998 Carnegie Mellon University * All Rights Reserved * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation, and * that credit is given to Carnegie Mellon University in all documents * and publicity pertaining to direct or indirect use of this code or its * derivatives. * * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF * ANY DERIVATIVE WORK. * * Carnegie Mellon encourages users of this software to return any * improvements or extensions that they make, and to grant Carnegie * Mellon the rights to redistribute these changes without encumbrance. 
* * @(#) src/sys/coda/coda_fbsd.cr,v 1.1.1.1 1998/08/29 21:14:52 rvb Exp $ */ #include __FBSDID("$FreeBSD$"); #include "vcoda.h" #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include /* From: "Jordan K. Hubbard" Subject: Re: New 3.0 SNAPshot CDROM about ready for production.. To: "Robert.V.Baron" Date: Fri, 20 Feb 1998 15:57:01 -0800 > Also I need a character device major number. (and might want to reserve > a block of 10 syscalls.) Just one char device number? No block devices? Very well, cdev 93 is yours! */ #define VC_DEV_NO 93 static struct cdevsw codadevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = vc_nb_open, .d_close = vc_nb_close, .d_read = vc_nb_read, .d_write = vc_nb_write, .d_ioctl = vc_nb_ioctl, .d_poll = vc_nb_poll, .d_name = "Coda", .d_maj = VC_DEV_NO, }; int vcdebug = 1; #define VCDEBUG if (vcdebug) printf static int codadev_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: break; case MOD_UNLOAD: break; default: break; } return 0; } static moduledata_t codadev_mod = { "codadev", codadev_modevent, NULL }; DECLARE_MODULE(codadev, codadev_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+VC_DEV_NO); int coda_fbsd_getpages(v) void *v; { struct vop_getpages_args *ap = v; #if 1 return vop_stdgetpages(ap); #else { struct vnode *vp = ap->a_vp; struct cnode *cp = VTOC(vp); struct vnode *cfvp = cp->c_ovp; int opened_internally = 0; struct ucred *cred = (struct ucred *) 0; struct proc *p = curproc; int error = 0; if (IS_CTL_VP(vp)) { return(EINVAL); } /* Redirect the request to UFS. 
*/ if (cfvp == NULL) { opened_internally = 1; error = VOP_OPEN(vp, FREAD, cred, p, -1); printf("coda_getp: Internally Opening %p\n", vp); if (error) { printf("coda_getpage: VOP_OPEN on container failed %d\n", error); return (error); } if (vp->v_type == VREG) { error = vfs_object_create(vp, p, cred); if (error != 0) { printf("coda_getpage: vfs_object_create() returns %d\n", error); vput(vp); return(error); } } cfvp = cp->c_ovp; } else { printf("coda_getp: has container %p\n", cfvp); } printf("coda_fbsd_getpages: using container "); /* error = vnode_pager_generic_getpages(cfvp, ap->a_m, ap->a_count, ap->a_reqpage); */ error = VOP_GETPAGES(cfvp, ap->a_m, ap->a_count, ap->a_reqpage, ap->a_offset); printf("error = %d\n", error); /* Do an internal close if necessary. */ if (opened_internally) { (void)VOP_CLOSE(vp, FREAD, cred, p); } return(error); } #endif } /* for DEVFS, using bpf & tun drivers as examples*/ static void coda_fbsd_drvinit(void *unused); static void coda_fbsd_drvuninit(void *unused); static void coda_fbsd_clone(void *arg, char *name, int namelen, dev_t *dev); static eventhandler_tag clonetag; static void coda_fbsd_clone(arg, name, namelen, dev) void *arg; char *name; int namelen; dev_t *dev; { int u; if (*dev != NODEV) return; if (dev_stdclone(name,NULL,"cfs",&u) != 1) return; *dev = make_dev(&codadevsw,unit2minor(u),UID_ROOT,GID_WHEEL,0600,"cfs%d",u); coda_mnttbl[unit2minor(u)].dev = *dev; } static void coda_fbsd_drvinit(unused) void *unused; { int i; clonetag = EVENTHANDLER_REGISTER(dev_clone,coda_fbsd_clone,0,1000); for(i=0;i __FBSDID("$FreeBSD$"); #include "opt_i4b.h" #if defined(ELSA_QS1PCI) #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #define MEM0_BAR 0 #define PORT0_BAR 1 #define PORT1_BAR 3 #define ELSA_PORT0_MAPOFF PCIR_BAR(PORT0_BAR) #define ELSA_PORT1_MAPOFF PCIR_BAR(PORT1_BAR) #define PCI_QS1000_DID 0x1000 #define PCI_QS1000_VID 0x1048 /* masks for register 
encoded in base addr */ #define ELSA_BASE_MASK 0x0ffff #define ELSA_OFF_MASK 0xf0000 /* register id's to be encoded in base addr */ #define ELSA_IDISAC 0x00000 #define ELSA_IDHSCXA 0x10000 #define ELSA_IDHSCXB 0x20000 #define ELSA_IDIPAC 0x40000 /* offsets from base address */ #define ELSA_OFF_ALE 0x00 #define ELSA_OFF_RW 0x01 static int eqs1p_pci_probe(device_t dev); static int eqs1p_pci_attach(device_t dev); static device_method_t eqs1p_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, eqs1p_pci_probe), DEVMETHOD(device_attach, eqs1p_pci_attach), { 0, 0 } }; static driver_t eqs1p_pci_driver = { "isic", eqs1p_pci_methods, 0 }; static devclass_t eqs1p_pci_devclass; DRIVER_MODULE(eqs1p, pci, eqs1p_pci_driver, eqs1p_pci_devclass, 0, 0); /*---------------------------------------------------------------------------* * ELSA MicroLink ISDN/PCI fifo read routine *---------------------------------------------------------------------------*/ static void eqs1pp_read_fifo(struct l1_softc *sc, int what, void *buf, size_t size) { bus_space_tag_t t = rman_get_bustag(sc->sc_resources.io_base[1]); bus_space_handle_t h = rman_get_bushandle(sc->sc_resources.io_base[1]); switch(what) { case ISIC_WHAT_ISAC: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_ISAC_OFF); bus_space_read_multi_1(t, h, ELSA_OFF_RW, buf, size); break; case ISIC_WHAT_HSCXA: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_HSCXA_OFF); bus_space_read_multi_1(t, h, ELSA_OFF_RW, buf, size); break; case ISIC_WHAT_HSCXB: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_HSCXB_OFF); bus_space_read_multi_1(t, h, ELSA_OFF_RW, buf, size); break; } } /*---------------------------------------------------------------------------* * ELSA MicroLink ISDN/PCI fifo write routine *---------------------------------------------------------------------------*/ static void eqs1pp_write_fifo(struct l1_softc *sc, int what, void *buf, size_t size) { bus_space_tag_t t = rman_get_bustag(sc->sc_resources.io_base[1]); bus_space_handle_t h = 
rman_get_bushandle(sc->sc_resources.io_base[1]); switch(what) { case ISIC_WHAT_ISAC: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_ISAC_OFF); bus_space_write_multi_1(t, h, ELSA_OFF_RW, (u_int8_t*)buf, size); break; case ISIC_WHAT_HSCXA: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_HSCXA_OFF); bus_space_write_multi_1(t, h, ELSA_OFF_RW, (u_int8_t*)buf, size); break; case ISIC_WHAT_HSCXB: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_HSCXB_OFF); bus_space_write_multi_1(t, h, ELSA_OFF_RW, (u_int8_t*)buf, size); break; } } /*---------------------------------------------------------------------------* * ELSA MicroLink ISDN/PCI register write routine *---------------------------------------------------------------------------*/ static void eqs1pp_write_reg(struct l1_softc *sc, int what, bus_size_t offs, u_int8_t data) { bus_space_tag_t t = rman_get_bustag(sc->sc_resources.io_base[1]); bus_space_handle_t h = rman_get_bushandle(sc->sc_resources.io_base[1]); switch(what) { case ISIC_WHAT_ISAC: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_ISAC_OFF+offs); bus_space_write_1(t, h, ELSA_OFF_RW, data); break; case ISIC_WHAT_HSCXA: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_HSCXA_OFF+offs); bus_space_write_1(t, h, ELSA_OFF_RW, data); break; case ISIC_WHAT_HSCXB: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_HSCXB_OFF+offs); bus_space_write_1(t, h, ELSA_OFF_RW, data); break; case ISIC_WHAT_IPAC: bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_IPAC_OFF+offs); bus_space_write_1(t, h, ELSA_OFF_RW, data); break; } } /*---------------------------------------------------------------------------* * ELSA MicroLink ISDN/PCI register read routine *---------------------------------------------------------------------------*/ static u_int8_t eqs1pp_read_reg(struct l1_softc *sc, int what, bus_size_t offs) { bus_space_tag_t t = rman_get_bustag(sc->sc_resources.io_base[1]); bus_space_handle_t h = rman_get_bushandle(sc->sc_resources.io_base[1]); switch(what) { case ISIC_WHAT_ISAC: bus_space_write_1(t, h, 
ELSA_OFF_ALE, IPAC_ISAC_OFF+offs);
			return bus_space_read_1(t, h, ELSA_OFF_RW);
		case ISIC_WHAT_HSCXA:
			bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_HSCXA_OFF+offs);
			return bus_space_read_1(t, h, ELSA_OFF_RW);
		case ISIC_WHAT_HSCXB:
			bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_HSCXB_OFF+offs);
			return bus_space_read_1(t, h, ELSA_OFF_RW);
		case ISIC_WHAT_IPAC:
			bus_space_write_1(t, h, ELSA_OFF_ALE, IPAC_IPAC_OFF+offs);
			return bus_space_read_1(t, h, ELSA_OFF_RW);
	}
	return 0;
}

/*---------------------------------------------------------------------------*
 *	eqs1p_pci_probe - probe for a card
 *	(matches on the ELSA QS1000 PCI vendor/device ID)
 *---------------------------------------------------------------------------*/
static int
eqs1p_pci_probe(device_t dev)
{
	if((pci_get_vendor(dev) == PCI_QS1000_VID) &&
	   (pci_get_device(dev) == PCI_QS1000_DID))
	{
		device_set_desc(dev, "ELSA MicroLink ISDN/PCI");
		return(0);
	}
	return(ENXIO);
}

/*---------------------------------------------------------------------------*
 *	isic_attach_Eqs1pp - attach for ELSA MicroLink ISDN/PCI
 *---------------------------------------------------------------------------*/
static int
eqs1p_pci_attach(device_t dev)
{
	bus_space_tag_t t;
	bus_space_handle_t h;
	struct l1_softc *sc;
	void *ih = 0;
	int unit = device_get_unit(dev);

	/* check max unit range */
	if(unit >= ISIC_MAXUNIT)
	{
		printf("isic%d: Error, unit %d >= ISIC_MAXUNIT for ELSA MicroLink ISDN/PCI!\n",
			unit, unit);
		return(ENXIO);
	}

	sc = &l1_sc[unit];	/* get softc */

	sc->sc_unit = unit;

	/* get io_base */
	sc->sc_resources.io_rid[0] = ELSA_PORT0_MAPOFF;

	if(!(sc->sc_resources.io_base[0] =
		bus_alloc_resource_any(dev, SYS_RES_IOPORT,
			&sc->sc_resources.io_rid[0], RF_ACTIVE)))
	{
		printf("isic%d: Couldn't get first iobase for ELSA MicroLink ISDN/PCI!\n", unit);
		return(ENXIO);
	}

	sc->sc_resources.io_rid[1] = ELSA_PORT1_MAPOFF;

	if(!(sc->sc_resources.io_base[1] =
		bus_alloc_resource_any(dev, SYS_RES_IOPORT,
			&sc->sc_resources.io_rid[1], RF_ACTIVE)))
	{
		printf("isic%d: Couldn't get second iobase for ELSA MicroLink ISDN/PCI!\n",
unit); isic_detach_common(dev); return(ENXIO); } sc->sc_port = rman_get_start(sc->sc_resources.io_base[1]); if(!(sc->sc_resources.irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_resources.irq_rid, RF_ACTIVE | RF_SHAREABLE))) { printf("isic%d: Could not get irq for ELSA MicroLink ISDN/PCI!\n",unit); isic_detach_common(dev); return(ENXIO); } sc->sc_irq = rman_get_start(sc->sc_resources.irq); /* setup access routines */ sc->clearirq = NULL; sc->readreg = eqs1pp_read_reg; sc->writereg = eqs1pp_write_reg; sc->readfifo = eqs1pp_read_fifo; sc->writefifo = eqs1pp_write_fifo; /* setup card type */ sc->sc_cardtyp = CARD_TYPEP_ELSAQS1PCI; /* setup IOM bus type */ sc->sc_bustyp = BUS_TYPE_IOM2; /* setup chip type = IPAC ! */ sc->sc_ipac = 1; sc->sc_bfifolen = IPAC_BFIFO_LEN; if(isic_attach_common(dev)) { isic_detach_common(dev); return(ENXIO); } if(bus_setup_intr(dev, sc->sc_resources.irq, INTR_TYPE_NET, (void(*)(void*))isicintr, sc, &ih)) { printf("isic%d: Couldn't set up irq for ELSA MicroLink ISDN/PCI!\n", unit); isic_detach_common(dev); return(ENXIO); } /* enable hscx/isac irq's */ IPAC_WRITE(IPAC_MASK, (IPAC_MASK_INT1 | IPAC_MASK_INT0)); IPAC_WRITE(IPAC_ACFG, 0); /* outputs are open drain */ IPAC_WRITE(IPAC_AOE, /* aux 5..2 are inputs, 7, 6 outputs */ (IPAC_AOE_OE5 | IPAC_AOE_OE4 | IPAC_AOE_OE3 | IPAC_AOE_OE2)); IPAC_WRITE(IPAC_ATX, 0xff); /* set all output lines high */ t = rman_get_bustag(sc->sc_resources.io_base[0]); h = rman_get_bushandle(sc->sc_resources.io_base[0]); bus_space_write_1(t, h, 0x4c, 0x41); /* enable card interrupt */ return(0); } #endif /* defined(ELSA_QS1PCI) */ Index: head/sys/i4b/layer1/isic/i4b_isic_isa.c =================================================================== --- head/sys/i4b/layer1/isic/i4b_isic_isa.c (revision 129879) +++ head/sys/i4b/layer1/isic/i4b_isic_isa.c (revision 129880) @@ -1,205 +1,206 @@ /* * Copyright (c) 1997, 2001 Hellmuth Michaelis. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* *--------------------------------------------------------------------------- * * i4b_isic_isa.c - ISA bus interface * ================================== * last edit-date: [Wed Jan 24 09:30:19 2001] * *---------------------------------------------------------------------------*/ #include __FBSDID("$FreeBSD$"); #include "opt_i4b.h" #include #include +#include #include #include #include #include #include #include #include struct l1_softc l1_sc[ISIC_MAXUNIT]; static int isic_isa_probe(device_t dev); static int isic_isa_attach(device_t dev); static device_method_t isic_methods[] = { DEVMETHOD(device_probe, isic_isa_probe), DEVMETHOD(device_attach, isic_isa_attach), { 0, 0 } }; static driver_t isic_driver = { "isic", isic_methods, 0 }; static devclass_t isic_devclass; DRIVER_MODULE(isic, isa, isic_driver, isic_devclass, 0, 0); /*---------------------------------------------------------------------------* * probe for ISA non-PnP cards *---------------------------------------------------------------------------*/ static int isic_isa_probe(device_t dev) { int ret = ENXIO; if(isa_get_vendorid(dev)) /* no PnP probes here */ return ENXIO; switch(device_get_flags(dev)) { #ifdef TEL_S0_16 case CARD_TYPEP_16: ret = isic_probe_s016(dev); break; #endif #ifdef TEL_S0_8 case CARD_TYPEP_8: ret = isic_probe_s08(dev); break; #endif #ifdef ELSA_PCC16 case CARD_TYPEP_PCC16: ret = isic_probe_Epcc16(dev); break; #endif #ifdef TEL_S0_16_3 case CARD_TYPEP_16_3: ret = isic_probe_s0163(dev); break; #endif #ifdef AVM_A1 case CARD_TYPEP_AVMA1: ret = isic_probe_avma1(dev); break; #endif #ifdef USR_STI case CARD_TYPEP_USRTA: ret = isic_probe_usrtai(dev); break; #endif #ifdef ITKIX1 case CARD_TYPEP_ITKIX1: ret = isic_probe_itkix1(dev); break; #endif default: printf("isic%d: probe, unknown flag: %d\n", device_get_unit(dev), device_get_flags(dev)); break; } return(ret); } /*---------------------------------------------------------------------------* * attach for ISA non-PnP cards 
*---------------------------------------------------------------------------*/ static int isic_isa_attach(device_t dev) { int ret = ENXIO; struct l1_softc *sc = &l1_sc[device_get_unit(dev)]; sc->sc_unit = device_get_unit(dev); /* card dependent setup */ switch(sc->sc_cardtyp) { #ifdef TEL_S0_16 case CARD_TYPEP_16: ret = isic_attach_s016(dev); break; #endif #ifdef TEL_S0_8 case CARD_TYPEP_8: ret = isic_attach_s08(dev); break; #endif #ifdef ELSA_PCC16 case CARD_TYPEP_PCC16: ret = isic_attach_Epcc16(dev); break; #endif #ifdef TEL_S0_16_3 case CARD_TYPEP_16_3: ret = isic_attach_s0163(dev); break; #endif #ifdef AVM_A1 case CARD_TYPEP_AVMA1: ret = isic_attach_avma1(dev); break; #endif #ifdef USR_STI case CARD_TYPEP_USRTA: ret = isic_attach_usrtai(dev); break; #endif #ifdef ITKIX1 case CARD_TYPEP_ITKIX1: ret = isic_attach_itkix1(dev); break; #endif default: printf("isic%d: attach, unknown flag: %d\n", device_get_unit(dev), device_get_flags(dev)); break; } if(ret) return(ret); ret = isic_attach_common(dev); return(ret); } Index: head/sys/i4b/layer1/isic/i4b_isic_pnp.c =================================================================== --- head/sys/i4b/layer1/isic/i4b_isic_pnp.c (revision 129879) +++ head/sys/i4b/layer1/isic/i4b_isic_pnp.c (revision 129880) @@ -1,326 +1,327 @@ /* * Copyright (c) 1998 Eivind Eklund. All rights reserved. * * Copyright (c) 1998, 1999 German Tischler. All rights reserved. * * Copyright (c) 1998, 2001 Hellmuth Michaelis. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * 4. Altered versions must be plainly marked as such, and must not be * misrepresented as being the original software and/or documentation. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * *--------------------------------------------------------------------------- * * i4b_isic_pnp.c - i4b pnp support * -------------------------------- * last edit-date: [Fri Jan 26 14:01:04 2001] * *---------------------------------------------------------------------------*/ #include __FBSDID("$FreeBSD$"); #include "opt_i4b.h" #include #include #include +#include #include #include #include #include #include #define VID_TEL163PNP 0x10212750 /* Teles 16.3 PnP */ #define VID_CREATIXPP 0x0000980e /* Creatix S0/16 P+P */ #define VID_DYNALINK 0x88167506 /* Dynalink */ #define VID_SEDLBAUER 0x0100274c /* Sedlbauer WinSpeed */ #define VID_NICCYGO 0x5001814c /* Neuhaus Niccy GO@ */ #define VID_ELSAQS1P 0x33019315 /* ELSA Quickstep1000pro*/ #define VID_ITK0025 0x25008b26 /* ITK Ix1 Micro V3 */ #define VID_AVMPNP 0x0009cd06 /* AVM Fritz! 
PnP */ #define VID_SIESURF2 0x2000254d /* Siemens I-Surf 2.0 PnP*/ #define VID_ASUSCOM_IPAC 0x90167506 /* Asuscom (with IPAC) */ #define VID_EICON_DIVA_20 0x7100891c /* Eicon DIVA 2.0 ISAC/HSCX */ #define VID_EICON_DIVA_202 0xa100891c /* Eicon DIVA 2.02 IPAC */ #define VID_COMPAQ_M610 0x0210110e /* Compaq Microcom 610 */ static struct isic_pnp_ids { u_long vend_id; char *id_str; } isic_pnp_ids[] = { #if defined(TEL_S0_16_3_P) || defined(CRTX_S0_P) || defined(COMPAQ_M610) { VID_TEL163PNP, "Teles S0/16.3 PnP" }, { VID_CREATIXPP, "Creatix S0/16 PnP" }, { VID_COMPAQ_M610, "Compaq Microcom 610" }, #endif #ifdef DYNALINK { VID_DYNALINK, "Dynalink IS64PH" }, #endif #ifdef SEDLBAUER { VID_SEDLBAUER, "Sedlbauer WinSpeed" }, #endif #ifdef DRN_NGO { VID_NICCYGO, "Dr.Neuhaus Niccy Go@" }, #endif #ifdef ELSA_QS1ISA { VID_ELSAQS1P, "ELSA QuickStep 1000pro" }, #endif #ifdef ITKIX1 { VID_ITK0025, "ITK ix1 Micro V3.0" }, #endif #ifdef AVM_PNP { VID_AVMPNP, "AVM Fritz!Card PnP" }, #endif #ifdef SIEMENS_ISURF2 { VID_SIESURF2, "Siemens I-Surf 2.0 PnP" }, #endif #ifdef ASUSCOM_IPAC { VID_ASUSCOM_IPAC, "Asuscom ISDNLink 128 PnP" }, #endif #ifdef EICON_DIVA { VID_EICON_DIVA_20, "Eicon.Diehl DIVA 2.0 ISA PnP" }, { VID_EICON_DIVA_202, "Eicon.Diehl DIVA 2.02 ISA PnP" }, #endif { 0, 0 } }; static int isic_pnp_probe(device_t dev); static int isic_pnp_attach(device_t dev); static device_method_t isic_pnp_methods[] = { DEVMETHOD(device_probe, isic_pnp_probe), DEVMETHOD(device_attach, isic_pnp_attach), { 0, 0 } }; static driver_t isic_pnp_driver = { "isic", isic_pnp_methods, 0, }; static devclass_t isic_devclass; DRIVER_MODULE(isicpnp, isa, isic_pnp_driver, isic_devclass, 0, 0); /*---------------------------------------------------------------------------* * probe for ISA PnP cards *---------------------------------------------------------------------------*/ static int isic_pnp_probe(device_t dev) { struct isic_pnp_ids *ids; /* pnp id's */ char *string = NULL; /* the name */ u_int32_t vend_id = 
isa_get_vendorid(dev); /* vendor id */ /* search table of knowd id's */ for(ids = isic_pnp_ids; ids->vend_id != 0; ids++) { if(vend_id == ids->vend_id) { string = ids->id_str; break; } } if(string) /* set name if we have one */ { device_set_desc(dev, string); /* set description */ return 0; } else { return ENXIO; } } /*---------------------------------------------------------------------------* * attach for ISA PnP cards *---------------------------------------------------------------------------*/ static int isic_pnp_attach(device_t dev) { u_int32_t vend_id = isa_get_vendorid(dev); /* vendor id */ unsigned int unit = device_get_unit(dev); /* get unit */ const char *name = device_get_desc(dev); /* get description */ struct l1_softc *sc = 0; /* softc */ void *ih = 0; /* a dummy */ int ret; /* see if we are out of bounds */ if(unit >= ISIC_MAXUNIT) { printf("isic%d: Error, unit %d >= ISIC_MAXUNIT for %s\n", unit, unit, name); return ENXIO; } /* get information structure for this unit */ sc = &l1_sc[unit]; /* get io_base */ if(!(sc->sc_resources.io_base[0] = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->sc_resources.io_rid[0], RF_ACTIVE ) )) { printf("isic_pnp_attach: Couldn't get my io_base.\n"); return ENXIO; } /* will not be used for pnp devices */ sc->sc_port = rman_get_start(sc->sc_resources.io_base[0]); /* get irq, release io_base if we don't get it */ if(!(sc->sc_resources.irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_resources.irq_rid, RF_ACTIVE))) { printf("isic%d: Could not get irq.\n",unit); isic_detach_common(dev); return ENXIO; } /* not needed */ sc->sc_irq = rman_get_start(sc->sc_resources.irq); /* set flag so we know what this card is */ ret = ENXIO; switch(vend_id) { #if defined(TEL_S0_16_3_P) || defined(CRTX_S0_P) || defined(COMPAQ_M610) case VID_TEL163PNP: sc->sc_cardtyp = CARD_TYPEP_163P; ret = isic_attach_Cs0P(dev); break; case VID_CREATIXPP: sc->sc_cardtyp = CARD_TYPEP_CS0P; ret = isic_attach_Cs0P(dev); break; case VID_COMPAQ_M610: 
sc->sc_cardtyp = CARD_TYPEP_COMPAQ_M610; ret = isic_attach_Cs0P(dev); break; #endif #ifdef DYNALINK case VID_DYNALINK: sc->sc_cardtyp = CARD_TYPEP_DYNALINK; ret = isic_attach_Dyn(dev); break; #endif #ifdef SEDLBAUER case VID_SEDLBAUER: sc->sc_cardtyp = CARD_TYPEP_SWS; ret = isic_attach_sws(dev); break; #endif #ifdef DRN_NGO case VID_NICCYGO: sc->sc_cardtyp = CARD_TYPEP_DRNNGO; ret = isic_attach_drnngo(dev); break; #endif #ifdef ELSA_QS1ISA case VID_ELSAQS1P: sc->sc_cardtyp = CARD_TYPEP_ELSAQS1ISA; ret = isic_attach_Eqs1pi(dev); break; #endif #ifdef ITKIX1 case VID_ITK0025: sc->sc_cardtyp = CARD_TYPEP_ITKIX1; ret = isic_attach_itkix1(dev); break; #endif #ifdef SIEMENS_ISURF2 case VID_SIESURF2: sc->sc_cardtyp = CARD_TYPEP_SIE_ISURF2; ret = isic_attach_siemens_isurf(dev); break; #endif #ifdef ASUSCOM_IPAC case VID_ASUSCOM_IPAC: sc->sc_cardtyp = CARD_TYPEP_ASUSCOMIPAC; ret = isic_attach_asi(dev); break; #endif #ifdef EICON_DIVA case VID_EICON_DIVA_20: sc->sc_cardtyp = CARD_TYPEP_DIVA_ISA; ret = isic_attach_diva(dev); break; case VID_EICON_DIVA_202: sc->sc_cardtyp = CARD_TYPEP_DIVA_ISA; ret = isic_attach_diva_ipac(dev); break; #endif default: printf("isic%d: Error, no driver for %s\n", unit, name); ret = ENXIO; break; } if(ret) { isic_detach_common(dev); return ENXIO; } if(isic_attach_common(dev)) { /* unset flag */ sc->sc_cardtyp = CARD_TYPEP_UNK; /* free irq here, it hasn't been attached yet */ bus_release_resource(dev,SYS_RES_IRQ,sc->sc_resources.irq_rid, sc->sc_resources.irq); sc->sc_resources.irq = 0; isic_detach_common(dev); return ENXIO; } else { /* setup intr routine */ bus_setup_intr(dev,sc->sc_resources.irq,INTR_TYPE_NET, (void(*)(void*))isicintr, sc,&ih); return 0; } } Index: head/sys/isa/atkbd_isa.c =================================================================== --- head/sys/isa/atkbd_isa.c (revision 129879) +++ head/sys/isa/atkbd_isa.c (revision 129880) @@ -1,179 +1,180 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_kbd.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include typedef struct { struct resource *intr; void *ih; } atkbd_softc_t; static devclass_t atkbd_devclass; static void atkbdidentify(driver_t *driver, device_t dev); static int atkbdprobe(device_t dev); static int atkbdattach(device_t dev); static int atkbdresume(device_t dev); static void atkbd_isa_intr(void *arg); static device_method_t atkbd_methods[] = { DEVMETHOD(device_identify, atkbdidentify), DEVMETHOD(device_probe, atkbdprobe), DEVMETHOD(device_attach, atkbdattach), DEVMETHOD(device_resume, atkbdresume), { 0, 0 } }; static driver_t atkbd_driver = { ATKBD_DRIVER_NAME, atkbd_methods, sizeof(atkbd_softc_t), }; static void atkbdidentify(driver_t *driver, device_t parent) { /* always add at least one child */ BUS_ADD_CHILD(parent, KBDC_RID_KBD, driver->name, device_get_unit(parent)); } static int atkbdprobe(device_t dev) { struct resource *res; u_long irq; int flags; int rid; device_set_desc(dev, "AT Keyboard"); /* obtain parameters */ flags = device_get_flags(dev); /* see if IRQ is available */ rid = KBDC_RID_KBD; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (res == NULL) { if (bootverbose) device_printf(dev, "unable to allocate IRQ\n"); return ENXIO; } irq = rman_get_start(res); bus_release_resource(dev, SYS_RES_IRQ, rid, res); /* probe the device */ return atkbd_probe_unit(device_get_unit(dev), device_get_unit(device_get_parent(dev)), irq, flags); } static int atkbdattach(device_t dev) { atkbd_softc_t *sc; keyboard_t *kbd; u_long irq; int flags; int rid; int error; sc = device_get_softc(dev); rid = KBDC_RID_KBD; irq = bus_get_resource_start(dev, SYS_RES_IRQ, rid); flags = device_get_flags(dev); error = atkbd_attach_unit(device_get_unit(dev), &kbd, device_get_unit(device_get_parent(dev)), irq, flags); if (error) return error; /* declare our 
interrupt handler */ sc->intr = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->intr == NULL) return ENXIO; error = bus_setup_intr(dev, sc->intr, INTR_TYPE_TTY, atkbd_isa_intr, kbd, &sc->ih); if (error) bus_release_resource(dev, SYS_RES_IRQ, rid, sc->intr); return error; } static int atkbdresume(device_t dev) { atkbd_softc_t *sc; keyboard_t *kbd; int args[2]; sc = device_get_softc(dev); kbd = kbd_get_keyboard(kbd_find_keyboard(ATKBD_DRIVER_NAME, device_get_unit(dev))); if (kbd) { kbd->kb_flags &= ~KB_INITIALIZED; args[0] = device_get_unit(device_get_parent(dev)); args[1] = rman_get_start(sc->intr); (*kbdsw[kbd->kb_index]->init)(device_get_unit(dev), &kbd, args, device_get_flags(dev)); (*kbdsw[kbd->kb_index]->clear_state)(kbd); } return 0; } static void atkbd_isa_intr(void *arg) { keyboard_t *kbd; kbd = (keyboard_t *)arg; (*kbdsw[kbd->kb_index]->intr)(kbd, NULL); } DRIVER_MODULE(atkbd, atkbdc, atkbd_driver, atkbd_devclass, 0, 0); Index: head/sys/isa/atkbdc_isa.c =================================================================== --- head/sys/isa/atkbdc_isa.c (revision 129879) +++ head/sys/isa/atkbdc_isa.c (revision 129880) @@ -1,368 +1,369 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_kbd.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ATKBDDEV, "atkbddev", "AT Keyboard device"); /* children */ typedef struct atkbdc_device { struct resource_list resources; int rid; u_int32_t vendorid; u_int32_t serial; u_int32_t logicalid; u_int32_t compatid; } atkbdc_device_t; /* kbdc */ static devclass_t atkbdc_devclass; static int atkbdc_probe(device_t dev); static int atkbdc_attach(device_t dev); static device_t atkbdc_add_child(device_t bus, int order, char *name, int unit); static int atkbdc_print_child(device_t bus, device_t dev); static int atkbdc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val); static int atkbdc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val); static struct resource_list *atkbdc_get_resource_list (device_t bus, device_t dev); static struct resource *atkbdc_alloc_resource(device_t bus, device_t dev, int type, int *rid, u_long start, u_long end, u_long count, u_int flags); static int atkbdc_release_resource(device_t bus, device_t dev, int type, int rid, struct resource *res); static device_method_t atkbdc_methods[] = { DEVMETHOD(device_probe, 
atkbdc_probe), DEVMETHOD(device_attach, atkbdc_attach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(bus_add_child, atkbdc_add_child), DEVMETHOD(bus_print_child, atkbdc_print_child), DEVMETHOD(bus_read_ivar, atkbdc_read_ivar), DEVMETHOD(bus_write_ivar, atkbdc_write_ivar), DEVMETHOD(bus_get_resource_list,atkbdc_get_resource_list), DEVMETHOD(bus_alloc_resource, atkbdc_alloc_resource), DEVMETHOD(bus_release_resource, atkbdc_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static driver_t atkbdc_driver = { ATKBDC_DRIVER_NAME, atkbdc_methods, sizeof(atkbdc_softc_t *), }; static struct isa_pnp_id atkbdc_ids[] = { { 0x0303d041, "Keyboard controller (i8042)" }, /* PNP0303 */ { 0 } }; static int atkbdc_probe(device_t dev) { struct resource *port0; struct resource *port1; u_long start; u_long count; int error; int rid; /* check PnP IDs */ if (ISA_PNP_PROBE(device_get_parent(dev), dev, atkbdc_ids) == ENXIO) return ENXIO; device_set_desc(dev, "Keyboard controller (i8042)"); /* * Adjust I/O port resources. * The AT keyboard controller uses two ports (a command/data port * 0x60 and a status port 0x64), which may be given to us in * one resource (0x60 through 0x64) or as two separate resources * (0x60 and 0x64). Furthermore, /boot/device.hints may contain * just one port, 0x60. We shall adjust resource settings * so that these two ports are available as two separate resources. 
*/ device_quiet(dev); rid = 0; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, &start, &count) != 0) return ENXIO; if (count > 1) /* adjust the count */ bus_set_resource(dev, SYS_RES_IOPORT, rid, start, 1); port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port0 == NULL) return ENXIO; rid = 1; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, NULL, NULL) != 0) bus_set_resource(dev, SYS_RES_IOPORT, 1, start + KBD_STATUS_PORT, 1); port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); return ENXIO; } device_verbose(dev); error = atkbdc_probe_unit(device_get_unit(dev), port0, port1); if (error == 0) bus_generic_probe(dev); bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, port1); return error; } static int atkbdc_attach(device_t dev) { atkbdc_softc_t *sc; int unit; int error; int rid; unit = device_get_unit(dev); sc = *(atkbdc_softc_t **)device_get_softc(dev); if (sc == NULL) { /* * We have to maintain two copies of the kbdc_softc struct, * as the low-level console needs to have access to the * keyboard controller before kbdc is probed and attached. * kbdc_soft[] contains the default entry for that purpose. * See atkbdc.c. 
XXX */ sc = atkbdc_get_softc(unit); if (sc == NULL) return ENOMEM; } rid = 0; sc->port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port0 == NULL) return ENXIO; rid = 1; sc->port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); return ENXIO; } error = atkbdc_attach_unit(unit, sc, sc->port0, sc->port1); if (error) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, sc->port1); return error; } *(atkbdc_softc_t **)device_get_softc(dev) = sc; bus_generic_attach(dev); return 0; } static device_t atkbdc_add_child(device_t bus, int order, char *name, int unit) { atkbdc_device_t *ivar; device_t child; int t; ivar = malloc(sizeof(struct atkbdc_device), M_ATKBDDEV, M_NOWAIT | M_ZERO); if (!ivar) return NULL; child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { free(ivar, M_ATKBDDEV); return child; } resource_list_init(&ivar->resources); ivar->rid = order; /* * If the device is not created by the PnP BIOS or ACPI, * refer to device hints for IRQ. 
*/ if (ISA_PNP_PROBE(device_get_parent(bus), bus, atkbdc_ids) != 0) { if (resource_int_value(name, unit, "irq", &t) != 0) t = -1; } else { t = bus_get_resource_start(bus, SYS_RES_IRQ, ivar->rid); } if (t > 0) resource_list_add(&ivar->resources, SYS_RES_IRQ, ivar->rid, t, t, 1); if (resource_int_value(name, unit, "flags", &t) == 0) device_set_flags(child, t); if (resource_disabled(name, unit)) device_disable(child); device_set_ivars(child, ivar); return child; } static int atkbdc_print_child(device_t bus, device_t dev) { atkbdc_device_t *kbdcdev; u_long irq; int flags; int retval = 0; kbdcdev = (atkbdc_device_t *)device_get_ivars(dev); retval += bus_print_child_header(bus, dev); flags = device_get_flags(dev); if (flags != 0) retval += printf(" flags 0x%x", flags); irq = bus_get_resource_start(dev, SYS_RES_IRQ, kbdcdev->rid); if (irq != 0) retval += printf(" irq %ld", irq); retval += bus_print_child_footer(bus, dev); return (retval); } static int atkbdc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); switch (index) { case KBDC_IVAR_VENDORID: *val = (u_long)ivar->vendorid; break; case KBDC_IVAR_SERIAL: *val = (u_long)ivar->serial; break; case KBDC_IVAR_LOGICALID: *val = (u_long)ivar->logicalid; break; case KBDC_IVAR_COMPATID: *val = (u_long)ivar->compatid; break; default: return ENOENT; } return 0; } static int atkbdc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); switch (index) { case KBDC_IVAR_VENDORID: ivar->vendorid = (u_int32_t)val; break; case KBDC_IVAR_SERIAL: ivar->serial = (u_int32_t)val; break; case KBDC_IVAR_LOGICALID: ivar->logicalid = (u_int32_t)val; break; case KBDC_IVAR_COMPATID: ivar->compatid = (u_int32_t)val; break; default: return ENOENT; } return 0; } static struct resource_list *atkbdc_get_resource_list (device_t bus, device_t dev) { atkbdc_device_t *ivar; ivar = 
(atkbdc_device_t *)device_get_ivars(dev); return &ivar->resources; } static struct resource *atkbdc_alloc_resource(device_t bus, device_t dev, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); return resource_list_alloc(&ivar->resources, bus, dev, type, rid, start, end, count, flags); } static int atkbdc_release_resource(device_t bus, device_t dev, int type, int rid, struct resource *res) { atkbdc_device_t *ivar; ivar = (atkbdc_device_t *)device_get_ivars(dev); return resource_list_release(&ivar->resources, bus, dev, type, rid, res); } DRIVER_MODULE(atkbdc, isa, atkbdc_driver, atkbdc_devclass, 0, 0); DRIVER_MODULE(atkbdc, acpi, atkbdc_driver, atkbdc_devclass, 0, 0); Index: head/sys/isa/vga_isa.c =================================================================== --- head/sys/isa/vga_isa.c (revision 129879) +++ head/sys/isa/vga_isa.c (revision 129880) @@ -1,217 +1,218 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTE(review): the targets of the bare #include directives below were
 * stripped by text extraction (the <...> part was parsed as markup);
 * recover the exact header names from the repository, do not guess.
 * The lone "+" prefixed include is the line this revision adds.
 */
#include
__FBSDID("$FreeBSD$");

#include "opt_vga.h"
#include "opt_fb.h"
#include "opt_syscons.h"	/* should be removed in the future, XXX */

#include
#include
#include
+#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef __i386__
#include
#endif
#include
#include
#include
#include

/* Look up the softc for a given VGA unit via the devclass. */
#define VGA_SOFTC(unit) \
	((vga_softc_t *)devclass_get_softc(isavga_devclass, unit))

static devclass_t	isavga_devclass;

#ifdef FB_INSTALL_CDEV

static d_open_t		isavga_open;
static d_close_t	isavga_close;
static d_read_t		isavga_read;
static d_write_t	isavga_write;
static d_ioctl_t	isavga_ioctl;
static d_mmap_t		isavga_mmap;

/* Character-device entry points for the frame-buffer node. */
static struct cdevsw isavga_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	isavga_open,
	.d_close =	isavga_close,
	.d_read =	isavga_read,
	.d_write =	isavga_write,
	.d_ioctl =	isavga_ioctl,
	.d_mmap =	isavga_mmap,
	.d_name =	VGA_DRIVER_NAME,
	.d_maj =	-1,
};

#endif /* FB_INSTALL_CDEV */

/* Unconditionally add the (single) VGA child to the parent bus. */
static void
isavga_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, VGA_DRIVER_NAME, 0);
}

/*
 * Probe for a generic ISA VGA adapter and, on success, record the I/O
 * and memory ranges it reported so attach can allocate them.
 */
static int
isavga_probe(device_t dev)
{
	video_adapter_t adp;
	int error;

	/* No pnp support */
	if (isa_get_vendorid(dev))
		return (ENXIO);

	device_set_desc(dev, "Generic ISA VGA");
	error = vga_probe_unit(device_get_unit(dev), &adp,
	    device_get_flags(dev));
	if (error == 0) {
		bus_set_resource(dev, SYS_RES_IOPORT, 0,
		    adp.va_io_base, adp.va_io_size);
		bus_set_resource(dev, SYS_RES_MEMORY, 0,
		    adp.va_mem_base, adp.va_mem_size);
#if 0
		isa_set_port(dev, adp.va_io_base);
		isa_set_portsize(dev, adp.va_io_size);
		isa_set_maddr(dev, adp.va_mem_base);
		isa_set_msize(dev, adp.va_mem_size);
#endif
	}
	return error;
}

/*
 * Attach: claim the probed port/memory ranges and hand the unit over to
 * the generic vga code; optionally create the frame-buffer cdev.
 * NOTE(review): the bus_alloc_resource() results are not checked here.
 */
static int
isavga_attach(device_t dev)
{
	vga_softc_t *sc;
	int unit;
	int rid;
	int error;

	unit = device_get_unit(dev);
	sc = device_get_softc(dev);

	rid = 0;
	bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
	    0, ~0, 0, RF_ACTIVE | RF_SHAREABLE);
	rid = 0;
	bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 0, RF_ACTIVE | RF_SHAREABLE);

	error = vga_attach_unit(unit, sc, device_get_flags(dev));
	if (error)
		return error;

#ifdef FB_INSTALL_CDEV
	/* attach a virtual frame buffer device */
	error = fb_attach(VGA_MKMINOR(unit), sc->adp, &isavga_cdevsw);
	if (error)
		return error;
#endif /* FB_INSTALL_CDEV */

	if (bootverbose)
		(*vidsw[sc->adp->va_index]->diag)(sc->adp, bootverbose);

#if experimental
	device_add_child(dev, "fb", -1);
	bus_generic_attach(dev);
#endif

	return 0;
}

#ifdef FB_INSTALL_CDEV

/* cdev entry points: thin wrappers around the generic vga_* routines. */
static int
isavga_open(dev_t dev, int flag, int mode, struct thread *td)
{
	return vga_open(dev, VGA_SOFTC(VGA_UNIT(dev)), flag, mode, td);
}

static int
isavga_close(dev_t dev, int flag, int mode, struct thread *td)
{
	return vga_close(dev, VGA_SOFTC(VGA_UNIT(dev)), flag, mode, td);
}

static int
isavga_read(dev_t dev, struct uio *uio, int flag)
{
	return vga_read(dev, VGA_SOFTC(VGA_UNIT(dev)), uio, flag);
}

static int
isavga_write(dev_t dev, struct uio *uio, int flag)
{
	return vga_write(dev, VGA_SOFTC(VGA_UNIT(dev)), uio, flag);
}

static int
isavga_ioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	return vga_ioctl(dev, VGA_SOFTC(VGA_UNIT(dev)), cmd, arg, flag, td);
}

static int
isavga_mmap(dev_t dev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
{
	return vga_mmap(dev, VGA_SOFTC(VGA_UNIT(dev)), offset, paddr, prot);
}

#endif /* FB_INSTALL_CDEV */

/* newbus glue for the ISA VGA driver. */
static device_method_t isavga_methods[] = {
	DEVMETHOD(device_identify,	isavga_identify),
	DEVMETHOD(device_probe,		isavga_probe),
	DEVMETHOD(device_attach,	isavga_attach),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	{ 0, 0 }
};

static driver_t isavga_driver = {
	VGA_DRIVER_NAME,
	isavga_methods,
	sizeof(vga_softc_t),
};

DRIVER_MODULE(vga, isa, isavga_driver, isavga_devclass, 0, 0);

Index: head/sys/modules/splash/bmp/splash_bmp.c
===================================================================
--- head/sys/modules/splash/bmp/splash_bmp.c (revision 129879)
+++ head/sys/modules/splash/bmp/splash_bmp.c (revision 129880)
@@ -1,642 +1,643 @@
/*-
 * Copyright (c) 1999 Michael Smith
 * Copyright (c) 1999 Kazutaka YOKOTA
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*
 * $FreeBSD$
 */

/* NOTE(review): #include targets below were stripped by text extraction. */
#include
#include
#include
+#include
#include
#include
#include
#include
#ifndef PC98
#include
#include
#endif

#define FADE_TIMEOUT	15	/* sec */
#define FADE_LEVELS	10

/* Chosen splash video mode, and whether the image is currently shown. */
static int splash_mode = -1;
static int splash_on = FALSE;

static int bmp_start(video_adapter_t *adp);
static int bmp_end(video_adapter_t *adp);
static int bmp_splash(video_adapter_t *adp, int on);
static int bmp_Init(char *data, int swidth, int sheight, int sdepth);
static int bmp_Draw(video_adapter_t *adp);

/* Splash-decoder registration: name, entry points, image kind. */
static splash_decoder_t bmp_decoder = {
	"splash_bmp",
	bmp_start,
	bmp_end,
	bmp_splash,
	SPLASH_IMAGE,
};

SPLASH_DECODER(splash_bmp, bmp_decoder);

/*
 * Pick the first candidate video mode for which the loaded bitmap can
 * be decoded (bmp_Init succeeds) and record it in splash_mode.
 * Returns 0 on success, ENODEV if no image or no usable mode.
 */
static int
bmp_start(video_adapter_t *adp)
{
	/* currently only 256-color modes are supported XXX */
	static int modes[] = {
#ifdef PC98
		/*
		 * As 640x400 doesn't generally look great,
		 * it's least preferred here.
		 */
		M_PC98_PEGC640x400,
		M_PC98_PEGC640x480,
		M_PC98_EGC640x400,
#else
		M_VESA_CG640x480,
		M_VESA_CG800x600,
		M_VESA_CG1024x768,
		M_CG640x480,
		/*
		 * As 320x200 doesn't generally look great,
		 * it's least preferred here.
		 */
		M_VGA_CG320,
#endif
		-1,
	};
	video_info_t info;
	int i;

	if ((bmp_decoder.data == NULL) || (bmp_decoder.data_size <= 0)) {
		printf("splash_bmp: No bitmap file found\n");
		return ENODEV;
	}
	for (i = 0; modes[i] >= 0; ++i) {
		if (((*vidsw[adp->va_index]->get_info)(adp, modes[i],
		    &info) == 0)
		    && (bmp_Init((u_char *)bmp_decoder.data, info.vi_width,
		    info.vi_height, info.vi_depth) == 0))
			break;
	}
	splash_mode = modes[i];
	if (splash_mode < 0)
		printf("splash_bmp: No appropriate video mode found\n");
	if (bootverbose)
		printf("bmp_start(): splash_mode:%d\n", splash_mode);
	return ((splash_mode < 0) ? ENODEV : 0);
}

/* Decoder teardown: nothing was allocated, so nothing to release. */
static int
bmp_end(video_adapter_t *adp)
{
	/* nothing to do */
	return 0;
}

/*
 * Show (on != 0) or hide the splash image.  On the first "show", switch
 * to splash_mode, draw the bitmap and save the palette; afterwards, once
 * the system is fully up (!cold), fade the palette in and out by scaling
 * it between 0 and FADE_LEVELS every FADE_TIMEOUT seconds.
 */
static int
bmp_splash(video_adapter_t *adp, int on)
{
	static u_char pal[256*3];
	static long time_stamp;
	u_char tpal[256*3];
	static int fading = TRUE, brightness = FADE_LEVELS;
	struct timeval tv;
	int i;

	if (on) {
		if (!splash_on) {
			/* set up the video mode and draw something */
			if ((*vidsw[adp->va_index]->set_mode)(adp,
			    splash_mode))
				return 1;
			if (bmp_Draw(adp))
				return 1;
			(*vidsw[adp->va_index]->save_palette)(adp, pal);
			time_stamp = 0;
			splash_on = TRUE;
		}
		/*
		 * This is a kludge to fade the image away. This section of the
		 * code takes effect only after the system is completely up.
		 * FADE_TIMEOUT should be configurable.
		 */
		if (!cold) {
			getmicrotime(&tv);
			if (time_stamp == 0)
				time_stamp = tv.tv_sec;
			if (tv.tv_sec > time_stamp + FADE_TIMEOUT) {
				/* reverse direction at either end of range */
				if (fading)
					if (brightness == 0) {
						fading = FALSE;
						brightness++;
					} else
						brightness--;
				else
					if (brightness == FADE_LEVELS) {
						fading = TRUE;
						brightness--;
					} else
						brightness++;
				for (i = 0; i < sizeof(pal); ++i) {
					tpal[i] = pal[i] * brightness
					    / FADE_LEVELS;
				}
				(*vidsw[adp->va_index]->load_palette)(adp,
				    tpal);
				time_stamp = tv.tv_sec;
			}
		}
		return 0;
	} else {
		/* the video mode will be restored by the caller */
		splash_on = FALSE;
		return 0;
	}
}

/*
** Code to handle Microsoft DIB (".BMP") format images.
**
** Blame me (msmith@freebsd.org) if this is broken, not Soren.
*/

/* On-disk BMP structures; __packed because the file layout is unaligned. */
typedef struct tagBITMAPFILEHEADER {	/* bmfh */
	u_short	bfType		__packed;
	int	bfSize		__packed;
	u_short	bfReserved1	__packed;
	u_short	bfReserved2	__packed;
	int	bfOffBits	__packed;
} BITMAPFILEHEADER;

typedef struct tagBITMAPINFOHEADER {	/* bmih */
	int	biSize		__packed;
	int	biWidth		__packed;
	int	biHeight	__packed;
	short	biPlanes	__packed;
	short	biBitCount	__packed;
	int	biCompression	__packed;
	int	biSizeImage	__packed;
	int	biXPelsPerMeter	__packed;
	int	biYPelsPerMeter	__packed;
	int	biClrUsed	__packed;
	int	biClrImportant	__packed;
} BITMAPINFOHEADER;

typedef struct tagRGBQUAD {	/* rgbq */
	u_char	rgbBlue		__packed;
	u_char	rgbGreen	__packed;
	u_char	rgbRed		__packed;
	u_char	rgbReserved	__packed;
} RGBQUAD;

typedef struct tagBITMAPINFO {	/* bmi */
	BITMAPINFOHEADER	bmiHeader	__packed;
	RGBQUAD			bmiColors[256]	__packed;
} BITMAPINFO;

typedef struct tagBITMAPF {
	BITMAPFILEHEADER	bmfh	__packed;
	BITMAPINFO		bmfi	__packed;
} BITMAPF;

/* biCompression values understood by this decoder */
#define BI_RGB		0
#define BI_RLE8		1
#define BI_RLE4		2

/*
** all we actually care about the image
*/
typedef struct {
	int	width,height;		/* image dimensions */
	int	swidth,sheight;		/* screen dimensions for the current mode */
	u_char	depth;			/* image depth (1, 4, 8, 24 bits) */
	u_char	sdepth;			/* screen depth (1, 4, 8 bpp) */
	int	ncols;			/* number of colours */
	u_char	palette[256][3];	/* raw palette data */
	u_char	format;			/* one of the BI_* constants above */
	u_char	*data;			/* pointer to the raw data */
	u_char	*index;			/* running pointer to the data while drawing */
	u_char	*vidmem;		/* video memory allocated for drawing */
	video_adapter_t	*adp;
	int	bank;			/* current video window bank */
#ifdef PC98
	u_char	prev_val;		/* last GRCG tile value written */
#endif
} BMP_INFO;

static BMP_INFO bmp_info;

/*
** bmp_SetPix
**
** Given (info), set the pixel at (x),(y) to (val)
**
*/
static void
bmp_SetPix(BMP_INFO *info, int x, int y, u_char val)
{
	int sofs, bofs;
	int newbank;

	/*
	 * range check to avoid explosions
	 */
	if ((x < 0) || (x >= info->swidth) ||
	    (y < 0) || (y >= info->sheight))
		return;

	/*
	 * calculate offset into video memory;
	 * because 0,0 is bottom-left for DIB, we have to convert.
	 * The image is also centred within the screen.
	 */
	sofs = ((info->height - (y+1) +
	    (info->sheight - info->height) / 2) * info->adp->va_line_width);
	x += (info->swidth - info->width) / 2;

	switch(info->sdepth) {
#ifdef PC98
	case 4:
		sofs += (x >> 3);
		bofs = x & 0x7;			/* offset within byte */
		outb(0x7c, 0x80 | 0x40);	/* GRCG on & RMW mode */
		if (val != info->prev_val) {
			outb(0x7e, (val & 1) ? 0xff : 0);	/* tile B */
			outb(0x7e, (val & 2) ? 0xff : 0);	/* tile R */
			outb(0x7e, (val & 4) ? 0xff : 0);	/* tile G */
			outb(0x7e, (val & 8) ? 0xff : 0);	/* tile I */
			info->prev_val = val;
		}
		*(info->vidmem+sofs) = (0x80 >> bofs);	/* write new bit */
		outb(0x7c, 0);			/* GRCG off */
		break;
#else
	case 4:
	case 1:
		/* EGA/VGA planar modes */
		sofs += (x >> 3);
		/* switch the video window bank if the offset crosses it */
		newbank = sofs/info->adp->va_window_size;
		if (info->bank != newbank) {
			(*vidsw[info->adp->va_index]->set_win_org)(info->adp,
			    newbank*info->adp->va_window_size);
			info->bank = newbank;
		}
		sofs %= info->adp->va_window_size;
		bofs = x & 0x7;			/* offset within byte */
		outw(GDCIDX, (0x8000 >> bofs) | 0x08);	/* bit mask */
		outw(GDCIDX, (val << 8) | 0x00);	/* set/reset */
		*(info->vidmem + sofs) ^= 0xff;	/* read-modify-write */
		break;
#endif
	case 8:
		sofs += x;
		newbank = sofs/info->adp->va_window_size;
		if (info->bank != newbank) {
			(*vidsw[info->adp->va_index]->set_win_org)(info->adp,
			    newbank*info->adp->va_window_size);
			info->bank = newbank;
		}
		sofs %= info->adp->va_window_size;
		*(info->vidmem+sofs) = val;
		break;
	}
}

/*
** bmp_DecodeRLE4
**
** Given (data) pointing to a line of RLE4-format data and (line) being the starting
** line onscreen, decode the line.
*/
static void
bmp_DecodeRLE4(BMP_INFO *info, int line)
{
	int	count;		/* run count */
	u_char	val;
	int	x,y;		/* screen position */

	x = 0;			/* starting position */
	y = line;

	/* loop reading data */
	for (;;) {
		/*
		 * encoded mode starts with a run length, and then a byte with
		 * two colour indexes to alternate between for the run
		 */
		if (*info->index) {
			for (count = 0; count < *info->index; count++, x++) {
				if (count & 1) {
					/* odd count, low nybble */
					bmp_SetPix(info, x, y,
					    *(info->index+1) & 0x0f);
				} else {
					/* even count, high nybble */
					bmp_SetPix(info, x, y,
					    (*(info->index+1) >>4) & 0x0f);
				}
			}
			info->index += 2;
		/*
		 * A leading zero is an escape; it may signal the end of the
		 * bitmap, a cursor move, or some absolute data.
		 */
		} else {	/* zero tag may be absolute mode or an escape */
			switch (*(info->index+1)) {
			case 0:	/* end of line */
				info->index += 2;
				return;
			case 1:	/* end of bitmap */
				info->index = NULL;
				return;
			case 2:	/* move */
				x += *(info->index + 2);	/* new coords */
				y += *(info->index + 3);
				info->index += 4;
				break;
			default:	/* literal bitmap data */
				for (count = 0;
				    count < *(info->index + 1);
				    count++, x++) {
					/* byte with nybbles */
					val = *(info->index + 2 + (count / 2));
					if (count & 1) {
						val &= 0xf;	/* get low nybble */
					} else {
						val = (val >> 4);	/* get high nybble */
					}
					bmp_SetPix(info, x, y, val);
				}
				/* warning, this depends on integer truncation, do not hand-optimise! */
				info->index += 2 + ((count + 3) / 4) * 2;
				break;
			}
		}
	}
}

/*
** bmp_DecodeRLE8
** Given (data) pointing to a line of RLE8-format data and (line) being the starting
** line onscreen, decode the line.
*/
static void
bmp_DecodeRLE8(BMP_INFO *info, int line)
{
	int	count;		/* run count */
	int	x,y;		/* screen position */

	x = 0;			/* starting position */
	y = line;

	/* loop reading data */
	for(;;) {
		/*
		 * encoded mode starts with a run length, and then a byte with
		 * two colour indexes to alternate between for the run
		 */
		if (*info->index) {
			for (count = 0; count < *info->index; count++, x++)
				bmp_SetPix(info, x, y, *(info->index+1));
			info->index += 2;
		/*
		 * A leading zero is an escape; it may signal the end of the
		 * bitmap, a cursor move, or some absolute data.
		 */
		} else {	/* zero tag may be absolute mode or an escape */
			switch(*(info->index+1)) {
			case 0:	/* end of line */
				info->index += 2;
				return;
			case 1:	/* end of bitmap */
				info->index = NULL;
				return;
			case 2:	/* move */
				x += *(info->index + 2);	/* new coords */
				y += *(info->index + 3);
				info->index += 4;
				break;
			default:	/* literal bitmap data */
				for (count = 0;
				    count < *(info->index + 1);
				    count++, x++)
					bmp_SetPix(info, x, y,
					    *(info->index + 2 + count));
				/* must be an even count */
				info->index += 2 + count + (count & 1);
				break;
			}
		}
	}
}

/*
** bmp_DecodeLine
**
** Given (info) pointing to an image being decoded, (line) being the line currently
** being displayed, decode a line of data.
*/
static void
bmp_DecodeLine(BMP_INFO *info, int line)
{
	int		x;
	u_char		val, mask, *p;

	switch(info->format) {
	case BI_RGB:
		switch(info->depth) {
		case 8:
			for (x = 0; x < info->width; x++, info->index++)
				bmp_SetPix(info, x, line, *info->index);
			/* skip padding to the next 4-byte boundary */
			info->index += 3 - (--x % 4);
			break;
		case 4:
			p = info->index;
			for (x = 0; x < info->width; x++) {
				if (x & 1) {
					val = *p & 0xf;	/* get low nybble */
					p++;
				} else {
					val = *p >> 4;	/* get high nybble */
				}
				bmp_SetPix(info, x, line, val);
			}
			/* warning, this depends on integer truncation, do not hand-optimise! */
			info->index += ((x + 7) / 8) * 4;
			break;
		case 1:
			p = info->index;
			mask = 0x80;
			for (x = 0; x < info->width; x++) {
				val = (*p & mask) ? 1 : 0;
				mask >>= 1;
				if (mask == 0) {
					mask = 0x80;
					p++;
				}
				bmp_SetPix(info, x, line, val);
			}
			/* warning, this depends on integer truncation, do not hand-optimise! */
			info->index += ((x + 31) / 32) * 4;
			break;
		}
		break;
	case BI_RLE4:
		bmp_DecodeRLE4(info, line);
		break;
	case BI_RLE8:
		bmp_DecodeRLE8(info, line);
		break;
	}
}

/*
** bmp_Init
**
** Given a pointer (data) to the image of a BMP file, fill in bmp_info with what
** can be learnt from it.  Return nonzero if the file isn't usable.
**
** Take screen dimensions (swidth), (sheight) and (sdepth) and make sure we
** can work with these.
*/
static int
bmp_Init(char *data, int swidth, int sheight, int sdepth)
{
	BITMAPF	*bmf = (BITMAPF *)data;
	int	pind;

	bmp_info.data = NULL;	/* assume setup failed */

	/* check file ID */
	if (bmf->bmfh.bfType != 0x4d42) {
		printf("splash_bmp: not a BMP file\n");
		return(1);	/* XXX check word ordering for big-endian ports? */
	}

	/* do we understand this bitmap format? */
	if (bmf->bmfi.bmiHeader.biSize > sizeof(bmf->bmfi.bmiHeader)) {
		printf("splash_bmp: unsupported BMP format (size=%d)\n",
		    bmf->bmfi.bmiHeader.biSize);
		return(1);
	}

	/* save what we know about the screen */
	bmp_info.swidth = swidth;
	bmp_info.sheight = sheight;
	bmp_info.sdepth = sdepth;

	/* where's the data? */
	bmp_info.data = (u_char *)data + bmf->bmfh.bfOffBits;

	/* image parameters */
	bmp_info.width = bmf->bmfi.bmiHeader.biWidth;
	bmp_info.height = bmf->bmfi.bmiHeader.biHeight;
	bmp_info.depth = bmf->bmfi.bmiHeader.biBitCount;
	bmp_info.format = bmf->bmfi.bmiHeader.biCompression;

	switch(bmp_info.format) {	/* check compression format */
	case BI_RGB:
	case BI_RLE4:
	case BI_RLE8:
		break;
	default:
		printf("splash_bmp: unsupported compression format\n");
		return(1);	/* unsupported compression format */
	}

	/* palette details */
	bmp_info.ncols = (bmf->bmfi.bmiHeader.biClrUsed);
	bzero(bmp_info.palette,sizeof(bmp_info.palette));
	if (bmp_info.ncols == 0) {	/* uses all of them */
		bmp_info.ncols = 1 << bmf->bmfi.bmiHeader.biBitCount;
	}
	/* reject images larger than the screen or with too many colours */
	if ((bmp_info.height > bmp_info.sheight) ||
	    (bmp_info.width > bmp_info.swidth) ||
	    (bmp_info.ncols > (1 << sdepth))) {
		if (bootverbose)
			printf("splash_bmp: beyond screen capacity (%dx%d, %d colors)\n",
			    bmp_info.width, bmp_info.height, bmp_info.ncols);
		return(1);
	}

	/* read palette */
	for (pind = 0; pind < bmp_info.ncols; pind++) {
		bmp_info.palette[pind][0] = bmf->bmfi.bmiColors[pind].rgbRed;
		bmp_info.palette[pind][1] = bmf->bmfi.bmiColors[pind].rgbGreen;
		bmp_info.palette[pind][2] = bmf->bmfi.bmiColors[pind].rgbBlue;
	}
	return(0);
}

/*
** bmp_Draw
**
** Render the image.  Return nonzero if that's not possible.
**
*/
static int
bmp_Draw(video_adapter_t *adp)
{
	int		line;
#if 0
#ifndef PC98
	int		i;
#endif
#endif

	if (bmp_info.data == NULL) {	/* init failed, do nothing */
		return(1);
	}

	/* clear the screen */
	bmp_info.vidmem = (u_char *)adp->va_window;
	bmp_info.adp = adp;
	(*vidsw[adp->va_index]->clear)(adp);
	(*vidsw[adp->va_index]->set_win_org)(adp, 0);
	bmp_info.bank = 0;

	/* initialise the info structure for drawing */
	bmp_info.index = bmp_info.data;
#ifdef PC98
	bmp_info.prev_val = 255;
#endif

	/* set the palette for our image */
	(*vidsw[adp->va_index]->load_palette)(adp,
	    (u_char *)&bmp_info.palette);

#if 0
#ifndef PC98
	/* XXX: this is ugly, but necessary for EGA/VGA 1bpp/4bpp modes */
	if ((adp->va_type == KD_EGA) || (adp->va_type == KD_VGA)) {
		inb(adp->va_crtc_addr + 6);	/* reset flip-flop */
		outb(ATC, 0x14);
		outb(ATC, 0);
		for (i = 0; i < 16; ++i) {
			outb(ATC, i);
			outb(ATC, i);
		}
		inb(adp->va_crtc_addr + 6);	/* reset flip-flop */
		outb(ATC, 0x20);		/* enable palette */
		outw(GDCIDX, 0x0f01);		/* set/reset enable */
		if (bmp_info.sdepth == 1)
			outw(TSIDX, 0x0102);	/* unmask plane #0 */
	}
#endif
#endif

	/* decode line by line; bmp_info.index goes NULL at end-of-bitmap */
	for (line = 0; (line < bmp_info.height) && bmp_info.index; line++) {
		bmp_DecodeLine(&bmp_info, line);
	}
	return(0);
}

Index: head/sys/modules/splash/pcx/splash_pcx.c
===================================================================
--- head/sys/modules/splash/pcx/splash_pcx.c (revision 129879)
+++ head/sys/modules/splash/pcx/splash_pcx.c (revision 129880)
@@ -1,262 +1,263 @@
/*-
 * Copyright (c) 1999 Michael Smith
 * Copyright (c) 1999 Kazutaka YOKOTA
 * Copyright (c) 1999 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*
 * $FreeBSD$
 */

/* NOTE(review): #include targets below were stripped by text extraction. */
#include
#include
#include
#include
+#include
#include
#include
#include

#define FADE_TIMEOUT	300	/* sec */

/* Chosen splash video mode, and whether the image is currently shown. */
static int splash_mode = -1;
static int splash_on = FALSE;

/*
 * NOTE(review): the prototype below names the second pcx_init() parameter
 * "sdepth", but the definition further down calls it "size" and the caller
 * passes the data size in bytes -- the prototype name is misleading.
 */
static int pcx_start(video_adapter_t *adp);
static int pcx_end(video_adapter_t *adp);
static int pcx_splash(video_adapter_t *adp, int on);
static int pcx_init(char *data, int sdepth);
static int pcx_draw(video_adapter_t *adp);

/* Splash-decoder registration: name, entry points, image kind. */
static splash_decoder_t pcx_decoder = {
	"splash_pcx",
	pcx_start,
	pcx_end,
	pcx_splash,
	SPLASH_IMAGE,
};

SPLASH_DECODER(splash_pcx, pcx_decoder);

/* Parsed image parameters: geometry, RLE payload and palette pointers. */
static struct {
	int	width, height, bpsl;
	int	bpp, planes, zlen;
	const u_char *zdata;
	u_char	*palette;
} pcx_info;

/*
 * Validate the loaded PCX image and pick the first candidate mode large
 * enough, and of matching depth/planes, to display it.
 * Returns 0 on success, ENODEV otherwise.
 */
static int
pcx_start(video_adapter_t *adp)
{
	static int modes[] = {
		M_VGA_CG320,
		M_VESA_CG640x480,
		M_VESA_CG800x600,
		M_VESA_CG1024x768,
		-1,
	};
	video_info_t info;
	int i;

	if (pcx_decoder.data == NULL
	    || pcx_decoder.data_size <= 0
	    || pcx_init((u_char *)pcx_decoder.data, pcx_decoder.data_size))
		return ENODEV;
	if (bootverbose)
		printf("splash_pcx: image good:\n"
		    " width = %d\n"
		    " height = %d\n"
		    " depth = %d\n"
		    " planes = %d\n",
		    pcx_info.width, pcx_info.height,
		    pcx_info.bpp, pcx_info.planes);
	for (i = 0; modes[i] >= 0; ++i) {
		if (get_mode_info(adp, modes[i], &info) != 0)
			continue;
		if (bootverbose)
			printf("splash_pcx: considering mode %d:\n"
			    " vi_width = %d\n"
			    " vi_height = %d\n"
			    " vi_depth = %d\n"
			    " vi_planes = %d\n",
			    modes[i],
			    info.vi_width, info.vi_height,
			    info.vi_depth, info.vi_planes);
		if (info.vi_width >= pcx_info.width
		    && info.vi_height >= pcx_info.height
		    && info.vi_depth == pcx_info.bpp
		    && info.vi_planes == pcx_info.planes)
			break;
	}
	splash_mode = modes[i];
	if (splash_mode == -1)
		return ENODEV;
	if (bootverbose)
		printf("pcx_splash: selecting mode %d\n", splash_mode);
	return 0;
}

/* Decoder teardown: nothing to release. */
static int
pcx_end(video_adapter_t *adp)
{
	/* nothing to do */
	return 0;
}

/* Show or hide the splash image (mode restore is left to the caller). */
static int
pcx_splash(video_adapter_t *adp, int on)
{
	if (on) {
		if (!splash_on) {
			if (set_video_mode(adp, splash_mode) || pcx_draw(adp))
				return 1;
			splash_on = TRUE;
		}
		return 0;
	} else {
		splash_on = FALSE;
		return 0;
	}
}

/* On-disk PCX header layout. */
struct pcxheader {
	u_char	manufactor;
	u_char	version;
	u_char	encoding;
	u_char	bpp;
	u_short	xmin, ymin, xmax, ymax;
	u_short	hres, vres;
	u_char	colormap[48];
	u_char	rsvd;
	u_char	nplanes;
	u_short	bpsl;
	u_short	palinfo;
	u_short	hsize, vsize;
};

#define MAXSCANLINE 1024

/*
 * Sanity-check the PCX file (version 5, RLE, 8bpp, single plane, palette
 * marker byte 12 before the trailing 768-byte palette) and record its
 * geometry, compressed payload and palette in pcx_info.
 */
static int
pcx_init(char *data, int size)
{
	const struct pcxheader *hdr;

	hdr = (const struct pcxheader *)data;

	if (size < 128 + 1 + 1 + 768
	    || hdr->manufactor != 10
	    || hdr->version != 5
	    || hdr->encoding != 1
	    || hdr->nplanes != 1
	    || hdr->bpp != 8
	    || hdr->bpsl > MAXSCANLINE
	    || data[size-769] != 12) {
		printf("splash_pcx: invalid PCX image\n");
		return 1;
	}
	pcx_info.width = hdr->xmax - hdr->xmin + 1;
	pcx_info.height = hdr->ymax - hdr->ymin + 1;
	pcx_info.bpsl = hdr->bpsl;
	pcx_info.bpp = hdr->bpp;
	pcx_info.planes = hdr->nplanes;
	pcx_info.zlen = size - (128 + 1 + 768);
	pcx_info.zdata = data + 128;
	pcx_info.palette = data + size - 768;
	return 0;
}

/*
 * Decompress the RLE scanlines into the frame buffer, centring the image
 * and switching the video window bank as the write position crosses it.
 */
static int
pcx_draw(video_adapter_t *adp)
{
	u_char *vidmem;
	int swidth, sheight, sbpsl, sdepth, splanes;
	int banksize, origin;
	int c, i, j, pos, scan, x, y;
	u_char line[MAXSCANLINE];

	if (pcx_info.zlen < 1)
		return 1;

	load_palette(adp, pcx_info.palette);

	vidmem = (u_char *)adp->va_window;
	swidth = adp->va_info.vi_width;
	sheight = adp->va_info.vi_height;
	sbpsl = adp->va_line_width;
	sdepth = adp->va_info.vi_depth;
	splanes = adp->va_info.vi_planes;
	banksize = adp->va_window_size;

	/* blank the whole screen, one bank at a time */
	for (origin = 0; origin < sheight*sbpsl; origin += banksize) {
		set_origin(adp, origin);
		bzero(vidmem, banksize);
	}

	/* centre the image and find the bank containing its first byte */
	x = (swidth - pcx_info.width) / 2;
	y = (sheight - pcx_info.height) / 2;
	origin = 0;
	pos = y * sbpsl + x;
	while (pos > banksize) {
		pos -= banksize;
		origin += banksize;
	}
	set_origin(adp, origin);

	for (scan = i = 0; scan < pcx_info.height;
	    ++scan, ++y, pos += sbpsl) {
		/* decode one scanline of RLE data into line[] */
		for (j = 0; j < pcx_info.bpsl && i < pcx_info.zlen; ++i) {
			if ((pcx_info.zdata[i] & 0xc0) == 0xc0) {
				c = pcx_info.zdata[i++] & 0x3f;
				if (i >= pcx_info.zlen)
					return 1;
			} else {
				c = 1;
			}
			if (j + c > pcx_info.bpsl)
				return 1;
			while (c--)
				line[j++] = pcx_info.zdata[i];
		}
		if (pos > banksize) {
			origin += banksize;
			pos -= banksize;
			set_origin(adp, origin);
		}
		if (pos + pcx_info.width > banksize) {
			/* scanline crosses bank boundary */
			j = banksize - pos;
			bcopy(line, vidmem + pos, j);
			origin += banksize;
			pos -= banksize;
			set_origin(adp, origin);
			bcopy(line + j, vidmem, pcx_info.width - j);
		} else {
			bcopy(line, vidmem + pos, pcx_info.width);
		}
	}
	return 0;
}

Index: head/sys/net/bridge.c
===================================================================
--- head/sys/net/bridge.c (revision 129879)
+++ head/sys/net/bridge.c (revision 129880)
@@ -1,1254 +1,1255 @@
/*
 * Copyright (c) 1998-2002 Luigi Rizzo
 *
 * Work partly supported by: Cisco Systems, Inc. - NSITE lab, RTP, NC
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * This code implements bridging in FreeBSD. It only acts on ethernet * interfaces, including VLANs (others are still usable for routing). * A FreeBSD host can implement multiple logical bridges, called * "clusters". Each cluster is made of a set of interfaces, and * identified by a "cluster-id" which is a number in the range 1..2^16-1. * * Bridging is enabled by the sysctl variable * net.link.ether.bridge.enable * the grouping of interfaces into clusters is done with * net.link.ether.bridge.config * containing a list of interfaces each optionally followed by * a colon and the cluster it belongs to (1 is the default). * Separators can be * spaces, commas or tabs, e.g. * net.link.ether.bridge.config="fxp0:2 fxp1:2 dc0 dc1:1" * Optionally bridged packets can be passed through the firewall, * this is controlled by the variable * net.link.ether.bridge.ipfw * * For each cluster there is a descriptor (cluster_softc) storing * the following data structures: * - a hash table with the MAC address and destination interface for each * known node. The table is indexed using a hash of the source address. * - an array with the MAC addresses of the interfaces used in the cluster. * * Input packets are tapped near the beginning of ether_input(), and * analysed by bridge_in(). Depending on the result, the packet * can be forwarded to one or more output interfaces using bdg_forward(), * and/or sent to the upper layer (e.g. in case of multicast). 
* * Output packets are intercepted near the end of ether_output(). * The correct destination is selected by bridge_dst_lookup(), * and then forwarding is done by bdg_forward(). * * The arp code is also modified to let a machine answer to requests * irrespective of the port the request came from. * * In case of loops in the bridging topology, the bridge detects this * event and temporarily mutes output bridging on one of the ports. * Periodically, interfaces are unmuted by bdg_timeout(). * Muting is only implemented as a safety measure, and also as * a mechanism to support a user-space implementation of the spanning * tree algorithm. * * To build a bridging kernel, use the following option * option BRIDGE * and then at runtime set the sysctl variable to enable bridging. * * Only one interface per cluster is supposed to have addresses set (but * there are no substantial problems if you set addresses for none or * for more than one interface). * Bridging will act before routing, but nothing prevents a machine * from doing both (modulo bugs in the implementation...). * * THINGS TO REMEMBER * - bridging is incompatible with multicast routing on the same * machine. There is not an easy fix to this. * - be very careful when bridging VLANs * - loop detection is still not very robust. */ #include "opt_pfil_hooks.h" #include #include #include #include #include #include /* for net/if.h */ #include /* string functions */ #include +#include #include #include #include #include /* for struct arpcom */ #include #include #include #include #include #include #ifdef PFIL_HOOKS #include #include #endif #include #include #include #include /*--------------------*/ #define ETHER_ADDR_COPY(_dst,_src) bcopy(_src, _dst, ETHER_ADDR_LEN) #define ETHER_ADDR_EQ(_a1,_a2) (bcmp(_a1, _a2, ETHER_ADDR_LEN) == 0) /* * For each cluster, source MAC addresses are stored into a hash * table which locates the port they reside on. 
*/

/*
 * Forwarding-table geometry: one open-addressed (direct-mapped) table of
 * HASH_SIZE entries per cluster; collisions simply overwrite (see bridge_in).
 */
#define HASH_SIZE 8192	/* Table size, must be a power of 2 */

typedef struct hash_table {		/* each entry. */
	struct ifnet *	name;		/* interface the MAC was learned on; NULL = free slot */
	u_char		etheraddr[ETHER_ADDR_LEN];
	u_int16_t	used;		/* also, padding */
} bdg_hash_table ;

/*
 * The hash function applied to MAC addresses. Out of the 6 bytes,
 * the last ones tend to vary more. Since we are on a little endian machine,
 * we have to do some gimmick...
 */
#define HASH_FN(addr)   (	\
	ntohs( ((u_int16_t *)addr)[1] ^ ((u_int16_t *)addr)[2] ) & (HASH_SIZE -1))

/*
 * This is the data structure where local addresses are stored.
 */
struct bdg_addr {
	u_char	etheraddr[ETHER_ADDR_LEN] ;
	u_int16_t _padding ;
};

/*
 * The configuration of each cluster includes the cluster id, a pointer to
 * the hash table, and an array of local MAC addresses (of size "ports").
 */
struct cluster_softc {
	u_int16_t	cluster_id;	/* stored in network byte order (see add_cluster callers) */
	u_int16_t	ports;		/* number of entries used in my_macs[] */
	bdg_hash_table *ht;		/* learned-address table, HASH_SIZE entries */
	struct bdg_addr	*my_macs;	/* local MAC addresses */
};


extern struct protosw inetsw[];			/* from netinet/ip_input.c */

static int n_clusters;				/* number of clusters */
static struct cluster_softc *clusters;		/* array of n_clusters descriptors */

/* Single global mutex serializing all bridge state (clusters, ifp2sc, tables). */
static struct mtx bdg_mtx;
#define	BDG_LOCK_INIT()		mtx_init(&bdg_mtx, "bridge", NULL, MTX_DEF)
#define	BDG_LOCK_DESTROY()	mtx_destroy(&bdg_mtx)
#define	BDG_LOCK()		mtx_lock(&bdg_mtx)
#define	BDG_UNLOCK()		mtx_unlock(&bdg_mtx)
#define	BDG_LOCK_ASSERT()	mtx_assert(&bdg_mtx, MA_OWNED)

/* Per-interface softc accessors, indexed by if_index. */
#define BDG_MUTED(ifp) (ifp2sc[ifp->if_index].flags & IFF_MUTE)
#define BDG_MUTE(ifp) ifp2sc[ifp->if_index].flags |= IFF_MUTE
#define BDG_CLUSTER(ifp) (ifp2sc[ifp->if_index].cluster)

/* src == NULL means locally generated packet: allowed everywhere. */
#define BDG_SAMECLUSTER(ifp,src) \
	(src == NULL || BDG_CLUSTER(ifp) == BDG_CLUSTER(src) )

#ifdef __i386__
/* i386 tolerates the unaligned 32-bit loads used by these fast compares. */
#define BDG_MATCH(a,b) ( \
	((u_int16_t *)(a))[2] == ((u_int16_t *)(b))[2] && \
	*((u_int32_t *)(a)) == *((u_int32_t *)(b)) )
#define IS_ETHER_BROADCAST(a) ( \
	*((u_int32_t *)(a)) == 0xffffffff && \
	((u_int16_t *)(a))[2] == 0xffff )
#else
/* for machines that do not support unaligned access */
#define	BDG_MATCH(a,b)		ETHER_ADDR_EQ(a,b)
#define	IS_ETHER_BROADCAST(a)	ETHER_ADDR_EQ(a,"\377\377\377\377\377\377")
#endif

SYSCTL_DECL(_net_link_ether);
SYSCTL_NODE(_net_link_ether, OID_AUTO, bridge, CTLFLAG_RD, 0,
    "Bridge parameters");
static char bridge_version[] = "031224";
SYSCTL_STRING(_net_link_ether_bridge, OID_AUTO, version, CTLFLAG_RD,
    bridge_version, 0, "software version");

#define BRIDGE_DEBUG
#ifdef BRIDGE_DEBUG
int	bridge_debug = 0;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, debug, CTLFLAG_RW,
    &bridge_debug, 0, "control debugging printfs");
#define	DPRINTF(X)	if (bridge_debug) printf X
#else
#define	DPRINTF(X)
#endif

#ifdef BRIDGE_TIMING
/*
 * For timing-related debugging, you can use the following macros.
 * remember, rdtsc() only works on Pentium-class machines

    quad_t ticks;
    DDB(ticks = rdtsc();)
    ... interesting code ...
    DDB(bdg_fw_ticks += (u_long)(rdtsc() - ticks) ; bdg_fw_count++ ;)

 *
 */
#define DDB(x)	x
static int bdg_fw_avg;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, fw_avg, CTLFLAG_RW,
    &bdg_fw_avg, 0,"Cycle counter avg");
static int bdg_fw_ticks;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, fw_ticks, CTLFLAG_RW,
    &bdg_fw_ticks, 0,"Cycle counter item");
static int bdg_fw_count;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, fw_count, CTLFLAG_RW,
    &bdg_fw_count, 0,"Cycle counter count");
#else
#define DDB(x)
#endif

static int bdginit(void);
static void parse_bdg_cfg(void);

/* Tunables: which packet filters bridged traffic is passed through. */
static int bdg_ipf;		/* IPFilter enabled in bridge */
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, ipf, CTLFLAG_RW,
    &bdg_ipf, 0,"Pass bridged pkts through IPFilter");
static int bdg_ipfw;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, ipfw, CTLFLAG_RW,
    &bdg_ipfw,0,"Pass bridged pkts through firewall");

static int bdg_copy;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, copy, CTLFLAG_RW,
    &bdg_copy, 0, "Force packet copy in bdg_forward");

/* Statistics counters exported via sysctl. */
int bdg_ipfw_drops;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, ipfw_drop,
    CTLFLAG_RW, &bdg_ipfw_drops,0,"");
int bdg_ipfw_colls;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, ipfw_collisions,
    CTLFLAG_RW, &bdg_ipfw_colls,0,"");

static int bdg_thru;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, packets, CTLFLAG_RW,
    &bdg_thru, 0, "Packets through bridge");
static int bdg_dropped;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, dropped, CTLFLAG_RW,
    &bdg_dropped, 0, "Packets dropped in bdg_forward");
static int bdg_predict;
SYSCTL_INT(_net_link_ether_bridge, OID_AUTO, predict, CTLFLAG_RW,
    &bdg_predict, 0, "Correctly predicted header location");

#ifdef BRIDGE_DEBUG
/* Printable names for the special BDG_* destination sentinels (debug only). */
static char *bdg_dst_names[] = {
	"BDG_NULL    ",
	"BDG_BCAST   ",
	"BDG_MCAST   ",
	"BDG_LOCAL   ",
	"BDG_DROP    ",
	"BDG_UNKNOWN ",
	"BDG_IN      ",
	"BDG_OUT     ",
	"BDG_FORWARD " };
#endif /* BRIDGE_DEBUG */

/*
 * System initialization
 */
static struct bdg_stats bdg_stats ;
SYSCTL_STRUCT(_net_link_ether_bridge, OID_AUTO, stats, CTLFLAG_RD,
    &bdg_stats, bdg_stats, "bridge statistics");

static struct callout bdg_callout;

/*
 * Add an interface to a cluster, possibly creating a new entry in
 * the cluster table. This requires reallocation of the table and
 * updating pointers in ifp2sc.
*/
/*
 * Look up (or create) the cluster descriptor for cluster_id and register
 * ifp's MAC address as a local address of that cluster.
 *
 * cluster_id is compared as stored (callers pass it already in network
 * byte order via htons()).  Returns a pointer into the (possibly
 * reallocated) clusters[] array, or NULL on allocation failure.
 * Caller must hold the bridge lock; the returned pointer is only stable
 * while the lock is held, since a later call may realloc the array.
 */
static struct cluster_softc *
add_cluster(u_int16_t cluster_id, struct ifnet *ifp)
{
	struct cluster_softc *c = NULL;
	int i;

	BDG_LOCK_ASSERT();

	/* Existing cluster?  'i' is the index used at the "found" label. */
	for (i = 0; i < n_clusters ; i++)
		if (clusters[i].cluster_id == cluster_id)
			goto found;

	/* Not found, need to reallocate */
	c = malloc((1+n_clusters) * sizeof (*c), M_IFADDR, M_NOWAIT | M_ZERO);
	if (c == NULL) {/* malloc failure */
		printf("-- bridge: cannot add new cluster\n");
		goto bad;
	}
	c[n_clusters].ht = (struct hash_table *)
		malloc(HASH_SIZE * sizeof(struct hash_table),
		    M_IFADDR, M_NOWAIT | M_ZERO);
	if (c[n_clusters].ht == NULL) {
		printf("-- bridge: cannot allocate hash table for new cluster\n");
		goto bad;
	}
	c[n_clusters].my_macs = (struct bdg_addr *)
		malloc(BDG_MAX_PORTS * sizeof(struct bdg_addr),
		    M_IFADDR, M_NOWAIT | M_ZERO);
	if (c[n_clusters].my_macs == NULL) {
		printf("-- bridge: cannot allocate mac addr table for new cluster\n");
		free(c[n_clusters].ht, M_IFADDR);
		goto bad;
	}
	c[n_clusters].cluster_id = cluster_id;
	c[n_clusters].ports = 0;
	/*
	 * now copy old descriptors here
	 */
	if (n_clusters > 0) {
		for (i=0; i < n_clusters; i++)
			c[i] = clusters[i];
		/*
		 * and finally update pointers in ifp2sc
		 * (they point into the old array; relocate by offset).
		 */
		for (i = 0 ; i < if_index && i < BDG_MAX_PORTS; i++)
			if (ifp2sc[i].cluster != NULL)
				ifp2sc[i].cluster = c + (ifp2sc[i].cluster - clusters);
		free(clusters, M_IFADDR);
	}
	clusters = c;
	i = n_clusters;		/* index of cluster entry */
	n_clusters++;
found:
	c = clusters + i;		/* the right cluster ... */
	/* Record ifp's hardware address as local to this cluster. */
	ETHER_ADDR_COPY(c->my_macs[c->ports].etheraddr, IFP2AC(ifp)->ac_enaddr);
	c->ports++;
	return c;

bad:
	if (c)
		free(c, M_IFADDR);
	return NULL;
}

/*
 * Turn off bridging, by clearing promisc mode on the interface,
 * marking the interface as unused, and clearing the name in the
 * stats entry.
 * Also dispose the hash tables associated with the clusters.
*/
static void
bridge_off(void)
{
	struct ifnet *ifp ;
	int i;

	BDG_LOCK_ASSERT();

	DPRINTF(("%s: n_clusters %d\n", __func__, n_clusters));

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct bdg_softc *b;

		if (ifp->if_index >= BDG_MAX_PORTS)
			continue;	/* make sure we do not go beyond the end */
		b = &ifp2sc[ifp->if_index];

		if ( b->flags & IFF_BDG_PROMISC ) {
			ifpromisc(ifp, 0);
			b->flags &= ~(IFF_BDG_PROMISC|IFF_MUTE) ;
			DPRINTF(("%s: %s promisc OFF if_flags 0x%x "
			    "bdg_flags 0x%x\n", __func__, ifp->if_xname,
			    ifp->if_flags, b->flags));
		}
		b->flags &= ~(IFF_USED) ;
		b->cluster = NULL;
		bdg_stats.s[ifp->if_index].name[0] = '\0';
	}
	IFNET_RUNLOCK();

	/* flush_tables: release every cluster's hash/MAC tables and the array */
	for (i=0; i < n_clusters; i++) {
		free(clusters[i].ht, M_IFADDR);
		free(clusters[i].my_macs, M_IFADDR);
	}
	if (clusters != NULL)
		free(clusters, M_IFADDR);
	clusters = NULL;
	n_clusters =0;
}

/*
 * set promisc mode on the interfaces we use.
 * Walks the global interface list, brings each bridged (IFF_USED)
 * interface up if needed, enables promiscuous mode once per interface
 * (tracked via IFF_BDG_PROMISC), and clears any mute flag.
 */
static void
bridge_on(void)
{
	struct ifnet *ifp ;

	BDG_LOCK_ASSERT();

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		struct bdg_softc *b = &ifp2sc[ifp->if_index];

		if ( !(b->flags & IFF_USED) )
			continue ;
		if ( !( ifp->if_flags & IFF_UP) ) {
			if_up(ifp);
		}
		if ( !(b->flags & IFF_BDG_PROMISC) ) {
			(void) ifpromisc(ifp, 1);
			b->flags |= IFF_BDG_PROMISC ;
			DPRINTF(("%s: %s promisc ON if_flags 0x%x bdg_flags 0x%x\n",
			    __func__, ifp->if_xname, ifp->if_flags, b->flags));
		}
		if (b->flags & IFF_MUTE) {
			DPRINTF(("%s: unmuting %s\n", __func__, ifp->if_xname));
			b->flags &= ~IFF_MUTE;
		}
	}
	IFNET_RUNLOCK();
}

/* Bridge configuration string, set via sysctl (see sysctl_bdg_cfg). */
static char bridge_cfg[1024];		/* NB: in BSS so initialized to zero */

/**
 * reconfigure bridge.
 * This is also done every time we attach or detach an interface.
 * Main use is to make sure that we do not bridge on some old
 * (ejected) device. So, it would be really useful to have a
 * pointer to the modified device as an argument. Without it, we
 * have to scan all interfaces.
*/
static void
reconfigure_bridge_locked(void)
{
	BDG_LOCK_ASSERT();

	/* Tear everything down, then rebuild from bridge_cfg if enabled. */
	bridge_off();
	if (do_bridge) {
		if (if_index >= BDG_MAX_PORTS) {
			printf("-- sorry too many interfaces (%d, max is %d),"
			    " disabling bridging\n", if_index, BDG_MAX_PORTS);
			do_bridge = 0;
			return;
		}
		parse_bdg_cfg();
		bridge_on();
	}
}

/* Locked wrapper around reconfigure_bridge_locked() for external callers. */
static void
reconfigure_bridge(void)
{
	BDG_LOCK();
	reconfigure_bridge_locked();
	BDG_UNLOCK();
}

/*
 * parse the config string, set IFF_USED, name and cluster_id
 * for all interfaces found.
 * The config string is a list of "if[:cluster]" with
 * a number of possible separators (see "sep"). In particular the
 * use of the space lets you set bridge_cfg with the output from
 * "ifconfig -l"
 *
 * NOTE: parses bridge_cfg in place by temporarily NUL-terminating each
 * name (saved in 'c', restored afterwards).
 */
static void
parse_bdg_cfg(void)
{
	char *p, *beg;
	int l, cluster;
	static const char *sep = ", \t";

	BDG_LOCK_ASSERT();

	for (p = bridge_cfg; *p ; p++) {
		struct ifnet *ifp;
		int found = 0;
		char c;

		if (index(sep, *p))	/* skip separators */
			continue ;
		/* names are lowercase and digits */
		for ( beg = p ; islower(*p) || isdigit(*p) ; p++ )
			;
		l = p - beg ;		/* length of name string */
		if (l == 0)		/* invalid name */
			break ;
		if ( *p != ':' )	/* no ':', assume default cluster 1 */
			cluster = 1 ;
		else			/* fetch cluster */
			cluster = strtoul( p+1, &p, 10);
		c = *p;
		*p = '\0';
		/*
		 * now search in interface list for a matching name
		 */
		IFNET_RLOCK();	/* could sleep XXX */
		TAILQ_FOREACH(ifp, &ifnet, if_link) {
			/* max(l, strlen(...)) forces a full-name match, not a prefix match */
			if (!strncmp(beg, ifp->if_xname, max(l, strlen(ifp->if_xname)))) {
				struct bdg_softc *b = &ifp2sc[ifp->if_index];
				if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN) {
					printf("%s is not an ethernet, continue\n", ifp->if_xname);
					continue;
				}
				if (b->flags & IFF_USED) {
					printf("%s already used, skipping\n", ifp->if_xname);
					break;
				}
				b->cluster = add_cluster(htons(cluster), ifp);
				b->flags |= IFF_USED ;
				snprintf(bdg_stats.s[ifp->if_index].name,
				    sizeof(bdg_stats.s[ifp->if_index].name),
				    "%s:%d", ifp->if_xname, cluster);
				DPRINTF(("%s: found %s next c %d\n", __func__,
				    bdg_stats.s[ifp->if_index].name, c));
				found = 1;
				break ;
			}
		}
		IFNET_RUNLOCK();
		if (!found)
			printf("interface %s Not found in bridge\n", beg);
		*p = c;		/* restore the separator we overwrote */
		if (c == '\0')
			break; /* no more */
	}
}

/*
 * handler for net.link.ether.bridge
 * Toggles do_bridge and rebuilds the bridge configuration on change.
 */
static int
sysctl_bdg(SYSCTL_HANDLER_ARGS)
{
	int enable = do_bridge;
	int error;

	error = sysctl_handle_int(oidp, &enable, 0, req);
	BDG_LOCK();
	if (enable != do_bridge) {
		do_bridge = enable;
		reconfigure_bridge_locked();
	}
	BDG_UNLOCK();
	return error ;
}
SYSCTL_PROC(_net_link_ether_bridge, OID_AUTO, enable,
    CTLTYPE_INT|CTLFLAG_RW, &do_bridge, 0, &sysctl_bdg, "I", "Bridging");

/*
 * handler for net.link.ether.bridge_cfg
 * Copies the string out of the lock, lets sysctl update it, then commits
 * and reconfigures under the lock only if it actually changed.
 */
static int
sysctl_bdg_cfg(SYSCTL_HANDLER_ARGS)
{
	int error;
	char *new_cfg;

	new_cfg = malloc(sizeof(bridge_cfg), M_TEMP, M_WAITOK);
	bcopy(bridge_cfg, new_cfg, sizeof(bridge_cfg));

	error = sysctl_handle_string(oidp, new_cfg, oidp->oid_arg2, req);
	if (error == 0) {
		BDG_LOCK();
		if (strcmp(new_cfg, bridge_cfg)) {
			bcopy(new_cfg, bridge_cfg, sizeof(bridge_cfg));
			reconfigure_bridge_locked();
		}
		BDG_UNLOCK();
	}
	free(new_cfg, M_TEMP);
	return error;
}
SYSCTL_PROC(_net_link_ether_bridge, OID_AUTO, config,
    CTLTYPE_STRING|CTLFLAG_RW, &bridge_cfg, sizeof(bridge_cfg),
    &sysctl_bdg_cfg, "A", "Bridge configuration");

/* Write-only sysctl: any write forces a bridge reconfiguration. */
static int
sysctl_refresh(SYSCTL_HANDLER_ARGS)
{
	if (req->newptr)
		reconfigure_bridge();

	return 0;
}
SYSCTL_PROC(_net_link_ether_bridge, OID_AUTO, refresh, CTLTYPE_INT|CTLFLAG_WR,
    NULL, 0, &sysctl_refresh, "I", "iface refresh");

#ifndef BURN_BRIDGES
/* Compatibility aliases for the pre-reorganization sysctl names. */
#define SYSCTL_OID_COMPAT(parent, nbr, name, kind, a1, a2, handler, fmt, descr)\
	static struct sysctl_oid sysctl__##parent##_##name##_compat = {	\
		&sysctl_##parent##_children, { 0 },			\
		nbr, kind, a1, a2, #name, handler, fmt, 0, descr };	\
	DATA_SET(sysctl_set, sysctl__##parent##_##name##_compat)
#define SYSCTL_INT_COMPAT(parent, nbr, name, access, ptr, val, descr)	\
	SYSCTL_OID_COMPAT(parent, nbr, name, CTLTYPE_INT|(access),	\
	    ptr, val, sysctl_handle_int, "I", descr)
#define SYSCTL_STRUCT_COMPAT(parent, nbr, name, access, ptr, type, 
descr)\ SYSCTL_OID_COMPAT(parent, nbr, name, CTLTYPE_OPAQUE|(access), \ ptr, sizeof(struct type), sysctl_handle_opaque, \ "S," #type, descr) #define SYSCTL_PROC_COMPAT(parent, nbr, name, access, ptr, arg, handler, fmt, descr) \ SYSCTL_OID_COMPAT(parent, nbr, name, (access), \ ptr, arg, handler, fmt, descr) SYSCTL_INT_COMPAT(_net_link_ether, OID_AUTO, bridge_ipf, CTLFLAG_RW, &bdg_ipf, 0,"Pass bridged pkts through IPFilter"); SYSCTL_INT_COMPAT(_net_link_ether, OID_AUTO, bridge_ipfw, CTLFLAG_RW, &bdg_ipfw,0,"Pass bridged pkts through firewall"); SYSCTL_STRUCT_COMPAT(_net_link_ether, PF_BDG, bdgstats, CTLFLAG_RD, &bdg_stats, bdg_stats, "bridge statistics"); SYSCTL_PROC_COMPAT(_net_link_ether, OID_AUTO, bridge_cfg, CTLTYPE_STRING|CTLFLAG_RW, &bridge_cfg, sizeof(bridge_cfg), &sysctl_bdg_cfg, "A", "Bridge configuration"); SYSCTL_PROC_COMPAT(_net_link_ether, OID_AUTO, bridge_refresh, CTLTYPE_INT|CTLFLAG_WR, NULL, 0, &sysctl_refresh, "I", "iface refresh"); #endif static int bdg_loops; static int bdg_slowtimer = 0; static int bdg_age_index = 0; /* index of table position to age */ /* * called periodically to flush entries etc. */ static void bdg_timeout(void *dummy) { if (do_bridge) { int l, i; BDG_LOCK(); /* * age entries in the forwarding table. 
*/ l = bdg_age_index + HASH_SIZE/4 ; if (l > HASH_SIZE) l = HASH_SIZE; for (i = 0; i < n_clusters; i++) { bdg_hash_table *bdg_table = clusters[i].ht; for (; bdg_age_index < l; bdg_age_index++) if (bdg_table[bdg_age_index].used) bdg_table[bdg_age_index].used = 0; else if (bdg_table[bdg_age_index].name) { DPRINTF(("%s: flushing stale entry %d\n", __func__, bdg_age_index)); bdg_table[bdg_age_index].name = NULL; } } if (bdg_age_index >= HASH_SIZE) bdg_age_index = 0; if (--bdg_slowtimer <= 0 ) { bdg_slowtimer = 5; bridge_on(); /* we just need unmute, really */ bdg_loops = 0; } BDG_UNLOCK(); } callout_reset(&bdg_callout, 2*hz, bdg_timeout, NULL); } /* * Find the right pkt destination: * BDG_BCAST is a broadcast * BDG_MCAST is a multicast * BDG_LOCAL is for a local address * BDG_DROP must be dropped * other ifp of the dest. interface (incl.self) * * We assume this is only called for interfaces for which bridging * is enabled, i.e. BDG_USED(ifp) is true. */ static __inline struct ifnet * bridge_dst_lookup(struct ether_header *eh, struct cluster_softc *c) { bdg_hash_table *bt; /* pointer to entry in hash table */ BDG_LOCK_ASSERT(); if (ETHER_IS_MULTICAST(eh->ether_dhost)) return IS_ETHER_BROADCAST(eh->ether_dhost) ? BDG_BCAST : BDG_MCAST; /* * Lookup local addresses in case one matches. We optimize * for the common case of two interfaces. */ KASSERT(c->ports != 0, ("lookup with no ports!")); switch (c->ports) { int i; default: for (i = c->ports-1; i > 1; i--) { if (ETHER_ADDR_EQ(c->my_macs[i].etheraddr, eh->ether_dhost)) return BDG_LOCAL; } /* fall thru... 
*/ case 2: if (ETHER_ADDR_EQ(c->my_macs[1].etheraddr, eh->ether_dhost)) return BDG_LOCAL; case 1: if (ETHER_ADDR_EQ(c->my_macs[0].etheraddr, eh->ether_dhost)) return BDG_LOCAL; } /* * Look for a possible destination in table */ bt = &c->ht[HASH_FN(eh->ether_dhost)]; if (bt->name && ETHER_ADDR_EQ(bt->etheraddr, eh->ether_dhost)) return bt->name; else return BDG_UNKNOWN; } /** * bridge_in() is invoked to perform bridging decision on input packets. * * On Input: * eh Ethernet header of the incoming packet. * ifp interface the packet is coming from. * * On Return: destination of packet, one of * BDG_BCAST broadcast * BDG_MCAST multicast * BDG_LOCAL is only for a local address (do not forward) * BDG_DROP drop the packet * ifp ifp of the destination interface. * * Forwarding is not done directly to give a chance to some drivers * to fetch more of the packet, or simply drop it completely. */ static struct ifnet * bridge_in(struct ifnet *ifp, struct ether_header *eh) { int index; struct ifnet *dst, *old; bdg_hash_table *bt; /* location in hash table */ int dropit = BDG_MUTED(ifp); /* * hash the source address */ BDG_LOCK(); index = HASH_FN(eh->ether_shost); bt = &BDG_CLUSTER(ifp)->ht[index]; bt->used = 1; old = bt->name; if (old) { /* the entry is valid */ if (!ETHER_ADDR_EQ(eh->ether_shost, bt->etheraddr)) { bdg_ipfw_colls++; bt->name = NULL; /* NB: will overwrite below */ } else if (old != ifp) { /* * Found a loop. Either a machine has moved, or there * is a misconfiguration/reconfiguration of the network. * First, do not forward this packet! * Record the relocation anyways; then, if loops persist, * suspect a reconfiguration and disable forwarding * from the old interface. */ bt->name = ifp; /* relocate address */ printf("-- loop (%d) %6D to %s from %s (%s)\n", bdg_loops, eh->ether_shost, ".", ifp->if_xname, old->if_xname, BDG_MUTED(old) ? 
"muted":"active"); dropit = 1; if (!BDG_MUTED(old)) { if (bdg_loops++ > 10) BDG_MUTE(old); } } } /* * now write the source address into the table */ if (bt->name == NULL) { DPRINTF(("%s: new addr %6D at %d for %s\n", __func__, eh->ether_shost, ".", index, ifp->if_xname)); ETHER_ADDR_COPY(bt->etheraddr, eh->ether_shost); bt->name = ifp; } dst = bridge_dst_lookup(eh, BDG_CLUSTER(ifp)); BDG_UNLOCK(); /* * bridge_dst_lookup can return the following values: * BDG_BCAST, BDG_MCAST, BDG_LOCAL, BDG_UNKNOWN, BDG_DROP, ifp. * For muted interfaces, or when we detect a loop, the first 3 are * changed in BDG_LOCAL (we still listen to incoming traffic), * and others to BDG_DROP (no use for the local host). * Also, for incoming packets, ifp is changed to BDG_DROP if ifp == src. * These changes are not necessary for outgoing packets from ether_output(). */ BDG_STAT(ifp, BDG_IN); switch ((uintptr_t)dst) { case (uintptr_t)BDG_BCAST: case (uintptr_t)BDG_MCAST: case (uintptr_t)BDG_LOCAL: case (uintptr_t)BDG_UNKNOWN: case (uintptr_t)BDG_DROP: BDG_STAT(ifp, dst); break; default: if (dst == ifp || dropit) BDG_STAT(ifp, BDG_DROP); else BDG_STAT(ifp, BDG_FORWARD); break; } if (dropit) { if (dst == BDG_BCAST || dst == BDG_MCAST || dst == BDG_LOCAL) dst = BDG_LOCAL; else dst = BDG_DROP; } else { if (dst == ifp) dst = BDG_DROP; } DPRINTF(("%s: %6D ->%6D ty 0x%04x dst %s\n", __func__, eh->ether_shost, ".", eh->ether_dhost, ".", ntohs(eh->ether_type), (dst <= BDG_FORWARD) ? bdg_dst_names[(uintptr_t)dst] : dst->if_xname)); return dst; } /* * Return 1 if it's ok to send a packet out the specified interface. * The interface must be: * used for bridging, * not muted, * not full, * up and running, * not the source interface, and * belong to the same cluster as the 'real_dst'. 
*/
static __inline int
bridge_ifok(struct ifnet *ifp, struct ifnet *src, struct ifnet *dst)
{
	return (BDG_USED(ifp) &&
	    !BDG_MUTED(ifp) &&
	    !_IF_QFULL(&ifp->if_snd) &&
	    (ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING) &&
	    ifp != src &&
	    BDG_SAMECLUSTER(ifp, dst));
}

/*
 * Forward a packet to dst -- which can be a single interface or
 * an entire cluster. The src port and muted interfaces are excluded.
 *
 * If src == NULL, the pkt comes from ether_output, and dst is the real
 * interface the packet is originally sent to. In this case, we must forward
 * it to the whole cluster.
 * We never call bdg_forward from ether_output on interfaces which are
 * not part of a cluster.
 *
 * If possible (i.e. we can determine that the caller does not need
 * a copy), the packet is consumed here, and bdg_forward returns NULL.
 * Otherwise, a pointer to a copy of the packet is returned.
 */
static struct mbuf *
bdg_forward(struct mbuf *m0, struct ifnet *dst)
{
/*
 * Re-prepend the Ethernet header stripped before filtering.  The filters
 * may have replaced the mbuf chain, so restore from save_eh unless the
 * header ended up back at its original location (counted in bdg_predict).
 * NB: expands to a 'return NULL' on allocation failure.
 */
#define EH_RESTORE(_m) do { \
	M_PREPEND((_m), ETHER_HDR_LEN, M_DONTWAIT); \
	if ((_m) == NULL) { \
		bdg_dropped++; \
		return NULL; \
	} \
	if (eh != mtod((_m), struct ether_header *)) \
		bcopy(&save_eh, mtod((_m), struct ether_header *), ETHER_HDR_LEN); \
	else \
		bdg_predict++; \
} while (0);
	struct ether_header *eh;
	struct ifnet *src;
	struct ifnet *ifp, *last;
	int shared = bdg_copy;		/* someone else is using the mbuf */
	struct ifnet *real_dst = dst;	/* real dst from ether_output */
	struct ip_fw_args args;
	struct ether_header save_eh;
	struct mbuf *m;

	DDB(quad_t ticks; ticks = rdtsc();)

	args.rule = ip_dn_claim_rule(m0);
	if (args.rule)
		shared = 0;		/* For sure this is our own mbuf. */
	else
		bdg_thru++;		/* count 1st time through bdg_forward */

	/*
	 * The packet arrives with the Ethernet header at the front.
	 */
	eh = mtod(m0, struct ether_header *);

	src = m0->m_pkthdr.rcvif;
	if (src == NULL) {		/* packet from ether_output */
		BDG_LOCK();
		dst = bridge_dst_lookup(eh, BDG_CLUSTER(real_dst));
		BDG_UNLOCK();
	}

	if (dst == BDG_DROP) {		/* this should not happen */
		printf("xx bdg_forward for BDG_DROP\n");
		m_freem(m0);
		bdg_dropped++;
		return NULL;
	}
	if (dst == BDG_LOCAL) {		/* this should not happen as well */
		printf("xx ouch, bdg_forward for local pkt\n");
		return m0;
	}
	if (dst == BDG_BCAST || dst == BDG_MCAST) {
		/* need a copy for the local stack */
		shared = 1;
	}

	/*
	 * Do filtering in a very similar way to what is done in ip_output.
	 * Only if firewall is loaded, enabled, and the packet is not
	 * from ether_output() (src==NULL, or we would filter it twice).
	 * Additional restrictions may apply e.g. non-IP, short packets,
	 * and pkts already gone through a pipe.
	 */
	if (src != NULL && (
#ifdef PFIL_HOOKS
	    (inet_pfil_hook.ph_busy_count >= 0 && bdg_ipf != 0) ||
#endif
	    (IPFW_LOADED && bdg_ipfw != 0))) {

		int i;

		if (args.rule != NULL && fw_one_pass)
			goto forward; /* packet already partially processed */
		/*
		 * i need some amt of data to be contiguous, and in case others need
		 * the packet (shared==1) also better be in the first mbuf.
		 */
		i = min(m0->m_pkthdr.len, max_protohdr) ;
		if (shared || m0->m_len < i) {
			m0 = m_pullup(m0, i);
			if (m0 == NULL) {
				printf("%s: m_pullup failed\n", __func__); /* XXXDPRINTF*/
				bdg_dropped++;
				return NULL;
			}
			eh = mtod(m0, struct ether_header *);
		}

		/*
		 * Processing below expects the Ethernet header is stripped.
		 * Furthermore, the mbuf chain might be replaced at various
		 * places. To deal with this we copy the header to a temporary
		 * location, strip the header, and restore it as needed.
		 */
		bcopy(eh, &save_eh, ETHER_HDR_LEN);	/* local copy for restore */
		m_adj(m0, ETHER_HDR_LEN);		/* temporarily strip header */

#ifdef PFIL_HOOKS
		/*
		 * NetBSD-style generic packet filter, pfil(9), hooks.
		 * Enables ipf(8) in bridging.
		 */
		if (inet_pfil_hook.ph_busy_count >= 0 &&
		    m0->m_pkthdr.len >= sizeof(struct ip) &&
		    ntohs(save_eh.ether_type) == ETHERTYPE_IP) {
			/*
			 * before calling the firewall, swap fields the same as IP does.
			 * here we assume the pkt is an IP one and the header is contiguous
			 */
			struct ip *ip = mtod(m0, struct ip *);

			ip->ip_len = ntohs(ip->ip_len);
			ip->ip_off = ntohs(ip->ip_off);

			if (pfil_run_hooks(&inet_pfil_hook, &m0, src, PFIL_IN) != 0) {
				/* NB: hook should consume packet */
				return NULL;
			}
			if (m0 == NULL)			/* consumed by filter */
				return m0;
			/*
			 * If we get here, the firewall has passed the pkt, but the mbuf
			 * pointer might have changed. Restore ip and the fields ntohs()'d.
			 */
			ip = mtod(m0, struct ip *);
			ip->ip_len = htons(ip->ip_len);
			ip->ip_off = htons(ip->ip_off);
		}
#endif /* PFIL_HOOKS */

		/*
		 * Prepare arguments and call the firewall.
		 */
		if (!IPFW_LOADED || bdg_ipfw == 0) {
			EH_RESTORE(m0);	/* restore Ethernet header */
			goto forward;	/* not using ipfw, accept the packet */
		}

		/*
		 * XXX The following code is very similar to the one in
		 * if_ethersubr.c:ether_ipfw_chk()
		 */
		args.m = m0;		/* the packet we are looking at	*/
		args.oif = NULL;	/* this is an input packet	*/
		args.next_hop = NULL;	/* we do not support forward yet */
		args.eh = &save_eh;	/* MAC header for bridged/MAC packets */
		i = ip_fw_chk_ptr(&args);
		m0 = args.m;		/* in case the firewall used the mbuf */

		if (m0 != NULL)
			EH_RESTORE(m0);	/* restore Ethernet header */

		if ( (i & IP_FW_PORT_DENY_FLAG) || m0 == NULL) /* drop */
			return m0;

		if (i == 0) /* a PASS rule.  */
			goto forward;
		if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG)) {
			/*
			 * Pass the pkt to dummynet, which consumes it.
			 * If shared, make a copy and keep the original.
			 */
			if (shared) {
				m = m_copypacket(m0, M_DONTWAIT);
				if (m == NULL) {	/* copy failed, give up */
					bdg_dropped++;
					return NULL;
				}
			} else {
				m = m0 ; /* pass the original to dummynet */
				m0 = NULL ; /* and nothing back to the caller */
			}

			args.oif = real_dst;
			ip_dn_io_ptr(m, (i & 0xffff),DN_TO_BDG_FWD, &args);
			return m0;
		}
		/*
		 * XXX at some point, add support for divert/forward actions.
		 * If none of the above matches, we have to drop the packet.
		 */
		bdg_ipfw_drops++;
		return m0;
	}
forward:
	/*
	 * Again, bring up the headers in case of shared bufs to avoid
	 * corruptions in the future.
	 */
	if (shared) {
		int i = min(m0->m_pkthdr.len, max_protohdr);

		m0 = m_pullup(m0, i);
		if (m0 == NULL) {
			bdg_dropped++;
			return NULL;
		}
		/* NB: eh is not used below; no need to recalculate it */
	}

	/*
	 * now real_dst is used to determine the cluster where to forward.
	 * For packets coming from ether_input, this is the one of the 'src'
	 * interface, whereas for locally generated packets (src==NULL) it
	 * is the cluster of the original destination interface, which
	 * was already saved into real_dst.
	 */
	if (src != NULL)
		real_dst = src;

	last = NULL;
	if (dst == BDG_BCAST || dst == BDG_MCAST || dst == BDG_UNKNOWN) {
		/*
		 * Scan all ports and send copies to all but the last.
		 */
		IFNET_RLOCK();		/* XXX replace with generation # */
		TAILQ_FOREACH(ifp, &ifnet, if_link) {
			if (bridge_ifok(ifp, src, real_dst)) {
				if (last) {
					/*
					 * At this point we know two interfaces need a copy
					 * of the packet (last + ifp) so we must create a
					 * copy to handoff to last.
					 */
					m = m_copypacket(m0, M_DONTWAIT);
					if (m == NULL) {
						IFNET_RUNLOCK();
						printf("%s: , m_copypacket failed!\n", __func__);
						bdg_dropped++;
						return m0;	/* the original is still there... */
					}
					if (IF_HANDOFF(&last->if_snd, m, last))
						BDG_STAT(last, BDG_OUT);
					else
						bdg_dropped++;
				}
				last = ifp;
			}
		}
		IFNET_RUNLOCK();
	} else {
		if (bridge_ifok(dst, src, real_dst))
			last = dst;
	}
	if (last) {
		if (shared) {			/* need to copy */
			m = m_copypacket(m0, M_DONTWAIT);
			if (m == NULL) {
				printf("%s: sorry, m_copypacket failed!\n", __func__);
				bdg_dropped++ ;
				return m0;	/* the original is still there... */
			}
		} else {			/* consume original */
			m = m0, m0 = NULL;
		}
		if (IF_HANDOFF(&last->if_snd, m, last))
			BDG_STAT(last, BDG_OUT);
		else
			bdg_dropped++;
	}

	DDB(bdg_fw_ticks += (u_long)(rdtsc() - ticks) ; bdg_fw_count++ ;
	    if (bdg_fw_count != 0)
		bdg_fw_avg = bdg_fw_ticks/bdg_fw_count; )
	return m0;
#undef EH_RESTORE
}

/*
 * initialization of bridge code.
 * Allocates the per-interface softc array, installs the bridge hooks
 * used by ether_input/ether_output, and starts the aging callout.
 */
static int
bdginit(void)
{
	if (bootverbose)
		printf("BRIDGE %s loaded\n", bridge_version);

	ifp2sc = malloc(BDG_MAX_PORTS * sizeof(struct bdg_softc),
	    M_IFADDR, M_WAITOK | M_ZERO );
	if (ifp2sc == NULL)
		return ENOMEM;

	BDG_LOCK_INIT();

	n_clusters = 0;
	clusters = NULL;
	do_bridge = 0;

	bzero(&bdg_stats, sizeof(bdg_stats));

	bridge_in_ptr = bridge_in;
	bdg_forward_ptr = bdg_forward;
	bdgtakeifaces_ptr = reconfigure_bridge;

	bdgtakeifaces_ptr();		/* XXX does this do anything? */

	callout_init(&bdg_callout, CALLOUT_MPSAFE);
	bdg_timeout(0);
	return 0 ;
}

/*
 * Teardown on module unload: unhook from the ethernet layer, stop the
 * callout, disable bridging and release all state.
 * NOTE(review): the lock is taken but BDG_LOCK_DESTROY() is reached while
 * it is still held — presumably relying on mtx_destroy() semantics for
 * owned mutexes; confirm against mutex(9).
 */
static void
bdgdestroy(void)
{
	bridge_in_ptr = NULL;
	bdg_forward_ptr = NULL;
	bdgtakeifaces_ptr = NULL;

	callout_stop(&bdg_callout);

	BDG_LOCK();
	bridge_off();

	if (ifp2sc) {
		free(ifp2sc, M_IFADDR);
		ifp2sc = NULL;
	}
	BDG_LOCK_DESTROY();
}

/*
 * initialization code, both for static and dynamic loading.
*/ static int bridge_modevent(module_t mod, int type, void *unused) { int err; switch (type) { case MOD_LOAD: if (BDG_LOADED) err = EEXIST; else err = bdginit(); break; case MOD_UNLOAD: do_bridge = 0; bdgdestroy(); err = 0; break; default: err = EINVAL; break; } return err; } static moduledata_t bridge_mod = { "bridge", bridge_modevent, 0 }; DECLARE_MODULE(bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(bridge, 1); Index: head/sys/net/if_arcsubr.c =================================================================== --- head/sys/net/if_arcsubr.c (revision 129879) +++ head/sys/net/if_arcsubr.c (revision 129880) @@ -1,850 +1,851 @@ /* $NetBSD: if_arcsubr.c,v 1.36 2001/06/14 05:44:23 itojun Exp $ */ /* $FreeBSD$ */ /* * Copyright (c) 1994, 1995 Ignatios Souvatzis * Copyright (c) 1982, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: NetBSD: if_ethersubr.c,v 1.9 1994/06/29 06:36:11 cgd Exp * @(#)if_ethersubr.c 8.1 (Berkeley) 6/10/93 * */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipx.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(INET) || defined(INET6) #include #include #include #endif #ifdef INET6 #include #endif #ifdef IPX #include #include #endif MODULE_VERSION(arcnet, 1); #define ARCNET_ALLOW_BROKEN_ARP static struct mbuf *arc_defrag(struct ifnet *, struct mbuf *); static int arc_resolvemulti(struct ifnet *, struct sockaddr **, struct sockaddr *); u_int8_t arcbroadcastaddr = 0; #define ARC_LLADDR(ifp) (*(u_int8_t *)IF_LLADDR(ifp)) #define senderr(e) { error = (e); goto bad;} #define SIN(s) ((struct sockaddr_in *)s) #define SIPX(s) ((struct sockaddr_ipx *)s) /* * ARCnet output routine. * Encapsulate a packet of type family for the local net. * Assumes that ifp is actually pointer to arccom structure. 
int
arc_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
	struct arc_header	*ah;
	int			error;
	u_int8_t		atype, adst;
	int			loop_copy = 0;	/* -1 = never loop back a copy */
	int			isphds;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
		return(ENETDOWN); /* m, m1 aren't initialized yet */

	error = 0;

	/* Map the address family to an ARCnet type and destination address. */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:

		/*
		 * For now, use the simple IP addr -> ARCnet addr mapping
		 */
		if (m->m_flags & (M_BCAST|M_MCAST))
			adst = arcbroadcastaddr; /* ARCnet broadcast address */
		else if (ifp->if_flags & IFF_NOARP)
			adst = ntohl(SIN(dst)->sin_addr.s_addr) & 0xFF;
		else {
			error = arpresolve(ifp, rt0, m, dst, &adst);
			if (error)
				return (error == EWOULDBLOCK ? 0 : error);
		}

		atype = (ifp->if_flags & IFF_LINK0) ?
			ARCTYPE_IP_OLD : ARCTYPE_IP;
		break;
	case AF_ARP:
	{
		struct arphdr *ah;
		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_ARCNET);

		loop_copy = -1; /* if this is for us, don't do it */

		switch(ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			atype = ARCTYPE_REVARP;
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			atype = ARCTYPE_ARP;
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, &adst, ARC_ADDR_LEN);
		else
			bcopy(ar_tha(ah), &adst, ARC_ADDR_LEN);
	}
	break;
#endif
#ifdef INET6
	case AF_INET6:
		error = nd6_storelladdr(ifp, rt0, m, dst, (u_char *)&adst);
		if (error)
			return (error);
		atype = ARCTYPE_INET6;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		adst = SIPX(dst)->sipx_addr.x_host.c_host[5];
		atype = ARCTYPE_IPX;
		if (adst == 0xff)
			adst = arcbroadcastaddr;
		break;
#endif

	case AF_UNSPEC:
		/* Raw mode: caller supplies a prebuilt ARCnet header in sa_data. */
		loop_copy = -1;
		ah = (struct arc_header *)dst->sa_data;
		adst = ah->arc_dhost;
		atype = ah->arc_type;

		if (atype == ARCTYPE_ARP) {
			atype = (ifp->if_flags & IFF_LINK0) ?
			    ARCTYPE_ARP_OLD: ARCTYPE_ARP;

#ifdef ARCNET_ALLOW_BROKEN_ARP
			/*
			 * XXX It's not clear per RFC826 if this is needed, but
			 * "assigned numbers" say this is wrong.
			 * However, e.g., AmiTCP 3.0Beta used it... we make this
			 * switchable for emergency cases. Not perfect, but...
			 */
			if (ifp->if_flags & IFF_LINK2)
				mtod(m, struct arphdr *)->ar_pro = atype - 1;
#endif
		}
		break;

	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		senderr(EAFNOSUPPORT);
	}

	/* Prepend the link-level header (new or old style) and fill it in. */
	isphds = arc_isphds(atype);
	M_PREPEND(m, isphds ? ARC_HDRNEWLEN : ARC_HDRLEN, M_DONTWAIT);
	if (m == 0)
		senderr(ENOBUFS);
	ah = mtod(m, struct arc_header *);
	ah->arc_type = atype;
	ah->arc_dhost = adst;
	ah->arc_shost = ARC_LLADDR(ifp);
	if (isphds) {
		ah->arc_flag = 0;
		ah->arc_seqid = 0;
	}

	/* On simplex interfaces, loop back broadcasts / self-addressed pkts. */
	if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) {
		if ((m->m_flags & M_BCAST) || (loop_copy > 0)) {
			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

			(void) if_simloop(ifp, n, dst->sa_family, ARC_HDRLEN);
		} else if (ah->arc_dhost == ah->arc_shost) {
			(void) if_simloop(ifp, m, dst->sa_family, ARC_HDRLEN);
			return (0);		/* XXX */
		}
	}

	BPF_MTAP(ifp, m);

	if (!IF_HANDOFF(&ifp->if_snd, m, ifp)) {
		m = 0;
		senderr(ENOBUFS);
	}
	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}

/* Reset the per-interface fragmentation state (no fragment in progress). */
void
arc_frag_init(ifp)
	struct ifnet *ifp;
{
	struct arccom *ac;

	ac = (struct arccom *)ifp;
	ac->curr_frag = 0;
}

/*
 * Dequeue the next frame (or next fragment of the current frame) to put
 * on the wire.  PHDS packets larger than ARC_MAX_DATA are split into
 * fragments carrying the flag/seqid continuation protocol; frames whose
 * length falls in the "forbidden" range get the exception header.
 * Returns NULL when nothing is ready.
 */
struct mbuf *
arc_frag_next(ifp)
	struct ifnet *ifp;
{
	struct arccom *ac;
	struct mbuf *m;
	struct arc_header *ah;

	ac = (struct arccom *)ifp;
	if ((m = ac->curr_frag) == 0) {
		int tfrags;

		/* dequeue new packet */
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			return 0;

		ah = mtod(m, struct arc_header *);
		if (!arc_isphds(ah->arc_type))
			return m;	/* old-style frames are never fragmented */

		++ac->ac_seqid;		/* make the seqid unique */
		tfrags = (m->m_pkthdr.len + ARC_MAX_DATA - 1) / ARC_MAX_DATA;
		ac->fsflag = 2 * tfrags - 3;
		ac->sflag = 0;
		ac->rsflag = ac->fsflag;
		ac->arc_dhost = ah->arc_dhost;
		ac->arc_shost = ah->arc_shost;
		ac->arc_type = ah->arc_type;

		m_adj(m, ARC_HDRNEWLEN);
		ac->curr_frag = m;
	}

	/* split out next fragment and return it */
	if (ac->sflag < ac->fsflag) {
		/* we CAN'T have short packets here */
		ac->curr_frag = m_split(m, ARC_MAX_DATA, M_DONTWAIT);
		if (ac->curr_frag == 0) {
			m_freem(m);
			return 0;
		}

		M_PREPEND(m, ARC_HDRNEWLEN, M_DONTWAIT);
		if (m == 0) {
			m_freem(ac->curr_frag);
			ac->curr_frag = 0;
			return 0;
		}

		ah = mtod(m, struct arc_header *);
		ah->arc_flag = ac->rsflag;
		ah->arc_seqid = ac->ac_seqid;

		ac->sflag += 2;
		ac->rsflag = ac->sflag;
	} else if ((m->m_pkthdr.len >=
	    ARC_MIN_FORBID_LEN - ARC_HDRNEWLEN + 2) &&
	    (m->m_pkthdr.len <=
	    ARC_MAX_FORBID_LEN - ARC_HDRNEWLEN + 2)) {
		/* last fragment lands in the forbidden length range:
		 * use the exception (padded) header format */
		ac->curr_frag = 0;

		M_PREPEND(m, ARC_HDRNEWLEN_EXC, M_DONTWAIT);
		if (m == 0)
			return 0;

		ah = mtod(m, struct arc_header *);
		ah->arc_flag = 0xFF;
		ah->arc_seqid = 0xFFFF;
		ah->arc_type2 = ac->arc_type;
		ah->arc_flag2 = ac->sflag;
		ah->arc_seqid2 = ac->ac_seqid;
	} else {
		/* ordinary last fragment */
		ac->curr_frag = 0;

		M_PREPEND(m, ARC_HDRNEWLEN, M_DONTWAIT);
		if (m == 0)
			return 0;

		ah = mtod(m, struct arc_header *);
		ah->arc_flag = ac->sflag;
		ah->arc_seqid = ac->ac_seqid;
	}

	ah->arc_dhost = ac->arc_dhost;
	ah->arc_shost = ac->arc_shost;
	ah->arc_type = ac->arc_type;

	return m;
}

/*
 * Defragmenter. Returns mbuf if last packet found, else
 * NULL. frees imcoming mbuf as necessary.
 */
static __inline struct mbuf *
arc_defrag(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct arc_header *ah, *ah1;
	struct arccom *ac;
	struct ac_frag *af;
	struct mbuf *m1;
	char *s;
	int newflen;
	u_char src,dst,typ;

	ac = (struct arccom *)ifp;

	/* make the header contiguous before inspecting it */
	if (m->m_len < ARC_HDRNEWLEN) {
		m = m_pullup(m, ARC_HDRNEWLEN);
		if (m == NULL) {
			++ifp->if_ierrors;
			return NULL;
		}
	}

	ah = mtod(m, struct arc_header *);
	typ = ah->arc_type;

	if (!arc_isphds(typ))
		return m;	/* non-PHDS frames are never fragmented */

	src = ah->arc_shost;
	dst = ah->arc_dhost;

	if (ah->arc_flag == 0xff) {
		/* exception header: strip the 4 padding bytes */
		m_adj(m, 4);

		if (m->m_len < ARC_HDRNEWLEN) {
			m = m_pullup(m, ARC_HDRNEWLEN);
			if (m == NULL) {
				++ifp->if_ierrors;
				return NULL;
			}
		}

		ah = mtod(m, struct arc_header *);
	}

	/* one reassembly slot per source address */
	af = &ac->ac_fragtab[src];
	m1 = af->af_packet;
	s = "debug code error";

	if (ah->arc_flag & 1) {
		/*
		 * first fragment. We always initialize, which is
		 * about the right thing to do, as we only want to
		 * accept one fragmented packet per src at a time.
		 */
		if (m1 != NULL)
			m_freem(m1);

		af->af_packet = m;
		m1 = m;
		af->af_maxflag = ah->arc_flag;
		af->af_lastseen = 0;
		af->af_seqid = ah->arc_seqid;

		return NULL;
		/* notreached */
	} else {
		/* check for unfragmented packet */
		if (ah->arc_flag == 0)
			return m;

		/* do we have a first packet from that src? */
		if (m1 == NULL) {
			s = "no first frag";
			goto outofseq;
		}

		ah1 = mtod(m1, struct arc_header *);

		if (ah->arc_seqid != ah1->arc_seqid) {
			s = "seqid differs";
			goto outofseq;
		}

		if (typ != ah1->arc_type) {
			s = "type differs";
			goto outofseq;
		}

		if (dst != ah1->arc_dhost) {
			s = "dest host differs";
			goto outofseq;
		}

		/* typ, seqid and dst are ok here. */

		if (ah->arc_flag == af->af_lastseen) {
			/* duplicate of the fragment we just got: drop it */
			m_freem(m);
			return NULL;
		}

		if (ah->arc_flag == af->af_lastseen + 2) {
			/* ok, this is next fragment */
			af->af_lastseen = ah->arc_flag;
			m_adj(m,ARC_HDRNEWLEN);

			/*
			 * m_cat might free the first mbuf (with pkthdr)
			 * in 2nd chain; therefore:
			 */

			newflen = m->m_pkthdr.len;

			m_cat(m1,m);

			m1->m_pkthdr.len += newflen;

			/* is it the last one? */
			if (af->af_lastseen > af->af_maxflag) {
				af->af_packet = NULL;
				return(m1);
			} else
				return NULL;
		}
		s = "other reason";
		/* if all else fails, it is out of sequence, too */
	}
outofseq:
	/* reassembly failed: discard both the partial packet and this frag */
	if (m1) {
		m_freem(m1);
		af->af_packet = NULL;
	}

	if (m)
		m_freem(m);

	log(LOG_INFO,"%s: got out of seq. packet: %s\n",
	    ifp->if_xname, s);

	return NULL;
}

/*
 * return 1 if Packet Header Definition Standard, else 0.
 * For now: old IP, old ARP aren't obviously. Lacking correct information,
 * we guess that besides new IP and new ARP also IPX and APPLETALK are PHDS.
 * (Apple and Novell corporations were involved, among others, in PHDS work).
 * Easiest is to assume that everybody else uses that, too.
 */
int
arc_isphds(type)
	u_int8_t type;
{
	return (type != ARCTYPE_IP_OLD &&
		type != ARCTYPE_ARP_OLD &&
		type != ARCTYPE_DIAGNOSE);
}

/*
 * Process a received Arcnet packet;
 * the packet is in the mbuf chain m with
 * the ARCnet header.
 */
*/
/*
 * arc_input: input handler for a received ARCnet frame.
 *
 * The mbuf chain 'm' arrives with the ARCnet link-level header still in
 * place.  The frame is dropped (and freed) when the interface is down,
 * when it is not addressed to us (unless promiscuous), or when its type
 * is not handled; otherwise the link header is trimmed and the packet is
 * handed to the matching protocol via netisr_dispatch().  On every exit
 * path the mbuf is either consumed by a downstream layer or freed here.
 */
void
arc_input(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct arc_header *ah;
	int isr;		/* netisr queue chosen from the frame type */
	u_int8_t atype;		/* ARCnet type byte from the link header */

	/* Interface administratively down: discard silently. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}

	/*
	 * possibly defragment: arc_defrag() returns NULL both on error and
	 * while a multi-fragment packet is still being reassembled, so a
	 * NULL here is not necessarily a failure.
	 */
	m = arc_defrag(ifp, m);
	if (m == NULL)
		return;

	/* Tap BPF listeners before any address filtering. */
	BPF_MTAP(ifp, m);

	ah = mtod(m, struct arc_header *);

	/* does this belong to us? */
	if ((ifp->if_flags & IFF_PROMISC) == 0
	    && ah->arc_dhost != arcbroadcastaddr
	    && ah->arc_dhost != ARC_LLADDR(ifp)) {
		m_freem(m);
		return;
	}

	ifp->if_ibytes += m->m_pkthdr.len;

	/* Mark link-level broadcasts; ARCnet has no separate multicast. */
	if (ah->arc_dhost == arcbroadcastaddr) {
		m->m_flags |= M_BCAST|M_MCAST;
		ifp->if_imcasts++;
	}

	atype = ah->arc_type;
	switch (atype) {
#ifdef INET
	case ARCTYPE_IP:
		/* New-style (PHDS) encapsulation: longer link header. */
		m_adj(m, ARC_HDRNEWLEN);
		/* ip_fastforward() consumes the mbuf when it forwards. */
		if (ip_fastforward(m))
			return;
		isr = NETISR_IP;
		break;

	case ARCTYPE_IP_OLD:
		/* RFC 1051 (old) encapsulation: short link header. */
		m_adj(m, ARC_HDRLEN);
		if (ip_fastforward(m))
			return;
		isr = NETISR_IP;
		break;

	case ARCTYPE_ARP:
		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */
			m_freem(m);
			return;
		}
		m_adj(m, ARC_HDRNEWLEN);
		isr = NETISR_ARP;
#ifdef ARCNET_ALLOW_BROKEN_ARP
		/* Patch up peers that put the Ethernet proto in ar_pro. */
		mtod(m, struct arphdr *)->ar_pro = htons(ETHERTYPE_IP);
#endif
		break;

	case ARCTYPE_ARP_OLD:
		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */
			m_freem(m);
			return;
		}
		m_adj(m, ARC_HDRLEN);
		isr = NETISR_ARP;
#ifdef ARCNET_ALLOW_BROKEN_ARP
		mtod(m, struct arphdr *)->ar_pro = htons(ETHERTYPE_IP);
#endif
		break;
#endif
#ifdef INET6
	case ARCTYPE_INET6:
		m_adj(m, ARC_HDRNEWLEN);
		isr = NETISR_IPV6;
		break;
#endif
#ifdef IPX
	case ARCTYPE_IPX:
		m_adj(m, ARC_HDRNEWLEN);
		isr = NETISR_IPX;
		break;
#endif
	default:
		/* Unknown type: count nothing, just drop. */
		m_freem(m);
		return;
	}

	netisr_dispatch(isr, m);
}

/*
 * Register (new) link level address.
*/ void arc_storelladdr(ifp, lla) struct ifnet *ifp; u_int8_t lla; { ARC_LLADDR(ifp) = lla; } /* * Perform common duties while attaching to interface list */ void arc_ifattach(ifp, lla) struct ifnet *ifp; u_int8_t lla; { struct ifaddr *ifa; struct sockaddr_dl *sdl; struct arccom *ac; if_attach(ifp); ifp->if_type = IFT_ARCNET; ifp->if_addrlen = 1; ifp->if_hdrlen = ARC_HDRLEN; ifp->if_mtu = 1500; ifp->if_resolvemulti = arc_resolvemulti; if (ifp->if_baudrate == 0) ifp->if_baudrate = 2500000; #if __FreeBSD_version < 500000 ifa = ifnet_addrs[ifp->if_index - 1]; #else ifa = ifaddr_byindex(ifp->if_index); #endif KASSERT(ifa != NULL, ("%s: no lladdr!\n", __FUNCTION__)); sdl = (struct sockaddr_dl *)ifa->ifa_addr; sdl->sdl_type = IFT_ARCNET; sdl->sdl_alen = ifp->if_addrlen; if (ifp->if_flags & IFF_BROADCAST) ifp->if_flags |= IFF_MULTICAST|IFF_ALLMULTI; ac = (struct arccom *)ifp; ac->ac_seqid = (time_second) & 0xFFFF; /* try to make seqid unique */ if (lla == 0) { /* XXX this message isn't entirely clear, to me -- cgd */ log(LOG_ERR,"%s: link address 0 reserved for broadcasts. 
Please change it and ifconfig %s down up\n", ifp->if_xname, ifp->if_xname); } arc_storelladdr(ifp, lla); ifp->if_broadcastaddr = &arcbroadcastaddr; bpfattach(ifp, DLT_ARCNET, ARC_HDRLEN); } void arc_ifdetach(ifp) struct ifnet *ifp; { bpfdetach(ifp); if_detach(ifp); } int arc_ioctl(ifp, command, data) struct ifnet *ifp; int command; caddr_t data; { struct ifaddr *ifa = (struct ifaddr *) data; struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch (command) { case SIOCSIFADDR: ifp->if_flags |= IFF_UP; switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: ifp->if_init(ifp->if_softc); /* before arpwhohas */ arp_ifinit(ifp, ifa); break; #endif #ifdef IPX /* * XXX This code is probably wrong */ case AF_IPX: { struct ipx_addr *ina = &(IA_SIPX(ifa)->sipx_addr); if (ipx_nullhost(*ina)) ina->x_host.c_host[5] = ARC_LLADDR(ifp); else arc_storelladdr(ifp, ina->x_host.c_host[5]); /* * Set new address */ ifp->if_init(ifp->if_softc); break; } #endif default: ifp->if_init(ifp->if_softc); break; } break; case SIOCGIFADDR: { struct sockaddr *sa; sa = (struct sockaddr *) &ifr->ifr_data; *(u_int8_t *)sa->sa_data = ARC_LLADDR(ifp); } break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifr == NULL) error = EAFNOSUPPORT; else { switch (ifr->ifr_addr.sa_family) { case AF_INET: case AF_INET6: error = 0; break; default: error = EAFNOSUPPORT; break; } } break; case SIOCSIFMTU: /* * Set the interface MTU. * mtu can't be larger than ARCMTU for RFC1051 * and can't be larger than ARC_PHDS_MTU */ if (((ifp->if_flags & IFF_LINK0) && ifr->ifr_mtu > ARCMTU) || ifr->ifr_mtu > ARC_PHDS_MAXMTU) error = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; } return (error); } /* based on ether_resolvemulti() */ int arc_resolvemulti(ifp, llsa, sa) struct ifnet *ifp; struct sockaddr **llsa; struct sockaddr *sa; { struct sockaddr_dl *sdl; struct sockaddr_in *sin; #ifdef INET6 struct sockaddr_in6 *sin6; #endif switch(sa->sa_family) { case AF_LINK: /* * No mapping needed. 
Just check that it's a valid MC address. */ sdl = (struct sockaddr_dl *)sa; if (*LLADDR(sdl) != arcbroadcastaddr) return EADDRNOTAVAIL; *llsa = 0; return 0; #ifdef INET case AF_INET: sin = (struct sockaddr_in *)sa; if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) return EADDRNOTAVAIL; MALLOC(sdl, struct sockaddr_dl *, sizeof *sdl, M_IFMADDR, M_ZERO); sdl->sdl_len = sizeof *sdl; sdl->sdl_family = AF_LINK; sdl->sdl_index = ifp->if_index; sdl->sdl_type = IFT_ARCNET; sdl->sdl_alen = ARC_ADDR_LEN; *LLADDR(sdl) = 0; *llsa = (struct sockaddr *)sdl; return 0; #endif #ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)sa; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* * An IP6 address of 0 means listen to all * of the Ethernet multicast address used for IP6. * (This is used for multicast routers.) */ ifp->if_flags |= IFF_ALLMULTI; *llsa = 0; return 0; } if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) return EADDRNOTAVAIL; MALLOC(sdl, struct sockaddr_dl *, sizeof *sdl, M_IFMADDR, M_ZERO); sdl->sdl_len = sizeof *sdl; sdl->sdl_family = AF_LINK; sdl->sdl_index = ifp->if_index; sdl->sdl_type = IFT_ARCNET; sdl->sdl_alen = ARC_ADDR_LEN; *LLADDR(sdl) = 0; *llsa = (struct sockaddr *)sdl; return 0; #endif default: /* * Well, the text isn't quite right, but it's the name * that counts... */ return EAFNOSUPPORT; } } Index: head/sys/net/if_faith.c =================================================================== --- head/sys/net/if_faith.c (revision 129879) +++ head/sys/net/if_faith.c (revision 129880) @@ -1,372 +1,373 @@ /* $KAME: if_faith.c,v 1.23 2001/12/17 13:55:29 sumikawa Exp $ */ /* * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * derived from * @(#)if_loop.c 8.1 (Berkeley) 6/10/93 * Id: if_loop.c,v 1.22 1996/06/19 16:24:10 wollman Exp */ /* * Loopback interface driver for protocol testing and timing. 
*/ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #ifdef INET6 #ifndef INET #include #endif #include #include #include #endif #include #define FAITHNAME "faith" struct faith_softc { struct ifnet sc_if; /* must be first */ LIST_ENTRY(faith_softc) sc_list; }; static int faithioctl(struct ifnet *, u_long, caddr_t); int faithoutput(struct ifnet *, struct mbuf *, struct sockaddr *, struct rtentry *); static void faithrtrequest(int, struct rtentry *, struct rt_addrinfo *); #ifdef INET6 static int faithprefix(struct in6_addr *); #endif static int faithmodevent(module_t, int, void *); static struct mtx faith_mtx; static MALLOC_DEFINE(M_FAITH, FAITHNAME, "Firewall Assisted Tunnel Interface"); static LIST_HEAD(, faith_softc) faith_softc_list; static int faith_clone_create(struct if_clone *, int); static void faith_clone_destroy(struct ifnet *); static void faith_destroy(struct faith_softc *); struct if_clone faith_cloner = IF_CLONE_INITIALIZER(FAITHNAME, faith_clone_create, faith_clone_destroy, 0, IF_MAXUNIT); #define FAITHMTU 1500 static int faithmodevent(mod, type, data) module_t mod; int type; void *data; { struct faith_softc *sc; switch (type) { case MOD_LOAD: mtx_init(&faith_mtx, "faith_mtx", NULL, MTX_DEF); LIST_INIT(&faith_softc_list); if_clone_attach(&faith_cloner); #ifdef INET6 faithprefix_p = faithprefix; #endif break; case MOD_UNLOAD: #ifdef INET6 faithprefix_p = NULL; #endif if_clone_detach(&faith_cloner); mtx_lock(&faith_mtx); while ((sc = LIST_FIRST(&faith_softc_list)) != NULL) { LIST_REMOVE(sc, sc_list); mtx_unlock(&faith_mtx); faith_destroy(sc); mtx_lock(&faith_mtx); } mtx_unlock(&faith_mtx); mtx_destroy(&faith_mtx); break; } return 0; } static moduledata_t faith_mod = { "if_faith", faithmodevent, 0 }; DECLARE_MODULE(if_faith, faith_mod, SI_SUB_PSEUDO, 
SI_ORDER_ANY); MODULE_VERSION(if_faith, 1); static int faith_clone_create(ifc, unit) struct if_clone *ifc; int unit; { struct faith_softc *sc; sc = malloc(sizeof(struct faith_softc), M_FAITH, M_WAITOK); bzero(sc, sizeof(struct faith_softc)); sc->sc_if.if_softc = sc; if_initname(&sc->sc_if, ifc->ifc_name, unit); sc->sc_if.if_mtu = FAITHMTU; /* Change to BROADCAST experimentaly to announce its prefix. */ sc->sc_if.if_flags = /* IFF_LOOPBACK */ IFF_BROADCAST | IFF_MULTICAST; sc->sc_if.if_ioctl = faithioctl; sc->sc_if.if_output = faithoutput; sc->sc_if.if_type = IFT_FAITH; sc->sc_if.if_hdrlen = 0; sc->sc_if.if_addrlen = 0; sc->sc_if.if_snd.ifq_maxlen = ifqmaxlen; if_attach(&sc->sc_if); bpfattach(&sc->sc_if, DLT_NULL, sizeof(u_int)); mtx_lock(&faith_mtx); LIST_INSERT_HEAD(&faith_softc_list, sc, sc_list); mtx_unlock(&faith_mtx); return (0); } static void faith_destroy(struct faith_softc *sc) { bpfdetach(&sc->sc_if); if_detach(&sc->sc_if); free(sc, M_FAITH); } static void faith_clone_destroy(ifp) struct ifnet *ifp; { struct faith_softc *sc = (void *) ifp; mtx_lock(&faith_mtx); LIST_REMOVE(sc, sc_list); mtx_unlock(&faith_mtx); faith_destroy(sc); } int faithoutput(ifp, m, dst, rt) struct ifnet *ifp; struct mbuf *m; struct sockaddr *dst; struct rtentry *rt; { int isr; M_ASSERTPKTHDR(m); /* BPF write needs to be handled specially */ if (dst->sa_family == AF_UNSPEC) { dst->sa_family = *(mtod(m, int *)); m->m_len -= sizeof(int); m->m_pkthdr.len -= sizeof(int); m->m_data += sizeof(int); } if (ifp->if_bpf) { u_int32_t af = dst->sa_family; bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m); } if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) { m_freem(m); return (rt->rt_flags & RTF_BLACKHOLE ? 0 : rt->rt_flags & RTF_HOST ? 
EHOSTUNREACH : ENETUNREACH); } ifp->if_opackets++; ifp->if_obytes += m->m_pkthdr.len; switch (dst->sa_family) { #ifdef INET case AF_INET: isr = NETISR_IP; break; #endif #ifdef INET6 case AF_INET6: isr = NETISR_IPV6; break; #endif default: m_freem(m); return EAFNOSUPPORT; } /* XXX do we need more sanity checks? */ m->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; ifp->if_ibytes += m->m_pkthdr.len; netisr_dispatch(isr, m); return (0); } /* ARGSUSED */ static void faithrtrequest(cmd, rt, info) int cmd; struct rtentry *rt; struct rt_addrinfo *info; { RT_LOCK_ASSERT(rt); if (rt) rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; } /* * Process an ioctl request. */ /* ARGSUSED */ static int faithioctl(ifp, cmd, data) struct ifnet *ifp; u_long cmd; caddr_t data; { struct ifaddr *ifa; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (cmd) { case SIOCSIFADDR: ifp->if_flags |= IFF_UP | IFF_RUNNING; ifa = (struct ifaddr *)data; ifa->ifa_rtrequest = faithrtrequest; /* * Everything else is done at a higher level. 
*/ break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifr == 0) { error = EAFNOSUPPORT; /* XXX */ break; } switch (ifr->ifr_addr.sa_family) { #ifdef INET case AF_INET: break; #endif #ifdef INET6 case AF_INET6: break; #endif default: error = EAFNOSUPPORT; break; } break; #ifdef SIOCSIFMTU case SIOCSIFMTU: ifp->if_mtu = ifr->ifr_mtu; break; #endif case SIOCSIFFLAGS: break; default: error = EINVAL; } return (error); } #ifdef INET6 /* * XXX could be slow * XXX could be layer violation to call sys/net from sys/netinet6 */ static int faithprefix(in6) struct in6_addr *in6; { struct rtentry *rt; struct sockaddr_in6 sin6; int ret; if (ip6_keepfaith == 0) return 0; bzero(&sin6, sizeof(sin6)); sin6.sin6_family = AF_INET6; sin6.sin6_len = sizeof(struct sockaddr_in6); sin6.sin6_addr = *in6; rt = rtalloc1((struct sockaddr *)&sin6, 0, 0UL); if (rt && rt->rt_ifp && rt->rt_ifp->if_type == IFT_FAITH && (rt->rt_ifp->if_flags & IFF_UP) != 0) ret = 1; else ret = 0; if (rt) RTFREE_LOCKED(rt); return ret; } #endif Index: head/sys/net/if_gif.c =================================================================== --- head/sys/net/if_gif.c (revision 129879) +++ head/sys/net/if_gif.c (revision 129880) @@ -1,887 +1,888 @@ /* $FreeBSD$ */ /* $KAME: if_gif.c,v 1.87 2001/10/19 08:50:27 itojun Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_mac.h" #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #endif /* INET */ #ifdef INET6 #ifndef INET #include #endif #include #include #include #include #include #endif /* INET6 */ #include #include #include #define GIFNAME "gif" /* * gif_mtx protects the global gif_softc_list. * XXX: Per-softc locking is still required. 
*/ static struct mtx gif_mtx; static MALLOC_DEFINE(M_GIF, "gif", "Generic Tunnel Interface"); static LIST_HEAD(, gif_softc) gif_softc_list; void (*ng_gif_input_p)(struct ifnet *ifp, struct mbuf **mp, int af); void (*ng_gif_input_orphan_p)(struct ifnet *ifp, struct mbuf *m, int af); void (*ng_gif_attach_p)(struct ifnet *ifp); void (*ng_gif_detach_p)(struct ifnet *ifp); static int gif_clone_create(struct if_clone *, int); static void gif_clone_destroy(struct ifnet *); struct if_clone gif_cloner = IF_CLONE_INITIALIZER("gif", gif_clone_create, gif_clone_destroy, 0, IF_MAXUNIT); static int gifmodevent(module_t, int, void *); SYSCTL_DECL(_net_link); SYSCTL_NODE(_net_link, IFT_GIF, gif, CTLFLAG_RW, 0, "Generic Tunnel Interface"); #ifndef MAX_GIF_NEST /* * This macro controls the default upper limitation on nesting of gif tunnels. * Since, setting a large value to this macro with a careless configuration * may introduce system crash, we don't allow any nestings by default. * If you need to configure nested gif tunnels, you can define this macro * in your kernel configuration file. However, if you do so, please be * careful to configure the tunnels so that it won't make a loop. */ #define MAX_GIF_NEST 1 #endif static int max_gif_nesting = MAX_GIF_NEST; SYSCTL_INT(_net_link_gif, OID_AUTO, max_nesting, CTLFLAG_RW, &max_gif_nesting, 0, "Max nested tunnels"); /* * By default, we disallow creation of multiple tunnels between the same * pair of addresses. Some applications require this functionality so * we allow control over this check here. 
*/ #ifdef XBONEHACK static int parallel_tunnels = 1; #else static int parallel_tunnels = 0; #endif SYSCTL_INT(_net_link_gif, OID_AUTO, parallel_tunnels, CTLFLAG_RW, ¶llel_tunnels, 0, "Allow parallel tunnels?"); static int gif_clone_create(ifc, unit) struct if_clone *ifc; int unit; { struct gif_softc *sc; sc = malloc (sizeof(struct gif_softc), M_GIF, M_WAITOK); bzero(sc, sizeof(struct gif_softc)); sc->gif_if.if_softc = sc; if_initname(&sc->gif_if, ifc->ifc_name, unit); gifattach0(sc); mtx_lock(&gif_mtx); LIST_INSERT_HEAD(&gif_softc_list, sc, gif_list); mtx_unlock(&gif_mtx); return (0); } void gifattach0(sc) struct gif_softc *sc; { sc->encap_cookie4 = sc->encap_cookie6 = NULL; sc->gif_if.if_addrlen = 0; sc->gif_if.if_mtu = GIF_MTU; sc->gif_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST; #if 0 /* turn off ingress filter */ sc->gif_if.if_flags |= IFF_LINK2; #endif sc->gif_if.if_ioctl = gif_ioctl; sc->gif_if.if_output = gif_output; sc->gif_if.if_type = IFT_GIF; sc->gif_if.if_snd.ifq_maxlen = IFQ_MAXLEN; if_attach(&sc->gif_if); bpfattach(&sc->gif_if, DLT_NULL, sizeof(u_int)); if (ng_gif_attach_p != NULL) (*ng_gif_attach_p)(&sc->gif_if); } static void gif_destroy(struct gif_softc *sc) { struct ifnet *ifp = &sc->gif_if; int err; gif_delete_tunnel(ifp); #ifdef INET6 if (sc->encap_cookie6 != NULL) { err = encap_detach(sc->encap_cookie6); KASSERT(err == 0, ("Unexpected error detaching encap_cookie6")); } #endif #ifdef INET if (sc->encap_cookie4 != NULL) { err = encap_detach(sc->encap_cookie4); KASSERT(err == 0, ("Unexpected error detaching encap_cookie4")); } #endif if (ng_gif_detach_p != NULL) (*ng_gif_detach_p)(ifp); bpfdetach(ifp); if_detach(ifp); free(sc, M_GIF); } static void gif_clone_destroy(ifp) struct ifnet *ifp; { struct gif_softc *sc = ifp->if_softc; mtx_lock(&gif_mtx); LIST_REMOVE(sc, gif_list); mtx_unlock(&gif_mtx); gif_destroy(sc); } static int gifmodevent(mod, type, data) module_t mod; int type; void *data; { struct gif_softc *sc; switch (type) { case MOD_LOAD: 
mtx_init(&gif_mtx, "gif_mtx", NULL, MTX_DEF); LIST_INIT(&gif_softc_list); if_clone_attach(&gif_cloner); #ifdef INET6 ip6_gif_hlim = GIF_HLIM; #endif break; case MOD_UNLOAD: if_clone_detach(&gif_cloner); mtx_lock(&gif_mtx); while ((sc = LIST_FIRST(&gif_softc_list)) != NULL) { LIST_REMOVE(sc, gif_list); mtx_unlock(&gif_mtx); gif_destroy(sc); mtx_lock(&gif_mtx); } mtx_unlock(&gif_mtx); mtx_destroy(&gif_mtx); #ifdef INET6 ip6_gif_hlim = 0; #endif break; } return 0; } static moduledata_t gif_mod = { "if_gif", gifmodevent, 0 }; DECLARE_MODULE(if_gif, gif_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(if_gif, 1); int gif_encapcheck(m, off, proto, arg) const struct mbuf *m; int off; int proto; void *arg; { struct ip ip; struct gif_softc *sc; sc = (struct gif_softc *)arg; if (sc == NULL) return 0; if ((sc->gif_if.if_flags & IFF_UP) == 0) return 0; /* no physical address */ if (!sc->gif_psrc || !sc->gif_pdst) return 0; switch (proto) { #ifdef INET case IPPROTO_IPV4: break; #endif #ifdef INET6 case IPPROTO_IPV6: break; #endif default: return 0; } /* Bail on short packets */ if (m->m_pkthdr.len < sizeof(ip)) return 0; m_copydata(m, 0, sizeof(ip), (caddr_t)&ip); switch (ip.ip_v) { #ifdef INET case 4: if (sc->gif_psrc->sa_family != AF_INET || sc->gif_pdst->sa_family != AF_INET) return 0; return gif_encapcheck4(m, off, proto, arg); #endif #ifdef INET6 case 6: if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) return 0; if (sc->gif_psrc->sa_family != AF_INET6 || sc->gif_pdst->sa_family != AF_INET6) return 0; return gif_encapcheck6(m, off, proto, arg); #endif default: return 0; } } int gif_output(ifp, m, dst, rt) struct ifnet *ifp; struct mbuf *m; struct sockaddr *dst; struct rtentry *rt; /* added in net2 */ { struct gif_softc *sc = (struct gif_softc*)ifp; struct m_tag *mtag; int error = 0; int gif_called; #ifdef MAC error = mac_check_ifnet_transmit(ifp, m); if (error) { m_freem(m); goto end; } #endif /* * gif may cause infinite recursion calls when misconfigured. 
* We'll prevent this by detecting loops. * * High nesting level may cause stack exhaustion. * We'll prevent this by introducing upper limit. */ gif_called = 1; mtag = m_tag_locate(m, MTAG_GIF, MTAG_GIF_CALLED, NULL); while (mtag != NULL) { if (*(struct ifnet **)(mtag + 1) == ifp) { log(LOG_NOTICE, "gif_output: loop detected on %s\n", (*(struct ifnet **)(mtag + 1))->if_xname); m_freem(m); error = EIO; /* is there better errno? */ goto end; } mtag = m_tag_locate(m, MTAG_GIF, MTAG_GIF_CALLED, mtag); gif_called++; } if (gif_called > max_gif_nesting) { log(LOG_NOTICE, "gif_output: recursively called too many times(%d)\n", gif_called); m_freem(m); error = EIO; /* is there better errno? */ goto end; } mtag = m_tag_alloc(MTAG_GIF, MTAG_GIF_CALLED, sizeof(struct ifnet *), M_NOWAIT); if (mtag == NULL) { m_freem(m); error = ENOMEM; goto end; } *(struct ifnet **)(mtag + 1) = ifp; m_tag_prepend(m, mtag); m->m_flags &= ~(M_BCAST|M_MCAST); if (!(ifp->if_flags & IFF_UP) || sc->gif_psrc == NULL || sc->gif_pdst == NULL) { m_freem(m); error = ENETDOWN; goto end; } if (ifp->if_bpf) { u_int32_t af = dst->sa_family; bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m); } ifp->if_opackets++; ifp->if_obytes += m->m_pkthdr.len; /* inner AF-specific encapsulation */ /* XXX should we check if our outer source is legal? 
*/ /* dispatch to output logic based on outer AF */ switch (sc->gif_psrc->sa_family) { #ifdef INET case AF_INET: error = in_gif_output(ifp, dst->sa_family, m); break; #endif #ifdef INET6 case AF_INET6: error = in6_gif_output(ifp, dst->sa_family, m); break; #endif default: m_freem(m); error = ENETDOWN; goto end; } end: if (error) ifp->if_oerrors++; return error; } void gif_input(m, af, ifp) struct mbuf *m; int af; struct ifnet *ifp; { int isr; if (ifp == NULL) { /* just in case */ m_freem(m); return; } m->m_pkthdr.rcvif = ifp; #ifdef MAC mac_create_mbuf_from_ifnet(ifp, m); #endif if (ifp->if_bpf) { u_int32_t af1 = af; bpf_mtap2(ifp->if_bpf, &af1, sizeof(af1), m); } if (ng_gif_input_p != NULL) { (*ng_gif_input_p)(ifp, &m, af); if (m == NULL) return; } /* * Put the packet to the network layer input queue according to the * specified address family. * Note: older versions of gif_input directly called network layer * input functions, e.g. ip6_input, here. We changed the policy to * prevent too many recursive calls of such input functions, which * might cause kernel panic. But the change may introduce another * problem; if the input queue is full, packets are discarded. * The kernel stack overflow really happened, and we believed * queue-full rarely occurs, so we changed the policy. */ switch (af) { #ifdef INET case AF_INET: isr = NETISR_IP; break; #endif #ifdef INET6 case AF_INET6: isr = NETISR_IPV6; break; #endif default: if (ng_gif_input_orphan_p != NULL) (*ng_gif_input_orphan_p)(ifp, m, af); else m_freem(m); return; } ifp->if_ipackets++; ifp->if_ibytes += m->m_pkthdr.len; netisr_dispatch(isr, m); } /* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? 
*/ int gif_ioctl(ifp, cmd, data) struct ifnet *ifp; u_long cmd; caddr_t data; { struct gif_softc *sc = (struct gif_softc*)ifp; struct ifreq *ifr = (struct ifreq*)data; int error = 0, size; struct sockaddr *dst, *src; #ifdef SIOCSIFMTU /* xxx */ u_long mtu; #endif switch (cmd) { case SIOCSIFADDR: ifp->if_flags |= IFF_UP; break; case SIOCSIFDSTADDR: break; case SIOCADDMULTI: case SIOCDELMULTI: break; #ifdef SIOCSIFMTU /* xxx */ case SIOCGIFMTU: break; case SIOCSIFMTU: mtu = ifr->ifr_mtu; if (mtu < GIF_MTU_MIN || mtu > GIF_MTU_MAX) return (EINVAL); ifp->if_mtu = mtu; break; #endif /* SIOCSIFMTU */ #ifdef INET case SIOCSIFPHYADDR: #endif #ifdef INET6 case SIOCSIFPHYADDR_IN6: #endif /* INET6 */ case SIOCSLIFPHYADDR: switch (cmd) { #ifdef INET case SIOCSIFPHYADDR: src = (struct sockaddr *) &(((struct in_aliasreq *)data)->ifra_addr); dst = (struct sockaddr *) &(((struct in_aliasreq *)data)->ifra_dstaddr); break; #endif #ifdef INET6 case SIOCSIFPHYADDR_IN6: src = (struct sockaddr *) &(((struct in6_aliasreq *)data)->ifra_addr); dst = (struct sockaddr *) &(((struct in6_aliasreq *)data)->ifra_dstaddr); break; #endif case SIOCSLIFPHYADDR: src = (struct sockaddr *) &(((struct if_laddrreq *)data)->addr); dst = (struct sockaddr *) &(((struct if_laddrreq *)data)->dstaddr); break; default: return EINVAL; } /* sa_family must be equal */ if (src->sa_family != dst->sa_family) return EINVAL; /* validate sa_len */ switch (src->sa_family) { #ifdef INET case AF_INET: if (src->sa_len != sizeof(struct sockaddr_in)) return EINVAL; break; #endif #ifdef INET6 case AF_INET6: if (src->sa_len != sizeof(struct sockaddr_in6)) return EINVAL; break; #endif default: return EAFNOSUPPORT; } switch (dst->sa_family) { #ifdef INET case AF_INET: if (dst->sa_len != sizeof(struct sockaddr_in)) return EINVAL; break; #endif #ifdef INET6 case AF_INET6: if (dst->sa_len != sizeof(struct sockaddr_in6)) return EINVAL; break; #endif default: return EAFNOSUPPORT; } /* check sa_family looks sane for the cmd */ switch 
(cmd) { case SIOCSIFPHYADDR: if (src->sa_family == AF_INET) break; return EAFNOSUPPORT; #ifdef INET6 case SIOCSIFPHYADDR_IN6: if (src->sa_family == AF_INET6) break; return EAFNOSUPPORT; #endif /* INET6 */ case SIOCSLIFPHYADDR: /* checks done in the above */ break; } error = gif_set_tunnel(&sc->gif_if, src, dst); break; #ifdef SIOCDIFPHYADDR case SIOCDIFPHYADDR: gif_delete_tunnel(&sc->gif_if); break; #endif case SIOCGIFPSRCADDR: #ifdef INET6 case SIOCGIFPSRCADDR_IN6: #endif /* INET6 */ if (sc->gif_psrc == NULL) { error = EADDRNOTAVAIL; goto bad; } src = sc->gif_psrc; switch (cmd) { #ifdef INET case SIOCGIFPSRCADDR: dst = &ifr->ifr_addr; size = sizeof(ifr->ifr_addr); break; #endif /* INET */ #ifdef INET6 case SIOCGIFPSRCADDR_IN6: dst = (struct sockaddr *) &(((struct in6_ifreq *)data)->ifr_addr); size = sizeof(((struct in6_ifreq *)data)->ifr_addr); break; #endif /* INET6 */ default: error = EADDRNOTAVAIL; goto bad; } if (src->sa_len > size) return EINVAL; bcopy((caddr_t)src, (caddr_t)dst, src->sa_len); break; case SIOCGIFPDSTADDR: #ifdef INET6 case SIOCGIFPDSTADDR_IN6: #endif /* INET6 */ if (sc->gif_pdst == NULL) { error = EADDRNOTAVAIL; goto bad; } src = sc->gif_pdst; switch (cmd) { #ifdef INET case SIOCGIFPDSTADDR: dst = &ifr->ifr_addr; size = sizeof(ifr->ifr_addr); break; #endif /* INET */ #ifdef INET6 case SIOCGIFPDSTADDR_IN6: dst = (struct sockaddr *) &(((struct in6_ifreq *)data)->ifr_addr); size = sizeof(((struct in6_ifreq *)data)->ifr_addr); break; #endif /* INET6 */ default: error = EADDRNOTAVAIL; goto bad; } if (src->sa_len > size) return EINVAL; bcopy((caddr_t)src, (caddr_t)dst, src->sa_len); break; case SIOCGLIFPHYADDR: if (sc->gif_psrc == NULL || sc->gif_pdst == NULL) { error = EADDRNOTAVAIL; goto bad; } /* copy src */ src = sc->gif_psrc; dst = (struct sockaddr *) &(((struct if_laddrreq *)data)->addr); size = sizeof(((struct if_laddrreq *)data)->addr); if (src->sa_len > size) return EINVAL; bcopy((caddr_t)src, (caddr_t)dst, src->sa_len); /* copy dst */ src 
= sc->gif_pdst; dst = (struct sockaddr *) &(((struct if_laddrreq *)data)->dstaddr); size = sizeof(((struct if_laddrreq *)data)->dstaddr); if (src->sa_len > size) return EINVAL; bcopy((caddr_t)src, (caddr_t)dst, src->sa_len); break; case SIOCSIFFLAGS: /* if_ioctl() takes care of it */ break; default: error = EINVAL; break; } bad: return error; } /* * XXXRW: There's a general event-ordering issue here: the code to check * if a given tunnel is already present happens before we perform a * potentially blocking setup of the tunnel. This code needs to be * re-ordered so that the check and replacement can be atomic using * a mutex. */ int gif_set_tunnel(ifp, src, dst) struct ifnet *ifp; struct sockaddr *src; struct sockaddr *dst; { struct gif_softc *sc = (struct gif_softc *)ifp; struct gif_softc *sc2; struct sockaddr *osrc, *odst, *sa; int s; int error = 0; s = splnet(); mtx_lock(&gif_mtx); LIST_FOREACH(sc2, &gif_softc_list, gif_list) { if (sc2 == sc) continue; if (!sc2->gif_pdst || !sc2->gif_psrc) continue; if (sc2->gif_pdst->sa_family != dst->sa_family || sc2->gif_pdst->sa_len != dst->sa_len || sc2->gif_psrc->sa_family != src->sa_family || sc2->gif_psrc->sa_len != src->sa_len) continue; /* * Disallow parallel tunnels unless instructed * otherwise. */ if (!parallel_tunnels && bcmp(sc2->gif_pdst, dst, dst->sa_len) == 0 && bcmp(sc2->gif_psrc, src, src->sa_len) == 0) { error = EADDRNOTAVAIL; mtx_unlock(&gif_mtx); goto bad; } /* XXX both end must be valid? 
		   (I mean, not 0.0.0.0) */
	}
	mtx_unlock(&gif_mtx);

	/* XXX we can detach from both, but be polite just in case */
	if (sc->gif_psrc)
		switch (sc->gif_psrc->sa_family) {
#ifdef INET
		case AF_INET:
			(void)in_gif_detach(sc);
			break;
#endif
#ifdef INET6
		case AF_INET6:
			(void)in6_gif_detach(sc);
			break;
#endif
		}

	/* Install private, malloc'd copies of the new endpoint addresses. */
	osrc = sc->gif_psrc;
	sa = (struct sockaddr *)malloc(src->sa_len, M_IFADDR, M_WAITOK);
	bcopy((caddr_t)src, (caddr_t)sa, src->sa_len);
	sc->gif_psrc = sa;

	odst = sc->gif_pdst;
	sa = (struct sockaddr *)malloc(dst->sa_len, M_IFADDR, M_WAITOK);
	bcopy((caddr_t)dst, (caddr_t)sa, dst->sa_len);
	sc->gif_pdst = sa;

	/* Re-attach the protocol-specific encapsulation for the new family. */
	switch (sc->gif_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_gif_attach(sc);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_gif_attach(sc);
		break;
#endif
	}
	if (error) {
		/* rollback */
		free((caddr_t)sc->gif_psrc, M_IFADDR);
		free((caddr_t)sc->gif_pdst, M_IFADDR);
		sc->gif_psrc = osrc;
		sc->gif_pdst = odst;
		goto bad;
	}

	/* Success: the old endpoint addresses are no longer needed. */
	if (osrc)
		free((caddr_t)osrc, M_IFADDR);
	if (odst)
		free((caddr_t)odst, M_IFADDR);

	if (sc->gif_psrc && sc->gif_pdst)
		ifp->if_flags |= IFF_RUNNING;
	else
		ifp->if_flags &= ~IFF_RUNNING;
	splx(s);

	return 0;

 bad:
	if (sc->gif_psrc && sc->gif_pdst)
		ifp->if_flags |= IFF_RUNNING;
	else
		ifp->if_flags &= ~IFF_RUNNING;
	splx(s);
	return error;
}

/*
 * Tear down a gif tunnel configuration: free both outer endpoint
 * addresses and detach the INET and INET6 encapsulations.
 */
void
gif_delete_tunnel(ifp)
	struct ifnet *ifp;
{
	struct gif_softc *sc = (struct gif_softc *)ifp;
	int s;

	s = splnet();

	if (sc->gif_psrc) {
		free((caddr_t)sc->gif_psrc, M_IFADDR);
		sc->gif_psrc = NULL;
	}
	if (sc->gif_pdst) {
		free((caddr_t)sc->gif_pdst, M_IFADDR);
		sc->gif_pdst = NULL;
	}
	/* it is safe to detach from both */
#ifdef INET
	(void)in_gif_detach(sc);
#endif
#ifdef INET6
	(void)in6_gif_detach(sc);
#endif

	/*
	 * NOTE(review): both gif_psrc and gif_pdst were just set to NULL
	 * above, so this condition is always false here and IFF_RUNNING
	 * is always cleared; the if/else mirrors gif_set_tunnel().
	 */
	if (sc->gif_psrc && sc->gif_pdst)
		ifp->if_flags |= IFF_RUNNING;
	else
		ifp->if_flags &= ~IFF_RUNNING;
	splx(s);
}
Index: head/sys/net/if_gre.c
===================================================================
--- head/sys/net/if_gre.c	(revision 129879)
+++ head/sys/net/if_gre.c	(revision 129880)
@@ -1,781 +1,782 @@
 /*
$NetBSD: if_gre.c,v 1.49 2003/12/11 00:22:29 itojun Exp $ */ /* $FreeBSD$ */ /* * Copyright (c) 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Heiko W.Rupp * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Encapsulate L3 protocols into IP * See RFC 1701 and 1702 for more details. * If_gre is compatible with Cisco GRE tunnels, so you can * have a NetBSD box as the other end of a tunnel interface of a Cisco * router. See gre(4) for more details. * Also supported: IP in IP encaps (proto 55) as of RFC 2004 */ #include "opt_atalk.h" #include "opt_inet.h" #include "opt_inet6.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #include #include #else #error "Huh? if_gre without inet?" #endif #include #include #include /* * It is not easy to calculate the right value for a GRE MTU. * We leave this task to the admin and use the same default that * other vendors use. */ #define GREMTU 1476 #define GRENAME "gre" /* * gre_mtx protects all global variables in if_gre.c. * XXX: gre_softc data not protected yet. */ struct mtx gre_mtx; static MALLOC_DEFINE(M_GRE, GRENAME, "Generic Routing Encapsulation"); struct gre_softc_head gre_softc_list; static int gre_clone_create(struct if_clone *, int); static void gre_clone_destroy(struct ifnet *); static int gre_ioctl(struct ifnet *, u_long, caddr_t); static int gre_output(struct ifnet *, struct mbuf *, struct sockaddr *, struct rtentry *rt); static struct if_clone gre_cloner = IF_CLONE_INITIALIZER("gre", gre_clone_create, gre_clone_destroy, 0, IF_MAXUNIT); static int gre_compute_route(struct gre_softc *sc); static void greattach(void); #ifdef INET extern struct domain inetdomain; static const struct protosw in_gre_protosw = { SOCK_RAW, &inetdomain, IPPROTO_GRE, PR_ATOMIC|PR_ADDR, (pr_input_t*)gre_input, (pr_output_t*)rip_output, rip_ctlinput, rip_ctloutput, 0, 0, 0, 0, 0, &rip_usrreqs }; static const struct protosw in_mobile_protosw = { SOCK_RAW, &inetdomain, IPPROTO_MOBILE, PR_ATOMIC|PR_ADDR, (pr_input_t*)gre_mobile_input, (pr_output_t*)rip_output, rip_ctlinput, rip_ctloutput, 0, 0, 0, 0, 0, 
&rip_usrreqs };
#endif

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_TUNNEL, gre, CTLFLAG_RW, 0,
    "Generic Routing Encapsulation");
#ifndef MAX_GRE_NEST
/*
 * This macro controls the default upper limitation on nesting of gre tunnels.
 * Since, setting a large value to this macro with a careless configuration
 * may introduce system crash, we don't allow any nestings by default.
 * If you need to configure nested gre tunnels, you can define this macro
 * in your kernel configuration file.  However, if you do so, please be
 * careful to configure the tunnels so that it won't make a loop.
 */
#define MAX_GRE_NEST 1
#endif
static int max_gre_nesting = MAX_GRE_NEST;
SYSCTL_INT(_net_link_gre, OID_AUTO, max_nesting, CTLFLAG_RW,
    &max_gre_nesting, 0, "Max nested tunnels");

/*
 * Module initialization: set up the softc-list mutex and list, and
 * register the "gre" interface cloner.
 */
/* ARGSUSED */
static void
greattach(void)
{

	mtx_init(&gre_mtx, "gre_mtx", NULL, MTX_DEF);
	LIST_INIT(&gre_softc_list);
	if_clone_attach(&gre_cloner);
}

/*
 * Cloner create routine: allocate and zero a softc, initialize the
 * embedded ifnet (GRE defaults: GREMTU, point-to-point, IPPROTO_GRE
 * with IFF_LINK0 set, WCCP v1), attach it and its bpf tap, and insert
 * it into the global softc list under gre_mtx.  Always returns 0.
 */
static int
gre_clone_create(ifc, unit)
	struct if_clone *ifc;
	int unit;
{
	struct gre_softc *sc;

	sc = malloc(sizeof(struct gre_softc), M_GRE, M_WAITOK);
	memset(sc, 0, sizeof(struct gre_softc));

	if_initname(&sc->sc_if, ifc->ifc_name, unit);
	sc->sc_if.if_softc = sc;
	sc->sc_if.if_snd.ifq_maxlen = IFQ_MAXLEN;
	sc->sc_if.if_type = IFT_TUNNEL;
	sc->sc_if.if_addrlen = 0;
	sc->sc_if.if_hdrlen = 24; /* IP + GRE */
	sc->sc_if.if_mtu = GREMTU;
	sc->sc_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST;
	sc->sc_if.if_output = gre_output;
	sc->sc_if.if_ioctl = gre_ioctl;
	sc->g_dst.s_addr = sc->g_src.s_addr = INADDR_ANY;
	sc->g_proto = IPPROTO_GRE;
	sc->sc_if.if_flags |= IFF_LINK0;	/* IFF_LINK0 selects GRE encap */
	sc->encap = NULL;
	sc->called = 0;
	sc->wccp_ver = WCCP_V1;
	if_attach(&sc->sc_if);
	bpfattach(&sc->sc_if, DLT_NULL, sizeof(u_int32_t));
	mtx_lock(&gre_mtx);
	LIST_INSERT_HEAD(&gre_softc_list, sc, sc_list);
	mtx_unlock(&gre_mtx);
	return (0);
}

/*
 * Release all resources held by a softc that has already been removed
 * from gre_softc_list: detach the encapsulation (if any), the bpf tap,
 * and the ifnet, then free the softc itself.
 */
static void
gre_destroy(struct gre_softc *sc)
{

#ifdef INET
	if (sc->encap != NULL)
		encap_detach(sc->encap);
#endif
	bpfdetach(&sc->sc_if);
	if_detach(&sc->sc_if);
	free(sc, M_GRE);
}
static void gre_clone_destroy(ifp) struct ifnet *ifp; { struct gre_softc *sc = ifp->if_softc; mtx_lock(&gre_mtx); LIST_REMOVE(sc, sc_list); mtx_unlock(&gre_mtx); gre_destroy(sc); } /* * The output routine. Takes a packet and encapsulates it in the protocol * given by sc->g_proto. See also RFC 1701 and RFC 2004 */ static int gre_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct rtentry *rt) { int error = 0; struct gre_softc *sc = ifp->if_softc; struct greip *gh; struct ip *ip; u_int16_t etype = 0; struct mobile_h mob_h; /* * gre may cause infinite recursion calls when misconfigured. * We'll prevent this by introducing upper limit. */ if (++(sc->called) > max_gre_nesting) { printf("%s: gre_output: recursively called too many " "times(%d)\n", if_name(&sc->sc_if), sc->called); m_freem(m); error = EIO; /* is there better errno? */ goto end; } if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 0 || sc->g_src.s_addr == INADDR_ANY || sc->g_dst.s_addr == INADDR_ANY) { m_freem(m); error = ENETDOWN; goto end; } gh = NULL; ip = NULL; if (ifp->if_bpf) { u_int32_t af = dst->sa_family; bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m); } m->m_flags &= ~(M_BCAST|M_MCAST); if (sc->g_proto == IPPROTO_MOBILE) { if (dst->sa_family == AF_INET) { struct mbuf *m0; int msiz; ip = mtod(m, struct ip *); /* * RFC2004 specifies that fragmented diagrams shouldn't * be encapsulated. */ if ((ip->ip_off & IP_MF) != 0) { _IF_DROP(&ifp->if_snd); m_freem(m); error = EINVAL; /* is there better errno? */ goto end; } memset(&mob_h, 0, MOB_H_SIZ_L); mob_h.proto = (ip->ip_p) << 8; mob_h.odst = ip->ip_dst.s_addr; ip->ip_dst.s_addr = sc->g_dst.s_addr; /* * If the packet comes from our host, we only change * the destination address in the IP header. 
* Else we also need to save and change the source */ if (in_hosteq(ip->ip_src, sc->g_src)) { msiz = MOB_H_SIZ_S; } else { mob_h.proto |= MOB_H_SBIT; mob_h.osrc = ip->ip_src.s_addr; ip->ip_src.s_addr = sc->g_src.s_addr; msiz = MOB_H_SIZ_L; } mob_h.proto = htons(mob_h.proto); mob_h.hcrc = gre_in_cksum((u_int16_t *)&mob_h, msiz); if ((m->m_data - msiz) < m->m_pktdat) { /* need new mbuf */ MGETHDR(m0, M_DONTWAIT, MT_HEADER); if (m0 == NULL) { _IF_DROP(&ifp->if_snd); m_freem(m); error = ENOBUFS; goto end; } m0->m_next = m; m->m_data += sizeof(struct ip); m->m_len -= sizeof(struct ip); m0->m_pkthdr.len = m->m_pkthdr.len + msiz; m0->m_len = msiz + sizeof(struct ip); m0->m_data += max_linkhdr; memcpy(mtod(m0, caddr_t), (caddr_t)ip, sizeof(struct ip)); m = m0; } else { /* we have some space left in the old one */ m->m_data -= msiz; m->m_len += msiz; m->m_pkthdr.len += msiz; bcopy(ip, mtod(m, caddr_t), sizeof(struct ip)); } ip = mtod(m, struct ip *); memcpy((caddr_t)(ip + 1), &mob_h, (unsigned)msiz); ip->ip_len = ntohs(ip->ip_len) + msiz; } else { /* AF_INET */ _IF_DROP(&ifp->if_snd); m_freem(m); error = EINVAL; goto end; } } else if (sc->g_proto == IPPROTO_GRE) { switch (dst->sa_family) { case AF_INET: ip = mtod(m, struct ip *); etype = ETHERTYPE_IP; break; #ifdef NETATALK case AF_APPLETALK: etype = ETHERTYPE_ATALK; break; #endif default: _IF_DROP(&ifp->if_snd); m_freem(m); error = EAFNOSUPPORT; goto end; } M_PREPEND(m, sizeof(struct greip), M_DONTWAIT); } else { _IF_DROP(&ifp->if_snd); m_freem(m); error = EINVAL; goto end; } if (m == NULL) { /* mbuf allocation failed */ _IF_DROP(&ifp->if_snd); error = ENOBUFS; goto end; } gh = mtod(m, struct greip *); if (sc->g_proto == IPPROTO_GRE) { /* we don't have any GRE flags for now */ memset((void *)gh, 0, sizeof(struct greip)); gh->gi_ptype = htons(etype); } gh->gi_pr = sc->g_proto; if (sc->g_proto != IPPROTO_MOBILE) { gh->gi_src = sc->g_src; gh->gi_dst = sc->g_dst; ((struct ip*)gh)->ip_hl = (sizeof(struct ip)) >> 2; ((struct 
ip*)gh)->ip_ttl = GRE_TTL; ((struct ip*)gh)->ip_tos = ip->ip_tos; ((struct ip*)gh)->ip_id = ip->ip_id; gh->gi_len = m->m_pkthdr.len; } ifp->if_opackets++; ifp->if_obytes += m->m_pkthdr.len; /* * Send it off and with IP_FORWARD flag to prevent it from * overwriting the ip_id again. ip_id is already set to the * ip_id of the encapsulated packet. */ error = ip_output(m, NULL, &sc->route, IP_FORWARDING, (struct ip_moptions *)NULL, (struct inpcb *)NULL); end: sc->called = 0; if (error) ifp->if_oerrors++; return (error); } static int gre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct if_laddrreq *lifr = (struct if_laddrreq *)data; struct in_aliasreq *aifr = (struct in_aliasreq *)data; struct gre_softc *sc = ifp->if_softc; int s; struct sockaddr_in si; struct sockaddr *sa = NULL; int error; struct sockaddr_in sp, sm, dp, dm; error = 0; s = splnet(); switch (cmd) { case SIOCSIFADDR: ifp->if_flags |= IFF_UP; break; case SIOCSIFDSTADDR: break; case SIOCSIFFLAGS: if ((error = suser(curthread)) != 0) break; if ((ifr->ifr_flags & IFF_LINK0) != 0) sc->g_proto = IPPROTO_GRE; else sc->g_proto = IPPROTO_MOBILE; if ((ifr->ifr_flags & IFF_LINK2) != 0) sc->wccp_ver = WCCP_V2; else sc->wccp_ver = WCCP_V1; goto recompute; case SIOCSIFMTU: if ((error = suser(curthread)) != 0) break; if (ifr->ifr_mtu < 576) { error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; break; case SIOCGIFMTU: ifr->ifr_mtu = sc->sc_if.if_mtu; break; case SIOCADDMULTI: case SIOCDELMULTI: if ((error = suser(curthread)) != 0) break; if (ifr == 0) { error = EAFNOSUPPORT; break; } switch (ifr->ifr_addr.sa_family) { #ifdef INET case AF_INET: break; #endif default: error = EAFNOSUPPORT; break; } break; case GRESPROTO: if ((error = suser(curthread)) != 0) break; sc->g_proto = ifr->ifr_flags; switch (sc->g_proto) { case IPPROTO_GRE: ifp->if_flags |= IFF_LINK0; break; case IPPROTO_MOBILE: ifp->if_flags &= ~IFF_LINK0; break; default: error = EPROTONOSUPPORT; break; } 
goto recompute; case GREGPROTO: ifr->ifr_flags = sc->g_proto; break; case GRESADDRS: case GRESADDRD: if ((error = suser(curthread)) != 0) break; /* * set tunnel endpoints, compute a less specific route * to the remote end and mark if as up */ sa = &ifr->ifr_addr; if (cmd == GRESADDRS) sc->g_src = (satosin(sa))->sin_addr; if (cmd == GRESADDRD) sc->g_dst = (satosin(sa))->sin_addr; recompute: #ifdef INET if (sc->encap != NULL) { encap_detach(sc->encap); sc->encap = NULL; } #endif if ((sc->g_src.s_addr != INADDR_ANY) && (sc->g_dst.s_addr != INADDR_ANY)) { bzero(&sp, sizeof(sp)); bzero(&sm, sizeof(sm)); bzero(&dp, sizeof(dp)); bzero(&dm, sizeof(dm)); sp.sin_len = sm.sin_len = dp.sin_len = dm.sin_len = sizeof(struct sockaddr_in); sp.sin_family = sm.sin_family = dp.sin_family = dm.sin_family = AF_INET; sp.sin_addr = sc->g_src; dp.sin_addr = sc->g_dst; sm.sin_addr.s_addr = dm.sin_addr.s_addr = INADDR_BROADCAST; #ifdef INET sc->encap = encap_attach(AF_INET, sc->g_proto, sintosa(&sp), sintosa(&sm), sintosa(&dp), sintosa(&dm), (sc->g_proto == IPPROTO_GRE) ? 
&in_gre_protosw : &in_mobile_protosw, sc); if (sc->encap == NULL) printf("%s: unable to attach encap\n", if_name(&sc->sc_if)); #endif if (sc->route.ro_rt != 0) /* free old route */ RTFREE(sc->route.ro_rt); if (gre_compute_route(sc) == 0) ifp->if_flags |= IFF_RUNNING; else ifp->if_flags &= ~IFF_RUNNING; } break; case GREGADDRS: memset(&si, 0, sizeof(si)); si.sin_family = AF_INET; si.sin_len = sizeof(struct sockaddr_in); si.sin_addr.s_addr = sc->g_src.s_addr; sa = sintosa(&si); ifr->ifr_addr = *sa; break; case GREGADDRD: memset(&si, 0, sizeof(si)); si.sin_family = AF_INET; si.sin_len = sizeof(struct sockaddr_in); si.sin_addr.s_addr = sc->g_dst.s_addr; sa = sintosa(&si); ifr->ifr_addr = *sa; break; case SIOCSIFPHYADDR: if ((error = suser(curthread)) != 0) break; if (aifr->ifra_addr.sin_family != AF_INET || aifr->ifra_dstaddr.sin_family != AF_INET) { error = EAFNOSUPPORT; break; } if (aifr->ifra_addr.sin_len != sizeof(si) || aifr->ifra_dstaddr.sin_len != sizeof(si)) { error = EINVAL; break; } sc->g_src = aifr->ifra_addr.sin_addr; sc->g_dst = aifr->ifra_dstaddr.sin_addr; goto recompute; case SIOCSLIFPHYADDR: if ((error = suser(curthread)) != 0) break; if (lifr->addr.ss_family != AF_INET || lifr->dstaddr.ss_family != AF_INET) { error = EAFNOSUPPORT; break; } if (lifr->addr.ss_len != sizeof(si) || lifr->dstaddr.ss_len != sizeof(si)) { error = EINVAL; break; } sc->g_src = (satosin((struct sockadrr *)&lifr->addr))->sin_addr; sc->g_dst = (satosin((struct sockadrr *)&lifr->dstaddr))->sin_addr; goto recompute; case SIOCDIFPHYADDR: if ((error = suser(curthread)) != 0) break; sc->g_src.s_addr = INADDR_ANY; sc->g_dst.s_addr = INADDR_ANY; goto recompute; case SIOCGLIFPHYADDR: if (sc->g_src.s_addr == INADDR_ANY || sc->g_dst.s_addr == INADDR_ANY) { error = EADDRNOTAVAIL; break; } memset(&si, 0, sizeof(si)); si.sin_family = AF_INET; si.sin_len = sizeof(struct sockaddr_in); si.sin_addr.s_addr = sc->g_src.s_addr; memcpy(&lifr->addr, &si, sizeof(si)); si.sin_addr.s_addr = 
sc->g_dst.s_addr; memcpy(&lifr->dstaddr, &si, sizeof(si)); break; case SIOCGIFPSRCADDR: #ifdef INET6 case SIOCGIFPSRCADDR_IN6: #endif if (sc->g_src.s_addr == INADDR_ANY) { error = EADDRNOTAVAIL; break; } memset(&si, 0, sizeof(si)); si.sin_family = AF_INET; si.sin_len = sizeof(struct sockaddr_in); si.sin_addr.s_addr = sc->g_src.s_addr; bcopy(&si, &ifr->ifr_addr, sizeof(ifr->ifr_addr)); break; case SIOCGIFPDSTADDR: #ifdef INET6 case SIOCGIFPDSTADDR_IN6: #endif if (sc->g_dst.s_addr == INADDR_ANY) { error = EADDRNOTAVAIL; break; } memset(&si, 0, sizeof(si)); si.sin_family = AF_INET; si.sin_len = sizeof(struct sockaddr_in); si.sin_addr.s_addr = sc->g_dst.s_addr; bcopy(&si, &ifr->ifr_addr, sizeof(ifr->ifr_addr)); break; default: error = EINVAL; break; } splx(s); return (error); } /* * computes a route to our destination that is not the one * which would be taken by ip_output(), as this one will loop back to * us. If the interface is p2p as a--->b, then a routing entry exists * If we now send a packet to b (e.g. ping b), this will come down here * gets src=a, dst=b tacked on and would from ip_output() sent back to * if_gre. * Goal here is to compute a route to b that is less specific than * a-->b. We know that this one exists as in normal operation we have * at least a default route which matches. */ static int gre_compute_route(struct gre_softc *sc) { struct route *ro; u_int32_t a, b, c; ro = &sc->route; memset(ro, 0, sizeof(struct route)); ((struct sockaddr_in *)&ro->ro_dst)->sin_addr = sc->g_dst; ro->ro_dst.sa_family = AF_INET; ro->ro_dst.sa_len = sizeof(ro->ro_dst); /* * toggle last bit, so our interface is not found, but a less * specific route. I'd rather like to specify a shorter mask, * but this is not possible. Should work though. XXX * there is a simpler way ... 
 */
	if ((sc->sc_if.if_flags & IFF_LINK1) == 0) {
		/*
		 * Flip the lowest bit of the destination address so the
		 * route lookup cannot match our own (more specific)
		 * interface route and instead returns a less specific one:
		 * b holds the inverted low bit, c the remaining 31 bits.
		 */
		a = ntohl(sc->g_dst.s_addr);
		b = a & 0x01;
		c = a & 0xfffffffe;
		b = b ^ 0x01;
		a = b | c;
		((struct sockaddr_in *)&ro->ro_dst)->sin_addr.s_addr
		    = htonl(a);
	}

#ifdef DIAGNOSTIC
	printf("%s: searching for a route to %s", if_name(&sc->sc_if),
	    inet_ntoa(((struct sockaddr_in *)&ro->ro_dst)->sin_addr));
#endif

	rtalloc(ro);

	/*
	 * check if this returned a route at all and this route is no
	 * recursion to ourself
	 */
	if (ro->ro_rt == NULL || ro->ro_rt->rt_ifp->if_softc == sc) {
#ifdef DIAGNOSTIC
		if (ro->ro_rt == NULL)
			printf(" - no route found!\n");
		else
			printf(" - route loops back to ourself!\n");
#endif
		return EADDRNOTAVAIL;
	}

	/*
	 * now change it back - else ip_output will just drop
	 * the route and search one to this interface ...
	 */
	if ((sc->sc_if.if_flags & IFF_LINK1) == 0)
		((struct sockaddr_in *)&ro->ro_dst)->sin_addr = sc->g_dst;

#ifdef DIAGNOSTIC
	printf(", choosing %s with gateway %s", if_name(ro->ro_rt->rt_ifp),
	    inet_ntoa(((struct sockaddr_in *)(ro->ro_rt->rt_gateway))->sin_addr));
	printf("\n");
#endif

	return 0;
}

/*
 * do a checksum of a buffer - much like in_cksum, which operates on
 * mbufs.
 */
/*
 * Compute the 16-bit one's-complement checksum over len bytes at p.
 * Sums whole 16-bit words, pads a trailing odd byte with zero, folds
 * the carries back in, and returns the complement of the sum.
 */
u_int16_t
gre_in_cksum(u_int16_t *p, u_int len)
{
	u_int32_t sum = 0;
	int nwords = len >> 1;	/* number of complete 16-bit words */

	while (nwords-- != 0)
		sum += *p++;

	if (len & 1) {
		/* Odd trailing byte: treat it as the high-order byte
		 * of a word whose low byte is zero. */
		union {
			u_short w;
			u_char c[2];
		} u;
		u.c[0] = *(u_char *)p;
		u.c[1] = 0;
		sum += u.w;
	}

	/* end-around-carry */
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);
	return (~sum);
}

/*
 * Module event handler.  MOD_UNLOAD detaches the cloner, then drains
 * gre_softc_list, dropping gre_mtx around each gre_destroy() call
 * (which may sleep) and re-acquiring it to fetch the next entry.
 */
static int
gremodevent(module_t mod, int type, void *data)
{
	struct gre_softc *sc;

	switch (type) {
	case MOD_LOAD:
		greattach();
		break;
	case MOD_UNLOAD:
		if_clone_detach(&gre_cloner);

		mtx_lock(&gre_mtx);
		while ((sc = LIST_FIRST(&gre_softc_list)) != NULL) {
			LIST_REMOVE(sc, sc_list);
			mtx_unlock(&gre_mtx);
			gre_destroy(sc);
			mtx_lock(&gre_mtx);
		}
		mtx_unlock(&gre_mtx);
		mtx_destroy(&gre_mtx);
		break;
	}
	return 0;
}

static moduledata_t gre_mod = {
	"if_gre",
	gremodevent,
	0
};

DECLARE_MODULE(if_gre, gre_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_gre, 1);
Index: head/sys/net/if_stf.c
===================================================================
--- head/sys/net/if_stf.c	(revision 129879)
+++ head/sys/net/if_stf.c	(revision 129880)
@@ -1,809 +1,810 @@
 /*	$FreeBSD$	*/
 /*	$KAME: if_stf.c,v 1.73 2001/12/03 11:08:30 keiichi Exp $	*/

 /*
  * Copyright (C) 2000 WIDE Project.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the project nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * 6to4 interface, based on RFC3056. * * 6to4 interface is NOT capable of link-layer (I mean, IPv4) multicasting. * There is no address mapping defined from IPv6 multicast address to IPv4 * address. Therefore, we do not have IFF_MULTICAST on the interface. * * Due to the lack of address mapping for link-local addresses, we cannot * throw packets toward link-local addresses (fe80::x). Also, we cannot throw * packets to link-local multicast addresses (ff02::x). * * Here are interesting symptoms due to the lack of link-local address: * * Unicast routing exchange: * - RIPng: Impossible. Uses link-local multicast packet toward ff02::9, * and link-local addresses as nexthop. * - OSPFv6: Impossible. OSPFv6 assumes that there's link-local address * assigned to the link, and makes use of them. Also, HELLO packets use * link-local multicast addresses (ff02::5 and ff02::6). * - BGP4+: Maybe. You can only use global address as nexthop, and global * address as TCP endpoint address. * * Multicast routing protocols: * - PIM: Hello packet cannot be used to discover adjacent PIM routers. * Adjacent PIM routers must be configured manually (is it really spec-wise * correct thing to do?). 
* * ICMPv6: * - Redirects cannot be used due to the lack of link-local address. * * stf interface does not have, and will not need, a link-local address. * It seems to have no real benefit and does not help the above symptoms much. * Even if we assign link-locals to interface, we cannot really * use link-local unicast/multicast on top of 6to4 cloud (since there's no * encapsulation defined for link-local address), and the above analysis does * not change. RFC3056 does not mandate the assignment of link-local address * either. * * 6to4 interface has security issues. Refer to * http://playground.iijlab.net/i-d/draft-itojun-ipv6-transition-abuse-00.txt * for details. The code tries to filter out some of malicious packets. * Note that there is no way to be 100% secure. */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_mac.h" #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define STFNAME "stf" #define IN6_IS_ADDR_6TO4(x) (ntohs((x)->s6_addr16[0]) == 0x2002) /* * XXX: Return a pointer with 16-bit aligned. Don't cast it to * struct in_addr *; use bcopy() instead. */ #define GET_V4(x) ((caddr_t)(&(x)->s6_addr16[1])) struct stf_softc { struct ifnet sc_if; /* common area */ union { struct route __sc_ro4; struct route_in6 __sc_ro6; /* just for safety */ } __sc_ro46; #define sc_ro __sc_ro46.__sc_ro4 const struct encaptab *encap_cookie; LIST_ENTRY(stf_softc) sc_list; /* all stf's are linked */ }; /* * All mutable global variables in if_stf.c are protected by stf_mtx. * XXXRW: Note that mutable fields in the softc are not currently locked: * in particular, sc_ro needs to be protected from concurrent entrance * of stf_output(). 
*/ static struct mtx stf_mtx; static LIST_HEAD(, stf_softc) stf_softc_list; static MALLOC_DEFINE(M_STF, STFNAME, "6to4 Tunnel Interface"); static const int ip_stf_ttl = 40; extern struct domain inetdomain; struct protosw in_stf_protosw = { SOCK_RAW, &inetdomain, IPPROTO_IPV6, PR_ATOMIC|PR_ADDR, in_stf_input, (pr_output_t*)rip_output, 0, rip_ctloutput, 0, 0, 0, 0, 0, &rip_usrreqs }; static int stfmodevent(module_t, int, void *); static int stf_encapcheck(const struct mbuf *, int, int, void *); static struct in6_ifaddr *stf_getsrcifa6(struct ifnet *); static int stf_output(struct ifnet *, struct mbuf *, struct sockaddr *, struct rtentry *); static int isrfc1918addr(struct in_addr *); static int stf_checkaddr4(struct stf_softc *, struct in_addr *, struct ifnet *); static int stf_checkaddr6(struct stf_softc *, struct in6_addr *, struct ifnet *); static void stf_rtrequest(int, struct rtentry *, struct rt_addrinfo *); static int stf_ioctl(struct ifnet *, u_long, caddr_t); static int stf_clone_create(struct if_clone *, int); static void stf_clone_destroy(struct ifnet *); /* only one clone is currently allowed */ struct if_clone stf_cloner = IF_CLONE_INITIALIZER(STFNAME, stf_clone_create, stf_clone_destroy, 0, 0); static int stf_clone_create(ifc, unit) struct if_clone *ifc; int unit; { struct stf_softc *sc; struct ifnet *ifp; sc = malloc(sizeof(struct stf_softc), M_STF, M_WAITOK | M_ZERO); ifp = &sc->sc_if; if_initname(ifp, ifc->ifc_name, unit); sc->encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV6, stf_encapcheck, &in_stf_protosw, sc); if (sc->encap_cookie == NULL) { if_printf(ifp, "attach failed\n"); free(sc, M_STF); return (ENOMEM); } ifp->if_mtu = IPV6_MMTU; ifp->if_ioctl = stf_ioctl; ifp->if_output = stf_output; ifp->if_type = IFT_STF; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; if_attach(ifp); bpfattach(ifp, DLT_NULL, sizeof(u_int)); mtx_lock(&stf_mtx); LIST_INSERT_HEAD(&stf_softc_list, sc, sc_list); mtx_unlock(&stf_mtx); return (0); } static void stf_destroy(struct 
stf_softc *sc) { int err; err = encap_detach(sc->encap_cookie); KASSERT(err == 0, ("Unexpected error detaching encap_cookie")); bpfdetach(&sc->sc_if); if_detach(&sc->sc_if); free(sc, M_STF); } static void stf_clone_destroy(ifp) struct ifnet *ifp; { struct stf_softc *sc = (void *) ifp; mtx_lock(&stf_mtx); LIST_REMOVE(sc, sc_list); mtx_unlock(&stf_mtx); stf_destroy(sc); } static int stfmodevent(mod, type, data) module_t mod; int type; void *data; { struct stf_softc *sc; switch (type) { case MOD_LOAD: mtx_init(&stf_mtx, "stf_mtx", NULL, MTX_DEF); LIST_INIT(&stf_softc_list); if_clone_attach(&stf_cloner); break; case MOD_UNLOAD: if_clone_detach(&stf_cloner); mtx_lock(&stf_mtx); while ((sc = LIST_FIRST(&stf_softc_list)) != NULL) { LIST_REMOVE(sc, sc_list); mtx_unlock(&stf_mtx); stf_destroy(sc); mtx_lock(&stf_mtx); } mtx_unlock(&stf_mtx); mtx_destroy(&stf_mtx); break; } return (0); } static moduledata_t stf_mod = { "if_stf", stfmodevent, 0 }; DECLARE_MODULE(if_stf, stf_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); static int stf_encapcheck(m, off, proto, arg) const struct mbuf *m; int off; int proto; void *arg; { struct ip ip; struct in6_ifaddr *ia6; struct stf_softc *sc; struct in_addr a, b, mask; sc = (struct stf_softc *)arg; if (sc == NULL) return 0; if ((sc->sc_if.if_flags & IFF_UP) == 0) return 0; /* IFF_LINK0 means "no decapsulation" */ if ((sc->sc_if.if_flags & IFF_LINK0) != 0) return 0; if (proto != IPPROTO_IPV6) return 0; /* LINTED const cast */ m_copydata((struct mbuf *)(uintptr_t)m, 0, sizeof(ip), (caddr_t)&ip); if (ip.ip_v != 4) return 0; ia6 = stf_getsrcifa6(&sc->sc_if); if (ia6 == NULL) return 0; /* * check if IPv4 dst matches the IPv4 address derived from the * local 6to4 address. * success on: dst = 10.1.1.1, ia6->ia_addr = 2002:0a01:0101:... */ if (bcmp(GET_V4(&ia6->ia_addr.sin6_addr), &ip.ip_dst, sizeof(ip.ip_dst)) != 0) return 0; /* * check if IPv4 src matches the IPv4 address derived from the * local 6to4 address masked by prefixmask. 
* success on: src = 10.1.1.1, ia6->ia_addr = 2002:0a00:.../24 * fail on: src = 10.1.1.1, ia6->ia_addr = 2002:0b00:.../24 */ bzero(&a, sizeof(a)); bcopy(GET_V4(&ia6->ia_addr.sin6_addr), &a, sizeof(a)); bcopy(GET_V4(&ia6->ia_prefixmask.sin6_addr), &mask, sizeof(mask)); a.s_addr &= mask.s_addr; b = ip.ip_src; b.s_addr &= mask.s_addr; if (a.s_addr != b.s_addr) return 0; /* stf interface makes single side match only */ return 32; } static struct in6_ifaddr * stf_getsrcifa6(ifp) struct ifnet *ifp; { struct ifaddr *ia; struct in_ifaddr *ia4; struct sockaddr_in6 *sin6; struct in_addr in; for (ia = TAILQ_FIRST(&ifp->if_addrlist); ia; ia = TAILQ_NEXT(ia, ifa_list)) { if (ia->ifa_addr == NULL) continue; if (ia->ifa_addr->sa_family != AF_INET6) continue; sin6 = (struct sockaddr_in6 *)ia->ifa_addr; if (!IN6_IS_ADDR_6TO4(&sin6->sin6_addr)) continue; bcopy(GET_V4(&sin6->sin6_addr), &in, sizeof(in)); LIST_FOREACH(ia4, INADDR_HASH(in.s_addr), ia_hash) if (ia4->ia_addr.sin_addr.s_addr == in.s_addr) break; if (ia4 == NULL) continue; return (struct in6_ifaddr *)ia; } return NULL; } static int stf_output(ifp, m, dst, rt) struct ifnet *ifp; struct mbuf *m; struct sockaddr *dst; struct rtentry *rt; { struct stf_softc *sc; struct sockaddr_in6 *dst6; struct in_addr in4; caddr_t ptr; struct sockaddr_in *dst4; u_int8_t tos; struct ip *ip; struct ip6_hdr *ip6; struct in6_ifaddr *ia6; #ifdef MAC int error; error = mac_check_ifnet_transmit(ifp, m); if (error) { m_freem(m); return (error); } #endif sc = (struct stf_softc*)ifp; dst6 = (struct sockaddr_in6 *)dst; /* just in case */ if ((ifp->if_flags & IFF_UP) == 0) { m_freem(m); ifp->if_oerrors++; return ENETDOWN; } /* * If we don't have an ip4 address that match my inner ip6 address, * we shouldn't generate output. Without this check, we'll end up * using wrong IPv4 source. 
*/ ia6 = stf_getsrcifa6(ifp); if (ia6 == NULL) { m_freem(m); ifp->if_oerrors++; return ENETDOWN; } if (m->m_len < sizeof(*ip6)) { m = m_pullup(m, sizeof(*ip6)); if (!m) { ifp->if_oerrors++; return ENOBUFS; } } ip6 = mtod(m, struct ip6_hdr *); tos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; /* * Pickup the right outer dst addr from the list of candidates. * ip6_dst has priority as it may be able to give us shorter IPv4 hops. */ ptr = NULL; if (IN6_IS_ADDR_6TO4(&ip6->ip6_dst)) ptr = GET_V4(&ip6->ip6_dst); else if (IN6_IS_ADDR_6TO4(&dst6->sin6_addr)) ptr = GET_V4(&dst6->sin6_addr); else { m_freem(m); ifp->if_oerrors++; return ENETUNREACH; } bcopy(ptr, &in4, sizeof(in4)); #if NBPFILTER > 0 if (ifp->if_bpf) { /* * We need to prepend the address family as * a four byte field. Cons up a dummy header * to pacify bpf. This is safe because bpf * will only read from the mbuf (i.e., it won't * try to free it or keep a pointer a to it). */ u_int32_t af = AF_INET6; #ifdef HAVE_OLD_BPF struct mbuf m0; m0.m_next = m; m0.m_len = 4; m0.m_data = (char *)⁡ BPF_MTAP(ifp, &m0); #else bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m); #endif } #endif /*NBPFILTER > 0*/ M_PREPEND(m, sizeof(struct ip), M_DONTWAIT); if (m && m->m_len < sizeof(struct ip)) m = m_pullup(m, sizeof(struct ip)); if (m == NULL) { ifp->if_oerrors++; return ENOBUFS; } ip = mtod(m, struct ip *); bzero(ip, sizeof(*ip)); bcopy(GET_V4(&((struct sockaddr_in6 *)&ia6->ia_addr)->sin6_addr), &ip->ip_src, sizeof(ip->ip_src)); bcopy(&in4, &ip->ip_dst, sizeof(ip->ip_dst)); ip->ip_p = IPPROTO_IPV6; ip->ip_ttl = ip_stf_ttl; ip->ip_len = m->m_pkthdr.len; /*host order*/ if (ifp->if_flags & IFF_LINK1) ip_ecn_ingress(ECN_ALLOWED, &ip->ip_tos, &tos); else ip_ecn_ingress(ECN_NOCARE, &ip->ip_tos, &tos); /* * XXXRW: Locking of sc_ro required. 
 */
	/* Reuse the route cached in sc_ro when it still points at ip_dst. */
	dst4 = (struct sockaddr_in *)&sc->sc_ro.ro_dst;
	if (dst4->sin_family != AF_INET ||
	    bcmp(&dst4->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst)) != 0) {
		/* cache route doesn't match */
		dst4->sin_family = AF_INET;
		dst4->sin_len = sizeof(struct sockaddr_in);
		bcopy(&ip->ip_dst, &dst4->sin_addr, sizeof(dst4->sin_addr));
		if (sc->sc_ro.ro_rt) {
			RTFREE(sc->sc_ro.ro_rt);
			sc->sc_ro.ro_rt = NULL;
		}
	}

	if (sc->sc_ro.ro_rt == NULL) {
		rtalloc(&sc->sc_ro);
		if (sc->sc_ro.ro_rt == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			return ENETUNREACH;
		}
	}

	ifp->if_opackets++;
	return ip_output(m, NULL, &sc->sc_ro, 0, NULL, NULL);
}

/*
 * isrfc1918addr:
 *
 * Return 1 if *in falls into one of the RFC 1918 private ranges
 * (10/8, 172.16/12 or 192.168/16), otherwise 0.
 */
static int
isrfc1918addr(in)
	struct in_addr *in;
{
	/*
	 * returns 1 if private address range:
	 * 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
	 */
	if ((ntohl(in->s_addr) & 0xff000000) >> 24 == 10 ||
	    (ntohl(in->s_addr) & 0xfff00000) >> 16 == 172 * 256 + 16 ||
	    (ntohl(in->s_addr) & 0xffff0000) >> 16 == 192 * 256 + 168)
		return 1;

	return 0;
}

/*
 * stf_checkaddr4:
 *
 * Sanity-check an outer IPv4 address.  Returns 0 if acceptable,
 * -1 if the packet should be dropped.
 */
static int
stf_checkaddr4(sc, in, inifp)
	struct stf_softc *sc;
	struct in_addr *in;
	struct ifnet *inifp;	/* incoming interface */
{
	struct in_ifaddr *ia4;

	/*
	 * reject packets with the following address:
	 * 224.0.0.0/4 0.0.0.0/8 127.0.0.0/8 255.0.0.0/8
	 */
	if (IN_MULTICAST(ntohl(in->s_addr)))
		return -1;
	switch ((ntohl(in->s_addr) & 0xff000000) >> 24) {
	case 0: case 127: case 255:
		return -1;
	}

	/*
	 * reject packets with private address range.
 * (requirement from RFC3056 section 2 1st paragraph)
	 */
	if (isrfc1918addr(in))
		return -1;

	/*
	 * reject packets with broadcast
	 */
	for (ia4 = TAILQ_FIRST(&in_ifaddrhead);
	     ia4;
	     ia4 = TAILQ_NEXT(ia4, ia_link))
	{
		if ((ia4->ia_ifa.ifa_ifp->if_flags & IFF_BROADCAST) == 0)
			continue;
		if (in->s_addr == ia4->ia_broadaddr.sin_addr.s_addr)
			return -1;
	}

	/*
	 * perform ingress filter
	 */
	if (sc && (sc->sc_if.if_flags & IFF_LINK2) == 0 && inifp) {
		struct sockaddr_in sin;
		struct rtentry *rt;

		bzero(&sin, sizeof(sin));
		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_addr = *in;
		rt = rtalloc1((struct sockaddr *)&sin, 0, 0UL);
		/* drop if the route back to the source is not via inifp */
		if (!rt || rt->rt_ifp != inifp) {
#if 0
			log(LOG_WARNING, "%s: packet from 0x%x dropped "
			    "due to ingress filter\n", if_name(&sc->sc_if),
			    (u_int32_t)ntohl(sin.sin_addr.s_addr));
#endif
			if (rt)
				rtfree(rt);
			return -1;
		}
		rtfree(rt);
	}

	return 0;
}

/*
 * stf_checkaddr6:
 *
 * Sanity-check an IPv6 address.  Returns 0 if acceptable, -1 to drop.
 * 6to4 addresses are validated via their embedded IPv4 address.
 */
static int
stf_checkaddr6(sc, in6, inifp)
	struct stf_softc *sc;
	struct in6_addr *in6;
	struct ifnet *inifp;	/* incoming interface */
{
	/*
	 * check 6to4 addresses
	 */
	if (IN6_IS_ADDR_6TO4(in6)) {
		struct in_addr in4;
		bcopy(GET_V4(in6), &in4, sizeof(in4));
		return stf_checkaddr4(sc, &in4, inifp);
	}

	/*
	 * reject anything that look suspicious.  the test is implemented
	 * in ip6_input too, but we check here as well to
	 * (1) reject bad packets earlier, and
	 * (2) to be safe against future ip6_input change.
	 */
	if (IN6_IS_ADDR_V4COMPAT(in6) || IN6_IS_ADDR_V4MAPPED(in6))
		return -1;

	return 0;
}

/*
 * in_stf_input:
 *
 * Handler for IPv6-in-IPv4 (protocol IPPROTO_IPV6) packets delivered
 * by the encap framework.  Validates outer and inner addresses, then
 * re-dispatches the decapsulated IPv6 packet.
 */
void
in_stf_input(m, off)
	struct mbuf *m;
	int off;
{
	int proto;
	struct stf_softc *sc;
	struct ip *ip;
	struct ip6_hdr *ip6;
	u_int8_t otos, itos;
	struct ifnet *ifp;

	proto = mtod(m, struct ip *)->ip_p;

	if (proto != IPPROTO_IPV6) {
		m_freem(m);
		return;
	}

	ip = mtod(m, struct ip *);

	sc = (struct stf_softc *)encap_getarg(m);

	if (sc == NULL || (sc->sc_if.if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}

	ifp = &sc->sc_if;

#ifdef MAC
	mac_create_mbuf_from_ifnet(ifp, m);
#endif

	/*
	 * perform sanity check against outer src/dst.
* for source, perform ingress filter as well. */ if (stf_checkaddr4(sc, &ip->ip_dst, NULL) < 0 || stf_checkaddr4(sc, &ip->ip_src, m->m_pkthdr.rcvif) < 0) { m_freem(m); return; } otos = ip->ip_tos; m_adj(m, off); if (m->m_len < sizeof(*ip6)) { m = m_pullup(m, sizeof(*ip6)); if (!m) return; } ip6 = mtod(m, struct ip6_hdr *); /* * perform sanity check against inner src/dst. * for source, perform ingress filter as well. */ if (stf_checkaddr6(sc, &ip6->ip6_dst, NULL) < 0 || stf_checkaddr6(sc, &ip6->ip6_src, m->m_pkthdr.rcvif) < 0) { m_freem(m); return; } itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; if ((ifp->if_flags & IFF_LINK1) != 0) ip_ecn_egress(ECN_ALLOWED, &otos, &itos); else ip_ecn_egress(ECN_NOCARE, &otos, &itos); ip6->ip6_flow &= ~htonl(0xff << 20); ip6->ip6_flow |= htonl((u_int32_t)itos << 20); m->m_pkthdr.rcvif = ifp; if (ifp->if_bpf) { /* * We need to prepend the address family as * a four byte field. Cons up a dummy header * to pacify bpf. This is safe because bpf * will only read from the mbuf (i.e., it won't * try to free it or keep a pointer a to it). */ u_int32_t af = AF_INET6; #ifdef HAVE_OLD_BPF struct mbuf m0; m0.m_next = m; m0.m_len = 4; m0.m_data = (char *)⁡ BPF_MTAP(ifp, &m0); #else bpf_mtap2(ifp->if_bpf, &af, sizeof(ah), m); #endif } /* * Put the packet to the network layer input queue according to the * specified address family. * See net/if_gif.c for possible issues with packet processing * reorder due to extra queueing. 
*/ ifp->if_ipackets++; ifp->if_ibytes += m->m_pkthdr.len; netisr_dispatch(NETISR_IPV6, m); } /* ARGSUSED */ static void stf_rtrequest(cmd, rt, info) int cmd; struct rtentry *rt; struct rt_addrinfo *info; { RT_LOCK_ASSERT(rt); if (rt) rt->rt_rmx.rmx_mtu = IPV6_MMTU; } static int stf_ioctl(ifp, cmd, data) struct ifnet *ifp; u_long cmd; caddr_t data; { struct ifaddr *ifa; struct ifreq *ifr; struct sockaddr_in6 *sin6; struct in_addr addr; int error; error = 0; switch (cmd) { case SIOCSIFADDR: ifa = (struct ifaddr *)data; if (ifa == NULL || ifa->ifa_addr->sa_family != AF_INET6) { error = EAFNOSUPPORT; break; } sin6 = (struct sockaddr_in6 *)ifa->ifa_addr; if (!IN6_IS_ADDR_6TO4(&sin6->sin6_addr)) { error = EINVAL; break; } bcopy(GET_V4(&sin6->sin6_addr), &addr, sizeof(addr)); if (isrfc1918addr(&addr)) { error = EINVAL; break; } ifa->ifa_rtrequest = stf_rtrequest; ifp->if_flags |= IFF_UP; break; case SIOCADDMULTI: case SIOCDELMULTI: ifr = (struct ifreq *)data; if (ifr && ifr->ifr_addr.sa_family == AF_INET6) ; else error = EAFNOSUPPORT; break; default: error = EINVAL; break; } return error; } Index: head/sys/net/if_tap.c =================================================================== --- head/sys/net/if_tap.c (revision 129879) +++ head/sys/net/if_tap.c (revision 129880) @@ -1,892 +1,893 @@ /* * Copyright (C) 1999-2000 by Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * BASED ON: * ------------------------------------------------------------------------- * * Copyright (c) 1988, Julian Onions * Nottingham University 1987. */ /* * $FreeBSD$ * $Id: if_tap.c,v 0.21 2000/07/23 21:46:02 max Exp $ */ #include "opt_inet.h" #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define CDEV_NAME "tap" #define TAPDEBUG if (tapdebug) printf #define TAP "tap" #define VMNET "vmnet" #define TAPMAXUNIT 0x7fff #define VMNET_DEV_MASK CLONE_FLAG0 /* module */ static int tapmodevent(module_t, int, void *); /* device */ static void tapclone(void *, char *, int, dev_t *); static void tapcreate(dev_t); /* network interface */ static void tapifstart(struct ifnet *); static int tapifioctl(struct ifnet *, u_long, caddr_t); static void tapifinit(void *); /* character device */ static d_open_t tapopen; static d_close_t tapclose; static d_read_t tapread; static d_write_t tapwrite; static d_ioctl_t tapioctl; static d_poll_t tappoll; static struct cdevsw tap_cdevsw = { .d_version = D_VERSION, .d_flags = D_PSEUDO | 
D_NEEDGIANT, .d_open = tapopen, .d_close = tapclose, .d_read = tapread, .d_write = tapwrite, .d_ioctl = tapioctl, .d_poll = tappoll, .d_name = CDEV_NAME, }; /* * All global variables in if_tap.c are locked with tapmtx, with the * exception of tapdebug, which is accessed unlocked; tapclones is * static at runtime. */ static struct mtx tapmtx; static int tapdebug = 0; /* debug flag */ static SLIST_HEAD(, tap_softc) taphead; /* first device */ static struct clonedevs *tapclones; MALLOC_DECLARE(M_TAP); MALLOC_DEFINE(M_TAP, CDEV_NAME, "Ethernet tunnel interface"); SYSCTL_INT(_debug, OID_AUTO, if_tap_debug, CTLFLAG_RW, &tapdebug, 0, ""); DEV_MODULE(if_tap, tapmodevent, NULL); /* * tapmodevent * * module event handler */ static int tapmodevent(mod, type, data) module_t mod; int type; void *data; { static eventhandler_tag eh_tag = NULL; struct tap_softc *tp = NULL; struct ifnet *ifp = NULL; int s; switch (type) { case MOD_LOAD: /* intitialize device */ mtx_init(&tapmtx, "tapmtx", NULL, MTX_DEF); SLIST_INIT(&taphead); clone_setup(&tapclones); eh_tag = EVENTHANDLER_REGISTER(dev_clone, tapclone, 0, 1000); if (eh_tag == NULL) { clone_cleanup(&tapclones); mtx_destroy(&tapmtx); return (ENOMEM); } return (0); case MOD_UNLOAD: /* * The EBUSY algorithm here can't quite atomically * guarantee that this is race-free since we have to * release the tap mtx to deregister the clone handler. */ mtx_lock(&tapmtx); SLIST_FOREACH(tp, &taphead, tap_next) { mtx_lock(&tp->tap_mtx); if (tp->tap_flags & TAP_OPEN) { mtx_unlock(&tp->tap_mtx); mtx_unlock(&tapmtx); return (EBUSY); } mtx_unlock(&tp->tap_mtx); } mtx_unlock(&tapmtx); EVENTHANDLER_DEREGISTER(dev_clone, eh_tag); mtx_lock(&tapmtx); while ((tp = SLIST_FIRST(&taphead)) != NULL) { SLIST_REMOVE_HEAD(&taphead, tap_next); mtx_unlock(&tapmtx); ifp = &tp->tap_if; TAPDEBUG("detaching %s\n", ifp->if_xname); /* Unlocked read. 
*/ KASSERT(!(tp->tap_flags & TAP_OPEN), ("%s flags is out of sync", ifp->if_xname)); destroy_dev(tp->tap_dev); s = splimp(); ether_ifdetach(ifp); splx(s); mtx_destroy(&tp->tap_mtx); free(tp, M_TAP); mtx_lock(&tapmtx); } mtx_unlock(&tapmtx); clone_cleanup(&tapclones); break; default: return (EOPNOTSUPP); } return (0); } /* tapmodevent */ /* * DEVFS handler * * We need to support two kind of devices - tap and vmnet */ static void tapclone(arg, name, namelen, dev) void *arg; char *name; int namelen; dev_t *dev; { u_int extra; int i, unit; char *device_name = name; if (*dev != NODEV) return; device_name = TAP; extra = 0; if (strcmp(name, TAP) == 0) { unit = -1; } else if (strcmp(name, VMNET) == 0) { device_name = VMNET; extra = VMNET_DEV_MASK; unit = -1; } else if (dev_stdclone(name, NULL, device_name, &unit) != 1) { device_name = VMNET; extra = VMNET_DEV_MASK; if (dev_stdclone(name, NULL, device_name, &unit) != 1) return; } /* find any existing device, or allocate new unit number */ i = clone_create(&tapclones, &tap_cdevsw, &unit, dev, extra); if (i) { *dev = make_dev(&tap_cdevsw, unit2minor(unit | extra), UID_ROOT, GID_WHEEL, 0600, "%s%d", device_name, unit); if (*dev != NULL) (*dev)->si_flags |= SI_CHEAPCLONE; } } /* tapclone */ /* * tapcreate * * to create interface */ static void tapcreate(dev) dev_t dev; { struct ifnet *ifp = NULL; struct tap_softc *tp = NULL; unsigned short macaddr_hi; int unit, s; char *name = NULL; dev->si_flags &= ~SI_CHEAPCLONE; /* allocate driver storage and create device */ MALLOC(tp, struct tap_softc *, sizeof(*tp), M_TAP, M_WAITOK | M_ZERO); mtx_init(&tp->tap_mtx, "tap_mtx", NULL, MTX_DEF); mtx_lock(&tapmtx); SLIST_INSERT_HEAD(&taphead, tp, tap_next); mtx_unlock(&tapmtx); unit = dev2unit(dev); /* select device: tap or vmnet */ if (unit & VMNET_DEV_MASK) { name = VMNET; tp->tap_flags |= TAP_VMNET; } else name = TAP; unit &= TAPMAXUNIT; TAPDEBUG("tapcreate(%s%d). 
minor = %#x\n", name, unit, minor(dev)); /* generate fake MAC address: 00 bd xx xx xx unit_no */ macaddr_hi = htons(0x00bd); bcopy(&macaddr_hi, &tp->arpcom.ac_enaddr[0], sizeof(short)); bcopy(&ticks, &tp->arpcom.ac_enaddr[2], sizeof(long)); tp->arpcom.ac_enaddr[5] = (u_char)unit; /* fill the rest and attach interface */ ifp = &tp->tap_if; ifp->if_softc = tp; if_initname(ifp, name, unit); ifp->if_init = tapifinit; ifp->if_start = tapifstart; ifp->if_ioctl = tapifioctl; ifp->if_mtu = ETHERMTU; ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST); ifp->if_snd.ifq_maxlen = ifqmaxlen; dev->si_drv1 = tp; tp->tap_dev = dev; s = splimp(); ether_ifattach(ifp, tp->arpcom.ac_enaddr); splx(s); mtx_lock(&tp->tap_mtx); tp->tap_flags |= TAP_INITED; mtx_unlock(&tp->tap_mtx); TAPDEBUG("interface %s is created. minor = %#x\n", ifp->if_xname, minor(dev)); } /* tapcreate */ /* * tapopen * * to open tunnel. must be superuser */ static int tapopen(dev, flag, mode, td) dev_t dev; int flag; int mode; struct thread *td; { struct tap_softc *tp = NULL; int error; if ((error = suser(td)) != 0) return (error); if ((dev2unit(dev) & CLONE_UNITMASK) > TAPMAXUNIT) return (ENXIO); /* * XXXRW: Non-atomic test-and-set of si_drv1. Currently protected * by Giant, but the race actually exists under memory pressure as * well even when running with Giant, as malloc() may sleep. */ tp = dev->si_drv1; if (tp == NULL) { tapcreate(dev); tp = dev->si_drv1; } mtx_lock(&tp->tap_mtx); if (tp->tap_flags & TAP_OPEN) { mtx_unlock(&tp->tap_mtx); return (EBUSY); } bcopy(tp->arpcom.ac_enaddr, tp->ether_addr, sizeof(tp->ether_addr)); tp->tap_pid = td->td_proc->p_pid; tp->tap_flags |= TAP_OPEN; mtx_unlock(&tp->tap_mtx); TAPDEBUG("%s is open. 
minor = %#x\n", tp->tap_if.if_xname, minor(dev)); return (0); } /* tapopen */ /* * tapclose * * close the device - mark i/f down & delete routing info */ static int tapclose(dev, foo, bar, td) dev_t dev; int foo; int bar; struct thread *td; { struct tap_softc *tp = dev->si_drv1; struct ifnet *ifp = &tp->tap_if; int s; /* junk all pending output */ IF_DRAIN(&ifp->if_snd); /* * do not bring the interface down, and do not anything with * interface, if we are in VMnet mode. just close the device. */ mtx_lock(&tp->tap_mtx); if (((tp->tap_flags & TAP_VMNET) == 0) && (ifp->if_flags & IFF_UP)) { mtx_unlock(&tp->tap_mtx); s = splimp(); if_down(ifp); if (ifp->if_flags & IFF_RUNNING) { /* find internet addresses and delete routes */ struct ifaddr *ifa = NULL; /* In desparate need of ifaddr locking. */ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family == AF_INET) { rtinit(ifa, (int)RTM_DELETE, 0); /* remove address from interface */ bzero(ifa->ifa_addr, sizeof(*(ifa->ifa_addr))); bzero(ifa->ifa_dstaddr, sizeof(*(ifa->ifa_dstaddr))); bzero(ifa->ifa_netmask, sizeof(*(ifa->ifa_netmask))); } } ifp->if_flags &= ~IFF_RUNNING; } splx(s); } else mtx_unlock(&tp->tap_mtx); funsetown(&tp->tap_sigio); selwakeuppri(&tp->tap_rsel, PZERO+1); mtx_lock(&tp->tap_mtx); tp->tap_flags &= ~TAP_OPEN; tp->tap_pid = 0; mtx_unlock(&tp->tap_mtx); TAPDEBUG("%s is closed. 
minor = %#x\n", ifp->if_xname, minor(dev)); return (0); } /* tapclose */ /* * tapifinit * * network interface initialization function */ static void tapifinit(xtp) void *xtp; { struct tap_softc *tp = (struct tap_softc *)xtp; struct ifnet *ifp = &tp->tap_if; TAPDEBUG("initializing %s\n", ifp->if_xname); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* attempt to start output */ tapifstart(ifp); } /* tapifinit */ /* * tapifioctl * * Process an ioctl request on network interface */ static int tapifioctl(ifp, cmd, data) struct ifnet *ifp; u_long cmd; caddr_t data; { struct tap_softc *tp = (struct tap_softc *)(ifp->if_softc); struct ifstat *ifs = NULL; int s, dummy; switch (cmd) { case SIOCSIFFLAGS: /* XXX -- just like vmnet does */ case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCGIFSTATUS: s = splimp(); ifs = (struct ifstat *)data; dummy = strlen(ifs->ascii); mtx_lock(&tp->tap_mtx); if (tp->tap_pid != 0 && dummy < sizeof(ifs->ascii)) snprintf(ifs->ascii + dummy, sizeof(ifs->ascii) - dummy, "\tOpened by PID %d\n", tp->tap_pid); mtx_unlock(&tp->tap_mtx); splx(s); break; default: s = splimp(); dummy = ether_ioctl(ifp, cmd, data); splx(s); return (dummy); } return (0); } /* tapifioctl */ /* * tapifstart * * queue packets from higher level ready to put out */ static void tapifstart(ifp) struct ifnet *ifp; { struct tap_softc *tp = ifp->if_softc; int s; TAPDEBUG("%s starting\n", ifp->if_xname); /* * do not junk pending output if we are in VMnet mode. * XXX: can this do any harm because of queue overflow? */ mtx_lock(&tp->tap_mtx); if (((tp->tap_flags & TAP_VMNET) == 0) && ((tp->tap_flags & TAP_READY) != TAP_READY)) { struct mbuf *m = NULL; mtx_unlock(&tp->tap_mtx); /* Unlocked read. 
*/ TAPDEBUG("%s not ready, tap_flags = 0x%x\n", ifp->if_xname, tp->tap_flags); s = splimp(); do { IF_DEQUEUE(&ifp->if_snd, m); if (m != NULL) m_freem(m); ifp->if_oerrors ++; } while (m != NULL); splx(s); return; } mtx_unlock(&tp->tap_mtx); s = splimp(); ifp->if_flags |= IFF_OACTIVE; if (ifp->if_snd.ifq_len != 0) { mtx_lock(&tp->tap_mtx); if (tp->tap_flags & TAP_RWAIT) { tp->tap_flags &= ~TAP_RWAIT; wakeup(tp); } if ((tp->tap_flags & TAP_ASYNC) && (tp->tap_sigio != NULL)) { mtx_unlock(&tp->tap_mtx); pgsigio(&tp->tap_sigio, SIGIO, 0); } else mtx_unlock(&tp->tap_mtx); selwakeuppri(&tp->tap_rsel, PZERO+1); ifp->if_opackets ++; /* obytes are counted in ether_output */ } ifp->if_flags &= ~IFF_OACTIVE; splx(s); } /* tapifstart */ /* * tapioctl * * the cdevsw interface is now pretty minimal */ static int tapioctl(dev, cmd, data, flag, td) dev_t dev; u_long cmd; caddr_t data; int flag; struct thread *td; { struct tap_softc *tp = dev->si_drv1; struct ifnet *ifp = &tp->tap_if; struct tapinfo *tapp = NULL; int s; int f; switch (cmd) { case TAPSIFINFO: s = splimp(); tapp = (struct tapinfo *)data; ifp->if_mtu = tapp->mtu; ifp->if_type = tapp->type; ifp->if_baudrate = tapp->baudrate; splx(s); break; case TAPGIFINFO: tapp = (struct tapinfo *)data; tapp->mtu = ifp->if_mtu; tapp->type = ifp->if_type; tapp->baudrate = ifp->if_baudrate; break; case TAPSDEBUG: tapdebug = *(int *)data; break; case TAPGDEBUG: *(int *)data = tapdebug; break; case FIONBIO: break; case FIOASYNC: s = splimp(); mtx_lock(&tp->tap_mtx); if (*(int *)data) tp->tap_flags |= TAP_ASYNC; else tp->tap_flags &= ~TAP_ASYNC; mtx_unlock(&tp->tap_mtx); splx(s); break; case FIONREAD: s = splimp(); if (ifp->if_snd.ifq_head) { struct mbuf *mb = ifp->if_snd.ifq_head; for(*(int *)data = 0;mb != NULL;mb = mb->m_next) *(int *)data += mb->m_len; } else *(int *)data = 0; splx(s); break; case FIOSETOWN: return (fsetown(*(int *)data, &tp->tap_sigio)); case FIOGETOWN: *(int *)data = fgetown(&tp->tap_sigio); return (0); /* this is 
deprecated, FIOSETOWN should be used instead */ case TIOCSPGRP: return (fsetown(-(*(int *)data), &tp->tap_sigio)); /* this is deprecated, FIOGETOWN should be used instead */ case TIOCGPGRP: *(int *)data = -fgetown(&tp->tap_sigio); return (0); /* VMware/VMnet port ioctl's */ case SIOCGIFFLAGS: /* get ifnet flags */ bcopy(&ifp->if_flags, data, sizeof(ifp->if_flags)); break; case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */ f = *(int *)data; f &= 0x0fff; f &= ~IFF_CANTCHANGE; f |= IFF_UP; s = splimp(); ifp->if_flags = f | (ifp->if_flags & IFF_CANTCHANGE); splx(s); break; case OSIOCGIFADDR: /* get MAC address of the remote side */ case SIOCGIFADDR: mtx_lock(&tp->tap_mtx); bcopy(tp->ether_addr, data, sizeof(tp->ether_addr)); mtx_unlock(&tp->tap_mtx); break; case SIOCSIFADDR: /* set MAC address of the remote side */ mtx_lock(&tp->tap_mtx); bcopy(data, tp->ether_addr, sizeof(tp->ether_addr)); mtx_unlock(&tp->tap_mtx); break; default: return (ENOTTY); } return (0); } /* tapioctl */ /* * tapread * * the cdevsw read interface - reads a packet at a time, or at * least as much of a packet as can be read */ static int tapread(dev, uio, flag) dev_t dev; struct uio *uio; int flag; { struct tap_softc *tp = dev->si_drv1; struct ifnet *ifp = &tp->tap_if; struct mbuf *m = NULL; int error = 0, len, s; TAPDEBUG("%s reading, minor = %#x\n", ifp->if_xname, minor(dev)); mtx_lock(&tp->tap_mtx); if ((tp->tap_flags & TAP_READY) != TAP_READY) { mtx_unlock(&tp->tap_mtx); /* Unlocked read. */ TAPDEBUG("%s not ready. 
minor = %#x, tap_flags = 0x%x\n", ifp->if_xname, minor(dev), tp->tap_flags); return (EHOSTDOWN); } tp->tap_flags &= ~TAP_RWAIT; mtx_unlock(&tp->tap_mtx); /* sleep until we get a packet */ do { s = splimp(); IF_DEQUEUE(&ifp->if_snd, m); splx(s); if (m == NULL) { if (flag & IO_NDELAY) return (EWOULDBLOCK); mtx_lock(&tp->tap_mtx); tp->tap_flags |= TAP_RWAIT; mtx_unlock(&tp->tap_mtx); error = tsleep(tp,PCATCH|(PZERO+1),"taprd",0); if (error) return (error); } } while (m == NULL); /* feed packet to bpf */ BPF_MTAP(ifp, m); /* xfer packet to user space */ while ((m != NULL) && (uio->uio_resid > 0) && (error == 0)) { len = min(uio->uio_resid, m->m_len); if (len == 0) break; error = uiomove(mtod(m, void *), len, uio); m = m_free(m); } if (m != NULL) { TAPDEBUG("%s dropping mbuf, minor = %#x\n", ifp->if_xname, minor(dev)); m_freem(m); } return (error); } /* tapread */ /* * tapwrite * * the cdevsw write interface - an atomic write is a packet - or else! */ static int tapwrite(dev, uio, flag) dev_t dev; struct uio *uio; int flag; { struct tap_softc *tp = dev->si_drv1; struct ifnet *ifp = &tp->tap_if; struct mbuf *top = NULL, **mp = NULL, *m = NULL; int error = 0, tlen, mlen; TAPDEBUG("%s writting, minor = %#x\n", ifp->if_xname, minor(dev)); if (uio->uio_resid == 0) return (0); if ((uio->uio_resid < 0) || (uio->uio_resid > TAPMRU)) { TAPDEBUG("%s invalid packet len = %d, minor = %#x\n", ifp->if_xname, uio->uio_resid, minor(dev)); return (EIO); } tlen = uio->uio_resid; /* get a header mbuf */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); mlen = MHLEN; top = 0; mp = ⊤ while ((error == 0) && (uio->uio_resid > 0)) { m->m_len = min(mlen, uio->uio_resid); error = uiomove(mtod(m, void *), m->m_len, uio); *mp = m; mp = &m->m_next; if (uio->uio_resid > 0) { MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { error = ENOBUFS; break; } mlen = MLEN; } } if (error) { ifp->if_ierrors ++; if (top) m_freem(top); return (error); } top->m_pkthdr.len = tlen; top->m_pkthdr.rcvif = 
ifp; /* Pass packet up to parent. */ (*ifp->if_input)(ifp, top); ifp->if_ipackets ++; /* ibytes are counted in parent */ return (0); } /* tapwrite */ /* * tappoll * * the poll interface, this is only useful on reads * really. the write detect always returns true, write never blocks * anyway, it either accepts the packet or drops it */ static int tappoll(dev, events, td) dev_t dev; int events; struct thread *td; { struct tap_softc *tp = dev->si_drv1; struct ifnet *ifp = &tp->tap_if; int s, revents = 0; TAPDEBUG("%s polling, minor = %#x\n", ifp->if_xname, minor(dev)); s = splimp(); if (events & (POLLIN | POLLRDNORM)) { if (ifp->if_snd.ifq_len > 0) { TAPDEBUG("%s have data in queue. len = %d, " \ "minor = %#x\n", ifp->if_xname, ifp->if_snd.ifq_len, minor(dev)); revents |= (events & (POLLIN | POLLRDNORM)); } else { TAPDEBUG("%s waiting for data, minor = %#x\n", ifp->if_xname, minor(dev)); selrecord(td, &tp->tap_rsel); } } if (events & (POLLOUT | POLLWRNORM)) revents |= (events & (POLLOUT | POLLWRNORM)); splx(s); return (revents); } /* tappoll */ Index: head/sys/netgraph/bluetooth/common/ng_bluetooth.c =================================================================== --- head/sys/netgraph/bluetooth/common/ng_bluetooth.c (revision 129879) +++ head/sys/netgraph/bluetooth/common/ng_bluetooth.c (revision 129880) @@ -1,250 +1,251 @@ /* * bluetooth.c * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_bluetooth.c,v 1.3 2003/04/26 22:37:31 max Exp $ * $FreeBSD$ */ #include #include #include #include +#include #include #include /* * Bluetooth stack sysctl globals */ static u_int32_t bluetooth_hci_command_timeout_value = 5; /* sec */ static u_int32_t bluetooth_hci_connect_timeout_value = 60; /* sec */ static u_int32_t bluetooth_hci_max_neighbor_age_value = 600; /* sec */ static u_int32_t bluetooth_l2cap_rtx_timeout_value = 60; /* sec */ static u_int32_t bluetooth_l2cap_ertx_timeout_value = 300; /* sec */ /* * Define sysctl tree that shared by other parts of Bluetooth stack */ SYSCTL_NODE(_net, OID_AUTO, bluetooth, CTLFLAG_RW, 0, "Bluetooth family"); SYSCTL_INT(_net_bluetooth, OID_AUTO, version, CTLFLAG_RD, 0, NG_BLUETOOTH_VERSION, ""); /* * HCI */ SYSCTL_NODE(_net_bluetooth, OID_AUTO, hci, CTLFLAG_RW, 0, "Bluetooth HCI family"); static int bluetooth_set_hci_command_timeout_value(SYSCTL_HANDLER_ARGS) { u_int32_t value; int error; value = bluetooth_hci_command_timeout_value; error = sysctl_handle_int(oidp, &value, sizeof(value), req); if (error == 0 && req->newptr != NULL) { if (value > 0) bluetooth_hci_command_timeout_value = value; else error = EINVAL; } return (error); } /* 
bluetooth_set_hci_command_timeout_value */ SYSCTL_PROC(_net_bluetooth_hci, OID_AUTO, command_timeout, CTLTYPE_INT | CTLFLAG_RW, &bluetooth_hci_command_timeout_value, 5, bluetooth_set_hci_command_timeout_value, "I", "HCI command timeout (sec)"); static int bluetooth_set_hci_connect_timeout_value(SYSCTL_HANDLER_ARGS) { u_int32_t value; int error; value = bluetooth_hci_connect_timeout_value; error = sysctl_handle_int(oidp, &value, sizeof(value), req); if (error == 0 && req->newptr != NULL) { if (0 < value && value <= bluetooth_l2cap_rtx_timeout_value) bluetooth_hci_connect_timeout_value = value; else error = EINVAL; } return (error); } /* bluetooth_set_hci_connect_timeout_value */ SYSCTL_PROC(_net_bluetooth_hci, OID_AUTO, connection_timeout, CTLTYPE_INT | CTLFLAG_RW, &bluetooth_hci_connect_timeout_value, 60, bluetooth_set_hci_connect_timeout_value, "I", "HCI connect timeout (sec)"); SYSCTL_INT(_net_bluetooth_hci, OID_AUTO, max_neighbor_age, CTLFLAG_RW, &bluetooth_hci_max_neighbor_age_value, 600, "Maximal HCI neighbor cache entry age (sec)"); /* * L2CAP */ SYSCTL_NODE(_net_bluetooth, OID_AUTO, l2cap, CTLFLAG_RW, 0, "Bluetooth L2CAP family"); static int bluetooth_set_l2cap_rtx_timeout_value(SYSCTL_HANDLER_ARGS) { u_int32_t value; int error; value = bluetooth_l2cap_rtx_timeout_value; error = sysctl_handle_int(oidp, &value, sizeof(value), req); if (error == 0 && req->newptr != NULL) { if (bluetooth_hci_connect_timeout_value <= value && value <= bluetooth_l2cap_ertx_timeout_value) bluetooth_l2cap_rtx_timeout_value = value; else error = EINVAL; } return (error); } /* bluetooth_set_l2cap_rtx_timeout_value */ SYSCTL_PROC(_net_bluetooth_l2cap, OID_AUTO, rtx_timeout, CTLTYPE_INT | CTLFLAG_RW, &bluetooth_l2cap_rtx_timeout_value, 60, bluetooth_set_l2cap_rtx_timeout_value, "I", "L2CAP RTX timeout (sec)"); static int bluetooth_set_l2cap_ertx_timeout_value(SYSCTL_HANDLER_ARGS) { u_int32_t value; int error; value = bluetooth_l2cap_ertx_timeout_value; error = sysctl_handle_int(oidp, 
&value, sizeof(value), req); if (error == 0 && req->newptr != NULL) { if (value >= bluetooth_l2cap_rtx_timeout_value) bluetooth_l2cap_ertx_timeout_value = value; else error = EINVAL; } return (error); } /* bluetooth_set_l2cap_ertx_timeout_value */ SYSCTL_PROC(_net_bluetooth_l2cap, OID_AUTO, ertx_timeout, CTLTYPE_INT | CTLFLAG_RW, &bluetooth_l2cap_ertx_timeout_value, 300, bluetooth_set_l2cap_ertx_timeout_value, "I", "L2CAP ERTX timeout (sec)"); /* * Return various sysctl values */ u_int32_t bluetooth_hci_command_timeout(void) { return (bluetooth_hci_command_timeout_value * hz); } /* bluetooth_hci_command_timeout */ u_int32_t bluetooth_hci_connect_timeout(void) { return (bluetooth_hci_connect_timeout_value * hz); } /* bluetooth_hci_connect_timeout */ u_int32_t bluetooth_hci_max_neighbor_age(void) { return (bluetooth_hci_max_neighbor_age_value); } /* bluetooth_hci_max_neighbor_age */ u_int32_t bluetooth_l2cap_rtx_timeout(void) { return (bluetooth_l2cap_rtx_timeout_value * hz); } /* bluetooth_l2cap_rtx_timeout */ u_int32_t bluetooth_l2cap_ertx_timeout(void) { return (bluetooth_l2cap_ertx_timeout_value * hz); } /* bluetooth_l2cap_ertx_timeout */ /* * RFCOMM */ SYSCTL_NODE(_net_bluetooth, OID_AUTO, rfcomm, CTLFLAG_RW, 0, "Bluetooth RFCOMM family"); /* * Handle loading and unloading for this code. 
*/ static int bluetooth_modevent(module_t mod, int event, void *data) { int error = 0; switch (event) { case MOD_LOAD: break; case MOD_UNLOAD: break; default: error = EOPNOTSUPP; break; } return (error); } /* bluetooth_modevent */ /* * Module */ static moduledata_t bluetooth_mod = { "bluetooth", bluetooth_modevent, NULL }; DECLARE_MODULE(ng_bluetooth, bluetooth_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(ng_bluetooth, NG_BLUETOOTH_VERSION); Index: head/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c =================================================================== --- head/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c (revision 129879) +++ head/sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c (revision 129880) @@ -1,2729 +1,2730 @@ /* * ng_ubt.c * * Copyright (c) 2001-2002 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ng_ubt.c,v 1.16 2003/10/10 19:15:06 max Exp $ * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * USB methods */ USB_DECLARE_DRIVER(ubt); Static int ubt_modevent (module_t, int, void *); Static usbd_status ubt_request_start (ubt_softc_p); Static void ubt_request_complete (usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubt_request_complete2 (node_p, hook_p, void *, int); Static usbd_status ubt_intr_start (ubt_softc_p); Static void ubt_intr_complete (usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubt_intr_complete2 (node_p, hook_p, void *, int); Static usbd_status ubt_bulk_in_start (ubt_softc_p); Static void ubt_bulk_in_complete (usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubt_bulk_in_complete2 (node_p, hook_p, void *, int); Static usbd_status ubt_bulk_out_start (ubt_softc_p); Static void ubt_bulk_out_complete (usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubt_bulk_out_complete2 (node_p, hook_p, void *, int); Static usbd_status ubt_isoc_in_start (ubt_softc_p); Static void ubt_isoc_in_complete (usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubt_isoc_in_complete2 (node_p, hook_p, void *, int); Static usbd_status ubt_isoc_out_start (ubt_softc_p); Static void 
ubt_isoc_out_complete (usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubt_isoc_out_complete2 (node_p, hook_p, void *, int); Static void ubt_reset (ubt_softc_p); /* * Netgraph methods */ Static ng_constructor_t ng_ubt_constructor; Static ng_shutdown_t ng_ubt_shutdown; Static ng_newhook_t ng_ubt_newhook; Static ng_connect_t ng_ubt_connect; Static ng_disconnect_t ng_ubt_disconnect; Static ng_rcvmsg_t ng_ubt_rcvmsg; Static ng_rcvdata_t ng_ubt_rcvdata; /* Queue length */ Static const struct ng_parse_struct_field ng_ubt_node_qlen_type_fields[] = { { "queue", &ng_parse_int32_type, }, { "qlen", &ng_parse_int32_type, }, { NULL, } }; Static const struct ng_parse_type ng_ubt_node_qlen_type = { &ng_parse_struct_type, &ng_ubt_node_qlen_type_fields }; /* Stat info */ Static const struct ng_parse_struct_field ng_ubt_node_stat_type_fields[] = { { "pckts_recv", &ng_parse_uint32_type, }, { "bytes_recv", &ng_parse_uint32_type, }, { "pckts_sent", &ng_parse_uint32_type, }, { "bytes_sent", &ng_parse_uint32_type, }, { "oerrors", &ng_parse_uint32_type, }, { "ierrors", &ng_parse_uint32_type, }, { NULL, } }; Static const struct ng_parse_type ng_ubt_node_stat_type = { &ng_parse_struct_type, &ng_ubt_node_stat_type_fields }; /* Netgraph node command list */ Static const struct ng_cmdlist ng_ubt_cmdlist[] = { { NGM_UBT_COOKIE, NGM_UBT_NODE_SET_DEBUG, "set_debug", &ng_parse_uint16_type, NULL }, { NGM_UBT_COOKIE, NGM_UBT_NODE_GET_DEBUG, "get_debug", NULL, &ng_parse_uint16_type }, { NGM_UBT_COOKIE, NGM_UBT_NODE_SET_QLEN, "set_qlen", &ng_ubt_node_qlen_type, NULL }, { NGM_UBT_COOKIE, NGM_UBT_NODE_GET_QLEN, "get_qlen", &ng_ubt_node_qlen_type, &ng_ubt_node_qlen_type }, { NGM_UBT_COOKIE, NGM_UBT_NODE_GET_STAT, "get_stat", NULL, &ng_ubt_node_stat_type }, { NGM_UBT_COOKIE, NGM_UBT_NODE_RESET_STAT, "reset_stat", NULL, NULL }, { NGM_UBT_COOKIE, NGM_UBT_NODE_DEV_NODES, "dev_nodes", &ng_parse_uint16_type, NULL }, { 0, } }; /* Netgraph node type */ Static struct ng_type typestruct = { 
	/* Netgraph node type descriptor for the "ubt" node */
	.version =	NG_ABI_VERSION,
	.name =		NG_UBT_NODE_TYPE,
	.constructor =	ng_ubt_constructor,
	.rcvmsg =	ng_ubt_rcvmsg,
	.shutdown =	ng_ubt_shutdown,
	.newhook =	ng_ubt_newhook,
	.connect =	ng_ubt_connect,
	.rcvdata =	ng_ubt_rcvdata,
	.disconnect =	ng_ubt_disconnect,
	.cmdlist =	ng_ubt_cmdlist
};

/*
 * Device methods
 */

/* Device minor number encodes unit (upper nibble) and endpoint (lower nibble) */
#define UBT_UNIT(n)	((minor(n) >> 4) & 0xf)
#define UBT_ENDPOINT(n)	(minor(n) & 0xf)
#define UBT_MINOR(u, e)	(((u) << 4) | (e))
#define UBT_BSIZE	1024

Static d_open_t		ubt_open;
Static d_close_t	ubt_close;
Static d_read_t		ubt_read;
Static d_write_t	ubt_write;
Static d_ioctl_t	ubt_ioctl;
Static d_poll_t		ubt_poll;
Static void		ubt_create_device_nodes  (ubt_softc_p);
Static void		ubt_destroy_device_nodes (ubt_softc_p);

/* Character device switch for the per-endpoint /dev nodes */
Static struct cdevsw	ubt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	ubt_open,
	.d_close =	ubt_close,
	.d_read =	ubt_read,
	.d_write =	ubt_write,
	.d_ioctl =	ubt_ioctl,
	.d_poll =	ubt_poll,
	.d_name =	"ubt",
};

/*
 * Module
 */

DRIVER_MODULE(ubt, uhub, ubt_driver, ubt_devclass, ubt_modevent, 0);
MODULE_VERSION(ng_ubt, NG_BLUETOOTH_VERSION);
MODULE_DEPEND(ng_ubt, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION);

/****************************************************************************
 ****************************************************************************
 **                              USB specific
 ****************************************************************************
 ****************************************************************************/

/*
 * Load/Unload the driver module
 *
 * On load: register the Netgraph node type first, then hand the event to
 * the USB framework.  On unload: unregister the node type and, only if
 * that succeeded, let the USB framework detach the driver.
 */
Static int
ubt_modevent(module_t mod, int event, void *data)
{
	int	error;

	switch (event) {
	case MOD_LOAD:
		error = ng_newtype(&typestruct);
		if (error != 0)
			printf(
"%s: Could not register Netgraph node type, error=%d\n",
				NG_UBT_NODE_TYPE, error);
		else
			error = usbd_driver_load(mod, event, data);
		break;

	case MOD_UNLOAD:
		error = ng_rmtype(&typestruct);
		if (error == 0)
			error = usbd_driver_load(mod, event, data);
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
} /* ubt_modevent */

/*
 * Probe for a USB Bluetooth device
 *
 * Matches either by the Wireless/RF/Bluetooth device class triple, or by
 * explicit VendorID/ProductID for spec-violating devices.
 */
USB_MATCH(ubt)
{
	/*
	 * If for some reason device should not be attached then put
	 * VendorID/ProductID pair into the list below. Currently I
	 * do not know of any such devices. The format is as follows:
	 *
	 * { VENDOR_ID, PRODUCT_ID },
	 *
	 * where VENDOR_ID and PRODUCT_ID are hex numbers.
	 */

	Static struct usb_devno const	ubt_ignored_devices[] = {
		{ 0, 0 } /* This should be the last item in the list */
	};

	/*
	 * If device violates Bluetooth specification and has bDeviceClass,
	 * bDeviceSubClass and bDeviceProtocol set to wrong values then you
	 * could try to put VendorID/ProductID pair into the list below.
	 * Currently I do not know of any such devices.
	 */

	Static struct usb_devno const	ubt_broken_devices[] = {
		{ 0, 0 } /* This should be the last item in the list */
	};

	USB_MATCH_START(ubt, uaa);

	usb_device_descriptor_t	*dd = usbd_get_device_descriptor(uaa->device);

	/* Only match whole devices, and honor the explicit ignore list */
	if (uaa->iface == NULL ||
	    usb_lookup(ubt_ignored_devices, uaa->vendor, uaa->product))
		return (UMATCH_NONE);

	if (dd->bDeviceClass == UDCLASS_WIRELESS &&
	    dd->bDeviceSubClass == UDSUBCLASS_RF &&
	    dd->bDeviceProtocol == UDPROTO_BLUETOOTH)
		return (UMATCH_DEVCLASS_DEVSUBCLASS);

	if (usb_lookup(ubt_broken_devices, uaa->vendor, uaa->product))
		return (UMATCH_VENDOR_PRODUCT);

	return (UMATCH_NONE);
} /* USB_MATCH(ubt) */

/*
 * Attach the device
 */
USB_ATTACH(ubt)
{
	USB_ATTACH_START(ubt, sc, uaa);
	usb_config_descriptor_t		*cd = NULL;
	usb_interface_descriptor_t	*id = NULL;
	usb_endpoint_descriptor_t	*ed = NULL;
	char				 devinfo[UBT_BSIZE];
	usbd_status			 error;
	int				 i, ai, alt_no, isoc_in, isoc_out,
					 isoc_isize, isoc_osize;

	/* Get USB device info */
	sc->sc_udev = uaa->device;
	usbd_devinfo(sc->sc_udev, 0, devinfo);
	USB_ATTACH_SETUP;
	printf("%s: %s\n", USBDEVNAME(sc->sc_dev), devinfo);

	/*
	 * Initialize device softc structure
	 */

	/* State */
	sc->sc_debug = NG_UBT_WARN_LEVEL;
	sc->sc_flags = 0;
	NG_UBT_STAT_RESET(sc->sc_stat);

	/* Interfaces */
	sc->sc_iface0 = sc->sc_iface1 = NULL;
/* Interrupt pipe */ sc->sc_intr_ep = -1; sc->sc_intr_pipe = NULL; sc->sc_intr_xfer = NULL; sc->sc_intr_buffer = NULL; /* Control pipe */ sc->sc_ctrl_xfer = NULL; sc->sc_ctrl_buffer = NULL; NG_BT_MBUFQ_INIT(&sc->sc_cmdq, UBT_DEFAULT_QLEN); /* Bulk-in pipe */ sc->sc_bulk_in_ep = -1; sc->sc_bulk_in_pipe = NULL; sc->sc_bulk_in_xfer = NULL; sc->sc_bulk_in_buffer = NULL; /* Bulk-out pipe */ sc->sc_bulk_out_ep = -1; sc->sc_bulk_out_pipe = NULL; sc->sc_bulk_out_xfer = NULL; sc->sc_bulk_out_buffer = NULL; NG_BT_MBUFQ_INIT(&sc->sc_aclq, UBT_DEFAULT_QLEN); /* Isoc-in pipe */ sc->sc_isoc_in_ep = -1; sc->sc_isoc_in_pipe = NULL; sc->sc_isoc_in_xfer = NULL; /* Isoc-out pipe */ sc->sc_isoc_out_ep = -1; sc->sc_isoc_out_pipe = NULL; sc->sc_isoc_out_xfer = NULL; sc->sc_isoc_size = -1; NG_BT_MBUFQ_INIT(&sc->sc_scoq, UBT_DEFAULT_QLEN); /* Netgraph part */ sc->sc_node = NULL; sc->sc_hook = NULL; /* Device part */ sc->sc_ctrl_dev = sc->sc_intr_dev = sc->sc_bulk_dev = NODEV; sc->sc_refcnt = sc->sc_dying = 0; /* * XXX set configuration? * * Configure Bluetooth USB device. Discover all required USB interfaces * and endpoints. * * USB device must present two interfaces: * 1) Interface 0 that has 3 endpoints * 1) Interrupt endpoint to receive HCI events * 2) Bulk IN endpoint to receive ACL data * 3) Bulk OUT endpoint to send ACL data * * 2) Interface 1 then has 2 endpoints * 1) Isochronous IN endpoint to receive SCO data * 2) Isochronous OUT endpoint to send SCO data * * Interface 1 (with isochronous endpoints) has several alternate * configurations with different packet size. */ /* * Interface 0 */ error = usbd_device2interface_handle(sc->sc_udev, 0, &sc->sc_iface0); if (error || sc->sc_iface0 == NULL) { printf("%s: Could not get interface 0 handle. 
%s (%d), " \ "handle=%p\n", USBDEVNAME(sc->sc_dev), usbd_errstr(error), error, sc->sc_iface0); goto bad; } id = usbd_get_interface_descriptor(sc->sc_iface0); if (id == NULL) { printf("%s: Could not get interface 0 descriptor\n", USBDEVNAME(sc->sc_dev)); goto bad; } for (i = 0; i < id->bNumEndpoints; i ++) { ed = usbd_interface2endpoint_descriptor(sc->sc_iface0, i); if (ed == NULL) { printf("%s: Could not read endpoint descriptor for " \ "interface 0, i=%d\n", USBDEVNAME(sc->sc_dev), i); goto bad; } switch (UE_GET_XFERTYPE(ed->bmAttributes)) { case UE_BULK: if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN) sc->sc_bulk_in_ep = ed->bEndpointAddress; else sc->sc_bulk_out_ep = ed->bEndpointAddress; break; case UE_INTERRUPT: sc->sc_intr_ep = ed->bEndpointAddress; break; } } /* Check if we got everything we wanted on Interface 0 */ if (sc->sc_intr_ep == -1) { printf("%s: Could not detect interrupt endpoint\n", USBDEVNAME(sc->sc_dev)); goto bad; } if (sc->sc_bulk_in_ep == -1) { printf("%s: Could not detect bulk-in endpoint\n", USBDEVNAME(sc->sc_dev)); goto bad; } if (sc->sc_bulk_out_ep == -1) { printf("%s: Could not detect bulk-out endpoint\n", USBDEVNAME(sc->sc_dev)); goto bad; } printf("%s: Interface 0 endpoints: interrupt=%#x, bulk-in=%#x, " \ "bulk-out=%#x\n", USBDEVNAME(sc->sc_dev), sc->sc_intr_ep, sc->sc_bulk_in_ep, sc->sc_bulk_out_ep); /* * Interface 1 */ cd = usbd_get_config_descriptor(sc->sc_udev); if (cd == NULL) { printf("%s: Could not get device configuration descriptor\n", USBDEVNAME(sc->sc_dev)); goto bad; } error = usbd_device2interface_handle(sc->sc_udev, 1, &sc->sc_iface1); if (error || sc->sc_iface1 == NULL) { printf("%s: Could not get interface 1 handle. 
%s (%d), " \ "handle=%p\n", USBDEVNAME(sc->sc_dev), usbd_errstr(error), error, sc->sc_iface1); goto bad; } id = usbd_get_interface_descriptor(sc->sc_iface1); if (id == NULL) { printf("%s: Could not get interface 1 descriptor\n", USBDEVNAME(sc->sc_dev)); goto bad; } /* * Scan all alternate configurations for interface 1 */ alt_no = -1; for (ai = 0; ai < usbd_get_no_alts(cd, 1); ai++) { error = usbd_set_interface(sc->sc_iface1, ai); if (error) { printf("%s: [SCAN] Could not set alternate " \ "configuration %d for interface 1. %s (%d)\n", USBDEVNAME(sc->sc_dev), ai, usbd_errstr(error), error); goto bad; } id = usbd_get_interface_descriptor(sc->sc_iface1); if (id == NULL) { printf("%s: Could not get interface 1 descriptor for " \ "alternate configuration %d\n", USBDEVNAME(sc->sc_dev), ai); goto bad; } isoc_in = isoc_out = -1; isoc_isize = isoc_osize = 0; for (i = 0; i < id->bNumEndpoints; i ++) { ed = usbd_interface2endpoint_descriptor(sc->sc_iface1, i); if (ed == NULL) { printf("%s: Could not read endpoint " \ "descriptor for interface 1, " \ "alternate configuration %d, i=%d\n", USBDEVNAME(sc->sc_dev), ai, i); goto bad; } if (UE_GET_XFERTYPE(ed->bmAttributes) != UE_ISOCHRONOUS) continue; if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN) { isoc_in = ed->bEndpointAddress; isoc_isize = UGETW(ed->wMaxPacketSize); } else { isoc_out = ed->bEndpointAddress; isoc_osize = UGETW(ed->wMaxPacketSize); } } /* * Make sure that configuration looks sane and if so * update current settings */ if (isoc_in != -1 && isoc_out != -1 && isoc_isize > 0 && isoc_osize > 0 && isoc_isize == isoc_osize && isoc_isize > sc->sc_isoc_size) { sc->sc_isoc_in_ep = isoc_in; sc->sc_isoc_out_ep = isoc_out; sc->sc_isoc_size = isoc_isize; alt_no = ai; } } /* Check if we got everything we wanted on Interface 0 */ if (sc->sc_isoc_in_ep == -1) { printf("%s: Could not detect isoc-in endpoint\n", USBDEVNAME(sc->sc_dev)); goto bad; } if (sc->sc_isoc_out_ep == -1) { printf("%s: Could not detect isoc-out 
endpoint\n", USBDEVNAME(sc->sc_dev)); goto bad; } if (sc->sc_isoc_size <= 0) { printf("%s: Invalid isoc. packet size=%d\n", USBDEVNAME(sc->sc_dev), sc->sc_isoc_size); goto bad; } error = usbd_set_interface(sc->sc_iface1, alt_no); if (error) { printf("%s: Could not set alternate configuration " \ "%d for interface 1. %s (%d)\n", USBDEVNAME(sc->sc_dev), alt_no, usbd_errstr(error), error); goto bad; } /* Allocate USB transfer handles and buffers */ sc->sc_ctrl_xfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_ctrl_xfer == NULL) { printf("%s: Could not allocate control xfer handle\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_ctrl_buffer = usbd_alloc_buffer(sc->sc_ctrl_xfer, UBT_CTRL_BUFFER_SIZE); if (sc->sc_ctrl_buffer == NULL) { printf("%s: Could not allocate control buffer\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_intr_xfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_intr_xfer == NULL) { printf("%s: Could not allocate interrupt xfer handle\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_bulk_in_xfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_bulk_in_xfer == NULL) { printf("%s: Could not allocate bulk-in xfer handle\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_bulk_out_xfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_bulk_out_xfer == NULL) { printf("%s: Could not allocate bulk-out xfer handle\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_bulk_out_buffer = usbd_alloc_buffer(sc->sc_bulk_out_xfer, UBT_BULK_BUFFER_SIZE); if (sc->sc_bulk_out_buffer == NULL) { printf("%s: Could not allocate bulk-out buffer\n", USBDEVNAME(sc->sc_dev)); goto bad; } /* * Allocate buffers for isoc. 
transfers */ sc->sc_isoc_nframes = (UBT_ISOC_BUFFER_SIZE / sc->sc_isoc_size) + 1; sc->sc_isoc_in_xfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_isoc_in_xfer == NULL) { printf("%s: Could not allocate isoc-in xfer handle\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_isoc_in_buffer = usbd_alloc_buffer(sc->sc_isoc_in_xfer, sc->sc_isoc_nframes * sc->sc_isoc_size); if (sc->sc_isoc_in_buffer == NULL) { printf("%s: Could not allocate isoc-in buffer\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_isoc_in_frlen = malloc(sizeof(u_int16_t) * sc->sc_isoc_nframes, M_USBDEV, M_NOWAIT); if (sc->sc_isoc_in_frlen == NULL) { printf("%s: Could not allocate isoc-in frame sizes buffer\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_isoc_out_xfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_isoc_out_xfer == NULL) { printf("%s: Could not allocate isoc-out xfer handle\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_isoc_out_buffer = usbd_alloc_buffer(sc->sc_isoc_out_xfer, sc->sc_isoc_nframes * sc->sc_isoc_size); if (sc->sc_isoc_out_buffer == NULL) { printf("%s: Could not allocate isoc-out buffer\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->sc_isoc_out_frlen = malloc(sizeof(u_int16_t) * sc->sc_isoc_nframes, M_USBDEV, M_NOWAIT); if (sc->sc_isoc_out_frlen == NULL) { printf("%s: Could not allocate isoc-out frame sizes buffer\n", USBDEVNAME(sc->sc_dev)); goto bad; } printf("%s: Interface 1 (alt.config %d) endpoints: isoc-in=%#x, " \ "isoc-out=%#x; wMaxPacketSize=%d; nframes=%d, buffer size=%d\n", USBDEVNAME(sc->sc_dev), alt_no, sc->sc_isoc_in_ep, sc->sc_isoc_out_ep, sc->sc_isoc_size, sc->sc_isoc_nframes, (sc->sc_isoc_nframes * sc->sc_isoc_size)); /* * Open pipes */ /* Interrupt */ error = usbd_open_pipe(sc->sc_iface0, sc->sc_intr_ep, USBD_EXCLUSIVE_USE, &sc->sc_intr_pipe); if (error != USBD_NORMAL_COMPLETION) { printf("%s: %s - Could not open interrupt pipe. 
%s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(error), error); goto bad; } /* Bulk-in */ error = usbd_open_pipe(sc->sc_iface0, sc->sc_bulk_in_ep, USBD_EXCLUSIVE_USE, &sc->sc_bulk_in_pipe); if (error != USBD_NORMAL_COMPLETION) { printf("%s: %s - Could not open bulk-in pipe. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(error), error); goto bad; } /* Bulk-out */ error = usbd_open_pipe(sc->sc_iface0, sc->sc_bulk_out_ep, USBD_EXCLUSIVE_USE, &sc->sc_bulk_out_pipe); if (error != USBD_NORMAL_COMPLETION) { printf("%s: %s - Could not open bulk-out pipe. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(error), error); goto bad; } #if __broken__ /* XXX FIXME */ /* Isoc-in */ error = usbd_open_pipe(sc->sc_iface1, sc->sc_isoc_in_ep, USBD_EXCLUSIVE_USE, &sc->sc_isoc_in_pipe); if (error != USBD_NORMAL_COMPLETION) { printf("%s: %s - Could not open isoc-in pipe. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(error), error); goto bad; } /* Isoc-out */ error = usbd_open_pipe(sc->sc_iface1, sc->sc_isoc_out_ep, USBD_EXCLUSIVE_USE, &sc->sc_isoc_out_pipe); if (error != USBD_NORMAL_COMPLETION) { printf("%s: %s - Could not open isoc-out pipe. 
%s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(error), error); goto bad; } #endif /* __broken__ */ /* Create Netgraph node */ if (ng_make_node_common(&typestruct, &sc->sc_node) != 0) { printf("%s: Could not create Netgraph node\n", USBDEVNAME(sc->sc_dev)); sc->sc_node = NULL; goto bad; } /* Name node */ if (ng_name_node(sc->sc_node, USBDEVNAME(sc->sc_dev)) != 0) { printf("%s: Could not name Netgraph node\n", USBDEVNAME(sc->sc_dev)); NG_NODE_UNREF(sc->sc_node); sc->sc_node = NULL; goto bad; } NG_NODE_SET_PRIVATE(sc->sc_node, sc); NG_NODE_FORCE_WRITER(sc->sc_node); /* Claim all interfaces on the device */ for (i = 0; i < uaa->nifaces; i++) uaa->ifaces[i] = NULL; usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, USBDEV(sc->sc_dev)); USB_ATTACH_SUCCESS_RETURN; bad: ubt_detach(self); USB_ATTACH_ERROR_RETURN; } /* USB_ATTACH(ubt) */ /* * Detach the device */ USB_DETACH(ubt) { USB_DETACH_START(ubt, sc); sc->sc_dying = 1; ubt_destroy_device_nodes(sc); /* XXX FIXME locking? */ /* Destroy Netgraph node */ if (sc->sc_node != NULL) { NG_NODE_SET_PRIVATE(sc->sc_node, NULL); ng_rmnode_self(sc->sc_node); sc->sc_node = NULL; } /* Close pipes */ if (sc->sc_intr_pipe != NULL) { usbd_close_pipe(sc->sc_intr_pipe); sc->sc_intr_pipe = NULL; } if (sc->sc_bulk_in_pipe != NULL) { usbd_close_pipe(sc->sc_bulk_in_pipe); sc->sc_bulk_in_pipe = NULL; } if (sc->sc_bulk_out_pipe != NULL) { usbd_close_pipe(sc->sc_bulk_out_pipe); sc->sc_bulk_out_pipe = NULL; } if (sc->sc_isoc_in_pipe != NULL) { usbd_close_pipe(sc->sc_isoc_in_pipe); sc->sc_isoc_in_pipe = NULL; } if (sc->sc_isoc_out_pipe != NULL) { usbd_close_pipe(sc->sc_isoc_out_pipe); sc->sc_isoc_out_pipe = NULL; } /* Destroy USB transfer handles */ if (sc->sc_ctrl_xfer != NULL) { usbd_free_xfer(sc->sc_ctrl_xfer); sc->sc_ctrl_xfer = NULL; } if (sc->sc_intr_xfer != NULL) { usbd_free_xfer(sc->sc_intr_xfer); sc->sc_intr_xfer = NULL; } if (sc->sc_bulk_in_xfer != NULL) { usbd_free_xfer(sc->sc_bulk_in_xfer); sc->sc_bulk_in_xfer = NULL; } 
if (sc->sc_bulk_out_xfer != NULL) { usbd_free_xfer(sc->sc_bulk_out_xfer); sc->sc_bulk_out_xfer = NULL; } if (sc->sc_isoc_in_xfer != NULL) { usbd_free_xfer(sc->sc_isoc_in_xfer); sc->sc_isoc_in_xfer = NULL; } if (sc->sc_isoc_out_xfer != NULL) { usbd_free_xfer(sc->sc_isoc_out_xfer); sc->sc_isoc_out_xfer = NULL; } /* Destroy isoc. frame size buffers */ if (sc->sc_isoc_in_frlen != NULL) { free(sc->sc_isoc_in_frlen, M_USBDEV); sc->sc_isoc_in_frlen = NULL; } if (sc->sc_isoc_out_frlen != NULL) { free(sc->sc_isoc_out_frlen, M_USBDEV); sc->sc_isoc_out_frlen = NULL; } /* Destroy queues */ NG_BT_MBUFQ_DRAIN(&sc->sc_cmdq); NG_BT_MBUFQ_DRAIN(&sc->sc_aclq); NG_BT_MBUFQ_DRAIN(&sc->sc_scoq); usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, USBDEV(sc->sc_dev)); return (0); } /* USB_DETACH(ubt) */ /* * Start USB control request (HCI command). Must be called with node locked */ Static usbd_status ubt_request_start(ubt_softc_p sc) { usb_device_request_t req; struct mbuf *m = NULL; usbd_status status; KASSERT(!(sc->sc_flags & UBT_CMD_XMIT), ( "%s: %s - Another control request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); NG_BT_MBUFQ_DEQUEUE(&sc->sc_cmdq, m); if (m == NULL) { NG_UBT_INFO( "%s: %s - HCI command queue is empty\n", __func__, USBDEVNAME(sc->sc_dev)); return (USBD_NORMAL_COMPLETION); } /* * Check HCI command frame size and copy it back to * linear USB transfer buffer. 
*/ if (m->m_pkthdr.len > UBT_CTRL_BUFFER_SIZE) panic( "%s: %s - HCI command frame too big, size=%zd, len=%d\n", __func__, USBDEVNAME(sc->sc_dev), UBT_CTRL_BUFFER_SIZE, m->m_pkthdr.len); m_copydata(m, 0, m->m_pkthdr.len, sc->sc_ctrl_buffer); /* Initialize a USB control request and then schedule it */ bzero(&req, sizeof(req)); req.bmRequestType = UBT_HCI_REQUEST; USETW(req.wLength, m->m_pkthdr.len); NG_UBT_INFO( "%s: %s - Sending control request, bmRequestType=%#x, wLength=%d\n", __func__, USBDEVNAME(sc->sc_dev), req.bmRequestType, UGETW(req.wLength)); usbd_setup_default_xfer( sc->sc_ctrl_xfer, sc->sc_udev, (usbd_private_handle) sc->sc_node, USBD_DEFAULT_TIMEOUT, /* XXX */ &req, sc->sc_ctrl_buffer, m->m_pkthdr.len, USBD_NO_COPY, ubt_request_complete); NG_NODE_REF(sc->sc_node); status = usbd_transfer(sc->sc_ctrl_xfer); if (status != USBD_NORMAL_COMPLETION && status != USBD_IN_PROGRESS) { NG_UBT_ERR( "%s: %s - Could not start control request. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); NG_NODE_UNREF(sc->sc_node); NG_BT_MBUFQ_DROP(&sc->sc_cmdq); NG_UBT_STAT_OERROR(sc->sc_stat); /* XXX FIXME should we try to resubmit another request? 
	 */
	} else {
		NG_UBT_INFO(
"%s: %s - Control request has been started\n",
			__func__, USBDEVNAME(sc->sc_dev));

		/* Mark a control request as outstanding until the callback */
		sc->sc_flags |= UBT_CMD_XMIT;
		status = USBD_NORMAL_COMPLETION;
	}

	/* Data was copied into the linear transfer buffer; mbuf is done */
	NG_FREE_M(m);

	return (status);
} /* ubt_request_start */

/*
 * USB control request callback
 *
 * Runs in USB completion context: bounce the completion into the Netgraph
 * node's context via ng_send_fn(), then drop the node reference taken when
 * the transfer was scheduled.
 */
Static void
ubt_request_complete(usbd_xfer_handle h, usbd_private_handle p, usbd_status s)
{
	ng_send_fn((node_p) p, NULL, ubt_request_complete2, (void *) h, s);
	NG_NODE_UNREF((node_p) p);
} /* ubt_request_complete */

/*
 * Second half of the control request completion, running with the node
 * locked.  Clears the in-flight flag, accounts for success/failure, clears
 * a stalled endpoint asynchronously, and kicks off the next queued HCI
 * command if any.
 */
Static void
ubt_request_complete2(node_p node, hook_p hook, void *arg1, int arg2)
{
	ubt_softc_p		sc = (ubt_softc_p) NG_NODE_PRIVATE(node);
	usbd_xfer_handle	h = (usbd_xfer_handle) arg1;
	usbd_status		s = (usbd_status) arg2;

	/* Node private data is cleared on detach; nothing to do then */
	if (sc == NULL)
		return;

	KASSERT((sc->sc_flags & UBT_CMD_XMIT), (
"%s: %s - No control request is pending\n", __func__, USBDEVNAME(sc->sc_dev)));

	sc->sc_flags &= ~UBT_CMD_XMIT;

	if (s == USBD_CANCELLED) {
		NG_UBT_INFO(
"%s: %s - Control request cancelled\n", __func__, USBDEVNAME(sc->sc_dev));

		return;
	}

	if (s != USBD_NORMAL_COMPLETION) {
		NG_UBT_ERR(
"%s: %s - Control request failed. %s (%d)\n", __func__,
			USBDEVNAME(sc->sc_dev), usbd_errstr(s), s);

		if (s == USBD_STALLED)
			usbd_clear_endpoint_stall_async(h->pipe);

		NG_UBT_STAT_OERROR(sc->sc_stat);
	} else {
		NG_UBT_INFO(
"%s: %s - Sent %d bytes to control pipe\n", __func__,
			USBDEVNAME(sc->sc_dev), h->actlen);

		NG_UBT_STAT_BYTES_SENT(sc->sc_stat, h->actlen);
		NG_UBT_STAT_PCKTS_SENT(sc->sc_stat);
	}

	/* More HCI commands queued? Start the next one right away */
	if (NG_BT_MBUFQ_LEN(&sc->sc_cmdq) > 0)
		ubt_request_start(sc);
} /* ubt_request_complete2 */

/*
 * Start interrupt transfer.
Must be called when node is locked */ Static usbd_status ubt_intr_start(ubt_softc_p sc) { struct mbuf *m = NULL; usbd_status status; KASSERT(!(sc->sc_flags & UBT_EVT_RECV), ( "%s: %s - Another interrupt request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); /* Allocate new mbuf cluster */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return (USBD_NOMEM); MCLGET(m, M_DONTWAIT); if (!(m->m_flags & M_EXT)) { NG_FREE_M(m); return (USBD_NOMEM); } if (!(sc->sc_flags & UBT_HAVE_FRAME_TYPE)) { *mtod(m, u_int8_t *) = NG_HCI_EVENT_PKT; m->m_pkthdr.len = m->m_len = 1; } else m->m_pkthdr.len = m->m_len = 0; /* Initialize a USB transfer and then schedule it */ usbd_setup_xfer( sc->sc_intr_xfer, sc->sc_intr_pipe, (usbd_private_handle) sc->sc_node, (void *)(mtod(m, u_int8_t *) + m->m_len), MCLBYTES - m->m_len, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ubt_intr_complete); NG_NODE_REF(sc->sc_node); status = usbd_transfer(sc->sc_intr_xfer); if (status != USBD_NORMAL_COMPLETION && status != USBD_IN_PROGRESS) { NG_UBT_ERR( "%s: %s - Failed to start intrerrupt transfer. 
%s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); NG_NODE_UNREF(sc->sc_node); NG_FREE_M(m); return (status); } sc->sc_flags |= UBT_EVT_RECV; sc->sc_intr_buffer = m; return (USBD_NORMAL_COMPLETION); } /* ubt_intr_start */ /* * Process interrupt from USB device (We got data from interrupt pipe) */ Static void ubt_intr_complete(usbd_xfer_handle h, usbd_private_handle p, usbd_status s) { ng_send_fn((node_p) p, NULL, ubt_intr_complete2, (void *) h, s); NG_NODE_UNREF((node_p) p); } /* ubt_intr_complete */ Static void ubt_intr_complete2(node_p node, hook_p hook, void *arg1, int arg2) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(node); usbd_xfer_handle h = (usbd_xfer_handle) arg1; usbd_status s = (usbd_status) arg2; struct mbuf *m = NULL; ng_hci_event_pkt_t *hdr = NULL; int error; if (sc == NULL) return; KASSERT((sc->sc_flags & UBT_EVT_RECV), ( "%s: %s - No interrupt request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); sc->sc_flags &= ~UBT_EVT_RECV; m = sc->sc_intr_buffer; sc->sc_intr_buffer = NULL; hdr = mtod(m, ng_hci_event_pkt_t *); if (sc->sc_hook == NULL || NG_HOOK_NOT_VALID(sc->sc_hook)) { NG_UBT_INFO( "%s: %s - No upstream hook\n", __func__, USBDEVNAME(sc->sc_dev)); NG_FREE_M(m); return; } if (s == USBD_CANCELLED) { NG_UBT_INFO( "%s: %s - Interrupt xfer cancelled\n", __func__, USBDEVNAME(sc->sc_dev)); NG_FREE_M(m); return; } if (s != USBD_NORMAL_COMPLETION) { NG_UBT_WARN( "%s: %s - Interrupt xfer failed, %s (%d). 
No new xfer will be submitted!\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(s), s); if (s == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_intr_pipe); NG_UBT_STAT_IERROR(sc->sc_stat); NG_FREE_M(m); return; /* XXX FIXME we should restart after some delay */ } NG_UBT_STAT_BYTES_RECV(sc->sc_stat, h->actlen); m->m_pkthdr.len += h->actlen; m->m_len += h->actlen; NG_UBT_INFO( "%s: %s - Got %d bytes from interrupt pipe\n", __func__, USBDEVNAME(sc->sc_dev), h->actlen); if (m->m_pkthdr.len < sizeof(*hdr)) { NG_FREE_M(m); goto done; } if (hdr->length == m->m_pkthdr.len - sizeof(*hdr)) { NG_UBT_INFO( "%s: %s - Got complete HCI event frame, pktlen=%d, length=%d\n", __func__, USBDEVNAME(sc->sc_dev), m->m_pkthdr.len, hdr->length); NG_UBT_STAT_PCKTS_RECV(sc->sc_stat); NG_SEND_DATA_ONLY(error, sc->sc_hook, m); if (error != 0) NG_UBT_STAT_IERROR(sc->sc_stat); } else { NG_UBT_ERR( "%s: %s - Invalid HCI event frame size, length=%d, pktlen=%d\n", __func__, USBDEVNAME(sc->sc_dev), hdr->length, m->m_pkthdr.len); NG_UBT_STAT_IERROR(sc->sc_stat); NG_FREE_M(m); } done: ubt_intr_start(sc); } /* ubt_intr_complete2 */ /* * Start bulk-in USB transfer (ACL data). 
Must be called when node is locked */ Static usbd_status ubt_bulk_in_start(ubt_softc_p sc) { struct mbuf *m = NULL; usbd_status status; KASSERT(!(sc->sc_flags & UBT_ACL_RECV), ( "%s: %s - Another bulk-in request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); /* Allocate new mbuf cluster */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return (USBD_NOMEM); MCLGET(m, M_DONTWAIT); if (!(m->m_flags & M_EXT)) { NG_FREE_M(m); return (USBD_NOMEM); } if (!(sc->sc_flags & UBT_HAVE_FRAME_TYPE)) { *mtod(m, u_int8_t *) = NG_HCI_ACL_DATA_PKT; m->m_pkthdr.len = m->m_len = 1; } else m->m_pkthdr.len = m->m_len = 0; /* Initialize a bulk-in USB transfer and then schedule it */ usbd_setup_xfer( sc->sc_bulk_in_xfer, sc->sc_bulk_in_pipe, (usbd_private_handle) sc->sc_node, (void *)(mtod(m, u_int8_t *) + m->m_len), MCLBYTES - m->m_len, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ubt_bulk_in_complete); NG_NODE_REF(sc->sc_node); status = usbd_transfer(sc->sc_bulk_in_xfer); if (status != USBD_NORMAL_COMPLETION && status != USBD_IN_PROGRESS) { NG_UBT_ERR( "%s: %s - Failed to start bulk-in transfer. 
%s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); NG_NODE_UNREF(sc->sc_node); NG_FREE_M(m); return (status); } sc->sc_flags |= UBT_ACL_RECV; sc->sc_bulk_in_buffer = m; return (USBD_NORMAL_COMPLETION); } /* ubt_bulk_in_start */ /* * USB bulk-in transfer callback */ Static void ubt_bulk_in_complete(usbd_xfer_handle h, usbd_private_handle p, usbd_status s) { ng_send_fn((node_p) p, NULL, ubt_bulk_in_complete2, (void *) h, s); NG_NODE_UNREF((node_p) p); } /* ubt_bulk_in_complete */ Static void ubt_bulk_in_complete2(node_p node, hook_p hook, void *arg1, int arg2) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(node); usbd_xfer_handle h = (usbd_xfer_handle) arg1; usbd_status s = (usbd_status) arg2; struct mbuf *m = NULL; ng_hci_acldata_pkt_t *hdr = NULL; int len; if (sc == NULL) return; KASSERT((sc->sc_flags & UBT_ACL_RECV), ( "%s: %s - No bulk-in request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); sc->sc_flags &= ~UBT_ACL_RECV; m = sc->sc_bulk_in_buffer; sc->sc_bulk_in_buffer = NULL; hdr = mtod(m, ng_hci_acldata_pkt_t *); if (sc->sc_hook == NULL || NG_HOOK_NOT_VALID(sc->sc_hook)) { NG_UBT_INFO( "%s: %s - No upstream hook\n", __func__, USBDEVNAME(sc->sc_dev)); NG_FREE_M(m); return; } if (s == USBD_CANCELLED) { NG_UBT_INFO( "%s: %s - Bulk-in xfer cancelled, pipe=%p\n", __func__, USBDEVNAME(sc->sc_dev), sc->sc_bulk_in_pipe); NG_FREE_M(m); return; } if (s != USBD_NORMAL_COMPLETION) { NG_UBT_WARN( "%s: %s - Bulk-in xfer failed, %s (%d). 
No new xfer will be submitted!\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(s), s); if (s == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_bulk_in_pipe); NG_UBT_STAT_IERROR(sc->sc_stat); NG_FREE_M(m); return; /* XXX FIXME we should restart after some delay */ } NG_UBT_STAT_BYTES_RECV(sc->sc_stat, h->actlen); m->m_pkthdr.len += h->actlen; m->m_len += h->actlen; NG_UBT_INFO( "%s: %s - Got %d bytes from bulk-in pipe\n", __func__, USBDEVNAME(sc->sc_dev), h->actlen); if (m->m_pkthdr.len < sizeof(*hdr)) { NG_FREE_M(m); goto done; } len = le16toh(hdr->length); if (len == m->m_pkthdr.len - sizeof(*hdr)) { NG_UBT_INFO( "%s: %s - Got complete ACL data frame, pktlen=%d, length=%d\n", __func__, USBDEVNAME(sc->sc_dev), m->m_pkthdr.len, len); NG_UBT_STAT_PCKTS_RECV(sc->sc_stat); NG_SEND_DATA_ONLY(len, sc->sc_hook, m); if (len != 0) NG_UBT_STAT_IERROR(sc->sc_stat); } else { NG_UBT_ERR( "%s: %s - Invalid ACL frame size, length=%d, pktlen=%d\n", __func__, USBDEVNAME(sc->sc_dev), len, m->m_pkthdr.len); NG_UBT_STAT_IERROR(sc->sc_stat); NG_FREE_M(m); } done: ubt_bulk_in_start(sc); } /* ubt_bulk_in_complete2 */ /* * Start bulk-out USB transfer. Must be called with node locked */ Static usbd_status ubt_bulk_out_start(ubt_softc_p sc) { struct mbuf *m = NULL; usbd_status status; KASSERT(!(sc->sc_flags & UBT_ACL_XMIT), ( "%s: %s - Another bulk-out request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); NG_BT_MBUFQ_DEQUEUE(&sc->sc_aclq, m); if (m == NULL) { NG_UBT_INFO( "%s: %s - ACL data queue is empty\n", __func__, USBDEVNAME(sc->sc_dev)); return (USBD_NORMAL_COMPLETION); } /* * Check ACL data frame size and copy it back to linear USB * transfer buffer. 
*/ if (m->m_pkthdr.len > UBT_BULK_BUFFER_SIZE) panic( "%s: %s - ACL data frame too big, size=%d, len=%d\n", __func__, USBDEVNAME(sc->sc_dev), UBT_BULK_BUFFER_SIZE, m->m_pkthdr.len); m_copydata(m, 0, m->m_pkthdr.len, sc->sc_bulk_out_buffer); /* Initialize a bulk-out USB transfer and then schedule it */ usbd_setup_xfer( sc->sc_bulk_out_xfer, sc->sc_bulk_out_pipe, (usbd_private_handle) sc->sc_node, sc->sc_bulk_out_buffer, m->m_pkthdr.len, USBD_NO_COPY, USBD_DEFAULT_TIMEOUT, /* XXX */ ubt_bulk_out_complete); NG_NODE_REF(sc->sc_node); status = usbd_transfer(sc->sc_bulk_out_xfer); if (status != USBD_NORMAL_COMPLETION && status != USBD_IN_PROGRESS) { NG_UBT_ERR( "%s: %s - Could not start bulk-out transfer. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); NG_NODE_UNREF(sc->sc_node); NG_BT_MBUFQ_DROP(&sc->sc_aclq); NG_UBT_STAT_OERROR(sc->sc_stat); /* XXX FIXME should we try to start another transfer? */ } else { NG_UBT_INFO( "%s: %s - Bulk-out transfer has been started, len=%d\n", __func__, USBDEVNAME(sc->sc_dev), m->m_pkthdr.len); sc->sc_flags |= UBT_ACL_XMIT; status = USBD_NORMAL_COMPLETION; } NG_FREE_M(m); return (status); } /* ubt_bulk_out_start */ /* * USB bulk-out transfer callback */ Static void ubt_bulk_out_complete(usbd_xfer_handle h, usbd_private_handle p, usbd_status s) { ng_send_fn((node_p) p, NULL, ubt_bulk_out_complete2, (void *) h, s); NG_NODE_UNREF((node_p) p); } /* ubt_bulk_out_complete */ Static void ubt_bulk_out_complete2(node_p node, hook_p hook, void *arg1, int arg2) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(node); usbd_xfer_handle h = (usbd_xfer_handle) arg1; usbd_status s = (usbd_status) arg2; if (sc == NULL) return; KASSERT((sc->sc_flags & UBT_ACL_XMIT), ( "%s: %s - No bulk-out request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); sc->sc_flags &= ~UBT_ACL_XMIT; if (s == USBD_CANCELLED) { NG_UBT_INFO( "%s: %s - Bulk-out xfer cancelled, pipe=%p\n", __func__, USBDEVNAME(sc->sc_dev), sc->sc_bulk_out_pipe); return; } 
if (s != USBD_NORMAL_COMPLETION) { NG_UBT_WARN( "%s: %s - Bulk-out xfer failed. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(s), s); if (s == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_bulk_out_pipe); NG_UBT_STAT_OERROR(sc->sc_stat); } else { NG_UBT_INFO( "%s: %s - Sent %d bytes to bulk-out pipe\n", __func__, USBDEVNAME(sc->sc_dev), h->actlen); NG_UBT_STAT_BYTES_SENT(sc->sc_stat, h->actlen); NG_UBT_STAT_PCKTS_SENT(sc->sc_stat); } if (NG_BT_MBUFQ_LEN(&sc->sc_aclq) > 0) ubt_bulk_out_start(sc); } /* ubt_bulk_out_complete2 */ /* * Start Isochronous-in USB transfer. Must be called with node locked */ Static usbd_status ubt_isoc_in_start(ubt_softc_p sc) { usbd_status status; int i; KASSERT(!(sc->sc_flags & UBT_SCO_RECV), ( "%s: %s - Another isoc-in request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); /* Initialize a isoc-in USB transfer and then schedule it */ for (i = 0; i < sc->sc_isoc_nframes; i++) sc->sc_isoc_in_frlen[i] = sc->sc_isoc_size; usbd_setup_isoc_xfer( sc->sc_isoc_in_xfer, sc->sc_isoc_in_pipe, (usbd_private_handle) sc->sc_node, sc->sc_isoc_in_frlen, sc->sc_isoc_nframes, USBD_NO_COPY, /* XXX flags */ ubt_isoc_in_complete); NG_NODE_REF(sc->sc_node); status = usbd_transfer(sc->sc_isoc_in_xfer); if (status != USBD_NORMAL_COMPLETION && status != USBD_IN_PROGRESS) { NG_UBT_ERR( "%s: %s - Failed to start isoc-in transfer. 
%s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); NG_NODE_UNREF(sc->sc_node); return (status); } sc->sc_flags |= UBT_SCO_RECV; return (USBD_NORMAL_COMPLETION); } /* ubt_isoc_in_start */ /* * USB isochronous transfer callback */ Static void ubt_isoc_in_complete(usbd_xfer_handle h, usbd_private_handle p, usbd_status s) { ng_send_fn((node_p) p, NULL, ubt_isoc_in_complete2, (void *) h, s); NG_NODE_UNREF((node_p) p); } /* ubt_isoc_in_complete */ Static void ubt_isoc_in_complete2(node_p node, hook_p hook, void *arg1, int arg2) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(node); usbd_xfer_handle h = (usbd_xfer_handle) arg1; usbd_status s = (usbd_status) arg2; struct mbuf *m = NULL; ng_hci_scodata_pkt_t *hdr = NULL; u_int8_t *b = NULL; int i; if (sc == NULL) return; KASSERT((sc->sc_flags & UBT_SCO_RECV), ( "%s: %s - No isoc-in request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); sc->sc_flags &= ~UBT_SCO_RECV; if (sc->sc_hook == NULL || NG_HOOK_NOT_VALID(sc->sc_hook)) { NG_UBT_INFO( "%s: %s - No upstream hook\n", __func__, USBDEVNAME(sc->sc_dev)); return; } if (s == USBD_CANCELLED) { NG_UBT_INFO( "%s: %s - Isoc-in xfer cancelled, pipe=%p\n", __func__, USBDEVNAME(sc->sc_dev), sc->sc_isoc_in_pipe); return; } if (s != USBD_NORMAL_COMPLETION) { NG_UBT_WARN( "%s: %s - Isoc-in xfer failed, %s (%d). 
No new xfer will be submitted!\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(s), s); if (s == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_isoc_in_pipe); NG_UBT_STAT_IERROR(sc->sc_stat); return; /* XXX FIXME we should restart after some delay */ } NG_UBT_STAT_BYTES_RECV(sc->sc_stat, h->actlen); NG_UBT_INFO( "%s: %s - Got %d bytes from isoc-in pipe\n", __func__, USBDEVNAME(sc->sc_dev), h->actlen); /* Copy SCO data frame to mbuf */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { NG_UBT_ALERT( "%s: %s - Could not allocate mbuf\n", __func__, USBDEVNAME(sc->sc_dev)); NG_UBT_STAT_IERROR(sc->sc_stat); goto done; } /* Fix SCO data frame header if required */ if (!(sc->sc_flags & UBT_HAVE_FRAME_TYPE)) { *mtod(m, u_int8_t *) = NG_HCI_SCO_DATA_PKT; m->m_pkthdr.len = 1; m->m_len = min(MHLEN, h->actlen + 1); /* XXX m_copyback */ } else { m->m_pkthdr.len = 0; m->m_len = min(MHLEN, h->actlen); /* XXX m_copyback */ } /* * XXX FIXME how do we know how many frames we have received? * XXX use frlen for now. is that correct? */ b = (u_int8_t *) sc->sc_isoc_in_buffer; for (i = 0; i < sc->sc_isoc_nframes; i++) { b += (i * sc->sc_isoc_size); if (sc->sc_isoc_in_frlen[i] > 0) m_copyback(m, m->m_pkthdr.len, sc->sc_isoc_in_frlen[i], b); } if (m->m_pkthdr.len < sizeof(*hdr)) goto done; hdr = mtod(m, ng_hci_scodata_pkt_t *); if (hdr->length == m->m_pkthdr.len - sizeof(*hdr)) { NG_UBT_INFO( "%s: %s - Got complete SCO data frame, pktlen=%d, length=%d\n", __func__, USBDEVNAME(sc->sc_dev), m->m_pkthdr.len, hdr->length); NG_UBT_STAT_PCKTS_RECV(sc->sc_stat); NG_SEND_DATA_ONLY(i, sc->sc_hook, m); if (i != 0) NG_UBT_STAT_IERROR(sc->sc_stat); } else { NG_UBT_ERR( "%s: %s - Invalid SCO frame size, length=%d, pktlen=%d\n", __func__, USBDEVNAME(sc->sc_dev), hdr->length, m->m_pkthdr.len); NG_UBT_STAT_IERROR(sc->sc_stat); NG_FREE_M(m); } done: ubt_isoc_in_start(sc); } /* ubt_isoc_in_complete2 */ /* * Start isochronous-out USB transfer. 
Must be called with node locked */ Static usbd_status ubt_isoc_out_start(ubt_softc_p sc) { struct mbuf *m = NULL; u_int8_t *b = NULL; int i, len, nframes; usbd_status status; KASSERT(!(sc->sc_flags & UBT_SCO_XMIT), ( "%s: %s - Another isoc-out request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); NG_BT_MBUFQ_DEQUEUE(&sc->sc_scoq, m); if (m == NULL) { NG_UBT_INFO( "%s: %s - SCO data queue is empty\n", __func__, USBDEVNAME(sc->sc_dev)); return (USBD_NORMAL_COMPLETION); } /* Copy entire SCO frame into USB transfer buffer and start transfer */ b = (u_int8_t *) sc->sc_isoc_out_buffer; nframes = 0; for (i = 0; i < sc->sc_isoc_nframes; i++) { b += (i * sc->sc_isoc_size); len = min(m->m_pkthdr.len, sc->sc_isoc_size); if (len > 0) { m_copydata(m, 0, len, b); m_adj(m, len); nframes ++; } sc->sc_isoc_out_frlen[i] = len; } if (m->m_pkthdr.len > 0) panic( "%s: %s - SCO data frame is too big, nframes=%d, size=%d, len=%d\n", __func__, USBDEVNAME(sc->sc_dev), sc->sc_isoc_nframes, sc->sc_isoc_size, m->m_pkthdr.len); NG_FREE_M(m); /* Initialize a isoc-out USB transfer and then schedule it */ usbd_setup_isoc_xfer( sc->sc_isoc_out_xfer, sc->sc_isoc_out_pipe, (usbd_private_handle) sc->sc_node, sc->sc_isoc_out_frlen, nframes, USBD_NO_COPY, ubt_isoc_out_complete); NG_NODE_REF(sc->sc_node); status = usbd_transfer(sc->sc_isoc_out_xfer); if (status != USBD_NORMAL_COMPLETION && status != USBD_IN_PROGRESS) { NG_UBT_ERR( "%s: %s - Could not start isoc-out transfer. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); NG_NODE_UNREF(sc->sc_node); NG_BT_MBUFQ_DROP(&sc->sc_scoq); NG_UBT_STAT_OERROR(sc->sc_stat); } else { NG_UBT_INFO( "%s: %s - Isoc-out transfer has been started, nframes=%d, size=%d\n", __func__, USBDEVNAME(sc->sc_dev), nframes, sc->sc_isoc_size); sc->sc_flags |= UBT_SCO_XMIT; status = USBD_NORMAL_COMPLETION; } return (status); } /* ubt_isoc_out_start */ /* * USB isoc-out. 
transfer callback */ Static void ubt_isoc_out_complete(usbd_xfer_handle h, usbd_private_handle p, usbd_status s) { ng_send_fn((node_p) p, NULL, ubt_isoc_out_complete2, (void *) h, s); NG_NODE_UNREF((node_p) p); } /* ubt_isoc_out_complete */ Static void ubt_isoc_out_complete2(node_p node, hook_p hook, void *arg1, int arg2) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(node); usbd_xfer_handle h = (usbd_xfer_handle) arg1; usbd_status s = (usbd_status) arg2; if (sc == NULL) return; KASSERT((sc->sc_flags & UBT_SCO_XMIT), ( "%s: %s - No isoc-out request is pending\n", __func__, USBDEVNAME(sc->sc_dev))); sc->sc_flags &= ~UBT_SCO_XMIT; if (s == USBD_CANCELLED) { NG_UBT_INFO( "%s: %s - Isoc-out xfer cancelled, pipe=%p\n", __func__, USBDEVNAME(sc->sc_dev), sc->sc_isoc_out_pipe); return; } if (s != USBD_NORMAL_COMPLETION) { NG_UBT_WARN( "%s: %s - Isoc-out xfer failed. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(s), s); if (s == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_isoc_out_pipe); NG_UBT_STAT_OERROR(sc->sc_stat); } else { NG_UBT_INFO( "%s: %s - Sent %d bytes to isoc-out pipe\n", __func__, USBDEVNAME(sc->sc_dev), h->actlen); NG_UBT_STAT_BYTES_SENT(sc->sc_stat, h->actlen); NG_UBT_STAT_PCKTS_SENT(sc->sc_stat); } if (NG_BT_MBUFQ_LEN(&sc->sc_scoq) > 0) ubt_isoc_out_start(sc); } /* ubt_isoc_out_complete2 */ /* * Abort transfers on all USB pipes */ Static void ubt_reset(ubt_softc_p sc) { /* Interrupt */ if (sc->sc_intr_pipe != NULL) usbd_abort_pipe(sc->sc_intr_pipe); /* Bulk-in/out */ if (sc->sc_bulk_in_pipe != NULL) usbd_abort_pipe(sc->sc_bulk_in_pipe); if (sc->sc_bulk_out_pipe != NULL) usbd_abort_pipe(sc->sc_bulk_out_pipe); /* Isoc-in/out */ if (sc->sc_isoc_in_pipe != NULL) usbd_abort_pipe(sc->sc_isoc_in_pipe); if (sc->sc_isoc_out_pipe != NULL) usbd_abort_pipe(sc->sc_isoc_out_pipe); /* Cleanup queues */ NG_BT_MBUFQ_DRAIN(&sc->sc_cmdq); NG_BT_MBUFQ_DRAIN(&sc->sc_aclq); NG_BT_MBUFQ_DRAIN(&sc->sc_scoq); } /* ubt_reset */ 
/**************************************************************************** **************************************************************************** ** Netgraph specific **************************************************************************** ****************************************************************************/ /* * Netgraph node constructor. Do not allow to create node of this type. */ Static int ng_ubt_constructor(node_p node) { return (EINVAL); } /* ng_ubt_constructor */ /* * Netgraph node destructor. Destroy node only when device has been detached */ Static int ng_ubt_shutdown(node_p node) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(node); /* Let old node go */ NG_NODE_SET_PRIVATE(node, NULL); NG_NODE_UNREF(node); if (sc == NULL) goto done; /* Create Netgraph node */ if (ng_make_node_common(&typestruct, &sc->sc_node) != 0) { printf("%s: Could not create Netgraph node\n", USBDEVNAME(sc->sc_dev)); sc->sc_node = NULL; goto done; } /* Name node */ if (ng_name_node(sc->sc_node, USBDEVNAME(sc->sc_dev)) != 0) { printf("%s: Could not name Netgraph node\n", USBDEVNAME(sc->sc_dev)); NG_NODE_UNREF(sc->sc_node); sc->sc_node = NULL; goto done; } NG_NODE_SET_PRIVATE(sc->sc_node, sc); NG_NODE_FORCE_WRITER(sc->sc_node); done: return (0); } /* ng_ubt_shutdown */ /* * Create new hook. There can only be one. */ Static int ng_ubt_newhook(node_p node, hook_p hook, char const *name) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(node); /* Refuse to create new hook if device interface is active */ if (sc->sc_ctrl_dev != NODEV || sc->sc_intr_dev != NODEV || sc->sc_bulk_dev != NODEV) return (EBUSY); if (strcmp(name, NG_UBT_HOOK) != 0) return (EINVAL); if (sc->sc_hook != NULL) return (EISCONN); sc->sc_hook = hook; return (0); } /* ng_ubt_newhook */ /* * Connect hook. 
Start incoming USB transfers */ Static int ng_ubt_connect(hook_p hook) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); usbd_status status; /* Refuse to connect hook if device interface is active */ if (sc->sc_ctrl_dev != NODEV || sc->sc_intr_dev != NODEV || sc->sc_bulk_dev != NODEV) return (EBUSY); NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); /* Start intr transfer */ status = ubt_intr_start(sc); if (status != USBD_NORMAL_COMPLETION) { NG_UBT_ALERT( "%s: %s - Could not start interrupt transfer. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); goto fail; } /* Start bulk-in transfer */ status = ubt_bulk_in_start(sc); if (status != USBD_NORMAL_COMPLETION) { NG_UBT_ALERT( "%s: %s - Could not start bulk-in transfer. %s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); goto fail; } #if __broken__ /* XXX FIXME */ /* Start isoc-in transfer */ status = ubt_isoc_in_start(sc); if (status != USBD_NORMAL_COMPLETION) { NG_UBT_ALERT( "%s: %s - Could not start isoc-in transfer. 
%s (%d)\n", __func__, USBDEVNAME(sc->sc_dev), usbd_errstr(status), status); goto fail; } #endif /* __broken__ */ return (0); fail: ubt_reset(sc); sc->sc_hook = NULL; return (ENXIO); } /* ng_ubt_connect */ /* * Disconnect hook */ Static int ng_ubt_disconnect(hook_p hook) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); if (sc != NULL) { if (hook != sc->sc_hook) return (EINVAL); ubt_reset(sc); sc->sc_hook = NULL; } return (0); } /* ng_ubt_disconnect */ /* * Process control message */ Static int ng_ubt_rcvmsg(node_p node, item_p item, hook_p lasthook) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(node); struct ng_mesg *msg = NULL, *rsp = NULL; struct ng_bt_mbufq *q = NULL; int error = 0, queue, qlen; if (sc == NULL) { NG_FREE_ITEM(item); return (EHOSTDOWN); } NGI_GET_MSG(item, msg); switch (msg->header.typecookie) { case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { case NGM_TEXT_STATUS: NG_MKRESPONSE(rsp, msg, NG_TEXTRESPONSE, M_NOWAIT); if (rsp == NULL) error = ENOMEM; else snprintf(rsp->data, NG_TEXTRESPONSE, "Hook: %s\n" \ "Flags: %#x\n" \ "Debug: %d\n" \ "CMD queue: [have:%d,max:%d]\n" \ "ACL queue: [have:%d,max:%d]\n" \ "SCO queue: [have:%d,max:%d]", (sc->sc_hook != NULL)? 
NG_UBT_HOOK : "", sc->sc_flags, sc->sc_debug, NG_BT_MBUFQ_LEN(&sc->sc_cmdq), sc->sc_cmdq.maxlen, NG_BT_MBUFQ_LEN(&sc->sc_aclq), sc->sc_aclq.maxlen, NG_BT_MBUFQ_LEN(&sc->sc_scoq), sc->sc_scoq.maxlen); break; default: error = EINVAL; break; } break; case NGM_UBT_COOKIE: switch (msg->header.cmd) { case NGM_UBT_NODE_SET_DEBUG: if (msg->header.arglen != sizeof(ng_ubt_node_debug_ep)) error = EMSGSIZE; else sc->sc_debug = *((ng_ubt_node_debug_ep *)(msg->data)); break; case NGM_UBT_NODE_GET_DEBUG: NG_MKRESPONSE(rsp, msg, sizeof(ng_ubt_node_debug_ep), M_NOWAIT); if (rsp == NULL) error = ENOMEM; else *((ng_ubt_node_debug_ep *)(rsp->data)) = sc->sc_debug; break; case NGM_UBT_NODE_SET_QLEN: if (msg->header.arglen != sizeof(ng_ubt_node_qlen_ep)) error = EMSGSIZE; else { queue = ((ng_ubt_node_qlen_ep *) (msg->data))->queue; qlen = ((ng_ubt_node_qlen_ep *) (msg->data))->qlen; if (qlen <= 0) { error = EINVAL; break; } switch (queue) { case NGM_UBT_NODE_QUEUE_CMD: q = &sc->sc_cmdq; break; case NGM_UBT_NODE_QUEUE_ACL: q = &sc->sc_aclq; break; case NGM_UBT_NODE_QUEUE_SCO: q = &sc->sc_scoq; break; default: q = NULL; error = EINVAL; break; } if (q != NULL) q->maxlen = qlen; } break; case NGM_UBT_NODE_GET_QLEN: if (msg->header.arglen != sizeof(ng_ubt_node_qlen_ep)) { error = EMSGSIZE; break; } queue = ((ng_ubt_node_qlen_ep *)(msg->data))->queue; switch (queue) { case NGM_UBT_NODE_QUEUE_CMD: q = &sc->sc_cmdq; break; case NGM_UBT_NODE_QUEUE_ACL: q = &sc->sc_aclq; break; case NGM_UBT_NODE_QUEUE_SCO: q = &sc->sc_scoq; break; default: q = NULL; error = EINVAL; break; } if (q != NULL) { NG_MKRESPONSE(rsp, msg, sizeof(ng_ubt_node_qlen_ep), M_NOWAIT); if (rsp == NULL) { error = ENOMEM; break; } ((ng_ubt_node_qlen_ep *)(rsp->data))->queue = queue; ((ng_ubt_node_qlen_ep *)(rsp->data))->qlen = q->maxlen; } break; case NGM_UBT_NODE_GET_STAT: NG_MKRESPONSE(rsp, msg, sizeof(ng_ubt_node_stat_ep), M_NOWAIT); if (rsp == NULL) error = ENOMEM; else bcopy(&sc->sc_stat, rsp->data, 
sizeof(ng_ubt_node_stat_ep)); break; case NGM_UBT_NODE_RESET_STAT: NG_UBT_STAT_RESET(sc->sc_stat); break; case NGM_UBT_NODE_DEV_NODES: if (msg->header.arglen != sizeof(ng_ubt_node_dev_nodes_ep)) { error = EMSGSIZE; break; } if ((sc->sc_flags & UBT_ANY_DEV) || sc->sc_hook != NULL) { error = EBUSY; break; } if (*((ng_ubt_node_dev_nodes_ep *)(msg->data))) ubt_create_device_nodes(sc); else ubt_destroy_device_nodes(sc); break; default: error = EINVAL; break; } break; default: error = EINVAL; break; } NG_RESPOND_MSG(error, node, item, rsp); NG_FREE_MSG(msg); return (error); } /* ng_ubt_rcvmsg */ /* * Process data */ Static int ng_ubt_rcvdata(hook_p hook, item_p item) { ubt_softc_p sc = (ubt_softc_p) NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); struct mbuf *m = NULL; usbd_status (*f)(ubt_softc_p) = NULL; struct ng_bt_mbufq *q = NULL; int b, error = 0; if (sc == NULL) { error = EHOSTDOWN; goto done; } if (hook != sc->sc_hook) { error = EINVAL; goto done; } /* Deatch mbuf and get HCI frame type */ NGI_GET_M(item, m); /* Process HCI frame */ switch (*mtod(m, u_int8_t *)) { /* XXX call m_pullup ? */ case NG_HCI_CMD_PKT: f = ubt_request_start; q = &sc->sc_cmdq; b = UBT_CMD_XMIT; break; case NG_HCI_ACL_DATA_PKT: f = ubt_bulk_out_start; q = &sc->sc_aclq; b = UBT_ACL_XMIT; break; #if __broken__ /* XXX FIXME */ case NG_HCI_SCO_DATA_PKT: f = ubt_isoc_out_start; q = &sc->sc_scoq; b = UBT_SCO_XMIT; break; #endif /* __broken__ */ default: NG_UBT_ERR( "%s: %s - Dropping unknown/unsupported HCI frame, type=%d, pktlen=%d\n", __func__, USBDEVNAME(sc->sc_dev), *mtod(m, u_int8_t *), m->m_pkthdr.len); NG_FREE_M(m); error = EINVAL; goto done; /* NOT REACHED */ } /* Loose frame type, if required */ if (!(sc->sc_flags & UBT_NEED_FRAME_TYPE)) m_adj(m, sizeof(u_int8_t)); if (NG_BT_MBUFQ_FULL(q)) { NG_UBT_ERR( "%s: %s - Dropping HCI frame %#x, len=%d. 
Queue full\n", __func__, USBDEVNAME(sc->sc_dev), *mtod(m, u_int8_t *), m->m_pkthdr.len); NG_FREE_M(m); } else NG_BT_MBUFQ_ENQUEUE(q, m); if (!(sc->sc_flags & b)) if ((*f)(sc) != USBD_NORMAL_COMPLETION) error = EIO; done: NG_FREE_ITEM(item); return (error); } /* ng_ubt_rcvdata */ /**************************************************************************** **************************************************************************** ** Device specific **************************************************************************** ****************************************************************************/ /* * Open endpoint device * XXX FIXME softc locking */ Static int ubt_open(dev_t dev, int flag, int mode, usb_proc_ptr p) { ubt_softc_p sc = NULL; int ep = UBT_ENDPOINT(dev); USB_GET_SC_OPEN(ubt, UBT_UNIT(dev), sc); /* check for sc != NULL */ if (sc->sc_dying) return (ENXIO); if (ep == USB_CONTROL_ENDPOINT) { if (sc->sc_flags & UBT_CTRL_DEV) return (EBUSY); sc->sc_flags |= UBT_CTRL_DEV; } else if (ep == UE_GET_ADDR(sc->sc_intr_ep)) { if (sc->sc_flags & UBT_INTR_DEV) return (EBUSY); if (sc->sc_intr_pipe == NULL) return (ENXIO); sc->sc_flags |= UBT_INTR_DEV; } else if (ep == UE_GET_ADDR(sc->sc_bulk_in_ep)) { if (sc->sc_flags & UBT_BULK_DEV) return (EBUSY); if (sc->sc_bulk_in_pipe == NULL || sc->sc_bulk_out_pipe == NULL) return (ENXIO); sc->sc_flags |= UBT_BULK_DEV; } else return (EINVAL); return (0); } /* ubt_open */ /* * Close endpoint device * XXX FIXME softc locking */ Static int ubt_close(dev_t dev, int flag, int mode, usb_proc_ptr p) { ubt_softc_p sc = NULL; int ep = UBT_ENDPOINT(dev); USB_GET_SC(ubt, UBT_UNIT(dev), sc); if (sc == NULL) return (ENXIO); if (ep == USB_CONTROL_ENDPOINT) sc->sc_flags &= ~UBT_CTRL_DEV; else if (ep == UE_GET_ADDR(sc->sc_intr_ep)) { if (sc->sc_intr_pipe != NULL) usbd_abort_pipe(sc->sc_intr_pipe); sc->sc_flags &= ~UBT_INTR_DEV; } else if (ep == UE_GET_ADDR(sc->sc_bulk_in_ep)) { /* Close both in and out bulk pipes */ if (sc->sc_bulk_in_pipe 
!= NULL) usbd_abort_pipe(sc->sc_bulk_in_pipe); if (sc->sc_bulk_out_pipe != NULL) usbd_abort_pipe(sc->sc_bulk_out_pipe); sc->sc_flags &= ~UBT_BULK_DEV; } else return (EINVAL); return (0); } /* ubt_close */ /* * Read from the endpoint device * XXX FIXME softc locking */ Static int ubt_read(dev_t dev, struct uio *uio, int flag) { ubt_softc_p sc = NULL; int error = 0, n, tn, ep = UBT_ENDPOINT(dev); usbd_status status; usbd_pipe_handle pipe = NULL; usbd_xfer_handle xfer = NULL; u_int8_t buf[UBT_BSIZE]; USB_GET_SC(ubt, UBT_UNIT(dev), sc); if (sc == NULL || sc->sc_dying) return (ENXIO); if (ep == USB_CONTROL_ENDPOINT) return (EOPNOTSUPP); if (ep == UE_GET_ADDR(sc->sc_intr_ep)) { pipe = sc->sc_intr_pipe; xfer = sc->sc_intr_xfer; } else if (ep == UE_GET_ADDR(sc->sc_bulk_in_ep)) { pipe = sc->sc_bulk_in_pipe; xfer = sc->sc_bulk_in_xfer; } else return (EINVAL); if (pipe == NULL || xfer == NULL) return (ENXIO); sc->sc_refcnt ++; while ((n = min(sizeof(buf), uio->uio_resid)) != 0) { tn = n; status = usbd_bulk_transfer(xfer, pipe, USBD_SHORT_XFER_OK, USBD_DEFAULT_TIMEOUT, buf, &tn, "ubtrd"); switch (status) { case USBD_NORMAL_COMPLETION: error = uiomove(buf, tn, uio); break; case USBD_INTERRUPTED: error = EINTR; break; case USBD_TIMEOUT: error = ETIMEDOUT; break; default: error = EIO; break; } if (error != 0 || tn < n) break; } if (-- sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); return (error); } /* ubt_read */ /* * Write into the endpoint device * XXX FIXME softc locking */ Static int ubt_write(dev_t dev, struct uio *uio, int flag) { ubt_softc_p sc = NULL; int error = 0, n, ep = UBT_ENDPOINT(dev); usbd_status status; u_int8_t buf[UBT_BSIZE]; USB_GET_SC(ubt, UBT_UNIT(dev), sc); if (sc == NULL || sc->sc_dying) return (ENXIO); if (ep == USB_CONTROL_ENDPOINT || ep == UE_GET_ADDR(sc->sc_intr_ep)) return (EOPNOTSUPP); if (ep != UE_GET_ADDR(sc->sc_bulk_in_ep)) return (EINVAL); sc->sc_refcnt ++; while ((n = min(sizeof(buf), uio->uio_resid)) != 0) { error = uiomove(buf, n, 
uio); if (error != 0) break; status = usbd_bulk_transfer(sc->sc_bulk_out_xfer, sc->sc_bulk_out_pipe, 0, USBD_DEFAULT_TIMEOUT, buf, &n,"ubtwr"); switch (status) { case USBD_NORMAL_COMPLETION: break; case USBD_INTERRUPTED: error = EINTR; break; case USBD_TIMEOUT: error = ETIMEDOUT; break; default: error = EIO; break; } if (error != 0) break; } if (-- sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); return (error); } /* ubt_write */ /* * Process ioctl on the endpoint device. Mostly stolen from ugen(4) * XXX FIXME softc locking */ Static int ubt_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, usb_proc_ptr p) { ubt_softc_p sc = NULL; int len, error = 0, ep = UBT_ENDPOINT(dev); usbd_status status; struct usb_string_desc *si = NULL; struct usb_ctl_request *ur = NULL; void *ptr = NULL; struct iovec iov; struct uio uio; USB_GET_SC(ubt, UBT_UNIT(dev), sc); if (sc == NULL || sc->sc_dying) return (ENXIO); if (ep != USB_CONTROL_ENDPOINT) return (EOPNOTSUPP); sc->sc_refcnt ++; switch (cmd) { case USB_GET_DEVICE_DESC: *(usb_device_descriptor_t *) data = *usbd_get_device_descriptor(sc->sc_udev); break; case USB_GET_STRING_DESC: si = (struct usb_string_desc *) data; status = usbd_get_string_desc(sc->sc_udev, si->usd_string_index, si->usd_language_id, &si->usd_desc); if (status != USBD_NORMAL_COMPLETION) error = EINVAL; break; case USB_DO_REQUEST: ur = (void *) data; len = UGETW(ur->ucr_request.wLength); if (!(flag & FWRITE)) { error = EPERM; break; } /* Avoid requests that would damage the bus integrity. 
*/ if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE && ur->ucr_request.bRequest == UR_SET_ADDRESS) || (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE && ur->ucr_request.bRequest == UR_SET_CONFIG) || (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE && ur->ucr_request.bRequest == UR_SET_INTERFACE) || len < 0 || len > 32767) { error = EINVAL; break; } if (len != 0) { iov.iov_base = (caddr_t) ur->ucr_data; iov.iov_len = len; uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_resid = len; uio.uio_offset = 0; uio.uio_segflg = UIO_USERSPACE; uio.uio_rw = ur->ucr_request.bmRequestType & UT_READ ? UIO_READ : UIO_WRITE; uio.uio_procp = p; ptr = malloc(len, M_TEMP, M_WAITOK); if (uio.uio_rw == UIO_WRITE) { error = uiomove(ptr, len, &uio); if (error != 0) goto ret; } } status = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request, ptr, ur->ucr_flags, &ur->ucr_actlen, USBD_DEFAULT_TIMEOUT); if (status != USBD_NORMAL_COMPLETION) { error = EIO; goto ret; } if (len != 0) { if (uio.uio_rw == UIO_READ) { error = uiomove(ptr, len, &uio); if (error != 0) goto ret; } } ret: if (ptr != NULL) free(ptr, M_TEMP); break; case USB_GET_DEVICEINFO: usbd_fill_deviceinfo(sc->sc_udev, (struct usb_device_info *) data, 1); break; default: error = EINVAL; break; } if (-- sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); return (error); } /* ubt_ioctl */ /* * Poll the endpoint device * XXX FIXME softc locking */ Static int ubt_poll(dev_t dev, int events, usb_proc_ptr p) { ubt_softc_p sc = NULL; int revents = 0, ep = UBT_ENDPOINT(dev); USB_GET_SC(ubt, UBT_UNIT(dev), sc); if (sc == NULL || sc->sc_dying) return (ENXIO); if (ep == USB_CONTROL_ENDPOINT) return (EOPNOTSUPP); if (ep == UE_GET_ADDR(sc->sc_intr_ep)) { if (sc->sc_intr_pipe != NULL) revents |= events & (POLLIN | POLLRDNORM); else revents = EIO; } else if (ep == UE_GET_ADDR(sc->sc_bulk_in_ep)) { if (sc->sc_bulk_in_pipe != NULL) revents |= events & (POLLIN | POLLRDNORM); if (sc->sc_bulk_out_pipe != NULL) revents |= events & 
(POLLOUT | POLLWRNORM); if (revents == 0) revents = EIO; /* both pipes closed */ } else revents = EINVAL; return (revents); } /* ubt_poll */ /* * Create device nodes for all endpoints. Must be called with node locked. */ Static void ubt_create_device_nodes(ubt_softc_p sc) { int ep; KASSERT((sc->sc_hook == NULL), ( "%s: %s - hook != NULL!\n", __func__, USBDEVNAME(sc->sc_dev))); /* Control device */ if (sc->sc_ctrl_dev == NODEV) sc->sc_ctrl_dev = make_dev(&ubt_cdevsw, UBT_MINOR(USBDEVUNIT(sc->sc_dev), 0), UID_ROOT, GID_OPERATOR, 0644, "%s", USBDEVNAME(sc->sc_dev)); /* Interrupt device */ if (sc->sc_intr_dev == NODEV && sc->sc_intr_ep != -1) { ep = UE_GET_ADDR(sc->sc_intr_ep); sc->sc_intr_dev = make_dev(&ubt_cdevsw, UBT_MINOR(USBDEVUNIT(sc->sc_dev), ep), UID_ROOT, GID_OPERATOR, 0644, "%s.%d", USBDEVNAME(sc->sc_dev), ep); } /* * Bulk-in and bulk-out device * XXX will create one device for both in and out endpoints. * XXX note that address of the in and out endpoint should be the same */ if (sc->sc_bulk_dev == NODEV && sc->sc_bulk_in_ep != -1 && sc->sc_bulk_out_ep != -1 && UE_GET_ADDR(sc->sc_bulk_in_ep) == UE_GET_ADDR(sc->sc_bulk_out_ep)) { ep = UE_GET_ADDR(sc->sc_bulk_in_ep); sc->sc_bulk_dev = make_dev(&ubt_cdevsw, UBT_MINOR(USBDEVUNIT(sc->sc_dev), ep), UID_ROOT, GID_OPERATOR, 0644, "%s.%d", USBDEVNAME(sc->sc_dev), ep); } } /* ubt_create_device_nodes */ /* * Destroy device nodes for all endpoints * XXX FIXME locking */ Static void ubt_destroy_device_nodes(ubt_softc_p sc) { /* * Wait for processes to go away. This should be safe as we will not * call ubt_destroy_device_nodes() from Netgraph unless all devices * were closed (and thus no active processes). 
*/ if (-- sc->sc_refcnt >= 0) { ubt_reset(sc); usb_detach_wait(USBDEV(sc->sc_dev)); } sc->sc_refcnt = 0; /* Destroy device nodes */ if (sc->sc_bulk_dev != NODEV) { destroy_dev(sc->sc_bulk_dev); sc->sc_bulk_dev = NODEV; } if (sc->sc_intr_dev != NODEV) { destroy_dev(sc->sc_intr_dev); sc->sc_intr_dev = NODEV; } if (sc->sc_ctrl_dev != NODEV) { destroy_dev(sc->sc_ctrl_dev); sc->sc_ctrl_dev = NODEV; } } /* ubt_destroy_device_nodes */ Index: head/sys/netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c =================================================================== --- head/sys/netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c (revision 129879) +++ head/sys/netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c (revision 129880) @@ -1,552 +1,553 @@ /* * ubtbcmfw.c * * Copyright (c) 2003 Maksim Yevmenkin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: ubtbcmfw.c,v 1.3 2003/10/10 19:15:08 max Exp $ * $FreeBSD$ */ #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include /* * Download firmware to BCM2033. */ #define UBTBCMFW_CONFIG_NO 1 /* Config number */ #define UBTBCMFW_IFACE_IDX 0 /* Control interface */ #define UBTBCMFW_INTR_IN_EP 0x81 /* Fixed endpoint */ #define UBTBCMFW_BULK_OUT_EP 0x02 /* Fixed endpoint */ #define UBTBCMFW_INTR_IN UE_GET_ADDR(UBTBCMFW_INTR_IN_EP) #define UBTBCMFW_BULK_OUT UE_GET_ADDR(UBTBCMFW_BULK_OUT_EP) struct ubtbcmfw_softc { USBBASEDEVICE sc_dev; /* base device */ usbd_device_handle sc_udev; /* USB device handle */ dev_t sc_ctrl_dev; /* control device */ dev_t sc_intr_in_dev; /* interrupt device */ dev_t sc_bulk_out_dev; /* bulk device */ usbd_pipe_handle sc_intr_in_pipe; /* interrupt pipe */ usbd_pipe_handle sc_bulk_out_pipe; /* bulk out pipe */ int sc_flags; #define UBTBCMFW_CTRL_DEV (1 << 0) #define UBTBCMFW_INTR_IN_DEV (1 << 1) #define UBTBCMFW_BULK_OUT_DEV (1 << 2) int sc_refcnt; int sc_dying; }; typedef struct ubtbcmfw_softc *ubtbcmfw_softc_p; /* * Device methods */ #define UBTBCMFW_UNIT(n) ((minor(n) >> 4) & 0xf) #define UBTBCMFW_ENDPOINT(n) (minor(n) & 0xf) #define UBTBCMFW_MINOR(u, e) (((u) << 4) | (e)) #define UBTBCMFW_BSIZE 1024 Static d_open_t ubtbcmfw_open; Static d_close_t ubtbcmfw_close; Static d_read_t ubtbcmfw_read; Static d_write_t ubtbcmfw_write; Static d_ioctl_t 
ubtbcmfw_ioctl; Static d_poll_t ubtbcmfw_poll; Static struct cdevsw ubtbcmfw_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = ubtbcmfw_open, .d_close = ubtbcmfw_close, .d_read = ubtbcmfw_read, .d_write = ubtbcmfw_write, .d_ioctl = ubtbcmfw_ioctl, .d_poll = ubtbcmfw_poll, .d_name = "ubtbcmfw", }; /* * Module */ USB_DECLARE_DRIVER(ubtbcmfw); DRIVER_MODULE(ubtbcmfw, uhub, ubtbcmfw_driver, ubtbcmfw_devclass, usbd_driver_load, 0); /* * Probe for a USB Bluetooth device */ USB_MATCH(ubtbcmfw) { #define USB_PRODUCT_BROADCOM_BCM2033NF 0x2033 USB_MATCH_START(ubtbcmfw, uaa); if (uaa->iface != NULL) return (UMATCH_NONE); /* Match the boot device. */ if (uaa->vendor == USB_VENDOR_BROADCOM && uaa->product == USB_PRODUCT_BROADCOM_BCM2033NF) return (UMATCH_VENDOR_PRODUCT); return (UMATCH_NONE); } /* * Attach the device */ USB_ATTACH(ubtbcmfw) { USB_ATTACH_START(ubtbcmfw, sc, uaa); usbd_interface_handle iface; usbd_status err; char devinfo[1024]; sc->sc_udev = uaa->device; usbd_devinfo(sc->sc_udev, 0, devinfo); USB_ATTACH_SETUP; printf("%s: %s\n", USBDEVNAME(sc->sc_dev), devinfo); sc->sc_ctrl_dev = sc->sc_intr_in_dev = sc->sc_bulk_out_dev = NODEV; sc->sc_intr_in_pipe = sc->sc_bulk_out_pipe = NULL; sc->sc_flags = sc->sc_refcnt = sc->sc_dying = 0; err = usbd_set_config_no(sc->sc_udev, UBTBCMFW_CONFIG_NO, 1); if (err) { printf("%s: setting config no failed. %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); goto bad; } err = usbd_device2interface_handle(sc->sc_udev, UBTBCMFW_IFACE_IDX, &iface); if (err) { printf("%s: getting interface handle failed. %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); goto bad; } /* Will be used as a bulk pipe */ err = usbd_open_pipe(iface, UBTBCMFW_INTR_IN_EP, 0, &sc->sc_intr_in_pipe); if (err) { printf("%s: open intr in failed. %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); goto bad; } err = usbd_open_pipe(iface, UBTBCMFW_BULK_OUT_EP, 0, &sc->sc_bulk_out_pipe); if (err) { printf("%s: open bulk out failed. 
%s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); goto bad; } /* Create device nodes */ sc->sc_ctrl_dev = make_dev(&ubtbcmfw_cdevsw, UBTBCMFW_MINOR(USBDEVUNIT(sc->sc_dev), 0), UID_ROOT, GID_OPERATOR, 0644, "%s", USBDEVNAME(sc->sc_dev)); sc->sc_intr_in_dev = make_dev(&ubtbcmfw_cdevsw, UBTBCMFW_MINOR(USBDEVUNIT(sc->sc_dev), UBTBCMFW_INTR_IN), UID_ROOT, GID_OPERATOR, 0644, "%s.%d", USBDEVNAME(sc->sc_dev), UBTBCMFW_INTR_IN); sc->sc_bulk_out_dev = make_dev(&ubtbcmfw_cdevsw, UBTBCMFW_MINOR(USBDEVUNIT(sc->sc_dev), UBTBCMFW_BULK_OUT), UID_ROOT, GID_OPERATOR, 0644, "%s.%d", USBDEVNAME(sc->sc_dev), UBTBCMFW_BULK_OUT); USB_ATTACH_SUCCESS_RETURN; bad: ubtbcmfw_detach(self); USB_ATTACH_ERROR_RETURN; } /* * Detach the device */ USB_DETACH(ubtbcmfw) { USB_DETACH_START(ubtbcmfw, sc); sc->sc_dying = 1; if (-- sc->sc_refcnt >= 0) { if (sc->sc_intr_in_pipe != NULL) usbd_abort_pipe(sc->sc_intr_in_pipe); if (sc->sc_bulk_out_pipe != NULL) usbd_abort_pipe(sc->sc_bulk_out_pipe); usb_detach_wait(USBDEV(sc->sc_dev)); } /* Destroy device nodes */ if (sc->sc_bulk_out_dev != NODEV) { destroy_dev(sc->sc_bulk_out_dev); sc->sc_bulk_out_dev = NODEV; } if (sc->sc_intr_in_dev != NODEV) { destroy_dev(sc->sc_intr_in_dev); sc->sc_intr_in_dev = NODEV; } if (sc->sc_ctrl_dev != NODEV) { destroy_dev(sc->sc_ctrl_dev); sc->sc_ctrl_dev = NODEV; } /* Close pipes */ if (sc->sc_intr_in_pipe != NULL) { usbd_close_pipe(sc->sc_intr_in_pipe); sc->sc_intr_in_pipe = NULL; } if (sc->sc_bulk_out_pipe != NULL) { usbd_close_pipe(sc->sc_bulk_out_pipe); sc->sc_intr_in_pipe = NULL; } return (0); } /* * Open endpoint device * XXX FIXME softc locking */ Static int ubtbcmfw_open(dev_t dev, int flag, int mode, usb_proc_ptr p) { ubtbcmfw_softc_p sc = NULL; int error = 0; /* checks for sc != NULL */ USB_GET_SC_OPEN(ubtbcmfw, UBTBCMFW_UNIT(dev), sc); if (sc->sc_dying) return (ENXIO); switch (UBTBCMFW_ENDPOINT(dev)) { case USB_CONTROL_ENDPOINT: if (!(sc->sc_flags & UBTBCMFW_CTRL_DEV)) sc->sc_flags |= UBTBCMFW_CTRL_DEV; else error = 
EBUSY;
		break;

	case UBTBCMFW_INTR_IN:
		if (!(sc->sc_flags & UBTBCMFW_INTR_IN_DEV)) {
			/* The pipe must have been opened at attach time */
			if (sc->sc_intr_in_pipe != NULL)
				sc->sc_flags |= UBTBCMFW_INTR_IN_DEV;
			else
				error = ENXIO;
		} else
			error = EBUSY;
		break;

	case UBTBCMFW_BULK_OUT:
		if (!(sc->sc_flags & UBTBCMFW_BULK_OUT_DEV)) {
			if (sc->sc_bulk_out_pipe != NULL)
				sc->sc_flags |= UBTBCMFW_BULK_OUT_DEV;
			else
				error = ENXIO;
		} else
			error = EBUSY;
		break;

	default:
		error = ENXIO;
		break;
	}

	return (error);
}

/*
 * Close endpoint device
 * XXX FIXME softc locking
 */

Static int
ubtbcmfw_close(dev_t dev, int flag, int mode, usb_proc_ptr p)
{
	ubtbcmfw_softc_p	sc = NULL;

	USB_GET_SC(ubtbcmfw, UBTBCMFW_UNIT(dev), sc);
	if (sc == NULL)
		return (ENXIO);

	/* Abort any transfer in flight, then mark the endpoint free again */
	switch (UBTBCMFW_ENDPOINT(dev)) {
	case USB_CONTROL_ENDPOINT:
		sc->sc_flags &= ~UBTBCMFW_CTRL_DEV;
		break;

	case UBTBCMFW_INTR_IN:
		if (sc->sc_intr_in_pipe != NULL)
			usbd_abort_pipe(sc->sc_intr_in_pipe);
		sc->sc_flags &= ~UBTBCMFW_INTR_IN_DEV;
		break;

	case UBTBCMFW_BULK_OUT:
		if (sc->sc_bulk_out_pipe != NULL)
			usbd_abort_pipe(sc->sc_bulk_out_pipe);
		sc->sc_flags &= ~UBTBCMFW_BULK_OUT_DEV;
		break;
	}

	return (0);
}

/*
 * Read from the endpoint device
 * XXX FIXME softc locking
 */

Static int
ubtbcmfw_read(dev_t dev, struct uio *uio, int flag)
{
	ubtbcmfw_softc_p	sc = NULL;
	u_int8_t		buf[UBTBCMFW_BSIZE];
	usbd_xfer_handle	xfer;
	usbd_status		err;
	int			n, tn, error = 0;

	USB_GET_SC(ubtbcmfw, UBTBCMFW_UNIT(dev), sc);
	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* Reads are only supported on the interrupt-in endpoint node */
	if (UBTBCMFW_ENDPOINT(dev) != UBTBCMFW_INTR_IN)
		return (EOPNOTSUPP);
	if (sc->sc_intr_in_pipe == NULL)
		return (ENXIO);

	xfer = usbd_alloc_xfer(sc->sc_udev);
	if (xfer == NULL)
		return (ENOMEM);

	/* Hold a reference across the transfer so detach waits for us */
	sc->sc_refcnt ++;

	/* Pull data in UBTBCMFW_BSIZE chunks until the request is filled */
	while ((n = min(sizeof(buf), uio->uio_resid)) != 0) {
		tn = n;
		err = usbd_bulk_transfer(xfer, sc->sc_intr_in_pipe,
				USBD_SHORT_XFER_OK, USBD_DEFAULT_TIMEOUT,
				buf, &tn, "bcmrd");
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			error = uiomove(buf, tn, uio);
			break;

		case USBD_INTERRUPTED:
			error = EINTR;
			break;

		case USBD_TIMEOUT:
			error = ETIMEDOUT;
			break;

		default:
error = EIO;
			break;
		}

		/* Stop on error or on a short (final) transfer */
		if (error != 0 || tn < n)
			break;
	}

	usbd_free_xfer(xfer);

	/* Drop our reference; wake up detach if it is waiting on us */
	if (-- sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));

	return (error);
}

/*
 * Write into the endpoint device
 * XXX FIXME softc locking
 */

Static int
ubtbcmfw_write(dev_t dev, struct uio *uio, int flag)
{
	ubtbcmfw_softc_p	sc = NULL;
	u_int8_t		buf[UBTBCMFW_BSIZE];
	usbd_xfer_handle	xfer;
	usbd_status		err;
	int			n, error = 0;

	USB_GET_SC(ubtbcmfw, UBTBCMFW_UNIT(dev), sc);
	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* Writes are only supported on the bulk-out endpoint node */
	if (UBTBCMFW_ENDPOINT(dev) != UBTBCMFW_BULK_OUT)
		return (EOPNOTSUPP);
	if (sc->sc_bulk_out_pipe == NULL)
		return (ENXIO);

	xfer = usbd_alloc_xfer(sc->sc_udev);
	if (xfer == NULL)
		return (ENOMEM);

	/* Hold a reference across the transfer so detach waits for us */
	sc->sc_refcnt ++;

	/* Copy user data in UBTBCMFW_BSIZE chunks and push each one out */
	while ((n = min(sizeof(buf), uio->uio_resid)) != 0) {
		error = uiomove(buf, n, uio);
		if (error != 0)
			break;

		err = usbd_bulk_transfer(xfer, sc->sc_bulk_out_pipe,
				0, USBD_DEFAULT_TIMEOUT, buf, &n, "bcmwr");
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;

		case USBD_INTERRUPTED:
			error = EINTR;
			break;

		case USBD_TIMEOUT:
			error = ETIMEDOUT;
			break;

		default:
			error = EIO;
			break;
		}

		if (error != 0)
			break;
	}

	usbd_free_xfer(xfer);

	/* Drop our reference; wake up detach if it is waiting on us */
	if (-- sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));

	return (error);
}

/*
 * Process ioctl on the endpoint device
 * XXX FIXME softc locking
 */

Static int
ubtbcmfw_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag,
		usb_proc_ptr p)
{
	ubtbcmfw_softc_p	sc = NULL;
	int			error = 0;

	USB_GET_SC(ubtbcmfw, UBTBCMFW_UNIT(dev), sc);
	if (sc == NULL || sc->sc_dying)
		return (ENXIO);

	/* Only the control endpoint node accepts ioctls */
	if (UBTBCMFW_ENDPOINT(dev) != USB_CONTROL_ENDPOINT)
		return (EOPNOTSUPP);

	sc->sc_refcnt ++;

	switch (cmd) {
	case USB_GET_DEVICE_DESC:
		/* Copy the cached USB device descriptor out to the caller */
		*(usb_device_descriptor_t *) data =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;

	default:
		error = EINVAL;
		break;
	}

	if (-- sc->sc_refcnt < 0)
		usb_detach_wakeup(USBDEV(sc->sc_dev));

	return (error);
}

/*
 * Poll the endpoint device
 * XXX FIXME softc locking
 */

Static int
ubtbcmfw_poll(dev_t dev, int events, usb_proc_ptr p)
{
	ubtbcmfw_softc_p	sc =
NULL; int revents = 0; USB_GET_SC(ubtbcmfw, UBTBCMFW_UNIT(dev), sc); if (sc == NULL) return (ENXIO); switch (UBTBCMFW_ENDPOINT(dev)) { case UBTBCMFW_INTR_IN: if (sc->sc_intr_in_pipe != NULL) revents |= events & (POLLIN | POLLRDNORM); else revents = ENXIO; break; case UBTBCMFW_BULK_OUT: if (sc->sc_bulk_out_pipe != NULL) revents |= events & (POLLOUT | POLLWRNORM); else revents = ENXIO; break; default: revents = EOPNOTSUPP; break; } return (revents); } Index: head/sys/netinet/accf_data.c =================================================================== --- head/sys/netinet/accf_data.c (revision 129879) +++ head/sys/netinet/accf_data.c (revision 129880) @@ -1,67 +1,68 @@ /*- * Copyright (c) 2000 Alfred Perlstein * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */

#define ACCEPT_FILTER_MOD

#include
#include
+#include
#include
#include
#include

/* accept filter that holds a socket until data arrives */
static void	sohasdata(struct socket *so, void *arg, int waitflag);

static struct accept_filter accf_data_filter = {
	"dataready",
	sohasdata,
	NULL,
	NULL
};

static moduledata_t accf_data_mod = {
	"accf_data",
	accept_filt_generic_mod_event,
	&accf_data_filter
};

DECLARE_MODULE(accf_data, accf_data_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);

/*
 * Receive-buffer upcall: fires whenever data lands on a filtered
 * listening connection.  Once the socket is readable, disarm the
 * upcall and move the socket to the complete queue so accept(2)
 * can return it.
 */
static void
sohasdata(struct socket *so, void *arg, int waitflag)
{
	/* No data yet — keep holding the connection */
	if (!soreadable(so))
		return;

	/* Disarm the filter and hand the socket to the accept queue */
	so->so_upcall = NULL;
	so->so_rcv.sb_flags &= ~SB_UPCALL;
	soisconnected(so);
	return;
}

Index: head/sys/netinet/accf_http.c
===================================================================
--- head/sys/netinet/accf_http.c	(revision 129879)
+++ head/sys/netinet/accf_http.c	(revision 129880)
@@ -1,360 +1,361 @@
/*
 * Copyright (c) 2000 Paycounter, Inc.
 * Author: Alfred Perlstein ,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #define ACCEPT_FILTER_MOD #include #include #include +#include #include #include #include /* check for GET/HEAD */ static void sohashttpget(struct socket *so, void *arg, int waitflag); /* check for HTTP/1.0 or HTTP/1.1 */ static void soparsehttpvers(struct socket *so, void *arg, int waitflag); /* check for end of HTTP/1.x request */ static void soishttpconnected(struct socket *so, void *arg, int waitflag); /* strcmp on an mbuf chain */ static int mbufstrcmp(struct mbuf *m, struct mbuf *npkt, int offset, char *cmp); /* strncmp on an mbuf chain */ static int mbufstrncmp(struct mbuf *m, struct mbuf *npkt, int offset, int max, char *cmp); /* socketbuffer is full */ static int sbfull(struct sockbuf *sb); static struct accept_filter accf_http_filter = { "httpready", sohashttpget, NULL, NULL }; static moduledata_t accf_http_mod = { "accf_http", accept_filt_generic_mod_event, &accf_http_filter }; DECLARE_MODULE(accf_http, accf_http_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); static int parse_http_version = 1; SYSCTL_NODE(_net_inet_accf, OID_AUTO, http, CTLFLAG_RW, 0, "HTTP accept filter"); SYSCTL_INT(_net_inet_accf_http, OID_AUTO, parsehttpversion, CTLFLAG_RW, &parse_http_version, 1, "Parse http version so that non 1.x requests work"); #ifdef ACCF_HTTP_DEBUG #define DPRINT(fmt, args...) \ do { \ printf("%s:%d: " fmt "\n", __func__, __LINE__, ##args); \ } while (0) #else #define DPRINT(fmt, args...) 
#endif static int sbfull(struct sockbuf *sb) { DPRINT("sbfull, cc(%ld) >= hiwat(%ld): %d, " "mbcnt(%ld) >= mbmax(%ld): %d", sb->sb_cc, sb->sb_hiwat, sb->sb_cc >= sb->sb_hiwat, sb->sb_mbcnt, sb->sb_mbmax, sb->sb_mbcnt >= sb->sb_mbmax); return (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax); } /* * start at mbuf m, (must provide npkt if exists) * starting at offset in m compare characters in mbuf chain for 'cmp' */ static int mbufstrcmp(struct mbuf *m, struct mbuf *npkt, int offset, char *cmp) { struct mbuf *n; for (; m != NULL; m = n) { n = npkt; if (npkt) npkt = npkt->m_nextpkt; for (; m; m = m->m_next) { for (; offset < m->m_len; offset++, cmp++) { if (*cmp == '\0') return (1); else if (*cmp != *(mtod(m, char *) + offset)) return (0); } if (*cmp == '\0') return (1); offset = 0; } } return (0); } /* * start at mbuf m, (must provide npkt if exists) * starting at offset in m compare characters in mbuf chain for 'cmp' * stop at 'max' characters */ static int mbufstrncmp(struct mbuf *m, struct mbuf *npkt, int offset, int max, char *cmp) { struct mbuf *n; for (; m != NULL; m = n) { n = npkt; if (npkt) npkt = npkt->m_nextpkt; for (; m; m = m->m_next) { for (; offset < m->m_len; offset++, cmp++, max--) { if (max == 0 || *cmp == '\0') return (1); else if (*cmp != *(mtod(m, char *) + offset)) return (0); } if (max == 0 || *cmp == '\0') return (1); offset = 0; } } return (0); } #define STRSETUP(sptr, slen, str) \ do { \ sptr = str; \ slen = sizeof(str) - 1; \ } while(0) static void sohashttpget(struct socket *so, void *arg, int waitflag) { if ((so->so_state & SS_CANTRCVMORE) == 0 && !sbfull(&so->so_rcv)) { struct mbuf *m; char *cmp; int cmplen, cc; m = so->so_rcv.sb_mb; cc = so->so_rcv.sb_cc - 1; if (cc < 1) return; switch (*mtod(m, char *)) { case 'G': STRSETUP(cmp, cmplen, "ET "); break; case 'H': STRSETUP(cmp, cmplen, "EAD "); break; default: goto fallout; } if (cc < cmplen) { if (mbufstrncmp(m, m->m_nextpkt, 1, cc, cmp) == 1) { DPRINT("short cc (%d) but 
mbufstrncmp ok", cc); return; } else { DPRINT("short cc (%d) mbufstrncmp failed", cc); goto fallout; } } if (mbufstrcmp(m, m->m_nextpkt, 1, cmp) == 1) { DPRINT("mbufstrcmp ok"); if (parse_http_version == 0) soishttpconnected(so, arg, waitflag); else soparsehttpvers(so, arg, waitflag); return; } DPRINT("mbufstrcmp bad"); } fallout: DPRINT("fallout"); so->so_upcall = NULL; so->so_rcv.sb_flags &= ~SB_UPCALL; soisconnected(so); return; } static void soparsehttpvers(struct socket *so, void *arg, int waitflag) { struct mbuf *m, *n; int i, cc, spaces, inspaces; if ((so->so_state & SS_CANTRCVMORE) != 0 || sbfull(&so->so_rcv)) goto fallout; m = so->so_rcv.sb_mb; cc = so->so_rcv.sb_cc; inspaces = spaces = 0; for (m = so->so_rcv.sb_mb; m; m = n) { n = m->m_nextpkt; for (; m; m = m->m_next) { for (i = 0; i < m->m_len; i++, cc--) { switch (*(mtod(m, char *) + i)) { case ' ': /* tabs? '\t' */ if (!inspaces) { spaces++; inspaces = 1; } break; case '\r': case '\n': DPRINT("newline"); goto fallout; default: if (spaces != 2) { inspaces = 0; break; } /* * if we don't have enough characters * left (cc < sizeof("HTTP/1.0") - 1) * then see if the remaining ones * are a request we can parse. 
*/ if (cc < sizeof("HTTP/1.0") - 1) { if (mbufstrncmp(m, n, i, cc, "HTTP/1.") == 1) { DPRINT("ok"); goto readmore; } else { DPRINT("bad"); goto fallout; } } else if ( mbufstrcmp(m, n, i, "HTTP/1.0") || mbufstrcmp(m, n, i, "HTTP/1.1")) { DPRINT("ok"); soishttpconnected(so, arg, waitflag); return; } else { DPRINT("bad"); goto fallout; } } } } } readmore: DPRINT("readmore"); /* * if we hit here we haven't hit something * we don't understand or a newline, so try again */ so->so_upcall = soparsehttpvers; so->so_rcv.sb_flags |= SB_UPCALL; return; fallout: DPRINT("fallout"); so->so_upcall = NULL; so->so_rcv.sb_flags &= ~SB_UPCALL; soisconnected(so); return; } #define NCHRS 3 static void soishttpconnected(struct socket *so, void *arg, int waitflag) { char a, b, c; struct mbuf *m, *n; int ccleft, copied; DPRINT("start"); if ((so->so_state & SS_CANTRCVMORE) != 0 || sbfull(&so->so_rcv)) goto gotit; /* * Walk the socketbuffer and copy the last NCHRS (3) into a, b, and c * copied - how much we've copied so far * ccleft - how many bytes remaining in the socketbuffer * just loop over the mbufs subtracting from 'ccleft' until we only * have NCHRS left */ copied = 0; ccleft = so->so_rcv.sb_cc; if (ccleft < NCHRS) goto readmore; a = b = c = '\0'; for (m = so->so_rcv.sb_mb; m; m = n) { n = m->m_nextpkt; for (; m; m = m->m_next) { ccleft -= m->m_len; if (ccleft <= NCHRS) { char *src; int tocopy; tocopy = (NCHRS - ccleft) - copied; src = mtod(m, char *) + (m->m_len - tocopy); while (tocopy--) { switch (copied++) { case 0: a = *src++; break; case 1: b = *src++; break; case 2: c = *src++; break; } } } } } if (c == '\n' && (b == '\n' || (b == '\r' && a == '\n'))) { /* we have all request headers */ goto gotit; } readmore: so->so_upcall = soishttpconnected; so->so_rcv.sb_flags |= SB_UPCALL; return; gotit: so->so_upcall = NULL; so->so_rcv.sb_flags &= ~SB_UPCALL; soisconnected(so); return; } Index: head/sys/netinet/ip_mroute.c 
=================================================================== --- head/sys/netinet/ip_mroute.c (revision 129879) +++ head/sys/netinet/ip_mroute.c (revision 129880) @@ -1,3429 +1,3430 @@ /* * IP multicast forwarding procedures * * Written by David Waitzman, BBN Labs, August 1988. * Modified by Steve Deering, Stanford, February 1989. * Modified by Mark J. Steiglitz, Stanford, May, 1991 * Modified by Van Jacobson, LBL, January 1993 * Modified by Ajit Thyagarajan, PARC, August 1993 * Modified by Bill Fenner, PARC, April 1995 * Modified by Ahmed Helmy, SGI, June 1996 * Modified by George Edmond Eddy (Rusty), ISI, February 1998 * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000 * Modified by Hitoshi Asaeda, WIDE, August 2000 * Modified by Pavlin Radoslavov, ICSI, October 2002 * * MROUTING Revision: 3.5 * and PIM-SMv2 and PIM-DM support, advanced API support, * bandwidth metering and signaling * * $FreeBSD$ */ #include "opt_mac.h" #include "opt_mrouting.h" #include "opt_random_ip_id.h" #ifdef PIM #define _PIM_VT 1 #endif #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PIM #include #include #endif #include #include /* * Control debugging code for rsvp and multicast routing code. * Can only set them with the debugger. */ static u_int rsvpdebug; /* non-zero enables debugging */ static u_int mrtdebug; /* any set of the flags below */ #define DEBUG_MFC 0x02 #define DEBUG_FORWARD 0x04 #define DEBUG_EXPIRE 0x08 #define DEBUG_XMIT 0x10 #define DEBUG_PIM 0x20 #define VIFI_INVALID ((vifi_t) -1) #define M_HASCL(m) ((m)->m_flags & M_EXT) static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast routing tables"); /* * Locking. We use two locks: one for the virtual interface table and * one for the forwarding table. 
These locks may be nested in which case * the VIF lock must always be taken first. Note that each lock is used * to cover not only the specific data structure but also related data * structures. It may be better to add more fine-grained locking later; * it's not clear how performance-critical this code is. */ static struct mrtstat mrtstat; SYSCTL_STRUCT(_net_inet_ip, OID_AUTO, mrtstat, CTLFLAG_RW, &mrtstat, mrtstat, "Multicast Routing Statistics (struct mrtstat, netinet/ip_mroute.h)"); static struct mfc *mfctable[MFCTBLSIZ]; SYSCTL_OPAQUE(_net_inet_ip, OID_AUTO, mfctable, CTLFLAG_RD, &mfctable, sizeof(mfctable), "S,*mfc[MFCTBLSIZ]", "Multicast Forwarding Table (struct *mfc[MFCTBLSIZ], netinet/ip_mroute.h)"); static struct mtx mfc_mtx; #define MFC_LOCK() mtx_lock(&mfc_mtx) #define MFC_UNLOCK() mtx_unlock(&mfc_mtx) #define MFC_LOCK_ASSERT() mtx_assert(&mfc_mtx, MA_OWNED) #define MFC_LOCK_INIT() mtx_init(&mfc_mtx, "mroute mfc table", NULL, MTX_DEF) #define MFC_LOCK_DESTROY() mtx_destroy(&mfc_mtx) static struct vif viftable[MAXVIFS]; SYSCTL_OPAQUE(_net_inet_ip, OID_AUTO, viftable, CTLFLAG_RD, &viftable, sizeof(viftable), "S,vif[MAXVIFS]", "Multicast Virtual Interfaces (struct vif[MAXVIFS], netinet/ip_mroute.h)"); static struct mtx vif_mtx; #define VIF_LOCK() mtx_lock(&vif_mtx) #define VIF_UNLOCK() mtx_unlock(&vif_mtx) #define VIF_LOCK_ASSERT() mtx_assert(&vif_mtx, MA_OWNED) #define VIF_LOCK_INIT() mtx_init(&vif_mtx, "mroute vif table", NULL, MTX_DEF) #define VIF_LOCK_DESTROY() mtx_destroy(&vif_mtx) static u_char nexpire[MFCTBLSIZ]; static struct callout expire_upcalls_ch; #define EXPIRE_TIMEOUT (hz / 4) /* 4x / second */ #define UPCALL_EXPIRE 6 /* number of timeouts */ /* * Define the token bucket filter structures * tbftable -> each vif has one of these for storing info */ static struct tbf tbftable[MAXVIFS]; #define TBF_REPROCESS (hz / 100) /* 100x / second */ /* * 'Interfaces' associated with decapsulator (so we can tell * packets that went through it from ones that 
get reflected * by a broken gateway). These interfaces are never linked into * the system ifnet list & no routes point to them. I.e., packets * can't be sent this way. They only exist as a placeholder for * multicast source verification. */ static struct ifnet multicast_decap_if[MAXVIFS]; #define ENCAP_TTL 64 #define ENCAP_PROTO IPPROTO_IPIP /* 4 */ /* prototype IP hdr for encapsulated packets */ static struct ip multicast_encap_iphdr = { #if BYTE_ORDER == LITTLE_ENDIAN sizeof(struct ip) >> 2, IPVERSION, #else IPVERSION, sizeof(struct ip) >> 2, #endif 0, /* tos */ sizeof(struct ip), /* total length */ 0, /* id */ 0, /* frag offset */ ENCAP_TTL, ENCAP_PROTO, 0, /* checksum */ }; /* * Bandwidth meter variables and constants */ static MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters"); /* * Pending timeouts are stored in a hash table, the key being the * expiration time. Periodically, the entries are analysed and processed. */ #define BW_METER_BUCKETS 1024 static struct bw_meter *bw_meter_timers[BW_METER_BUCKETS]; static struct callout bw_meter_ch; #define BW_METER_PERIOD (hz) /* periodical handling of bw meters */ /* * Pending upcalls are stored in a vector which is flushed when * full, or periodically */ static struct bw_upcall bw_upcalls[BW_UPCALLS_MAX]; static u_int bw_upcalls_n; /* # of pending upcalls */ static struct callout bw_upcalls_ch; #define BW_UPCALLS_PERIOD (hz) /* periodical flush of bw upcalls */ #ifdef PIM static struct pimstat pimstat; SYSCTL_STRUCT(_net_inet_pim, PIMCTL_STATS, stats, CTLFLAG_RD, &pimstat, pimstat, "PIM Statistics (struct pimstat, netinet/pim_var.h)"); /* * Note: the PIM Register encapsulation adds the following in front of a * data packet: * * struct pim_encap_hdr { * struct ip ip; * struct pim_encap_pimhdr pim; * } * */ struct pim_encap_pimhdr { struct pim pim; uint32_t flags; }; static struct ip pim_encap_iphdr = { #if BYTE_ORDER == LITTLE_ENDIAN sizeof(struct ip) >> 2, IPVERSION, #else IPVERSION, sizeof(struct ip) 
>> 2, #endif 0, /* tos */ sizeof(struct ip), /* total length */ 0, /* id */ 0, /* frag offset */ ENCAP_TTL, IPPROTO_PIM, 0, /* checksum */ }; static struct pim_encap_pimhdr pim_encap_pimhdr = { { PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */ 0, /* reserved */ 0, /* checksum */ }, 0 /* flags */ }; static struct ifnet multicast_register_if; static vifi_t reg_vif_num = VIFI_INVALID; #endif /* PIM */ /* * Private variables. */ static vifi_t numvifs; static const struct encaptab *encap_cookie; /* * one-back cache used by mroute_encapcheck to locate a tunnel's vif * given a datagram's src ip address. */ static u_long last_encap_src; static struct vif *last_encap_vif; /* * Callout for queue processing. */ static struct callout tbf_reprocess_ch; static u_long X_ip_mcast_src(int vifi); static int X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m, struct ip_moptions *imo); static int X_ip_mrouter_done(void); static int X_ip_mrouter_get(struct socket *so, struct sockopt *m); static int X_ip_mrouter_set(struct socket *so, struct sockopt *m); static int X_legal_vif_num(int vif); static int X_mrt_ioctl(int cmd, caddr_t data); static int get_sg_cnt(struct sioc_sg_req *); static int get_vif_cnt(struct sioc_vif_req *); static int ip_mrouter_init(struct socket *, int); static int add_vif(struct vifctl *); static int del_vif(vifi_t); static int add_mfc(struct mfcctl2 *); static int del_mfc(struct mfcctl2 *); static int set_api_config(uint32_t *); /* chose API capabilities */ static int socket_send(struct socket *, struct mbuf *, struct sockaddr_in *); static int set_assert(int); static void expire_upcalls(void *); static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t); static void phyint_send(struct ip *, struct vif *, struct mbuf *); static void encap_send(struct ip *, struct vif *, struct mbuf *); static void tbf_control(struct vif *, struct mbuf *, struct ip *, u_long); static void tbf_queue(struct vif *, struct mbuf *); 
static void tbf_process_q(struct vif *); static void tbf_reprocess_q(void *); static int tbf_dq_sel(struct vif *, struct ip *); static void tbf_send_packet(struct vif *, struct mbuf *); static void tbf_update_tokens(struct vif *); static int priority(struct vif *, struct ip *); /* * Bandwidth monitoring */ static void free_bw_list(struct bw_meter *list); static int add_bw_upcall(struct bw_upcall *); static int del_bw_upcall(struct bw_upcall *); static void bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp); static void bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp); static void bw_upcalls_send(void); static void schedule_bw_meter(struct bw_meter *x, struct timeval *nowp); static void unschedule_bw_meter(struct bw_meter *x); static void bw_meter_process(void); static void expire_bw_upcalls_send(void *); static void expire_bw_meter_process(void *); #ifdef PIM static int pim_register_send(struct ip *, struct vif *, struct mbuf *, struct mfc *); static int pim_register_send_rp(struct ip *, struct vif *, struct mbuf *, struct mfc *); static int pim_register_send_upcall(struct ip *, struct vif *, struct mbuf *, struct mfc *); static struct mbuf *pim_register_prepare(struct ip *, struct mbuf *); #endif /* * whether or not special PIM assert processing is enabled. */ static int pim_assert; /* * Rate limit for assert notification messages, in usec */ #define ASSERT_MSG_TIME 3000000 /* * Kernel multicast routing API capabilities and setup. * If more API capabilities are added to the kernel, they should be * recorded in `mrt_api_support'. 
*/ static const uint32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF | MRT_MFC_FLAGS_BORDER_VIF | MRT_MFC_RP | MRT_MFC_BW_UPCALL); static uint32_t mrt_api_config = 0; /* * Hash function for a source, group entry */ #define MFCHASH(a, g) MFCHASHMOD(((a) >> 20) ^ ((a) >> 10) ^ (a) ^ \ ((g) >> 20) ^ ((g) >> 10) ^ (g)) /* * Find a route for a given origin IP address and Multicast group address * Type of service parameter to be added in the future!!! * Statistics are updated by the caller if needed * (mrtstat.mrts_mfc_lookups and mrtstat.mrts_mfc_misses) */ static struct mfc * mfc_find(in_addr_t o, in_addr_t g) { struct mfc *rt; MFC_LOCK_ASSERT(); for (rt = mfctable[MFCHASH(o,g)]; rt; rt = rt->mfc_next) if ((rt->mfc_origin.s_addr == o) && (rt->mfc_mcastgrp.s_addr == g) && (rt->mfc_stall == NULL)) break; return rt; } /* * Macros to compute elapsed time efficiently * Borrowed from Van Jacobson's scheduling code */ #define TV_DELTA(a, b, delta) { \ int xxs; \ delta = (a).tv_usec - (b).tv_usec; \ if ((xxs = (a).tv_sec - (b).tv_sec)) { \ switch (xxs) { \ case 2: \ delta += 1000000; \ /* FALLTHROUGH */ \ case 1: \ delta += 1000000; \ break; \ default: \ delta += (1000000 * xxs); \ } \ } \ } #define TV_LT(a, b) (((a).tv_usec < (b).tv_usec && \ (a).tv_sec <= (b).tv_sec) || (a).tv_sec < (b).tv_sec) /* * Handle MRT setsockopt commands to modify the multicast routing tables. 
*/ static int X_ip_mrouter_set(struct socket *so, struct sockopt *sopt) { int error, optval; vifi_t vifi; struct vifctl vifc; struct mfcctl2 mfc; struct bw_upcall bw_upcall; uint32_t i; if (so != ip_mrouter && sopt->sopt_name != MRT_INIT) return EPERM; error = 0; switch (sopt->sopt_name) { case MRT_INIT: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) break; error = ip_mrouter_init(so, optval); break; case MRT_DONE: error = ip_mrouter_done(); break; case MRT_ADD_VIF: error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc); if (error) break; error = add_vif(&vifc); break; case MRT_DEL_VIF: error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi); if (error) break; error = del_vif(vifi); break; case MRT_ADD_MFC: case MRT_DEL_MFC: /* * select data size depending on API version. */ if (sopt->sopt_name == MRT_ADD_MFC && mrt_api_config & MRT_API_FLAGS_ALL) { error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl2), sizeof(struct mfcctl2)); } else { error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl), sizeof(struct mfcctl)); bzero((caddr_t)&mfc + sizeof(struct mfcctl), sizeof(mfc) - sizeof(struct mfcctl)); } if (error) break; if (sopt->sopt_name == MRT_ADD_MFC) error = add_mfc(&mfc); else error = del_mfc(&mfc); break; case MRT_ASSERT: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) break; set_assert(optval); break; case MRT_API_CONFIG: error = sooptcopyin(sopt, &i, sizeof i, sizeof i); if (!error) error = set_api_config(&i); if (!error) error = sooptcopyout(sopt, &i, sizeof i); break; case MRT_ADD_BW_UPCALL: case MRT_DEL_BW_UPCALL: error = sooptcopyin(sopt, &bw_upcall, sizeof bw_upcall, sizeof bw_upcall); if (error) break; if (sopt->sopt_name == MRT_ADD_BW_UPCALL) error = add_bw_upcall(&bw_upcall); else error = del_bw_upcall(&bw_upcall); break; default: error = EOPNOTSUPP; break; } return error; } /* * Handle MRT getsockopt commands */ static int X_ip_mrouter_get(struct socket *so, struct sockopt *sopt) { int 
error; static int version = 0x0305; /* !!! why is this here? XXX */ switch (sopt->sopt_name) { case MRT_VERSION: error = sooptcopyout(sopt, &version, sizeof version); break; case MRT_ASSERT: error = sooptcopyout(sopt, &pim_assert, sizeof pim_assert); break; case MRT_API_SUPPORT: error = sooptcopyout(sopt, &mrt_api_support, sizeof mrt_api_support); break; case MRT_API_CONFIG: error = sooptcopyout(sopt, &mrt_api_config, sizeof mrt_api_config); break; default: error = EOPNOTSUPP; break; } return error; } /* * Handle ioctl commands to obtain information from the cache */ static int X_mrt_ioctl(int cmd, caddr_t data) { int error = 0; switch (cmd) { case (SIOCGETVIFCNT): error = get_vif_cnt((struct sioc_vif_req *)data); break; case (SIOCGETSGCNT): error = get_sg_cnt((struct sioc_sg_req *)data); break; default: error = EINVAL; break; } return error; } /* * returns the packet, byte, rpf-failure count for the source group provided */ static int get_sg_cnt(struct sioc_sg_req *req) { struct mfc *rt; MFC_LOCK(); rt = mfc_find(req->src.s_addr, req->grp.s_addr); if (rt == NULL) { MFC_UNLOCK(); req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff; return EADDRNOTAVAIL; } req->pktcnt = rt->mfc_pkt_cnt; req->bytecnt = rt->mfc_byte_cnt; req->wrong_if = rt->mfc_wrong_if; MFC_UNLOCK(); return 0; } /* * returns the input and output packet and byte counts on the vif provided */ static int get_vif_cnt(struct sioc_vif_req *req) { vifi_t vifi = req->vifi; VIF_LOCK(); if (vifi >= numvifs) { VIF_UNLOCK(); return EINVAL; } req->icount = viftable[vifi].v_pkt_in; req->ocount = viftable[vifi].v_pkt_out; req->ibytes = viftable[vifi].v_bytes_in; req->obytes = viftable[vifi].v_bytes_out; VIF_UNLOCK(); return 0; } static void ip_mrouter_reset(void) { bzero((caddr_t)mfctable, sizeof(mfctable)); bzero((caddr_t)nexpire, sizeof(nexpire)); pim_assert = 0; mrt_api_config = 0; callout_init(&expire_upcalls_ch, CALLOUT_MPSAFE); bw_upcalls_n = 0; bzero((caddr_t)bw_meter_timers, sizeof(bw_meter_timers)); 
/* Tail of the timer initialization that begins above this chunk. */
    callout_init(&bw_upcalls_ch, CALLOUT_MPSAFE);
    callout_init(&bw_meter_ch, CALLOUT_MPSAFE);
    callout_init(&tbf_reprocess_ch, CALLOUT_MPSAFE);
}

/* Serializes ip_mrouter_init() against X_ip_mrouter_done(). */
static struct mtx mrouter_mtx; /* used to synch init/done work */

/*
 * Enable multicast routing.
 *
 * Called when an mrouted-style daemon issues MRT_INIT on a raw IGMP
 * socket.  Registers "so" as the single multicast routing socket and
 * starts the periodic upcall/bandwidth-meter timers.
 *
 * Returns 0 on success; EOPNOTSUPP if the socket is not raw IGMP,
 * ENOPROTOOPT for an unsupported API version, EADDRINUSE if a routing
 * daemon is already registered.
 */
static int ip_mrouter_init(struct socket *so, int version)
{
    if (mrtdebug)
        log(LOG_DEBUG, "ip_mrouter_init: so_type = %d, pr_protocol = %d\n",
            so->so_type, so->so_proto->pr_protocol);

    /* Only a raw IGMP socket may become the multicast routing socket. */
    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_IGMP)
        return EOPNOTSUPP;
    if (version != 1)
        return ENOPROTOOPT;

    mtx_lock(&mrouter_mtx);

    /* Only one routing daemon at a time. */
    if (ip_mrouter != NULL) {
        mtx_unlock(&mrouter_mtx);
        return EADDRINUSE;
    }

    /* Kick off the periodic housekeeping timers. */
    callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls, NULL);
    callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD,
        expire_bw_upcalls_send, NULL);
    callout_reset(&bw_meter_ch, BW_METER_PERIOD,
        expire_bw_meter_process, NULL);

    ip_mrouter = so;

    mtx_unlock(&mrouter_mtx);

    if (mrtdebug)
        log(LOG_DEBUG, "ip_mrouter_init\n");

    return 0;
}

/*
 * Disable multicast routing.
 *
 * Tears down everything ip_mrouter_init() and subsequent MRT_* setsockopts
 * built up: detaches the encapsulation hook, stops the timers, turns off
 * allmulti on every physical vif, and frees the vif and MFC tables.
 *
 * Returns 0 on success, EINVAL if multicast routing is not active.
 */
static int X_ip_mrouter_done(void)
{
    vifi_t vifi;
    int i;
    struct ifnet *ifp;
    struct ifreq ifr;
    struct mfc *rt;
    struct rtdetq *rte;

    mtx_lock(&mrouter_mtx);

    if (ip_mrouter == NULL) {
        mtx_unlock(&mrouter_mtx);
        return EINVAL;
    }

    /*
     * Detach/disable hooks to the reset of the system.
     */
    ip_mrouter = NULL;
    mrt_api_config = 0;

    VIF_LOCK();
    /* Stop claiming encapsulated packets before the vifs go away. */
    if (encap_cookie) {
        const struct encaptab *c = encap_cookie;
        encap_cookie = NULL;
        encap_detach(c);
    }
    VIF_UNLOCK();

    callout_stop(&tbf_reprocess_ch);

    VIF_LOCK();
    /*
     * For each phyint in use, disable promiscuous reception of all IP
     * multicasts.
     */
    for (vifi = 0; vifi < numvifs; vifi++) {
        if (viftable[vifi].v_lcl_addr.s_addr != 0 &&
            !(viftable[vifi].v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) {
            struct sockaddr_in *so = (struct sockaddr_in *)&(ifr.ifr_addr);

            so->sin_len = sizeof(struct sockaddr_in);
            so->sin_family = AF_INET;
            so->sin_addr.s_addr = INADDR_ANY;
            ifp = viftable[vifi].v_ifp;
            if_allmulti(ifp, 0);
        }
    }
    bzero((caddr_t)tbftable, sizeof(tbftable));
    bzero((caddr_t)viftable, sizeof(viftable));
    numvifs = 0;
    pim_assert = 0;
    VIF_UNLOCK();

    /*
     * Free all multicast forwarding cache entries.
     */
    callout_stop(&expire_upcalls_ch);
    callout_stop(&bw_upcalls_ch);
    callout_stop(&bw_meter_ch);

    MFC_LOCK();
    for (i = 0; i < MFCTBLSIZ; i++) {
        for (rt = mfctable[i]; rt != NULL; ) {
            struct mfc *nr = rt->mfc_next;

            /* Drop any packets still queued awaiting an upcall reply. */
            for (rte = rt->mfc_stall; rte != NULL; ) {
                struct rtdetq *n = rte->next;

                m_freem(rte->m);
                free(rte, M_MRTABLE);
                rte = n;
            }
            free_bw_list(rt->mfc_bw_meter);
            free(rt, M_MRTABLE);
            rt = nr;
        }
    }
    bzero((caddr_t)mfctable, sizeof(mfctable));
    bzero((caddr_t)nexpire, sizeof(nexpire));
    bw_upcalls_n = 0;
    bzero(bw_meter_timers, sizeof(bw_meter_timers));
    MFC_UNLOCK();

    /*
     * Reset de-encapsulation cache
     */
    last_encap_src = INADDR_ANY;
    last_encap_vif = NULL;
#ifdef PIM
    reg_vif_num = VIFI_INVALID;
#endif

    mtx_unlock(&mrouter_mtx);

    if (mrtdebug)
        log(LOG_DEBUG, "ip_mrouter_done\n");

    return 0;
}

/*
 * Set PIM assert processing global.
 * Only 0 (off) and 1 (on) are accepted; anything else is EINVAL.
 */
static int set_assert(int i)
{
    if ((i != 1) && (i != 0))
        return EINVAL;

    pim_assert = i;

    return 0;
}

/*
 * Configure API capabilities.
 *
 * On return *apival holds the subset of the requested capabilities that
 * was actually enabled (0 on failure).  Returns 0 on success, EPERM if
 * configuration is attempted after routing state already exists.
 */
int set_api_config(uint32_t *apival)
{
    int i;

    /*
     * We can set the API capabilities only if it is the first operation
     * after MRT_INIT. I.e.:
     *  - there are no vifs installed
     *  - pim_assert is not enabled
     *  - the MFC table is empty
     */
    if (numvifs > 0) {
        *apival = 0;
        return EPERM;
    }
    if (pim_assert) {
        *apival = 0;
        return EPERM;
    }
    for (i = 0; i < MFCTBLSIZ; i++) {
        if (mfctable[i] != NULL) {
            *apival = 0;
            return EPERM;
        }
    }

    mrt_api_config = *apival & mrt_api_support;
    *apival = mrt_api_config;

    return 0;
}

/*
 * Decide if a packet is from a tunnelled peer.
 * Return 0 if not, 64 if so.  XXX yuck.. 64 ???
 * (64 is the encap_attach_func() priority; see mroute_encap_input.)
 */
static int
mroute_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
{
    struct ip *ip = mtod(m, struct ip *);
    int hlen = ip->ip_hl << 2;

    /*
     * don't claim the packet if it's not to a multicast destination or if
     * we don't have an encapsulating tunnel with the source.
     * Note:  This code assumes that the remote site IP address
     * uniquely identifies the tunnel (i.e., that this site has
     * at most one tunnel with the remote site).
     */
    if (!IN_MULTICAST(ntohl(((struct ip *)((char *)ip+hlen))->ip_dst.s_addr)))
        return 0;

    /* One-entry cache keyed on the tunnel's remote address. */
    if (ip->ip_src.s_addr != last_encap_src) {
        struct vif *vifp = viftable;
        struct vif *vife = vifp + numvifs;

        last_encap_src = ip->ip_src.s_addr;
        last_encap_vif = NULL;
        for ( ; vifp < vife; ++vifp)
            if (vifp->v_rmt_addr.s_addr == ip->ip_src.s_addr) {
                if ((vifp->v_flags & (VIFF_TUNNEL|VIFF_SRCRT)) == VIFF_TUNNEL)
                    last_encap_vif = vifp;
                break;
            }
    }
    if (last_encap_vif == NULL) {
        last_encap_src = INADDR_ANY;
        return 0;
    }
    return 64;
}

/*
 * De-encapsulate a packet and feed it back through ip input (this
 * routine is called whenever IP gets a packet that mroute_encap_func()
 * claimed).
 */
static void
mroute_encap_input(struct mbuf *m, int off)
{
    struct ip *ip = mtod(m, struct ip *);
    int hlen = ip->ip_hl << 2;

    /* Strip any IP options, then the outer encapsulating IP header. */
    if (hlen > sizeof(struct ip))
        ip_stripoptions(m, (struct mbuf *) 0);
    m->m_data += sizeof(struct ip);
    m->m_len -= sizeof(struct ip);
    m->m_pkthdr.len -= sizeof(struct ip);

    /* Make the packet appear to arrive on the decapsulation vif. */
    m->m_pkthdr.rcvif = last_encap_vif->v_ifp;

    netisr_queue(NETISR_IP, m);
    /*
     * normally we would need a "schednetisr(NETISR_IP)"
     * here but we were called by ip_input and it is going
     * to loop back & try to dequeue the packet we just
     * queued as soon as we return so we avoid the
     * unnecessary software interrrupt.
     *
     * XXX
     * This no longer holds - we may have direct-dispatched the packet,
     * or there may be a queue processing limit.
     */
}

extern struct domain inetdomain;

/* Protocol switch entry used by the encapsulation hook (IP-in-IP). */
static struct protosw mroute_encap_protosw =
{ SOCK_RAW, &inetdomain, IPPROTO_IPV4, PR_ATOMIC|PR_ADDR,
  mroute_encap_input, 0, 0, rip_ctloutput,
  0,
  0, 0, 0, 0,
  &rip_usrreqs
};

/*
 * Add a vif to the vif table.
 *
 * The vif index, flags, local/remote addresses and rate limit come from
 * the MRT_ADD_VIF request in *vifcp.  Depending on the flags the vif is
 * bound to a physical interface (allmulti enabled), the fake tunnel
 * decapsulation interface, or (PIM) the fake register interface.
 *
 * Returns 0 on success or an errno (EINVAL, EADDRINUSE, EADDRNOTAVAIL,
 * EOPNOTSUPP, EIO, or an if_allmulti() error).
 */
static int
add_vif(struct vifctl *vifcp)
{
    struct vif *vifp = viftable + vifcp->vifc_vifi;
    struct sockaddr_in sin = {sizeof sin, AF_INET};
    struct ifaddr *ifa;
    struct ifnet *ifp;
    int error;
    struct tbf *v_tbf = tbftable + vifcp->vifc_vifi;

    VIF_LOCK();
    if (vifcp->vifc_vifi >= MAXVIFS) {
        VIF_UNLOCK();
        return EINVAL;
    }
    if (vifp->v_lcl_addr.s_addr != INADDR_ANY) {
        VIF_UNLOCK();
        return EADDRINUSE;
    }
    if (vifcp->vifc_lcl_addr.s_addr == INADDR_ANY) {
        VIF_UNLOCK();
        return EADDRNOTAVAIL;
    }

    /* Find the interface with an address in AF_INET family */
#ifdef PIM
    if (vifcp->vifc_flags & VIFF_REGISTER) {
        /*
         * XXX: Because VIFF_REGISTER does not really need a valid
         * local interface (e.g. it could be 127.0.0.2), we don't
         * check its address.
         */
        ifp = NULL;
    } else
#endif
    {
        sin.sin_addr = vifcp->vifc_lcl_addr;
        ifa = ifa_ifwithaddr((struct sockaddr *)&sin);
        if (ifa == NULL) {
            VIF_UNLOCK();
            return EADDRNOTAVAIL;
        }
        ifp = ifa->ifa_ifp;
    }

    if (vifcp->vifc_flags & VIFF_TUNNEL) {
        if ((vifcp->vifc_flags & VIFF_SRCRT) == 0) {
            /*
             * An encapsulating tunnel is wanted.  Tell
             * mroute_encap_input() to start paying attention
             * to encapsulated packets.
             */
            if (encap_cookie == NULL) {
                int i;

                encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV4,
                        mroute_encapcheck,
                        (struct protosw *)&mroute_encap_protosw, NULL);

                if (encap_cookie == NULL) {
                    printf("ip_mroute: unable to attach encap\n");
                    VIF_UNLOCK();
                    return EIO; /* XXX */
                }
                for (i = 0; i < MAXVIFS; ++i) {
                    if_initname(&multicast_decap_if[i], "mdecap", i);
                }
            }
            /*
             * Set interface to fake encapsulator interface
             */
            ifp = &multicast_decap_if[vifcp->vifc_vifi];
            /*
             * Prepare cached route entry
             */
            bzero(&vifp->v_route, sizeof(vifp->v_route));
        } else {
            log(LOG_ERR, "source routed tunnels not supported\n");
            VIF_UNLOCK();
            return EOPNOTSUPP;
        }
#ifdef PIM
    } else if (vifcp->vifc_flags & VIFF_REGISTER) {
        ifp = &multicast_register_if;
        if (mrtdebug)
            log(LOG_DEBUG, "Adding a register vif, ifp: %p\n",
                (void *)&multicast_register_if);
        if (reg_vif_num == VIFI_INVALID) {
            if_initname(&multicast_register_if, "register_vif", 0);
            multicast_register_if.if_flags = IFF_LOOPBACK;
            bzero(&vifp->v_route, sizeof(vifp->v_route));
            reg_vif_num = vifcp->vifc_vifi;
        }
#endif
    } else {		/* Make sure the interface supports multicast */
        if ((ifp->if_flags & IFF_MULTICAST) == 0) {
            VIF_UNLOCK();
            return EOPNOTSUPP;
        }

        /* Enable promiscuous reception of all IP multicasts from the if */
        error = if_allmulti(ifp, 1);
        if (error) {
            VIF_UNLOCK();
            return error;
        }
    }

    /* define parameters for the tbf structure */
    vifp->v_tbf = v_tbf;
    GET_TIME(vifp->v_tbf->tbf_last_pkt_t);
    vifp->v_tbf->tbf_n_tok = 0;
    vifp->v_tbf->tbf_q_len = 0;
    vifp->v_tbf->tbf_max_q_len = MAXQSIZE;
    vifp->v_tbf->tbf_q = vifp->v_tbf->tbf_t = NULL;

    vifp->v_flags = vifcp->vifc_flags;
    vifp->v_threshold = vifcp->vifc_threshold;
    vifp->v_lcl_addr = vifcp->vifc_lcl_addr;
    vifp->v_rmt_addr = vifcp->vifc_rmt_addr;
    vifp->v_ifp = ifp;
    /* scaling up here allows division by 1024 in critical code */
    vifp->v_rate_limit= vifcp->vifc_rate_limit * 1024 / 1000;
    vifp->v_rsvp_on = 0;
    vifp->v_rsvpd = NULL;
    /* initialize per vif pkt counters */
    vifp->v_pkt_in = 0;
    vifp->v_pkt_out = 0;
    vifp->v_bytes_in = 0;
    vifp->v_bytes_out = 0;

    /* Adjust numvifs up if the vifi is higher than numvifs */
    if (numvifs <= vifcp->vifc_vifi)
        numvifs = vifcp->vifc_vifi + 1;
    VIF_UNLOCK();

    if (mrtdebug)
        log(LOG_DEBUG, "add_vif #%d, lcladdr %lx, %s %lx, thresh %x, rate %d\n",
            vifcp->vifc_vifi,
            (u_long)ntohl(vifcp->vifc_lcl_addr.s_addr),
            (vifcp->vifc_flags & VIFF_TUNNEL) ? "rmtaddr" : "mask",
            (u_long)ntohl(vifcp->vifc_rmt_addr.s_addr),
            vifcp->vifc_threshold,
            vifcp->vifc_rate_limit);

    return 0;
}

/*
 * Delete a vif from the vif table.
 *
 * Disables allmulti on a physical vif, invalidates the one-entry
 * decapsulation cache if it points here, drains the token-bucket queue,
 * and zeroes the vif/tbf slots.  numvifs is then shrunk to the highest
 * still-active vif.  Returns 0, EINVAL, or EADDRNOTAVAIL.
 */
static int
del_vif(vifi_t vifi)
{
    struct vif *vifp;

    VIF_LOCK();

    if (vifi >= numvifs) {
        VIF_UNLOCK();
        return EINVAL;
    }
    vifp = &viftable[vifi];
    if (vifp->v_lcl_addr.s_addr == INADDR_ANY) {
        VIF_UNLOCK();
        return EADDRNOTAVAIL;
    }

    if (!(vifp->v_flags & (VIFF_TUNNEL | VIFF_REGISTER)))
        if_allmulti(vifp->v_ifp, 0);

    if (vifp == last_encap_vif) {
        last_encap_vif = NULL;
        last_encap_src = INADDR_ANY;
    }

    /*
     * Free packets queued at the interface
     */
    while (vifp->v_tbf->tbf_q) {
        struct mbuf *m = vifp->v_tbf->tbf_q;

        vifp->v_tbf->tbf_q = m->m_act;
        m_freem(m);
    }

#ifdef PIM
    if (vifp->v_flags & VIFF_REGISTER)
        reg_vif_num = VIFI_INVALID;
#endif

    bzero((caddr_t)vifp->v_tbf, sizeof(*(vifp->v_tbf)));
    bzero((caddr_t)vifp, sizeof (*vifp));

    if (mrtdebug)
        log(LOG_DEBUG, "del_vif %d, numvifs %d\n", vifi, numvifs);

    /* Adjust numvifs down */
    for (vifi = numvifs; vifi > 0; vifi--)
        if (viftable[vifi-1].v_lcl_addr.s_addr != INADDR_ANY)
            break;
    numvifs = vifi;

    VIF_UNLOCK();

    return 0;
}

/*
 * update an mfc entry without resetting counters and S,G addresses.
 */
static void
update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
{
    int i;

    rt->mfc_parent = mfccp->mfcc_parent;
    for (i = 0; i < numvifs; i++) {
        rt->mfc_ttls[i] = mfccp->mfcc_ttls[i];
        /* Only flags enabled via MRT_API_CONFIG are honoured. */
        rt->mfc_flags[i] = mfccp->mfcc_flags[i] & mrt_api_config &
            MRT_MFC_FLAGS_ALL;
    }
    /* set the RP address */
    if (mrt_api_config & MRT_MFC_RP)
        rt->mfc_rp = mfccp->mfcc_rp;
    else
        rt->mfc_rp.s_addr = INADDR_ANY;
}

/*
 * fully initialize an mfc entry from the parameter.
 */
static void
init_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
{
    rt->mfc_origin = mfccp->mfcc_origin;
    rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp;

    update_mfc_params(rt, mfccp);

    /* initialize pkt counters per src-grp */
    rt->mfc_pkt_cnt = 0;
    rt->mfc_byte_cnt = 0;
    rt->mfc_wrong_if = 0;
    rt->mfc_last_assert.tv_sec = rt->mfc_last_assert.tv_usec = 0;
}

/*
 * Add an mfc entry (MRT_ADD_MFC).
 *
 * Three cases: update an existing entry in place; resolve a pending
 * upcall entry (forwarding and freeing its stalled packets); or insert
 * a brand new entry at the head of its hash chain.
 * Returns 0 or ENOBUFS.
 */
static int
add_mfc(struct mfcctl2 *mfccp)
{
    struct mfc *rt;
    u_long hash;
    struct rtdetq *rte;
    u_short nstl;

    VIF_LOCK();
    MFC_LOCK();

    rt = mfc_find(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);

    /* If an entry already exists, just update the fields */
    if (rt) {
        if (mrtdebug & DEBUG_MFC)
            log(LOG_DEBUG,"add_mfc update o %lx g %lx p %x\n",
                (u_long)ntohl(mfccp->mfcc_origin.s_addr),
                (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
                mfccp->mfcc_parent);
        update_mfc_params(rt, mfccp);
        MFC_UNLOCK();
        VIF_UNLOCK();
        return 0;
    }

    /*
     * Find the entry for which the upcall was made and update
     */
    hash = MFCHASH(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);
    for (rt = mfctable[hash], nstl = 0; rt; rt = rt->mfc_next) {
        if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
            (rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr) &&
            (rt->mfc_stall != NULL)) {

            if (nstl++)
                log(LOG_ERR, "add_mfc %s o %lx g %lx p %x dbx %p\n",
                    "multiple kernel entries",
                    (u_long)ntohl(mfccp->mfcc_origin.s_addr),
                    (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
                    mfccp->mfcc_parent, (void *)rt->mfc_stall);

            if (mrtdebug & DEBUG_MFC)
                log(LOG_DEBUG,"add_mfc o %lx g %lx p %x dbg %p\n",
                    (u_long)ntohl(mfccp->mfcc_origin.s_addr),
                    (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
                    mfccp->mfcc_parent, (void *)rt->mfc_stall);

            init_mfc_params(rt, mfccp);

            rt->mfc_expire = 0;	/* Don't clean this guy up */
            nexpire[hash]--;

            /* free packets Qed at the end of this entry */
            for (rte = rt->mfc_stall; rte != NULL; ) {
                struct rtdetq *n = rte->next;

                /* Now that a route exists, forward the stalled packet. */
                ip_mdq(rte->m, rte->ifp, rt, -1);
                m_freem(rte->m);
                free(rte, M_MRTABLE);
                rte = n;
            }
            rt->mfc_stall = NULL;
        }
    }

    /*
     * It is possible that an entry is being inserted without an upcall
     */
    if (nstl == 0) {
        if (mrtdebug & DEBUG_MFC)
            log(LOG_DEBUG,"add_mfc no upcall h %lu o %lx g %lx p %x\n",
                hash, (u_long)ntohl(mfccp->mfcc_origin.s_addr),
                (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
                mfccp->mfcc_parent);

        for (rt = mfctable[hash]; rt != NULL; rt = rt->mfc_next) {
            if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
                (rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr)) {
                init_mfc_params(rt, mfccp);
                if (rt->mfc_expire)
                    nexpire[hash]--;
                rt->mfc_expire = 0;
                break; /* XXX */
            }
        }
        if (rt == NULL) {		/* no upcall, so make a new entry */
            rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
            if (rt == NULL) {
                MFC_UNLOCK();
                VIF_UNLOCK();
                return ENOBUFS;
            }

            init_mfc_params(rt, mfccp);
            rt->mfc_expire     = 0;
            rt->mfc_stall      = NULL;
            rt->mfc_bw_meter   = NULL;

            /* insert new entry at head of hash chain */
            rt->mfc_next = mfctable[hash];
            mfctable[hash] = rt;
        }
    }
    MFC_UNLOCK();
    VIF_UNLOCK();
    return 0;
}

/*
 * Delete an mfc entry (MRT_DEL_MFC).
 *
 * Only fully-resolved entries (no pending upcall, mfc_stall == NULL) are
 * eligible.  The entry is unlinked first, then its bandwidth meters are
 * freed.  Returns 0 or EADDRNOTAVAIL.
 */
static int
del_mfc(struct mfcctl2 *mfccp)
{
    struct in_addr	origin;
    struct in_addr	mcastgrp;
    struct mfc		*rt;
    struct mfc		**nptr;
    u_long		hash;
    struct bw_meter	*list;

    origin = mfccp->mfcc_origin;
    mcastgrp = mfccp->mfcc_mcastgrp;

    if (mrtdebug & DEBUG_MFC)
        log(LOG_DEBUG,"del_mfc orig %lx mcastgrp %lx\n",
            (u_long)ntohl(origin.s_addr), (u_long)ntohl(mcastgrp.s_addr));

    MFC_LOCK();

    hash = MFCHASH(origin.s_addr, mcastgrp.s_addr);
    for (nptr = &mfctable[hash]; (rt = *nptr) != NULL; nptr = &rt->mfc_next)
        if (origin.s_addr == rt->mfc_origin.s_addr &&
            mcastgrp.s_addr == rt->mfc_mcastgrp.s_addr &&
            rt->mfc_stall == NULL)
            break;
    if (rt == NULL) {
        MFC_UNLOCK();
        return EADDRNOTAVAIL;
    }

    *nptr = rt->mfc_next;

    /*
     * free the bw_meter entries
     */
    list = rt->mfc_bw_meter;
    rt->mfc_bw_meter = NULL;

    free(rt, M_MRTABLE);

    free_bw_list(list);

    MFC_UNLOCK();

    return 0;
}

/*
 * Send a message to mrouted on the multicast routing socket.
 * Takes ownership of mm: on failure the mbuf is freed and -1 returned.
 */
static int
socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
{
    if (s) {
        mtx_lock(&Giant);	/* XXX until sockets are locked */
        if (sbappendaddr(&s->so_rcv, (struct sockaddr *)src, mm, NULL) != 0) {
            sorwakeup(s);
            mtx_unlock(&Giant);
            return 0;
        }
        mtx_unlock(&Giant);
    }
    m_freem(mm);
    return -1;
}

/*
 * IP multicast forwarding function. This function assumes that the packet
 * pointed to by "ip" has arrived on (or is about to be sent to) the interface
 * pointed to by "ifp", and the packet is to be relayed to other networks
 * that have members of the packet's destination IP multicast group.
 *
 * The packet is returned unscathed to the caller, unless it is
 * erroneous, in which case a non-zero return value tells the caller to
 * discard it.
 */

#define TUNNEL_LEN  12  /* # bytes of IP option for tunnel encapsulation  */

static int
X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
    struct ip_moptions *imo)
{
    struct mfc *rt;
    int error;
    vifi_t vifi;

    if (mrtdebug & DEBUG_FORWARD)
        log(LOG_DEBUG, "ip_mforward: src %lx, dst %lx, ifp %p\n",
            (u_long)ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr),
            (void *)ifp);

    if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 ||
                ((u_char *)(ip + 1))[1] != IPOPT_LSRR ) {
        /*
         * Packet arrived via a physical interface or
         * an encapsulated tunnel or a register_vif.
         */
    } else {
        /*
         * Packet arrived through a source-route tunnel.
         * Source-route tunnels are no longer supported.
         */
        static int last_log;
        if (last_log != time_second) {	/* rate-limit to once per second */
            last_log = time_second;
            log(LOG_ERR,
                "ip_mforward: received source-routed packet from %lx\n",
                (u_long)ntohl(ip->ip_src.s_addr));
        }
        return 1;
    }

    VIF_LOCK();
    MFC_LOCK();
    if (imo && ((vifi = imo->imo_multicast_vif) < numvifs)) {
        /* Caller requested transmission on one specific vif. */
        if (ip->ip_ttl < 255)
            ip->ip_ttl++;	/* compensate for -1 in *_send routines */
        if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
            struct vif *vifp = viftable + vifi;

            printf("Sending IPPROTO_RSVP from %lx to %lx on vif %d (%s%s)\n",
                (long)ntohl(ip->ip_src.s_addr), (long)ntohl(ip->ip_dst.s_addr),
                vifi,
                (vifp->v_flags & VIFF_TUNNEL) ? "tunnel on " : "",
                vifp->v_ifp->if_xname);
        }
        error = ip_mdq(m, ifp, NULL, vifi);
        MFC_UNLOCK();
        VIF_UNLOCK();
        return error;
    }
    if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
        printf("Warning: IPPROTO_RSVP from %lx to %lx without vif option\n",
            (long)ntohl(ip->ip_src.s_addr), (long)ntohl(ip->ip_dst.s_addr));
        if (!imo)
            printf("In fact, no options were specified at all\n");
    }

    /*
     * Don't forward a packet with time-to-live of zero or one,
     * or a packet destined to a local-only group.
     */
    if (ip->ip_ttl <= 1 || ntohl(ip->ip_dst.s_addr) <= INADDR_MAX_LOCAL_GROUP) {
        MFC_UNLOCK();
        VIF_UNLOCK();
        return 0;
    }

    /*
     * Determine forwarding vifs from the forwarding cache table
     */
    ++mrtstat.mrts_mfc_lookups;
    rt = mfc_find(ip->ip_src.s_addr, ip->ip_dst.s_addr);

    /* Entry exists, so forward if necessary */
    if (rt != NULL) {
        error = ip_mdq(m, ifp, rt, -1);
        MFC_UNLOCK();
        VIF_UNLOCK();
        return error;
    } else {
        /*
         * If we don't have a route for packet's origin,
         * Make a copy of the packet & send message to routing daemon
         */

        struct mbuf *mb0;
        struct rtdetq *rte;
        u_long hash;
        int hlen = ip->ip_hl << 2;

        ++mrtstat.mrts_mfc_misses;

        mrtstat.mrts_no_route++;
        if (mrtdebug & (DEBUG_FORWARD | DEBUG_MFC))
            log(LOG_DEBUG, "ip_mforward: no rte s %lx g %lx\n",
                (u_long)ntohl(ip->ip_src.s_addr),
                (u_long)ntohl(ip->ip_dst.s_addr));

        /*
         * Allocate mbufs early so that we don't do extra work if we are
         * just going to fail anyway.  Make sure to pullup the header so
         * that other people can't step on it.
         */
        rte = (struct rtdetq *)malloc((sizeof *rte), M_MRTABLE, M_NOWAIT);
        if (rte == NULL) {
            MFC_UNLOCK();
            VIF_UNLOCK();
            return ENOBUFS;
        }
        mb0 = m_copypacket(m, M_DONTWAIT);
        if (mb0 && (M_HASCL(mb0) || mb0->m_len < hlen))
            mb0 = m_pullup(mb0, hlen);
        if (mb0 == NULL) {
            free(rte, M_MRTABLE);
            MFC_UNLOCK();
            VIF_UNLOCK();
            return ENOBUFS;
        }

        /* is there an upcall waiting for this flow ? */
        hash = MFCHASH(ip->ip_src.s_addr, ip->ip_dst.s_addr);
        for (rt = mfctable[hash]; rt; rt = rt->mfc_next) {
            if ((ip->ip_src.s_addr == rt->mfc_origin.s_addr) &&
                (ip->ip_dst.s_addr == rt->mfc_mcastgrp.s_addr) &&
                (rt->mfc_stall != NULL))
                break;
        }

        if (rt == NULL) {
            int i;
            struct igmpmsg *im;
            struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
            struct mbuf *mm;

            /*
             * Locate the vifi for the incoming interface for this packet.
             * If none found, drop packet.
             */
            for (vifi=0; vifi < numvifs && viftable[vifi].v_ifp != ifp; vifi++)
                ;
            if (vifi >= numvifs)	/* vif not found, drop packet */
                goto non_fatal;

            /* no upcall, so make a new entry */
            rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
            if (rt == NULL)
                goto fail;
            /* Make a copy of the header to send to the user level process */
            mm = m_copy(mb0, 0, hlen);
            if (mm == NULL)
                goto fail1;

            /*
             * Send message to routing daemon to install
             * a route into the kernel table
             */

            im = mtod(mm, struct igmpmsg *);
            im->im_msgtype = IGMPMSG_NOCACHE;
            im->im_mbz = 0;
            im->im_vif = vifi;

            mrtstat.mrts_upcalls++;

            k_igmpsrc.sin_addr = ip->ip_src;
            if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
                log(LOG_WARNING, "ip_mforward: ip_mrouter socket queue full\n");
                ++mrtstat.mrts_upq_sockfull;
            /* Unwind labels: fail1 frees rt, fail frees rte + copy. */
            fail1:
                free(rt, M_MRTABLE);
            fail:
                free(rte, M_MRTABLE);
                m_freem(mb0);
                MFC_UNLOCK();
                VIF_UNLOCK();
                return ENOBUFS;
            }

            /* insert new entry at head of hash chain */
            rt->mfc_origin.s_addr     = ip->ip_src.s_addr;
            rt->mfc_mcastgrp.s_addr   = ip->ip_dst.s_addr;
            rt->mfc_expire	      = UPCALL_EXPIRE;
            nexpire[hash]++;
            for (i = 0; i < numvifs; i++) {
                rt->mfc_ttls[i] = 0;
                rt->mfc_flags[i] = 0;
            }
            rt->mfc_parent = -1;

            rt->mfc_rp.s_addr = INADDR_ANY; /* clear the RP address */

            rt->mfc_bw_meter = NULL;

            /* link into table */
            rt->mfc_next   = mfctable[hash];
            mfctable[hash] = rt;
            rt->mfc_stall = rte;

        } else {
            /* determine if q has overflowed */
            int npkts = 0;
            struct rtdetq **p;

            /*
             * XXX ouch! we need to append to the list, but we
             * only have a pointer to the front, so we have to
             * scan the entire list every time.
             */
            for (p = &rt->mfc_stall; *p != NULL; p = &(*p)->next)
                npkts++;

            if (npkts > MAX_UPQ) {
                mrtstat.mrts_upq_ovflw++;
            non_fatal:
                free(rte, M_MRTABLE);
                m_freem(mb0);
                MFC_UNLOCK();
                VIF_UNLOCK();
                return 0;
            }

            /* Add this entry to the end of the queue */
            *p = rte;
        }

        rte->m			= mb0;
        rte->ifp		= ifp;
        rte->next		= NULL;

        MFC_UNLOCK();
        VIF_UNLOCK();

        return 0;
    }
}

/*
 * Clean up the cache entry if upcall is not serviced.
 * Runs periodically from a callout; entries whose mfc_expire counter
 * reaches zero have their stalled packets and bandwidth meters freed.
 */
static void
expire_upcalls(void *unused)
{
    struct rtdetq *rte;
    struct mfc *mfc, **nptr;
    int i;

    MFC_LOCK();
    for (i = 0; i < MFCTBLSIZ; i++) {
        if (nexpire[i] == 0)	/* nothing pending in this bucket */
            continue;
        nptr = &mfctable[i];
        for (mfc = *nptr; mfc != NULL; mfc = *nptr) {
            /*
             * Skip real cache entries
             * Make sure it wasn't marked to not expire (shouldn't happen)
             * If it expires now
             */
            if (mfc->mfc_stall != NULL && mfc->mfc_expire != 0 &&
                --mfc->mfc_expire == 0) {
                if (mrtdebug & DEBUG_EXPIRE)
                    log(LOG_DEBUG, "expire_upcalls: expiring (%lx %lx)\n",
                        (u_long)ntohl(mfc->mfc_origin.s_addr),
                        (u_long)ntohl(mfc->mfc_mcastgrp.s_addr));
                /*
                 * drop all the packets
                 * free the mbuf with the pkt, if, timing info
                 */
                for (rte = mfc->mfc_stall; rte; ) {
                    struct rtdetq *n = rte->next;

                    m_freem(rte->m);
                    free(rte, M_MRTABLE);
                    rte = n;
                }
                ++mrtstat.mrts_cache_cleanups;
                nexpire[i]--;

                /*
                 * free the bw_meter entries
                 */
                while (mfc->mfc_bw_meter != NULL) {
                    struct bw_meter *x = mfc->mfc_bw_meter;

                    mfc->mfc_bw_meter = x->bm_mfc_next;
                    free(x, M_BWMETER);
                }

                *nptr = mfc->mfc_next;
                free(mfc, M_MRTABLE);
            } else {
                nptr = &mfc->mfc_next;
            }
        }
    }
    MFC_UNLOCK();

    callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls, NULL);
}

/*
 * Packet forwarding routine once entry in the cache is made.
 *
 * Forwards m out every vif whose TTL threshold the packet passes, or
 * out the single vif xmt_vif if that is not -1.  Also implements
 * wrong-interface detection and the PIM assert upcall.
 */
static int
ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
{
    struct ip  *ip = mtod(m, struct ip *);
    vifi_t vifi;
    int plen = ip->ip_len;

    VIF_LOCK_ASSERT();
/*
 * Macro to send packet on vif.  Since RSVP packets don't get counted on
 * input, they shouldn't get counted on output, so statistics keeping is
 * separate.
 */
#define MC_SEND(ip,vifp,m) {				\
		if ((vifp)->v_flags & VIFF_TUNNEL)	\
		    encap_send((ip), (vifp), (m));	\
		else					\
		    phyint_send((ip), (vifp), (m));	\
}

    /*
     * If xmt_vif is not -1, send on only the requested vif.
     *
     * (since vifi_t is u_short, -1 becomes MAXUSHORT, which > numvifs.)
     */
    if (xmt_vif < numvifs) {
#ifdef PIM
        if (viftable[xmt_vif].v_flags & VIFF_REGISTER)
            pim_register_send(ip, viftable + xmt_vif, m, rt);
        else
#endif
        MC_SEND(ip, viftable + xmt_vif, m);
        return 1;
    }

    /*
     * Don't forward if it didn't arrive from the parent vif for its origin.
     */
    vifi = rt->mfc_parent;
    if ((vifi >= numvifs) || (viftable[vifi].v_ifp != ifp)) {
        /* came in the wrong interface */
        if (mrtdebug & DEBUG_FORWARD)
            log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n",
                (void *)ifp, vifi, (void *)viftable[vifi].v_ifp);
        ++mrtstat.mrts_wrong_if;
        ++rt->mfc_wrong_if;
        /*
         * If we are doing PIM assert processing, send a message
         * to the routing daemon.
         *
         * XXX: A PIM-SM router needs the WRONGVIF detection so it
         * can complete the SPT switch, regardless of the type
         * of the iif (broadcast media, GRE tunnel, etc).
         */
        if (pim_assert && (vifi < numvifs) && viftable[vifi].v_ifp) {
            struct timeval now;
            u_long delta;

#ifdef PIM
            if (ifp == &multicast_register_if)
                pimstat.pims_rcv_registers_wrongiif++;
#endif

            /* Get vifi for the incoming packet */
            for (vifi=0; vifi < numvifs && viftable[vifi].v_ifp != ifp; vifi++)
                ;
            if (vifi >= numvifs)
                return 0;	/* The iif is not found: ignore the packet. */

            if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_DISABLE_WRONGVIF)
                return 0;	/* WRONGVIF disabled: ignore the packet */

            GET_TIME(now);

            TV_DELTA(rt->mfc_last_assert, now, delta);

            /* Rate-limit WRONGVIF upcalls to one per ASSERT_MSG_TIME. */
            if (delta > ASSERT_MSG_TIME) {
                struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
                struct igmpmsg *im;
                int hlen = ip->ip_hl << 2;
                struct mbuf *mm = m_copy(m, 0, hlen);

                if (mm && (M_HASCL(mm) || mm->m_len < hlen))
                    mm = m_pullup(mm, hlen);
                if (mm == NULL)
                    return ENOBUFS;

                rt->mfc_last_assert = now;

                im = mtod(mm, struct igmpmsg *);
                im->im_msgtype	= IGMPMSG_WRONGVIF;
                im->im_mbz	= 0;
                im->im_vif	= vifi;

                mrtstat.mrts_upcalls++;

                k_igmpsrc.sin_addr = im->im_src;
                if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
                    log(LOG_WARNING,
                        "ip_mforward: ip_mrouter socket queue full\n");
                    ++mrtstat.mrts_upq_sockfull;
                    return ENOBUFS;
                }
            }
        }
        return 0;
    }

    /* If I sourced this packet, it counts as output, else it was input. */
    if (ip->ip_src.s_addr == viftable[vifi].v_lcl_addr.s_addr) {
        viftable[vifi].v_pkt_out++;
        viftable[vifi].v_bytes_out += plen;
    } else {
        viftable[vifi].v_pkt_in++;
        viftable[vifi].v_bytes_in += plen;
    }
    rt->mfc_pkt_cnt++;
    rt->mfc_byte_cnt += plen;

    /*
     * For each vif, decide if a copy of the packet should be forwarded.
     * Forward if:
     *		- the ttl exceeds the vif's threshold
     *		- there are group members downstream on interface
     */
    for (vifi = 0; vifi < numvifs; vifi++)
        if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) {
            viftable[vifi].v_pkt_out++;
            viftable[vifi].v_bytes_out += plen;
#ifdef PIM
            if (viftable[vifi].v_flags & VIFF_REGISTER)
                pim_register_send(ip, viftable + vifi, m, rt);
            else
#endif
            MC_SEND(ip, viftable+vifi, m);
        }

    /*
     * Perform upcall-related bw measuring.
     */
    if (rt->mfc_bw_meter != NULL) {
        struct bw_meter *x;
        struct timeval now;

        GET_TIME(now);
        MFC_LOCK_ASSERT();
        for (x = rt->mfc_bw_meter; x != NULL; x = x->bm_mfc_next)
            bw_meter_receive_packet(x, plen, &now);
    }

    return 0;
}

/*
 * check if a vif number is legal/ok. This is used by ip_output.
*/ static int X_legal_vif_num(int vif) { /* XXX unlocked, matter? */ return (vif >= 0 && vif < numvifs); } /* * Return the local address used by this vif */ static u_long X_ip_mcast_src(int vifi) { /* XXX unlocked, matter? */ if (vifi >= 0 && vifi < numvifs) return viftable[vifi].v_lcl_addr.s_addr; else return INADDR_ANY; } static void phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m) { struct mbuf *mb_copy; int hlen = ip->ip_hl << 2; VIF_LOCK_ASSERT(); /* * Make a new reference to the packet; make sure that * the IP header is actually copied, not just referenced, * so that ip_output() only scribbles on the copy. */ mb_copy = m_copypacket(m, M_DONTWAIT); if (mb_copy && (M_HASCL(mb_copy) || mb_copy->m_len < hlen)) mb_copy = m_pullup(mb_copy, hlen); if (mb_copy == NULL) return; if (vifp->v_rate_limit == 0) tbf_send_packet(vifp, mb_copy); else tbf_control(vifp, mb_copy, mtod(mb_copy, struct ip *), ip->ip_len); } static void encap_send(struct ip *ip, struct vif *vifp, struct mbuf *m) { struct mbuf *mb_copy; struct ip *ip_copy; int i, len = ip->ip_len; VIF_LOCK_ASSERT(); /* Take care of delayed checksums */ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { in_delayed_cksum(m); m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; } /* * copy the old packet & pullup its IP header into the * new mbuf so we can modify it. Try to fill the new * mbuf since if we don't the ethernet driver will. */ MGETHDR(mb_copy, M_DONTWAIT, MT_HEADER); if (mb_copy == NULL) return; #ifdef MAC mac_create_mbuf_multicast_encap(m, vifp->v_ifp, mb_copy); #endif mb_copy->m_data += max_linkhdr; mb_copy->m_len = sizeof(multicast_encap_iphdr); if ((mb_copy->m_next = m_copypacket(m, M_DONTWAIT)) == NULL) { m_freem(mb_copy); return; } i = MHLEN - M_LEADINGSPACE(mb_copy); if (i > len) i = len; mb_copy = m_pullup(mb_copy, i); if (mb_copy == NULL) return; mb_copy->m_pkthdr.len = len + sizeof(multicast_encap_iphdr); /* * fill in the encapsulating IP header. 
*/ ip_copy = mtod(mb_copy, struct ip *); *ip_copy = multicast_encap_iphdr; #ifdef RANDOM_IP_ID ip_copy->ip_id = ip_randomid(); #else ip_copy->ip_id = htons(ip_id++); #endif ip_copy->ip_len += len; ip_copy->ip_src = vifp->v_lcl_addr; ip_copy->ip_dst = vifp->v_rmt_addr; /* * turn the encapsulated IP header back into a valid one. */ ip = (struct ip *)((caddr_t)ip_copy + sizeof(multicast_encap_iphdr)); --ip->ip_ttl; ip->ip_len = htons(ip->ip_len); ip->ip_off = htons(ip->ip_off); ip->ip_sum = 0; mb_copy->m_data += sizeof(multicast_encap_iphdr); ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2); mb_copy->m_data -= sizeof(multicast_encap_iphdr); if (vifp->v_rate_limit == 0) tbf_send_packet(vifp, mb_copy); else tbf_control(vifp, mb_copy, ip, ip_copy->ip_len); } /* * Token bucket filter module */ static void tbf_control(struct vif *vifp, struct mbuf *m, struct ip *ip, u_long p_len) { struct tbf *t = vifp->v_tbf; VIF_LOCK_ASSERT(); if (p_len > MAX_BKT_SIZE) { /* drop if packet is too large */ mrtstat.mrts_pkt2large++; m_freem(m); return; } tbf_update_tokens(vifp); if (t->tbf_q_len == 0) { /* queue empty... 
*/ if (p_len <= t->tbf_n_tok) { /* send packet if enough tokens */ t->tbf_n_tok -= p_len; tbf_send_packet(vifp, m); } else { /* no, queue packet and try later */ tbf_queue(vifp, m); callout_reset(&tbf_reprocess_ch, TBF_REPROCESS, tbf_reprocess_q, vifp); } } else if (t->tbf_q_len < t->tbf_max_q_len) { /* finite queue length, so queue pkts and process queue */ tbf_queue(vifp, m); tbf_process_q(vifp); } else { /* queue full, try to dq and queue and process */ if (!tbf_dq_sel(vifp, ip)) { mrtstat.mrts_q_overflow++; m_freem(m); } else { tbf_queue(vifp, m); tbf_process_q(vifp); } } } /* * adds a packet to the queue at the interface */ static void tbf_queue(struct vif *vifp, struct mbuf *m) { struct tbf *t = vifp->v_tbf; VIF_LOCK_ASSERT(); if (t->tbf_t == NULL) /* Queue was empty */ t->tbf_q = m; else /* Insert at tail */ t->tbf_t->m_act = m; t->tbf_t = m; /* Set new tail pointer */ #ifdef DIAGNOSTIC /* Make sure we didn't get fed a bogus mbuf */ if (m->m_act) panic("tbf_queue: m_act"); #endif m->m_act = NULL; t->tbf_q_len++; } /* * processes the queue at the interface */ static void tbf_process_q(struct vif *vifp) { struct tbf *t = vifp->v_tbf; VIF_LOCK_ASSERT(); /* loop through the queue at the interface and send as many packets * as possible */ while (t->tbf_q_len > 0) { struct mbuf *m = t->tbf_q; int len = mtod(m, struct ip *)->ip_len; /* determine if the packet can be sent */ if (len > t->tbf_n_tok) /* not enough tokens, we are done */ break; /* ok, reduce no of tokens, dequeue and send the packet. 
*/ t->tbf_n_tok -= len; t->tbf_q = m->m_act; if (--t->tbf_q_len == 0) t->tbf_t = NULL; m->m_act = NULL; tbf_send_packet(vifp, m); } } static void tbf_reprocess_q(void *xvifp) { struct vif *vifp = xvifp; if (ip_mrouter == NULL) return; VIF_LOCK(); tbf_update_tokens(vifp); tbf_process_q(vifp); if (vifp->v_tbf->tbf_q_len) callout_reset(&tbf_reprocess_ch, TBF_REPROCESS, tbf_reprocess_q, vifp); VIF_UNLOCK(); } /* function that will selectively discard a member of the queue * based on the precedence value and the priority */ static int tbf_dq_sel(struct vif *vifp, struct ip *ip) { u_int p; struct mbuf *m, *last; struct mbuf **np; struct tbf *t = vifp->v_tbf; VIF_LOCK_ASSERT(); p = priority(vifp, ip); np = &t->tbf_q; last = NULL; while ((m = *np) != NULL) { if (p > priority(vifp, mtod(m, struct ip *))) { *np = m->m_act; /* If we're removing the last packet, fix the tail pointer */ if (m == t->tbf_t) t->tbf_t = last; m_freem(m); /* It's impossible for the queue to be empty, but check anyways. */ if (--t->tbf_q_len == 0) t->tbf_t = NULL; mrtstat.mrts_drop_sel++; return 1; } np = &m->m_act; last = m; } return 0; } static void tbf_send_packet(struct vif *vifp, struct mbuf *m) { VIF_LOCK_ASSERT(); if (vifp->v_flags & VIFF_TUNNEL) /* If tunnel options */ ip_output(m, NULL, &vifp->v_route, IP_FORWARDING, NULL, NULL); else { struct ip_moptions imo; int error; static struct route ro; /* XXX check this */ imo.imo_multicast_ifp = vifp->v_ifp; imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1; imo.imo_multicast_loop = 1; imo.imo_multicast_vif = -1; /* * Re-entrancy should not be a problem here, because * the packets that we send out and are looped back at us * should get rejected because they appear to come from * the loopback interface, thus preventing looping. 
*/ error = ip_output(m, NULL, &ro, IP_FORWARDING, &imo, NULL); if (mrtdebug & DEBUG_XMIT) log(LOG_DEBUG, "phyint_send on vif %d err %d\n", (int)(vifp - viftable), error); } } /* determine the current time and then * the elapsed time (between the last time and time now) * in milliseconds & update the no. of tokens in the bucket */ static void tbf_update_tokens(struct vif *vifp) { struct timeval tp; u_long tm; struct tbf *t = vifp->v_tbf; VIF_LOCK_ASSERT(); GET_TIME(tp); TV_DELTA(tp, t->tbf_last_pkt_t, tm); /* * This formula is actually * "time in seconds" * "bytes/second". * * (tm / 1000000) * (v_rate_limit * 1000 * (1000/1024) / 8) * * The (1000/1024) was introduced in add_vif to optimize * this divide into a shift. */ t->tbf_n_tok += tm * vifp->v_rate_limit / 1024 / 8; t->tbf_last_pkt_t = tp; if (t->tbf_n_tok > MAX_BKT_SIZE) t->tbf_n_tok = MAX_BKT_SIZE; } static int priority(struct vif *vifp, struct ip *ip) { int prio = 50; /* the lowest priority -- default case */ /* temporary hack; may add general packet classifier some day */ /* * The UDP port space is divided up into four priority ranges: * [0, 16384) : unclassified - lowest priority * [16384, 32768) : audio - highest priority * [32768, 49152) : whiteboard - medium priority * [49152, 65536) : video - low priority * * Everything else gets lowest priority. 
*/ if (ip->ip_p == IPPROTO_UDP) { struct udphdr *udp = (struct udphdr *)(((char *)ip) + (ip->ip_hl << 2)); switch (ntohs(udp->uh_dport) & 0xc000) { case 0x4000: prio = 70; break; case 0x8000: prio = 60; break; case 0xc000: prio = 55; break; } } return prio; } /* * End of token bucket filter modifications */ static int X_ip_rsvp_vif(struct socket *so, struct sockopt *sopt) { int error, vifi; if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP) return EOPNOTSUPP; error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi); if (error) return error; VIF_LOCK(); if (vifi < 0 || vifi >= numvifs) { /* Error if vif is invalid */ VIF_UNLOCK(); return EADDRNOTAVAIL; } if (sopt->sopt_name == IP_RSVP_VIF_ON) { /* Check if socket is available. */ if (viftable[vifi].v_rsvpd != NULL) { VIF_UNLOCK(); return EADDRINUSE; } viftable[vifi].v_rsvpd = so; /* This may seem silly, but we need to be sure we don't over-increment * the RSVP counter, in case something slips up. */ if (!viftable[vifi].v_rsvp_on) { viftable[vifi].v_rsvp_on = 1; rsvp_on++; } } else { /* must be VIF_OFF */ /* * XXX as an additional consistency check, one could make sure * that viftable[vifi].v_rsvpd == so, otherwise passing so as * first parameter is pretty useless. */ viftable[vifi].v_rsvpd = NULL; /* * This may seem silly, but we need to be sure we don't over-decrement * the RSVP counter, in case something slips up. */ if (viftable[vifi].v_rsvp_on) { viftable[vifi].v_rsvp_on = 0; rsvp_on--; } } VIF_UNLOCK(); return 0; } static void X_ip_rsvp_force_done(struct socket *so) { int vifi; /* Don't bother if it is not the right type of socket. */ if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP) return; VIF_LOCK(); /* The socket may be attached to more than one vif...this * is perfectly legal. 
     */
    for (vifi = 0; vifi < numvifs; vifi++) {
	if (viftable[vifi].v_rsvpd == so) {
	    viftable[vifi].v_rsvpd = NULL;
	    /* This may seem silly, but we need to be sure we don't
	     * over-decrement the RSVP counter, in case something slips up.
	     */
	    if (viftable[vifi].v_rsvp_on) {
		viftable[vifi].v_rsvp_on = 0;
		rsvp_on--;
	    }
	}
    }

    VIF_UNLOCK();
}

/*
 * Input handler for raw RSVP packets: deliver the packet to the RSVP
 * socket bound to the receiving vif, or fall back to the old-style
 * global socket (ip_rsvpd).  Consumes the mbuf on every path.
 */
static void
X_rsvp_input(struct mbuf *m, int off)
{
    int vifi;
    struct ip *ip = mtod(m, struct ip *);
    struct sockaddr_in rsvp_src = { sizeof rsvp_src, AF_INET };
    struct ifnet *ifp;

    if (rsvpdebug)
	printf("rsvp_input: rsvp_on %d\n",rsvp_on);

    /* Can still get packets with rsvp_on = 0 if there is a local member
     * of the group to which the RSVP packet is addressed.  But in this
     * case we want to throw the packet away.
     */
    if (!rsvp_on) {
	m_freem(m);
	return;
    }

    if (rsvpdebug)
	printf("rsvp_input: check vifs\n");

#ifdef DIAGNOSTIC
    M_ASSERTPKTHDR(m);
#endif

    ifp = m->m_pkthdr.rcvif;

    VIF_LOCK();
    /* Find which vif the packet arrived on. */
    for (vifi = 0; vifi < numvifs; vifi++)
	if (viftable[vifi].v_ifp == ifp)
	    break;

    if (vifi == numvifs || viftable[vifi].v_rsvpd == NULL) {
	/*
	 * Drop the lock here to avoid holding it across rip_input.
	 * This could make rsvpdebug printfs wrong.  If you care,
	 * record the state of stuff before dropping the lock.
	 */
	VIF_UNLOCK();
	/*
	 * If the old-style non-vif-associated socket is set,
	 * then use it.  Otherwise, drop packet since there
	 * is no specific socket for this vif.
	 */
	if (ip_rsvpd != NULL) {
	    if (rsvpdebug)
		printf("rsvp_input: Sending packet up old-style socket\n");
	    rip_input(m, off);  /* xxx */
	} else {
	    if (rsvpdebug && vifi == numvifs)
		printf("rsvp_input: Can't find vif for packet.\n");
	    else if (rsvpdebug && viftable[vifi].v_rsvpd == NULL)
		printf("rsvp_input: No socket defined for vif %d\n",vifi);
	    m_freem(m);
	}
	return;
    }
    rsvp_src.sin_addr = ip->ip_src;

    if (rsvpdebug && m)
	printf("rsvp_input: m->m_len = %d, sbspace() = %ld\n",
	       m->m_len,sbspace(&(viftable[vifi].v_rsvpd->so_rcv)));

    /* socket_send() frees the mbuf on failure. */
    if (socket_send(viftable[vifi].v_rsvpd, m, &rsvp_src) < 0) {
	if (rsvpdebug)
	    printf("rsvp_input: Failed to append to socket\n");
    } else {
	if (rsvpdebug)
	    printf("rsvp_input: send packet up\n");
    }

    VIF_UNLOCK();
}

/*
 * Code for bandwidth monitors
 */

/*
 * Define common interface for timeval-related methods
 */
#define	BW_TIMEVALCMP(tvp, uvp, cmp) timevalcmp((tvp), (uvp), cmp)
#define	BW_TIMEVALDECR(vvp, uvp) timevalsub((vvp), (uvp))
#define	BW_TIMEVALADD(vvp, uvp) timevaladd((vvp), (uvp))

/*
 * Translate the user-visible BW_UPCALL_* request flags into the
 * corresponding in-kernel BW_METER_* flag bits.
 */
static uint32_t
compute_bw_meter_flags(struct bw_upcall *req)
{
    uint32_t flags = 0;

    if (req->bu_flags & BW_UPCALL_UNIT_PACKETS)
	flags |= BW_METER_UNIT_PACKETS;
    if (req->bu_flags & BW_UPCALL_UNIT_BYTES)
	flags |= BW_METER_UNIT_BYTES;
    if (req->bu_flags & BW_UPCALL_GEQ)
	flags |= BW_METER_GEQ;
    if (req->bu_flags & BW_UPCALL_LEQ)
	flags |= BW_METER_LEQ;

    return flags;
}

/*
 * Add a bw_meter entry
 */
static int
add_bw_upcall(struct bw_upcall *req)
{
    struct mfc *mfc;
    struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC,
		BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC };
    struct timeval now;
    struct bw_meter *x;
    uint32_t flags;

    if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
	return EOPNOTSUPP;

    /* Test if the flags are valid: exactly one unit and exactly one
     * of GEQ/LEQ must be requested. */
    if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
	return EINVAL;
    if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)))
	return EINVAL;
    if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
	    == (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
	return EINVAL;

    /* Test
if the threshold time interval is valid */ if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <)) return EINVAL; flags = compute_bw_meter_flags(req); /* * Find if we have already same bw_meter entry */ MFC_LOCK(); mfc = mfc_find(req->bu_src.s_addr, req->bu_dst.s_addr); if (mfc == NULL) { MFC_UNLOCK(); return EADDRNOTAVAIL; } for (x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) { if ((BW_TIMEVALCMP(&x->bm_threshold.b_time, &req->bu_threshold.b_time, ==)) && (x->bm_threshold.b_packets == req->bu_threshold.b_packets) && (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) && (x->bm_flags & BW_METER_USER_FLAGS) == flags) { MFC_UNLOCK(); return 0; /* XXX Already installed */ } } /* Allocate the new bw_meter entry */ x = (struct bw_meter *)malloc(sizeof(*x), M_BWMETER, M_NOWAIT); if (x == NULL) { MFC_UNLOCK(); return ENOBUFS; } /* Set the new bw_meter entry */ x->bm_threshold.b_time = req->bu_threshold.b_time; GET_TIME(now); x->bm_start_time = now; x->bm_threshold.b_packets = req->bu_threshold.b_packets; x->bm_threshold.b_bytes = req->bu_threshold.b_bytes; x->bm_measured.b_packets = 0; x->bm_measured.b_bytes = 0; x->bm_flags = flags; x->bm_time_next = NULL; x->bm_time_hash = BW_METER_BUCKETS; /* Add the new bw_meter entry to the front of entries for this MFC */ x->bm_mfc = mfc; x->bm_mfc_next = mfc->mfc_bw_meter; mfc->mfc_bw_meter = x; schedule_bw_meter(x, &now); MFC_UNLOCK(); return 0; } static void free_bw_list(struct bw_meter *list) { while (list != NULL) { struct bw_meter *x = list; list = list->bm_mfc_next; unschedule_bw_meter(x); free(x, M_BWMETER); } } /* * Delete one or multiple bw_meter entries */ static int del_bw_upcall(struct bw_upcall *req) { struct mfc *mfc; struct bw_meter *x; if (!(mrt_api_config & MRT_MFC_BW_UPCALL)) return EOPNOTSUPP; MFC_LOCK(); /* Find the corresponding MFC entry */ mfc = mfc_find(req->bu_src.s_addr, req->bu_dst.s_addr); if (mfc == NULL) { MFC_UNLOCK(); return EADDRNOTAVAIL; } else if (req->bu_flags & BW_UPCALL_DELETE_ALL) { 
/* * Delete all bw_meter entries for this mfc */ struct bw_meter *list; list = mfc->mfc_bw_meter; mfc->mfc_bw_meter = NULL; free_bw_list(list); MFC_UNLOCK(); return 0; } else { /* Delete a single bw_meter entry */ struct bw_meter *prev; uint32_t flags = 0; flags = compute_bw_meter_flags(req); /* Find the bw_meter entry to delete */ for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) { if ((BW_TIMEVALCMP(&x->bm_threshold.b_time, &req->bu_threshold.b_time, ==)) && (x->bm_threshold.b_packets == req->bu_threshold.b_packets) && (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) && (x->bm_flags & BW_METER_USER_FLAGS) == flags) break; } if (x != NULL) { /* Delete entry from the list for this MFC */ if (prev != NULL) prev->bm_mfc_next = x->bm_mfc_next; /* remove from middle*/ else x->bm_mfc->mfc_bw_meter = x->bm_mfc_next;/* new head of list */ unschedule_bw_meter(x); MFC_UNLOCK(); /* Free the bw_meter entry */ free(x, M_BWMETER); return 0; } else { MFC_UNLOCK(); return EINVAL; } } /* NOTREACHED */ } /* * Perform bandwidth measurement processing that may result in an upcall */ static void bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp) { struct timeval delta; MFC_LOCK_ASSERT(); delta = *nowp; BW_TIMEVALDECR(&delta, &x->bm_start_time); if (x->bm_flags & BW_METER_GEQ) { /* * Processing for ">=" type of bw_meter entry */ if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) { /* Reset the bw_meter entry */ x->bm_start_time = *nowp; x->bm_measured.b_packets = 0; x->bm_measured.b_bytes = 0; x->bm_flags &= ~BW_METER_UPCALL_DELIVERED; } /* Record that a packet is received */ x->bm_measured.b_packets++; x->bm_measured.b_bytes += plen; /* * Test if we should deliver an upcall */ if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) { if (((x->bm_flags & BW_METER_UNIT_PACKETS) && (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) || ((x->bm_flags & BW_METER_UNIT_BYTES) && (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) { /* 
Prepare an upcall for delivery */
		bw_meter_prepare_upcall(x, nowp);
		x->bm_flags |= BW_METER_UPCALL_DELIVERED;
	    }
	}
    } else if (x->bm_flags & BW_METER_LEQ) {
	/*
	 * Processing for "<=" type of bw_meter entry
	 */
	if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
	    /*
	     * We are behind time with the multicast forwarding table
	     * scanning for "<=" type of bw_meter entries, so test now
	     * if we should deliver an upcall.
	     */
	    if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
		 (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
		((x->bm_flags & BW_METER_UNIT_BYTES) &&
		 (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
		/* Prepare an upcall for delivery */
		bw_meter_prepare_upcall(x, nowp);
	    }
	    /* Reschedule the bw_meter entry */
	    unschedule_bw_meter(x);
	    schedule_bw_meter(x, nowp);
	}

	/* Record that a packet is received */
	x->bm_measured.b_packets++;
	x->bm_measured.b_bytes += plen;

	/*
	 * Test if we should restart the measuring interval
	 */
	if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
	     x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
	    (x->bm_flags & BW_METER_UNIT_BYTES &&
	     x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
	    /* Don't restart the measuring interval */
	} else {
	    /* Do restart the measuring interval */
	    /*
	     * XXX: note that we don't unschedule and schedule, because this
	     * might be too much overhead per packet. Instead, when we process
	     * all entries for a given timer hash bin, we check whether it is
	     * really a timeout. If not, we reschedule at that time.
	     */
	    x->bm_start_time = *nowp;
	    x->bm_measured.b_packets = 0;
	    x->bm_measured.b_bytes = 0;
	    x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
	}
    }
}

/*
 * Prepare a bandwidth-related upcall
 *
 * Copies the meter's threshold and measured values into the next free
 * slot of the pending-upcall array; flushes the array first if it is
 * already full.  Caller must hold the MFC lock (asserted below).
 */
static void
bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp)
{
    struct timeval delta;
    struct bw_upcall *u;

    MFC_LOCK_ASSERT();

    /*
     * Compute the measured time interval
     */
    delta = *nowp;
    BW_TIMEVALDECR(&delta, &x->bm_start_time);

    /*
     * If there are too many pending upcalls, deliver them now
     */
    if (bw_upcalls_n >= BW_UPCALLS_MAX)
	bw_upcalls_send();

    /*
     * Set the bw_upcall entry
     */
    u = &bw_upcalls[bw_upcalls_n++];
    u->bu_src = x->bm_mfc->mfc_origin;
    u->bu_dst = x->bm_mfc->mfc_mcastgrp;
    u->bu_threshold.b_time = x->bm_threshold.b_time;
    u->bu_threshold.b_packets = x->bm_threshold.b_packets;
    u->bu_threshold.b_bytes = x->bm_threshold.b_bytes;
    u->bu_measured.b_time = delta;
    u->bu_measured.b_packets = x->bm_measured.b_packets;
    u->bu_measured.b_bytes = x->bm_measured.b_bytes;
    /* Translate kernel BW_METER_* flags back to user BW_UPCALL_* flags. */
    u->bu_flags = 0;
    if (x->bm_flags & BW_METER_UNIT_PACKETS)
	u->bu_flags |= BW_UPCALL_UNIT_PACKETS;
    if (x->bm_flags & BW_METER_UNIT_BYTES)
	u->bu_flags |= BW_UPCALL_UNIT_BYTES;
    if (x->bm_flags & BW_METER_GEQ)
	u->bu_flags |= BW_UPCALL_GEQ;
    if (x->bm_flags & BW_METER_LEQ)
	u->bu_flags |= BW_UPCALL_LEQ;
}

/*
 * Send the pending bandwidth-related upcalls
 *
 * Packs all queued bw_upcall records behind an IGMPMSG_BW_UPCALL header
 * into a single mbuf and queues it on the mrouted control socket.
 */
static void
bw_upcalls_send(void)
{
    struct mbuf *m;
    int len = bw_upcalls_n * sizeof(bw_upcalls[0]);
    struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
    static struct igmpmsg igmpmsg = { 0,		/* unused1 */
				      0,		/* unused2 */
				      IGMPMSG_BW_UPCALL,/* im_msgtype */
				      0,		/* im_mbz */
				      0,		/* im_vif */
				      0,		/* unused3 */
				      { 0 },		/* im_src */
				      { 0 } };		/* im_dst */

    MFC_LOCK_ASSERT();

    if (bw_upcalls_n == 0)
	return;			/* No pending upcalls */

    bw_upcalls_n = 0;

    /*
     * Allocate a new mbuf, initialize it with the header and
     * the payload for the pending calls.
*/ MGETHDR(m, M_DONTWAIT, MT_HEADER); if (m == NULL) { log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n"); return; } m->m_len = m->m_pkthdr.len = 0; m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg); m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&bw_upcalls[0]); /* * Send the upcalls * XXX do we need to set the address in k_igmpsrc ? */ mrtstat.mrts_upcalls++; if (socket_send(ip_mrouter, m, &k_igmpsrc) < 0) { log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n"); ++mrtstat.mrts_upq_sockfull; } } /* * Compute the timeout hash value for the bw_meter entries */ #define BW_METER_TIMEHASH(bw_meter, hash) \ do { \ struct timeval next_timeval = (bw_meter)->bm_start_time; \ \ BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \ (hash) = next_timeval.tv_sec; \ if (next_timeval.tv_usec) \ (hash)++; /* XXX: make sure we don't timeout early */ \ (hash) %= BW_METER_BUCKETS; \ } while (0) /* * Schedule a timer to process periodically bw_meter entry of type "<=" * by linking the entry in the proper hash bucket. */ static void schedule_bw_meter(struct bw_meter *x, struct timeval *nowp) { int time_hash; MFC_LOCK_ASSERT(); if (!(x->bm_flags & BW_METER_LEQ)) return; /* XXX: we schedule timers only for "<=" entries */ /* * Reset the bw_meter entry */ x->bm_start_time = *nowp; x->bm_measured.b_packets = 0; x->bm_measured.b_bytes = 0; x->bm_flags &= ~BW_METER_UPCALL_DELIVERED; /* * Compute the timeout hash value and insert the entry */ BW_METER_TIMEHASH(x, time_hash); x->bm_time_next = bw_meter_timers[time_hash]; bw_meter_timers[time_hash] = x; x->bm_time_hash = time_hash; } /* * Unschedule the periodic timer that processes bw_meter entry of type "<=" * by removing the entry from the proper hash bucket. 
*/ static void unschedule_bw_meter(struct bw_meter *x) { int time_hash; struct bw_meter *prev, *tmp; MFC_LOCK_ASSERT(); if (!(x->bm_flags & BW_METER_LEQ)) return; /* XXX: we schedule timers only for "<=" entries */ /* * Compute the timeout hash value and delete the entry */ time_hash = x->bm_time_hash; if (time_hash >= BW_METER_BUCKETS) return; /* Entry was not scheduled */ for (prev = NULL, tmp = bw_meter_timers[time_hash]; tmp != NULL; prev = tmp, tmp = tmp->bm_time_next) if (tmp == x) break; if (tmp == NULL) panic("unschedule_bw_meter: bw_meter entry not found"); if (prev != NULL) prev->bm_time_next = x->bm_time_next; else bw_meter_timers[time_hash] = x->bm_time_next; x->bm_time_next = NULL; x->bm_time_hash = BW_METER_BUCKETS; } /* * Process all "<=" type of bw_meter that should be processed now, * and for each entry prepare an upcall if necessary. Each processed * entry is rescheduled again for the (periodic) processing. * * This is run periodically (once per second normally). On each round, * all the potentially matching entries are in the hash slot that we are * looking at. */ static void bw_meter_process() { static uint32_t last_tv_sec; /* last time we processed this */ uint32_t loops; int i; struct timeval now, process_endtime; GET_TIME(now); if (last_tv_sec == now.tv_sec) return; /* nothing to do */ loops = now.tv_sec - last_tv_sec; last_tv_sec = now.tv_sec; if (loops > BW_METER_BUCKETS) loops = BW_METER_BUCKETS; MFC_LOCK(); /* * Process all bins of bw_meter entries from the one after the last * processed to the current one. On entry, i points to the last bucket * visited, so we need to increment i at the beginning of the loop. 
*/ for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) { struct bw_meter *x, *tmp_list; if (++i >= BW_METER_BUCKETS) i = 0; /* Disconnect the list of bw_meter entries from the bin */ tmp_list = bw_meter_timers[i]; bw_meter_timers[i] = NULL; /* Process the list of bw_meter entries */ while (tmp_list != NULL) { x = tmp_list; tmp_list = tmp_list->bm_time_next; /* Test if the time interval is over */ process_endtime = x->bm_start_time; BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time); if (BW_TIMEVALCMP(&process_endtime, &now, >)) { /* Not yet: reschedule, but don't reset */ int time_hash; BW_METER_TIMEHASH(x, time_hash); if (time_hash == i && process_endtime.tv_sec == now.tv_sec) { /* * XXX: somehow the bin processing is a bit ahead of time. * Put the entry in the next bin. */ if (++time_hash >= BW_METER_BUCKETS) time_hash = 0; } x->bm_time_next = bw_meter_timers[time_hash]; bw_meter_timers[time_hash] = x; x->bm_time_hash = time_hash; continue; } /* * Test if we should deliver an upcall */ if (((x->bm_flags & BW_METER_UNIT_PACKETS) && (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) || ((x->bm_flags & BW_METER_UNIT_BYTES) && (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) { /* Prepare an upcall for delivery */ bw_meter_prepare_upcall(x, &now); } /* * Reschedule for next processing */ schedule_bw_meter(x, &now); } } /* Send all upcalls that are pending delivery */ bw_upcalls_send(); MFC_UNLOCK(); } /* * A periodic function for sending all upcalls that are pending delivery */ static void expire_bw_upcalls_send(void *unused) { MFC_LOCK(); bw_upcalls_send(); MFC_UNLOCK(); callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send, NULL); } /* * A periodic function for periodic scanning of the multicast forwarding * table for processing all "<=" bw_meter entries. 
 */
static void
expire_bw_meter_process(void *unused)
{
    if (mrt_api_config & MRT_MFC_BW_UPCALL)
	bw_meter_process();

    /* Re-arm the periodic scan. */
    callout_reset(&bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process, NULL);
}

/*
 * End of bandwidth monitoring code
 */

#ifdef PIM
/*
 * Send the packet up to the user daemon, or eventually do kernel encapsulation
 *
 */
static int
pim_register_send(struct ip *ip, struct vif *vifp,
	struct mbuf *m, struct mfc *rt)
{
    struct mbuf *mb_copy, *mm;

    if (mrtdebug & DEBUG_PIM)
	log(LOG_DEBUG, "pim_register_send: ");

    /* Get a private, register-ready copy (possibly a fragment chain). */
    mb_copy = pim_register_prepare(ip, m);
    if (mb_copy == NULL)
	return ENOBUFS;

    /*
     * Send all the fragments. Note that the mbuf for each fragment
     * is freed by the sending machinery.
     */
    for (mm = mb_copy; mm; mm = mb_copy) {
	mb_copy = mm->m_nextpkt;
	mm->m_nextpkt = 0;
	mm = m_pullup(mm, sizeof(struct ip));
	if (mm != NULL) {
	    ip = mtod(mm, struct ip *);
	    /* Encapsulate in-kernel towards the RP if configured,
	     * otherwise pass the whole packet up to the daemon. */
	    if ((mrt_api_config & MRT_MFC_RP) &&
		(rt->mfc_rp.s_addr != INADDR_ANY)) {
		pim_register_send_rp(ip, vifp, mm, rt);
	    } else {
		pim_register_send_upcall(ip, vifp, mm, rt);
	    }
	}
    }

    return 0;
}

/*
 * Return a copy of the data packet that is ready for PIM Register
 * encapsulation.
 * XXX: Note that in the returned copy the IP header is a valid one.
 */
static struct mbuf *
pim_register_prepare(struct ip *ip, struct mbuf *m)
{
    struct mbuf *mb_copy = NULL;
    int mtu;

    /* Take care of delayed checksums */
    if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
	in_delayed_cksum(m);
	m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
    }

    /*
     * Copy the old packet & pullup its IP header into the
     * new mbuf so we can modify it.
     */
    mb_copy = m_copypacket(m, M_DONTWAIT);
    if (mb_copy == NULL)
	return NULL;
    mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
    if (mb_copy == NULL)
	return NULL;

    /* take care of the TTL */
    ip = mtod(mb_copy, struct ip *);
    --ip->ip_ttl;

    /* Compute the MTU after the PIM Register encapsulation */
    mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);

    if (ip->ip_len <= mtu) {
	/* Turn the IP header into a valid one */
	ip->ip_len = htons(ip->ip_len);
	ip->ip_off = htons(ip->ip_off);
	ip->ip_sum = 0;
	ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
    } else {
	/* Fragment the packet */
	if (ip_fragment(ip, &mb_copy, mtu, 0, CSUM_DELAY_IP) != 0) {
	    m_freem(mb_copy);
	    return NULL;
	}
    }
    return mb_copy;
}

/*
 * Send an upcall with the data packet to the user-level process.
 * Prepends an igmpmsg header (IGMPMSG_WHOLEPKT) and queues the chain on
 * the mrouted control socket.  Consumes mb_copy on every path.
 */
static int
pim_register_send_upcall(struct ip *ip, struct vif *vifp,
	struct mbuf *mb_copy, struct mfc *rt)
{
    struct mbuf *mb_first;
    int len = ntohs(ip->ip_len);
    struct igmpmsg *im;
    struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };

    VIF_LOCK_ASSERT();

    /*
     * Add a new mbuf with an upcall header
     */
    MGETHDR(mb_first, M_DONTWAIT, MT_HEADER);
    if (mb_first == NULL) {
	m_freem(mb_copy);
	return ENOBUFS;
    }
    mb_first->m_data += max_linkhdr;
    mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
    mb_first->m_len = sizeof(struct igmpmsg);
    mb_first->m_next = mb_copy;

    /* Send message to routing daemon */
    im = mtod(mb_first, struct igmpmsg *);
    im->im_msgtype = IGMPMSG_WHOLEPKT;
    im->im_mbz = 0;
    im->im_vif = vifp - viftable;
    im->im_src = ip->ip_src;
    im->im_dst = ip->ip_dst;

    k_igmpsrc.sin_addr = ip->ip_src;

    mrtstat.mrts_upcalls++;

    if (socket_send(ip_mrouter, mb_first, &k_igmpsrc) < 0) {
	if (mrtdebug & DEBUG_PIM)
	    log(LOG_WARNING,
		"mcast: pim_register_send_upcall: ip_mrouter socket queue full");
	++mrtstat.mrts_upq_sockfull;
	return ENOBUFS;
    }

    /* Keep statistics */
    pimstat.pims_snd_registers_msgs++;
    pimstat.pims_snd_registers_bytes += len;

    return 0;
}

/*
 * Encapsulate the data packet in PIM Register message and send it to the
RP. */ static int pim_register_send_rp(struct ip *ip, struct vif *vifp, struct mbuf *mb_copy, struct mfc *rt) { struct mbuf *mb_first; struct ip *ip_outer; struct pim_encap_pimhdr *pimhdr; int len = ntohs(ip->ip_len); vifi_t vifi = rt->mfc_parent; VIF_LOCK_ASSERT(); if ((vifi >= numvifs) || (viftable[vifi].v_lcl_addr.s_addr == 0)) { m_freem(mb_copy); return EADDRNOTAVAIL; /* The iif vif is invalid */ } /* * Add a new mbuf with the encapsulating header */ MGETHDR(mb_first, M_DONTWAIT, MT_HEADER); if (mb_first == NULL) { m_freem(mb_copy); return ENOBUFS; } mb_first->m_data += max_linkhdr; mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr); mb_first->m_next = mb_copy; mb_first->m_pkthdr.len = len + mb_first->m_len; /* * Fill in the encapsulating IP and PIM header */ ip_outer = mtod(mb_first, struct ip *); *ip_outer = pim_encap_iphdr; #ifdef RANDOM_IP_ID ip_outer->ip_id = ip_randomid(); #else ip_outer->ip_id = htons(ip_id++); #endif ip_outer->ip_len = len + sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr); ip_outer->ip_src = viftable[vifi].v_lcl_addr; ip_outer->ip_dst = rt->mfc_rp; /* * Copy the inner header TOS to the outer header, and take care of the * IP_DF bit. 
*/ ip_outer->ip_tos = ip->ip_tos; if (ntohs(ip->ip_off) & IP_DF) ip_outer->ip_off |= IP_DF; pimhdr = (struct pim_encap_pimhdr *)((caddr_t)ip_outer + sizeof(pim_encap_iphdr)); *pimhdr = pim_encap_pimhdr; /* If the iif crosses a border, set the Border-bit */ if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & mrt_api_config) pimhdr->flags |= htonl(PIM_BORDER_REGISTER); mb_first->m_data += sizeof(pim_encap_iphdr); pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr)); mb_first->m_data -= sizeof(pim_encap_iphdr); if (vifp->v_rate_limit == 0) tbf_send_packet(vifp, mb_first); else tbf_control(vifp, mb_first, ip, ip_outer->ip_len); /* Keep statistics */ pimstat.pims_snd_registers_msgs++; pimstat.pims_snd_registers_bytes += len; return 0; } /* * PIM-SMv2 and PIM-DM messages processing. * Receives and verifies the PIM control messages, and passes them * up to the listening socket, using rip_input(). * The only message with special processing is the PIM_REGISTER message * (used by PIM-SM): the PIM header is stripped off, and the inner packet * is passed to if_simloop(). */ void pim_input(struct mbuf *m, int off) { struct ip *ip = mtod(m, struct ip *); struct pim *pim; int minlen; int datalen = ip->ip_len; int ip_tos; int iphlen = off; /* Keep statistics */ pimstat.pims_rcv_total_msgs++; pimstat.pims_rcv_total_bytes += datalen; /* * Validate lengths */ if (datalen < PIM_MINLEN) { pimstat.pims_rcv_tooshort++; log(LOG_ERR, "pim_input: packet size too small %d from %lx\n", datalen, (u_long)ip->ip_src.s_addr); m_freem(m); return; } /* * If the packet is at least as big as a REGISTER, go agead * and grab the PIM REGISTER header size, to avoid another * possible m_pullup() later. * * PIM_MINLEN == pimhdr + u_int32_t == 4 + 4 = 8 * PIM_REG_MINLEN == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28 */ minlen = iphlen + (datalen >= PIM_REG_MINLEN ? 
PIM_REG_MINLEN : PIM_MINLEN); /* * Get the IP and PIM headers in contiguous memory, and * possibly the PIM REGISTER header. */ if ((m->m_flags & M_EXT || m->m_len < minlen) && (m = m_pullup(m, minlen)) == 0) { log(LOG_ERR, "pim_input: m_pullup failure\n"); return; } /* m_pullup() may have given us a new mbuf so reset ip. */ ip = mtod(m, struct ip *); ip_tos = ip->ip_tos; /* adjust mbuf to point to the PIM header */ m->m_data += iphlen; m->m_len -= iphlen; pim = mtod(m, struct pim *); /* * Validate checksum. If PIM REGISTER, exclude the data packet. * * XXX: some older PIMv2 implementations don't make this distinction, * so for compatibility reason perform the checksum over part of the * message, and if error, then over the whole message. */ if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) { /* do nothing, checksum okay */ } else if (in_cksum(m, datalen)) { pimstat.pims_rcv_badsum++; if (mrtdebug & DEBUG_PIM) log(LOG_DEBUG, "pim_input: invalid checksum"); m_freem(m); return; } /* PIM version check */ if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) { pimstat.pims_rcv_badversion++; log(LOG_ERR, "pim_input: incorrect version %d, expecting %d\n", PIM_VT_V(pim->pim_vt), PIM_VERSION); m_freem(m); return; } /* restore mbuf back to the outer IP */ m->m_data -= iphlen; m->m_len += iphlen; if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) { /* * Since this is a REGISTER, we'll make a copy of the register * headers ip + pim + u_int32 + encap_ip, to be passed up to the * routing daemon. */ struct sockaddr_in dst = { sizeof(dst), AF_INET }; struct mbuf *mcp; struct ip *encap_ip; u_int32_t *reghdr; struct ifnet *vifp; VIF_LOCK(); if ((reg_vif_num >= numvifs) || (reg_vif_num == VIFI_INVALID)) { VIF_UNLOCK(); if (mrtdebug & DEBUG_PIM) log(LOG_DEBUG, "pim_input: register vif not set: %d\n", reg_vif_num); m_freem(m); return; } /* XXX need refcnt? 
*/ vifp = viftable[reg_vif_num].v_ifp; VIF_UNLOCK(); /* * Validate length */ if (datalen < PIM_REG_MINLEN) { pimstat.pims_rcv_tooshort++; pimstat.pims_rcv_badregisters++; log(LOG_ERR, "pim_input: register packet size too small %d from %lx\n", datalen, (u_long)ip->ip_src.s_addr); m_freem(m); return; } reghdr = (u_int32_t *)(pim + 1); encap_ip = (struct ip *)(reghdr + 1); if (mrtdebug & DEBUG_PIM) { log(LOG_DEBUG, "pim_input[register], encap_ip: %lx -> %lx, encap_ip len %d\n", (u_long)ntohl(encap_ip->ip_src.s_addr), (u_long)ntohl(encap_ip->ip_dst.s_addr), ntohs(encap_ip->ip_len)); } /* verify the version number of the inner packet */ if (encap_ip->ip_v != IPVERSION) { pimstat.pims_rcv_badregisters++; if (mrtdebug & DEBUG_PIM) { log(LOG_DEBUG, "pim_input: invalid IP version (%d) " "of the inner packet\n", encap_ip->ip_v); } m_freem(m); return; } /* verify the inner packet is destined to a mcast group */ if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) { pimstat.pims_rcv_badregisters++; if (mrtdebug & DEBUG_PIM) log(LOG_DEBUG, "pim_input: inner packet of register is not " "multicast %lx\n", (u_long)ntohl(encap_ip->ip_dst.s_addr)); m_freem(m); return; } /* If a NULL_REGISTER, pass it to the daemon */ if ((ntohl(*reghdr) & PIM_NULL_REGISTER)) goto pim_input_to_daemon; /* * Copy the TOS from the outer IP header to the inner IP header. */ if (encap_ip->ip_tos != ip_tos) { /* Outer TOS -> inner TOS */ encap_ip->ip_tos = ip_tos; /* Recompute the inner header checksum. Sigh... */ /* adjust mbuf to point to the inner IP header */ m->m_data += (iphlen + PIM_MINLEN); m->m_len -= (iphlen + PIM_MINLEN); encap_ip->ip_sum = 0; encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2); /* restore mbuf to point back to the outer IP header */ m->m_data -= (iphlen + PIM_MINLEN); m->m_len += (iphlen + PIM_MINLEN); } /* * Decapsulate the inner IP packet and loopback to forward it * as a normal multicast packet. 
Also, make a copy of the * outer_iphdr + pimhdr + reghdr + encap_iphdr * to pass to the daemon later, so it can take the appropriate * actions (e.g., send back PIM_REGISTER_STOP). * XXX: here m->m_data points to the outer IP header. */ mcp = m_copy(m, 0, iphlen + PIM_REG_MINLEN); if (mcp == NULL) { log(LOG_ERR, "pim_input: pim register: could not copy register head\n"); m_freem(m); return; } /* Keep statistics */ /* XXX: registers_bytes include only the encap. mcast pkt */ pimstat.pims_rcv_registers_msgs++; pimstat.pims_rcv_registers_bytes += ntohs(encap_ip->ip_len); /* * forward the inner ip packet; point m_data at the inner ip. */ m_adj(m, iphlen + PIM_MINLEN); if (mrtdebug & DEBUG_PIM) { log(LOG_DEBUG, "pim_input: forwarding decapsulated register: " "src %lx, dst %lx, vif %d\n", (u_long)ntohl(encap_ip->ip_src.s_addr), (u_long)ntohl(encap_ip->ip_dst.s_addr), reg_vif_num); } /* NB: vifp was collected above; can it change on us? */ if_simloop(vifp, m, dst.sin_family, 0); /* prepare the register head to send to the mrouting daemon */ m = mcp; } pim_input_to_daemon: /* * Pass the PIM message up to the daemon; if it is a Register message, * pass the 'head' only up to the daemon. This includes the * outer IP header, PIM header, PIM-Register header and the * inner IP header. * XXX: the outer IP header pkt size of a Register is not adjust to * reflect the fact that the inner multicast data is truncated. 
*/ rip_input(m, iphlen); return; } #endif /* PIM */ static int ip_mroute_modevent(module_t mod, int type, void *unused) { switch (type) { case MOD_LOAD: mtx_init(&mrouter_mtx, "mrouter initialization", NULL, MTX_DEF); MFC_LOCK_INIT(); VIF_LOCK_INIT(); ip_mrouter_reset(); ip_mcast_src = X_ip_mcast_src; ip_mforward = X_ip_mforward; ip_mrouter_done = X_ip_mrouter_done; ip_mrouter_get = X_ip_mrouter_get; ip_mrouter_set = X_ip_mrouter_set; ip_rsvp_force_done = X_ip_rsvp_force_done; ip_rsvp_vif = X_ip_rsvp_vif; legal_vif_num = X_legal_vif_num; mrt_ioctl = X_mrt_ioctl; rsvp_input_p = X_rsvp_input; break; case MOD_UNLOAD: /* * Typically module unload happens after the user-level * process has shutdown the kernel services (the check * below insures someone can't just yank the module out * from under a running process). But if the module is * just loaded and then unloaded w/o starting up a user * process we still need to cleanup. */ if (ip_mrouter) return EINVAL; X_ip_mrouter_done(); ip_mcast_src = NULL; ip_mforward = NULL; ip_mrouter_done = NULL; ip_mrouter_get = NULL; ip_mrouter_set = NULL; ip_rsvp_force_done = NULL; ip_rsvp_vif = NULL; legal_vif_num = NULL; mrt_ioctl = NULL; rsvp_input_p = NULL; VIF_LOCK_DESTROY(); MFC_LOCK_DESTROY(); mtx_destroy(&mrouter_mtx); break; } return 0; } static moduledata_t ip_mroutemod = { "ip_mroute", ip_mroute_modevent, 0 }; DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PSEUDO, SI_ORDER_ANY); Index: head/sys/netinet6/ip6_fw.c =================================================================== --- head/sys/netinet6/ip6_fw.c (revision 129879) +++ head/sys/netinet6/ip6_fw.c (revision 129880) @@ -1,1305 +1,1306 @@ /* $FreeBSD$ */ /* $KAME: ip6_fw.c,v 1.21 2001/01/24 01:25:32 itojun Exp $ */ /* * Copyright (C) 1998, 1999, 2000 and 2001 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Copyright (c) 1993 Daniel Boulet * Copyright (c) 1994 Ugen J.S.Antsilevich * Copyright (c) 1996 Alex Nash * * Redistribution and use in source forms, with and without modification, * are permitted provided that this entire comment appears intact. * * Redistribution in binary form may occur without any restrictions. * Obviously, it would be nice if you gave credit where credit is due * but requiring it would be too onerous. * * This software is provided ``AS IS'' without any warranties of any kind. 
*/ /* * Implement IPv6 packet firewall */ #if !defined(KLD_MODULE) #include "opt_ip6fw.h" #include "opt_inet.h" #include "opt_inet6.h" #endif #ifdef IP6DIVERT #error "NOT SUPPORTED IPV6 DIVERT" #endif #ifdef IP6FW_DIVERT_RESTART #error "NOT SUPPORTED IPV6 DIVERT" #endif #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_IP6FW, "Ip6Fw/Ip6Acct", "Ip6Fw/Ip6Acct chain's"); static int fw6_debug = 1; #ifdef IPV6FIREWALL_VERBOSE static int fw6_verbose = 1; #else static int fw6_verbose = 0; #endif #ifdef IPV6FIREWALL_VERBOSE_LIMIT static int fw6_verbose_limit = IPV6FIREWALL_VERBOSE_LIMIT; #else static int fw6_verbose_limit = 0; #endif static LIST_HEAD (ip6_fw_head, ip6_fw_chain) ip6_fw_chain; #ifdef SYSCTL_NODE SYSCTL_DECL(_net_inet6_ip6); SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_SECURE, 0, "Firewall"); SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_SECURE, &ip6_fw_enable, 0, "Enable ip6fw"); SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, debug, CTLFLAG_RW, &fw6_debug, 0, ""); SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, verbose, CTLFLAG_RW | CTLFLAG_SECURE, &fw6_verbose, 0, ""); SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, verbose_limit, CTLFLAG_RW, &fw6_verbose_limit, 0, ""); #endif #define dprintf(a) do { \ if (fw6_debug) \ printf a; \ } while (/*CONSTCOND*/ 0) #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? 
sizeof(buf) - len : 0 static int add_entry6 __P((struct ip6_fw_head *chainptr, struct ip6_fw *frwl)); static int del_entry6 __P((struct ip6_fw_head *chainptr, u_short number)); static int zero_entry6 __P((struct mbuf *m)); static struct ip6_fw *check_ip6fw_struct __P((struct ip6_fw *m)); static struct ip6_fw *check_ip6fw_mbuf __P((struct mbuf *fw)); static int ip6opts_match __P((struct ip6_hdr **ip6, struct ip6_fw *f, struct mbuf **m, int *off, int *nxt, u_short *offset)); static int port_match6 __P((u_short *portptr, int nports, u_short port, int range_flag)); static int tcp6flg_match __P((struct tcphdr *tcp6, struct ip6_fw *f)); static int icmp6type_match __P((struct icmp6_hdr * icmp, struct ip6_fw * f)); static void ip6fw_report __P((struct ip6_fw *f, struct ip6_hdr *ip6, struct ifnet *rif, struct ifnet *oif, int off, int nxt)); static int ip6_fw_chk __P((struct ip6_hdr **pip6, struct ifnet *oif, u_int16_t *cookie, struct mbuf **m)); static int ip6_fw_ctl __P((int stage, struct mbuf **mm)); static char err_prefix[] = "ip6_fw_ctl:"; /* * Returns 1 if the port is matched by the vector, 0 otherwise */ static __inline int port_match6(u_short *portptr, int nports, u_short port, int range_flag) { if (!nports) return 1; if (range_flag) { if (portptr[0] <= port && port <= portptr[1]) { return 1; } nports -= 2; portptr += 2; } while (nports-- > 0) { if (*portptr++ == port) { return 1; } } return 0; } static int tcp6flg_match(struct tcphdr *tcp6, struct ip6_fw *f) { u_char flg_set, flg_clr; /* * If an established connection is required, reject packets that * have only SYN of RST|ACK|SYN set. Otherwise, fall through to * other flag requirements. 
*/ if ((f->fw_ipflg & IPV6_FW_IF_TCPEST) && ((tcp6->th_flags & (IPV6_FW_TCPF_RST | IPV6_FW_TCPF_ACK | IPV6_FW_TCPF_SYN)) == IPV6_FW_TCPF_SYN)) return 0; flg_set = tcp6->th_flags & f->fw_tcpf; flg_clr = tcp6->th_flags & f->fw_tcpnf; if (flg_set != f->fw_tcpf) return 0; if (flg_clr) return 0; return 1; } static int icmp6type_match(struct icmp6_hdr *icmp6, struct ip6_fw *f) { int type; if (!(f->fw_flg & IPV6_FW_F_ICMPBIT)) return (1); type = icmp6->icmp6_type; /* check for matching type in the bitmap */ if (type < IPV6_FW_ICMPTYPES_DIM * sizeof(unsigned) * 8 && (f->fw_icmp6types[type / (sizeof(unsigned) * 8)] & (1U << (type % (8 * sizeof(unsigned)))))) return (1); return (0); /* no match */ } static int is_icmp6_query(struct ip6_hdr *ip6, int off) { const struct icmp6_hdr *icmp6; int icmp6_type; icmp6 = (struct icmp6_hdr *)((caddr_t)ip6 + off); icmp6_type = icmp6->icmp6_type; if (icmp6_type == ICMP6_ECHO_REQUEST || icmp6_type == ICMP6_MEMBERSHIP_QUERY || icmp6_type == ICMP6_WRUREQUEST || icmp6_type == ICMP6_FQDN_QUERY || icmp6_type == ICMP6_NI_QUERY) return (1); return (0); } static int ip6opts_match(struct ip6_hdr **pip6, struct ip6_fw *f, struct mbuf **m, int *off, int *nxt, u_short *offset) { int len; struct ip6_hdr *ip6 = *pip6; struct ip6_ext *ip6e; u_char opts, nopts, nopts_sve; opts = f->fw_ip6opt; nopts = nopts_sve = f->fw_ip6nopt; *nxt = ip6->ip6_nxt; *off = sizeof(struct ip6_hdr); len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr); while (*off < len) { ip6e = (struct ip6_ext *)((caddr_t) ip6 + *off); if ((*m)->m_len < *off + sizeof(*ip6e)) goto opts_check; /* XXX */ switch (*nxt) { case IPPROTO_FRAGMENT: if ((*m)->m_len >= *off + sizeof(struct ip6_frag)) { struct ip6_frag *ip6f; ip6f = (struct ip6_frag *) ((caddr_t)ip6 + *off); *offset = ip6f->ip6f_offlg & IP6F_OFF_MASK; } opts &= ~IPV6_FW_IP6OPT_FRAG; nopts &= ~IPV6_FW_IP6OPT_FRAG; *off += sizeof(struct ip6_frag); break; case IPPROTO_AH: opts &= ~IPV6_FW_IP6OPT_AH; nopts &= ~IPV6_FW_IP6OPT_AH; *off += 
(ip6e->ip6e_len + 2) << 2; break; default: switch (*nxt) { case IPPROTO_HOPOPTS: opts &= ~IPV6_FW_IP6OPT_HOPOPT; nopts &= ~IPV6_FW_IP6OPT_HOPOPT; break; case IPPROTO_ROUTING: opts &= ~IPV6_FW_IP6OPT_ROUTE; nopts &= ~IPV6_FW_IP6OPT_ROUTE; break; case IPPROTO_ESP: opts &= ~IPV6_FW_IP6OPT_ESP; nopts &= ~IPV6_FW_IP6OPT_ESP; goto opts_check; case IPPROTO_NONE: opts &= ~IPV6_FW_IP6OPT_NONXT; nopts &= ~IPV6_FW_IP6OPT_NONXT; goto opts_check; case IPPROTO_DSTOPTS: opts &= ~IPV6_FW_IP6OPT_OPTS; nopts &= ~IPV6_FW_IP6OPT_OPTS; break; default: goto opts_check; } *off += (ip6e->ip6e_len + 1) << 3; break; } *nxt = ip6e->ip6e_nxt; } opts_check: if (f->fw_ip6opt == f->fw_ip6nopt) /* XXX */ return 1; if (opts == 0 && nopts == nopts_sve) return 1; else return 0; } static __inline int iface_match(struct ifnet *ifp, union ip6_fw_if *ifu, int byname) { /* Check by name or by IP address */ if (byname) { /* Check name */ if (ifu->fu_via_if.glob) { if (fnmatch(ifu->fu_via_if.name, ifp->if_xname, 0) == FNM_NOMATCH) return(0); } else { if (strncmp(ifp->if_xname, ifu->fu_via_if.name, IP6FW_IFNLEN) != 0) return(0); } return(1); } else if (!IN6_IS_ADDR_UNSPECIFIED(&ifu->fu_via_ip6)) { /* Zero == wildcard */ struct ifaddr *ia; for (ia = ifp->if_addrlist.tqh_first; ia; ia = ia->ifa_list.tqe_next) { if (ia->ifa_addr == NULL) continue; if (ia->ifa_addr->sa_family != AF_INET6) continue; if (!IN6_ARE_ADDR_EQUAL(&ifu->fu_via_ip6, &(((struct sockaddr_in6 *) (ia->ifa_addr))->sin6_addr))) continue; return (1); } return (0); } return (1); } static void ip6fw_report(struct ip6_fw *f, struct ip6_hdr *ip6, struct ifnet *rif, struct ifnet *oif, int off, int nxt) { static int counter; struct tcphdr *const tcp6 = (struct tcphdr *) ((caddr_t) ip6+ off); struct udphdr *const udp = (struct udphdr *) ((caddr_t) ip6+ off); struct icmp6_hdr *const icmp6 = (struct icmp6_hdr *) ((caddr_t) ip6+ off); int count; char *action; char action2[32], proto[102], name[18]; int len; count = f ? 
f->fw_pcnt : ++counter; if (fw6_verbose_limit != 0 && count > fw6_verbose_limit) return; /* Print command name */ snprintf(SNPARGS(name, 0), "ip6fw: %d", f ? f->fw_number : -1); action = action2; if (!f) action = "Refuse"; else { switch (f->fw_flg & IPV6_FW_F_COMMAND) { case IPV6_FW_F_DENY: action = "Deny"; break; case IPV6_FW_F_REJECT: if (f->fw_reject_code == IPV6_FW_REJECT_RST) action = "Reset"; else action = "Unreach"; break; case IPV6_FW_F_ACCEPT: action = "Accept"; break; case IPV6_FW_F_COUNT: action = "Count"; break; case IPV6_FW_F_DIVERT: snprintf(SNPARGS(action2, 0), "Divert %d", f->fw_divert_port); break; case IPV6_FW_F_TEE: snprintf(SNPARGS(action2, 0), "Tee %d", f->fw_divert_port); break; case IPV6_FW_F_SKIPTO: snprintf(SNPARGS(action2, 0), "SkipTo %d", f->fw_skipto_rule); break; default: action = "UNKNOWN"; break; } } switch (nxt) { case IPPROTO_TCP: len = snprintf(SNPARGS(proto, 0), "TCP [%s]", ip6_sprintf(&ip6->ip6_src)); if (off > 0) len += snprintf(SNPARGS(proto, len), ":%d ", ntohs(tcp6->th_sport)); else len += snprintf(SNPARGS(proto, len), " "); len += snprintf(SNPARGS(proto, len), "[%s]", ip6_sprintf(&ip6->ip6_dst)); if (off > 0) snprintf(SNPARGS(proto, len), ":%d", ntohs(tcp6->th_dport)); break; case IPPROTO_UDP: len = snprintf(SNPARGS(proto, 0), "UDP [%s]", ip6_sprintf(&ip6->ip6_src)); if (off > 0) len += snprintf(SNPARGS(proto, len), ":%d ", ntohs(udp->uh_sport)); else len += snprintf(SNPARGS(proto, len), " "); len += snprintf(SNPARGS(proto, len), "[%s]", ip6_sprintf(&ip6->ip6_dst)); if (off > 0) snprintf(SNPARGS(proto, len), ":%d", ntohs(udp->uh_dport)); break; case IPPROTO_ICMPV6: if (off > 0) len = snprintf(SNPARGS(proto, 0), "IPV6-ICMP:%u.%u ", icmp6->icmp6_type, icmp6->icmp6_code); else len = snprintf(SNPARGS(proto, 0), "IPV6-ICMP "); len += snprintf(SNPARGS(proto, len), "[%s]", ip6_sprintf(&ip6->ip6_src)); snprintf(SNPARGS(proto, len), " [%s]", ip6_sprintf(&ip6->ip6_dst)); break; default: len = snprintf(SNPARGS(proto, 0), "P:%d [%s]", 
nxt, ip6_sprintf(&ip6->ip6_src)); snprintf(SNPARGS(proto, len), " [%s]", ip6_sprintf(&ip6->ip6_dst)); break; } if (oif) log(LOG_SECURITY | LOG_INFO, "%s %s %s out via %s\n", name, action, proto, if_name(oif)); else if (rif) log(LOG_SECURITY | LOG_INFO, "%s %s %s in via %s\n", name, action, proto, if_name(rif)); else log(LOG_SECURITY | LOG_INFO, "%s %s %s", name, action, proto); if (fw6_verbose_limit != 0 && count == fw6_verbose_limit) log(LOG_SECURITY | LOG_INFO, "ip6fw: limit reached on entry %d\n", f ? f->fw_number : -1); } /* * Parameters: * * ip Pointer to packet header (struct ip6_hdr *) * hlen Packet header length * oif Outgoing interface, or NULL if packet is incoming * #ifndef IP6FW_DIVERT_RESTART * *cookie Ignore all divert/tee rules to this port (if non-zero) * #else * *cookie Skip up to the first rule past this rule number; * #endif * *m The packet; we set to NULL when/if we nuke it. * * Return value: * * 0 The packet is to be accepted and routed normally OR * the packet was denied/rejected and has been dropped; * in the latter case, *m is equal to NULL upon return. * port Divert the packet to port. */ static int ip6_fw_chk(struct ip6_hdr **pip6, struct ifnet *oif, u_int16_t *cookie, struct mbuf **m) { struct ip6_fw_chain *chain; struct ip6_fw *rule = NULL; struct ip6_hdr *ip6 = *pip6; struct ifnet *const rif = (*m)->m_pkthdr.rcvif; u_short offset = 0; int off = sizeof(struct ip6_hdr), nxt = ip6->ip6_nxt; u_short src_port, dst_port; #ifdef IP6FW_DIVERT_RESTART u_int16_t skipto = *cookie; #else u_int16_t ignport = ntohs(*cookie); #endif *cookie = 0; /* * Go down the chain, looking for enlightment * #ifdef IP6FW_DIVERT_RESTART * If we've been asked to start at a given rule immediatly, do so. * #endif */ chain = LIST_FIRST(&ip6_fw_chain); #ifdef IP6FW_DIVERT_RESTART if (skipto) { if (skipto >= 65535) goto dropit; while (chain && (chain->rule->fw_number <= skipto)) { chain = LIST_NEXT(chain, chain); } if (! 
chain) goto dropit; } #endif /* IP6FW_DIVERT_RESTART */ for (; chain; chain = LIST_NEXT(chain, chain)) { struct ip6_fw *const f = chain->rule; if (oif) { /* Check direction outbound */ if (!(f->fw_flg & IPV6_FW_F_OUT)) continue; } else { /* Check direction inbound */ if (!(f->fw_flg & IPV6_FW_F_IN)) continue; } #define IN6_ARE_ADDR_MASKEQUAL(x,y,z) (\ (((x)->s6_addr32[0] & (y)->s6_addr32[0]) == (z)->s6_addr32[0]) && \ (((x)->s6_addr32[1] & (y)->s6_addr32[1]) == (z)->s6_addr32[1]) && \ (((x)->s6_addr32[2] & (y)->s6_addr32[2]) == (z)->s6_addr32[2]) && \ (((x)->s6_addr32[3] & (y)->s6_addr32[3]) == (z)->s6_addr32[3])) /* If src-addr doesn't match, not this rule. */ if (((f->fw_flg & IPV6_FW_F_INVSRC) != 0) ^ (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_src,&f->fw_smsk,&f->fw_src))) continue; /* If dest-addr doesn't match, not this rule. */ if (((f->fw_flg & IPV6_FW_F_INVDST) != 0) ^ (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_dst,&f->fw_dmsk,&f->fw_dst))) continue; #undef IN6_ARE_ADDR_MASKEQUAL /* Interface check */ if ((f->fw_flg & IF6_FW_F_VIAHACK) == IF6_FW_F_VIAHACK) { struct ifnet *const iface = oif ? 
oif : rif; /* Backwards compatibility hack for "via" */ if (!iface || !iface_match(iface, &f->fw_in_if, f->fw_flg & IPV6_FW_F_OIFNAME)) continue; } else { /* Check receive interface */ if ((f->fw_flg & IPV6_FW_F_IIFACE) && (!rif || !iface_match(rif, &f->fw_in_if, f->fw_flg & IPV6_FW_F_IIFNAME))) continue; /* Check outgoing interface */ if ((f->fw_flg & IPV6_FW_F_OIFACE) && (!oif || !iface_match(oif, &f->fw_out_if, f->fw_flg & IPV6_FW_F_OIFNAME))) continue; } /* Check IP options */ if (!ip6opts_match(&ip6, f, m, &off, &nxt, &offset)) continue; /* Fragments */ if ((f->fw_flg & IPV6_FW_F_FRAG) && !offset) continue; /* Check protocol; if wildcard, match */ if (f->fw_prot == IPPROTO_IPV6) goto got_match; /* If different, don't match */ if (nxt != f->fw_prot) continue; #define PULLUP_TO(len) do { \ if ((*m)->m_len < (len) \ && (*m = m_pullup(*m, (len))) == 0) { \ goto dropit; \ } \ *pip6 = ip6 = mtod(*m, struct ip6_hdr *); \ } while (/*CONSTCOND*/ 0) /* Protocol specific checks */ switch (nxt) { case IPPROTO_TCP: { struct tcphdr *tcp6; if (offset == 1) { /* cf. RFC 1858 */ PULLUP_TO(off + 4); /* XXX ? */ goto bogusfrag; } if (offset != 0) { /* * TCP flags and ports aren't available in this * packet -- if this rule specified either one, * we consider the rule a non-match. */ if (f->fw_nports != 0 || f->fw_tcpf != f->fw_tcpnf) continue; break; } PULLUP_TO(off + 14); tcp6 = (struct tcphdr *) ((caddr_t)ip6 + off); if (((f->fw_tcpf != f->fw_tcpnf) || (f->fw_ipflg & IPV6_FW_IF_TCPEST)) && !tcp6flg_match(tcp6, f)) continue; src_port = ntohs(tcp6->th_sport); dst_port = ntohs(tcp6->th_dport); goto check_ports; } case IPPROTO_UDP: { struct udphdr *udp; if (offset != 0) { /* * Port specification is unavailable -- if this * rule specifies a port, we consider the rule * a non-match. 
*/ if (f->fw_nports != 0) continue; break; } PULLUP_TO(off + 4); udp = (struct udphdr *) ((caddr_t)ip6 + off); src_port = ntohs(udp->uh_sport); dst_port = ntohs(udp->uh_dport); check_ports: if (!port_match6(&f->fw_pts[0], IPV6_FW_GETNSRCP(f), src_port, f->fw_flg & IPV6_FW_F_SRNG)) continue; if (!port_match6(&f->fw_pts[IPV6_FW_GETNSRCP(f)], IPV6_FW_GETNDSTP(f), dst_port, f->fw_flg & IPV6_FW_F_DRNG)) continue; break; } case IPPROTO_ICMPV6: { struct icmp6_hdr *icmp; if (offset != 0) /* Type isn't valid */ break; PULLUP_TO(off + 2); icmp = (struct icmp6_hdr *) ((caddr_t)ip6 + off); if (!icmp6type_match(icmp, f)) continue; break; } #undef PULLUP_TO bogusfrag: if (fw6_verbose) ip6fw_report(NULL, ip6, rif, oif, off, nxt); goto dropit; } got_match: #ifndef IP6FW_DIVERT_RESTART /* Ignore divert/tee rule if socket port is "ignport" */ switch (f->fw_flg & IPV6_FW_F_COMMAND) { case IPV6_FW_F_DIVERT: case IPV6_FW_F_TEE: if (f->fw_divert_port == ignport) continue; /* ignore this rule */ break; } #endif /* IP6FW_DIVERT_RESTART */ /* Update statistics */ f->fw_pcnt += 1; f->fw_bcnt += ntohs(ip6->ip6_plen); f->timestamp = time_second; /* Log to console if desired */ if ((f->fw_flg & IPV6_FW_F_PRN) && fw6_verbose) ip6fw_report(f, ip6, rif, oif, off, nxt); /* Take appropriate action */ switch (f->fw_flg & IPV6_FW_F_COMMAND) { case IPV6_FW_F_ACCEPT: return (0); case IPV6_FW_F_COUNT: continue; case IPV6_FW_F_DIVERT: #ifdef IP6FW_DIVERT_RESTART *cookie = f->fw_number; #else *cookie = htons(f->fw_divert_port); #endif /* IP6FW_DIVERT_RESTART */ return (f->fw_divert_port); case IPV6_FW_F_TEE: /* * XXX someday tee packet here, but beware that you * can't use m_copym() or m_copypacket() because * the divert input routine modifies the mbuf * (and these routines only increment reference * counts in the case of mbuf clusters), so need * to write custom routine. 
*/ continue; case IPV6_FW_F_SKIPTO: #ifdef DIAGNOSTIC while (chain->chain.le_next && chain->chain.le_next->rule->fw_number < f->fw_skipto_rule) #else while (chain->chain.le_next->rule->fw_number < f->fw_skipto_rule) #endif chain = chain->chain.le_next; continue; } /* Deny/reject this packet using this rule */ rule = f; break; } #ifdef DIAGNOSTIC /* Rule 65535 should always be there and should always match */ if (!chain) panic("ip6_fw: chain"); #endif /* * At this point, we're going to drop the packet. * Send a reject notice if all of the following are true: * * - The packet matched a reject rule * - The packet is not an ICMP packet, or is an ICMP query packet * - The packet is not a multicast or broadcast packet */ if ((rule->fw_flg & IPV6_FW_F_COMMAND) == IPV6_FW_F_REJECT && (nxt != IPPROTO_ICMPV6 || is_icmp6_query(ip6, off)) && !((*m)->m_flags & (M_BCAST|M_MCAST)) && !IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { switch (rule->fw_reject_code) { case IPV6_FW_REJECT_RST: #if 1 /* not tested */ { struct tcphdr *const tcp = (struct tcphdr *) ((caddr_t)ip6 + off); struct { struct ip6_hdr ip6; struct tcphdr th; } ti; tcp_seq ack, seq; int flags; if (offset != 0 || (tcp->th_flags & TH_RST)) break; ti.ip6 = *ip6; ti.th = *tcp; ti.th.th_seq = ntohl(ti.th.th_seq); ti.th.th_ack = ntohl(ti.th.th_ack); ti.ip6.ip6_nxt = IPPROTO_TCP; if (ti.th.th_flags & TH_ACK) { ack = 0; seq = ti.th.th_ack; flags = TH_RST; } else { ack = ti.th.th_seq; if (((*m)->m_flags & M_PKTHDR) != 0) { ack += (*m)->m_pkthdr.len - off - (ti.th.th_off << 2); } else if (ip6->ip6_plen) { ack += ntohs(ip6->ip6_plen) + sizeof(*ip6) - off - (ti.th.th_off << 2); } else { m_freem(*m); *m = 0; break; } if (tcp->th_flags & TH_SYN) ack++; seq = 0; flags = TH_RST|TH_ACK; } bcopy(&ti, ip6, sizeof(ti)); tcp_respond(NULL, ip6, (struct tcphdr *)(ip6 + 1), *m, ack, seq, flags); *m = NULL; break; } #endif default: /* Send an ICMP unreachable using code */ if (oif) (*m)->m_pkthdr.rcvif = oif; icmp6_error(*m, ICMP6_DST_UNREACH, 
rule->fw_reject_code, 0); *m = NULL; break; } } dropit: /* * Finally, drop the packet. */ if (*m) { m_freem(*m); *m = NULL; } return (0); } static int add_entry6(struct ip6_fw_head *chainptr, struct ip6_fw *frwl) { struct ip6_fw *ftmp = 0; struct ip6_fw_chain *fwc = 0, *fcp, *fcpl = 0; u_short nbr = 0; int s; fwc = malloc(sizeof *fwc, M_IP6FW, M_NOWAIT); ftmp = malloc(sizeof *ftmp, M_IP6FW, M_NOWAIT); if (!fwc || !ftmp) { dprintf(("%s malloc said no\n", err_prefix)); if (fwc) free(fwc, M_IP6FW); if (ftmp) free(ftmp, M_IP6FW); return (ENOSPC); } bcopy(frwl, ftmp, sizeof(struct ip6_fw)); ftmp->fw_in_if.fu_via_if.name[IP6FW_IFNLEN - 1] = '\0'; ftmp->fw_pcnt = 0L; ftmp->fw_bcnt = 0L; fwc->rule = ftmp; s = splnet(); if (!chainptr->lh_first) { LIST_INSERT_HEAD(chainptr, fwc, chain); splx(s); return (0); } else if (ftmp->fw_number == (u_short)-1) { if (fwc) free(fwc, M_IP6FW); if (ftmp) free(ftmp, M_IP6FW); splx(s); dprintf(("%s bad rule number\n", err_prefix)); return (EINVAL); } /* If entry number is 0, find highest numbered rule and add 100 */ if (ftmp->fw_number == 0) { for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) { if (fcp->rule->fw_number != (u_short)-1) nbr = fcp->rule->fw_number; else break; } if (nbr < (u_short)-1 - 100) nbr += 100; ftmp->fw_number = nbr; } /* Got a valid number; now insert it, keeping the list ordered */ for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) { if (fcp->rule->fw_number > ftmp->fw_number) { if (fcpl) { LIST_INSERT_AFTER(fcpl, fwc, chain); } else { LIST_INSERT_HEAD(chainptr, fwc, chain); } break; } else { fcpl = fcp; } } splx(s); return (0); } static int del_entry6(struct ip6_fw_head *chainptr, u_short number) { struct ip6_fw_chain *fcp; int s; s = splnet(); fcp = chainptr->lh_first; if (number != (u_short)-1) { for (; fcp; fcp = fcp->chain.le_next) { if (fcp->rule->fw_number == number) { LIST_REMOVE(fcp, chain); splx(s); free(fcp->rule, M_IP6FW); free(fcp, M_IP6FW); return 0; } } } splx(s); return (EINVAL); 
} static int zero_entry6(struct mbuf *m) { struct ip6_fw *frwl; struct ip6_fw_chain *fcp; int s; if (m && m->m_len != 0) { if (m->m_len != sizeof(struct ip6_fw)) return (EINVAL); frwl = mtod(m, struct ip6_fw *); } else frwl = NULL; /* * It's possible to insert multiple chain entries with the * same number, so we don't stop after finding the first * match if zeroing a specific entry. */ s = splnet(); for (fcp = ip6_fw_chain.lh_first; fcp; fcp = fcp->chain.le_next) if (!frwl || frwl->fw_number == fcp->rule->fw_number) { fcp->rule->fw_bcnt = fcp->rule->fw_pcnt = 0; fcp->rule->timestamp = 0; } splx(s); if (fw6_verbose) { if (frwl) log(LOG_SECURITY | LOG_NOTICE, "ip6fw: Entry %d cleared.\n", frwl->fw_number); else log(LOG_SECURITY | LOG_NOTICE, "ip6fw: Accounting cleared.\n"); } return (0); } static struct ip6_fw * check_ip6fw_mbuf(struct mbuf *m) { /* Check length */ if (m->m_len != sizeof(struct ip6_fw)) { dprintf(("%s len=%d, want %zu\n", err_prefix, m->m_len, sizeof(struct ip6_fw))); return (NULL); } return (check_ip6fw_struct(mtod(m, struct ip6_fw *))); } static struct ip6_fw * check_ip6fw_struct(struct ip6_fw *frwl) { /* Check for invalid flag bits */ if ((frwl->fw_flg & ~IPV6_FW_F_MASK) != 0) { dprintf(("%s undefined flag bits set (flags=%x)\n", err_prefix, frwl->fw_flg)); return (NULL); } /* Must apply to incoming or outgoing (or both) */ if (!(frwl->fw_flg & (IPV6_FW_F_IN | IPV6_FW_F_OUT))) { dprintf(("%s neither in nor out\n", err_prefix)); return (NULL); } /* Empty interface name is no good */ if (((frwl->fw_flg & IPV6_FW_F_IIFNAME) && !*frwl->fw_in_if.fu_via_if.name) || ((frwl->fw_flg & IPV6_FW_F_OIFNAME) && !*frwl->fw_out_if.fu_via_if.name)) { dprintf(("%s empty interface name\n", err_prefix)); return (NULL); } /* Sanity check interface matching */ if ((frwl->fw_flg & IF6_FW_F_VIAHACK) == IF6_FW_F_VIAHACK) { ; /* allow "via" backwards compatibility */ } else if ((frwl->fw_flg & IPV6_FW_F_IN) && (frwl->fw_flg & IPV6_FW_F_OIFACE)) { dprintf(("%s outgoing 
interface check on incoming\n", err_prefix)); return (NULL); } /* Sanity check port ranges */ if ((frwl->fw_flg & IPV6_FW_F_SRNG) && IPV6_FW_GETNSRCP(frwl) < 2) { dprintf(("%s src range set but n_src_p=%d\n", err_prefix, IPV6_FW_GETNSRCP(frwl))); return (NULL); } if ((frwl->fw_flg & IPV6_FW_F_DRNG) && IPV6_FW_GETNDSTP(frwl) < 2) { dprintf(("%s dst range set but n_dst_p=%d\n", err_prefix, IPV6_FW_GETNDSTP(frwl))); return (NULL); } if (IPV6_FW_GETNSRCP(frwl) + IPV6_FW_GETNDSTP(frwl) > IPV6_FW_MAX_PORTS) { dprintf(("%s too many ports (%d+%d)\n", err_prefix, IPV6_FW_GETNSRCP(frwl), IPV6_FW_GETNDSTP(frwl))); return (NULL); } /* * Protocols other than TCP/UDP don't use port range */ if ((frwl->fw_prot != IPPROTO_TCP) && (frwl->fw_prot != IPPROTO_UDP) && (IPV6_FW_GETNSRCP(frwl) || IPV6_FW_GETNDSTP(frwl))) { dprintf(("%s port(s) specified for non TCP/UDP rule\n", err_prefix)); return (NULL); } /* * Rather than modify the entry to make such entries work, * we reject this rule and require user level utilities * to enforce whatever policy they deem appropriate. 
*/ if ((frwl->fw_src.s6_addr32[0] & (~frwl->fw_smsk.s6_addr32[0])) || (frwl->fw_src.s6_addr32[1] & (~frwl->fw_smsk.s6_addr32[1])) || (frwl->fw_src.s6_addr32[2] & (~frwl->fw_smsk.s6_addr32[2])) || (frwl->fw_src.s6_addr32[3] & (~frwl->fw_smsk.s6_addr32[3])) || (frwl->fw_dst.s6_addr32[0] & (~frwl->fw_dmsk.s6_addr32[0])) || (frwl->fw_dst.s6_addr32[1] & (~frwl->fw_dmsk.s6_addr32[1])) || (frwl->fw_dst.s6_addr32[2] & (~frwl->fw_dmsk.s6_addr32[2])) || (frwl->fw_dst.s6_addr32[3] & (~frwl->fw_dmsk.s6_addr32[3]))) { dprintf(("%s rule never matches\n", err_prefix)); return (NULL); } if ((frwl->fw_flg & IPV6_FW_F_FRAG) && (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { if (frwl->fw_nports) { dprintf(("%s cannot mix 'frag' and ports\n", err_prefix)); return (NULL); } if (frwl->fw_prot == IPPROTO_TCP && frwl->fw_tcpf != frwl->fw_tcpnf) { dprintf(("%s cannot mix 'frag' with TCP flags\n", err_prefix)); return (NULL); } } /* Check command specific stuff */ switch (frwl->fw_flg & IPV6_FW_F_COMMAND) { case IPV6_FW_F_REJECT: if (frwl->fw_reject_code >= 0x100 && !(frwl->fw_prot == IPPROTO_TCP && frwl->fw_reject_code == IPV6_FW_REJECT_RST)) { dprintf(("%s unknown reject code\n", err_prefix)); return (NULL); } break; case IPV6_FW_F_DIVERT: /* Diverting to port zero is invalid */ case IPV6_FW_F_TEE: if (frwl->fw_divert_port == 0) { dprintf(("%s can't divert to port 0\n", err_prefix)); return (NULL); } break; case IPV6_FW_F_DENY: case IPV6_FW_F_ACCEPT: case IPV6_FW_F_COUNT: case IPV6_FW_F_SKIPTO: break; default: dprintf(("%s invalid command\n", err_prefix)); return (NULL); } return frwl; } static int ip6_fw_ctl(int stage, struct mbuf **mm) { int error; struct mbuf *m; if (stage == IPV6_FW_GET) { struct ip6_fw_chain *fcp = ip6_fw_chain.lh_first; *mm = m = m_get(M_TRYWAIT, MT_DATA); /* XXX */ if (!m) return (ENOBUFS); if (sizeof *(fcp->rule) > MLEN) { MCLGET(m, M_TRYWAIT); if ((m->m_flags & M_EXT) == 0) { m_free(m); return (ENOBUFS); } } for (; fcp; fcp = fcp->chain.le_next) 
{ bcopy(fcp->rule, m->m_data, sizeof *(fcp->rule)); m->m_len = sizeof *(fcp->rule); m->m_next = m_get(M_TRYWAIT, MT_DATA); /* XXX */ if (!m->m_next) { m_freem(*mm); return (ENOBUFS); } m = m->m_next; if (sizeof *(fcp->rule) > MLEN) { MCLGET(m, M_TRYWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(*mm); return (ENOBUFS); } } m->m_len = 0; } return (0); } m = *mm; /* only allow get calls if secure mode > 2 */ if (securelevel > 2) { if (m) { (void)m_freem(m); *mm = 0; } return (EPERM); } if (stage == IPV6_FW_FLUSH) { while (ip6_fw_chain.lh_first != NULL && ip6_fw_chain.lh_first->rule->fw_number != (u_short)-1) { struct ip6_fw_chain *fcp = ip6_fw_chain.lh_first; int s = splnet(); LIST_REMOVE(ip6_fw_chain.lh_first, chain); splx(s); free(fcp->rule, M_IP6FW); free(fcp, M_IP6FW); } if (m) { (void)m_freem(m); *mm = 0; } return (0); } if (stage == IPV6_FW_ZERO) { error = zero_entry6(m); if (m) { (void)m_freem(m); *mm = 0; } return (error); } if (m == NULL) { printf("%s NULL mbuf ptr\n", err_prefix); return (EINVAL); } if (stage == IPV6_FW_ADD) { struct ip6_fw *frwl = check_ip6fw_mbuf(m); if (!frwl) error = EINVAL; else error = add_entry6(&ip6_fw_chain, frwl); if (m) { (void)m_freem(m); *mm = 0; } return error; } if (stage == IPV6_FW_DEL) { if (m->m_len != sizeof(struct ip6_fw)) { dprintf(("%s len=%d, want %zu\n", err_prefix, m->m_len, sizeof(struct ip6_fw))); error = EINVAL; } else if (mtod(m, struct ip6_fw *)->fw_number == (u_short)-1) { dprintf(("%s can't delete rule 65535\n", err_prefix)); error = EINVAL; } else error = del_entry6(&ip6_fw_chain, mtod(m, struct ip6_fw *)->fw_number); if (m) { (void)m_freem(m); *mm = 0; } return error; } dprintf(("%s unknown request %d\n", err_prefix, stage)); if (m) { (void)m_freem(m); *mm = 0; } return (EINVAL); } void ip6_fw_init(void) { struct ip6_fw default_rule; ip6_fw_chk_ptr = ip6_fw_chk; ip6_fw_ctl_ptr = ip6_fw_ctl; LIST_INIT(&ip6_fw_chain); bzero(&default_rule, sizeof default_rule); default_rule.fw_prot = IPPROTO_IPV6; 
default_rule.fw_number = (u_short)-1; #ifdef IPV6FIREWALL_DEFAULT_TO_ACCEPT default_rule.fw_flg |= IPV6_FW_F_ACCEPT; #else default_rule.fw_flg |= IPV6_FW_F_DENY; #endif default_rule.fw_flg |= IPV6_FW_F_IN | IPV6_FW_F_OUT; if (check_ip6fw_struct(&default_rule) == NULL || add_entry6(&ip6_fw_chain, &default_rule)) panic(__FUNCTION__); printf("IPv6 packet filtering initialized, "); #ifdef IPV6FIREWALL_DEFAULT_TO_ACCEPT printf("default to accept, "); #endif #ifndef IPV6FIREWALL_VERBOSE printf("logging disabled\n"); #else if (fw6_verbose_limit == 0) printf("unlimited logging\n"); else printf("logging limited to %d packets/entry\n", fw6_verbose_limit); #endif } static ip6_fw_chk_t *old_chk_ptr; static ip6_fw_ctl_t *old_ctl_ptr; static int ip6fw_modevent(module_t mod, int type, void *unused) { int s; switch (type) { case MOD_LOAD: s = splnet(); old_chk_ptr = ip6_fw_chk_ptr; old_ctl_ptr = ip6_fw_ctl_ptr; ip6_fw_init(); splx(s); return 0; case MOD_UNLOAD: s = splnet(); ip6_fw_chk_ptr = old_chk_ptr; ip6_fw_ctl_ptr = old_ctl_ptr; while (LIST_FIRST(&ip6_fw_chain) != NULL) { struct ip6_fw_chain *fcp = LIST_FIRST(&ip6_fw_chain); LIST_REMOVE(LIST_FIRST(&ip6_fw_chain), chain); free(fcp->rule, M_IP6FW); free(fcp, M_IP6FW); } splx(s); printf("IPv6 firewall unloaded\n"); return 0; default: break; } return 0; } static moduledata_t ip6fwmod = { "ip6fw", ip6fw_modevent, 0 }; DECLARE_MODULE(ip6fw, ip6fwmod, SI_SUB_PSEUDO, SI_ORDER_ANY); Index: head/sys/netncp/ncp_mod.c =================================================================== --- head/sys/netncp/ncp_mod.c (revision 129879) +++ head/sys/netncp/ncp_mod.c (revision 129880) @@ -1,527 +1,528 @@ /* * Copyright (c) 2003 Tim J. Robbins. * Copyright (c) 1999, 2000, 2001 Boris Popov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Boris Popov. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include int ncp_version = NCP_VERSION; SYSCTL_NODE(_net, OID_AUTO, ncp, CTLFLAG_RW, NULL, "NetWare requester"); SYSCTL_INT(_net_ncp, OID_AUTO, version, CTLFLAG_RD, &ncp_version, 0, ""); MODULE_VERSION(ncp, 1); MODULE_DEPEND(ncp, libmchain, 1, 1, 1); static dev_t ncp_dev; static d_ioctl_t ncp_ioctl; static struct cdevsw ncp_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_ioctl = ncp_ioctl, .d_name = "ncp", }; static int ncp_conn_frag_rq(struct ncp_conn *, struct thread *, struct ncp_conn_frag *); static int ncp_conn_handler(struct thread *, struct ncpioc_request *, struct ncp_conn *, struct ncp_handle *); static int sncp_conn_scan(struct thread *, struct ncpioc_connscan *); static int sncp_connect(struct thread *, struct ncpioc_connect *); static int sncp_request(struct thread *, struct ncpioc_request *); static int ncp_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) { switch (cmd) { case NCPIOC_CONNECT: return (sncp_connect(td, (struct ncpioc_connect *)data)); case NCPIOC_CONNSCAN: return (sncp_conn_scan(td, (struct ncpioc_connscan *)data)); case NCPIOC_REQUEST: return (sncp_request(td, (struct ncpioc_request *)data)); } return (EINVAL); } /* * Attach to NCP server */ static int sncp_connect(struct thread *td, struct ncpioc_connect *args) { int connHandle = 0, error; struct ncp_conn *conn; struct ncp_handle *handle; struct ncp_conn_args li; checkbad(copyin(args->ioc_li,&li,sizeof(li))); /* XXX Should be useracc() */ checkbad(copyout(&connHandle,args->ioc_connhandle, sizeof(connHandle))); li.password = li.user = NULL; error = ncp_conn_getattached(&li, td, td->td_ucred, NCPM_WRITE | NCPM_EXECUTE, &conn); if (error) { error = ncp_conn_alloc(&li, td, td->td_ucred, &conn); if (error) goto bad; error = ncp_conn_reconnect(conn); if (error) 
ncp_conn_free(conn); } if (!error) { error = ncp_conn_gethandle(conn, td, &handle); copyout(&handle->nh_id, args->ioc_connhandle, sizeof(args->ioc_connhandle)); ncp_conn_unlock(conn,td); } bad: return error; } static int sncp_request(struct thread *td, struct ncpioc_request *args) { struct ncp_rq *rqp; struct ncp_conn *conn; struct ncp_handle *handle; int error = 0, rqsize; error = ncp_conn_findhandle(args->ioc_connhandle, td, &handle); if (error) return error; conn = handle->nh_conn; if (args->ioc_fn == NCP_CONN) return ncp_conn_handler(td, args, conn, handle); error = copyin(&args->ioc_ncpbuf->rqsize, &rqsize, sizeof(int)); if (error) return(error); error = ncp_rq_alloc(args->ioc_fn, conn, td, td->td_ucred, &rqp); if (error) return error; if (rqsize) { error = mb_put_mem(&rqp->rq, (caddr_t)args->ioc_ncpbuf->packet, rqsize, MB_MUSER); if (error) goto bad; } rqp->nr_flags |= NCPR_DONTFREEONERR; error = ncp_request(rqp); if (error == 0 && rqp->nr_rpsize) error = md_get_mem(&rqp->rp, (caddr_t)args->ioc_ncpbuf->packet, rqp->nr_rpsize, MB_MUSER); copyout(&rqp->nr_cs, &args->ioc_ncpbuf->cs, sizeof(rqp->nr_cs)); copyout(&rqp->nr_cc, &args->ioc_ncpbuf->cc, sizeof(rqp->nr_cc)); copyout(&rqp->nr_rpsize, &args->ioc_ncpbuf->rpsize, sizeof(rqp->nr_rpsize)); bad: ncp_rq_done(rqp); return error; } static int ncp_mod_login(struct ncp_conn *conn, char *user, int objtype, char *password, struct thread *td, struct ucred *cred) { int error; if (ncp_suser(cred) != 0 && cred->cr_uid != conn->nc_owner->cr_uid) return EACCES; conn->li.user = ncp_str_dup(user); if (conn->li.user == NULL) return ENOMEM; conn->li.password = ncp_str_dup(password); if (conn->li.password == NULL) { error = ENOMEM; goto bad; } ncp_str_upper(conn->li.user); if ((conn->li.opt & NCP_OPT_NOUPCASEPASS) == 0) ncp_str_upper(conn->li.password); conn->li.objtype = objtype; error = ncp_conn_login(conn, td, cred); return error; bad: if (conn->li.user) { free(conn->li.user, M_NCPDATA); conn->li.user = NULL; } if 
(conn->li.password) { free(conn->li.password, M_NCPDATA); conn->li.password = NULL; } return error; } static int ncp_conn_handler(struct thread *td, struct ncpioc_request *args, struct ncp_conn *conn, struct ncp_handle *hp) { int error = 0, rqsize, subfn; struct ucred *cred; char *pdata; cred = td->td_ucred; error = copyin(&args->ioc_ncpbuf->rqsize, &rqsize, sizeof(int)); if (error) return(error); error = 0; pdata = args->ioc_ncpbuf->packet; subfn = *(pdata++) & 0xff; rqsize--; switch (subfn) { case NCP_CONN_READ: case NCP_CONN_WRITE: { struct ncp_rw rwrq; struct uio auio; struct iovec iov; if (rqsize != sizeof(rwrq)) return (EBADRPC); error = copyin(pdata,&rwrq,rqsize); if (error) return (error); iov.iov_base = rwrq.nrw_base; iov.iov_len = rwrq.nrw_cnt; auio.uio_iov = &iov; auio.uio_iovcnt = 1; auio.uio_offset = rwrq.nrw_offset; auio.uio_resid = rwrq.nrw_cnt; auio.uio_segflg = UIO_USERSPACE; auio.uio_rw = (subfn == NCP_CONN_READ) ? UIO_READ : UIO_WRITE; auio.uio_td = td; if (subfn == NCP_CONN_READ) error = ncp_read(conn, &rwrq.nrw_fh, &auio, cred); else error = ncp_write(conn, &rwrq.nrw_fh, &auio, cred); rwrq.nrw_cnt -= auio.uio_resid; /*td->td_retval[0] = rwrq.nrw_cnt;*/ break; } /* case int_read/write */ case NCP_CONN_SETFLAGS: { u_int16_t mask, flags; error = copyin(pdata,&mask, sizeof(mask)); if (error) return error; pdata += sizeof(mask); error = copyin(pdata,&flags,sizeof(flags)); if (error) return error; error = ncp_conn_lock(conn, td, cred, NCPM_WRITE); if (error) return error; if (mask & NCPFL_PERMANENT) { conn->flags &= ~NCPFL_PERMANENT; conn->flags |= (flags & NCPFL_PERMANENT); } if (mask & NCPFL_PRIMARY) { error = ncp_conn_setprimary(conn, flags & NCPFL_PRIMARY); if (error) { ncp_conn_unlock(conn, td); break; } } ncp_conn_unlock(conn, td); break; } case NCP_CONN_LOGIN: { struct ncp_conn_login la; if (rqsize != sizeof(la)) return EBADRPC; if (conn->flags & NCPFL_LOGGED) return EALREADY; if ((error = copyin(pdata,&la,rqsize)) != 0) break; error = 
ncp_conn_lock(conn, td, cred, NCPM_EXECUTE | NCPM_WRITE); if (error) return error; error = ncp_mod_login(conn, la.username, la.objtype, la.password, td, td->td_ucred); ncp_conn_unlock(conn, td); break; } case NCP_CONN_GETINFO: { struct ncp_conn_stat ncs; int len = sizeof(ncs); error = ncp_conn_lock(conn, td, td->td_ucred, NCPM_READ); if (error) return error; ncp_conn_getinfo(conn, &ncs); copyout(&len, &args->ioc_ncpbuf->rpsize, sizeof(int)); error = copyout(&ncs, &args->ioc_ncpbuf->packet, len); ncp_conn_unlock(conn, td); break; } case NCP_CONN_GETUSER: { int len; error = ncp_conn_lock(conn, td, td->td_ucred, NCPM_READ); if (error) return error; len = (conn->li.user) ? strlen(conn->li.user) + 1 : 0; copyout(&len, &args->ioc_ncpbuf->rpsize, sizeof(int)); if (len) { error = copyout(conn->li.user, &args->ioc_ncpbuf->packet, len); } ncp_conn_unlock(conn, td); break; } case NCP_CONN_CONN2REF: { int len = sizeof(int); error = ncp_conn_lock(conn, td, td->td_ucred, NCPM_READ); if (error) return error; copyout(&len, &args->ioc_ncpbuf->rpsize, sizeof(int)); if (len) { error = copyout(&conn->nc_id, &args->ioc_ncpbuf->packet, len); } ncp_conn_unlock(conn, td); break; } case NCP_CONN_FRAG: { struct ncp_conn_frag nf; if (rqsize != sizeof(nf)) return (EBADRPC); if ((error = copyin(pdata, &nf, rqsize)) != 0) break; error = ncp_conn_lock(conn, td, cred, NCPM_EXECUTE); if (error) return error; error = ncp_conn_frag_rq(conn, td, &nf); ncp_conn_unlock(conn, td); copyout(&nf, &pdata, sizeof(nf)); td->td_retval[0] = error; break; } case NCP_CONN_DUP: { struct ncp_handle *newhp; int len = sizeof(NWCONN_HANDLE); error = ncp_conn_lock(conn, td, cred, NCPM_READ); if (error) break; copyout(&len, &args->ioc_ncpbuf->rpsize, len); error = ncp_conn_gethandle(conn, td, &newhp); if (!error) error = copyout(&newhp->nh_id, args->ioc_ncpbuf->packet, len); ncp_conn_unlock(conn, td); break; } case NCP_CONN_CONNCLOSE: { error = ncp_conn_lock(conn, td, cred, NCPM_EXECUTE); if (error) break; 
ncp_conn_puthandle(hp, td, 0); error = ncp_conn_free(conn); if (error) ncp_conn_unlock(conn, td); break; } default: error = EOPNOTSUPP; } return error; } static int sncp_conn_scan(struct thread *td, struct ncpioc_connscan *args) { int connHandle = 0, error; struct ncp_conn_args li, *lip; struct ncp_conn *conn; struct ncp_handle *hp; char *user = NULL, *password = NULL; if (args->ioc_li) { if (copyin(args->ioc_li, &li, sizeof(li))) return EFAULT; lip = &li; } else { lip = NULL; } if (lip != NULL) { lip->server[sizeof(lip->server)-1]=0; /* just to make sure */ ncp_str_upper(lip->server); if (lip->user) { user = ncp_str_dup(lip->user); if (user == NULL) return EINVAL; ncp_str_upper(user); } if (lip->password) { password = ncp_str_dup(lip->password); if (password == NULL) { if (user) free(user, M_NCPDATA); return EINVAL; } ncp_str_upper(password); } lip->user = user; lip->password = password; } error = ncp_conn_getbyli(lip, td, td->td_ucred, NCPM_EXECUTE, &conn); if (!error) { /* already have this login */ ncp_conn_gethandle(conn, td, &hp); connHandle = hp->nh_id; ncp_conn_unlock(conn, td); copyout(&connHandle, args->ioc_connhandle, sizeof(connHandle)); } if (user) free(user, M_NCPDATA); if (password) free(password, M_NCPDATA); return error; } int ncp_conn_frag_rq(struct ncp_conn *conn, struct thread *td, struct ncp_conn_frag *nfp) { NW_FRAGMENT *fp; struct ncp_rq *rqp; u_int32_t fsize; int error, i, rpsize; error = ncp_rq_alloc(nfp->fn, conn, td, td->td_ucred, &rqp); if (error) return error; for(fp = nfp->rqf, i = 0; i < nfp->rqfcnt; i++, fp++) { error = mb_put_mem(&rqp->rq, (caddr_t)fp->fragAddress, fp->fragSize, MB_MUSER); if (error) goto bad; } rqp->nr_flags |= NCPR_DONTFREEONERR; error = ncp_request(rqp); if (error) goto bad; rpsize = rqp->nr_rpsize; if (rpsize && nfp->rpfcnt) { for(fp = nfp->rpf, i = 0; i < nfp->rpfcnt; i++, fp++) { error = copyin(&fp->fragSize, &fsize, sizeof (fsize)); if (error) break; fsize = min(fsize, rpsize); error = md_get_mem(&rqp->rp, 
(caddr_t)fp->fragAddress, fsize, MB_MUSER); if (error) break; rpsize -= fsize; error = copyout(&fsize, &fp->fragSize, sizeof (fsize)); if (error) break; } } nfp->cs = rqp->nr_cs; nfp->cc = rqp->nr_cc; bad: ncp_rq_done(rqp); return error; } static int ncp_load(void) { int error; if ((error = ncp_init()) != 0) return (error); ncp_dev = make_dev(&ncp_cdevsw, 0, 0, 0, 0666, "ncp"); printf("ncp_load: loaded\n"); return (0); } static int ncp_unload(void) { int error; error = ncp_done(); if (error) return (error); destroy_dev(ncp_dev); printf("ncp_unload: unloaded\n"); return (0); } static int ncp_mod_handler(module_t mod, int type, void *data) { int error; switch (type) { case MOD_LOAD: error = ncp_load(); break; case MOD_UNLOAD: error = ncp_unload(); break; default: error = EINVAL; } return error; } static moduledata_t ncp_mod = { "ncp", ncp_mod_handler, NULL }; DECLARE_MODULE(ncp, ncp_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); Index: head/sys/netsmb/smb_dev.c =================================================================== --- head/sys/netsmb/smb_dev.c (revision 129879) +++ head/sys/netsmb/smb_dev.c (revision 129880) @@ -1,415 +1,416 @@ /* * Copyright (c) 2000-2001 Boris Popov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Boris Popov. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SMB_GETDEV(dev) ((struct smb_dev*)(dev)->si_drv1) #define SMB_CHECKMINOR(dev) do { \ sdp = SMB_GETDEV(dev); \ if (sdp == NULL) return ENXIO; \ } while(0) static d_open_t nsmb_dev_open; static d_close_t nsmb_dev_close; static d_ioctl_t nsmb_dev_ioctl; MODULE_DEPEND(netsmb, libiconv, 1, 1, 2); MODULE_VERSION(netsmb, NSMB_VERSION); static int smb_version = NSMB_VERSION; SYSCTL_DECL(_net_smb); SYSCTL_INT(_net_smb, OID_AUTO, version, CTLFLAG_RD, &smb_version, 0, ""); static MALLOC_DEFINE(M_NSMBDEV, "NETSMBDEV", "NET/SMB device"); /* int smb_dev_queue(struct smb_dev *ndp, struct smb_rq *rqp, int prio); */ static struct cdevsw nsmb_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = nsmb_dev_open, .d_close = nsmb_dev_close, 
.d_ioctl = nsmb_dev_ioctl, .d_name = NSMB_NAME }; static eventhandler_tag nsmb_dev_tag; static void nsmb_dev_clone(void *arg, char *name, int namelen, dev_t *dev) { int u; if (*dev != NODEV) return; if (dev_stdclone(name, NULL, NSMB_NAME, &u) != 1) return; *dev = make_dev(&nsmb_cdevsw, unit2minor(u), 0, 0, 0600, NSMB_NAME"%d", u); } static int nsmb_dev_open(dev_t dev, int oflags, int devtype, struct thread *td) { struct smb_dev *sdp; struct ucred *cred = td->td_ucred; int s; sdp = SMB_GETDEV(dev); if (sdp && (sdp->sd_flags & NSMBFL_OPEN)) return EBUSY; if (sdp == NULL) { sdp = malloc(sizeof(*sdp), M_NSMBDEV, M_WAITOK); dev->si_drv1 = (void*)sdp; } /* * XXX: this is just crazy - make a device for an already passed device... * someone should take care of it. */ if ((dev->si_flags & SI_NAMED) == 0) make_dev(&nsmb_cdevsw, minor(dev), cred->cr_uid, cred->cr_gid, 0700, NSMB_NAME"%d", dev2unit(dev)); bzero(sdp, sizeof(*sdp)); /* STAILQ_INIT(&sdp->sd_rqlist); STAILQ_INIT(&sdp->sd_rplist); bzero(&sdp->sd_pollinfo, sizeof(struct selinfo)); */ s = splimp(); sdp->sd_level = -1; sdp->sd_flags |= NSMBFL_OPEN; splx(s); return 0; } static int nsmb_dev_close(dev_t dev, int flag, int fmt, struct thread *td) { struct smb_dev *sdp; struct smb_vc *vcp; struct smb_share *ssp; struct smb_cred scred; int s; SMB_CHECKMINOR(dev); s = splimp(); if ((sdp->sd_flags & NSMBFL_OPEN) == 0) { splx(s); return EBADF; } smb_makescred(&scred, td, NULL); ssp = sdp->sd_share; if (ssp != NULL) smb_share_rele(ssp, &scred); vcp = sdp->sd_vc; if (vcp != NULL) smb_vc_rele(vcp, &scred); /* smb_flushq(&sdp->sd_rqlist); smb_flushq(&sdp->sd_rplist); */ dev->si_drv1 = NULL; free(sdp, M_NSMBDEV); destroy_dev(dev); splx(s); return 0; } static int nsmb_dev_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct smb_dev *sdp; struct smb_vc *vcp; struct smb_share *ssp; struct smb_cred scred; int error = 0; SMB_CHECKMINOR(dev); if ((sdp->sd_flags & NSMBFL_OPEN) == 0) return EBADF; 
smb_makescred(&scred, td, NULL); switch (cmd) { case SMBIOC_OPENSESSION: if (sdp->sd_vc) return EISCONN; error = smb_usr_opensession((struct smbioc_ossn*)data, &scred, &vcp); if (error) break; sdp->sd_vc = vcp; smb_vc_unlock(vcp, 0, td); sdp->sd_level = SMBL_VC; break; case SMBIOC_OPENSHARE: if (sdp->sd_share) return EISCONN; if (sdp->sd_vc == NULL) return ENOTCONN; error = smb_usr_openshare(sdp->sd_vc, (struct smbioc_oshare*)data, &scred, &ssp); if (error) break; sdp->sd_share = ssp; smb_share_unlock(ssp, 0, td); sdp->sd_level = SMBL_SHARE; break; case SMBIOC_REQUEST: if (sdp->sd_share == NULL) return ENOTCONN; error = smb_usr_simplerequest(sdp->sd_share, (struct smbioc_rq*)data, &scred); break; case SMBIOC_T2RQ: if (sdp->sd_share == NULL) return ENOTCONN; error = smb_usr_t2request(sdp->sd_share, (struct smbioc_t2rq*)data, &scred); break; case SMBIOC_SETFLAGS: { struct smbioc_flags *fl = (struct smbioc_flags*)data; int on; if (fl->ioc_level == SMBL_VC) { if (fl->ioc_mask & SMBV_PERMANENT) { on = fl->ioc_flags & SMBV_PERMANENT; if ((vcp = sdp->sd_vc) == NULL) return ENOTCONN; error = smb_vc_get(vcp, LK_EXCLUSIVE, &scred); if (error) break; if (on && (vcp->obj.co_flags & SMBV_PERMANENT) == 0) { vcp->obj.co_flags |= SMBV_PERMANENT; smb_vc_ref(vcp); } else if (!on && (vcp->obj.co_flags & SMBV_PERMANENT)) { vcp->obj.co_flags &= ~SMBV_PERMANENT; smb_vc_rele(vcp, &scred); } smb_vc_put(vcp, &scred); } else error = EINVAL; } else if (fl->ioc_level == SMBL_SHARE) { if (fl->ioc_mask & SMBS_PERMANENT) { on = fl->ioc_flags & SMBS_PERMANENT; if ((ssp = sdp->sd_share) == NULL) return ENOTCONN; error = smb_share_get(ssp, LK_EXCLUSIVE, &scred); if (error) break; if (on && (ssp->obj.co_flags & SMBS_PERMANENT) == 0) { ssp->obj.co_flags |= SMBS_PERMANENT; smb_share_ref(ssp); } else if (!on && (ssp->obj.co_flags & SMBS_PERMANENT)) { ssp->obj.co_flags &= ~SMBS_PERMANENT; smb_share_rele(ssp, &scred); } smb_share_put(ssp, &scred); } else error = EINVAL; break; } else error = EINVAL; 
break; } case SMBIOC_LOOKUP: if (sdp->sd_vc || sdp->sd_share) return EISCONN; vcp = NULL; ssp = NULL; error = smb_usr_lookup((struct smbioc_lookup*)data, &scred, &vcp, &ssp); if (error) break; if (vcp) { sdp->sd_vc = vcp; smb_vc_unlock(vcp, 0, td); sdp->sd_level = SMBL_VC; } if (ssp) { sdp->sd_share = ssp; smb_share_unlock(ssp, 0, td); sdp->sd_level = SMBL_SHARE; } break; case SMBIOC_READ: case SMBIOC_WRITE: { struct smbioc_rw *rwrq = (struct smbioc_rw*)data; struct uio auio; struct iovec iov; if ((ssp = sdp->sd_share) == NULL) return ENOTCONN; iov.iov_base = rwrq->ioc_base; iov.iov_len = rwrq->ioc_cnt; auio.uio_iov = &iov; auio.uio_iovcnt = 1; auio.uio_offset = rwrq->ioc_offset; auio.uio_resid = rwrq->ioc_cnt; auio.uio_segflg = UIO_USERSPACE; auio.uio_rw = (cmd == SMBIOC_READ) ? UIO_READ : UIO_WRITE; auio.uio_td = td; if (cmd == SMBIOC_READ) error = smb_read(ssp, rwrq->ioc_fh, &auio, &scred); else error = smb_write(ssp, rwrq->ioc_fh, &auio, &scred); rwrq->ioc_cnt -= auio.uio_resid; break; } default: error = ENODEV; } return error; } static int nsmb_dev_load(module_t mod, int cmd, void *arg) { int error = 0; switch (cmd) { case MOD_LOAD: error = smb_sm_init(); if (error) break; error = smb_iod_init(); if (error) { smb_sm_done(); break; } nsmb_dev_tag = EVENTHANDLER_REGISTER(dev_clone, nsmb_dev_clone, 0, 1000); printf("netsmb_dev: loaded\n"); break; case MOD_UNLOAD: smb_iod_done(); error = smb_sm_done(); error = 0; EVENTHANDLER_DEREGISTER(dev_clone, nsmb_dev_tag); printf("netsmb_dev: unloaded\n"); break; default: error = EINVAL; break; } return error; } DEV_MODULE (dev_netsmb, nsmb_dev_load, 0); /* * Convert a file descriptor to appropriate smb_share pointer */ static struct file* nsmb_getfp(struct filedesc* fdp, int fd, int flag) { struct file* fp; FILEDESC_LOCK(fdp); if (((u_int)fd) >= fdp->fd_nfiles || (fp = fdp->fd_ofiles[fd]) == NULL || (fp->f_flag & flag) == 0) { FILEDESC_UNLOCK(fdp); return (NULL); } fhold(fp); FILEDESC_UNLOCK(fdp); return (fp); } int 
smb_dev2share(int fd, int mode, struct smb_cred *scred, struct smb_share **sspp) { struct file *fp; struct vnode *vp; struct smb_dev *sdp; struct smb_share *ssp; dev_t dev; int error; fp = nsmb_getfp(scred->scr_td->td_proc->p_fd, fd, FREAD | FWRITE); if (fp == NULL) return EBADF; vp = fp->f_vnode; if (vp == NULL) { fdrop(fp, curthread); return EBADF; } dev = vn_todev(vp); if (dev == NODEV) { fdrop(fp, curthread); return EBADF; } SMB_CHECKMINOR(dev); ssp = sdp->sd_share; if (ssp == NULL) { fdrop(fp, curthread); return ENOTCONN; } error = smb_share_get(ssp, LK_EXCLUSIVE, scred); if (error == 0) *sspp = ssp; fdrop(fp, curthread); return error; } Index: head/sys/netsmb/smb_rq.c =================================================================== --- head/sys/netsmb/smb_rq.c (revision 129879) +++ head/sys/netsmb/smb_rq.c (revision 129880) @@ -1,770 +1,771 @@ /* * Copyright (c) 2000-2001, Boris Popov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Boris Popov. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request"); MODULE_DEPEND(netsmb, libmchain, 1, 1, 1); static int smb_rq_reply(struct smb_rq *rqp); static int smb_rq_enqueue(struct smb_rq *rqp); static int smb_rq_getenv(struct smb_connobj *layer, struct smb_vc **vcpp, struct smb_share **sspp); static int smb_rq_new(struct smb_rq *rqp, u_char cmd); static int smb_t2_reply(struct smb_t2rq *t2p); int smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred, struct smb_rq **rqpp) { struct smb_rq *rqp; int error; MALLOC(rqp, struct smb_rq *, sizeof(*rqp), M_SMBRQ, M_WAITOK); if (rqp == NULL) return ENOMEM; error = smb_rq_init(rqp, layer, cmd, scred); rqp->sr_flags |= SMBR_ALLOCED; if (error) { smb_rq_done(rqp); return error; } *rqpp = rqp; return 0; } static char tzero[12]; int smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd, struct smb_cred *scred) { int error; bzero(rqp, sizeof(*rqp)); smb_sl_init(&rqp->sr_slock, "srslock"); error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share); if (error) return error; 
error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC); if (error) return error; if (rqp->sr_share) { error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC); if (error) return error; } rqp->sr_cred = scred; rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc); return smb_rq_new(rqp, cmd); } static int smb_rq_new(struct smb_rq *rqp, u_char cmd) { struct smb_vc *vcp = rqp->sr_vc; struct mbchain *mbp = &rqp->sr_rq; int error; u_int16_t flags2; rqp->sr_sendcnt = 0; mb_done(mbp); md_done(&rqp->sr_rp); error = mb_init(mbp); if (error) return error; mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM); mb_put_uint8(mbp, cmd); mb_put_uint32le(mbp, 0); /* DosError */ mb_put_uint8(mbp, vcp->vc_hflags); flags2 = vcp->vc_hflags2; if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY) flags2 &= ~SMB_FLAGS2_UNICODE; if (cmd == SMB_COM_NEGOTIATE) flags2 &= ~SMB_FLAGS2_SECURITY_SIGNATURE; mb_put_uint16le(mbp, flags2); if ((flags2 & SMB_FLAGS2_SECURITY_SIGNATURE) == 0) { mb_put_mem(mbp, tzero, 12, MB_MSYSTEM); rqp->sr_rqsig = NULL; } else { mb_put_uint16le(mbp, 0 /*scred->sc_p->p_pid >> 16*/); rqp->sr_rqsig = (u_int8_t *)mb_reserve(mbp, 8); mb_put_uint16le(mbp, 0); } rqp->sr_rqtid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t)); mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/); rqp->sr_rquid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t)); mb_put_uint16le(mbp, rqp->sr_mid); return 0; } void smb_rq_done(struct smb_rq *rqp) { mb_done(&rqp->sr_rq); md_done(&rqp->sr_rp); smb_sl_destroy(&rqp->sr_slock); if (rqp->sr_flags & SMBR_ALLOCED) free(rqp, M_SMBRQ); } /* * Simple request-reply exchange */ int smb_rq_simple(struct smb_rq *rqp) { struct smb_vc *vcp = rqp->sr_vc; int error = EINVAL, i; for (i = 0; i < SMB_MAXRCN; i++) { rqp->sr_flags &= ~SMBR_RESTART; rqp->sr_timo = vcp->vc_timo; rqp->sr_state = SMBRQ_NOTSENT; error = smb_rq_enqueue(rqp); if (error) return error; error = smb_rq_reply(rqp); if (error == 0) break; if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) 
!= SMBR_RESTART) break; } return error; } static int smb_rq_enqueue(struct smb_rq *rqp) { struct smb_share *ssp = rqp->sr_share; int error; if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) { return smb_iod_addrq(rqp); } for (;;) { SMBS_ST_LOCK(ssp); if (ssp->ss_flags & SMBS_RECONNECTING) { msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp), PWAIT | PDROP, "90trcn", hz); if (smb_td_intr(rqp->sr_cred->scr_td)) return EINTR; continue; } if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) { SMBS_ST_UNLOCK(ssp); } else { SMBS_ST_UNLOCK(ssp); error = smb_iod_request(rqp->sr_vc->vc_iod, SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp); if (error) return error; } error = smb_iod_addrq(rqp); if (error != EXDEV) break; } return error; } void smb_rq_wstart(struct smb_rq *rqp) { rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t)); rqp->sr_rq.mb_count = 0; } void smb_rq_wend(struct smb_rq *rqp) { if (rqp->sr_wcount == NULL) { SMBERROR("no wcount\n"); /* actually panic */ return; } if (rqp->sr_rq.mb_count & 1) SMBERROR("odd word count\n"); *rqp->sr_wcount = rqp->sr_rq.mb_count / 2; } void smb_rq_bstart(struct smb_rq *rqp) { rqp->sr_bcount = (u_short*)mb_reserve(&rqp->sr_rq, sizeof(u_short)); rqp->sr_rq.mb_count = 0; } void smb_rq_bend(struct smb_rq *rqp) { int bcnt; if (rqp->sr_bcount == NULL) { SMBERROR("no bcount\n"); /* actually panic */ return; } bcnt = rqp->sr_rq.mb_count; if (bcnt > 0xffff) SMBERROR("byte count too large (%d)\n", bcnt); *rqp->sr_bcount = htole16(bcnt); } int smb_rq_intr(struct smb_rq *rqp) { if (rqp->sr_flags & SMBR_INTR) return EINTR; return smb_td_intr(rqp->sr_cred->scr_td); } int smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp) { *mbpp = &rqp->sr_rq; return 0; } int smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp) { *mbpp = &rqp->sr_rp; return 0; } static int smb_rq_getenv(struct smb_connobj *layer, struct smb_vc **vcpp, struct smb_share **sspp) { struct smb_vc *vcp = NULL; struct smb_share *ssp = 
NULL; struct smb_connobj *cp; int error = 0; switch (layer->co_level) { case SMBL_VC: vcp = CPTOVC(layer); if (layer->co_parent == NULL) { SMBERROR("zombie VC %s\n", vcp->vc_srvname); error = EINVAL; break; } break; case SMBL_SHARE: ssp = CPTOSS(layer); cp = layer->co_parent; if (cp == NULL) { SMBERROR("zombie share %s\n", ssp->ss_name); error = EINVAL; break; } error = smb_rq_getenv(cp, &vcp, NULL); if (error) break; break; default: SMBERROR("invalid layer %d passed\n", layer->co_level); error = EINVAL; } if (vcpp) *vcpp = vcp; if (sspp) *sspp = ssp; return error; } /* * Wait for reply on the request */ static int smb_rq_reply(struct smb_rq *rqp) { struct mdchain *mdp = &rqp->sr_rp; u_int32_t tdw; u_int8_t tb; int error, rperror = 0; error = smb_iod_waitrq(rqp); if (error) return error; error = md_get_uint32(mdp, &tdw); if (error) return error; error = md_get_uint8(mdp, &tb); if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) { error = md_get_uint32le(mdp, &rqp->sr_error); } else { error = md_get_uint8(mdp, &rqp->sr_errclass); error = md_get_uint8(mdp, &tb); error = md_get_uint16le(mdp, &rqp->sr_serror); if (!error) rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror); } error = md_get_uint8(mdp, &rqp->sr_rpflags); error = md_get_uint16le(mdp, &rqp->sr_rpflags2); error = md_get_uint32(mdp, &tdw); error = md_get_uint32(mdp, &tdw); error = md_get_uint32(mdp, &tdw); error = md_get_uint16le(mdp, &rqp->sr_rptid); error = md_get_uint16le(mdp, &rqp->sr_rppid); error = md_get_uint16le(mdp, &rqp->sr_rpuid); error = md_get_uint16le(mdp, &rqp->sr_rpmid); if (error == 0 && (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE)) error = smb_rq_verify(rqp); SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n", rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid, rqp->sr_errclass, rqp->sr_serror); return error ? 
error : rperror; } #define ALIGN4(a) (((a) + 3) & ~3) /* * TRANS2 request implementation */ int smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred, struct smb_t2rq **t2pp) { struct smb_t2rq *t2p; int error; MALLOC(t2p, struct smb_t2rq *, sizeof(*t2p), M_SMBRQ, M_WAITOK); if (t2p == NULL) return ENOMEM; error = smb_t2_init(t2p, layer, setup, scred); t2p->t2_flags |= SMBT2_ALLOCED; if (error) { smb_t2_done(t2p); return error; } *t2pp = t2p; return 0; } int smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup, struct smb_cred *scred) { int error; bzero(t2p, sizeof(*t2p)); t2p->t2_source = source; t2p->t2_setupcount = 1; t2p->t2_setupdata = t2p->t2_setup; t2p->t2_setup[0] = setup; t2p->t2_fid = 0xffff; t2p->t2_cred = scred; error = smb_rq_getenv(source, &t2p->t2_vc, NULL); if (error) return error; return 0; } void smb_t2_done(struct smb_t2rq *t2p) { mb_done(&t2p->t2_tparam); mb_done(&t2p->t2_tdata); md_done(&t2p->t2_rparam); md_done(&t2p->t2_rdata); if (t2p->t2_flags & SMBT2_ALLOCED) free(t2p, M_SMBRQ); } static int smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count, struct mdchain *mdp) { struct mbuf *m, *m0; int len; m0 = m_split(mtop, offset, M_TRYWAIT); if (m0 == NULL) return EBADRPC; len = m_length(m0, &m); m->m_len -= len - count; if (mdp->md_top == NULL) { md_initm(mdp, m0); } else m_cat(mdp->md_top, m0); return 0; } static int smb_t2_reply(struct smb_t2rq *t2p) { struct mdchain *mdp; struct smb_rq *rqp = t2p->t2_rq; int error, totpgot, totdgot; u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp; u_int16_t tmp, bc, dcount; u_int8_t wc; error = smb_rq_reply(rqp); if (error) return error; if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) { /* * this is an interim response, ignore it. */ SMBRQ_SLOCK(rqp); md_next_record(&rqp->sr_rp); SMBRQ_SUNLOCK(rqp); return 0; } /* * Now we have to get all subsequent responses. The CIFS specification * says that they can be disordered which is weird. 
* TODO: timo */ totpgot = totdgot = 0; totpcount = totdcount = 0xffff; mdp = &rqp->sr_rp; for (;;) { m_dumpm(mdp->md_top); if ((error = md_get_uint8(mdp, &wc)) != 0) break; if (wc < 10) { error = ENOENT; break; } if ((error = md_get_uint16le(mdp, &tmp)) != 0) break; if (totpcount > tmp) totpcount = tmp; md_get_uint16le(mdp, &tmp); if (totdcount > tmp) totdcount = tmp; if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */ (error = md_get_uint16le(mdp, &pcount)) != 0 || (error = md_get_uint16le(mdp, &poff)) != 0 || (error = md_get_uint16le(mdp, &pdisp)) != 0) break; if (pcount != 0 && pdisp != totpgot) { SMBERROR("Can't handle disordered parameters %d:%d\n", pdisp, totpgot); error = EINVAL; break; } if ((error = md_get_uint16le(mdp, &dcount)) != 0 || (error = md_get_uint16le(mdp, &doff)) != 0 || (error = md_get_uint16le(mdp, &ddisp)) != 0) break; if (dcount != 0 && ddisp != totdgot) { SMBERROR("Can't handle disordered data\n"); error = EINVAL; break; } md_get_uint8(mdp, &wc); md_get_uint8(mdp, NULL); tmp = wc; while (tmp--) md_get_uint16(mdp, NULL); if ((error = md_get_uint16le(mdp, &bc)) != 0) break; /* tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/ if (dcount) { error = smb_t2_placedata(mdp->md_top, doff, dcount, &t2p->t2_rdata); if (error) break; } if (pcount) { error = smb_t2_placedata(mdp->md_top, poff, pcount, &t2p->t2_rparam); if (error) break; } totpgot += pcount; totdgot += dcount; if (totpgot >= totpcount && totdgot >= totdcount) { error = 0; t2p->t2_flags |= SMBT2_ALLRECV; break; } /* * We're done with this reply, look for the next one. 
*/ SMBRQ_SLOCK(rqp); md_next_record(&rqp->sr_rp); SMBRQ_SUNLOCK(rqp); error = smb_rq_reply(rqp); if (error) break; } return error; } /* * Perform a full round of TRANS2 request */ static int smb_t2_request_int(struct smb_t2rq *t2p) { struct smb_vc *vcp = t2p->t2_vc; struct smb_cred *scred = t2p->t2_cred; struct mbchain *mbp; struct mdchain *mdp, mbparam, mbdata; struct mbuf *m; struct smb_rq *rqp; int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i; int error, doff, poff, txdcount, txpcount, nmlen; m = t2p->t2_tparam.mb_top; if (m) { md_initm(&mbparam, m); /* do not free it! */ totpcount = m_fixhdr(m); if (totpcount > 0xffff) /* maxvalue for u_short */ return EINVAL; } else totpcount = 0; m = t2p->t2_tdata.mb_top; if (m) { md_initm(&mbdata, m); /* do not free it! */ totdcount = m_fixhdr(m); if (totdcount > 0xffff) return EINVAL; } else totdcount = 0; leftdcount = totdcount; leftpcount = totpcount; txmax = vcp->vc_txmax; error = smb_rq_alloc(t2p->t2_source, t2p->t_name ? SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp); if (error) return error; rqp->sr_flags |= SMBR_MULTIPACKET; t2p->t2_rq = rqp; rqp->sr_t2 = t2p; mbp = &rqp->sr_rq; smb_rq_wstart(rqp); mb_put_uint16le(mbp, totpcount); mb_put_uint16le(mbp, totdcount); mb_put_uint16le(mbp, t2p->t2_maxpcount); mb_put_uint16le(mbp, t2p->t2_maxdcount); mb_put_uint8(mbp, t2p->t2_maxscount); mb_put_uint8(mbp, 0); /* reserved */ mb_put_uint16le(mbp, 0); /* flags */ mb_put_uint32le(mbp, 0); /* Timeout */ mb_put_uint16le(mbp, 0); /* reserved 2 */ len = mb_fixhdr(mbp); /* * now we have known packet size as * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1), * and need to decide which parts should go into the first request */ nmlen = t2p->t_name ? strlen(t2p->t_name) : 0; len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1); if (len + leftpcount > txmax) { txpcount = min(leftpcount, txmax - len); poff = len; txdcount = 0; doff = 0; } else { txpcount = leftpcount; poff = txpcount ? 
len : 0; len = ALIGN4(len + txpcount); txdcount = min(leftdcount, txmax - len); doff = txdcount ? len : 0; } leftpcount -= txpcount; leftdcount -= txdcount; mb_put_uint16le(mbp, txpcount); mb_put_uint16le(mbp, poff); mb_put_uint16le(mbp, txdcount); mb_put_uint16le(mbp, doff); mb_put_uint8(mbp, t2p->t2_setupcount); mb_put_uint8(mbp, 0); for (i = 0; i < t2p->t2_setupcount; i++) mb_put_uint16le(mbp, t2p->t2_setupdata[i]); smb_rq_wend(rqp); smb_rq_bstart(rqp); /* TDUNICODE */ if (t2p->t_name) mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM); mb_put_uint8(mbp, 0); /* terminating zero */ len = mb_fixhdr(mbp); if (txpcount) { mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO); error = md_get_mbuf(&mbparam, txpcount, &m); SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax); if (error) goto freerq; mb_put_mbuf(mbp, m); } len = mb_fixhdr(mbp); if (txdcount) { mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO); error = md_get_mbuf(&mbdata, txdcount, &m); if (error) goto freerq; mb_put_mbuf(mbp, m); } smb_rq_bend(rqp); /* incredible, but thats it... */ error = smb_rq_enqueue(rqp); if (error) goto freerq; if (leftpcount == 0 && leftdcount == 0) t2p->t2_flags |= SMBT2_ALLSENT; error = smb_t2_reply(t2p); if (error) goto bad; while (leftpcount || leftdcount) { t2p->t2_flags |= SMBT2_SECONDARY; error = smb_rq_new(rqp, t2p->t_name ? SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY); if (error) goto bad; mbp = &rqp->sr_rq; smb_rq_wstart(rqp); mb_put_uint16le(mbp, totpcount); mb_put_uint16le(mbp, totdcount); len = mb_fixhdr(mbp); /* * now we have known packet size as * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one, * and need to decide which parts should go into request */ len = ALIGN4(len + 6 * 2 + 2); if (t2p->t_name == NULL) len += 2; if (len + leftpcount > txmax) { txpcount = min(leftpcount, txmax - len); poff = len; txdcount = 0; doff = 0; } else { txpcount = leftpcount; poff = txpcount ? 
len : 0; len = ALIGN4(len + txpcount); txdcount = min(leftdcount, txmax - len); doff = txdcount ? len : 0; } mb_put_uint16le(mbp, txpcount); mb_put_uint16le(mbp, poff); mb_put_uint16le(mbp, totpcount - leftpcount); mb_put_uint16le(mbp, txdcount); mb_put_uint16le(mbp, doff); mb_put_uint16le(mbp, totdcount - leftdcount); leftpcount -= txpcount; leftdcount -= txdcount; if (t2p->t_name == NULL) mb_put_uint16le(mbp, t2p->t2_fid); smb_rq_wend(rqp); smb_rq_bstart(rqp); mb_put_uint8(mbp, 0); /* name */ len = mb_fixhdr(mbp); if (txpcount) { mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO); error = md_get_mbuf(&mbparam, txpcount, &m); if (error) goto bad; mb_put_mbuf(mbp, m); } len = mb_fixhdr(mbp); if (txdcount) { mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO); error = md_get_mbuf(&mbdata, txdcount, &m); if (error) goto bad; mb_put_mbuf(mbp, m); } smb_rq_bend(rqp); rqp->sr_state = SMBRQ_NOTSENT; error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL); if (error) goto bad; } /* while left params or data */ t2p->t2_flags |= SMBT2_ALLSENT; mdp = &t2p->t2_rdata; if (mdp->md_top) { m_fixhdr(mdp->md_top); md_initm(mdp, mdp->md_top); } mdp = &t2p->t2_rparam; if (mdp->md_top) { m_fixhdr(mdp->md_top); md_initm(mdp, mdp->md_top); } bad: smb_iod_removerq(rqp); freerq: smb_rq_done(rqp); if (error) { if (rqp->sr_flags & SMBR_RESTART) t2p->t2_flags |= SMBT2_RESTART; md_done(&t2p->t2_rparam); md_done(&t2p->t2_rdata); } return error; } int smb_t2_request(struct smb_t2rq *t2p) { int error = EINVAL, i; for (i = 0; i < SMB_MAXRCN; i++) { t2p->t2_flags &= ~SMBR_RESTART; error = smb_t2_request_int(t2p); if (error == 0) break; if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART) break; } return error; } Index: head/sys/opencrypto/crypto.c =================================================================== --- head/sys/opencrypto/crypto.c (revision 129879) +++ head/sys/opencrypto/crypto.c (revision 129880) @@ -1,1246 +1,1247 @@ /* $OpenBSD: crypto.c,v 1.38 
2002/06/11 11:14:29 beck Exp $ */ /* * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include __FBSDID("$FreeBSD$"); #define CRYPTO_TIMING /* enable timing support */ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include /* XXX for M_XDATA */ /* * Crypto drivers register themselves by allocating a slot in the * crypto_drivers table with crypto_get_driverid() and then registering * each algorithm they support with crypto_register() and crypto_kregister(). */ static struct mtx crypto_drivers_mtx; /* lock on driver table */ #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) static struct cryptocap *crypto_drivers = NULL; static int crypto_drivers_num = 0; /* * There are two queues for crypto requests; one for symmetric (e.g. * cipher) operations and one for asymmetric (e.g. MOD)operations. * A single mutex is used to lock access to both queues. We could * have one per-queue but having one simplifies handling of block/unblock * operations. 
*/ static TAILQ_HEAD(,cryptop) crp_q; /* request queues */ static TAILQ_HEAD(,cryptkop) crp_kq; static struct mtx crypto_q_mtx; #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) /* * There are two queues for processing completed crypto requests; one * for the symmetric and one for the asymmetric ops. We only need one * but have two to avoid type futzing (cryptop vs. cryptkop). A single * mutex is used to lock access to both queues. Note that this lock * must be separate from the lock on request queues to insure driver * callbacks don't generate lock order reversals. */ static TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */ static TAILQ_HEAD(,cryptkop) crp_ret_kq; static struct mtx crypto_ret_q_mtx; #define CRYPTO_RETQ_LOCK() mtx_lock(&crypto_ret_q_mtx) #define CRYPTO_RETQ_UNLOCK() mtx_unlock(&crypto_ret_q_mtx) static uma_zone_t cryptop_zone; static uma_zone_t cryptodesc_zone; int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */ SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable/disable user-mode access to asymmetric crypto support"); int crypto_devallowsoft = 0; /* only use hardware crypto for asym */ SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable/disable use of software asym crypto support"); MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); static void crypto_proc(void); static struct proc *cryptoproc; static void crypto_ret_proc(void); static struct proc *cryptoretproc; static void crypto_destroy(void); static int crypto_invoke(struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp, int hint); static struct cryptostats cryptostats; SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats, cryptostats, "Crypto system statistics"); #ifdef CRYPTO_TIMING static int crypto_timing = 0; SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW, &crypto_timing, 0, 
	   "Enable/disable crypto timing support");
#endif

/*
 * One-time subsystem initialization: the driver-table, op-queue and
 * return-queue mutexes; the four request/return TAILQs; the UMA zones
 * backing cryptop/cryptodesc allocation; the initial driver table; and
 * the two worker threads (dispatch and return).  On any failure,
 * everything set up so far is torn down via crypto_destroy().
 * Returns 0 or ENOMEM / a kthread_create() error.
 */
static int
crypto_init(void)
{
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
		MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	TAILQ_INIT(&crp_ret_q);
	TAILQ_INIT(&crp_ret_kq);
	mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		error = ENOMEM;
		goto bad;
	}

	/* Table grows on demand in crypto_get_driverid(). */
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot setup crypto drivers\n");
		error = ENOMEM;
		goto bad;
	}

	/* Dispatch thread: drains crp_q/crp_kq into the drivers. */
	error = kthread_create((void (*)(void *)) crypto_proc, NULL,
		    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
			error);
		goto bad;
	}

	/* Return thread: runs completion callbacks off crp_ret_q/crp_ret_kq. */
	error = kthread_create((void (*)(void *)) crypto_ret_proc, NULL,
		    &cryptoretproc, 0, 0, "crypto returns");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
			error);
		goto bad;
	}
	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
*/ static void crypto_terminate(struct proc **pp, void *q) { struct proc *p; mtx_assert(&crypto_drivers_mtx, MA_OWNED); p = *pp; *pp = NULL; if (p) { wakeup_one(q); PROC_LOCK(p); /* NB: insure we don't miss wakeup */ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); PROC_UNLOCK(p); CRYPTO_DRIVER_LOCK(); } } static void crypto_destroy(void) { /* * Terminate any crypto threads. */ CRYPTO_DRIVER_LOCK(); crypto_terminate(&cryptoproc, &crp_q); crypto_terminate(&cryptoretproc, &crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ /* * Reclaim dynamically allocated resources. */ if (crypto_drivers != NULL) free(crypto_drivers, M_CRYPTO_DATA); if (cryptodesc_zone != NULL) uma_zdestroy(cryptodesc_zone); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); mtx_destroy(&crypto_q_mtx); mtx_destroy(&crypto_ret_q_mtx); mtx_destroy(&crypto_drivers_mtx); } /* * Initialization code, both for static and dynamic loading. */ static int crypto_modevent(module_t mod, int type, void *unused) { int error = EINVAL; switch (type) { case MOD_LOAD: error = crypto_init(); if (error == 0 && bootverbose) printf("crypto: \n"); break; case MOD_UNLOAD: /*XXX disallow if active sessions */ error = 0; crypto_destroy(); return 0; } return error; } static moduledata_t crypto_mod = { "crypto", crypto_modevent, 0 }; MODULE_VERSION(crypto, 1); DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); /* * Create a new session. */ int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard) { struct cryptoini *cr; u_int32_t hid, lid; int err = EINVAL; CRYPTO_DRIVER_LOCK(); if (crypto_drivers == NULL) goto done; /* * The algorithm we use here is pretty stupid; just use the * first driver that supports all the algorithms we need. * * XXX We need more smarts here (in real life too, but that's * XXX another story altogether). 
*/ for (hid = 0; hid < crypto_drivers_num; hid++) { struct cryptocap *cap = &crypto_drivers[hid]; /* * If it's not initialized or has remaining sessions * referencing it, skip. */ if (cap->cc_newsession == NULL || (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) continue; /* Hardware required -- ignore software drivers. */ if (hard > 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) continue; /* Software required -- ignore hardware drivers. */ if (hard < 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) continue; /* See if all the algorithms are supported. */ for (cr = cri; cr; cr = cr->cri_next) if (cap->cc_alg[cr->cri_alg] == 0) break; if (cr == NULL) { /* Ok, all algorithms are supported. */ /* * Can't do everything in one session. * * XXX Fix this. We need to inject a "virtual" session layer right * XXX about here. */ /* Call the driver initialization routine. */ lid = hid; /* Pass the driver ID. */ err = (*cap->cc_newsession)(cap->cc_arg, &lid, cri); if (err == 0) { /* XXX assert (hid &~ 0xffffff) == 0 */ /* XXX assert (cap->cc_flags &~ 0xff) == 0 */ (*sid) = ((cap->cc_flags & 0xff) << 24) | hid; (*sid) <<= 32; (*sid) |= (lid & 0xffffffff); cap->cc_sessions++; } break; } } done: CRYPTO_DRIVER_UNLOCK(); return err; } /* * Delete an existing session (or a reserved session on an unregistered * driver). */ int crypto_freesession(u_int64_t sid) { u_int32_t hid; int err; CRYPTO_DRIVER_LOCK(); if (crypto_drivers == NULL) { err = EINVAL; goto done; } /* Determine two IDs. */ hid = CRYPTO_SESID2HID(sid); if (hid >= crypto_drivers_num) { err = ENOENT; goto done; } if (crypto_drivers[hid].cc_sessions) crypto_drivers[hid].cc_sessions--; /* Call the driver cleanup routine, if available. */ if (crypto_drivers[hid].cc_freesession) err = crypto_drivers[hid].cc_freesession( crypto_drivers[hid].cc_arg, sid); else err = 0; /* * If this was the last session of a driver marked as invalid, * make the entry available for reuse. 
 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.  Returns -1 when the table
 * cannot be grown.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i;

	CRYPTO_DRIVER_LOCK();

	/*
	 * Reuse a slot whose driver has fully unregistered: no process
	 * hook, not pending cleanup, and no sessions referencing it.
	 */
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark the slot claimed */
	crypto_drivers[i].cc_flags = flags;
	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	CRYPTO_DRIVER_UNLOCK();

	return i;
}

/*
 * Map a driver id to its capability entry; NULL when the table is not
 * yet allocated or the id is out of range.  Caller holds the
 * appropriate lock.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
*/ int crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags, int (*kprocess)(void*, struct cryptkop *, int), void *karg) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; if (bootverbose) printf("crypto: driver %u registers key alg %u flags %u\n" , driverid , kalg , flags ); if (cap->cc_kprocess == NULL) { cap->cc_karg = karg; cap->cc_kprocess = kprocess; } err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Register support for a non-key-related algorithm. This routine * is called once for each such algorithm supported by a driver. */ int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, u_int32_t flags, int (*newses)(void*, u_int32_t*, struct cryptoini*), int (*freeses)(void*, u_int64_t), int (*process)(void*, struct cryptop *, int), void *arg) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); /* NB: algorithms are in the range [1..max] */ if (cap != NULL && (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. 
*/ cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; cap->cc_max_op_len[alg] = maxoplen; if (bootverbose) printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n" , driverid , alg , flags , maxoplen ); if (cap->cc_process == NULL) { cap->cc_arg = arg; cap->cc_newsession = newses; cap->cc_process = process; cap->cc_freesession = freeses; cap->cc_sessions = 0; /* Unmark */ } err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister a crypto driver. If there are pending sessions using it, * leave enough information around so that subsequent calls using those * sessions will correctly detect the driver has been unregistered and * reroute requests. */ int crypto_unregister(u_int32_t driverid, int alg) { int i, err; u_int32_t ses; struct cryptocap *cap; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) && cap->cc_alg[alg] != 0) { cap->cc_alg[alg] = 0; cap->cc_max_op_len[alg] = 0; /* Was this the last algorithm ? */ for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++) if (cap->cc_alg[i] != 0) break; if (i == CRYPTO_ALGORITHM_MAX + 1) { ses = cap->cc_sessions; bzero(cap, sizeof(struct cryptocap)); if (ses != 0) { /* * If there are pending sessions, just mark as invalid. */ cap->cc_flags |= CRYPTOCAP_F_CLEANUP; cap->cc_sessions = ses; } } err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister all algorithms associated with a crypto driver. * If there are pending sessions using it, leave enough information * around so that subsequent calls using those sessions will * correctly detect the driver has been unregistered and reroute * requests. 
*/ int crypto_unregister_all(u_int32_t driverid) { int i, err; u_int32_t ses; struct cryptocap *cap; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) { cap->cc_alg[i] = 0; cap->cc_max_op_len[i] = 0; } ses = cap->cc_sessions; bzero(cap, sizeof(struct cryptocap)); if (ses != 0) { /* * If there are pending sessions, just mark as invalid. */ cap->cc_flags |= CRYPTOCAP_F_CLEANUP; cap->cc_sessions = ses; } err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Clear blockage on a driver. The what parameter indicates whether * the driver is now ready for cryptop's and/or cryptokop's. */ int crypto_unblock(u_int32_t driverid, int what) { struct cryptocap *cap; int needwakeup, err; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { needwakeup = 0; if (what & CRYPTO_SYMQ) { needwakeup |= cap->cc_qblocked; cap->cc_qblocked = 0; } if (what & CRYPTO_ASYMQ) { needwakeup |= cap->cc_kqblocked; cap->cc_kqblocked = 0; } if (needwakeup) wakeup_one(&crp_q); err = 0; } else err = EINVAL; CRYPTO_Q_UNLOCK(); return err; } /* * Add a crypto request to a queue, to be processed by the kernel thread. */ int crypto_dispatch(struct cryptop *crp) { u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid); int result; cryptostats.cs_ops++; #ifdef CRYPTO_TIMING if (crypto_timing) binuptime(&crp->crp_tstamp); #endif CRYPTO_Q_LOCK(); if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { struct cryptocap *cap; /* * Caller marked the request to be processed * immediately; dispatch it directly to the * driver unless the driver is currently blocked. */ cap = crypto_checkdriver(hid); if (cap && !cap->cc_qblocked) { result = crypto_invoke(crp, 0); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptop's and put * the request on the queue. 
* * XXX ops are placed at the tail so their * order is preserved but this can place them * behind batch'd ops. */ crypto_drivers[hid].cc_qblocked = 1; TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); cryptostats.cs_blocks++; result = 0; } } else { /* * The driver is blocked, just queue the op until * it unblocks and the kernel thread gets kicked. */ TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); result = 0; } } else { int wasempty; /* * Caller marked the request as ``ok to delay''; * queue it for the dispatch thread. This is desirable * when the operation is low priority and/or suitable * for batching. */ wasempty = TAILQ_EMPTY(&crp_q); TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); if (wasempty) wakeup_one(&crp_q); result = 0; } CRYPTO_Q_UNLOCK(); return result; } /* * Add an asymetric crypto request to a queue, * to be processed by the kernel thread. */ int crypto_kdispatch(struct cryptkop *krp) { struct cryptocap *cap; int result; cryptostats.cs_kops++; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(krp->krp_hid); if (cap && !cap->cc_kqblocked) { result = crypto_kinvoke(krp, 0); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ crypto_drivers[krp->krp_hid].cc_kqblocked = 1; TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); cryptostats.cs_kblocks++; } } else { /* * The driver is blocked, just queue the op until * it unblocks and the kernel thread gets kicked. */ TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); result = 0; } CRYPTO_Q_UNLOCK(); return result; } /* * Dispatch an assymetric crypto request to the appropriate crypto devices. */ static int crypto_kinvoke(struct cryptkop *krp, int hint) { u_int32_t hid; int error; mtx_assert(&crypto_q_mtx, MA_OWNED); /* Sanity checks. 
*/ if (krp == NULL) return EINVAL; if (krp->krp_callback == NULL) { free(krp, M_XDATA); /* XXX allocated in cryptodev */ return EINVAL; } for (hid = 0; hid < crypto_drivers_num; hid++) { if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft) continue; if (crypto_drivers[hid].cc_kprocess == NULL) continue; if ((crypto_drivers[hid].cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) == 0) continue; break; } if (hid < crypto_drivers_num) { krp->krp_hid = hid; error = crypto_drivers[hid].cc_kprocess( crypto_drivers[hid].cc_karg, krp, hint); } else error = ENODEV; if (error) { krp->krp_status = error; crypto_kdone(krp); } return 0; } #ifdef CRYPTO_TIMING static void crypto_tstat(struct cryptotstat *ts, struct bintime *bt) { struct bintime now, delta; struct timespec t; uint64_t u; binuptime(&now); u = now.frac; delta.frac = now.frac - bt->frac; delta.sec = now.sec - bt->sec; if (u < delta.frac) delta.sec--; bintime2timespec(&delta, &t); timespecadd(&ts->acc, &t); if (timespeccmp(&t, &ts->min, <)) ts->min = t; if (timespeccmp(&t, &ts->max, >)) ts->max = t; ts->count++; *bt = now; } #endif /* * Dispatch a crypto request to the appropriate crypto devices. */ static int crypto_invoke(struct cryptop *crp, int hint) { u_int32_t hid; int (*process)(void*, struct cryptop *, int); #ifdef CRYPTO_TIMING if (crypto_timing) crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp); #endif /* Sanity checks. 
*/ if (crp == NULL) return EINVAL; if (crp->crp_callback == NULL) { crypto_freereq(crp); return EINVAL; } if (crp->crp_desc == NULL) { crp->crp_etype = EINVAL; crypto_done(crp); return 0; } hid = CRYPTO_SESID2HID(crp->crp_sid); if (hid < crypto_drivers_num) { if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) crypto_freesession(crp->crp_sid); process = crypto_drivers[hid].cc_process; } else { process = NULL; } if (process == NULL) { struct cryptodesc *crd; u_int64_t nid; /* * Driver has unregistered; migrate the session and return * an error to the caller so they'll resubmit the op. */ for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next) crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI); if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0) crp->crp_sid = nid; crp->crp_etype = EAGAIN; crypto_done(crp); return 0; } else { /* * Invoke the driver to process the request. */ return (*process)(crypto_drivers[hid].cc_arg, crp, hint); } } /* * Release a set of crypto descriptors. */ void crypto_freereq(struct cryptop *crp) { struct cryptodesc *crd; if (crp == NULL) return; while ((crd = crp->crp_desc) != NULL) { crp->crp_desc = crd->crd_next; uma_zfree(cryptodesc_zone, crd); } uma_zfree(cryptop_zone, crp); } /* * Acquire a set of crypto descriptors. */ struct cryptop * crypto_getreq(int num) { struct cryptodesc *crd; struct cryptop *crp; crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO); if (crp != NULL) { while (num--) { crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO); if (crd == NULL) { crypto_freereq(crp); return NULL; } crd->crd_next = crp->crp_desc; crp->crp_desc = crd; } } return crp; } /* * Invoke the callback on behalf of the driver. 
 */
/*
 * crypto_done: mark a symmetric op complete (CRYPTO_F_DONE) and deliver
 * its callback — inline when CRYPTO_F_CBIMM is set, or when
 * CRYPTO_F_CBIFSYNC is set and the session's driver is synchronous;
 * otherwise hand it to the return thread via crp_ret_q.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
		("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct bintime t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		int wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETQ_LOCK();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);

		if (wasempty)
			wakeup_one(&crp_ret_q);	/* shared wait channel */
		CRYPTO_RETQ_UNLOCK();
	}
}

/*
 * Invoke the callback on behalf of the driver.
*/ void crypto_kdone(struct cryptkop *krp) { int wasempty; if (krp->krp_status != 0) cryptostats.cs_kerrs++; CRYPTO_RETQ_LOCK(); wasempty = TAILQ_EMPTY(&crp_ret_kq); TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next); if (wasempty) wakeup_one(&crp_ret_q); /* shared wait channel */ CRYPTO_RETQ_UNLOCK(); } int crypto_getfeat(int *featp) { int hid, kalg, feat = 0; if (!crypto_userasymcrypto) goto out; CRYPTO_DRIVER_LOCK(); for (hid = 0; hid < crypto_drivers_num; hid++) { if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft) { continue; } if (crypto_drivers[hid].cc_kprocess == NULL) continue; for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) if ((crypto_drivers[hid].cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) != 0) feat |= 1 << kalg; } CRYPTO_DRIVER_UNLOCK(); out: *featp = feat; return (0); } /* * Terminate a thread at module unload. The process that * initiated this is waiting for us to signal that we're gone; * wake it up and exit. We use the driver table lock to insure * we don't do the wakeup before they're waiting. There is no * race here because the waiter sleeps on the proc lock for the * thread so it gets notified at the right time because of an * extra wakeup that's done in exit1(). */ static void crypto_finis(void *chan) { CRYPTO_DRIVER_LOCK(); wakeup_one(chan); CRYPTO_DRIVER_UNLOCK(); kthread_exit(0); } /* * Crypto thread, dispatches crypto requests. */ static void crypto_proc(void) { struct cryptop *crp, *submit; struct cryptkop *krp; struct cryptocap *cap; int result, hint; CRYPTO_Q_LOCK(); for (;;) { /* * Find the first element in the queue that can be * processed and look-ahead to see if multiple ops * are ready for the same driver. */ submit = NULL; hint = 0; TAILQ_FOREACH(crp, &crp_q, crp_next) { u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid); cap = crypto_checkdriver(hid); if (cap == NULL || cap->cc_process == NULL) { /* Op needs to be migrated, process it. 
*/ if (submit == NULL) submit = crp; break; } if (!cap->cc_qblocked) { if (submit != NULL) { /* * We stop on finding another op, * regardless whether its for the same * driver or not. We could keep * searching the queue but it might be * better to just use a per-driver * queue instead. */ if (CRYPTO_SESID2HID(submit->crp_sid) == hid) hint = CRYPTO_HINT_MORE; break; } else { submit = crp; if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) break; /* keep scanning for more are q'd */ } } } if (submit != NULL) { TAILQ_REMOVE(&crp_q, submit, crp_next); result = crypto_invoke(submit, hint); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ /* XXX validate sid again? */ crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1; TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); cryptostats.cs_blocks++; } } /* As above, but for key ops */ TAILQ_FOREACH(krp, &crp_kq, krp_next) { cap = crypto_checkdriver(krp->krp_hid); if (cap == NULL || cap->cc_kprocess == NULL) { /* Op needs to be migrated, process it. */ break; } if (!cap->cc_kqblocked) break; } if (krp != NULL) { TAILQ_REMOVE(&crp_kq, krp, krp_next); result = crypto_kinvoke(krp, 0); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ /* XXX validate sid again? */ crypto_drivers[krp->krp_hid].cc_kqblocked = 1; TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); cryptostats.cs_kblocks++; } } if (submit == NULL && krp == NULL) { /* * Nothing more to be processed. 
Sleep until we're * woken because there are more ops to process. * This happens either by submission or by a driver * becoming unblocked and notifying us through * crypto_unblock. Note that when we wakeup we * start processing each queue again from the * front. It's not clear that it's important to * preserve this ordering since ops may finish * out of order if dispatched to different devices * and some become blocked while others do not. */ msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); if (cryptoproc == NULL) break; cryptostats.cs_intrs++; } } CRYPTO_Q_UNLOCK(); crypto_finis(&crp_q); } /* * Crypto returns thread, does callbacks for processed crypto requests. * Callbacks are done here, rather than in the crypto drivers, because * callbacks typically are expensive and would slow interrupt handling. */ static void crypto_ret_proc(void) { struct cryptop *crpt; struct cryptkop *krpt; CRYPTO_RETQ_LOCK(); for (;;) { /* Harvest return q's for completed ops */ crpt = TAILQ_FIRST(&crp_ret_q); if (crpt != NULL) TAILQ_REMOVE(&crp_ret_q, crpt, crp_next); krpt = TAILQ_FIRST(&crp_ret_kq); if (krpt != NULL) TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { CRYPTO_RETQ_UNLOCK(); /* * Run callbacks unlocked. */ if (crpt != NULL) { #ifdef CRYPTO_TIMING if (crypto_timing) { /* * NB: We must copy the timestamp before * doing the callback as the cryptop is * likely to be reclaimed. */ struct bintime t = crpt->crp_tstamp; crypto_tstat(&cryptostats.cs_cb, &t); crpt->crp_callback(crpt); crypto_tstat(&cryptostats.cs_finis, &t); } else #endif crpt->crp_callback(crpt); } if (krpt != NULL) krpt->krp_callback(krpt); CRYPTO_RETQ_LOCK(); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process. 
*/ msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT, "crypto_ret_wait", 0); if (cryptoretproc == NULL) break; cryptostats.cs_rets++; } } CRYPTO_RETQ_UNLOCK(); crypto_finis(&crp_ret_q); } Index: head/sys/opencrypto/cryptodev.c =================================================================== --- head/sys/opencrypto/cryptodev.c (revision 129879) +++ head/sys/opencrypto/cryptodev.c (revision 129880) @@ -1,808 +1,809 @@ /* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */ /* * Copyright (c) 2001 Theo de Raadt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include struct csession { TAILQ_ENTRY(csession) next; u_int64_t sid; u_int32_t ses; struct mtx lock; /* for op submission */ u_int32_t cipher; struct enc_xform *txform; u_int32_t mac; struct auth_hash *thash; caddr_t key; int keylen; u_char tmp_iv[EALG_MAX_BLOCK_LEN]; caddr_t mackey; int mackeylen; u_char tmp_mac[CRYPTO_MAX_MAC_LEN]; struct iovec iovec; struct uio uio; int error; }; struct fcrypt { TAILQ_HEAD(csessionlist, csession) csessions; int sesn; }; static int cryptof_rw(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *); static int cryptof_ioctl(struct file *, u_long, void *, struct ucred *, struct thread *); static int cryptof_poll(struct file *, int, struct ucred *, struct thread *); static int cryptof_kqfilter(struct file *, struct knote *); static int cryptof_stat(struct file *, struct stat *, struct ucred *, struct thread *); static int cryptof_close(struct file *, struct thread *); static struct fileops cryptofops = { .fo_read = cryptof_rw, .fo_write = cryptof_rw, .fo_ioctl = cryptof_ioctl, .fo_poll = cryptof_poll, .fo_kqfilter = cryptof_kqfilter, .fo_stat = cryptof_stat, .fo_close = cryptof_close }; static struct csession *csefind(struct fcrypt *, u_int); static int csedelete(struct fcrypt *, struct csession *); static struct csession *cseadd(struct fcrypt *, struct csession *); static struct csession *csecreate(struct fcrypt *, u_int64_t, caddr_t, u_int64_t, caddr_t, u_int64_t, u_int32_t, u_int32_t, struct enc_xform *, struct auth_hash *); static int csefree(struct csession *); static int cryptodev_op(struct csession *, struct crypt_op 
*, struct ucred *, struct thread *td); static int cryptodev_key(struct crypt_kop *); static int cryptof_rw( struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { return (EIO); } /* ARGSUSED */ static int cryptof_ioctl( struct file *fp, u_long cmd, void *data, struct ucred *active_cred, struct thread *td) { struct cryptoini cria, crie; struct fcrypt *fcr = fp->f_data; struct csession *cse; struct session_op *sop; struct crypt_op *cop; struct enc_xform *txform = NULL; struct auth_hash *thash = NULL; u_int64_t sid; u_int32_t ses; int error = 0; switch (cmd) { case CIOCGSESSION: sop = (struct session_op *)data; switch (sop->cipher) { case 0: break; case CRYPTO_DES_CBC: txform = &enc_xform_des; break; case CRYPTO_3DES_CBC: txform = &enc_xform_3des; break; case CRYPTO_BLF_CBC: txform = &enc_xform_blf; break; case CRYPTO_CAST_CBC: txform = &enc_xform_cast5; break; case CRYPTO_SKIPJACK_CBC: txform = &enc_xform_skipjack; break; case CRYPTO_AES_CBC: txform = &enc_xform_rijndael128; break; case CRYPTO_NULL_CBC: txform = &enc_xform_null; break; case CRYPTO_ARC4: txform = &enc_xform_arc4; break; default: return (EINVAL); } switch (sop->mac) { case 0: break; case CRYPTO_MD5_HMAC: thash = &auth_hash_hmac_md5_96; break; case CRYPTO_SHA1_HMAC: thash = &auth_hash_hmac_sha1_96; break; case CRYPTO_SHA2_HMAC: if (sop->mackeylen == auth_hash_hmac_sha2_256.keysize) thash = &auth_hash_hmac_sha2_256; else if (sop->mackeylen == auth_hash_hmac_sha2_384.keysize) thash = &auth_hash_hmac_sha2_384; else if (sop->mackeylen == auth_hash_hmac_sha2_512.keysize) thash = &auth_hash_hmac_sha2_512; else return (EINVAL); break; case CRYPTO_RIPEMD160_HMAC: thash = &auth_hash_hmac_ripemd_160_96; break; #ifdef notdef case CRYPTO_MD5: thash = &auth_hash_md5; break; case CRYPTO_SHA1: thash = &auth_hash_sha1; break; #endif case CRYPTO_NULL_HMAC: thash = &auth_hash_null; break; default: return (EINVAL); } bzero(&crie, sizeof(crie)); bzero(&cria, sizeof(cria)); if (txform) 
{ crie.cri_alg = txform->type; crie.cri_klen = sop->keylen * 8; if (sop->keylen > txform->maxkey || sop->keylen < txform->minkey) { error = EINVAL; goto bail; } MALLOC(crie.cri_key, u_int8_t *, crie.cri_klen / 8, M_XDATA, M_WAITOK); if ((error = copyin(sop->key, crie.cri_key, crie.cri_klen / 8))) goto bail; if (thash) crie.cri_next = &cria; } if (thash) { cria.cri_alg = thash->type; cria.cri_klen = sop->mackeylen * 8; if (sop->mackeylen != thash->keysize) { error = EINVAL; goto bail; } if (cria.cri_klen) { MALLOC(cria.cri_key, u_int8_t *, cria.cri_klen / 8, M_XDATA, M_WAITOK); if ((error = copyin(sop->mackey, cria.cri_key, cria.cri_klen / 8))) goto bail; } } error = crypto_newsession(&sid, (txform ? &crie : &cria), 1); if (error) goto bail; cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen, cria.cri_key, cria.cri_klen, sop->cipher, sop->mac, txform, thash); if (cse == NULL) { crypto_freesession(sid); error = EINVAL; goto bail; } sop->ses = cse->ses; bail: if (error) { if (crie.cri_key) FREE(crie.cri_key, M_XDATA); if (cria.cri_key) FREE(cria.cri_key, M_XDATA); } break; case CIOCFSESSION: ses = *(u_int32_t *)data; cse = csefind(fcr, ses); if (cse == NULL) return (EINVAL); csedelete(fcr, cse); error = csefree(cse); break; case CIOCCRYPT: cop = (struct crypt_op *)data; cse = csefind(fcr, cop->ses); if (cse == NULL) return (EINVAL); error = cryptodev_op(cse, cop, active_cred, td); break; case CIOCKEY: error = cryptodev_key((struct crypt_kop *)data); break; case CIOCASYMFEAT: error = crypto_getfeat((int *)data); break; default: error = EINVAL; } return (error); } static int cryptodev_cb(void *); static int cryptodev_op( struct csession *cse, struct crypt_op *cop, struct ucred *active_cred, struct thread *td) { struct cryptop *crp = NULL; struct cryptodesc *crde = NULL, *crda = NULL; int error; if (cop->len > 256*1024-4) return (E2BIG); if (cse->txform && (cop->len % cse->txform->blocksize) != 0) return (EINVAL); cse->uio.uio_iov = &cse->iovec; cse->uio.uio_iovcnt = 
1; cse->uio.uio_offset = 0; cse->uio.uio_resid = cop->len; cse->uio.uio_segflg = UIO_SYSSPACE; cse->uio.uio_rw = UIO_WRITE; cse->uio.uio_td = td; cse->uio.uio_iov[0].iov_len = cop->len; cse->uio.uio_iov[0].iov_base = malloc(cop->len, M_XDATA, M_WAITOK); crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL)); if (crp == NULL) { error = ENOMEM; goto bail; } if (cse->thash) { crda = crp->crp_desc; if (cse->txform) crde = crda->crd_next; } else { if (cse->txform) crde = crp->crp_desc; else { error = EINVAL; goto bail; } } if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len))) goto bail; if (crda) { crda->crd_skip = 0; crda->crd_len = cop->len; crda->crd_inject = 0; /* ??? */ crda->crd_alg = cse->mac; crda->crd_key = cse->mackey; crda->crd_klen = cse->mackeylen * 8; } if (crde) { if (cop->op == COP_ENCRYPT) crde->crd_flags |= CRD_F_ENCRYPT; else crde->crd_flags &= ~CRD_F_ENCRYPT; crde->crd_len = cop->len; crde->crd_inject = 0; crde->crd_alg = cse->cipher; crde->crd_key = cse->key; crde->crd_klen = cse->keylen * 8; } crp->crp_ilen = cop->len; crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM | (cop->flags & COP_F_BATCH); crp->crp_buf = (caddr_t)&cse->uio; crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb; crp->crp_sid = cse->sid; crp->crp_opaque = (void *)cse; if (cop->iv) { if (crde == NULL) { error = EINVAL; goto bail; } if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */ error = EINVAL; goto bail; } if ((error = copyin(cop->iv, cse->tmp_iv, cse->txform->blocksize))) goto bail; bcopy(cse->tmp_iv, crde->crd_iv, cse->txform->blocksize); crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; crde->crd_skip = 0; } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? 
*/ crde->crd_skip = 0; } else if (crde) { crde->crd_flags |= CRD_F_IV_PRESENT; crde->crd_skip = cse->txform->blocksize; crde->crd_len -= cse->txform->blocksize; } if (cop->mac) { if (crda == NULL) { error = EINVAL; goto bail; } crp->crp_mac=cse->tmp_mac; } /* * Let the dispatch run unlocked, then, interlock against the * callback before checking if the operation completed and going * to sleep. This insures drivers don't inherit our lock which * results in a lock order reversal between crypto_dispatch forced * entry and the crypto_done callback into us. */ error = crypto_dispatch(crp); mtx_lock(&cse->lock); if (error == 0 && (crp->crp_flags & CRYPTO_F_DONE) == 0) error = msleep(crp, &cse->lock, PWAIT, "crydev", 0); mtx_unlock(&cse->lock); if (error != 0) goto bail; if (crp->crp_etype != 0) { error = crp->crp_etype; goto bail; } if (cse->error) { error = cse->error; goto bail; } if (cop->dst && (error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, cop->len))) goto bail; if (cop->mac && (error = copyout(crp->crp_mac, cop->mac, cse->thash->authsize))) goto bail; bail: if (crp) crypto_freereq(crp); if (cse->uio.uio_iov[0].iov_base) free(cse->uio.uio_iov[0].iov_base, M_XDATA); return (error); } static int cryptodev_cb(void *op) { struct cryptop *crp = (struct cryptop *) op; struct csession *cse = (struct csession *)crp->crp_opaque; cse->error = crp->crp_etype; if (crp->crp_etype == EAGAIN) return crypto_dispatch(crp); mtx_lock(&cse->lock); wakeup_one(crp); mtx_unlock(&cse->lock); return (0); } static int cryptodevkey_cb(void *op) { struct cryptkop *krp = (struct cryptkop *) op; wakeup(krp); return (0); } static int cryptodev_key(struct crypt_kop *kop) { struct cryptkop *krp = NULL; int error = EINVAL; int in, out, size, i; if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) { return (EFBIG); } in = kop->crk_iparams; out = kop->crk_oparams; switch (kop->crk_op) { case CRK_MOD_EXP: if (in == 3 && out == 1) break; return (EINVAL); case CRK_MOD_EXP_CRT: if (in == 6 && 
out == 1) break; return (EINVAL); case CRK_DSA_SIGN: if (in == 5 && out == 2) break; return (EINVAL); case CRK_DSA_VERIFY: if (in == 7 && out == 0) break; return (EINVAL); case CRK_DH_COMPUTE_KEY: if (in == 3 && out == 1) break; return (EINVAL); default: return (EINVAL); } krp = (struct cryptkop *)malloc(sizeof *krp, M_XDATA, M_WAITOK); if (!krp) return (ENOMEM); bzero(krp, sizeof *krp); krp->krp_op = kop->crk_op; krp->krp_status = kop->crk_status; krp->krp_iparams = kop->crk_iparams; krp->krp_oparams = kop->crk_oparams; krp->krp_status = 0; krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb; for (i = 0; i < CRK_MAXPARAM; i++) krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits; for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) { size = (krp->krp_param[i].crp_nbits + 7) / 8; if (size == 0) continue; MALLOC(krp->krp_param[i].crp_p, caddr_t, size, M_XDATA, M_WAITOK); if (i >= krp->krp_iparams) continue; error = copyin(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p, size); if (error) goto fail; } error = crypto_kdispatch(krp); if (error) goto fail; error = tsleep(krp, PSOCK, "crydev", 0); if (error) { /* XXX can this happen? if so, how do we recover? 
*/ goto fail; } if (krp->krp_status != 0) { error = krp->krp_status; goto fail; } for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) { size = (krp->krp_param[i].crp_nbits + 7) / 8; if (size == 0) continue; error = copyout(krp->krp_param[i].crp_p, kop->crk_param[i].crp_p, size); if (error) goto fail; } fail: if (krp) { kop->crk_status = krp->krp_status; for (i = 0; i < CRK_MAXPARAM; i++) { if (krp->krp_param[i].crp_p) FREE(krp->krp_param[i].crp_p, M_XDATA); } free(krp, M_XDATA); } return (error); } /* ARGSUSED */ static int cryptof_poll( struct file *fp, int events, struct ucred *active_cred, struct thread *td) { return (0); } /* ARGSUSED */ static int cryptof_kqfilter(struct file *fp, struct knote *kn) { return (0); } /* ARGSUSED */ static int cryptof_stat( struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { return (EOPNOTSUPP); } /* ARGSUSED */ static int cryptof_close(struct file *fp, struct thread *td) { struct fcrypt *fcr = fp->f_data; struct csession *cse; while ((cse = TAILQ_FIRST(&fcr->csessions))) { TAILQ_REMOVE(&fcr->csessions, cse, next); (void)csefree(cse); } FREE(fcr, M_XDATA); fp->f_data = NULL; return 0; } static struct csession * csefind(struct fcrypt *fcr, u_int ses) { struct csession *cse; TAILQ_FOREACH(cse, &fcr->csessions, next) if (cse->ses == ses) return (cse); return (NULL); } static int csedelete(struct fcrypt *fcr, struct csession *cse_del) { struct csession *cse; TAILQ_FOREACH(cse, &fcr->csessions, next) { if (cse == cse_del) { TAILQ_REMOVE(&fcr->csessions, cse, next); return (1); } } return (0); } static struct csession * cseadd(struct fcrypt *fcr, struct csession *cse) { TAILQ_INSERT_TAIL(&fcr->csessions, cse, next); cse->ses = fcr->sesn++; return (cse); } struct csession * csecreate(struct fcrypt *fcr, u_int64_t sid, caddr_t key, u_int64_t keylen, caddr_t mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac, struct enc_xform *txform, struct auth_hash *thash) { struct csession 
*cse; #ifdef INVARIANTS /* NB: required when mtx_init is built with INVARIANTS */ MALLOC(cse, struct csession *, sizeof(struct csession), M_XDATA, M_NOWAIT | M_ZERO); #else MALLOC(cse, struct csession *, sizeof(struct csession), M_XDATA, M_NOWAIT); #endif if (cse == NULL) return NULL; mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF); cse->key = key; cse->keylen = keylen/8; cse->mackey = mackey; cse->mackeylen = mackeylen/8; cse->sid = sid; cse->cipher = cipher; cse->mac = mac; cse->txform = txform; cse->thash = thash; cseadd(fcr, cse); return (cse); } static int csefree(struct csession *cse) { int error; error = crypto_freesession(cse->sid); mtx_destroy(&cse->lock); if (cse->key) FREE(cse->key, M_XDATA); if (cse->mackey) FREE(cse->mackey, M_XDATA); FREE(cse, M_XDATA); return (error); } static int cryptoopen(dev_t dev, int oflags, int devtype, struct thread *td) { return (0); } static int cryptoread(dev_t dev, struct uio *uio, int ioflag) { return (EIO); } static int cryptowrite(dev_t dev, struct uio *uio, int ioflag) { return (EIO); } static int cryptoioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct file *f; struct fcrypt *fcr; int fd, error; switch (cmd) { case CRIOGET: MALLOC(fcr, struct fcrypt *, sizeof(struct fcrypt), M_XDATA, M_WAITOK); TAILQ_INIT(&fcr->csessions); fcr->sesn = 0; error = falloc(td, &f, &fd); if (error) { FREE(fcr, M_XDATA); return (error); } /* falloc automatically provides an extra reference to 'f'. 
*/ f->f_flag = FREAD | FWRITE; f->f_type = DTYPE_CRYPTO; f->f_ops = &cryptofops; f->f_data = fcr; *(u_int32_t *)data = fd; fdrop(f, td); break; default: error = EINVAL; break; } return (error); } #define CRYPTO_MAJOR 70 /* from openbsd */ static struct cdevsw crypto_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = cryptoopen, .d_read = cryptoread, .d_write = cryptowrite, .d_ioctl = cryptoioctl, .d_name = "crypto", .d_maj = CRYPTO_MAJOR, }; static dev_t crypto_dev; /* * Initialization code, both for static and dynamic loading. */ static int cryptodev_modevent(module_t mod, int type, void *unused) { switch (type) { case MOD_LOAD: if (bootverbose) printf("crypto: \n"); crypto_dev = make_dev(&crypto_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "crypto"); return 0; case MOD_UNLOAD: /*XXX disallow if active sessions */ destroy_dev(crypto_dev); return 0; } return EINVAL; } static moduledata_t cryptodev_mod = { "cryptodev", cryptodev_modevent, 0 }; MODULE_VERSION(cryptodev, 1); DECLARE_MODULE(cryptodev, cryptodev_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_DEPEND(cryptodev, crypto, 1, 1, 1); Index: head/sys/security/mac/mac_pipe.c =================================================================== --- head/sys/security/mac/mac_pipe.c (revision 129879) +++ head/sys/security/mac/mac_pipe.c (revision 129880) @@ -1,249 +1,250 @@ /*- * Copyright (c) 2002, 2003 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project in part by Network * Associates Laboratories, the Security Research Division of Network * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), * as part of the DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_mac.h" #include #include #include #include +#include #include #include #include #include #include #include #include #include #include static int mac_enforce_pipe = 1; SYSCTL_INT(_security_mac, OID_AUTO, enforce_pipe, CTLFLAG_RW, &mac_enforce_pipe, 0, "Enforce MAC policy on pipe operations"); TUNABLE_INT("security.mac.enforce_pipe", &mac_enforce_pipe); #ifdef MAC_DEBUG static unsigned int nmacpipes; SYSCTL_UINT(_security_mac_debug_counters, OID_AUTO, pipes, CTLFLAG_RD, &nmacpipes, 0, "number of pipes in use"); #endif struct label * mac_pipe_label_alloc(void) { struct label *label; label = mac_labelzone_alloc(M_WAITOK); MAC_PERFORM(init_pipe_label, label); MAC_DEBUG_COUNTER_INC(&nmacpipes); return (label); } void mac_init_pipe(struct pipepair *pp) { pp->pp_label = mac_pipe_label_alloc(); } void mac_pipe_label_free(struct label *label) { MAC_PERFORM(destroy_pipe_label, label); mac_labelzone_free(label); MAC_DEBUG_COUNTER_DEC(&nmacpipes); } void mac_destroy_pipe(struct 
pipepair *pp) { mac_pipe_label_free(pp->pp_label); pp->pp_label = NULL; } void mac_copy_pipe_label(struct label *src, struct label *dest) { MAC_PERFORM(copy_pipe_label, src, dest); } int mac_externalize_pipe_label(struct label *label, char *elements, char *outbuf, size_t outbuflen) { int error; MAC_EXTERNALIZE(pipe, label, elements, outbuf, outbuflen); return (error); } int mac_internalize_pipe_label(struct label *label, char *string) { int error; MAC_INTERNALIZE(pipe, label, string); return (error); } void mac_create_pipe(struct ucred *cred, struct pipepair *pp) { MAC_PERFORM(create_pipe, cred, pp, pp->pp_label); } static void mac_relabel_pipe(struct ucred *cred, struct pipepair *pp, struct label *newlabel) { MAC_PERFORM(relabel_pipe, cred, pp, pp->pp_label, newlabel); } int mac_check_pipe_ioctl(struct ucred *cred, struct pipepair *pp, unsigned long cmd, void *data) { int error; mtx_assert(&pp->pp_mtx, MA_OWNED); if (!mac_enforce_pipe) return (0); MAC_CHECK(check_pipe_ioctl, cred, pp, pp->pp_label, cmd, data); return (error); } int mac_check_pipe_poll(struct ucred *cred, struct pipepair *pp) { int error; mtx_assert(&pp->pp_mtx, MA_OWNED); if (!mac_enforce_pipe) return (0); MAC_CHECK(check_pipe_poll, cred, pp, pp->pp_label); return (error); } int mac_check_pipe_read(struct ucred *cred, struct pipepair *pp) { int error; mtx_assert(&pp->pp_mtx, MA_OWNED); if (!mac_enforce_pipe) return (0); MAC_CHECK(check_pipe_read, cred, pp, pp->pp_label); return (error); } static int mac_check_pipe_relabel(struct ucred *cred, struct pipepair *pp, struct label *newlabel) { int error; mtx_assert(&pp->pp_mtx, MA_OWNED); if (!mac_enforce_pipe) return (0); MAC_CHECK(check_pipe_relabel, cred, pp, pp->pp_label, newlabel); return (error); } int mac_check_pipe_stat(struct ucred *cred, struct pipepair *pp) { int error; mtx_assert(&pp->pp_mtx, MA_OWNED); if (!mac_enforce_pipe) return (0); MAC_CHECK(check_pipe_stat, cred, pp, pp->pp_label); return (error); } int mac_check_pipe_write(struct 
ucred *cred, struct pipepair *pp) { int error; mtx_assert(&pp->pp_mtx, MA_OWNED); if (!mac_enforce_pipe) return (0); MAC_CHECK(check_pipe_write, cred, pp, pp->pp_label); return (error); } int mac_pipe_label_set(struct ucred *cred, struct pipepair *pp, struct label *label) { int error; mtx_assert(&pp->pp_mtx, MA_OWNED); error = mac_check_pipe_relabel(cred, pp, label); if (error) return (error); mac_relabel_pipe(cred, pp, label); return (0); } Index: head/sys/security/mac/mac_system.c =================================================================== --- head/sys/security/mac/mac_system.c (revision 129879) +++ head/sys/security/mac/mac_system.c (revision 129880) @@ -1,267 +1,268 @@ /*- * Copyright (c) 2002, 2003 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project in part by Network * Associates Laboratories, the Security Research Division of Network * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), * as part of the DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_mac.h" #include #include #include #include +#include #include #include #include #include #include #include #include static int mac_enforce_kld = 1; SYSCTL_INT(_security_mac, OID_AUTO, enforce_kld, CTLFLAG_RW, &mac_enforce_kld, 0, "Enforce MAC policy on kld operations"); TUNABLE_INT("security.mac.enforce_kld", &mac_enforce_kld); static int mac_enforce_system = 1; SYSCTL_INT(_security_mac, OID_AUTO, enforce_system, CTLFLAG_RW, &mac_enforce_system, 0, "Enforce MAC policy on system operations"); TUNABLE_INT("security.mac.enforce_system", &mac_enforce_system); int mac_check_kenv_dump(struct ucred *cred) { int error; if (!mac_enforce_system) return (0); MAC_CHECK(check_kenv_dump, cred); return (error); } int mac_check_kenv_get(struct ucred *cred, char *name) { int error; if (!mac_enforce_system) return (0); MAC_CHECK(check_kenv_get, cred, name); return (error); } int mac_check_kenv_set(struct ucred *cred, char *name, char *value) { int error; if (!mac_enforce_system) return (0); MAC_CHECK(check_kenv_set, cred, name, value); return (error); } int mac_check_kenv_unset(struct ucred *cred, char *name) { int error; if (!mac_enforce_system) return (0); MAC_CHECK(check_kenv_unset, cred, name); return (error); } int mac_check_kld_load(struct ucred *cred, struct vnode *vp) { int error; ASSERT_VOP_LOCKED(vp, "mac_check_kld_load"); if (!mac_enforce_kld) return (0); MAC_CHECK(check_kld_load, cred, vp, vp->v_label); return 
(error); } int mac_check_kld_stat(struct ucred *cred) { int error; if (!mac_enforce_kld) return (0); MAC_CHECK(check_kld_stat, cred); return (error); } int mac_check_kld_unload(struct ucred *cred) { int error; if (!mac_enforce_kld) return (0); MAC_CHECK(check_kld_unload, cred); return (error); } int mac_check_sysarch_ioperm(struct ucred *cred) { int error; if (!mac_enforce_system) return (0); MAC_CHECK(check_sysarch_ioperm, cred); return (error); } int mac_check_system_acct(struct ucred *cred, struct vnode *vp) { int error; if (vp != NULL) { ASSERT_VOP_LOCKED(vp, "mac_check_system_acct"); } if (!mac_enforce_system) return (0); MAC_CHECK(check_system_acct, cred, vp, vp != NULL ? vp->v_label : NULL); return (error); } int mac_check_system_nfsd(struct ucred *cred) { int error; if (!mac_enforce_system) return (0); MAC_CHECK(check_system_nfsd, cred); return (error); } int mac_check_system_reboot(struct ucred *cred, int howto) { int error; if (!mac_enforce_system) return (0); MAC_CHECK(check_system_reboot, cred, howto); return (error); } int mac_check_system_settime(struct ucred *cred) { int error; if (!mac_enforce_system) return (0); MAC_CHECK(check_system_settime, cred); return (error); } int mac_check_system_swapon(struct ucred *cred, struct vnode *vp) { int error; ASSERT_VOP_LOCKED(vp, "mac_check_system_swapon"); if (!mac_enforce_system) return (0); MAC_CHECK(check_system_swapon, cred, vp, vp->v_label); return (error); } int mac_check_system_swapoff(struct ucred *cred, struct vnode *vp) { int error; ASSERT_VOP_LOCKED(vp, "mac_check_system_swapoff"); if (!mac_enforce_system) return (0); MAC_CHECK(check_system_swapoff, cred, vp, vp->v_label); return (error); } int mac_check_system_sysctl(struct ucred *cred, struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) { int error; /* * XXXMAC: We'd very much like to assert the SYSCTL_LOCK here, * but since it's not exported from kern_sysctl.c, we can't. 
*/ if (!mac_enforce_system) return (0); MAC_CHECK(check_system_sysctl, cred, oidp, arg1, arg2, req); return (error); }