Index: head/sys/kern/kern_conf.c =================================================================== --- head/sys/kern/kern_conf.c (revision 353128) +++ head/sys/kern/kern_conf.c (revision 353129) @@ -1,1588 +1,1570 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999-2002 Poul-Henning Kamp * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage"); struct mtx devmtx; static void destroy_devl(struct cdev *dev); static int destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg); static void destroy_dev_tq(void *ctx, int pending); static int make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, va_list ap); static struct cdev_priv_list cdevp_free_list = TAILQ_HEAD_INITIALIZER(cdevp_free_list); static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list = SLIST_HEAD_INITIALIZER(cdevsw_gt_post_list); void dev_lock(void) { mtx_lock(&devmtx); } /* * Free all the memory collected while the cdev mutex was * locked. Since devmtx is after the system map mutex, free() cannot * be called immediately and is postponed until cdev mutex can be * dropped. */ static void dev_unlock_and_free(void) { struct cdev_priv_list cdp_free; struct free_cdevsw csw_free; struct cdev_priv *cdp; struct cdevsw *csw; mtx_assert(&devmtx, MA_OWNED); /* * Make the local copy of the list heads while the dev_mtx is * held. Free it later. 
*/ TAILQ_INIT(&cdp_free); TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list); csw_free = cdevsw_gt_post_list; SLIST_INIT(&cdevsw_gt_post_list); mtx_unlock(&devmtx); while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) { TAILQ_REMOVE(&cdp_free, cdp, cdp_list); devfs_free(&cdp->cdp_c); } while ((csw = SLIST_FIRST(&csw_free)) != NULL) { SLIST_REMOVE_HEAD(&csw_free, d_postfree_list); free(csw, M_DEVT); } } static void dev_free_devlocked(struct cdev *cdev) { struct cdev_priv *cdp; mtx_assert(&devmtx, MA_OWNED); cdp = cdev2priv(cdev); KASSERT((cdp->cdp_flags & CDP_UNREF_DTR) == 0, ("destroy_dev() was not called after delist_dev(%p)", cdev)); TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list); } static void cdevsw_free_devlocked(struct cdevsw *csw) { mtx_assert(&devmtx, MA_OWNED); SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list); } void dev_unlock(void) { mtx_unlock(&devmtx); } void dev_ref(struct cdev *dev) { mtx_assert(&devmtx, MA_NOTOWNED); mtx_lock(&devmtx); dev->si_refcount++; mtx_unlock(&devmtx); } void dev_refl(struct cdev *dev) { mtx_assert(&devmtx, MA_OWNED); dev->si_refcount++; } void dev_rel(struct cdev *dev) { int flag = 0; mtx_assert(&devmtx, MA_NOTOWNED); dev_lock(); dev->si_refcount--; KASSERT(dev->si_refcount >= 0, ("dev_rel(%s) gave negative count", devtoname(dev))); -#if 0 - if (dev->si_usecount == 0 && - (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED)) - ; - else -#endif if (dev->si_devsw == NULL && dev->si_refcount == 0) { LIST_REMOVE(dev, si_list); flag = 1; } dev_unlock(); if (flag) devfs_free(dev); } struct cdevsw * dev_refthread(struct cdev *dev, int *ref) { struct cdevsw *csw; struct cdev_priv *cdp; mtx_assert(&devmtx, MA_NOTOWNED); if ((dev->si_flags & SI_ETERNAL) != 0) { *ref = 0; return (dev->si_devsw); } dev_lock(); csw = dev->si_devsw; if (csw != NULL) { cdp = cdev2priv(dev); if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) atomic_add_long(&dev->si_threadcount, 1); else csw = NULL; } dev_unlock(); if (csw != NULL) *ref = 1; return (csw); } struct cdevsw * devvn_refthread(struct vnode *vp, struct cdev **devp, int *ref) { struct cdevsw *csw; struct cdev_priv *cdp; struct cdev *dev; mtx_assert(&devmtx, MA_NOTOWNED); if ((vp->v_vflag & VV_ETERNALDEV) != 0) { dev = vp->v_rdev; if (dev == NULL) return (NULL); KASSERT((dev->si_flags & SI_ETERNAL) != 0, ("Not eternal cdev")); *ref = 0; csw = dev->si_devsw; KASSERT(csw != NULL, ("Eternal cdev is destroyed")); *devp = dev; return (csw); } csw = NULL; dev_lock(); dev = vp->v_rdev; if (dev == NULL) { dev_unlock(); return (NULL); } cdp = cdev2priv(dev); if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) { csw = dev->si_devsw; if (csw != NULL) atomic_add_long(&dev->si_threadcount, 1); } dev_unlock(); if (csw != NULL) { *devp = dev; *ref = 1; } return (csw); } void dev_relthread(struct cdev *dev, int ref) { mtx_assert(&devmtx, MA_NOTOWNED); if (!ref) return; KASSERT(dev->si_threadcount > 0, ("%s threadcount is wrong", dev->si_name)); atomic_subtract_rel_long(&dev->si_threadcount, 1); } int nullop(void) { return (0); } int eopnotsupp(void) { return (EOPNOTSUPP); } static int enxio(void) { return (ENXIO); } static int enodev(void) { return (ENODEV); } /* Define a dead_cdevsw for use when devices leave unexpectedly. 
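/*
 * Illustrative sketch, not part of this change: the dev_refthread() and
 * dev_relthread() pair above is how a caller keeps si_devsw and the driver
 * alive for the duration of a single cdevsw method call; the giant_*()
 * trampolines later in this file follow exactly this pattern.
 * "example_do_ioctl" is a hypothetical helper.
 */
static int
example_do_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct cdevsw *dsw;
	int ref, error;

	dsw = dev_refthread(dev, &ref);		/* bumps si_threadcount */
	if (dsw == NULL)
		return (ENXIO);			/* cdev is being destroyed */
	error = dsw->d_ioctl(dev, cmd, data, fflag, td);
	dev_relthread(dev, ref);		/* lets destroy_devl() drain */
	return (error);
}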
*/ #define dead_open (d_open_t *)enxio #define dead_close (d_close_t *)enxio #define dead_read (d_read_t *)enxio #define dead_write (d_write_t *)enxio #define dead_ioctl (d_ioctl_t *)enxio #define dead_poll (d_poll_t *)enodev #define dead_mmap (d_mmap_t *)enodev static void dead_strategy(struct bio *bp) { biofinish(bp, NULL, ENXIO); } #define dead_dump (dumper_t *)enxio #define dead_kqfilter (d_kqfilter_t *)enxio #define dead_mmap_single (d_mmap_single_t *)enodev static struct cdevsw dead_cdevsw = { .d_version = D_VERSION, .d_open = dead_open, .d_close = dead_close, .d_read = dead_read, .d_write = dead_write, .d_ioctl = dead_ioctl, .d_poll = dead_poll, .d_mmap = dead_mmap, .d_strategy = dead_strategy, .d_name = "dead", .d_dump = dead_dump, .d_kqfilter = dead_kqfilter, .d_mmap_single = dead_mmap_single }; /* Default methods if driver does not specify method */ #define null_open (d_open_t *)nullop #define null_close (d_close_t *)nullop #define no_read (d_read_t *)enodev #define no_write (d_write_t *)enodev #define no_ioctl (d_ioctl_t *)enodev #define no_mmap (d_mmap_t *)enodev #define no_kqfilter (d_kqfilter_t *)enodev #define no_mmap_single (d_mmap_single_t *)enodev static void no_strategy(struct bio *bp) { biofinish(bp, NULL, ENODEV); } static int no_poll(struct cdev *dev __unused, int events, struct thread *td __unused) { return (poll_no_poll(events)); } #define no_dump (dumper_t *)enodev static int giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static void giant_strategy(struct bio *bp) { struct cdevsw *dsw; struct cdev *dev; int ref; dev = bp->bio_dev; dsw = dev_refthread(dev, &ref); if (dsw == NULL) { biofinish(bp, NULL, ENXIO); return; } mtx_lock(&Giant); dsw->d_gianttrick->d_strategy(bp); mtx_unlock(&Giant); dev_relthread(dev, ref); } static int giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_read(struct cdev *dev, struct uio *uio, int ioflag) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_read(dev, uio, ioflag); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_write(struct cdev *dev, struct uio *uio, int ioflag) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); 
mtx_lock(&Giant); retval = dsw->d_gianttrick->d_write(dev, uio, ioflag); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_poll(struct cdev *dev, int events, struct thread *td) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_poll(dev, events, td); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_kqfilter(struct cdev *dev, struct knote *kn) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_kqfilter(dev, kn); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot, memattr); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, vm_object_t *object, int nprot) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object, nprot); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static void notify(struct cdev *dev, const char *ev, int flags) { static const char prefix[] = "cdev="; char *data; int namelen, mflags; if (cold) return; mflags = (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK; namelen = strlen(dev->si_name); data = malloc(namelen + sizeof(prefix), M_TEMP, mflags); if (data == NULL) return; memcpy(data, prefix, sizeof(prefix) - 1); memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1); devctl_notify_f("DEVFS", "CDEV", ev, data, mflags); free(data, M_TEMP); } static void notify_create(struct cdev *dev, int flags) { notify(dev, "CREATE", flags); } static void notify_destroy(struct cdev *dev) { notify(dev, "DESTROY", MAKEDEV_WAITOK); } static struct cdev * newdev(struct make_dev_args *args, struct cdev *si) { struct cdev *si2; struct cdevsw *csw; mtx_assert(&devmtx, MA_OWNED); csw = args->mda_devsw; si2 = NULL; if (csw->d_flags & D_NEEDMINOR) { /* We may want to return an existing device */ LIST_FOREACH(si2, &csw->d_devs, si_list) { if (dev2unit(si2) == args->mda_unit) { dev_free_devlocked(si); si = si2; break; } } /* * If we're returning an existing device, we should make sure * it isn't already initialized. This would have been caught * in consumers anyways, but it's good to catch such a case * early. We still need to complete initialization of the * device, and we'll use whatever make_dev_args were passed in * to do so. */ KASSERT(si2 == NULL || (si2->si_flags & SI_NAMED) == 0, ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)", args->mda_devsw->d_name, dev2unit(si2), devtoname(si2))); } si->si_drv0 = args->mda_unit; si->si_drv1 = args->mda_si_drv1; si->si_drv2 = args->mda_si_drv2; /* Only push to csw->d_devs if it's not a cloned device. 
*/ if (si2 == NULL) { si->si_devsw = csw; LIST_INSERT_HEAD(&csw->d_devs, si, si_list); } else { KASSERT(si->si_devsw == csw, ("%s: inconsistent devsw between clone_create() and make_dev()", __func__)); } return (si); } static void fini_cdevsw(struct cdevsw *devsw) { struct cdevsw *gt; if (devsw->d_gianttrick != NULL) { gt = devsw->d_gianttrick; memcpy(devsw, gt, sizeof *devsw); cdevsw_free_devlocked(gt); devsw->d_gianttrick = NULL; } devsw->d_flags &= ~D_INIT; } static int prep_cdevsw(struct cdevsw *devsw, int flags) { struct cdevsw *dsw2; mtx_assert(&devmtx, MA_OWNED); if (devsw->d_flags & D_INIT) return (0); if (devsw->d_flags & D_NEEDGIANT) { dev_unlock(); dsw2 = malloc(sizeof *dsw2, M_DEVT, (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK); dev_lock(); if (dsw2 == NULL && !(devsw->d_flags & D_INIT)) return (ENOMEM); } else dsw2 = NULL; if (devsw->d_flags & D_INIT) { if (dsw2 != NULL) cdevsw_free_devlocked(dsw2); return (0); } if (devsw->d_version != D_VERSION_04) { printf( "WARNING: Device driver \"%s\" has wrong version %s\n", devsw->d_name == NULL ? "???" : devsw->d_name, "and is disabled. Recompile KLD module."); devsw->d_open = dead_open; devsw->d_close = dead_close; devsw->d_read = dead_read; devsw->d_write = dead_write; devsw->d_ioctl = dead_ioctl; devsw->d_poll = dead_poll; devsw->d_mmap = dead_mmap; devsw->d_mmap_single = dead_mmap_single; devsw->d_strategy = dead_strategy; devsw->d_dump = dead_dump; devsw->d_kqfilter = dead_kqfilter; } if (devsw->d_flags & D_NEEDGIANT) { if (devsw->d_gianttrick == NULL) { memcpy(dsw2, devsw, sizeof *dsw2); devsw->d_gianttrick = dsw2; dsw2 = NULL; } } #define FIXUP(member, noop, giant) \ do { \ if (devsw->member == NULL) { \ devsw->member = noop; \ } else if (devsw->d_flags & D_NEEDGIANT) \ devsw->member = giant; \ } \ while (0) FIXUP(d_open, null_open, giant_open); FIXUP(d_fdopen, NULL, giant_fdopen); FIXUP(d_close, null_close, giant_close); FIXUP(d_read, no_read, giant_read); FIXUP(d_write, no_write, giant_write); FIXUP(d_ioctl, no_ioctl, giant_ioctl); FIXUP(d_poll, no_poll, giant_poll); FIXUP(d_mmap, no_mmap, giant_mmap); FIXUP(d_strategy, no_strategy, giant_strategy); FIXUP(d_kqfilter, no_kqfilter, giant_kqfilter); FIXUP(d_mmap_single, no_mmap_single, giant_mmap_single); if (devsw->d_dump == NULL) devsw->d_dump = no_dump; LIST_INIT(&devsw->d_devs); devsw->d_flags |= D_INIT; if (dsw2 != NULL) cdevsw_free_devlocked(dsw2); return (0); } static int prep_devname(struct cdev *dev, const char *fmt, va_list ap) { int len; char *from, *q, *s, *to; mtx_assert(&devmtx, MA_OWNED); len = vsnrprintf(dev->si_name, sizeof(dev->si_name), 32, fmt, ap); if (len > sizeof(dev->si_name) - 1) return (ENAMETOOLONG); /* Strip leading slashes. */ for (from = dev->si_name; *from == '/'; from++) ; for (to = dev->si_name; *from != '\0'; from++, to++) { /* * Spaces and double quotation marks cause * problems for the devctl(4) protocol. * Reject names containing those characters. */ if (isspace(*from) || *from == '"') return (EINVAL); /* Treat multiple sequential slashes as single. */ while (from[0] == '/' && from[1] == '/') from++; /* Trailing slash is considered invalid. */ if (from[0] == '/' && from[1] == '\0') return (EINVAL); *to = *from; } *to = '\0'; if (dev->si_name[0] == '\0') return (EINVAL); /* Disallow "." and ".." components. */ for (s = dev->si_name;;) { for (q = s; *q != '/' && *q != '\0'; q++) ; if (q - s == 1 && s[0] == '.') return (EINVAL); if (q - s == 2 && s[0] == '.' 
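/*
 * Illustrative sketch, not part of this change: a hypothetical driver's
 * cdevsw as prep_cdevsw() above expects to see it.  Methods left NULL are
 * filled in with the null_ and no_ defaults by the FIXUP() macro, and
 * because D_NEEDGIANT is set the supplied methods are re-routed through the
 * giant_*() trampolines.  The example_* names are assumptions; definitions
 * are omitted.
 */
static d_open_t example_open;
static d_read_t example_read;
static d_ioctl_t example_ioctl;

static struct cdevsw example_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,	/* methods still rely on Giant */
	.d_name =	"example",
	.d_open =	example_open,
	.d_read =	example_read,
	.d_ioctl =	example_ioctl,
	/* d_close, d_write, d_poll, ... are defaulted by prep_cdevsw() */
};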
&& s[1] == '.') return (EINVAL); if (*q != '/') break; s = q + 1; } if (devfs_dev_exists(dev->si_name) != 0) return (EEXIST); return (0); } void make_dev_args_init_impl(struct make_dev_args *args, size_t sz) { bzero(args, sz); args->mda_size = sz; } static int make_dev_sv(struct make_dev_args *args1, struct cdev **dres, const char *fmt, va_list ap) { struct cdev *dev, *dev_new; struct make_dev_args args; int res; bzero(&args, sizeof(args)); if (sizeof(args) < args1->mda_size) return (EINVAL); bcopy(args1, &args, args1->mda_size); KASSERT((args.mda_flags & MAKEDEV_WAITOK) == 0 || (args.mda_flags & MAKEDEV_NOWAIT) == 0, ("make_dev_sv: both WAITOK and NOWAIT specified")); dev_new = devfs_alloc(args.mda_flags); if (dev_new == NULL) return (ENOMEM); dev_lock(); res = prep_cdevsw(args.mda_devsw, args.mda_flags); if (res != 0) { dev_unlock(); devfs_free(dev_new); return (res); } dev = newdev(&args, dev_new); if ((dev->si_flags & SI_NAMED) == 0) { res = prep_devname(dev, fmt, ap); if (res != 0) { if ((args.mda_flags & MAKEDEV_CHECKNAME) == 0) { panic( "make_dev_sv: bad si_name (error=%d, si_name=%s)", res, dev->si_name); } if (dev == dev_new) { LIST_REMOVE(dev, si_list); dev_unlock(); devfs_free(dev); } else dev_unlock(); return (res); } } if ((args.mda_flags & MAKEDEV_REF) != 0) dev_refl(dev); if ((args.mda_flags & MAKEDEV_ETERNAL) != 0) dev->si_flags |= SI_ETERNAL; - if (dev->si_flags & SI_CHEAPCLONE && - dev->si_flags & SI_NAMED) { - /* - * This is allowed as it removes races and generally - * simplifies cloning devices. - * XXX: still ?? - */ - dev_unlock_and_free(); - *dres = dev; - return (0); - } KASSERT(!(dev->si_flags & SI_NAMED), ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)", args.mda_devsw->d_name, dev2unit(dev), devtoname(dev))); dev->si_flags |= SI_NAMED; if (args.mda_cr != NULL) dev->si_cred = crhold(args.mda_cr); dev->si_uid = args.mda_uid; dev->si_gid = args.mda_gid; dev->si_mode = args.mda_mode; devfs_create(dev); clean_unrhdrl(devfs_inos); dev_unlock_and_free(); notify_create(dev, args.mda_flags); *dres = dev; return (0); } int make_dev_s(struct make_dev_args *args, struct cdev **dres, const char *fmt, ...) { va_list ap; int res; va_start(ap, fmt); res = make_dev_sv(args, dres, fmt, ap); va_end(ap); return (res); } static int make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, va_list ap) { struct make_dev_args args; make_dev_args_init(&args); args.mda_flags = flags; args.mda_devsw = devsw; args.mda_cr = cr; args.mda_uid = uid; args.mda_gid = gid; args.mda_mode = mode; args.mda_unit = unit; return (make_dev_sv(&args, dres, fmt, ap)); } struct cdev * make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode, const char *fmt, ...) { struct cdev *dev; va_list ap; int res __unused; va_start(ap, fmt); res = make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt, ap); va_end(ap); KASSERT(res == 0 && dev != NULL, ("make_dev: failed make_dev_credv (error=%d)", res)); return (dev); } struct cdev * make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, ...) 
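/*
 * Illustrative sketch, not part of this change: a hypothetical attach path
 * using the make_dev_args/make_dev_s() interface implemented above.  With
 * MAKEDEV_CHECKNAME an invalid or duplicate name comes back as an error
 * instead of panicking in make_dev_sv().  "struct example_softc" and its
 * sc_* fields, example_create_node and example_cdevsw are assumptions.
 */
static int
example_create_node(struct example_softc *sc)
{
	struct make_dev_args mda;
	int error;

	make_dev_args_init(&mda);
	mda.mda_flags = MAKEDEV_WAITOK | MAKEDEV_CHECKNAME;
	mda.mda_devsw = &example_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_unit = sc->sc_unit;
	mda.mda_si_drv1 = sc;		/* later reached via dev->si_drv1 */
	error = make_dev_s(&mda, &sc->sc_dev, "example%d", sc->sc_unit);
	return (error);
}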
{ struct cdev *dev; va_list ap; int res __unused; va_start(ap, fmt); res = make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap); va_end(ap); KASSERT(res == 0 && dev != NULL, ("make_dev_cred: failed make_dev_credv (error=%d)", res)); return (dev); } struct cdev * make_dev_credf(int flags, struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, ...) { struct cdev *dev; va_list ap; int res; va_start(ap, fmt); res = make_dev_credv(flags, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap); va_end(ap); KASSERT(((flags & MAKEDEV_NOWAIT) != 0 && res == ENOMEM) || ((flags & MAKEDEV_CHECKNAME) != 0 && res != ENOMEM) || res == 0, ("make_dev_credf: failed make_dev_credv (error=%d)", res)); return (res == 0 ? dev : NULL); } int make_dev_p(int flags, struct cdev **cdev, struct cdevsw *devsw, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, ...) { va_list ap; int res; va_start(ap, fmt); res = make_dev_credv(flags, cdev, devsw, 0, cr, uid, gid, mode, fmt, ap); va_end(ap); KASSERT(((flags & MAKEDEV_NOWAIT) != 0 && res == ENOMEM) || ((flags & MAKEDEV_CHECKNAME) != 0 && res != ENOMEM) || res == 0, ("make_dev_p: failed make_dev_credv (error=%d)", res)); return (res); } static void dev_dependsl(struct cdev *pdev, struct cdev *cdev) { cdev->si_parent = pdev; cdev->si_flags |= SI_CHILD; LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings); } void dev_depends(struct cdev *pdev, struct cdev *cdev) { dev_lock(); dev_dependsl(pdev, cdev); dev_unlock(); } static int make_dev_alias_v(int flags, struct cdev **cdev, struct cdev *pdev, const char *fmt, va_list ap) { struct cdev *dev; int error; KASSERT(pdev != NULL, ("make_dev_alias_v: pdev is NULL")); KASSERT((flags & MAKEDEV_WAITOK) == 0 || (flags & MAKEDEV_NOWAIT) == 0, ("make_dev_alias_v: both WAITOK and NOWAIT specified")); KASSERT((flags & ~(MAKEDEV_WAITOK | MAKEDEV_NOWAIT | MAKEDEV_CHECKNAME)) == 0, ("make_dev_alias_v: invalid flags specified (flags=%02x)", flags)); dev = devfs_alloc(flags); if (dev == NULL) return (ENOMEM); dev_lock(); dev->si_flags |= SI_ALIAS; error = prep_devname(dev, fmt, ap); if (error != 0) { if ((flags & MAKEDEV_CHECKNAME) == 0) { panic("make_dev_alias_v: bad si_name " "(error=%d, si_name=%s)", error, dev->si_name); } dev_unlock(); devfs_free(dev); return (error); } dev->si_flags |= SI_NAMED; devfs_create(dev); dev_dependsl(pdev, dev); clean_unrhdrl(devfs_inos); dev_unlock(); notify_create(dev, flags); *cdev = dev; return (0); } struct cdev * make_dev_alias(struct cdev *pdev, const char *fmt, ...) { struct cdev *dev; va_list ap; int res __unused; va_start(ap, fmt); res = make_dev_alias_v(MAKEDEV_WAITOK, &dev, pdev, fmt, ap); va_end(ap); KASSERT(res == 0 && dev != NULL, ("make_dev_alias: failed make_dev_alias_v (error=%d)", res)); return (dev); } int make_dev_alias_p(int flags, struct cdev **cdev, struct cdev *pdev, const char *fmt, ...) 
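/*
 * Illustrative sketch, not part of this change: giving an existing cdev a
 * second name.  make_dev_alias_v() above marks the alias SI_ALIAS and links
 * it to its parent with dev_dependsl(), so destroying the parent destroys
 * the alias as well; MAKEDEV_CHECKNAME again converts bad names into an
 * error return.  The example_* and sc_* names are assumptions.
 */
static int
example_add_alias(struct example_softc *sc)
{

	return (make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
	    &sc->sc_alias, sc->sc_dev, "example/%d", sc->sc_unit));
}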
{ va_list ap; int res; va_start(ap, fmt); res = make_dev_alias_v(flags, cdev, pdev, fmt, ap); va_end(ap); return (res); } int make_dev_physpath_alias(int flags, struct cdev **cdev, struct cdev *pdev, struct cdev *old_alias, const char *physpath) { char *devfspath; int physpath_len; int max_parentpath_len; int parentpath_len; int devfspathbuf_len; int mflags; int ret; *cdev = NULL; devfspath = NULL; physpath_len = strlen(physpath); ret = EINVAL; if (physpath_len == 0) goto out; if (strncmp("id1,", physpath, 4) == 0) { physpath += 4; physpath_len -= 4; if (physpath_len == 0) goto out; } max_parentpath_len = SPECNAMELEN - physpath_len - /*/*/1; parentpath_len = strlen(pdev->si_name); if (max_parentpath_len < parentpath_len) { if (bootverbose) printf("WARNING: Unable to alias %s " "to %s/%s - path too long\n", pdev->si_name, physpath, pdev->si_name); ret = ENAMETOOLONG; goto out; } mflags = (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK; devfspathbuf_len = physpath_len + /*/*/1 + parentpath_len + /*NUL*/1; devfspath = malloc(devfspathbuf_len, M_DEVBUF, mflags); if (devfspath == NULL) { ret = ENOMEM; goto out; } sprintf(devfspath, "%s/%s", physpath, pdev->si_name); if (old_alias != NULL && strcmp(old_alias->si_name, devfspath) == 0) { /* Retain the existing alias. */ *cdev = old_alias; old_alias = NULL; ret = 0; } else { ret = make_dev_alias_p(flags, cdev, pdev, "%s", devfspath); } out: if (old_alias != NULL) destroy_dev(old_alias); if (devfspath != NULL) free(devfspath, M_DEVBUF); return (ret); } static void destroy_devl(struct cdev *dev) { struct cdevsw *csw; struct cdev_privdata *p; struct cdev_priv *cdp; mtx_assert(&devmtx, MA_OWNED); KASSERT(dev->si_flags & SI_NAMED, ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev))); KASSERT((dev->si_flags & SI_ETERNAL) == 0, ("WARNING: Driver mistake: destroy_dev on eternal %d\n", dev2unit(dev))); cdp = cdev2priv(dev); if ((cdp->cdp_flags & CDP_UNREF_DTR) == 0) { /* * Avoid race with dev_rel(), e.g. from the populate * loop. If CDP_UNREF_DTR flag is set, the reference * to be dropped at the end of destroy_devl() was * already taken by delist_dev_locked(). 
*/ dev_refl(dev); devfs_destroy(dev); } /* Remove name marking */ dev->si_flags &= ~SI_NAMED; /* If we are a child, remove us from the parents list */ if (dev->si_flags & SI_CHILD) { LIST_REMOVE(dev, si_siblings); dev->si_flags &= ~SI_CHILD; } /* Kill our children */ while (!LIST_EMPTY(&dev->si_children)) destroy_devl(LIST_FIRST(&dev->si_children)); /* Remove from clone list */ if (dev->si_flags & SI_CLONELIST) { LIST_REMOVE(dev, si_clone); dev->si_flags &= ~SI_CLONELIST; } csw = dev->si_devsw; dev->si_devsw = NULL; /* already NULL for SI_ALIAS */ while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) { csw->d_purge(dev); msleep(csw, &devmtx, PRIBIO, "devprg", hz/10); if (dev->si_threadcount) printf("Still %lu threads in %s\n", dev->si_threadcount, devtoname(dev)); } while (dev->si_threadcount != 0) { /* Use unique dummy wait ident */ msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10); } dev_unlock(); if ((cdp->cdp_flags & CDP_UNREF_DTR) == 0) { /* avoid out of order notify events */ notify_destroy(dev); } mtx_lock(&cdevpriv_mtx); while ((p = LIST_FIRST(&cdp->cdp_fdpriv)) != NULL) { devfs_destroy_cdevpriv(p); mtx_lock(&cdevpriv_mtx); } mtx_unlock(&cdevpriv_mtx); dev_lock(); dev->si_drv1 = 0; dev->si_drv2 = 0; bzero(&dev->__si_u, sizeof(dev->__si_u)); if (!(dev->si_flags & SI_ALIAS)) { /* Remove from cdevsw list */ LIST_REMOVE(dev, si_list); /* If cdevsw has no more struct cdev *'s, clean it */ if (LIST_EMPTY(&csw->d_devs)) { fini_cdevsw(csw); wakeup(&csw->d_devs); } } dev->si_flags &= ~SI_ALIAS; cdp->cdp_flags &= ~CDP_UNREF_DTR; dev->si_refcount--; if (dev->si_refcount > 0) LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list); else dev_free_devlocked(dev); } static void delist_dev_locked(struct cdev *dev) { struct cdev_priv *cdp; struct cdev *child; mtx_assert(&devmtx, MA_OWNED); cdp = cdev2priv(dev); if ((cdp->cdp_flags & CDP_UNREF_DTR) != 0) return; cdp->cdp_flags |= CDP_UNREF_DTR; dev_refl(dev); devfs_destroy(dev); LIST_FOREACH(child, &dev->si_children, si_siblings) delist_dev_locked(child); dev_unlock(); /* ensure the destroy event is queued in order */ notify_destroy(dev); dev_lock(); } /* * This function will delist a character device and its children from * the directory listing and create a destroy event without waiting * for all character device references to go away. At some later point * destroy_dev() must be called to complete the character device * destruction. After calling this function the character device name * can instantly be re-used. */ void delist_dev(struct cdev *dev) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "delist_dev"); dev_lock(); delist_dev_locked(dev); dev_unlock(); } void destroy_dev(struct cdev *dev) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "destroy_dev"); dev_lock(); destroy_devl(dev); dev_unlock_and_free(); } const char * devtoname(struct cdev *dev) { return (dev->si_name); } int dev_stdclone(char *name, char **namep, const char *stem, int *unit) { int u, i; i = strlen(stem); if (bcmp(stem, name, i) != 0) return (0); if (!isdigit(name[i])) return (0); u = 0; if (name[i] == '0' && isdigit(name[i+1])) return (0); while (isdigit(name[i])) { u *= 10; u += name[i++] - '0'; } if (u > 0xffffff) return (0); *unit = u; if (namep) *namep = &name[i]; if (name[i]) return (2); return (1); } /* * Helper functions for cloning device drivers. * * The objective here is to make it unnecessary for the device drivers to * use rman or similar to manage their unit number space. 
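/*
 * Illustrative sketch, not part of this change: dev_stdclone() above is the
 * helper a dev_clone event handler typically uses to parse "<stem><unit>"
 * lookups coming from devfs; such a handler is registered with
 * EVENTHANDLER_REGISTER(dev_clone, ...) at attach time.  The handler below
 * is hypothetical.
 */
static void
example_clone_handler(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int unit;

	if (*dev != NULL)
		return;			/* someone already resolved the name */
	if (dev_stdclone(name, NULL, "example", &unit) != 1)
		return;			/* not exactly "example<unit>" */
	/* look up or create the unit and set *dev before returning */
}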
Due to the way * we do "on-demand" devices, using rman or other "private" methods * will be very tricky to lock down properly once we lock down this file. * * Instead we give the drivers these routines which puts the struct cdev *'s * that are to be managed on their own list, and gives the driver the ability * to ask for the first free unit number or a given specified unit number. * * In addition these routines support paired devices (pty, nmdm and similar) * by respecting a number of "flag" bits in the minor number. * */ struct clonedevs { LIST_HEAD(,cdev) head; }; void clone_setup(struct clonedevs **cdp) { *cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO); LIST_INIT(&(*cdp)->head); } int clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra) { struct clonedevs *cd; struct cdev *dev, *ndev, *dl, *de; struct make_dev_args args; int unit, low, u; KASSERT(*cdp != NULL, ("clone_setup() not called in driver \"%s\"", csw->d_name)); KASSERT(!(extra & CLONE_UNITMASK), ("Illegal extra bits (0x%x) in clone_create", extra)); KASSERT(*up <= CLONE_UNITMASK, ("Too high unit (0x%x) in clone_create", *up)); KASSERT(csw->d_flags & D_NEEDMINOR, ("clone_create() on cdevsw without minor numbers")); /* * Search the list for a lot of things in one go: * A preexisting match is returned immediately. * The lowest free unit number if we are passed -1, and the place * in the list where we should insert that new element. * The place to insert a specified unit number, if applicable * the end of the list. */ unit = *up; ndev = devfs_alloc(MAKEDEV_WAITOK); dev_lock(); prep_cdevsw(csw, MAKEDEV_WAITOK); low = extra; de = dl = NULL; cd = *cdp; LIST_FOREACH(dev, &cd->head, si_clone) { KASSERT(dev->si_flags & SI_CLONELIST, ("Dev %p(%s) should be on clonelist", dev, dev->si_name)); u = dev2unit(dev); if (u == (unit | extra)) { *dp = dev; dev_unlock(); devfs_free(ndev); return (0); } if (unit == -1 && u == low) { low++; de = dev; continue; } else if (u < (unit | extra)) { de = dev; continue; } else if (u > (unit | extra)) { dl = dev; break; } } if (unit == -1) unit = low & CLONE_UNITMASK; make_dev_args_init(&args); args.mda_unit = unit | extra; args.mda_devsw = csw; dev = newdev(&args, ndev); if (dev->si_flags & SI_CLONELIST) { printf("dev %p (%s) is on clonelist\n", dev, dev->si_name); printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra); LIST_FOREACH(dev, &cd->head, si_clone) { printf("\t%p %s\n", dev, dev->si_name); } panic("foo"); } KASSERT(!(dev->si_flags & SI_CLONELIST), ("Dev %p(%s) should not be on clonelist", dev, dev->si_name)); if (dl != NULL) LIST_INSERT_BEFORE(dl, dev, si_clone); else if (de != NULL) LIST_INSERT_AFTER(de, dev, si_clone); else LIST_INSERT_HEAD(&cd->head, dev, si_clone); dev->si_flags |= SI_CLONELIST; *up = unit; dev_unlock_and_free(); return (1); } /* * Kill everything still on the list. The driver should already have * disposed of any softc hung of the struct cdev *'s at this time. 
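/*
 * Illustrative sketch, not part of this change: the usual way clone_create()
 * above is consumed, continuing the hypothetical dev_clone handler.  A unit
 * of -1 asks for the lowest free unit; a nonzero return means no matching
 * node existed yet and the caller must create it with make_dev().  The
 * cdevsw must have D_NEEDMINOR set, and example_clones must have been set
 * up with clone_setup(); all example_* names are assumptions.
 */
static struct clonedevs *example_clones;

static void
example_clone_unit(int unit, struct cdev **dev)
{

	if (clone_create(&example_clones, &example_cdevsw, &unit, dev, 0)) {
		*dev = make_dev(&example_cdevsw, unit, UID_ROOT, GID_WHEEL,
		    0600, "example%d", unit);
	}
}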
*/ void clone_cleanup(struct clonedevs **cdp) { struct cdev *dev; struct cdev_priv *cp; struct clonedevs *cd; cd = *cdp; if (cd == NULL) return; dev_lock(); while (!LIST_EMPTY(&cd->head)) { dev = LIST_FIRST(&cd->head); LIST_REMOVE(dev, si_clone); KASSERT(dev->si_flags & SI_CLONELIST, ("Dev %p(%s) should be on clonelist", dev, dev->si_name)); dev->si_flags &= ~SI_CLONELIST; cp = cdev2priv(dev); if (!(cp->cdp_flags & CDP_SCHED_DTR)) { cp->cdp_flags |= CDP_SCHED_DTR; KASSERT(dev->si_flags & SI_NAMED, ("Driver has goofed in cloning underways udev %jx unit %x", (uintmax_t)dev2udev(dev), dev2unit(dev))); destroy_devl(dev); } } dev_unlock_and_free(); free(cd, M_DEVBUF); *cdp = NULL; } static TAILQ_HEAD(, cdev_priv) dev_ddtr = TAILQ_HEAD_INITIALIZER(dev_ddtr); static struct task dev_dtr_task = TASK_INITIALIZER(0, destroy_dev_tq, NULL); static void destroy_dev_tq(void *ctx, int pending) { struct cdev_priv *cp; struct cdev *dev; void (*cb)(void *); void *cb_arg; dev_lock(); while (!TAILQ_EMPTY(&dev_ddtr)) { cp = TAILQ_FIRST(&dev_ddtr); dev = &cp->cdp_c; KASSERT(cp->cdp_flags & CDP_SCHED_DTR, ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp)); TAILQ_REMOVE(&dev_ddtr, cp, cdp_dtr_list); cb = cp->cdp_dtr_cb; cb_arg = cp->cdp_dtr_cb_arg; destroy_devl(dev); dev_unlock_and_free(); dev_rel(dev); if (cb != NULL) cb(cb_arg); dev_lock(); } dev_unlock(); } /* * devmtx shall be locked on entry. devmtx will be unlocked after * function return. */ static int destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg) { struct cdev_priv *cp; mtx_assert(&devmtx, MA_OWNED); cp = cdev2priv(dev); if (cp->cdp_flags & CDP_SCHED_DTR) { dev_unlock(); return (0); } dev_refl(dev); cp->cdp_flags |= CDP_SCHED_DTR; cp->cdp_dtr_cb = cb; cp->cdp_dtr_cb_arg = arg; TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list); dev_unlock(); taskqueue_enqueue(taskqueue_swi_giant, &dev_dtr_task); return (1); } int destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg) { dev_lock(); return (destroy_dev_sched_cbl(dev, cb, arg)); } int destroy_dev_sched(struct cdev *dev) { return (destroy_dev_sched_cb(dev, NULL, NULL)); } void destroy_dev_drain(struct cdevsw *csw) { dev_lock(); while (!LIST_EMPTY(&csw->d_devs)) { msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10); } dev_unlock(); } void drain_dev_clone_events(void) { sx_xlock(&clone_drain_lock); sx_xunlock(&clone_drain_lock); } #include "opt_ddb.h" #ifdef DDB #include #include DB_SHOW_COMMAND(cdev, db_show_cdev) { struct cdev_priv *cdp; struct cdev *dev; u_int flags; char buf[512]; if (!have_addr) { TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) { dev = &cdp->cdp_c; db_printf("%s %p\n", dev->si_name, dev); if (db_pager_quit) break; } return; } dev = (struct cdev *)addr; cdp = cdev2priv(dev); db_printf("dev %s ref %d use %ld thr %ld inuse %u fdpriv %p\n", dev->si_name, dev->si_refcount, dev->si_usecount, dev->si_threadcount, cdp->cdp_inuse, cdp->cdp_fdpriv.lh_first); db_printf("devsw %p si_drv0 %d si_drv1 %p si_drv2 %p\n", dev->si_devsw, dev->si_drv0, dev->si_drv1, dev->si_drv2); flags = dev->si_flags; #define SI_FLAG(flag) do { \ if (flags & (flag)) { \ if (buf[0] != '\0') \ strlcat(buf, ", ", sizeof(buf)); \ strlcat(buf, (#flag) + 3, sizeof(buf)); \ flags &= ~(flag); \ } \ } while (0) buf[0] = '\0'; SI_FLAG(SI_ETERNAL); SI_FLAG(SI_ALIAS); SI_FLAG(SI_NAMED); - SI_FLAG(SI_CHEAPCLONE); SI_FLAG(SI_CHILD); SI_FLAG(SI_DUMPDEV); SI_FLAG(SI_CLONELIST); db_printf("si_flags %s\n", buf); flags = cdp->cdp_flags; #define CDP_FLAG(flag) do { \ if (flags & (flag)) { \ if (buf[0] != 
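/*
 * Illustrative sketch, not part of this change: destroy_dev_sched_cb() above
 * defers the actual destroy_devl() to taskqueue_swi_giant, letting a driver
 * start tear-down from a context that must not sleep waiting for
 * si_threadcount to drain; the callback fires once the cdev is gone.  The
 * example_* and sc_* names are assumptions.
 */
static void
example_dtr_done(void *arg)
{
	struct example_softc *sc = arg;

	free(sc, M_DEVBUF);		/* safe only after the cdev is destroyed */
}

static void
example_detach_async(struct example_softc *sc)
{

	destroy_dev_sched_cb(sc->sc_dev, example_dtr_done, sc);
}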
'\0') \ strlcat(buf, ", ", sizeof(buf)); \ strlcat(buf, (#flag) + 4, sizeof(buf)); \ flags &= ~(flag); \ } \ } while (0) buf[0] = '\0'; CDP_FLAG(CDP_ACTIVE); CDP_FLAG(CDP_SCHED_DTR); db_printf("cdp_flags %s\n", buf); } #endif Index: head/sys/sys/conf.h =================================================================== --- head/sys/sys/conf.h (revision 353128) +++ head/sys/sys/conf.h (revision 353129) @@ -1,378 +1,378 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 2000 * Poul-Henning Kamp. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)conf.h 8.5 (Berkeley) 1/9/95 * $FreeBSD$ */ #ifndef _SYS_CONF_H_ #define _SYS_CONF_H_ #ifdef _KERNEL #include #else #include #endif struct snapdata; struct devfs_dirent; struct cdevsw; struct file; struct cdev { void *si_spare0; u_int si_flags; #define SI_ETERNAL 0x0001 /* never destroyed */ #define SI_ALIAS 0x0002 /* carrier of alias name */ #define SI_NAMED 0x0004 /* make_dev{_alias} has been called */ -#define SI_CHEAPCLONE 0x0008 /* can be removed_dev'ed when vnode reclaims */ +#define SI_UNUSED1 0x0008 /* unused */ #define SI_CHILD 0x0010 /* child of another struct cdev **/ #define SI_DUMPDEV 0x0080 /* is kernel dumpdev */ #define SI_CLONELIST 0x0200 /* on a clone list */ #define SI_UNMAPPED 0x0400 /* can handle unmapped I/O */ #define SI_NOSPLIT 0x0800 /* I/O should not be split up */ struct timespec si_atime; struct timespec si_ctime; struct timespec si_mtime; uid_t si_uid; gid_t si_gid; mode_t si_mode; struct ucred *si_cred; /* cached clone-time credential */ int si_drv0; int si_refcount; LIST_ENTRY(cdev) si_list; LIST_ENTRY(cdev) si_clone; LIST_HEAD(, cdev) si_children; LIST_ENTRY(cdev) si_siblings; struct cdev *si_parent; struct mount *si_mountpt; void *si_drv1, *si_drv2; struct cdevsw *si_devsw; int si_iosize_max; /* maximum I/O size (for physio &al) */ u_long si_usecount; u_long si_threadcount; union { struct snapdata *__sid_snapdata; } __si_u; char si_name[SPECNAMELEN + 1]; }; #define si_snapdata __si_u.__sid_snapdata #ifdef _KERNEL /* * Definitions of device driver entry switches */ struct bio; struct buf; struct dumperinfo; struct kerneldumpheader; struct thread; struct uio; struct knote; struct clonedevs; struct vm_object; struct vnode; typedef int d_open_t(struct cdev *dev, int oflags, int devtype, struct thread *td); typedef int d_fdopen_t(struct cdev *dev, int oflags, struct thread *td, struct file *fp); typedef int d_close_t(struct cdev *dev, int fflag, int devtype, struct thread *td); typedef void d_strategy_t(struct bio *bp); typedef int d_ioctl_t(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); typedef int d_read_t(struct cdev *dev, struct uio *uio, int ioflag); typedef int d_write_t(struct cdev *dev, struct uio *uio, int ioflag); typedef int d_poll_t(struct cdev *dev, int events, struct thread *td); typedef int d_kqfilter_t(struct cdev *dev, struct knote *kn); typedef int d_mmap_t(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr); typedef int d_mmap_single_t(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot); typedef void d_purge_t(struct cdev *dev); typedef int dumper_t( void *_priv, /* Private to the driver. */ void *_virtual, /* Virtual (mapped) address. */ vm_offset_t _physical, /* Physical address of virtual. */ off_t _offset, /* Byte-offset to write at. */ size_t _length); /* Number of bytes to dump. */ typedef int dumper_start_t(struct dumperinfo *di); typedef int dumper_hdr_t(struct dumperinfo *di, struct kerneldumpheader *kdh, void *key, uint32_t keylen); #endif /* _KERNEL */ /* * Types for d_flags. */ #define D_TAPE 0x0001 #define D_DISK 0x0002 #define D_TTY 0x0004 #define D_MEM 0x0008 /* /dev/(k)mem */ #ifdef _KERNEL #define D_TYPEMASK 0xffff /* * Flags for d_flags which the drivers can set. 
*/ #define D_TRACKCLOSE 0x00080000 /* track all closes */ #define D_MMAP_ANON 0x00100000 /* special treatment in vm_mmap.c */ #define D_NEEDGIANT 0x00400000 /* driver want Giant */ #define D_NEEDMINOR 0x00800000 /* driver uses clone_create() */ /* * Version numbers. */ #define D_VERSION_00 0x20011966 #define D_VERSION_01 0x17032005 /* Add d_uid,gid,mode & kind */ #define D_VERSION_02 0x28042009 /* Add d_mmap_single */ #define D_VERSION_03 0x17122009 /* d_mmap takes memattr,vm_ooffset_t */ #define D_VERSION_04 0x5c48c353 /* SPECNAMELEN bumped to MAXNAMLEN */ #define D_VERSION D_VERSION_04 /* * Flags used for internal housekeeping */ #define D_INIT 0x80000000 /* cdevsw initialized */ /* * Character device switch table */ struct cdevsw { int d_version; u_int d_flags; const char *d_name; d_open_t *d_open; d_fdopen_t *d_fdopen; d_close_t *d_close; d_read_t *d_read; d_write_t *d_write; d_ioctl_t *d_ioctl; d_poll_t *d_poll; d_mmap_t *d_mmap; d_strategy_t *d_strategy; dumper_t *d_dump; d_kqfilter_t *d_kqfilter; d_purge_t *d_purge; d_mmap_single_t *d_mmap_single; int32_t d_spare0[3]; void *d_spare1[3]; /* These fields should not be messed with by drivers */ LIST_HEAD(, cdev) d_devs; int d_spare2; union { struct cdevsw *gianttrick; SLIST_ENTRY(cdevsw) postfree_list; } __d_giant; }; #define d_gianttrick __d_giant.gianttrick #define d_postfree_list __d_giant.postfree_list struct module; struct devsw_module_data { int (*chainevh)(struct module *, int, void *); /* next handler */ void *chainarg; /* arg for next event handler */ /* Do not initialize fields hereafter */ }; #define DEV_MODULE_ORDERED(name, evh, arg, ord) \ static moduledata_t name##_mod = { \ #name, \ evh, \ arg \ }; \ DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, ord) #define DEV_MODULE(name, evh, arg) \ DEV_MODULE_ORDERED(name, evh, arg, SI_ORDER_MIDDLE) void clone_setup(struct clonedevs **cdp); void clone_cleanup(struct clonedevs **); #define CLONE_UNITMASK 0xfffff #define CLONE_FLAG0 (CLONE_UNITMASK + 1) int clone_create(struct clonedevs **, struct cdevsw *, int *unit, struct cdev **dev, int extra); #define MAKEDEV_REF 0x01 #define MAKEDEV_WHTOUT 0x02 #define MAKEDEV_NOWAIT 0x04 #define MAKEDEV_WAITOK 0x08 #define MAKEDEV_ETERNAL 0x10 #define MAKEDEV_CHECKNAME 0x20 struct make_dev_args { size_t mda_size; int mda_flags; struct cdevsw *mda_devsw; struct ucred *mda_cr; uid_t mda_uid; gid_t mda_gid; int mda_mode; int mda_unit; void *mda_si_drv1; void *mda_si_drv2; }; void make_dev_args_init_impl(struct make_dev_args *_args, size_t _sz); #define make_dev_args_init(a) \ make_dev_args_init_impl((a), sizeof(struct make_dev_args)) int count_dev(struct cdev *_dev); void delist_dev(struct cdev *_dev); void destroy_dev(struct cdev *_dev); int destroy_dev_sched(struct cdev *dev); int destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg); void destroy_dev_drain(struct cdevsw *csw); void drain_dev_clone_events(void); struct cdevsw *dev_refthread(struct cdev *_dev, int *_ref); struct cdevsw *devvn_refthread(struct vnode *vp, struct cdev **devp, int *_ref); void dev_relthread(struct cdev *_dev, int _ref); void dev_depends(struct cdev *_pdev, struct cdev *_cdev); void dev_ref(struct cdev *dev); void dev_refl(struct cdev *dev); void dev_rel(struct cdev *dev); struct cdev *make_dev(struct cdevsw *_devsw, int _unit, uid_t _uid, gid_t _gid, int _perms, const char *_fmt, ...) __printflike(6, 7); struct cdev *make_dev_cred(struct cdevsw *_devsw, int _unit, struct ucred *_cr, uid_t _uid, gid_t _gid, int _perms, const char *_fmt, ...) 
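/*
 * Illustrative sketch, not part of this change: DEV_MODULE() above is the
 * usual glue for a stand-alone cdev driver module, creating the node on
 * MOD_LOAD and destroying it on MOD_UNLOAD.  The example_* names and the
 * device name are assumptions.
 */
static struct cdev *example_dev;

static int
example_modevent(struct module *m, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		example_dev = make_dev(&example_cdevsw, 0, UID_ROOT, GID_WHEEL,
		    0600, "example");
		return (0);
	case MOD_UNLOAD:
		destroy_dev(example_dev);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

DEV_MODULE(example, example_modevent, NULL);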
__printflike(7, 8); struct cdev *make_dev_credf(int _flags, struct cdevsw *_devsw, int _unit, struct ucred *_cr, uid_t _uid, gid_t _gid, int _mode, const char *_fmt, ...) __printflike(8, 9); int make_dev_p(int _flags, struct cdev **_cdev, struct cdevsw *_devsw, struct ucred *_cr, uid_t _uid, gid_t _gid, int _mode, const char *_fmt, ...) __printflike(8, 9); int make_dev_s(struct make_dev_args *_args, struct cdev **_cdev, const char *_fmt, ...) __printflike(3, 4); struct cdev *make_dev_alias(struct cdev *_pdev, const char *_fmt, ...) __printflike(2, 3); int make_dev_alias_p(int _flags, struct cdev **_cdev, struct cdev *_pdev, const char *_fmt, ...) __printflike(4, 5); int make_dev_physpath_alias(int _flags, struct cdev **_cdev, struct cdev *_pdev, struct cdev *_old_alias, const char *_physpath); void dev_lock(void); void dev_unlock(void); #ifdef KLD_MODULE #define MAKEDEV_ETERNAL_KLD 0 #else #define MAKEDEV_ETERNAL_KLD MAKEDEV_ETERNAL #endif #define dev2unit(d) ((d)->si_drv0) typedef void d_priv_dtor_t(void *data); int devfs_get_cdevpriv(void **datap); int devfs_set_cdevpriv(void *priv, d_priv_dtor_t *dtr); void devfs_clear_cdevpriv(void); ino_t devfs_alloc_cdp_inode(void); void devfs_free_cdp_inode(ino_t ino); #define UID_ROOT 0 #define UID_BIN 3 #define UID_UUCP 66 #define UID_NOBODY 65534 #define GID_WHEEL 0 #define GID_KMEM 2 #define GID_TTY 4 #define GID_OPERATOR 5 #define GID_BIN 7 #define GID_GAMES 13 #define GID_VIDEO 44 #define GID_DIALER 68 #define GID_NOGROUP 65533 #define GID_NOBODY 65534 typedef void (*dev_clone_fn)(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **result); int dev_stdclone(char *_name, char **_namep, const char *_stem, int *_unit); EVENTHANDLER_DECLARE(dev_clone, dev_clone_fn); /* Stuff relating to kernel-dump */ struct kerneldumpcrypto; struct kerneldumpheader; struct dumperinfo { dumper_t *dumper; /* Dumping function. */ dumper_start_t *dumper_start; /* Dumper callback for dump_start(). */ dumper_hdr_t *dumper_hdr; /* Dumper callback for writing headers. */ void *priv; /* Private parts. */ u_int blocksize; /* Size of block in bytes. */ u_int maxiosize; /* Max size allowed for an individual I/O */ off_t mediaoffset; /* Initial offset in bytes. */ off_t mediasize; /* Space available in bytes. */ /* MI kernel dump state. */ void *blockbuf; /* Buffer for padding shorter dump blocks */ off_t dumpoff; /* Offset of ongoing kernel dump. */ off_t origdumpoff; /* Starting dump offset. */ struct kerneldumpcrypto *kdcrypto; /* Kernel dump crypto. */ struct kerneldumpcomp *kdcomp; /* Kernel dump compression. */ TAILQ_ENTRY(dumperinfo) di_next; char di_devname[]; }; extern int dumping; /* system is dumping */ int doadump(boolean_t); struct diocskerneldump_arg; int dumper_insert(const struct dumperinfo *di_template, const char *devname, const struct diocskerneldump_arg *kda); int dumper_remove(const char *devname, const struct diocskerneldump_arg *kda); int dump_start(struct dumperinfo *di, struct kerneldumpheader *kdh); int dump_append(struct dumperinfo *, void *, vm_offset_t, size_t); int dump_write(struct dumperinfo *, void *, vm_offset_t, off_t, size_t); int dump_finish(struct dumperinfo *di, struct kerneldumpheader *kdh); void dump_init_header(const struct dumperinfo *di, struct kerneldumpheader *kdh, char *magic, uint32_t archver, uint64_t dumplen); #endif /* _KERNEL */ #endif /* !_SYS_CONF_H_ */
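/*
 * Illustrative sketch, not part of this change: per-open state kept with the
 * devfs_set_cdevpriv()/devfs_get_cdevpriv() interface declared above.  The
 * d_priv_dtor_t destructor runs when the last reference to the open file
 * goes away.  The example_* names are assumptions.
 */
struct example_priv {
	int	ep_flags;
};

static void
example_priv_dtor(void *data)
{

	free(data, M_TEMP);
}

static int
example_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct example_priv *p;
	int error;

	p = malloc(sizeof(*p), M_TEMP, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(p, example_priv_dtor);
	if (error != 0)
		free(p, M_TEMP);	/* e.g. another open raced us */
	return (error);
}

static int
example_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct example_priv *p;
	int error;

	error = devfs_get_cdevpriv((void **)&p);
	if (error != 0)
		return (error);
	p->ep_flags = 1;		/* state private to this open file */
	return (0);
}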