diff --git a/sys/fs/devfs/devfs_int.h b/sys/fs/devfs/devfs_int.h index e5afa311cbfd..ddd3c82bac3a 100644 --- a/sys/fs/devfs/devfs_int.h +++ b/sys/fs/devfs/devfs_int.h @@ -1,103 +1,102 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Poul-Henning Kamp. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * This file documents a private interface and it SHALL only be used * by kern/kern_conf.c and fs/devfs/... */ #ifndef _FS_DEVFS_DEVFS_INT_H_ #define _FS_DEVFS_DEVFS_INT_H_ #include <sys/queue.h> struct devfs_dirent; struct devfs_mount; struct cdev_privdata { struct file *cdpd_fp; void *cdpd_data; void (*cdpd_dtr)(void *); LIST_ENTRY(cdev_privdata) cdpd_list; }; struct cdev_priv { struct cdev cdp_c; TAILQ_ENTRY(cdev_priv) cdp_list; u_int cdp_inode; u_int cdp_flags; #define CDP_ACTIVE (1 << 0) #define CDP_SCHED_DTR (1 << 1) #define CDP_UNREF_DTR (1 << 2) u_int cdp_inuse; u_int cdp_maxdirent; struct devfs_dirent **cdp_dirents; struct devfs_dirent *cdp_dirent0; TAILQ_ENTRY(cdev_priv) cdp_dtr_list; void (*cdp_dtr_cb)(void *); void *cdp_dtr_cb_arg; LIST_HEAD(, cdev_privdata) cdp_fdpriv; struct mtx cdp_threadlock; }; #define cdev2priv(c) __containerof(c, struct cdev_priv, cdp_c) #ifdef _KERNEL struct cdev *devfs_alloc(int); int devfs_dev_exists(const char *); void devfs_free(struct cdev *); void devfs_create(struct cdev *); void devfs_destroy(struct cdev *); void devfs_destroy_cdevpriv(struct cdev_privdata *); int devfs_dir_find(const char *); void devfs_dir_ref_de(struct devfs_mount *, struct devfs_dirent *); void devfs_dir_unref_de(struct devfs_mount *, struct devfs_dirent *); int devfs_pathpath(const char *, const char *); extern struct unrhdr *devfs_inos; extern struct mtx devmtx; extern struct mtx devfs_de_interlock; -extern struct sx clone_drain_lock; extern struct mtx cdevpriv_mtx; extern TAILQ_HEAD(cdev_priv_list, cdev_priv) cdevp_list; #define dev_lock_assert_locked() mtx_assert(&devmtx, MA_OWNED) #define dev_lock_assert_unlocked() mtx_assert(&devmtx, MA_NOTOWNED) #endif /* _KERNEL */ #endif /* !_FS_DEVFS_DEVFS_INT_H_ */ diff --git a/sys/fs/devfs/devfs_vnops.c b/sys/fs/devfs/devfs_vnops.c index 7b571a6821b7..13619d318cfc 100644 --- a/sys/fs/devfs/devfs_vnops.c +++ b/sys/fs/devfs/devfs_vnops.c @@ -1,2116 +1,2112 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * 
Copyright (c) 2000-2004 * Poul-Henning Kamp. All rights reserved. * Copyright (c) 1989, 1992-1993, 1995 * The Regents of the University of California. All rights reserved. * * This code is derived from software donated to Berkeley by * Jan-Simon Pendry. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kernfs_vnops.c 8.15 (Berkeley) 5/21/95 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43 * * $FreeBSD$ */ /* * TODO: * mkdir: want it ? */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct vop_vector devfs_vnodeops; static struct vop_vector devfs_specops; static struct fileops devfs_ops_f; #include #include #include #include #include #include static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data"); struct mtx devfs_de_interlock; MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF); -struct sx clone_drain_lock; -SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock"); struct mtx cdevpriv_mtx; MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF); SYSCTL_DECL(_vfs_devfs); static int devfs_dotimes; SYSCTL_INT(_vfs_devfs, OID_AUTO, dotimes, CTLFLAG_RW, &devfs_dotimes, 0, "Update timestamps on DEVFS with default precision"); /* * Update devfs node timestamp. Note that updates are unlocked and * stat(2) could see partially updated times. 
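 *
 * For example, full-precision timestamps can be enabled at run time
 * through the read-write sysctl declared below:
 *
 *	sysctl vfs.devfs.dotimes=1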
*/ static void devfs_timestamp(struct timespec *tsp) { time_t ts; if (devfs_dotimes) { vfs_timestamp(tsp); } else { ts = time_second; if (tsp->tv_sec != ts) { tsp->tv_sec = ts; tsp->tv_nsec = 0; } } } static int devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp, int *ref) { *dswp = devvn_refthread(fp->f_vnode, devp, ref); if (*dswp == NULL || *devp != fp->f_data) { if (*dswp != NULL) dev_relthread(*devp, *ref); return (ENXIO); } KASSERT((*devp)->si_refcount > 0, ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp))); if (*dswp == NULL) return (ENXIO); curthread->td_fpop = fp; return (0); } int devfs_get_cdevpriv(void **datap) { struct file *fp; struct cdev_privdata *p; int error; fp = curthread->td_fpop; if (fp == NULL) return (EBADF); p = fp->f_cdevpriv; if (p != NULL) { error = 0; *datap = p->cdpd_data; } else error = ENOENT; return (error); } int devfs_set_cdevpriv(void *priv, d_priv_dtor_t *priv_dtr) { struct file *fp; struct cdev_priv *cdp; struct cdev_privdata *p; int error; fp = curthread->td_fpop; if (fp == NULL) return (ENOENT); cdp = cdev2priv((struct cdev *)fp->f_data); p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK); p->cdpd_data = priv; p->cdpd_dtr = priv_dtr; p->cdpd_fp = fp; mtx_lock(&cdevpriv_mtx); if (fp->f_cdevpriv == NULL) { LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list); fp->f_cdevpriv = p; mtx_unlock(&cdevpriv_mtx); error = 0; } else { mtx_unlock(&cdevpriv_mtx); free(p, M_CDEVPDATA); error = EBUSY; } return (error); } void devfs_destroy_cdevpriv(struct cdev_privdata *p) { mtx_assert(&cdevpriv_mtx, MA_OWNED); KASSERT(p->cdpd_fp->f_cdevpriv == p, ("devfs_destroy_cdevpriv %p != %p", p->cdpd_fp->f_cdevpriv, p)); p->cdpd_fp->f_cdevpriv = NULL; LIST_REMOVE(p, cdpd_list); mtx_unlock(&cdevpriv_mtx); (p->cdpd_dtr)(p->cdpd_data); free(p, M_CDEVPDATA); } static void devfs_fpdrop(struct file *fp) { struct cdev_privdata *p; mtx_lock(&cdevpriv_mtx); if ((p = fp->f_cdevpriv) == NULL) { mtx_unlock(&cdevpriv_mtx); return; } devfs_destroy_cdevpriv(p); } void devfs_clear_cdevpriv(void) { struct file *fp; fp = curthread->td_fpop; if (fp == NULL) return; devfs_fpdrop(fp); } static void devfs_usecount_add(struct vnode *vp) { struct devfs_dirent *de; struct cdev *dev; mtx_lock(&devfs_de_interlock); VI_LOCK(vp); VNPASS(vp->v_type == VCHR || vp->v_type == VBAD, vp); if (VN_IS_DOOMED(vp)) { goto out_unlock; } de = vp->v_data; dev = vp->v_rdev; MPASS(de != NULL); MPASS(dev != NULL); dev->si_usecount++; de->de_usecount++; out_unlock: VI_UNLOCK(vp); mtx_unlock(&devfs_de_interlock); } static void devfs_usecount_subl(struct vnode *vp) { struct devfs_dirent *de; struct cdev *dev; mtx_assert(&devfs_de_interlock, MA_OWNED); ASSERT_VI_LOCKED(vp, __func__); VNPASS(vp->v_type == VCHR || vp->v_type == VBAD, vp); de = vp->v_data; dev = vp->v_rdev; if (de == NULL) return; if (dev == NULL) { MPASS(de->de_usecount == 0); return; } if (dev->si_usecount < de->de_usecount) panic("%s: si_usecount underflow for dev %p " "(has %ld, dirent has %d)\n", __func__, dev, dev->si_usecount, de->de_usecount); if (VN_IS_DOOMED(vp)) { dev->si_usecount -= de->de_usecount; de->de_usecount = 0; } else { if (de->de_usecount == 0) panic("%s: de_usecount underflow for dev %p\n", __func__, dev); dev->si_usecount--; de->de_usecount--; } } static void devfs_usecount_sub(struct vnode *vp) { mtx_lock(&devfs_de_interlock); VI_LOCK(vp); devfs_usecount_subl(vp); VI_UNLOCK(vp); mtx_unlock(&devfs_de_interlock); } static int devfs_usecountl(struct vnode *vp) { VNPASS(vp->v_type == VCHR, vp); 
mtx_assert(&devfs_de_interlock, MA_OWNED); ASSERT_VI_LOCKED(vp, __func__); return (vp->v_rdev->si_usecount); } int devfs_usecount(struct vnode *vp) { int count; VNPASS(vp->v_type == VCHR, vp); mtx_lock(&devfs_de_interlock); VI_LOCK(vp); count = devfs_usecountl(vp); VI_UNLOCK(vp); mtx_unlock(&devfs_de_interlock); return (count); } void devfs_ctty_ref(struct vnode *vp) { vrefact(vp); devfs_usecount_add(vp); } void devfs_ctty_unref(struct vnode *vp) { devfs_usecount_sub(vp); vrele(vp); } /* * On success devfs_populate_vp() returns with dmp->dm_lock held. */ static int devfs_populate_vp(struct vnode *vp) { struct devfs_dirent *de; struct devfs_mount *dmp; int locked; ASSERT_VOP_LOCKED(vp, "devfs_populate_vp"); dmp = VFSTODEVFS(vp->v_mount); if (!devfs_populate_needed(dmp)) { sx_xlock(&dmp->dm_lock); goto out_nopopulate; } locked = VOP_ISLOCKED(vp); sx_xlock(&dmp->dm_lock); DEVFS_DMP_HOLD(dmp); /* Can't call devfs_populate() with the vnode lock held. */ VOP_UNLOCK(vp); devfs_populate(dmp); sx_xunlock(&dmp->dm_lock); vn_lock(vp, locked | LK_RETRY); sx_xlock(&dmp->dm_lock); if (DEVFS_DMP_DROP(dmp)) { sx_xunlock(&dmp->dm_lock); devfs_unmount_final(dmp); return (ERESTART); } out_nopopulate: if (VN_IS_DOOMED(vp)) { sx_xunlock(&dmp->dm_lock); return (ERESTART); } de = vp->v_data; KASSERT(de != NULL, ("devfs_populate_vp: vp->v_data == NULL but vnode not doomed")); if ((de->de_flags & DE_DOOMED) != 0) { sx_xunlock(&dmp->dm_lock); return (ERESTART); } return (0); } static int devfs_vptocnp(struct vop_vptocnp_args *ap) { struct vnode *vp = ap->a_vp; struct vnode **dvp = ap->a_vpp; struct devfs_mount *dmp; char *buf = ap->a_buf; size_t *buflen = ap->a_buflen; struct devfs_dirent *dd, *de; int i, error; dmp = VFSTODEVFS(vp->v_mount); error = devfs_populate_vp(vp); if (error != 0) return (error); if (vp->v_type != VCHR && vp->v_type != VDIR) { error = ENOENT; goto finished; } dd = vp->v_data; if (vp->v_type == VDIR && dd == dmp->dm_rootdir) { *dvp = vp; vref(*dvp); goto finished; } i = *buflen; i -= dd->de_dirent->d_namlen; if (i < 0) { error = ENOMEM; goto finished; } bcopy(dd->de_dirent->d_name, buf + i, dd->de_dirent->d_namlen); *buflen = i; de = devfs_parent_dirent(dd); if (de == NULL) { error = ENOENT; goto finished; } mtx_lock(&devfs_de_interlock); *dvp = de->de_vnode; if (*dvp != NULL) { VI_LOCK(*dvp); mtx_unlock(&devfs_de_interlock); vholdl(*dvp); VI_UNLOCK(*dvp); vref(*dvp); vdrop(*dvp); } else { mtx_unlock(&devfs_de_interlock); error = ENOENT; } finished: sx_xunlock(&dmp->dm_lock); return (error); } /* * Construct the fully qualified path name relative to the mountpoint. * If a NULL cnp is provided, no '/' is appended to the resulting path. 
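 *
 * A minimal usage sketch, mirroring the caller in devfs_lookupx() below;
 * the buffer must hold SPECNAMELEN + 1 bytes, the result (when not NULL)
 * points into that same buffer, and a NULL result means the fully
 * qualified name did not fit:
 *
 *	char specname[SPECNAMELEN + 1], *pname;
 *
 *	pname = devfs_fqpn(specname, dmp, dd, cnp);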
*/ char * devfs_fqpn(char *buf, struct devfs_mount *dmp, struct devfs_dirent *dd, struct componentname *cnp) { int i; struct devfs_dirent *de; sx_assert(&dmp->dm_lock, SA_LOCKED); i = SPECNAMELEN; buf[i] = '\0'; if (cnp != NULL) i -= cnp->cn_namelen; if (i < 0) return (NULL); if (cnp != NULL) bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen); de = dd; while (de != dmp->dm_rootdir) { if (cnp != NULL || i < SPECNAMELEN) { i--; if (i < 0) return (NULL); buf[i] = '/'; } i -= de->de_dirent->d_namlen; if (i < 0) return (NULL); bcopy(de->de_dirent->d_name, buf + i, de->de_dirent->d_namlen); de = devfs_parent_dirent(de); if (de == NULL) return (NULL); } return (buf + i); } static int devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp, struct devfs_dirent *de) { int not_found; not_found = 0; if (de->de_flags & DE_DOOMED) not_found = 1; if (DEVFS_DE_DROP(de)) { KASSERT(not_found == 1, ("DEVFS de dropped but not doomed")); devfs_dirent_free(de); } if (DEVFS_DMP_DROP(dmp)) { KASSERT(not_found == 1, ("DEVFS mount struct freed before dirent")); not_found = 2; sx_xunlock(&dmp->dm_lock); devfs_unmount_final(dmp); } if (not_found == 1 || (drop_dm_lock && not_found != 2)) sx_unlock(&dmp->dm_lock); return (not_found); } /* * devfs_allocv shall be entered with dmp->dm_lock held, and it drops * it on return. */ int devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode, struct vnode **vpp) { int error; struct vnode *vp; struct cdev *dev; struct devfs_mount *dmp; struct cdevsw *dsw; enum vgetstate vs; dmp = VFSTODEVFS(mp); if (de->de_flags & DE_DOOMED) { sx_xunlock(&dmp->dm_lock); return (ENOENT); } loop: DEVFS_DE_HOLD(de); DEVFS_DMP_HOLD(dmp); mtx_lock(&devfs_de_interlock); vp = de->de_vnode; if (vp != NULL) { vs = vget_prep(vp); mtx_unlock(&devfs_de_interlock); sx_xunlock(&dmp->dm_lock); vget_finish(vp, lockmode | LK_RETRY, vs); sx_xlock(&dmp->dm_lock); if (devfs_allocv_drop_refs(0, dmp, de)) { vput(vp); return (ENOENT); } else if (VN_IS_DOOMED(vp)) { mtx_lock(&devfs_de_interlock); if (de->de_vnode == vp) { de->de_vnode = NULL; vp->v_data = NULL; } mtx_unlock(&devfs_de_interlock); vput(vp); goto loop; } sx_xunlock(&dmp->dm_lock); *vpp = vp; return (0); } mtx_unlock(&devfs_de_interlock); if (de->de_dirent->d_type == DT_CHR) { if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) { devfs_allocv_drop_refs(1, dmp, de); return (ENOENT); } dev = &de->de_cdp->cdp_c; } else { dev = NULL; } error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp); if (error != 0) { devfs_allocv_drop_refs(1, dmp, de); printf("devfs_allocv: failed to allocate new vnode\n"); return (error); } if (de->de_dirent->d_type == DT_CHR) { vp->v_type = VCHR; VI_LOCK(vp); dev_lock(); dev_refl(dev); /* XXX: v_rdev should be protected by vnode lock */ vp->v_rdev = dev; VNPASS(vp->v_usecount == 1, vp); /* Special casing of ttys for deadfs. Probably redundant. 
*/ dsw = dev->si_devsw; if (dsw != NULL && (dsw->d_flags & D_TTY) != 0) vp->v_vflag |= VV_ISTTY; dev_unlock(); VI_UNLOCK(vp); if ((dev->si_flags & SI_ETERNAL) != 0) vp->v_vflag |= VV_ETERNALDEV; vp->v_op = &devfs_specops; } else if (de->de_dirent->d_type == DT_DIR) { vp->v_type = VDIR; } else if (de->de_dirent->d_type == DT_LNK) { vp->v_type = VLNK; } else { vp->v_type = VBAD; } vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS); VN_LOCK_ASHARE(vp); mtx_lock(&devfs_de_interlock); vp->v_data = de; de->de_vnode = vp; mtx_unlock(&devfs_de_interlock); error = insmntque1(vp, mp); if (error != 0) { mtx_lock(&devfs_de_interlock); vp->v_data = NULL; de->de_vnode = NULL; mtx_unlock(&devfs_de_interlock); vgone(vp); vput(vp); (void) devfs_allocv_drop_refs(1, dmp, de); return (error); } if (devfs_allocv_drop_refs(0, dmp, de)) { vput(vp); return (ENOENT); } #ifdef MAC mac_devfs_vnode_associate(mp, de, vp); #endif sx_xunlock(&dmp->dm_lock); *vpp = vp; return (0); } static int devfs_access(struct vop_access_args *ap) { struct vnode *vp = ap->a_vp; struct devfs_dirent *de; struct proc *p; int error; de = vp->v_data; if (vp->v_type == VDIR) de = de->de_dir; error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid, ap->a_accmode, ap->a_cred); if (error == 0) return (0); if (error != EACCES) return (error); p = ap->a_td->td_proc; /* We do, however, allow access to the controlling terminal */ PROC_LOCK(p); if (!(p->p_flag & P_CONTROLT)) { PROC_UNLOCK(p); return (error); } if (p->p_session->s_ttydp == de->de_cdp) error = 0; PROC_UNLOCK(p); return (error); } _Static_assert(((FMASK | FCNTLFLAGS) & (FLASTCLOSE | FREVOKE)) == 0, "devfs-only flag reuse failed"); static int devfs_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp, *oldvp; struct thread *td = ap->a_td; struct proc *p; struct cdev *dev = vp->v_rdev; struct cdevsw *dsw; struct devfs_dirent *de = vp->v_data; int dflags, error, ref, vp_locked; /* * XXX: Don't call d_close() if we were called because of * XXX: insmntque() failure. */ if (vp->v_data == NULL) return (0); /* * Hack: a tty device that is a controlling terminal * has a reference from the session structure. * We cannot easily tell that a character device is * a controlling terminal, unless it is the closing * process' controlling terminal. In that case, * if the reference count is 2 (this last descriptor * plus the session), release the reference from the session. */ if (de->de_usecount == 2 && td != NULL) { p = td->td_proc; PROC_LOCK(p); if (vp == p->p_session->s_ttyvp) { PROC_UNLOCK(p); oldvp = NULL; sx_xlock(&proctree_lock); if (vp == p->p_session->s_ttyvp) { SESS_LOCK(p->p_session); mtx_lock(&devfs_de_interlock); VI_LOCK(vp); if (devfs_usecountl(vp) == 2 && !VN_IS_DOOMED(vp)) { p->p_session->s_ttyvp = NULL; p->p_session->s_ttydp = NULL; oldvp = vp; } VI_UNLOCK(vp); mtx_unlock(&devfs_de_interlock); SESS_UNLOCK(p->p_session); } sx_xunlock(&proctree_lock); if (oldvp != NULL) devfs_ctty_unref(oldvp); } else PROC_UNLOCK(p); } /* * We do not want to really close the device if it * is still in use unless we are trying to close it * forcibly. Since every use (buffer, vnode, swap, cmap) * holds a reference to the vnode, and because we mark * any other vnodes that alias this device, when the * sum of the reference counts on all the aliased * vnodes descends to one, we are on last close. 
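 *
 * An illustrative, hypothetical driver-side sketch (foo_close() and its
 * helpers are not part of this change): the devfs-only FLASTCLOSE and
 * FREVOKE flags computed below are ORed into the fflag argument of
 * d_close(), so a driver, e.g. one with D_TRACKCLOSE set, can tell the
 * cases apart:
 *
 *	static int
 *	foo_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
 *	{
 *		if ((fflag & FREVOKE) != 0)
 *			return (foo_forced_teardown(dev));
 *		if ((fflag & FLASTCLOSE) != 0)
 *			return (foo_last_close(dev));
 *		return (0);
 *	}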
*/ dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); dflags = 0; mtx_lock(&devfs_de_interlock); VI_LOCK(vp); if (devfs_usecountl(vp) == 1) dflags |= FLASTCLOSE; devfs_usecount_subl(vp); mtx_unlock(&devfs_de_interlock); if (VN_IS_DOOMED(vp)) { /* Forced close. */ dflags |= FREVOKE | FNONBLOCK; } else if (dsw->d_flags & D_TRACKCLOSE) { /* Keep device updated on status. */ } else if ((dflags & FLASTCLOSE) == 0) { VI_UNLOCK(vp); dev_relthread(dev, ref); return (0); } vholdnz(vp); VI_UNLOCK(vp); vp_locked = VOP_ISLOCKED(vp); VOP_UNLOCK(vp); KASSERT(dev->si_refcount > 0, ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev))); error = dsw->d_close(dev, ap->a_fflag | dflags, S_IFCHR, td); dev_relthread(dev, ref); vn_lock(vp, vp_locked | LK_RETRY); vdrop(vp); return (error); } static int devfs_close_f(struct file *fp, struct thread *td) { int error; struct file *fpop; /* * NB: td may be NULL if this descriptor is closed due to * garbage collection from a closed UNIX domain socket. */ fpop = curthread->td_fpop; curthread->td_fpop = fp; error = vnops.fo_close(fp, td); curthread->td_fpop = fpop; /* * The f_cdevpriv cannot be assigned non-NULL value while we * are destroying the file. */ if (fp->f_cdevpriv != NULL) devfs_fpdrop(fp); return (error); } static int devfs_getattr(struct vop_getattr_args *ap) { struct vnode *vp = ap->a_vp; struct vattr *vap = ap->a_vap; struct devfs_dirent *de; struct devfs_mount *dmp; struct cdev *dev; struct timeval boottime; int error; error = devfs_populate_vp(vp); if (error != 0) return (error); dmp = VFSTODEVFS(vp->v_mount); sx_xunlock(&dmp->dm_lock); de = vp->v_data; KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp)); if (vp->v_type == VDIR) { de = de->de_dir; KASSERT(de != NULL, ("Null dir dirent in devfs_getattr vp=%p", vp)); } vap->va_uid = de->de_uid; vap->va_gid = de->de_gid; vap->va_mode = de->de_mode; if (vp->v_type == VLNK) vap->va_size = strlen(de->de_symlink); else if (vp->v_type == VDIR) vap->va_size = vap->va_bytes = DEV_BSIZE; else vap->va_size = 0; if (vp->v_type != VDIR) vap->va_bytes = 0; vap->va_blocksize = DEV_BSIZE; vap->va_type = vp->v_type; getboottime(&boottime); #define fix(aa) \ do { \ if ((aa).tv_sec <= 3600) { \ (aa).tv_sec = boottime.tv_sec; \ (aa).tv_nsec = boottime.tv_usec * 1000; \ } \ } while (0) if (vp->v_type != VCHR) { fix(de->de_atime); vap->va_atime = de->de_atime; fix(de->de_mtime); vap->va_mtime = de->de_mtime; fix(de->de_ctime); vap->va_ctime = de->de_ctime; } else { dev = vp->v_rdev; fix(dev->si_atime); vap->va_atime = dev->si_atime; fix(dev->si_mtime); vap->va_mtime = dev->si_mtime; fix(dev->si_ctime); vap->va_ctime = dev->si_ctime; vap->va_rdev = cdev2priv(dev)->cdp_inode; } vap->va_gen = 0; vap->va_flags = 0; vap->va_filerev = 0; vap->va_nlink = de->de_links; vap->va_fileid = de->de_inode; return (error); } /* ARGSUSED */ static int devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td) { struct file *fpop; int error; fpop = td->td_fpop; td->td_fpop = fp; error = vnops.fo_ioctl(fp, com, data, cred, td); td->td_fpop = fpop; return (error); } void * fiodgname_buf_get_ptr(void *fgnp, u_long com) { union { struct fiodgname_arg fgn; #ifdef COMPAT_FREEBSD32 struct fiodgname_arg32 fgn32; #endif } *fgnup; fgnup = fgnp; switch (com) { case FIODGNAME: return (fgnup->fgn.buf); #ifdef COMPAT_FREEBSD32 case FIODGNAME_32: return ((void *)(uintptr_t)fgnup->fgn32.buf); #endif default: panic("Unhandled ioctl command %ld", com); } } static int devfs_ioctl(struct 
vop_ioctl_args *ap) { struct fiodgname_arg *fgn; struct vnode *vpold, *vp; struct cdevsw *dsw; struct thread *td; struct session *sess; struct cdev *dev; int error, ref, i; const char *p; u_long com; vp = ap->a_vp; com = ap->a_command; td = ap->a_td; dsw = devvn_refthread(vp, &dev, &ref); if (dsw == NULL) return (ENXIO); KASSERT(dev->si_refcount > 0, ("devfs: un-referenced struct cdev *(%s)", devtoname(dev))); switch (com) { case FIODTYPE: *(int *)ap->a_data = dsw->d_flags & D_TYPEMASK; error = 0; break; case FIODGNAME: #ifdef COMPAT_FREEBSD32 case FIODGNAME_32: #endif fgn = ap->a_data; p = devtoname(dev); i = strlen(p) + 1; if (i > fgn->len) error = EINVAL; else error = copyout(p, fiodgname_buf_get_ptr(fgn, com), i); break; default: error = dsw->d_ioctl(dev, com, ap->a_data, ap->a_fflag, td); } dev_relthread(dev, ref); if (error == ENOIOCTL) error = ENOTTY; if (error == 0 && com == TIOCSCTTY) { /* * Do nothing if reassigning same control tty, or if the * control tty has already disappeared. If it disappeared, * it's because we were racing with TIOCNOTTY. TIOCNOTTY * already took care of releasing the old vnode and we have * nothing left to do. */ sx_slock(&proctree_lock); sess = td->td_proc->p_session; if (sess->s_ttyvp == vp || sess->s_ttyp == NULL) { sx_sunlock(&proctree_lock); return (0); } devfs_ctty_ref(vp); SESS_LOCK(sess); vpold = sess->s_ttyvp; sess->s_ttyvp = vp; sess->s_ttydp = cdev2priv(dev); SESS_UNLOCK(sess); sx_sunlock(&proctree_lock); /* Get rid of reference to old control tty */ if (vpold) devfs_ctty_unref(vpold); } return (error); } /* ARGSUSED */ static int devfs_kqfilter_f(struct file *fp, struct knote *kn) { struct cdev *dev; struct cdevsw *dsw; int error, ref; struct file *fpop; struct thread *td; td = curthread; fpop = td->td_fpop; error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error) return (error); error = dsw->d_kqfilter(dev, kn); td->td_fpop = fpop; dev_relthread(dev, ref); return (error); } static inline int devfs_prison_check(struct devfs_dirent *de, struct thread *td) { struct cdev_priv *cdp; struct ucred *dcr; struct proc *p; int error; cdp = de->de_cdp; if (cdp == NULL) return (0); dcr = cdp->cdp_c.si_cred; if (dcr == NULL) return (0); error = prison_check(td->td_ucred, dcr); if (error == 0) return (0); /* We do, however, allow access to the controlling terminal */ p = td->td_proc; PROC_LOCK(p); if (!(p->p_flag & P_CONTROLT)) { PROC_UNLOCK(p); return (error); } if (p->p_session->s_ttydp == cdp) error = 0; PROC_UNLOCK(p); return (error); } static int devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock) { struct componentname *cnp; struct vnode *dvp, **vpp; struct thread *td; struct devfs_dirent *de, *dd; struct devfs_dirent **dde; struct devfs_mount *dmp; struct mount *mp; struct cdev *cdev; int error, flags, nameiop, dvplocked; char specname[SPECNAMELEN + 1], *pname; td = curthread; cnp = ap->a_cnp; vpp = ap->a_vpp; dvp = ap->a_dvp; pname = cnp->cn_nameptr; flags = cnp->cn_flags; nameiop = cnp->cn_nameiop; mp = dvp->v_mount; dmp = VFSTODEVFS(mp); dd = dvp->v_data; *vpp = NULLVP; if ((flags & ISLASTCN) && nameiop == RENAME) return (EOPNOTSUPP); if (dvp->v_type != VDIR) return (ENOTDIR); if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT)) return (EIO); error = vn_dir_check_exec(dvp, cnp); if (error != 0) return (error); if (cnp->cn_namelen == 1 && *pname == '.') { if ((flags & ISLASTCN) && nameiop != LOOKUP) return (EINVAL); *vpp = dvp; VREF(dvp); return (0); } if (flags & ISDOTDOT) { if ((flags & ISLASTCN) && nameiop != LOOKUP) return (EINVAL); de = 
devfs_parent_dirent(dd); if (de == NULL) return (ENOENT); dvplocked = VOP_ISLOCKED(dvp); VOP_UNLOCK(dvp); error = devfs_allocv(de, mp, cnp->cn_lkflags & LK_TYPE_MASK, vpp); *dm_unlock = 0; vn_lock(dvp, dvplocked | LK_RETRY); return (error); } dd = dvp->v_data; de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen, 0); while (de == NULL) { /* While(...) so we can use break */ if (nameiop == DELETE) return (ENOENT); /* * OK, we didn't have an entry for the name we were asked for * so we try to see if anybody can create it on demand. */ pname = devfs_fqpn(specname, dmp, dd, cnp); if (pname == NULL) break; cdev = NULL; DEVFS_DMP_HOLD(dmp); sx_xunlock(&dmp->dm_lock); - sx_slock(&clone_drain_lock); EVENTHANDLER_INVOKE(dev_clone, td->td_ucred, pname, strlen(pname), &cdev); - sx_sunlock(&clone_drain_lock); if (cdev == NULL) sx_xlock(&dmp->dm_lock); else if (devfs_populate_vp(dvp) != 0) { *dm_unlock = 0; sx_xlock(&dmp->dm_lock); if (DEVFS_DMP_DROP(dmp)) { sx_xunlock(&dmp->dm_lock); devfs_unmount_final(dmp); } else sx_xunlock(&dmp->dm_lock); dev_rel(cdev); return (ENOENT); } if (DEVFS_DMP_DROP(dmp)) { *dm_unlock = 0; sx_xunlock(&dmp->dm_lock); devfs_unmount_final(dmp); if (cdev != NULL) dev_rel(cdev); return (ENOENT); } if (cdev == NULL) break; dev_lock(); dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx]; if (dde != NULL && *dde != NULL) de = *dde; dev_unlock(); dev_rel(cdev); break; } if (de == NULL || de->de_flags & DE_WHITEOUT) { if ((nameiop == CREATE || nameiop == RENAME) && (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) { cnp->cn_flags |= SAVENAME; return (EJUSTRETURN); } return (ENOENT); } if (devfs_prison_check(de, td)) return (ENOENT); if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) { error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); if (error) return (error); if (*vpp == dvp) { VREF(dvp); *vpp = dvp; return (0); } } error = devfs_allocv(de, mp, cnp->cn_lkflags & LK_TYPE_MASK, vpp); *dm_unlock = 0; return (error); } static int devfs_lookup(struct vop_lookup_args *ap) { int j; struct devfs_mount *dmp; int dm_unlock; if (devfs_populate_vp(ap->a_dvp) != 0) return (ENOTDIR); dmp = VFSTODEVFS(ap->a_dvp->v_mount); dm_unlock = 1; j = devfs_lookupx(ap, &dm_unlock); if (dm_unlock == 1) sx_xunlock(&dmp->dm_lock); return (j); } static int devfs_mknod(struct vop_mknod_args *ap) { struct componentname *cnp; struct vnode *dvp, **vpp; struct devfs_dirent *dd, *de; struct devfs_mount *dmp; int error; /* * The only type of node we should be creating here is a * character device, for anything else return EOPNOTSUPP. 
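 *
 * For example, assuming /dev/foo is a cdev-backed entry that was deleted
 * with rm(1) (which merely sets DE_WHITEOUT on such entries, see
 * devfs_remove()), it can be revived from userland; the major and minor
 * arguments are ignored by the matching loop below:
 *
 *	mknod /dev/foo c 0 0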
*/ if (ap->a_vap->va_type != VCHR) return (EOPNOTSUPP); dvp = ap->a_dvp; dmp = VFSTODEVFS(dvp->v_mount); cnp = ap->a_cnp; vpp = ap->a_vpp; dd = dvp->v_data; error = ENOENT; sx_xlock(&dmp->dm_lock); TAILQ_FOREACH(de, &dd->de_dlist, de_list) { if (cnp->cn_namelen != de->de_dirent->d_namlen) continue; if (de->de_dirent->d_type == DT_CHR && (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0) continue; if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name, de->de_dirent->d_namlen) != 0) continue; if (de->de_flags & DE_WHITEOUT) break; goto notfound; } if (de == NULL) goto notfound; de->de_flags &= ~DE_WHITEOUT; error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp); return (error); notfound: sx_xunlock(&dmp->dm_lock); return (error); } /* ARGSUSED */ static int devfs_open(struct vop_open_args *ap) { struct thread *td = ap->a_td; struct vnode *vp = ap->a_vp; struct cdev *dev = vp->v_rdev; struct file *fp = ap->a_fp; int error, ref, vlocked; struct cdevsw *dsw; struct file *fpop; if (vp->v_type == VBLK) return (ENXIO); if (dev == NULL) return (ENXIO); /* Make this field valid before any I/O in d_open. */ if (dev->si_iosize_max == 0) dev->si_iosize_max = DFLTPHYS; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); if (fp == NULL && dsw->d_fdopen != NULL) { dev_relthread(dev, ref); return (ENXIO); } if (vp->v_type == VCHR) devfs_usecount_add(vp); vlocked = VOP_ISLOCKED(vp); VOP_UNLOCK(vp); fpop = td->td_fpop; td->td_fpop = fp; if (fp != NULL) { fp->f_data = dev; fp->f_vnode = vp; } if (dsw->d_fdopen != NULL) error = dsw->d_fdopen(dev, ap->a_mode, td, fp); else error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td); /* Clean up any cdevpriv upon error. */ if (error != 0) devfs_clear_cdevpriv(); td->td_fpop = fpop; vn_lock(vp, vlocked | LK_RETRY); if (error != 0 && vp->v_type == VCHR) devfs_usecount_sub(vp); dev_relthread(dev, ref); if (error != 0) { if (error == ERESTART) error = EINTR; return (error); } #if 0 /* /dev/console */ KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp")); #else if (fp == NULL) return (error); #endif if (fp->f_ops == &badfileops) finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f); return (error); } static int devfs_pathconf(struct vop_pathconf_args *ap) { switch (ap->a_name) { case _PC_FILESIZEBITS: *ap->a_retval = 64; return (0); case _PC_NAME_MAX: *ap->a_retval = NAME_MAX; return (0); case _PC_LINK_MAX: *ap->a_retval = INT_MAX; return (0); case _PC_SYMLINK_MAX: *ap->a_retval = MAXPATHLEN; return (0); case _PC_MAX_CANON: if (ap->a_vp->v_vflag & VV_ISTTY) { *ap->a_retval = MAX_CANON; return (0); } return (EINVAL); case _PC_MAX_INPUT: if (ap->a_vp->v_vflag & VV_ISTTY) { *ap->a_retval = MAX_INPUT; return (0); } return (EINVAL); case _PC_VDISABLE: if (ap->a_vp->v_vflag & VV_ISTTY) { *ap->a_retval = _POSIX_VDISABLE; return (0); } return (EINVAL); case _PC_MAC_PRESENT: #ifdef MAC /* * If MAC is enabled, devfs automatically supports * trivial non-persistent label storage. 
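 *
 * Userland can probe this with pathconf(2), e.g.:
 *
 *	long mac_present = pathconf("/dev/null", _PC_MAC_PRESENT);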
*/ *ap->a_retval = 1; #else *ap->a_retval = 0; #endif return (0); case _PC_CHOWN_RESTRICTED: *ap->a_retval = 1; return (0); default: return (vop_stdpathconf(ap)); } /* NOTREACHED */ } /* ARGSUSED */ static int devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td) { struct cdev *dev; struct cdevsw *dsw; int error, ref; struct file *fpop; fpop = td->td_fpop; error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error != 0) { error = vnops.fo_poll(fp, events, cred, td); return (error); } error = dsw->d_poll(dev, events, td); td->td_fpop = fpop; dev_relthread(dev, ref); return(error); } /* * Print out the contents of a special device vnode. */ static int devfs_print(struct vop_print_args *ap) { printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev)); return (0); } static int devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td) { struct cdev *dev; int ioflag, error, ref; ssize_t resid; struct cdevsw *dsw; struct file *fpop; if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); fpop = td->td_fpop; error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error != 0) { error = vnops.fo_read(fp, uio, cred, flags, td); return (error); } resid = uio->uio_resid; ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT); if (ioflag & O_DIRECT) ioflag |= IO_DIRECT; foffset_lock_uio(fp, uio, flags | FOF_NOLOCK); error = dsw->d_read(dev, uio, ioflag); if (uio->uio_resid != resid || (error == 0 && resid != 0)) devfs_timestamp(&dev->si_atime); td->td_fpop = fpop; dev_relthread(dev, ref); foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF_R); return (error); } static int devfs_readdir(struct vop_readdir_args *ap) { int error; struct uio *uio; struct dirent *dp; struct devfs_dirent *dd; struct devfs_dirent *de; struct devfs_mount *dmp; off_t off; int *tmp_ncookies = NULL; if (ap->a_vp->v_type != VDIR) return (ENOTDIR); uio = ap->a_uio; if (uio->uio_offset < 0) return (EINVAL); /* * XXX: This is a temporary hack to get around this filesystem not * supporting cookies. We store the location of the ncookies pointer * in a temporary variable before calling vfs_subr.c:vfs_read_dirent() * and set the number of cookies to 0. We then set the pointer to * NULL so that vfs_read_dirent doesn't try to call realloc() on * ap->a_cookies. Later in this function, we restore the ap->a_ncookies * pointer to its original location before returning to the caller. */ if (ap->a_ncookies != NULL) { tmp_ncookies = ap->a_ncookies; *ap->a_ncookies = 0; ap->a_ncookies = NULL; } dmp = VFSTODEVFS(ap->a_vp->v_mount); if (devfs_populate_vp(ap->a_vp) != 0) { if (tmp_ncookies != NULL) ap->a_ncookies = tmp_ncookies; return (EIO); } error = 0; de = ap->a_vp->v_data; off = 0; TAILQ_FOREACH(dd, &de->de_dlist, de_list) { KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__)); if (dd->de_flags & (DE_COVERED | DE_WHITEOUT)) continue; if (devfs_prison_check(dd, uio->uio_td)) continue; if (dd->de_dirent->d_type == DT_DIR) de = dd->de_dir; else de = dd; dp = dd->de_dirent; MPASS(dp->d_reclen == GENERIC_DIRSIZ(dp)); if (dp->d_reclen > uio->uio_resid) break; dp->d_fileno = de->de_inode; /* NOTE: d_off is the offset for the *next* entry. */ dp->d_off = off + dp->d_reclen; if (off >= uio->uio_offset) { error = vfs_read_dirent(ap, dp, off); if (error) break; } off += dp->d_reclen; } sx_xunlock(&dmp->dm_lock); uio->uio_offset = off; /* * Restore ap->a_ncookies if it wasn't originally NULL in the first * place. 
*/ if (tmp_ncookies != NULL) ap->a_ncookies = tmp_ncookies; return (error); } static int devfs_readlink(struct vop_readlink_args *ap) { struct devfs_dirent *de; de = ap->a_vp->v_data; return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio)); } static void devfs_reclaiml(struct vnode *vp) { struct devfs_dirent *de; mtx_assert(&devfs_de_interlock, MA_OWNED); de = vp->v_data; if (de != NULL) { MPASS(de->de_usecount == 0); de->de_vnode = NULL; vp->v_data = NULL; } } static int devfs_reclaim(struct vop_reclaim_args *ap) { struct vnode *vp; vp = ap->a_vp; mtx_lock(&devfs_de_interlock); devfs_reclaiml(vp); mtx_unlock(&devfs_de_interlock); return (0); } static int devfs_reclaim_vchr(struct vop_reclaim_args *ap) { struct vnode *vp; struct cdev *dev; vp = ap->a_vp; MPASS(vp->v_type == VCHR); mtx_lock(&devfs_de_interlock); VI_LOCK(vp); devfs_usecount_subl(vp); devfs_reclaiml(vp); mtx_unlock(&devfs_de_interlock); dev_lock(); dev = vp->v_rdev; vp->v_rdev = NULL; dev_unlock(); VI_UNLOCK(vp); if (dev != NULL) dev_rel(dev); return (0); } static int devfs_remove(struct vop_remove_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode *vp = ap->a_vp; struct devfs_dirent *dd; struct devfs_dirent *de, *de_covered; struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount); ASSERT_VOP_ELOCKED(dvp, "devfs_remove"); ASSERT_VOP_ELOCKED(vp, "devfs_remove"); sx_xlock(&dmp->dm_lock); dd = ap->a_dvp->v_data; de = vp->v_data; if (de->de_cdp == NULL) { TAILQ_REMOVE(&dd->de_dlist, de, de_list); if (de->de_dirent->d_type == DT_LNK) { de_covered = devfs_find(dd, de->de_dirent->d_name, de->de_dirent->d_namlen, 0); if (de_covered != NULL) de_covered->de_flags &= ~DE_COVERED; } /* We need to unlock dvp because devfs_delete() may lock it. */ VOP_UNLOCK(vp); if (dvp != vp) VOP_UNLOCK(dvp); devfs_delete(dmp, de, 0); sx_xunlock(&dmp->dm_lock); if (dvp != vp) vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); } else { de->de_flags |= DE_WHITEOUT; sx_xunlock(&dmp->dm_lock); } return (0); } /* * Revoke is called on a tty when a terminal session ends. The vnode * is orphaned by setting v_op to deadfs so we need to let go of it * as well so that we create a new one next time around. 
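 *
 * Userland reaches this path through revoke(2), e.g. (device name
 * illustrative):
 *
 *	revoke("/dev/ttyv0");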
* */ static int devfs_revoke(struct vop_revoke_args *ap) { struct vnode *vp = ap->a_vp, *vp2; struct cdev *dev; struct cdev_priv *cdp; struct devfs_dirent *de; enum vgetstate vs; u_int i; KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL")); dev = vp->v_rdev; cdp = cdev2priv(dev); dev_lock(); cdp->cdp_inuse++; dev_unlock(); vhold(vp); vgone(vp); vdrop(vp); VOP_UNLOCK(vp); loop: for (;;) { mtx_lock(&devfs_de_interlock); dev_lock(); vp2 = NULL; for (i = 0; i <= cdp->cdp_maxdirent; i++) { de = cdp->cdp_dirents[i]; if (de == NULL) continue; vp2 = de->de_vnode; if (vp2 != NULL) { dev_unlock(); vs = vget_prep(vp2); mtx_unlock(&devfs_de_interlock); if (vget_finish(vp2, LK_EXCLUSIVE, vs) != 0) goto loop; vhold(vp2); vgone(vp2); vdrop(vp2); vput(vp2); break; } } if (vp2 != NULL) { continue; } dev_unlock(); mtx_unlock(&devfs_de_interlock); break; } dev_lock(); cdp->cdp_inuse--; if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) { TAILQ_REMOVE(&cdevp_list, cdp, cdp_list); dev_unlock(); dev_rel(&cdp->cdp_c); } else dev_unlock(); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); return (0); } static int devfs_rioctl(struct vop_ioctl_args *ap) { struct vnode *vp; struct devfs_mount *dmp; int error; vp = ap->a_vp; vn_lock(vp, LK_SHARED | LK_RETRY); if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp); return (EBADF); } dmp = VFSTODEVFS(vp->v_mount); sx_xlock(&dmp->dm_lock); VOP_UNLOCK(vp); DEVFS_DMP_HOLD(dmp); devfs_populate(dmp); if (DEVFS_DMP_DROP(dmp)) { sx_xunlock(&dmp->dm_lock); devfs_unmount_final(dmp); return (ENOENT); } error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td); sx_xunlock(&dmp->dm_lock); return (error); } static int devfs_rread(struct vop_read_args *ap) { if (ap->a_vp->v_type != VDIR) return (EINVAL); return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL)); } static int devfs_setattr(struct vop_setattr_args *ap) { struct devfs_dirent *de; struct vattr *vap; struct vnode *vp; struct thread *td; int c, error; uid_t uid; gid_t gid; vap = ap->a_vap; vp = ap->a_vp; td = curthread; if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) || (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) || (vap->va_blocksize != VNOVAL) || (vap->va_flags != VNOVAL && vap->va_flags != 0) || (vap->va_rdev != VNOVAL) || ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) { return (EINVAL); } error = devfs_populate_vp(vp); if (error != 0) return (error); de = vp->v_data; if (vp->v_type == VDIR) de = de->de_dir; c = 0; if (vap->va_uid == (uid_t)VNOVAL) uid = de->de_uid; else uid = vap->va_uid; if (vap->va_gid == (gid_t)VNOVAL) gid = de->de_gid; else gid = vap->va_gid; if (uid != de->de_uid || gid != de->de_gid) { if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid || (gid != de->de_gid && !groupmember(gid, ap->a_cred))) { error = priv_check(td, PRIV_VFS_CHOWN); if (error != 0) goto ret; } de->de_uid = uid; de->de_gid = gid; c = 1; } if (vap->va_mode != (mode_t)VNOVAL) { if (ap->a_cred->cr_uid != de->de_uid) { error = priv_check(td, PRIV_VFS_ADMIN); if (error != 0) goto ret; } de->de_mode = vap->va_mode; c = 1; } if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { error = vn_utimes_perm(vp, vap, ap->a_cred, td); if (error != 0) goto ret; if (vap->va_atime.tv_sec != VNOVAL) { if (vp->v_type == VCHR) vp->v_rdev->si_atime = vap->va_atime; else de->de_atime = vap->va_atime; } if (vap->va_mtime.tv_sec != VNOVAL) { if (vp->v_type == VCHR) vp->v_rdev->si_mtime = vap->va_mtime; else de->de_mtime = vap->va_mtime; } c = 1; } if (c) { if (vp->v_type == 
VCHR) vfs_timestamp(&vp->v_rdev->si_ctime); else vfs_timestamp(&de->de_mtime); } ret: sx_xunlock(&VFSTODEVFS(vp->v_mount)->dm_lock); return (error); } #ifdef MAC static int devfs_setlabel(struct vop_setlabel_args *ap) { struct vnode *vp; struct devfs_dirent *de; vp = ap->a_vp; de = vp->v_data; mac_vnode_relabel(ap->a_cred, vp, ap->a_label); mac_devfs_update(vp->v_mount, de, vp); return (0); } #endif static int devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred) { return (vnops.fo_stat(fp, sb, cred)); } static int devfs_symlink(struct vop_symlink_args *ap) { int i, error; struct devfs_dirent *dd; struct devfs_dirent *de, *de_covered, *de_dotdot; struct devfs_mount *dmp; error = priv_check(curthread, PRIV_DEVFS_SYMLINK); if (error) return(error); dmp = VFSTODEVFS(ap->a_dvp->v_mount); if (devfs_populate_vp(ap->a_dvp) != 0) return (ENOENT); dd = ap->a_dvp->v_data; de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen); de->de_flags = DE_USER; de->de_uid = 0; de->de_gid = 0; de->de_mode = 0755; de->de_inode = alloc_unr(devfs_inos); de->de_dir = dd; de->de_dirent->d_type = DT_LNK; i = strlen(ap->a_target) + 1; de->de_symlink = malloc(i, M_DEVFS, M_WAITOK); bcopy(ap->a_target, de->de_symlink, i); #ifdef MAC mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de); #endif de_covered = devfs_find(dd, de->de_dirent->d_name, de->de_dirent->d_namlen, 0); if (de_covered != NULL) { if ((de_covered->de_flags & DE_USER) != 0) { devfs_delete(dmp, de, DEVFS_DEL_NORECURSE); sx_xunlock(&dmp->dm_lock); return (EEXIST); } KASSERT((de_covered->de_flags & DE_COVERED) == 0, ("devfs_symlink: entry %p already covered", de_covered)); de_covered->de_flags |= DE_COVERED; } de_dotdot = TAILQ_FIRST(&dd->de_dlist); /* "." */ de_dotdot = TAILQ_NEXT(de_dotdot, de_list); /* ".." */ TAILQ_INSERT_AFTER(&dd->de_dlist, de_dotdot, de, de_list); devfs_dir_ref_de(dmp, dd); devfs_rules_apply(dmp, de); return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp)); } static int devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td) { return (vnops.fo_truncate(fp, length, cred, td)); } static int devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td) { struct cdev *dev; int error, ioflag, ref; ssize_t resid; struct cdevsw *dsw; struct file *fpop; if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); fpop = td->td_fpop; error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error != 0) { error = vnops.fo_write(fp, uio, cred, flags, td); return (error); } KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td)); ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC); if (ioflag & O_DIRECT) ioflag |= IO_DIRECT; foffset_lock_uio(fp, uio, flags | FOF_NOLOCK); resid = uio->uio_resid; error = dsw->d_write(dev, uio, ioflag); if (uio->uio_resid != resid || (error == 0 && resid != 0)) { devfs_timestamp(&dev->si_ctime); dev->si_mtime = dev->si_ctime; } td->td_fpop = fpop; dev_relthread(dev, ref); foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF_W); return (error); } static int devfs_mmap_f(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { struct cdev *dev; struct cdevsw *dsw; struct mount *mp; struct vnode *vp; struct file *fpop; vm_object_t object; vm_prot_t maxprot; int error, ref; vp = fp->f_vnode; /* * Ensure that file and memory protections are * compatible. 
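 *
 * A userland sketch of how these checks compose, assuming zfd is an open
 * descriptor for /dev/zero (a D_MMAP_ANON device, the one class that
 * permits private writable mappings, as noted below):
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, zfd, 0);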
*/ mp = vp->v_mount; if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { maxprot = VM_PROT_NONE; if ((prot & VM_PROT_EXECUTE) != 0) return (EACCES); } else maxprot = VM_PROT_EXECUTE; if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_READ; else if ((prot & VM_PROT_READ) != 0) return (EACCES); /* * If we are sharing potential changes via MAP_SHARED and we * are trying to get write permission although we opened it * without asking for it, bail out. * * Note that most character devices always share mappings. * The one exception is that D_MMAP_ANON devices * (i.e. /dev/zero) permit private writable mappings. * * Rely on vm_mmap_cdev() to fail invalid MAP_PRIVATE requests * as well as updating maxprot to permit writing for * D_MMAP_ANON devices rather than doing that here. */ if ((flags & MAP_SHARED) != 0) { if ((fp->f_flag & FWRITE) != 0) maxprot |= VM_PROT_WRITE; else if ((prot & VM_PROT_WRITE) != 0) return (EACCES); } maxprot &= cap_maxprot; fpop = td->td_fpop; error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error != 0) return (error); error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, dev, dsw, &foff, &object); td->td_fpop = fpop; dev_relthread(dev, ref); if (error != 0) return (error); error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, foff, FALSE, td); if (error != 0) vm_object_deallocate(object); return (error); } dev_t dev2udev(struct cdev *x) { if (x == NULL) return (NODEV); return (cdev2priv(x)->cdp_inode); } static struct fileops devfs_ops_f = { .fo_read = devfs_read_f, .fo_write = devfs_write_f, .fo_truncate = devfs_truncate_f, .fo_ioctl = devfs_ioctl_f, .fo_poll = devfs_poll_f, .fo_kqfilter = devfs_kqfilter_f, .fo_stat = devfs_stat_f, .fo_close = devfs_close_f, .fo_chmod = vn_chmod, .fo_chown = vn_chown, .fo_sendfile = vn_sendfile, .fo_seek = vn_seek, .fo_fill_kinfo = vn_fill_kinfo, .fo_mmap = devfs_mmap_f, .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE }; /* Vops for non-CHR vnodes in /dev. */ static struct vop_vector devfs_vnodeops = { .vop_default = &default_vnodeops, .vop_access = devfs_access, .vop_getattr = devfs_getattr, .vop_ioctl = devfs_rioctl, .vop_lookup = devfs_lookup, .vop_mknod = devfs_mknod, .vop_pathconf = devfs_pathconf, .vop_read = devfs_rread, .vop_readdir = devfs_readdir, .vop_readlink = devfs_readlink, .vop_reclaim = devfs_reclaim, .vop_remove = devfs_remove, .vop_revoke = devfs_revoke, .vop_setattr = devfs_setattr, #ifdef MAC .vop_setlabel = devfs_setlabel, #endif .vop_symlink = devfs_symlink, .vop_vptocnp = devfs_vptocnp, .vop_lock1 = vop_lock, .vop_unlock = vop_unlock, .vop_islocked = vop_islocked, .vop_add_writecount = vop_stdadd_writecount_nomsync, }; VFS_VOP_VECTOR_REGISTER(devfs_vnodeops); /* Vops for VCHR vnodes in /dev. 
*/ static struct vop_vector devfs_specops = { .vop_default = &default_vnodeops, .vop_access = devfs_access, .vop_bmap = VOP_PANIC, .vop_close = devfs_close, .vop_create = VOP_PANIC, .vop_fsync = vop_stdfsync, .vop_getattr = devfs_getattr, .vop_ioctl = devfs_ioctl, .vop_link = VOP_PANIC, .vop_mkdir = VOP_PANIC, .vop_mknod = VOP_PANIC, .vop_open = devfs_open, .vop_pathconf = devfs_pathconf, .vop_poll = dead_poll, .vop_print = devfs_print, .vop_read = dead_read, .vop_readdir = VOP_PANIC, .vop_readlink = VOP_PANIC, .vop_reallocblks = VOP_PANIC, .vop_reclaim = devfs_reclaim_vchr, .vop_remove = devfs_remove, .vop_rename = VOP_PANIC, .vop_revoke = devfs_revoke, .vop_rmdir = VOP_PANIC, .vop_setattr = devfs_setattr, #ifdef MAC .vop_setlabel = devfs_setlabel, #endif .vop_strategy = VOP_PANIC, .vop_symlink = VOP_PANIC, .vop_vptocnp = devfs_vptocnp, .vop_write = dead_write, .vop_lock1 = vop_lock, .vop_unlock = vop_unlock, .vop_islocked = vop_islocked, .vop_add_writecount = vop_stdadd_writecount_nomsync, }; VFS_VOP_VECTOR_REGISTER(devfs_specops); /* * Our calling convention to the device drivers used to be that we passed * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_ * flags instead since that's what open(), close() and ioctl() take and * we don't really want vnode.h in device drivers. * We preserved source compatibility by redefining some vnode flags to * be the same as the fcntl ones and by sending down the bitwise OR of * the respective fcntl/vnode flags. These CTASSERTS make sure nobody * pulls the rug out from under this. */ CTASSERT(O_NONBLOCK == IO_NDELAY); CTASSERT(O_FSYNC == IO_SYNC); diff --git a/sys/kern/kern_conf.c b/sys/kern/kern_conf.c index 1cd5bc2635c1..374aafc91005 100644 --- a/sys/kern/kern_conf.c +++ b/sys/kern/kern_conf.c @@ -1,1596 +1,1588 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999-2002 Poul-Henning Kamp * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage"); struct mtx devmtx; static void destroy_devl(struct cdev *dev); static int destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg); static void destroy_dev_tq(void *ctx, int pending); static void destroy_dev_tq_giant(void *ctx, int pending); static int make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, va_list ap); static struct cdev_priv_list cdevp_free_list = TAILQ_HEAD_INITIALIZER(cdevp_free_list); static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list = SLIST_HEAD_INITIALIZER(cdevsw_gt_post_list); void dev_lock(void) { mtx_lock(&devmtx); } /* * Free all the memory collected while the cdev mutex was * locked. Since devmtx is after the system map mutex, free() cannot * be called immediately and is postponed until cdev mutex can be * dropped. */ static void dev_unlock_and_free(void) { struct cdev_priv_list cdp_free; struct free_cdevsw csw_free; struct cdev_priv *cdp; struct cdevsw *csw; dev_lock_assert_locked(); /* * Make the local copy of the list heads while the dev_mtx is * held. Free it later. */ TAILQ_INIT(&cdp_free); TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list); csw_free = cdevsw_gt_post_list; SLIST_INIT(&cdevsw_gt_post_list); mtx_unlock(&devmtx); while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) { TAILQ_REMOVE(&cdp_free, cdp, cdp_list); devfs_free(&cdp->cdp_c); } while ((csw = SLIST_FIRST(&csw_free)) != NULL) { SLIST_REMOVE_HEAD(&csw_free, d_postfree_list); free(csw, M_DEVT); } } static void dev_free_devlocked(struct cdev *cdev) { struct cdev_priv *cdp; dev_lock_assert_locked(); cdp = cdev2priv(cdev); KASSERT((cdp->cdp_flags & CDP_UNREF_DTR) == 0, ("destroy_dev() was not called after delist_dev(%p)", cdev)); TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list); } static void cdevsw_free_devlocked(struct cdevsw *csw) { dev_lock_assert_locked(); SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list); } void dev_unlock(void) { mtx_unlock(&devmtx); } void dev_ref(struct cdev *dev) { dev_lock_assert_unlocked(); mtx_lock(&devmtx); dev->si_refcount++; mtx_unlock(&devmtx); } void dev_refl(struct cdev *dev) { dev_lock_assert_locked(); dev->si_refcount++; } void dev_rel(struct cdev *dev) { int flag = 0; dev_lock_assert_unlocked(); dev_lock(); dev->si_refcount--; KASSERT(dev->si_refcount >= 0, ("dev_rel(%s) gave negative count", devtoname(dev))); if (dev->si_devsw == NULL && dev->si_refcount == 0) { LIST_REMOVE(dev, si_list); flag = 1; } dev_unlock(); if (flag) devfs_free(dev); } struct cdevsw * dev_refthread(struct cdev *dev, int *ref) { struct cdevsw *csw; struct cdev_priv *cdp; dev_lock_assert_unlocked(); if ((dev->si_flags & SI_ETERNAL) != 0) { *ref = 0; return (dev->si_devsw); } cdp = cdev2priv(dev); mtx_lock(&cdp->cdp_threadlock); csw = dev->si_devsw; if (csw != NULL) { if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) atomic_add_long(&dev->si_threadcount, 1); else csw = NULL; } mtx_unlock(&cdp->cdp_threadlock); if (csw != NULL) *ref = 1; return (csw); } struct cdevsw * devvn_refthread(struct vnode *vp, struct cdev **devp, int *ref) { struct cdevsw *csw; struct cdev_priv *cdp; struct cdev *dev; dev_lock_assert_unlocked(); if ((vp->v_vflag & VV_ETERNALDEV) != 0) { dev = vp->v_rdev; if (dev == NULL) return 
(NULL); KASSERT((dev->si_flags & SI_ETERNAL) != 0, ("Not eternal cdev")); *ref = 0; csw = dev->si_devsw; KASSERT(csw != NULL, ("Eternal cdev is destroyed")); *devp = dev; return (csw); } csw = NULL; VI_LOCK(vp); dev = vp->v_rdev; if (dev == NULL) { VI_UNLOCK(vp); return (NULL); } cdp = cdev2priv(dev); mtx_lock(&cdp->cdp_threadlock); if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) { csw = dev->si_devsw; if (csw != NULL) atomic_add_long(&dev->si_threadcount, 1); } mtx_unlock(&cdp->cdp_threadlock); VI_UNLOCK(vp); if (csw != NULL) { *devp = dev; *ref = 1; } return (csw); } void dev_relthread(struct cdev *dev, int ref) { dev_lock_assert_unlocked(); if (!ref) return; KASSERT(dev->si_threadcount > 0, ("%s threadcount is wrong", dev->si_name)); atomic_subtract_rel_long(&dev->si_threadcount, 1); } int nullop(void) { return (0); } int eopnotsupp(void) { return (EOPNOTSUPP); } static int enxio(void) { return (ENXIO); } static int enodev(void) { return (ENODEV); } /* Define a dead_cdevsw for use when devices leave unexpectedly. */ #define dead_open (d_open_t *)enxio #define dead_close (d_close_t *)enxio #define dead_read (d_read_t *)enxio #define dead_write (d_write_t *)enxio #define dead_ioctl (d_ioctl_t *)enxio #define dead_poll (d_poll_t *)enodev #define dead_mmap (d_mmap_t *)enodev static void dead_strategy(struct bio *bp) { biofinish(bp, NULL, ENXIO); } #define dead_kqfilter (d_kqfilter_t *)enxio #define dead_mmap_single (d_mmap_single_t *)enodev static struct cdevsw dead_cdevsw = { .d_version = D_VERSION, .d_open = dead_open, .d_close = dead_close, .d_read = dead_read, .d_write = dead_write, .d_ioctl = dead_ioctl, .d_poll = dead_poll, .d_mmap = dead_mmap, .d_strategy = dead_strategy, .d_name = "dead", .d_kqfilter = dead_kqfilter, .d_mmap_single = dead_mmap_single }; /* Default methods if driver does not specify method */ #define null_open (d_open_t *)nullop #define null_close (d_close_t *)nullop #define no_read (d_read_t *)enodev #define no_write (d_write_t *)enodev #define no_ioctl (d_ioctl_t *)enodev #define no_mmap (d_mmap_t *)enodev #define no_kqfilter (d_kqfilter_t *)enodev #define no_mmap_single (d_mmap_single_t *)enodev static void no_strategy(struct bio *bp) { biofinish(bp, NULL, ENODEV); } static int no_poll(struct cdev *dev __unused, int events, struct thread *td __unused) { return (poll_no_poll(events)); } static int giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static void giant_strategy(struct bio *bp) { struct cdevsw *dsw; struct cdev *dev; int ref; dev = bp->bio_dev; dsw = dev_refthread(dev, &ref); if (dsw == NULL) { biofinish(bp, NULL, ENXIO); return; } 
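	/*
	 * As in every other giant_*() wrapper here, the driver's original
	 * method is reached through d_gianttrick (set up by prep_cdevsw()
	 * for D_NEEDGIANT drivers and undone by fini_cdevsw() below), and
	 * the call itself is bracketed by Giant.
	 */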
mtx_lock(&Giant); dsw->d_gianttrick->d_strategy(bp); mtx_unlock(&Giant); dev_relthread(dev, ref); } static int giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_read(struct cdev *dev, struct uio *uio, int ioflag) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_read(dev, uio, ioflag); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_write(struct cdev *dev, struct uio *uio, int ioflag) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_write(dev, uio, ioflag); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_poll(struct cdev *dev, int events, struct thread *td) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_poll(dev, events, td); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_kqfilter(struct cdev *dev, struct knote *kn) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_kqfilter(dev, kn); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot, memattr); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static int giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, vm_object_t *object, int nprot) { struct cdevsw *dsw; int ref, retval; dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object, nprot); mtx_unlock(&Giant); dev_relthread(dev, ref); return (retval); } static void notify(struct cdev *dev, const char *ev, int flags) { static const char prefix[] = "cdev="; char *data; int namelen, mflags; if (cold) return; mflags = (flags & MAKEDEV_NOWAIT) ? 
M_NOWAIT : M_WAITOK; namelen = strlen(dev->si_name); data = malloc(namelen + sizeof(prefix), M_TEMP, mflags); if (data == NULL) return; memcpy(data, prefix, sizeof(prefix) - 1); memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1); devctl_notify("DEVFS", "CDEV", ev, data); free(data, M_TEMP); } static void notify_create(struct cdev *dev, int flags) { notify(dev, "CREATE", flags); } static void notify_destroy(struct cdev *dev) { notify(dev, "DESTROY", MAKEDEV_WAITOK); } static struct cdev * newdev(struct make_dev_args *args, struct cdev *si) { struct cdev *si2; struct cdevsw *csw; dev_lock_assert_locked(); csw = args->mda_devsw; si2 = NULL; if (csw->d_flags & D_NEEDMINOR) { /* We may want to return an existing device */ LIST_FOREACH(si2, &csw->d_devs, si_list) { if (dev2unit(si2) == args->mda_unit) { dev_free_devlocked(si); si = si2; break; } } /* * If we're returning an existing device, we should make sure * it isn't already initialized. This would have been caught * in consumers anyways, but it's good to catch such a case * early. We still need to complete initialization of the * device, and we'll use whatever make_dev_args were passed in * to do so. */ KASSERT(si2 == NULL || (si2->si_flags & SI_NAMED) == 0, ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)", args->mda_devsw->d_name, dev2unit(si2), devtoname(si2))); } si->si_drv0 = args->mda_unit; si->si_drv1 = args->mda_si_drv1; si->si_drv2 = args->mda_si_drv2; /* Only push to csw->d_devs if it's not a cloned device. */ if (si2 == NULL) { si->si_devsw = csw; LIST_INSERT_HEAD(&csw->d_devs, si, si_list); } else { KASSERT(si->si_devsw == csw, ("%s: inconsistent devsw between clone_create() and make_dev()", __func__)); } return (si); } static void fini_cdevsw(struct cdevsw *devsw) { struct cdevsw *gt; if (devsw->d_gianttrick != NULL) { gt = devsw->d_gianttrick; memcpy(devsw, gt, sizeof *devsw); cdevsw_free_devlocked(gt); devsw->d_gianttrick = NULL; } devsw->d_flags &= ~D_INIT; } static int prep_cdevsw(struct cdevsw *devsw, int flags) { struct cdevsw *dsw2; dev_lock_assert_locked(); if (devsw->d_flags & D_INIT) return (0); if (devsw->d_flags & D_NEEDGIANT) { dev_unlock(); dsw2 = malloc(sizeof *dsw2, M_DEVT, (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK); dev_lock(); if (dsw2 == NULL && !(devsw->d_flags & D_INIT)) return (ENOMEM); } else dsw2 = NULL; if (devsw->d_flags & D_INIT) { if (dsw2 != NULL) cdevsw_free_devlocked(dsw2); return (0); } if (devsw->d_version != D_VERSION_04) { printf( "WARNING: Device driver \"%s\" has wrong version %s\n", devsw->d_name == NULL ? "???" : devsw->d_name, "and is disabled. Recompile KLD module."); devsw->d_open = dead_open; devsw->d_close = dead_close; devsw->d_read = dead_read; devsw->d_write = dead_write; devsw->d_ioctl = dead_ioctl; devsw->d_poll = dead_poll; devsw->d_mmap = dead_mmap; devsw->d_mmap_single = dead_mmap_single; devsw->d_strategy = dead_strategy; devsw->d_kqfilter = dead_kqfilter; } if ((devsw->d_flags & D_NEEDGIANT) != 0) { if ((devsw->d_flags & D_GIANTOK) == 0) { printf( "WARNING: Device \"%s\" is Giant locked and may be " "deleted before FreeBSD 14.0.\n", devsw->d_name == NULL ? "???" 
: devsw->d_name); } if (devsw->d_gianttrick == NULL) { memcpy(dsw2, devsw, sizeof *dsw2); devsw->d_gianttrick = dsw2; dsw2 = NULL; } } #define FIXUP(member, noop, giant) \ do { \ if (devsw->member == NULL) { \ devsw->member = noop; \ } else if (devsw->d_flags & D_NEEDGIANT) \ devsw->member = giant; \ } \ while (0) FIXUP(d_open, null_open, giant_open); FIXUP(d_fdopen, NULL, giant_fdopen); FIXUP(d_close, null_close, giant_close); FIXUP(d_read, no_read, giant_read); FIXUP(d_write, no_write, giant_write); FIXUP(d_ioctl, no_ioctl, giant_ioctl); FIXUP(d_poll, no_poll, giant_poll); FIXUP(d_mmap, no_mmap, giant_mmap); FIXUP(d_strategy, no_strategy, giant_strategy); FIXUP(d_kqfilter, no_kqfilter, giant_kqfilter); FIXUP(d_mmap_single, no_mmap_single, giant_mmap_single); LIST_INIT(&devsw->d_devs); devsw->d_flags |= D_INIT; if (dsw2 != NULL) cdevsw_free_devlocked(dsw2); return (0); } static int prep_devname(struct cdev *dev, const char *fmt, va_list ap) { int len; char *from, *q, *s, *to; dev_lock_assert_locked(); len = vsnrprintf(dev->si_name, sizeof(dev->si_name), 32, fmt, ap); if (len > sizeof(dev->si_name) - 1) return (ENAMETOOLONG); /* Strip leading slashes. */ for (from = dev->si_name; *from == '/'; from++) ; for (to = dev->si_name; *from != '\0'; from++, to++) { /* * Spaces and double quotation marks cause * problems for the devctl(4) protocol. * Reject names containing those characters. */ if (isspace(*from) || *from == '"') return (EINVAL); /* Treat multiple sequential slashes as single. */ while (from[0] == '/' && from[1] == '/') from++; /* Trailing slash is considered invalid. */ if (from[0] == '/' && from[1] == '\0') return (EINVAL); *to = *from; } *to = '\0'; if (dev->si_name[0] == '\0') return (EINVAL); /* Disallow "." and ".." components. */ for (s = dev->si_name;;) { for (q = s; *q != '/' && *q != '\0'; q++) ; if (q - s == 1 && s[0] == '.') return (EINVAL); if (q - s == 2 && s[0] == '.' 
&& s[1] == '.') return (EINVAL); if (*q != '/') break; s = q + 1; } if (devfs_dev_exists(dev->si_name) != 0) return (EEXIST); return (0); } void make_dev_args_init_impl(struct make_dev_args *args, size_t sz) { bzero(args, sz); args->mda_size = sz; } static int make_dev_sv(struct make_dev_args *args1, struct cdev **dres, const char *fmt, va_list ap) { struct cdev *dev, *dev_new; struct make_dev_args args; int res; bzero(&args, sizeof(args)); if (sizeof(args) < args1->mda_size) return (EINVAL); bcopy(args1, &args, args1->mda_size); KASSERT((args.mda_flags & MAKEDEV_WAITOK) == 0 || (args.mda_flags & MAKEDEV_NOWAIT) == 0, ("make_dev_sv: both WAITOK and NOWAIT specified")); dev_new = devfs_alloc(args.mda_flags); if (dev_new == NULL) return (ENOMEM); dev_lock(); res = prep_cdevsw(args.mda_devsw, args.mda_flags); if (res != 0) { dev_unlock(); devfs_free(dev_new); return (res); } dev = newdev(&args, dev_new); if ((dev->si_flags & SI_NAMED) == 0) { res = prep_devname(dev, fmt, ap); if (res != 0) { if ((args.mda_flags & MAKEDEV_CHECKNAME) == 0) { panic( "make_dev_sv: bad si_name (error=%d, si_name=%s)", res, dev->si_name); } if (dev == dev_new) { LIST_REMOVE(dev, si_list); dev_unlock(); devfs_free(dev); } else dev_unlock(); return (res); } } if ((args.mda_flags & MAKEDEV_REF) != 0) dev_refl(dev); if ((args.mda_flags & MAKEDEV_ETERNAL) != 0) dev->si_flags |= SI_ETERNAL; KASSERT(!(dev->si_flags & SI_NAMED), ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)", args.mda_devsw->d_name, dev2unit(dev), devtoname(dev))); dev->si_flags |= SI_NAMED; if (args.mda_cr != NULL) dev->si_cred = crhold(args.mda_cr); dev->si_uid = args.mda_uid; dev->si_gid = args.mda_gid; dev->si_mode = args.mda_mode; devfs_create(dev); clean_unrhdrl(devfs_inos); dev_unlock_and_free(); notify_create(dev, args.mda_flags); *dres = dev; return (0); } int make_dev_s(struct make_dev_args *args, struct cdev **dres, const char *fmt, ...) { va_list ap; int res; va_start(ap, fmt); res = make_dev_sv(args, dres, fmt, ap); va_end(ap); return (res); } static int make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, va_list ap) { struct make_dev_args args; make_dev_args_init(&args); args.mda_flags = flags; args.mda_devsw = devsw; args.mda_cr = cr; args.mda_uid = uid; args.mda_gid = gid; args.mda_mode = mode; args.mda_unit = unit; return (make_dev_sv(&args, dres, fmt, ap)); } struct cdev * make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode, const char *fmt, ...) { struct cdev *dev; va_list ap; int res __unused; va_start(ap, fmt); res = make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt, ap); va_end(ap); KASSERT(res == 0 && dev != NULL, ("make_dev: failed make_dev_credv (error=%d)", res)); return (dev); } struct cdev * make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, ...) { struct cdev *dev; va_list ap; int res __unused; va_start(ap, fmt); res = make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap); va_end(ap); KASSERT(res == 0 && dev != NULL, ("make_dev_cred: failed make_dev_credv (error=%d)", res)); return (dev); } struct cdev * make_dev_credf(int flags, struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, ...) 
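/*
 * Illustrative contract note, not part of this change: with
 * MAKEDEV_CHECKNAME a bad or duplicate name makes make_dev_credf()
 * return NULL instead of panicking, so a hypothetical foo(4) attach
 * can fail gracefully:
 *
 *	dev = make_dev_credf(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
 *	    &foo_cdevsw, unit, NULL, UID_ROOT, GID_WHEEL, 0600,
 *	    "foo%d", unit);
 *	if (dev == NULL)
 *		return (EEXIST);
 */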
{ struct cdev *dev; va_list ap; int res; va_start(ap, fmt); res = make_dev_credv(flags, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap); va_end(ap); KASSERT(((flags & MAKEDEV_NOWAIT) != 0 && res == ENOMEM) || ((flags & MAKEDEV_CHECKNAME) != 0 && res != ENOMEM) || res == 0, ("make_dev_credf: failed make_dev_credv (error=%d)", res)); return (res == 0 ? dev : NULL); } int make_dev_p(int flags, struct cdev **cdev, struct cdevsw *devsw, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, ...) { va_list ap; int res; va_start(ap, fmt); res = make_dev_credv(flags, cdev, devsw, 0, cr, uid, gid, mode, fmt, ap); va_end(ap); KASSERT(((flags & MAKEDEV_NOWAIT) != 0 && res == ENOMEM) || ((flags & MAKEDEV_CHECKNAME) != 0 && res != ENOMEM) || res == 0, ("make_dev_p: failed make_dev_credv (error=%d)", res)); return (res); } static void dev_dependsl(struct cdev *pdev, struct cdev *cdev) { cdev->si_parent = pdev; cdev->si_flags |= SI_CHILD; LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings); } void dev_depends(struct cdev *pdev, struct cdev *cdev) { dev_lock(); dev_dependsl(pdev, cdev); dev_unlock(); } static int make_dev_alias_v(int flags, struct cdev **cdev, struct cdev *pdev, const char *fmt, va_list ap) { struct cdev *dev; int error; KASSERT(pdev != NULL, ("make_dev_alias_v: pdev is NULL")); KASSERT((flags & MAKEDEV_WAITOK) == 0 || (flags & MAKEDEV_NOWAIT) == 0, ("make_dev_alias_v: both WAITOK and NOWAIT specified")); KASSERT((flags & ~(MAKEDEV_WAITOK | MAKEDEV_NOWAIT | MAKEDEV_CHECKNAME)) == 0, ("make_dev_alias_v: invalid flags specified (flags=%02x)", flags)); dev = devfs_alloc(flags); if (dev == NULL) return (ENOMEM); dev_lock(); dev->si_flags |= SI_ALIAS; error = prep_devname(dev, fmt, ap); if (error != 0) { if ((flags & MAKEDEV_CHECKNAME) == 0) { panic("make_dev_alias_v: bad si_name " "(error=%d, si_name=%s)", error, dev->si_name); } dev_unlock(); devfs_free(dev); return (error); } dev->si_flags |= SI_NAMED; devfs_create(dev); dev_dependsl(pdev, dev); clean_unrhdrl(devfs_inos); dev_unlock(); notify_create(dev, flags); *cdev = dev; return (0); } struct cdev * make_dev_alias(struct cdev *pdev, const char *fmt, ...) { struct cdev *dev; va_list ap; int res __unused; va_start(ap, fmt); res = make_dev_alias_v(MAKEDEV_WAITOK, &dev, pdev, fmt, ap); va_end(ap); KASSERT(res == 0 && dev != NULL, ("make_dev_alias: failed make_dev_alias_v (error=%d)", res)); return (dev); } int make_dev_alias_p(int flags, struct cdev **cdev, struct cdev *pdev, const char *fmt, ...) { va_list ap; int res; va_start(ap, fmt); res = make_dev_alias_v(flags, cdev, pdev, fmt, ap); va_end(ap); return (res); } int make_dev_physpath_alias(int flags, struct cdev **cdev, struct cdev *pdev, struct cdev *old_alias, const char *physpath) { char *devfspath; int physpath_len; int max_parentpath_len; int parentpath_len; int devfspathbuf_len; int mflags; int ret; *cdev = NULL; devfspath = NULL; physpath_len = strlen(physpath); ret = EINVAL; if (physpath_len == 0) goto out; if (strncmp("id1,", physpath, 4) == 0) { physpath += 4; physpath_len -= 4; if (physpath_len == 0) goto out; } max_parentpath_len = SPECNAMELEN - physpath_len - /*/*/1; parentpath_len = strlen(pdev->si_name); if (max_parentpath_len < parentpath_len) { if (bootverbose) printf("WARNING: Unable to alias %s " "to %s/%s - path too long\n", pdev->si_name, physpath, pdev->si_name); ret = ENAMETOOLONG; goto out; } mflags = (flags & MAKEDEV_NOWAIT) ? 
M_NOWAIT : M_WAITOK; devfspathbuf_len = physpath_len + /*/*/1 + parentpath_len + /*NUL*/1; devfspath = malloc(devfspathbuf_len, M_DEVBUF, mflags); if (devfspath == NULL) { ret = ENOMEM; goto out; } sprintf(devfspath, "%s/%s", physpath, pdev->si_name); if (old_alias != NULL && strcmp(old_alias->si_name, devfspath) == 0) { /* Retain the existing alias. */ *cdev = old_alias; old_alias = NULL; ret = 0; } else { ret = make_dev_alias_p(flags, cdev, pdev, "%s", devfspath); } out: if (old_alias != NULL) destroy_dev(old_alias); if (devfspath != NULL) free(devfspath, M_DEVBUF); return (ret); } static void destroy_devl(struct cdev *dev) { struct cdevsw *csw; struct cdev_privdata *p; struct cdev_priv *cdp; dev_lock_assert_locked(); KASSERT(dev->si_flags & SI_NAMED, ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev))); KASSERT((dev->si_flags & SI_ETERNAL) == 0, ("WARNING: Driver mistake: destroy_dev on eternal %d\n", dev2unit(dev))); cdp = cdev2priv(dev); if ((cdp->cdp_flags & CDP_UNREF_DTR) == 0) { /* * Avoid race with dev_rel(), e.g. from the populate * loop. If CDP_UNREF_DTR flag is set, the reference * to be dropped at the end of destroy_devl() was * already taken by delist_dev_locked(). */ dev_refl(dev); devfs_destroy(dev); } /* Remove name marking */ dev->si_flags &= ~SI_NAMED; /* If we are a child, remove us from the parents list */ if (dev->si_flags & SI_CHILD) { LIST_REMOVE(dev, si_siblings); dev->si_flags &= ~SI_CHILD; } /* Kill our children */ while (!LIST_EMPTY(&dev->si_children)) destroy_devl(LIST_FIRST(&dev->si_children)); /* Remove from clone list */ if (dev->si_flags & SI_CLONELIST) { LIST_REMOVE(dev, si_clone); dev->si_flags &= ~SI_CLONELIST; } mtx_lock(&cdp->cdp_threadlock); csw = dev->si_devsw; dev->si_devsw = NULL; /* already NULL for SI_ALIAS */ while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) { csw->d_purge(dev); mtx_unlock(&cdp->cdp_threadlock); msleep(csw, &devmtx, PRIBIO, "devprg", hz/10); mtx_lock(&cdp->cdp_threadlock); if (dev->si_threadcount) printf("Still %lu threads in %s\n", dev->si_threadcount, devtoname(dev)); } while (dev->si_threadcount != 0) { /* Use unique dummy wait ident */ mtx_unlock(&cdp->cdp_threadlock); msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10); mtx_lock(&cdp->cdp_threadlock); } mtx_unlock(&cdp->cdp_threadlock); dev_unlock(); if ((cdp->cdp_flags & CDP_UNREF_DTR) == 0) { /* avoid out of order notify events */ notify_destroy(dev); } mtx_lock(&cdevpriv_mtx); while ((p = LIST_FIRST(&cdp->cdp_fdpriv)) != NULL) { devfs_destroy_cdevpriv(p); mtx_lock(&cdevpriv_mtx); } mtx_unlock(&cdevpriv_mtx); dev_lock(); dev->si_drv1 = 0; dev->si_drv2 = 0; if (!(dev->si_flags & SI_ALIAS)) { /* Remove from cdevsw list */ LIST_REMOVE(dev, si_list); /* If cdevsw has no more struct cdev *'s, clean it */ if (LIST_EMPTY(&csw->d_devs)) { fini_cdevsw(csw); wakeup(&csw->d_devs); } } dev->si_flags &= ~SI_ALIAS; cdp->cdp_flags &= ~CDP_UNREF_DTR; dev->si_refcount--; if (dev->si_refcount > 0) LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list); else dev_free_devlocked(dev); } static void delist_dev_locked(struct cdev *dev) { struct cdev_priv *cdp; struct cdev *child; dev_lock_assert_locked(); cdp = cdev2priv(dev); if ((cdp->cdp_flags & CDP_UNREF_DTR) != 0) return; cdp->cdp_flags |= CDP_UNREF_DTR; dev_refl(dev); devfs_destroy(dev); LIST_FOREACH(child, &dev->si_children, si_siblings) delist_dev_locked(child); dev_unlock(); /* ensure the destroy event is queued in order */ notify_destroy(dev); dev_lock(); } /* * This function will delist a character device and 
its children from * the directory listing and create a destroy event without waiting * for all character device references to go away. At some later point * destroy_dev() must be called to complete the character device * destruction. After calling this function the character device name * can instantly be re-used. */ void delist_dev(struct cdev *dev) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "delist_dev"); dev_lock(); delist_dev_locked(dev); dev_unlock(); } void destroy_dev(struct cdev *dev) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "destroy_dev"); dev_lock(); destroy_devl(dev); dev_unlock_and_free(); } const char * devtoname(struct cdev *dev) { return (dev->si_name); } int dev_stdclone(char *name, char **namep, const char *stem, int *unit) { int u, i; i = strlen(stem); if (strncmp(stem, name, i) != 0) return (0); if (!isdigit(name[i])) return (0); u = 0; if (name[i] == '0' && isdigit(name[i+1])) return (0); while (isdigit(name[i])) { u *= 10; u += name[i++] - '0'; } if (u > 0xffffff) return (0); *unit = u; if (namep) *namep = &name[i]; if (name[i]) return (2); return (1); } /* * Helper functions for cloning device drivers. * * The objective here is to make it unnecessary for the device drivers to * use rman or similar to manage their unit number space. Due to the way * we do "on-demand" devices, using rman or other "private" methods * will be very tricky to lock down properly once we lock down this file. * * Instead we give the drivers these routines which puts the struct cdev *'s * that are to be managed on their own list, and gives the driver the ability * to ask for the first free unit number or a given specified unit number. * * In addition these routines support paired devices (pty, nmdm and similar) * by respecting a number of "flag" bits in the minor number. * */ struct clonedevs { LIST_HEAD(,cdev) head; }; void clone_setup(struct clonedevs **cdp) { *cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO); LIST_INIT(&(*cdp)->head); } int clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra) { struct clonedevs *cd; struct cdev *dev, *ndev, *dl, *de; struct make_dev_args args; int unit, low, u; KASSERT(*cdp != NULL, ("clone_setup() not called in driver \"%s\"", csw->d_name)); KASSERT(!(extra & CLONE_UNITMASK), ("Illegal extra bits (0x%x) in clone_create", extra)); KASSERT(*up <= CLONE_UNITMASK, ("Too high unit (0x%x) in clone_create", *up)); KASSERT(csw->d_flags & D_NEEDMINOR, ("clone_create() on cdevsw without minor numbers")); /* * Search the list for a lot of things in one go: * A preexisting match is returned immediately. * The lowest free unit number if we are passed -1, and the place * in the list where we should insert that new element. * The place to insert a specified unit number, if applicable * the end of the list. 
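 *
 * Illustrative sketch, not part of this change: a dev_clone handler
 * for a hypothetical foo(4) lets clone_create() either hand back a
 * preexisting cdev or pick the unit, and constructs the cdev itself
 * only when clone_create() returns non-zero:
 *
 *	i = clone_create(&foo_clones, &foo_cdevsw, &u, dev, 0);
 *	if (i != 0)
 *		*dev = make_dev_credf(MAKEDEV_REF, &foo_cdevsw,
 *		    u, cred, UID_ROOT, GID_WHEEL, 0600, "foo%d", u);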
*/ unit = *up; ndev = devfs_alloc(MAKEDEV_WAITOK); dev_lock(); prep_cdevsw(csw, MAKEDEV_WAITOK); low = extra; de = dl = NULL; cd = *cdp; LIST_FOREACH(dev, &cd->head, si_clone) { KASSERT(dev->si_flags & SI_CLONELIST, ("Dev %p(%s) should be on clonelist", dev, dev->si_name)); u = dev2unit(dev); if (u == (unit | extra)) { *dp = dev; dev_unlock(); devfs_free(ndev); return (0); } if (unit == -1 && u == low) { low++; de = dev; continue; } else if (u < (unit | extra)) { de = dev; continue; } else if (u > (unit | extra)) { dl = dev; break; } } if (unit == -1) unit = low & CLONE_UNITMASK; make_dev_args_init(&args); args.mda_unit = unit | extra; args.mda_devsw = csw; dev = newdev(&args, ndev); if (dev->si_flags & SI_CLONELIST) { printf("dev %p (%s) is on clonelist\n", dev, dev->si_name); printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra); LIST_FOREACH(dev, &cd->head, si_clone) { printf("\t%p %s\n", dev, dev->si_name); } panic("foo"); } KASSERT(!(dev->si_flags & SI_CLONELIST), ("Dev %p(%s) should not be on clonelist", dev, dev->si_name)); if (dl != NULL) LIST_INSERT_BEFORE(dl, dev, si_clone); else if (de != NULL) LIST_INSERT_AFTER(de, dev, si_clone); else LIST_INSERT_HEAD(&cd->head, dev, si_clone); dev->si_flags |= SI_CLONELIST; *up = unit; dev_unlock_and_free(); return (1); } /* * Kill everything still on the list. The driver should already have * disposed of any softc hung of the struct cdev *'s at this time. */ void clone_cleanup(struct clonedevs **cdp) { struct cdev *dev; struct cdev_priv *cp; struct clonedevs *cd; cd = *cdp; if (cd == NULL) return; dev_lock(); while (!LIST_EMPTY(&cd->head)) { dev = LIST_FIRST(&cd->head); LIST_REMOVE(dev, si_clone); KASSERT(dev->si_flags & SI_CLONELIST, ("Dev %p(%s) should be on clonelist", dev, dev->si_name)); dev->si_flags &= ~SI_CLONELIST; cp = cdev2priv(dev); if (!(cp->cdp_flags & CDP_SCHED_DTR)) { cp->cdp_flags |= CDP_SCHED_DTR; KASSERT(dev->si_flags & SI_NAMED, ("Driver has goofed in cloning underways udev %jx unit %x", (uintmax_t)dev2udev(dev), dev2unit(dev))); destroy_devl(dev); } } dev_unlock_and_free(); free(cd, M_DEVBUF); *cdp = NULL; } static TAILQ_HEAD(, cdev_priv) dev_ddtr = TAILQ_HEAD_INITIALIZER(dev_ddtr); static TAILQ_HEAD(, cdev_priv) dev_ddtr_giant = TAILQ_HEAD_INITIALIZER(dev_ddtr_giant); static struct task dev_dtr_task = TASK_INITIALIZER(0, destroy_dev_tq, &dev_ddtr); static struct task dev_dtr_task_giant = TASK_INITIALIZER(0, destroy_dev_tq_giant, &dev_ddtr_giant); static void destroy_dev_tq(void *ctx, int pending) { TAILQ_HEAD(, cdev_priv) *ddtr = ctx; struct cdev_priv *cp; struct cdev *dev; void (*cb)(void *); void *cb_arg; dev_lock(); while (!TAILQ_EMPTY(ddtr)) { cp = TAILQ_FIRST(ddtr); dev = &cp->cdp_c; KASSERT(cp->cdp_flags & CDP_SCHED_DTR, ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp)); TAILQ_REMOVE(ddtr, cp, cdp_dtr_list); cb = cp->cdp_dtr_cb; cb_arg = cp->cdp_dtr_cb_arg; destroy_devl(dev); dev_unlock_and_free(); dev_rel(dev); if (cb != NULL) cb(cb_arg); dev_lock(); } dev_unlock(); } static void destroy_dev_tq_giant(void *ctx, int pending) { mtx_lock(&Giant); destroy_dev_tq(ctx, pending); mtx_unlock(&Giant); } /* * devmtx shall be locked on entry. devmtx will be unlocked after * function return. 
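 *
 * The return value is 1 if this call scheduled the destruction and
 * 0 if it had already been scheduled; either way devmtx has been
 * dropped by the time the function returns.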
*/ static int destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg) { struct cdev_priv *cp; bool need_giant; dev_lock_assert_locked(); cp = cdev2priv(dev); if (cp->cdp_flags & CDP_SCHED_DTR) { dev_unlock(); return (0); } dev_refl(dev); cp->cdp_flags |= CDP_SCHED_DTR; cp->cdp_dtr_cb = cb; cp->cdp_dtr_cb_arg = arg; need_giant = (dev->si_devsw->d_flags & D_NEEDGIANT) != 0; if (need_giant) TAILQ_INSERT_TAIL(&dev_ddtr_giant, cp, cdp_dtr_list); else TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list); dev_unlock(); if (need_giant) taskqueue_enqueue(taskqueue_thread, &dev_dtr_task_giant); else taskqueue_enqueue(taskqueue_thread, &dev_dtr_task); return (1); } int destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg) { dev_lock(); return (destroy_dev_sched_cbl(dev, cb, arg)); } int destroy_dev_sched(struct cdev *dev) { return (destroy_dev_sched_cb(dev, NULL, NULL)); } void destroy_dev_drain(struct cdevsw *csw) { dev_lock(); while (!LIST_EMPTY(&csw->d_devs)) { msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10); } dev_unlock(); } -void -drain_dev_clone_events(void) -{ - - sx_xlock(&clone_drain_lock); - sx_xunlock(&clone_drain_lock); -} - #include "opt_ddb.h" #ifdef DDB #include #include DB_SHOW_COMMAND(cdev, db_show_cdev) { struct cdev_priv *cdp; struct cdev *dev; u_int flags; char buf[512]; if (!have_addr) { TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) { dev = &cdp->cdp_c; db_printf("%s %p\n", dev->si_name, dev); if (db_pager_quit) break; } return; } dev = (struct cdev *)addr; cdp = cdev2priv(dev); db_printf("dev %s ref %d use %ld thr %ld inuse %u fdpriv %p\n", dev->si_name, dev->si_refcount, dev->si_usecount, dev->si_threadcount, cdp->cdp_inuse, cdp->cdp_fdpriv.lh_first); db_printf("devsw %p si_drv0 %d si_drv1 %p si_drv2 %p\n", dev->si_devsw, dev->si_drv0, dev->si_drv1, dev->si_drv2); flags = dev->si_flags; #define SI_FLAG(flag) do { \ if (flags & (flag)) { \ if (buf[0] != '\0') \ strlcat(buf, ", ", sizeof(buf)); \ strlcat(buf, (#flag) + 3, sizeof(buf)); \ flags &= ~(flag); \ } \ } while (0) buf[0] = '\0'; SI_FLAG(SI_ETERNAL); SI_FLAG(SI_ALIAS); SI_FLAG(SI_NAMED); SI_FLAG(SI_CHILD); SI_FLAG(SI_DUMPDEV); SI_FLAG(SI_CLONELIST); db_printf("si_flags %s\n", buf); flags = cdp->cdp_flags; #define CDP_FLAG(flag) do { \ if (flags & (flag)) { \ if (buf[0] != '\0') \ strlcat(buf, ", ", sizeof(buf)); \ strlcat(buf, (#flag) + 4, sizeof(buf)); \ flags &= ~(flag); \ } \ } while (0) buf[0] = '\0'; CDP_FLAG(CDP_ACTIVE); CDP_FLAG(CDP_SCHED_DTR); db_printf("cdp_flags %s\n", buf); } #endif diff --git a/sys/kern/tty_tty.c b/sys/kern/tty_tty.c index c10dac907498..8490a4421f5f 100644 --- a/sys/kern/tty_tty.c +++ b/sys/kern/tty_tty.c @@ -1,99 +1,97 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Poul-Henning Kamp. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include static d_open_t cttyopen; static struct cdevsw ctty_cdevsw = { .d_version = D_VERSION, .d_open = cttyopen, .d_name = "ctty", }; static struct cdev *ctty; static int cttyopen(struct cdev *dev, int flag, int mode, struct thread *td) { return (ENXIO); } static void ctty_clone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev) { struct proc *p; if (*dev != NULL) return; if (strcmp(name, "tty")) return; p = curproc; - sx_sunlock(&clone_drain_lock); sx_slock(&proctree_lock); - sx_slock(&clone_drain_lock); dev_lock(); if (!(p->p_flag & P_CONTROLT)) *dev = ctty; else if (p->p_session->s_ttyvp == NULL) *dev = ctty; else if (p->p_session->s_ttyvp->v_type == VBAD || p->p_session->s_ttyvp->v_rdev == NULL) { /* e.g. s_ttyvp was revoked */ *dev = ctty; } else *dev = p->p_session->s_ttyvp->v_rdev; dev_refl(*dev); dev_unlock(); sx_sunlock(&proctree_lock); } static void ctty_drvinit(void *unused) { EVENTHANDLER_REGISTER(dev_clone, ctty_clone, 0, 1000); ctty = make_dev_credf(MAKEDEV_ETERNAL, &ctty_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0666, "ctty"); } SYSINIT(cttydev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,ctty_drvinit,NULL); diff --git a/sys/net/if_tuntap.c b/sys/net/if_tuntap.c index bd9fc811d19f..5e1e60933caa 100644 --- a/sys/net/if_tuntap.c +++ b/sys/net/if_tuntap.c @@ -1,2012 +1,2011 @@ /* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 1999-2000 by Maksim Yevmenkin * All rights reserved. * Copyright (c) 2019 Kyle Evans * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * BASED ON: * ------------------------------------------------------------------------- * * Copyright (c) 1988, Julian Onions * Nottingham University 1987. * * This source may be freely distributed, however I would be interested * in any changes that are made. * * This driver takes packets off the IP i/f and hands them up to a * user process to have its wicked way with. This driver has it's * roots in a similar driver written by Phil Cockcroft (formerly) at * UCL. This driver is based much more on read/write/poll mode of * operation though. * * $FreeBSD$ */ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #endif #ifdef INET6 #include #include #endif #include #include #include #include #include #include #include #include #include struct tuntap_driver; /* * tun_list is protected by global tunmtx. Other mutable fields are * protected by tun->tun_mtx, or by their owning subsystem. tun_dev is * static for the duration of a tunnel interface. */ struct tuntap_softc { TAILQ_ENTRY(tuntap_softc) tun_list; struct cdev *tun_alias; struct cdev *tun_dev; u_short tun_flags; /* misc flags */ #define TUN_OPEN 0x0001 #define TUN_INITED 0x0002 #define TUN_UNUSED1 0x0008 #define TUN_UNUSED2 0x0010 #define TUN_LMODE 0x0020 #define TUN_RWAIT 0x0040 #define TUN_ASYNC 0x0080 #define TUN_IFHEAD 0x0100 #define TUN_DYING 0x0200 #define TUN_L2 0x0400 #define TUN_VMNET 0x0800 #define TUN_DRIVER_IDENT_MASK (TUN_L2 | TUN_VMNET) #define TUN_READY (TUN_OPEN | TUN_INITED) pid_t tun_pid; /* owning pid */ struct ifnet *tun_ifp; /* the interface */ struct sigio *tun_sigio; /* async I/O info */ struct tuntap_driver *tun_drv; /* appropriate driver */ struct selinfo tun_rsel; /* read select */ struct mtx tun_mtx; /* softc field mutex */ struct cv tun_cv; /* for ref'd dev destroy */ struct ether_addr tun_ether; /* remote address */ int tun_busy; /* busy count */ int tun_vhdrlen; /* virtio-net header length */ }; #define TUN2IFP(sc) ((sc)->tun_ifp) #define TUNDEBUG if (tundebug) if_printf #define TUN_LOCK(tp) mtx_lock(&(tp)->tun_mtx) #define TUN_UNLOCK(tp) mtx_unlock(&(tp)->tun_mtx) #define TUN_LOCK_ASSERT(tp) mtx_assert(&(tp)->tun_mtx, MA_OWNED); #define TUN_VMIO_FLAG_MASK 0x0fff /* * Interface capabilities of a tap device that supports the virtio-net * header. */ #define TAP_VNET_HDR_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 \ | IFCAP_VLAN_HWCSUM \ | IFCAP_TSO | IFCAP_LRO \ | IFCAP_VLAN_HWTSO) #define TAP_ALL_OFFLOAD (CSUM_TSO | CSUM_TCP | CSUM_UDP |\ CSUM_TCP_IPV6 | CSUM_UDP_IPV6) /* * All mutable global variables in if_tun are locked using tunmtx, with * the exception of tundebug, which is used unlocked, and the drivers' *clones, * which are static after setup. 
*/ static struct mtx tunmtx; static eventhandler_tag arrival_tag; static eventhandler_tag clone_tag; static const char tunname[] = "tun"; static const char tapname[] = "tap"; static const char vmnetname[] = "vmnet"; static MALLOC_DEFINE(M_TUN, tunname, "Tunnel Interface"); static int tundebug = 0; static int tundclone = 1; static int tap_allow_uopen = 0; /* allow user devfs cloning */ static int tapuponopen = 0; /* IFF_UP on open() */ static int tapdclone = 1; /* enable devfs cloning */ static TAILQ_HEAD(,tuntap_softc) tunhead = TAILQ_HEAD_INITIALIZER(tunhead); SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, ""); static struct sx tun_ioctl_sx; SX_SYSINIT(tun_ioctl_sx, &tun_ioctl_sx, "tun_ioctl"); SYSCTL_DECL(_net_link); /* tun */ static SYSCTL_NODE(_net_link, OID_AUTO, tun, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "IP tunnel software network interface"); SYSCTL_INT(_net_link_tun, OID_AUTO, devfs_cloning, CTLFLAG_RWTUN, &tundclone, 0, "Enable legacy devfs interface creation"); /* tap */ static SYSCTL_NODE(_net_link, OID_AUTO, tap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Ethernet tunnel software network interface"); SYSCTL_INT(_net_link_tap, OID_AUTO, user_open, CTLFLAG_RW, &tap_allow_uopen, 0, "Enable legacy devfs interface creation for all users"); SYSCTL_INT(_net_link_tap, OID_AUTO, up_on_open, CTLFLAG_RW, &tapuponopen, 0, "Bring interface up when /dev/tap is opened"); SYSCTL_INT(_net_link_tap, OID_AUTO, devfs_cloning, CTLFLAG_RWTUN, &tapdclone, 0, "Enable legacy devfs interface creation"); SYSCTL_INT(_net_link_tap, OID_AUTO, debug, CTLFLAG_RW, &tundebug, 0, ""); static int tun_create_device(struct tuntap_driver *drv, int unit, struct ucred *cr, struct cdev **dev, const char *name); static int tun_busy_locked(struct tuntap_softc *tp); static void tun_unbusy_locked(struct tuntap_softc *tp); static int tun_busy(struct tuntap_softc *tp); static void tun_unbusy(struct tuntap_softc *tp); static int tuntap_name2info(const char *name, int *unit, int *flags); static void tunclone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev); static void tuncreate(struct cdev *dev); static void tundtor(void *data); static void tunrename(void *arg, struct ifnet *ifp); static int tunifioctl(struct ifnet *, u_long, caddr_t); static void tuninit(struct ifnet *); static void tunifinit(void *xtp); static int tuntapmodevent(module_t, int, void *); static int tunoutput(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *ro); static void tunstart(struct ifnet *); static void tunstart_l2(struct ifnet *); static int tun_clone_match(struct if_clone *ifc, const char *name); static int tap_clone_match(struct if_clone *ifc, const char *name); static int vmnet_clone_match(struct if_clone *ifc, const char *name); static int tun_clone_create(struct if_clone *, char *, size_t, caddr_t); static int tun_clone_destroy(struct if_clone *, struct ifnet *); static void tun_vnethdr_set(struct ifnet *ifp, int vhdrlen); static d_open_t tunopen; static d_read_t tunread; static d_write_t tunwrite; static d_ioctl_t tunioctl; static d_poll_t tunpoll; static d_kqfilter_t tunkqfilter; static int tunkqread(struct knote *, long); static int tunkqwrite(struct knote *, long); static void tunkqdetach(struct knote *); static struct filterops tun_read_filterops = { .f_isfd = 1, .f_attach = NULL, .f_detach = tunkqdetach, .f_event = tunkqread, }; static struct filterops tun_write_filterops = { .f_isfd = 1, .f_attach = NULL, .f_detach = tunkqdetach, .f_event = tunkqwrite, }; static struct tuntap_driver { 
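	/*
	 * One entry per flavor (tun, tap, vmnet): the shared cdevsw,
	 * the unit-number allocator, the list of cloned cdevs, and the
	 * if_clone hooks that tie the character devices to their
	 * network-interface cloner.
	 */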
struct cdevsw cdevsw; int ident_flags; struct unrhdr *unrhdr; struct clonedevs *clones; ifc_match_t *clone_match_fn; ifc_create_t *clone_create_fn; ifc_destroy_t *clone_destroy_fn; } tuntap_drivers[] = { { .ident_flags = 0, .cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDMINOR, .d_open = tunopen, .d_read = tunread, .d_write = tunwrite, .d_ioctl = tunioctl, .d_poll = tunpoll, .d_kqfilter = tunkqfilter, .d_name = tunname, }, .clone_match_fn = tun_clone_match, .clone_create_fn = tun_clone_create, .clone_destroy_fn = tun_clone_destroy, }, { .ident_flags = TUN_L2, .cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDMINOR, .d_open = tunopen, .d_read = tunread, .d_write = tunwrite, .d_ioctl = tunioctl, .d_poll = tunpoll, .d_kqfilter = tunkqfilter, .d_name = tapname, }, .clone_match_fn = tap_clone_match, .clone_create_fn = tun_clone_create, .clone_destroy_fn = tun_clone_destroy, }, { .ident_flags = TUN_L2 | TUN_VMNET, .cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDMINOR, .d_open = tunopen, .d_read = tunread, .d_write = tunwrite, .d_ioctl = tunioctl, .d_poll = tunpoll, .d_kqfilter = tunkqfilter, .d_name = vmnetname, }, .clone_match_fn = vmnet_clone_match, .clone_create_fn = tun_clone_create, .clone_destroy_fn = tun_clone_destroy, }, }; struct tuntap_driver_cloner { SLIST_ENTRY(tuntap_driver_cloner) link; struct tuntap_driver *drv; struct if_clone *cloner; }; VNET_DEFINE_STATIC(SLIST_HEAD(, tuntap_driver_cloner), tuntap_driver_cloners) = SLIST_HEAD_INITIALIZER(tuntap_driver_cloners); #define V_tuntap_driver_cloners VNET(tuntap_driver_cloners) /* * Mechanism for marking a tunnel device as busy so that we can safely do some * orthogonal operations (such as operations on devices) without racing against * tun_destroy. tun_destroy will wait on the condvar if we're at all busy or * open, to be woken up when the condition is alleviated. */ static int tun_busy_locked(struct tuntap_softc *tp) { TUN_LOCK_ASSERT(tp); if ((tp->tun_flags & TUN_DYING) != 0) { /* * Perhaps unintuitive, but the device is busy going away. * Other interpretations of EBUSY from tun_busy make little * sense, since making a busy device even more busy doesn't * sound like a problem. */ return (EBUSY); } ++tp->tun_busy; return (0); } static void tun_unbusy_locked(struct tuntap_softc *tp) { TUN_LOCK_ASSERT(tp); KASSERT(tp->tun_busy != 0, ("tun_unbusy: called for non-busy tunnel")); --tp->tun_busy; /* Wake up anything that may be waiting on our busy tunnel. */ if (tp->tun_busy == 0) cv_broadcast(&tp->tun_cv); } static int tun_busy(struct tuntap_softc *tp) { int ret; TUN_LOCK(tp); ret = tun_busy_locked(tp); TUN_UNLOCK(tp); return (ret); } static void tun_unbusy(struct tuntap_softc *tp) { TUN_LOCK(tp); tun_unbusy_locked(tp); TUN_UNLOCK(tp); } /* * Sets unit and/or flags given the device name. Must be called with correct * vnet context. */ static int tuntap_name2info(const char *name, int *outunit, int *outflags) { struct tuntap_driver *drv; struct tuntap_driver_cloner *drvc; char *dname; int flags, unit; bool found; if (name == NULL) return (EINVAL); /* * Needed for dev_stdclone, but dev_stdclone will not modify, it just * wants to be able to pass back a char * through the second param. We * will always set that as NULL here, so we'll fake it. 
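 *
 * For illustration: dev_stdclone("tun5", NULL, "tun", &u) returns 1
 * and sets u to 5; "tun5abc" returns 2 (trailing text after the
 * unit); "tap5" against the stem "tun" returns 0 and leaves u
 * untouched.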
*/ dname = __DECONST(char *, name); found = false; KASSERT(!SLIST_EMPTY(&V_tuntap_driver_cloners), ("tuntap_driver_cloners failed to initialize")); SLIST_FOREACH(drvc, &V_tuntap_driver_cloners, link) { KASSERT(drvc->drv != NULL, ("tuntap_driver_cloners entry not properly initialized")); drv = drvc->drv; if (strcmp(name, drv->cdevsw.d_name) == 0) { found = true; unit = -1; flags = drv->ident_flags; break; } if (dev_stdclone(dname, NULL, drv->cdevsw.d_name, &unit) == 1) { found = true; flags = drv->ident_flags; break; } } if (!found) return (ENXIO); if (outunit != NULL) *outunit = unit; if (outflags != NULL) *outflags = flags; return (0); } /* * Get driver information from a set of flags specified. Masks the identifying * part of the flags and compares it against all of the available * tuntap_drivers. Must be called with correct vnet context. */ static struct tuntap_driver * tuntap_driver_from_flags(int tun_flags) { struct tuntap_driver *drv; struct tuntap_driver_cloner *drvc; KASSERT(!SLIST_EMPTY(&V_tuntap_driver_cloners), ("tuntap_driver_cloners failed to initialize")); SLIST_FOREACH(drvc, &V_tuntap_driver_cloners, link) { KASSERT(drvc->drv != NULL, ("tuntap_driver_cloners entry not properly initialized")); drv = drvc->drv; if ((tun_flags & TUN_DRIVER_IDENT_MASK) == drv->ident_flags) return (drv); } return (NULL); } static int tun_clone_match(struct if_clone *ifc, const char *name) { int tunflags; if (tuntap_name2info(name, NULL, &tunflags) == 0) { if ((tunflags & TUN_L2) == 0) return (1); } return (0); } static int tap_clone_match(struct if_clone *ifc, const char *name) { int tunflags; if (tuntap_name2info(name, NULL, &tunflags) == 0) { if ((tunflags & (TUN_L2 | TUN_VMNET)) == TUN_L2) return (1); } return (0); } static int vmnet_clone_match(struct if_clone *ifc, const char *name) { int tunflags; if (tuntap_name2info(name, NULL, &tunflags) == 0) { if ((tunflags & TUN_VMNET) != 0) return (1); } return (0); } static int tun_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params) { struct tuntap_driver *drv; struct cdev *dev; int err, i, tunflags, unit; tunflags = 0; /* The name here tells us exactly what we're creating */ err = tuntap_name2info(name, &unit, &tunflags); if (err != 0) return (err); drv = tuntap_driver_from_flags(tunflags); if (drv == NULL) return (ENXIO); if (unit != -1) { /* If this unit number is still available that's okay. 
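 * An explicit request such as "ifconfig tun5 create" must find the
 * unit free in the driver's unrhdr, while a wildcard request simply
 * takes the lowest free unit.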
*/ if (alloc_unr_specific(drv->unrhdr, unit) == -1) return (EEXIST); } else { unit = alloc_unr(drv->unrhdr); } snprintf(name, IFNAMSIZ, "%s%d", drv->cdevsw.d_name, unit); /* find any existing device, or allocate new unit number */ dev = NULL; i = clone_create(&drv->clones, &drv->cdevsw, &unit, &dev, 0); /* No preexisting struct cdev *, create one */ if (i != 0) i = tun_create_device(drv, unit, NULL, &dev, name); if (i == 0) tuncreate(dev); return (i); } static void tunclone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev) { char devname[SPECNAMELEN + 1]; struct tuntap_driver *drv; int append_unit, i, u, tunflags; bool mayclone; if (*dev != NULL) return; tunflags = 0; CURVNET_SET(CRED_TO_VNET(cred)); if (tuntap_name2info(name, &u, &tunflags) != 0) goto out; /* Not recognized */ if (u != -1 && u > IF_MAXUNIT) goto out; /* Unit number too high */ mayclone = priv_check_cred(cred, PRIV_NET_IFCREATE) == 0; if ((tunflags & TUN_L2) != 0) { /* tap/vmnet allow user open with a sysctl */ mayclone = (mayclone || tap_allow_uopen) && tapdclone; } else { mayclone = mayclone && tundclone; } /* * If tun cloning is enabled, only the superuser can create an * interface. */ if (!mayclone) goto out; if (u == -1) append_unit = 1; else append_unit = 0; drv = tuntap_driver_from_flags(tunflags); if (drv == NULL) goto out; /* find any existing device, or allocate new unit number */ i = clone_create(&drv->clones, &drv->cdevsw, &u, dev, 0); if (i) { if (append_unit) { namelen = snprintf(devname, sizeof(devname), "%s%d", name, u); name = devname; } i = tun_create_device(drv, u, cred, dev, name); } if (i == 0) if_clone_create(name, namelen, NULL); out: CURVNET_RESTORE(); } static void tun_destroy(struct tuntap_softc *tp) { TUN_LOCK(tp); tp->tun_flags |= TUN_DYING; if (tp->tun_busy != 0) cv_wait_unlock(&tp->tun_cv, &tp->tun_mtx); else TUN_UNLOCK(tp); CURVNET_SET(TUN2IFP(tp)->if_vnet); /* destroy_dev will take care of any alias. 
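 * Note that destroy_dev() also drains the device: it sleeps until
 * every thread has left the cdevsw methods, and only after that is
 * the ifnet detached and the softc freed below.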
*/ destroy_dev(tp->tun_dev); seldrain(&tp->tun_rsel); knlist_clear(&tp->tun_rsel.si_note, 0); knlist_destroy(&tp->tun_rsel.si_note); if ((tp->tun_flags & TUN_L2) != 0) { ether_ifdetach(TUN2IFP(tp)); } else { bpfdetach(TUN2IFP(tp)); if_detach(TUN2IFP(tp)); } sx_xlock(&tun_ioctl_sx); TUN2IFP(tp)->if_softc = NULL; sx_xunlock(&tun_ioctl_sx); free_unr(tp->tun_drv->unrhdr, TUN2IFP(tp)->if_dunit); if_free(TUN2IFP(tp)); mtx_destroy(&tp->tun_mtx); cv_destroy(&tp->tun_cv); free(tp, M_TUN); CURVNET_RESTORE(); } static int tun_clone_destroy(struct if_clone *ifc __unused, struct ifnet *ifp) { struct tuntap_softc *tp = ifp->if_softc; mtx_lock(&tunmtx); TAILQ_REMOVE(&tunhead, tp, tun_list); mtx_unlock(&tunmtx); tun_destroy(tp); return (0); } static void vnet_tun_init(const void *unused __unused) { struct tuntap_driver *drv; struct tuntap_driver_cloner *drvc; int i; for (i = 0; i < nitems(tuntap_drivers); ++i) { drv = &tuntap_drivers[i]; drvc = malloc(sizeof(*drvc), M_TUN, M_WAITOK | M_ZERO); drvc->drv = drv; drvc->cloner = if_clone_advanced(drv->cdevsw.d_name, 0, drv->clone_match_fn, drv->clone_create_fn, drv->clone_destroy_fn); SLIST_INSERT_HEAD(&V_tuntap_driver_cloners, drvc, link); }; } VNET_SYSINIT(vnet_tun_init, SI_SUB_PROTO_IF, SI_ORDER_ANY, vnet_tun_init, NULL); static void vnet_tun_uninit(const void *unused __unused) { struct tuntap_driver_cloner *drvc; while (!SLIST_EMPTY(&V_tuntap_driver_cloners)) { drvc = SLIST_FIRST(&V_tuntap_driver_cloners); SLIST_REMOVE_HEAD(&V_tuntap_driver_cloners, link); if_clone_detach(drvc->cloner); free(drvc, M_TUN); } } VNET_SYSUNINIT(vnet_tun_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY, vnet_tun_uninit, NULL); static void tun_uninit(const void *unused __unused) { struct tuntap_driver *drv; struct tuntap_softc *tp; int i; EVENTHANDLER_DEREGISTER(ifnet_arrival_event, arrival_tag); EVENTHANDLER_DEREGISTER(dev_clone, clone_tag); - drain_dev_clone_events(); mtx_lock(&tunmtx); while ((tp = TAILQ_FIRST(&tunhead)) != NULL) { TAILQ_REMOVE(&tunhead, tp, tun_list); mtx_unlock(&tunmtx); tun_destroy(tp); mtx_lock(&tunmtx); } mtx_unlock(&tunmtx); for (i = 0; i < nitems(tuntap_drivers); ++i) { drv = &tuntap_drivers[i]; delete_unrhdr(drv->unrhdr); clone_cleanup(&drv->clones); } mtx_destroy(&tunmtx); } SYSUNINIT(tun_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY, tun_uninit, NULL); static struct tuntap_driver * tuntap_driver_from_ifnet(const struct ifnet *ifp) { struct tuntap_driver *drv; int i; if (ifp == NULL) return (NULL); for (i = 0; i < nitems(tuntap_drivers); ++i) { drv = &tuntap_drivers[i]; if (strcmp(ifp->if_dname, drv->cdevsw.d_name) == 0) return (drv); } return (NULL); } static int tuntapmodevent(module_t mod, int type, void *data) { struct tuntap_driver *drv; int i; switch (type) { case MOD_LOAD: mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF); for (i = 0; i < nitems(tuntap_drivers); ++i) { drv = &tuntap_drivers[i]; clone_setup(&drv->clones); drv->unrhdr = new_unrhdr(0, IF_MAXUNIT, &tunmtx); } arrival_tag = EVENTHANDLER_REGISTER(ifnet_arrival_event, tunrename, 0, 1000); if (arrival_tag == NULL) return (ENOMEM); clone_tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000); if (clone_tag == NULL) return (ENOMEM); break; case MOD_UNLOAD: /* See tun_uninit, so it's done after the vnet_sysuninit() */ break; default: return EOPNOTSUPP; } return 0; } static moduledata_t tuntap_mod = { "if_tuntap", tuntapmodevent, 0 }; /* We'll only ever have these two, so no need for a macro. 
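 * The two stub moduledatas below carry no event handler; they appear
 * to exist only so the historical if_tun and if_tap module names
 * keep resolving (e.g. for kldload and module dependencies) while
 * the real handler lives in if_tuntap.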
*/ static moduledata_t tun_mod = { "if_tun", NULL, 0 }; static moduledata_t tap_mod = { "if_tap", NULL, 0 }; DECLARE_MODULE(if_tuntap, tuntap_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(if_tuntap, 1); DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(if_tun, 1); DECLARE_MODULE(if_tap, tap_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(if_tap, 1); static int tun_create_device(struct tuntap_driver *drv, int unit, struct ucred *cr, struct cdev **dev, const char *name) { struct make_dev_args args; struct tuntap_softc *tp; int error; tp = malloc(sizeof(*tp), M_TUN, M_WAITOK | M_ZERO); mtx_init(&tp->tun_mtx, "tun_mtx", NULL, MTX_DEF); cv_init(&tp->tun_cv, "tun_condvar"); tp->tun_flags = drv->ident_flags; tp->tun_drv = drv; make_dev_args_init(&args); if (cr != NULL) args.mda_flags = MAKEDEV_REF; args.mda_devsw = &drv->cdevsw; args.mda_cr = cr; args.mda_uid = UID_UUCP; args.mda_gid = GID_DIALER; args.mda_mode = 0600; args.mda_unit = unit; args.mda_si_drv1 = tp; error = make_dev_s(&args, dev, "%s", name); if (error != 0) { free(tp, M_TUN); return (error); } KASSERT((*dev)->si_drv1 != NULL, ("Failed to set si_drv1 at %s creation", name)); tp->tun_dev = *dev; knlist_init_mtx(&tp->tun_rsel.si_note, &tp->tun_mtx); mtx_lock(&tunmtx); TAILQ_INSERT_TAIL(&tunhead, tp, tun_list); mtx_unlock(&tunmtx); return (0); } static void tunstart(struct ifnet *ifp) { struct tuntap_softc *tp = ifp->if_softc; struct mbuf *m; TUNDEBUG(ifp, "starting\n"); if (ALTQ_IS_ENABLED(&ifp->if_snd)) { IFQ_LOCK(&ifp->if_snd); IFQ_POLL_NOLOCK(&ifp->if_snd, m); if (m == NULL) { IFQ_UNLOCK(&ifp->if_snd); return; } IFQ_UNLOCK(&ifp->if_snd); } TUN_LOCK(tp); if (tp->tun_flags & TUN_RWAIT) { tp->tun_flags &= ~TUN_RWAIT; wakeup(tp); } selwakeuppri(&tp->tun_rsel, PZERO + 1); KNOTE_LOCKED(&tp->tun_rsel.si_note, 0); if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) { TUN_UNLOCK(tp); pgsigio(&tp->tun_sigio, SIGIO, 0); } else TUN_UNLOCK(tp); } /* * tunstart_l2 * * queue packets from higher level ready to put out */ static void tunstart_l2(struct ifnet *ifp) { struct tuntap_softc *tp = ifp->if_softc; TUNDEBUG(ifp, "starting\n"); /* * do not junk pending output if we are in VMnet mode. * XXX: can this do any harm because of queue overflow? */ TUN_LOCK(tp); if (((tp->tun_flags & TUN_VMNET) == 0) && ((tp->tun_flags & TUN_READY) != TUN_READY)) { struct mbuf *m; /* Unlocked read. */ TUNDEBUG(ifp, "not ready, tun_flags = 0x%x\n", tp->tun_flags); for (;;) { IF_DEQUEUE(&ifp->if_snd, m); if (m != NULL) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else break; } TUN_UNLOCK(tp); return; } ifp->if_drv_flags |= IFF_DRV_OACTIVE; if (!IFQ_IS_EMPTY(&ifp->if_snd)) { if (tp->tun_flags & TUN_RWAIT) { tp->tun_flags &= ~TUN_RWAIT; wakeup(tp); } if ((tp->tun_flags & TUN_ASYNC) && (tp->tun_sigio != NULL)) { TUN_UNLOCK(tp); pgsigio(&tp->tun_sigio, SIGIO, 0); TUN_LOCK(tp); } selwakeuppri(&tp->tun_rsel, PZERO+1); KNOTE_LOCKED(&tp->tun_rsel.si_note, 0); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* obytes are counted in ether_output */ } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; TUN_UNLOCK(tp); } /* tunstart_l2 */ /* XXX: should return an error code so it can fail. 
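 * Until then the function panics when if_alloc() fails, so callers
 * may treat interface creation as infallible once the cdev itself
 * exists.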
*/ static void tuncreate(struct cdev *dev) { struct tuntap_driver *drv; struct tuntap_softc *tp; struct ifnet *ifp; struct ether_addr eaddr; int iflags; u_char type; tp = dev->si_drv1; KASSERT(tp != NULL, ("si_drv1 should have been initialized at creation")); drv = tp->tun_drv; iflags = IFF_MULTICAST; if ((tp->tun_flags & TUN_L2) != 0) { type = IFT_ETHER; iflags |= IFF_BROADCAST | IFF_SIMPLEX; } else { type = IFT_PPP; iflags |= IFF_POINTOPOINT; } ifp = tp->tun_ifp = if_alloc(type); if (ifp == NULL) panic("%s%d: failed to if_alloc() interface.\n", drv->cdevsw.d_name, dev2unit(dev)); ifp->if_softc = tp; if_initname(ifp, drv->cdevsw.d_name, dev2unit(dev)); ifp->if_ioctl = tunifioctl; ifp->if_flags = iflags; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_capabilities |= IFCAP_LINKSTATE; ifp->if_capenable |= IFCAP_LINKSTATE; if ((tp->tun_flags & TUN_L2) != 0) { ifp->if_init = tunifinit; ifp->if_start = tunstart_l2; ether_gen_addr(ifp, &eaddr); ether_ifattach(ifp, eaddr.octet); } else { ifp->if_mtu = TUNMTU; ifp->if_start = tunstart; ifp->if_output = tunoutput; ifp->if_snd.ifq_drv_maxlen = 0; IFQ_SET_READY(&ifp->if_snd); if_attach(ifp); bpfattach(ifp, DLT_NULL, sizeof(u_int32_t)); } TUN_LOCK(tp); tp->tun_flags |= TUN_INITED; TUN_UNLOCK(tp); TUNDEBUG(ifp, "interface %s is created, minor = %#x\n", ifp->if_xname, dev2unit(dev)); } static void tunrename(void *arg __unused, struct ifnet *ifp) { struct tuntap_softc *tp; int error; if ((ifp->if_flags & IFF_RENAMING) == 0) return; if (tuntap_driver_from_ifnet(ifp) == NULL) return; /* * We need to grab the ioctl sx long enough to make sure the softc is * still there. If it is, we can safely try to busy the tun device. * The busy may fail if the device is currently dying, in which case * we do nothing. If it doesn't fail, the busy count stops the device * from dying until we've created the alias (that will then be * subsequently destroyed). */ sx_xlock(&tun_ioctl_sx); tp = ifp->if_softc; if (tp == NULL) { sx_xunlock(&tun_ioctl_sx); return; } error = tun_busy(tp); sx_xunlock(&tun_ioctl_sx); if (error != 0) return; if (tp->tun_alias != NULL) { destroy_dev(tp->tun_alias); tp->tun_alias = NULL; } if (strcmp(ifp->if_xname, tp->tun_dev->si_name) == 0) goto out; /* * Failure's ok, aliases are created on a best effort basis. If a * tun user/consumer decides to rename the interface to conflict with * another device (non-ifnet) on the system, we will assume they know * what they are doing. make_dev_alias_p won't touch tun_alias on * failure, so we use it but ignore the return value. 
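 * Passing MAKEDEV_CHECKNAME is what turns a colliding or malformed
 * alias name into an ordinary error return here instead of the panic
 * that make_dev_alias() would raise.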
*/ make_dev_alias_p(MAKEDEV_CHECKNAME, &tp->tun_alias, tp->tun_dev, "%s", ifp->if_xname); out: tun_unbusy(tp); } static int tunopen(struct cdev *dev, int flag, int mode, struct thread *td) { struct ifnet *ifp; struct tuntap_softc *tp; int error __diagused, tunflags; tunflags = 0; CURVNET_SET(TD_TO_VNET(td)); error = tuntap_name2info(dev->si_name, NULL, &tunflags); if (error != 0) { CURVNET_RESTORE(); return (error); /* Shouldn't happen */ } tp = dev->si_drv1; KASSERT(tp != NULL, ("si_drv1 should have been initialized at creation")); TUN_LOCK(tp); if ((tp->tun_flags & TUN_INITED) == 0) { TUN_UNLOCK(tp); CURVNET_RESTORE(); return (ENXIO); } if ((tp->tun_flags & (TUN_OPEN | TUN_DYING)) != 0) { TUN_UNLOCK(tp); CURVNET_RESTORE(); return (EBUSY); } error = tun_busy_locked(tp); KASSERT(error == 0, ("Must be able to busy an unopen tunnel")); ifp = TUN2IFP(tp); if ((tp->tun_flags & TUN_L2) != 0) { bcopy(IF_LLADDR(ifp), tp->tun_ether.octet, sizeof(tp->tun_ether.octet)); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (tapuponopen) ifp->if_flags |= IFF_UP; } tp->tun_pid = td->td_proc->p_pid; tp->tun_flags |= TUN_OPEN; if_link_state_change(ifp, LINK_STATE_UP); TUNDEBUG(ifp, "open\n"); TUN_UNLOCK(tp); /* * This can fail with either ENOENT or EBUSY. This is in the middle of * d_open, so ENOENT should not be possible. EBUSY is possible, but * the only cdevpriv dtor being set will be tundtor and the softc being * passed is constant for a given cdev. We ignore the possible error * because of this as either "unlikely" or "not actually a problem." */ (void)devfs_set_cdevpriv(tp, tundtor); CURVNET_RESTORE(); return (0); } /* * tundtor - tear down the device - mark i/f down & delete * routing info */ static void tundtor(void *data) { struct proc *p; struct tuntap_softc *tp; struct ifnet *ifp; bool l2tun; tp = data; p = curproc; ifp = TUN2IFP(tp); TUN_LOCK(tp); /* * Realistically, we can't be obstinate here. This only means that the * tuntap device was closed out of order, and the last closer wasn't the * controller. These are still good to know about, though, as software * should avoid multiple processes with a tuntap device open and * ill-defined transfer of control (e.g., handoff, TUNSIFPID, close in * parent). */ if (p->p_pid != tp->tun_pid) { log(LOG_INFO, "pid %d (%s), %s: tun/tap protocol violation, non-controlling process closed last.\n", p->p_pid, p->p_comm, tp->tun_dev->si_name); } /* * junk all pending output */ CURVNET_SET(ifp->if_vnet); l2tun = false; if ((tp->tun_flags & TUN_L2) != 0) { l2tun = true; IF_DRAIN(&ifp->if_snd); } else { IFQ_PURGE(&ifp->if_snd); } /* For vmnet, we won't do most of the address/route bits */ if ((tp->tun_flags & TUN_VMNET) != 0 || (l2tun && (ifp->if_flags & IFF_LINK0) != 0)) goto out; if (ifp->if_flags & IFF_UP) { TUN_UNLOCK(tp); if_down(ifp); TUN_LOCK(tp); } /* Delete all addresses and routes which reference this interface. 
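 * The softc lock is dropped around the calls that may sleep; the
 * busy reference taken at open keeps tun_destroy() from freeing the
 * softc while it is unlocked here.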
*/ if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; TUN_UNLOCK(tp); if_purgeaddrs(ifp); TUN_LOCK(tp); } out: if_link_state_change(ifp, LINK_STATE_DOWN); CURVNET_RESTORE(); funsetown(&tp->tun_sigio); selwakeuppri(&tp->tun_rsel, PZERO + 1); KNOTE_LOCKED(&tp->tun_rsel.si_note, 0); TUNDEBUG (ifp, "closed\n"); tp->tun_flags &= ~TUN_OPEN; tp->tun_pid = 0; tun_vnethdr_set(ifp, 0); tun_unbusy_locked(tp); TUN_UNLOCK(tp); } static void tuninit(struct ifnet *ifp) { struct tuntap_softc *tp = ifp->if_softc; TUNDEBUG(ifp, "tuninit\n"); TUN_LOCK(tp); ifp->if_drv_flags |= IFF_DRV_RUNNING; if ((tp->tun_flags & TUN_L2) == 0) { ifp->if_flags |= IFF_UP; getmicrotime(&ifp->if_lastchange); TUN_UNLOCK(tp); } else { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; TUN_UNLOCK(tp); /* attempt to start output */ tunstart_l2(ifp); } } /* * Used only for l2 tunnel. */ static void tunifinit(void *xtp) { struct tuntap_softc *tp; tp = (struct tuntap_softc *)xtp; tuninit(tp->tun_ifp); } /* * To be called under TUN_LOCK. Update ifp->if_hwassist according to the * current value of ifp->if_capenable. */ static void tun_caps_changed(struct ifnet *ifp) { uint64_t hwassist = 0; TUN_LOCK_ASSERT((struct tuntap_softc *)ifp->if_softc); if (ifp->if_capenable & IFCAP_TXCSUM) hwassist |= CSUM_TCP | CSUM_UDP; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; if (ifp->if_capenable & IFCAP_TSO4) hwassist |= CSUM_IP_TSO; if (ifp->if_capenable & IFCAP_TSO6) hwassist |= CSUM_IP6_TSO; ifp->if_hwassist = hwassist; } /* * To be called under TUN_LOCK. Update tp->tun_vhdrlen and adjust * if_capabilities and if_capenable as needed. */ static void tun_vnethdr_set(struct ifnet *ifp, int vhdrlen) { struct tuntap_softc *tp = ifp->if_softc; TUN_LOCK_ASSERT(tp); if (tp->tun_vhdrlen == vhdrlen) return; /* * Update if_capabilities to reflect the * functionalities offered by the virtio-net * header. */ if (vhdrlen != 0) ifp->if_capabilities |= TAP_VNET_HDR_CAPS; else ifp->if_capabilities &= ~TAP_VNET_HDR_CAPS; /* * Disable any capabilities that we don't * support anymore. */ ifp->if_capenable &= ifp->if_capabilities; tun_caps_changed(ifp); tp->tun_vhdrlen = vhdrlen; TUNDEBUG(ifp, "vnet_hdr_len=%d, if_capabilities=%x\n", vhdrlen, ifp->if_capabilities); } /* * Process an ioctl request. 
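*/

tun_vnethdr_set() is reached from the TAPSVNETHDR ioctl: a virtual-machine backend negotiates the virtio-net header length, and the driver adjusts if_capabilities and if_hwassist to match. A hedged sketch (the literal 10 is sizeof(struct virtio_net_hdr); the handler only accepts 0, 10, or 12):

#include <sys/ioctl.h>
#include <net/if.h>
#include <net/if_tap.h>
#include <err.h>
#include <fcntl.h>

int
main(void)
{
        int fd, vhdrlen = 10;   /* sizeof(struct virtio_net_hdr) */

        if ((fd = open("/dev/tap0", O_RDWR)) < 0)
                err(1, "open(/dev/tap0)");
        if (ioctl(fd, TAPSVNETHDR, &vhdrlen) == -1)
                err(1, "TAPSVNETHDR");
        if (ioctl(fd, TAPGVNETHDR, &vhdrlen) == -1)     /* read it back */
                err(1, "TAPGVNETHDR");
        return (0);
}

/*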
*/ static int tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct tuntap_softc *tp; struct ifstat *ifs; struct ifmediareq *ifmr; int dummy, error = 0; bool l2tun; ifmr = NULL; sx_xlock(&tun_ioctl_sx); tp = ifp->if_softc; if (tp == NULL) { error = ENXIO; goto bad; } l2tun = (tp->tun_flags & TUN_L2) != 0; switch(cmd) { case SIOCGIFSTATUS: ifs = (struct ifstat *)data; TUN_LOCK(tp); if (tp->tun_pid) snprintf(ifs->ascii, sizeof(ifs->ascii), "\tOpened by PID %d\n", tp->tun_pid); else ifs->ascii[0] = '\0'; TUN_UNLOCK(tp); break; case SIOCSIFADDR: if (l2tun) error = ether_ioctl(ifp, cmd, data); else tuninit(ifp); if (error == 0) TUNDEBUG(ifp, "address set\n"); break; case SIOCSIFMTU: ifp->if_mtu = ifr->ifr_mtu; TUNDEBUG(ifp, "mtu set\n"); break; case SIOCSIFFLAGS: case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCGIFMEDIA: if (!l2tun) { error = EINVAL; break; } ifmr = (struct ifmediareq *)data; dummy = ifmr->ifm_count; ifmr->ifm_count = 1; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (tp->tun_flags & TUN_OPEN) ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_current = ifmr->ifm_active; if (dummy >= 1) { int media = IFM_ETHER; error = copyout(&media, ifmr->ifm_ulist, sizeof(int)); } break; case SIOCSIFCAP: TUN_LOCK(tp); ifp->if_capenable = ifr->ifr_reqcap; tun_caps_changed(ifp); TUN_UNLOCK(tp); VLAN_CAPABILITIES(ifp); break; default: if (l2tun) { error = ether_ioctl(ifp, cmd, data); } else { error = EINVAL; } } bad: sx_xunlock(&tun_ioctl_sx); return (error); } /* * tunoutput - queue packets from higher level ready to put out. */ static int tunoutput(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst, struct route *ro) { struct tuntap_softc *tp = ifp->if_softc; u_short cached_tun_flags; int error; u_int32_t af; TUNDEBUG (ifp, "tunoutput\n"); #ifdef MAC error = mac_ifnet_check_transmit(ifp, m0); if (error) { m_freem(m0); return (error); } #endif /* Could be unlocked read? */ TUN_LOCK(tp); cached_tun_flags = tp->tun_flags; TUN_UNLOCK(tp); if ((cached_tun_flags & TUN_READY) != TUN_READY) { TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags); m_freem (m0); return (EHOSTDOWN); } if ((ifp->if_flags & IFF_UP) != IFF_UP) { m_freem (m0); return (EHOSTDOWN); } /* BPF writes need to be handled specially. */ if (dst->sa_family == AF_UNSPEC) bcopy(dst->sa_data, &af, sizeof(af)); else af = RO_GET_FAMILY(ro, dst); if (bpf_peers_present(ifp->if_bpf)) bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0); /* prepend sockaddr? this may abort if the mbuf allocation fails */ if (cached_tun_flags & TUN_LMODE) { /* allocate space for sockaddr */ M_PREPEND(m0, dst->sa_len, M_NOWAIT); /* if allocation failed drop packet */ if (m0 == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return (ENOBUFS); } else { bcopy(dst, m0->m_data, dst->sa_len); } } if (cached_tun_flags & TUN_IFHEAD) { /* Prepend the address family */ M_PREPEND(m0, 4, M_NOWAIT); /* if allocation failed drop packet */ if (m0 == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return (ENOBUFS); } else *(u_int32_t *)m0->m_data = htonl(af); } else { #ifdef INET if (af != AF_INET) #endif { m_freem(m0); return (EAFNOSUPPORT); } } error = (ifp->if_transmit)(ifp, m0); if (error) return (ENOBUFS); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); return (0); } /* * the cdevsw interface is now pretty minimal. 
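*/

The SIOCGIFSTATUS branch in tunifioctl() above is what lets ifconfig(8) print "Opened by PID n": the PID is returned as free-form text in struct ifstat. A hedged sketch ("tun0" is illustrative):

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct ifstat ifs;
        int s;

        if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) < 0)
                err(1, "socket");
        memset(&ifs, 0, sizeof(ifs));
        strlcpy(ifs.ifs_name, "tun0", sizeof(ifs.ifs_name));
        if (ioctl(s, SIOCGIFSTATUS, &ifs) == -1)
                err(1, "SIOCGIFSTATUS");
        fputs(ifs.ascii, stdout);       /* "\tOpened by PID n\n" or "" */
        close(s);
        return (0);
}

/*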
*/ static int tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct ifreq ifr, *ifrp; struct tuntap_softc *tp = dev->si_drv1; struct ifnet *ifp = TUN2IFP(tp); struct tuninfo *tunp; int error, iflags, ival; bool l2tun; l2tun = (tp->tun_flags & TUN_L2) != 0; if (l2tun) { /* tap specific ioctls */ switch(cmd) { /* VMware/VMnet port ioctl's */ #if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD4) case _IO('V', 0): ival = IOCPARM_IVAL(data); data = (caddr_t)&ival; /* FALLTHROUGH */ #endif case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */ iflags = *(int *)data; iflags &= TUN_VMIO_FLAG_MASK; iflags &= ~IFF_CANTCHANGE; iflags |= IFF_UP; TUN_LOCK(tp); ifp->if_flags = iflags | (ifp->if_flags & IFF_CANTCHANGE); TUN_UNLOCK(tp); return (0); case SIOCGIFADDR: /* get MAC address of the remote side */ TUN_LOCK(tp); bcopy(&tp->tun_ether.octet, data, sizeof(tp->tun_ether.octet)); TUN_UNLOCK(tp); return (0); case SIOCSIFADDR: /* set MAC address of the remote side */ TUN_LOCK(tp); bcopy(data, &tp->tun_ether.octet, sizeof(tp->tun_ether.octet)); TUN_UNLOCK(tp); return (0); case TAPSVNETHDR: ival = *(int *)data; if (ival != 0 && ival != sizeof(struct virtio_net_hdr) && ival != sizeof(struct virtio_net_hdr_mrg_rxbuf)) { return (EINVAL); } TUN_LOCK(tp); tun_vnethdr_set(ifp, ival); TUN_UNLOCK(tp); return (0); case TAPGVNETHDR: TUN_LOCK(tp); *(int *)data = tp->tun_vhdrlen; TUN_UNLOCK(tp); return (0); } /* Fall through to the common ioctls if unhandled */ } else { switch (cmd) { case TUNSLMODE: TUN_LOCK(tp); if (*(int *)data) { tp->tun_flags |= TUN_LMODE; tp->tun_flags &= ~TUN_IFHEAD; } else tp->tun_flags &= ~TUN_LMODE; TUN_UNLOCK(tp); return (0); case TUNSIFHEAD: TUN_LOCK(tp); if (*(int *)data) { tp->tun_flags |= TUN_IFHEAD; tp->tun_flags &= ~TUN_LMODE; } else tp->tun_flags &= ~TUN_IFHEAD; TUN_UNLOCK(tp); return (0); case TUNGIFHEAD: TUN_LOCK(tp); *(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 
1 : 0; TUN_UNLOCK(tp); return (0); case TUNSIFMODE: /* deny this if UP */ if (TUN2IFP(tp)->if_flags & IFF_UP) return (EBUSY); switch (*(int *)data & ~IFF_MULTICAST) { case IFF_POINTOPOINT: case IFF_BROADCAST: TUN_LOCK(tp); TUN2IFP(tp)->if_flags &= ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST); TUN2IFP(tp)->if_flags |= *(int *)data; TUN_UNLOCK(tp); break; default: return (EINVAL); } return (0); case TUNSIFPID: TUN_LOCK(tp); tp->tun_pid = curthread->td_proc->p_pid; TUN_UNLOCK(tp); return (0); } /* Fall through to the common ioctls if unhandled */ } switch (cmd) { case TUNGIFNAME: ifrp = (struct ifreq *)data; strlcpy(ifrp->ifr_name, TUN2IFP(tp)->if_xname, IFNAMSIZ); return (0); case TUNSIFINFO: tunp = (struct tuninfo *)data; if (TUN2IFP(tp)->if_type != tunp->type) return (EPROTOTYPE); TUN_LOCK(tp); if (TUN2IFP(tp)->if_mtu != tunp->mtu) { strlcpy(ifr.ifr_name, if_name(TUN2IFP(tp)), IFNAMSIZ); ifr.ifr_mtu = tunp->mtu; CURVNET_SET(TUN2IFP(tp)->if_vnet); error = ifhwioctl(SIOCSIFMTU, TUN2IFP(tp), (caddr_t)&ifr, td); CURVNET_RESTORE(); if (error) { TUN_UNLOCK(tp); return (error); } } TUN2IFP(tp)->if_baudrate = tunp->baudrate; TUN_UNLOCK(tp); break; case TUNGIFINFO: tunp = (struct tuninfo *)data; TUN_LOCK(tp); tunp->mtu = TUN2IFP(tp)->if_mtu; tunp->type = TUN2IFP(tp)->if_type; tunp->baudrate = TUN2IFP(tp)->if_baudrate; TUN_UNLOCK(tp); break; case TUNSDEBUG: tundebug = *(int *)data; break; case TUNGDEBUG: *(int *)data = tundebug; break; case FIONBIO: break; case FIOASYNC: TUN_LOCK(tp); if (*(int *)data) tp->tun_flags |= TUN_ASYNC; else tp->tun_flags &= ~TUN_ASYNC; TUN_UNLOCK(tp); break; case FIONREAD: if (!IFQ_IS_EMPTY(&TUN2IFP(tp)->if_snd)) { struct mbuf *mb; IFQ_LOCK(&TUN2IFP(tp)->if_snd); IFQ_POLL_NOLOCK(&TUN2IFP(tp)->if_snd, mb); for (*(int *)data = 0; mb != NULL; mb = mb->m_next) *(int *)data += mb->m_len; IFQ_UNLOCK(&TUN2IFP(tp)->if_snd); } else *(int *)data = 0; break; case FIOSETOWN: return (fsetown(*(int *)data, &tp->tun_sigio)); case FIOGETOWN: *(int *)data = fgetown(&tp->tun_sigio); return (0); /* This is deprecated, FIOSETOWN should be used instead. */ case TIOCSPGRP: return (fsetown(-(*(int *)data), &tp->tun_sigio)); /* This is deprecated, FIOGETOWN should be used instead. */ case TIOCGPGRP: *(int *)data = -fgetown(&tp->tun_sigio); return (0); default: return (ENOTTY); } return (0); } /* * The cdevsw read interface - reads a packet at a time, or at * least as much of a packet as can be read. 
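*/

A hedged sketch tying the per-mode and common ioctls together: TUNSIFHEAD (handled in the l3-only block above) selects the 4-byte address-family framing used by the read/write paths that follow, TUNGIFINFO snapshots mtu/type/baudrate into struct tuninfo, and FIONREAD returns the byte length of the next queued packet:

#include <sys/ioctl.h>
#include <sys/filio.h>
#include <net/if.h>
#include <net/if_tun.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
        struct tuninfo ti;
        int fd, nread, on = 1;

        if ((fd = open("/dev/tun", O_RDWR)) < 0)
                err(1, "open");
        if (ioctl(fd, TUNSIFHEAD, &on) == -1)   /* AF header on read/write */
                err(1, "TUNSIFHEAD");
        if (ioctl(fd, TUNGIFINFO, &ti) == -1)
                err(1, "TUNGIFINFO");
        if (ioctl(fd, FIONREAD, &nread) == -1)
                err(1, "FIONREAD");
        printf("mtu %d, %d bytes queued\n", ti.mtu, nread);
        return (0);
}

/*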
*/ static int tunread(struct cdev *dev, struct uio *uio, int flag) { struct tuntap_softc *tp = dev->si_drv1; struct ifnet *ifp = TUN2IFP(tp); struct mbuf *m; size_t len; int error = 0; TUNDEBUG (ifp, "read\n"); TUN_LOCK(tp); if ((tp->tun_flags & TUN_READY) != TUN_READY) { TUN_UNLOCK(tp); TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags); return (EHOSTDOWN); } tp->tun_flags &= ~TUN_RWAIT; for (;;) { IFQ_DEQUEUE(&ifp->if_snd, m); if (m != NULL) break; if (flag & O_NONBLOCK) { TUN_UNLOCK(tp); return (EWOULDBLOCK); } tp->tun_flags |= TUN_RWAIT; error = mtx_sleep(tp, &tp->tun_mtx, PCATCH | (PZERO + 1), "tunread", 0); if (error != 0) { TUN_UNLOCK(tp); return (error); } } TUN_UNLOCK(tp); if ((tp->tun_flags & TUN_L2) != 0) BPF_MTAP(ifp, m); len = min(tp->tun_vhdrlen, uio->uio_resid); if (len > 0) { struct virtio_net_hdr_mrg_rxbuf vhdr; bzero(&vhdr, sizeof(vhdr)); if (m->m_pkthdr.csum_flags & TAP_ALL_OFFLOAD) { m = virtio_net_tx_offload(ifp, m, false, &vhdr.hdr); } TUNDEBUG(ifp, "txvhdr: f %u, gt %u, hl %u, " "gs %u, cs %u, co %u\n", vhdr.hdr.flags, vhdr.hdr.gso_type, vhdr.hdr.hdr_len, vhdr.hdr.gso_size, vhdr.hdr.csum_start, vhdr.hdr.csum_offset); error = uiomove(&vhdr, len, uio); } while (m && uio->uio_resid > 0 && error == 0) { len = min(uio->uio_resid, m->m_len); if (len != 0) error = uiomove(mtod(m, void *), len, uio); m = m_free(m); } if (m) { TUNDEBUG(ifp, "Dropping mbuf\n"); m_freem(m); } return (error); } static int tunwrite_l2(struct tuntap_softc *tp, struct mbuf *m, struct virtio_net_hdr_mrg_rxbuf *vhdr) { struct epoch_tracker et; struct ether_header *eh; struct ifnet *ifp; ifp = TUN2IFP(tp); /* * Only pass a unicast frame to ether_input(), if it would * actually have been received by non-virtual hardware. */ if (m->m_len < sizeof(struct ether_header)) { m_freem(m); return (0); } eh = mtod(m, struct ether_header *); if (eh && (ifp->if_flags & IFF_PROMISC) == 0 && !ETHER_IS_MULTICAST(eh->ether_dhost) && bcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) != 0) { m_freem(m); return (0); } if (vhdr != NULL && virtio_net_rx_csum(m, &vhdr->hdr)) { m_freem(m); return (0); } /* Pass packet up to parent. */ CURVNET_SET(ifp->if_vnet); NET_EPOCH_ENTER(et); (*ifp->if_input)(ifp, m); NET_EPOCH_EXIT(et); CURVNET_RESTORE(); /* ibytes are counted in parent */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); return (0); } static int tunwrite_l3(struct tuntap_softc *tp, struct mbuf *m) { struct epoch_tracker et; struct ifnet *ifp; int family, isr; ifp = TUN2IFP(tp); /* Could be unlocked read? */ TUN_LOCK(tp); if (tp->tun_flags & TUN_IFHEAD) { TUN_UNLOCK(tp); if (m->m_len < sizeof(family) && (m = m_pullup(m, sizeof(family))) == NULL) return (ENOBUFS); family = ntohl(*mtod(m, u_int32_t *)); m_adj(m, sizeof(family)); } else { TUN_UNLOCK(tp); family = AF_INET; } BPF_MTAP2(ifp, &family, sizeof(family), m); switch (family) { #ifdef INET case AF_INET: isr = NETISR_IP; break; #endif #ifdef INET6 case AF_INET6: isr = NETISR_IPV6; break; #endif default: m_freem(m); return (EAFNOSUPPORT); } random_harvest_queue(m, sizeof(*m), RANDOM_NET_TUN); if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); CURVNET_SET(ifp->if_vnet); M_SETFIB(m, ifp->if_fib); NET_EPOCH_ENTER(et); netisr_dispatch(isr, m); NET_EPOCH_EXIT(et); CURVNET_RESTORE(); return (0); } /* * the cdevsw write interface - an atomic write is a packet - or else! 
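*/

With TUN_IFHEAD set, tunread() above hands userland the packet as dequeued and tunwrite_l3() expects writes to begin with the address family in network byte order. A hedged helper sketch of that framing (the buffer size and helper name are illustrative, not part of this change):

#include <sys/types.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#define PKT_MAX 1500            /* illustrative; real code sizes from the MTU */

/* Read one packet; returns the payload length and stores AF_INET/AF_INET6. */
static ssize_t
tun_read_packet(int fd, char *pkt, size_t len, uint32_t *af)
{
        char buf[sizeof(uint32_t) + PKT_MAX];
        ssize_t n;

        /* Each read(2) returns at most one packet ("an atomic write
         * is a packet" holds for reads, too). */
        n = read(fd, buf, sizeof(buf));
        if (n < (ssize_t)sizeof(uint32_t))
                return (-1);
        memcpy(af, buf, sizeof(uint32_t));
        *af = ntohl(*af);
        n -= sizeof(uint32_t);
        memcpy(pkt, buf + sizeof(uint32_t), (size_t)n < len ? (size_t)n : len);
        return (n);
}

/*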
*/ static int tunwrite(struct cdev *dev, struct uio *uio, int flag) { struct virtio_net_hdr_mrg_rxbuf vhdr; struct tuntap_softc *tp; struct ifnet *ifp; struct mbuf *m; uint32_t mru; int align, vhdrlen, error; bool l2tun; tp = dev->si_drv1; ifp = TUN2IFP(tp); TUNDEBUG(ifp, "tunwrite\n"); if ((ifp->if_flags & IFF_UP) != IFF_UP) /* ignore silently */ return (0); if (uio->uio_resid == 0) return (0); l2tun = (tp->tun_flags & TUN_L2) != 0; mru = l2tun ? TAPMRU : TUNMRU; vhdrlen = tp->tun_vhdrlen; align = 0; if (l2tun) { align = ETHER_ALIGN; mru += vhdrlen; } else if ((tp->tun_flags & TUN_IFHEAD) != 0) mru += sizeof(uint32_t); /* family */ if (uio->uio_resid < 0 || uio->uio_resid > mru) { TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid); return (EIO); } if (vhdrlen > 0) { error = uiomove(&vhdr, vhdrlen, uio); if (error != 0) return (error); TUNDEBUG(ifp, "txvhdr: f %u, gt %u, hl %u, " "gs %u, cs %u, co %u\n", vhdr.hdr.flags, vhdr.hdr.gso_type, vhdr.hdr.hdr_len, vhdr.hdr.gso_size, vhdr.hdr.csum_start, vhdr.hdr.csum_offset); } if ((m = m_uiotombuf(uio, M_NOWAIT, 0, align, M_PKTHDR)) == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return (ENOBUFS); } m->m_pkthdr.rcvif = ifp; #ifdef MAC mac_ifnet_create_mbuf(ifp, m); #endif if (l2tun) return (tunwrite_l2(tp, m, vhdrlen > 0 ? &vhdr : NULL)); return (tunwrite_l3(tp, m)); } /* * tunpoll - the poll interface; it is really only useful for reads. * The write detect always returns true: a write never blocks, it * either accepts the packet or drops it. */ static int tunpoll(struct cdev *dev, int events, struct thread *td) { struct tuntap_softc *tp = dev->si_drv1; struct ifnet *ifp = TUN2IFP(tp); int revents = 0; TUNDEBUG(ifp, "tunpoll\n"); if (events & (POLLIN | POLLRDNORM)) { IFQ_LOCK(&ifp->if_snd); if (!IFQ_IS_EMPTY(&ifp->if_snd)) { TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len); revents |= events & (POLLIN | POLLRDNORM); } else { TUNDEBUG(ifp, "tunpoll waiting\n"); selrecord(td, &tp->tun_rsel); } IFQ_UNLOCK(&ifp->if_snd); } revents |= events & (POLLOUT | POLLWRNORM); return (revents); } /* * tunkqfilter - support for the kevent() system call. */ static int tunkqfilter(struct cdev *dev, struct knote *kn) { struct tuntap_softc *tp = dev->si_drv1; struct ifnet *ifp = TUN2IFP(tp); switch(kn->kn_filter) { case EVFILT_READ: TUNDEBUG(ifp, "%s kqfilter: EVFILT_READ, minor = %#x\n", ifp->if_xname, dev2unit(dev)); kn->kn_fop = &tun_read_filterops; break; case EVFILT_WRITE: TUNDEBUG(ifp, "%s kqfilter: EVFILT_WRITE, minor = %#x\n", ifp->if_xname, dev2unit(dev)); kn->kn_fop = &tun_write_filterops; break; default: TUNDEBUG(ifp, "%s kqfilter: invalid filter, minor = %#x\n", ifp->if_xname, dev2unit(dev)); return(EINVAL); } kn->kn_hook = tp; knlist_add(&tp->tun_rsel.si_note, kn, 0); return (0); } /* * Return true if there is data in the interface queue. */ static int tunkqread(struct knote *kn, long hint) { int ret; struct tuntap_softc *tp = kn->kn_hook; struct cdev *dev = tp->tun_dev; struct ifnet *ifp = TUN2IFP(tp); if ((kn->kn_data = ifp->if_snd.ifq_len) > 0) { TUNDEBUG(ifp, "%s have data in the queue. Len = %d, minor = %#x\n", ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev)); ret = 1; } else { TUNDEBUG(ifp, "%s waiting for data, minor = %#x\n", ifp->if_xname, dev2unit(dev)); ret = 0; } return (ret); } /* * Can always write; always return the MTU in kn->kn_data.
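*/

A hedged sketch of the kevent path implemented above: EVFILT_READ fires when the interface send queue is non-empty, and tunkqread() reports the queue length through kn_data (visible to userland as kev.data):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        struct kevent kev;
        int fd, kq;

        if ((fd = open("/dev/tun", O_RDWR)) < 0)
                err(1, "open");
        if ((kq = kqueue()) < 0)
                err(1, "kqueue");
        EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
                err(1, "kevent register");
        if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)   /* block for data */
                err(1, "kevent wait");
        printf("queue length %jd\n", (intmax_t)kev.data);
        return (0);
}

/*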
*/ static int tunkqwrite(struct knote *kn, long hint) { struct tuntap_softc *tp = kn->kn_hook; struct ifnet *ifp = TUN2IFP(tp); kn->kn_data = ifp->if_mtu; return (1); } static void tunkqdetach(struct knote *kn) { struct tuntap_softc *tp = kn->kn_hook; knlist_remove(&tp->tun_rsel.si_note, kn, 0); } diff --git a/sys/sys/conf.h b/sys/sys/conf.h index 9f60dcaf62fa..ad6ffc31dc2a 100644 --- a/sys/sys/conf.h +++ b/sys/sys/conf.h @@ -1,387 +1,386 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 2000 * Poul-Henning Kamp. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)conf.h 8.5 (Berkeley) 1/9/95 * $FreeBSD$ */ #ifndef _SYS_CONF_H_ #define _SYS_CONF_H_ #ifdef _KERNEL #include #else #include #endif struct snapdata; struct devfs_dirent; struct cdevsw; struct file; struct cdev { void *si_spare0; u_int si_flags; #define SI_ETERNAL 0x0001 /* never destroyed */ #define SI_ALIAS 0x0002 /* carrier of alias name */ #define SI_NAMED 0x0004 /* make_dev{_alias} has been called */ #define SI_UNUSED1 0x0008 /* unused */ #define SI_CHILD 0x0010 /* child of another struct cdev **/ #define SI_DUMPDEV 0x0080 /* is kernel dumpdev */ #define SI_CLONELIST 0x0200 /* on a clone list */ #define SI_UNMAPPED 0x0400 /* can handle unmapped I/O */ #define SI_NOSPLIT 0x0800 /* I/O should not be split up */ struct timespec si_atime; struct timespec si_ctime; struct timespec si_mtime; uid_t si_uid; gid_t si_gid; mode_t si_mode; struct ucred *si_cred; /* cached clone-time credential */ int si_drv0; int si_refcount; LIST_ENTRY(cdev) si_list; LIST_ENTRY(cdev) si_clone; LIST_HEAD(, cdev) si_children; LIST_ENTRY(cdev) si_siblings; struct cdev *si_parent; struct mount *si_mountpt; void *si_drv1, *si_drv2; struct cdevsw *si_devsw; int si_iosize_max; /* maximum I/O size (for physio &al) */ u_long si_usecount; u_long si_threadcount; union { struct snapdata *__sid_snapdata; } __si_u; char si_name[SPECNAMELEN + 1]; }; #define si_snapdata __si_u.__sid_snapdata #ifdef _KERNEL /* * Definitions of device driver entry switches */ struct bio; struct buf; struct dumperinfo; struct kerneldumpheader; struct thread; struct uio; struct knote; struct clonedevs; struct vm_object; struct vnode; typedef int d_open_t(struct cdev *dev, int oflags, int devtype, struct thread *td); typedef int d_fdopen_t(struct cdev *dev, int oflags, struct thread *td, struct file *fp); typedef int d_close_t(struct cdev *dev, int fflag, int devtype, struct thread *td); typedef void d_strategy_t(struct bio *bp); typedef int d_ioctl_t(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); typedef int d_read_t(struct cdev *dev, struct uio *uio, int ioflag); typedef int d_write_t(struct cdev *dev, struct uio *uio, int ioflag); typedef int d_poll_t(struct cdev *dev, int events, struct thread *td); typedef int d_kqfilter_t(struct cdev *dev, struct knote *kn); typedef int d_mmap_t(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr); typedef int d_mmap_single_t(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot); typedef void d_purge_t(struct cdev *dev); typedef int dumper_t( void *_priv, /* Private to the driver. */ void *_virtual, /* Virtual (mapped) address. */ off_t _offset, /* Byte-offset to write at. */ size_t _length); /* Number of bytes to dump. */ typedef int dumper_start_t(struct dumperinfo *di, void *key, uint32_t keysize); typedef int dumper_hdr_t(struct dumperinfo *di, struct kerneldumpheader *kdh); #endif /* _KERNEL */ /* * Types for d_flags. */ #define D_TAPE 0x0001 #define D_DISK 0x0002 #define D_TTY 0x0004 #define D_MEM 0x0008 /* /dev/(k)mem */ /* Defined uid and gid values. */ #define UID_ROOT 0 #define UID_BIN 3 #define UID_UUCP 66 #define UID_NOBODY 65534 #define GID_WHEEL 0 #define GID_KMEM 2 #define GID_TTY 4 #define GID_OPERATOR 5 #define GID_BIN 7 #define GID_GAMES 13 #define GID_VIDEO 44 #define GID_RT_PRIO 47 #define GID_ID_PRIO 48 #define GID_DIALER 68 #define GID_NOGROUP 65533 #define GID_NOBODY 65534 #ifdef _KERNEL #define D_TYPEMASK 0xffff /* * Flags for d_flags which the drivers can set. 
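*/

The d_*_t typedefs above exist so a driver can forward-declare its entry points and have the compiler check them against the cdevsw contract. The conventional idiom (the "echo" names are illustrative) is:

static d_open_t         echo_open;      /* checked against d_open_t's args */
static d_read_t         echo_read;

as used by the registration sketch further below.

/*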
*/ #define D_TRACKCLOSE 0x00080000 /* track all closes */ #define D_MMAP_ANON 0x00100000 /* special treatment in vm_mmap.c */ #define D_GIANTOK 0x00200000 /* suppress warning about using Giant */ #define D_NEEDGIANT 0x00400000 /* driver wants Giant */ #define D_NEEDMINOR 0x00800000 /* driver uses clone_create() */ /* * Version numbers. */ #define D_VERSION_00 0x20011966 #define D_VERSION_01 0x17032005 /* Add d_uid,gid,mode & kind */ #define D_VERSION_02 0x28042009 /* Add d_mmap_single */ #define D_VERSION_03 0x17122009 /* d_mmap takes memattr,vm_ooffset_t */ #define D_VERSION_04 0x5c48c353 /* SPECNAMELEN bumped to MAXNAMLEN */ #define D_VERSION D_VERSION_04 /* * Flags used for internal housekeeping */ #define D_INIT 0x80000000 /* cdevsw initialized */ /* * Character device switch table */ struct cdevsw { int d_version; u_int d_flags; const char *d_name; d_open_t *d_open; d_fdopen_t *d_fdopen; d_close_t *d_close; d_read_t *d_read; d_write_t *d_write; d_ioctl_t *d_ioctl; d_poll_t *d_poll; d_mmap_t *d_mmap; d_strategy_t *d_strategy; void *d_spare0; d_kqfilter_t *d_kqfilter; d_purge_t *d_purge; d_mmap_single_t *d_mmap_single; int32_t d_spare1[3]; void *d_spare2[3]; /* These fields should not be messed with by drivers */ LIST_HEAD(, cdev) d_devs; int d_spare3; union { struct cdevsw *gianttrick; SLIST_ENTRY(cdevsw) postfree_list; } __d_giant; }; #define d_gianttrick __d_giant.gianttrick #define d_postfree_list __d_giant.postfree_list struct module; struct devsw_module_data { int (*chainevh)(struct module *, int, void *); /* next handler */ void *chainarg; /* arg for next event handler */ /* Do not initialize fields hereafter */ }; #define DEV_MODULE_ORDERED(name, evh, arg, ord) \ static moduledata_t name##_mod = { \ #name, \ evh, \ arg \ }; \ DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, ord) #define DEV_MODULE(name, evh, arg) \ DEV_MODULE_ORDERED(name, evh, arg, SI_ORDER_MIDDLE) void clone_setup(struct clonedevs **cdp); void clone_cleanup(struct clonedevs **); #define CLONE_UNITMASK 0xfffff #define CLONE_FLAG0 (CLONE_UNITMASK + 1) int clone_create(struct clonedevs **, struct cdevsw *, int *unit, struct cdev **dev, int extra); #define MAKEDEV_REF 0x01 #define MAKEDEV_WHTOUT 0x02 #define MAKEDEV_NOWAIT 0x04 #define MAKEDEV_WAITOK 0x08 #define MAKEDEV_ETERNAL 0x10 #define MAKEDEV_CHECKNAME 0x20 struct make_dev_args { size_t mda_size; int mda_flags; struct cdevsw *mda_devsw; struct ucred *mda_cr; uid_t mda_uid; gid_t mda_gid; int mda_mode; int mda_unit; void *mda_si_drv1; void *mda_si_drv2; }; void make_dev_args_init_impl(struct make_dev_args *_args, size_t _sz); #define make_dev_args_init(a) \ make_dev_args_init_impl((a), sizeof(struct make_dev_args)) void delist_dev(struct cdev *_dev); void destroy_dev(struct cdev *_dev); int destroy_dev_sched(struct cdev *dev); int destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg); void destroy_dev_drain(struct cdevsw *csw); -void drain_dev_clone_events(void); struct cdevsw *dev_refthread(struct cdev *_dev, int *_ref); struct cdevsw *devvn_refthread(struct vnode *vp, struct cdev **devp, int *_ref); void dev_relthread(struct cdev *_dev, int _ref); void dev_depends(struct cdev *_pdev, struct cdev *_cdev); void dev_ref(struct cdev *dev); void dev_refl(struct cdev *dev); void dev_rel(struct cdev *dev); struct cdev *make_dev(struct cdevsw *_devsw, int _unit, uid_t _uid, gid_t _gid, int _perms, const char *_fmt, ...)
__printflike(6, 7); struct cdev *make_dev_cred(struct cdevsw *_devsw, int _unit, struct ucred *_cr, uid_t _uid, gid_t _gid, int _perms, const char *_fmt, ...) __printflike(7, 8); struct cdev *make_dev_credf(int _flags, struct cdevsw *_devsw, int _unit, struct ucred *_cr, uid_t _uid, gid_t _gid, int _mode, const char *_fmt, ...) __printflike(8, 9); int make_dev_p(int _flags, struct cdev **_cdev, struct cdevsw *_devsw, struct ucred *_cr, uid_t _uid, gid_t _gid, int _mode, const char *_fmt, ...) __printflike(8, 9); int make_dev_s(struct make_dev_args *_args, struct cdev **_cdev, const char *_fmt, ...) __printflike(3, 4); struct cdev *make_dev_alias(struct cdev *_pdev, const char *_fmt, ...) __printflike(2, 3); int make_dev_alias_p(int _flags, struct cdev **_cdev, struct cdev *_pdev, const char *_fmt, ...) __printflike(4, 5); int make_dev_physpath_alias(int _flags, struct cdev **_cdev, struct cdev *_pdev, struct cdev *_old_alias, const char *_physpath); void dev_lock(void); void dev_unlock(void); #ifdef KLD_MODULE #define MAKEDEV_ETERNAL_KLD 0 #else #define MAKEDEV_ETERNAL_KLD MAKEDEV_ETERNAL #endif #define dev2unit(d) ((d)->si_drv0) typedef void d_priv_dtor_t(void *data); int devfs_get_cdevpriv(void **datap); int devfs_set_cdevpriv(void *priv, d_priv_dtor_t *dtr); void devfs_clear_cdevpriv(void); ino_t devfs_alloc_cdp_inode(void); void devfs_free_cdp_inode(ino_t ino); typedef void (*dev_clone_fn)(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **result); int dev_stdclone(char *_name, char **_namep, const char *_stem, int *_unit); EVENTHANDLER_DECLARE(dev_clone, dev_clone_fn); /* Stuff relating to kernel-dump */ struct kerneldumpcrypto; struct kerneldumpheader; struct dumperinfo { dumper_t *dumper; /* Dumping function. */ dumper_start_t *dumper_start; /* Dumper callback for dump_start(). */ dumper_hdr_t *dumper_hdr; /* Dumper callback for writing headers. */ void *priv; /* Private parts. */ u_int blocksize; /* Size of block in bytes. */ u_int maxiosize; /* Max size allowed for an individual I/O */ off_t mediaoffset; /* Initial offset in bytes. */ off_t mediasize; /* Space available in bytes. */ /* MI kernel dump state. */ void *blockbuf; /* Buffer for padding shorter dump blocks */ off_t dumpoff; /* Offset of ongoing kernel dump. */ off_t origdumpoff; /* Starting dump offset. */ struct kerneldumpcrypto *kdcrypto; /* Kernel dump crypto. */ struct kerneldumpcomp *kdcomp; /* Kernel dump compression. */ TAILQ_ENTRY(dumperinfo) di_next; char di_devname[]; }; extern int dumping; /* system is dumping */ void dump_savectx(void); int doadump(boolean_t); struct diocskerneldump_arg; int dumper_create(const struct dumperinfo *di_template, const char *devname, const struct diocskerneldump_arg *kda, struct dumperinfo **dip); void dumper_destroy(struct dumperinfo *di); int dumper_insert(const struct dumperinfo *di_template, const char *devname, const struct diocskerneldump_arg *kda); int dumper_remove(const char *devname, const struct diocskerneldump_arg *kda); /* For ddb(4)-time use only. 
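*/

Pulling the declarations above together, a hedged kernel-side sketch (the driver name "echo" and all identifiers are illustrative, not part of this change): make_dev_args/make_dev_s registers a cdevsw, and the cdevpriv calls attach per-open state with a d_priv_dtor_t, the same pattern the tuntap d_open routine uses:

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/uio.h>

struct echo_priv {
        u_long  ep_reads;               /* illustrative per-open state */
};

static void
echo_dtor(void *data)
{
        free(data, M_TEMP);             /* runs when the open goes away */
}

static int
echo_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
        struct echo_priv *ep;
        int error;

        ep = malloc(sizeof(*ep), M_TEMP, M_WAITOK | M_ZERO);
        error = devfs_set_cdevpriv(ep, echo_dtor);
        if (error != 0)
                free(ep, M_TEMP);       /* dtor does not run on failure */
        return (error);
}

static int
echo_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        struct echo_priv *ep;
        int error;

        /* Recover the state belonging to this particular open file. */
        error = devfs_get_cdevpriv((void **)&ep);
        if (error != 0)
                return (error);
        ep->ep_reads++;
        return (0);                     /* EOF */
}

static struct cdevsw echo_cdevsw = {
        .d_version =    D_VERSION,
        .d_name =       "echo",
        .d_open =       echo_open,
        .d_read =       echo_read,
};

static int
echo_create(struct cdev **devp, int unit)
{
        struct make_dev_args args;

        make_dev_args_init(&args);
        args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
        args.mda_devsw = &echo_cdevsw;
        args.mda_uid = UID_ROOT;
        args.mda_gid = GID_WHEEL;
        args.mda_mode = 0600;
        args.mda_unit = unit;
        /* MAKEDEV_CHECKNAME fails (instead of panicking) on a collision. */
        return (make_dev_s(&args, devp, "echo%d", unit));
}

/*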
*/ void dumper_ddb_insert(struct dumperinfo *); void dumper_ddb_remove(struct dumperinfo *); int dump_start(struct dumperinfo *di, struct kerneldumpheader *kdh); int dump_append(struct dumperinfo *, void *, size_t); int dump_write(struct dumperinfo *, void *, off_t, size_t); int dump_finish(struct dumperinfo *di, struct kerneldumpheader *kdh); void dump_init_header(const struct dumperinfo *di, struct kerneldumpheader *kdh, const char *magic, uint32_t archver, uint64_t dumplen); #endif /* _KERNEL */ #endif /* !_SYS_CONF_H_ */
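
A hedged sketch of how the MI dump entry points declared above compose; the architecture version is a placeholder (real callers pass the per-format constants from <sys/kerneldump.h>) and dump_one_buffer is an illustrative name, not an existing kernel routine:

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kerneldump.h>

static int
dump_one_buffer(struct dumperinfo *di, void *buf, size_t len)
{
        struct kerneldumpheader kdh;
        int error;

        /* The header carries the magic, arch version, and total length. */
        dump_init_header(di, &kdh, KERNELDUMPMAGIC, 1 /* placeholder */, len);
        if ((error = dump_start(di, &kdh)) != 0)
                return (error);                 /* keying/offset setup */
        if ((error = dump_append(di, buf, len)) != 0)
                return (error);                 /* buffered, block-sized I/O */
        return (dump_finish(di, &kdh));         /* trailer and flush */
}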