diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c index 2455c84ad65c..cad208197e76 100644 --- a/sys/kern/kern_lockf.c +++ b/sys/kern/kern_lockf.c @@ -1,2538 +1,2675 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Scooter Morris at Genentech Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_debug_lockf.h" #include #include #include +#include #include #include #include #include #include #include +#include +#include #include #include +#include #include #include #include #include #include #ifdef LOCKF_DEBUG #include static int lockf_debug = 0; /* control debug output */ SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, ""); #endif static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); struct owner_edge; struct owner_vertex; struct owner_vertex_list; struct owner_graph; #define NOLOCKF (struct lockf_entry *)0 #define SELF 0x1 #define OTHERS 0x2 static void lf_init(void *); static int lf_hash_owner(caddr_t, struct vnode *, struct flock *, int); static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *, int); static struct lockf_entry * lf_alloc_lock(struct lock_owner *); static int lf_free_lock(struct lockf_entry *); static int lf_clearlock(struct lockf *, struct lockf_entry *); static int lf_overlaps(struct lockf_entry *, struct lockf_entry *); static int lf_blocks(struct lockf_entry *, struct lockf_entry *); static void lf_free_edge(struct lockf_edge *); static struct lockf_edge * lf_alloc_edge(void); static void lf_alloc_vertex(struct lockf_entry *); static int lf_add_edge(struct lockf_entry *, struct lockf_entry *); static void lf_remove_edge(struct lockf_edge *); static void lf_remove_outgoing(struct lockf_entry *); static void lf_remove_incoming(struct lockf_entry *); static int lf_add_outgoing(struct lockf *, struct lockf_entry *); static int lf_add_incoming(struct lockf *, struct lockf_entry *); static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *, int); static struct lockf_entry * lf_getblock(struct lockf *, struct lockf_entry *); static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *); static void lf_insert_lock(struct lockf *, struct lockf_entry *); static void lf_wakeup_lock(struct lockf *, struct lockf_entry *); static void lf_update_dependancies(struct lockf *, struct lockf_entry *, int all, struct lockf_entry_list *); static void lf_set_start(struct lockf *, struct lockf_entry *, off_t, struct lockf_entry_list*); static void lf_set_end(struct lockf *, struct lockf_entry *, off_t, struct lockf_entry_list*); static int lf_setlock(struct lockf *, struct lockf_entry *, struct vnode *, void **cookiep); static int lf_cancel(struct lockf *, struct lockf_entry *, void *); static void lf_split(struct lockf *, struct lockf_entry *, struct lockf_entry *, struct lockf_entry_list *); #ifdef LOCKF_DEBUG static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y, struct owner_vertex_list *path); static void graph_check(struct owner_graph *g, int checkorder); static void graph_print_vertices(struct owner_vertex_list *set); #endif static int graph_delta_forward(struct owner_graph *g, struct owner_vertex *x, struct owner_vertex *y, struct owner_vertex_list *delta); static int graph_delta_backward(struct owner_graph *g, struct owner_vertex *x, struct owner_vertex *y, struct owner_vertex_list *delta); static int graph_add_indices(int *indices, int n, struct owner_vertex_list *set); static int graph_assign_indices(struct owner_graph *g, int *indices, int nextunused, struct owner_vertex_list *set); static int graph_add_edge(struct owner_graph *g, struct owner_vertex *x, struct owner_vertex *y); static void graph_remove_edge(struct owner_graph *g, struct owner_vertex *x, struct owner_vertex *y); 
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo); static void graph_free_vertex(struct owner_graph *g, struct owner_vertex *v); static struct owner_graph * graph_init(struct owner_graph *g); #ifdef LOCKF_DEBUG static void lf_print(char *, struct lockf_entry *); static void lf_printlist(char *, struct lockf_entry *); static void lf_print_owner(struct lock_owner *); #endif /* * This structure is used to keep track of both local and remote lock * owners. The lf_owner field of the struct lockf_entry points back at * the lock owner structure. Each possible lock owner (local proc for * POSIX fcntl locks, local file for BSD flock locks or * pair for remote locks) is represented by a unique instance of * struct lock_owner. * * If a lock owner has a lock that blocks some other lock or a lock * that is waiting for some other lock, it also has a vertex in the * owner_graph below. * * Locks: * (s) locked by state->ls_lock * (S) locked by lf_lock_states_lock * (g) locked by lf_owner_graph_lock * (c) const until freeing */ #define LOCK_OWNER_HASH_SIZE 256 struct lock_owner { LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */ int lo_refs; /* (l) Number of locks referring to this */ int lo_flags; /* (c) Flags passwd to lf_advlock */ caddr_t lo_id; /* (c) Id value passed to lf_advlock */ pid_t lo_pid; /* (c) Process Id of the lock owner */ int lo_sysid; /* (c) System Id of the lock owner */ int lo_hash; /* (c) Used to lock the appropriate chain */ struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */ }; LIST_HEAD(lock_owner_list, lock_owner); struct lock_owner_chain { struct sx lock; struct lock_owner_list list; }; static struct sx lf_lock_states_lock; static struct lockf_list lf_lock_states; /* (S) */ static struct lock_owner_chain lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* * Structures for deadlock detection. * * We have two types of directed graph, the first is the set of locks, * both active and pending on a vnode. Within this graph, active locks * are terminal nodes in the graph (i.e. have no out-going * edges). Pending locks have out-going edges to each blocking active * lock that prevents the lock from being granted and also to each * older pending lock that would block them if it was active. The * graph for each vnode is naturally acyclic; new edges are only ever * added to or from new nodes (either new pending locks which only add * out-going edges or new active locks which only add in-coming edges) * therefore they cannot create loops in the lock graph. * * The second graph is a global graph of lock owners. Each lock owner * is a vertex in that graph and an edge is added to the graph * whenever an edge is added to a vnode graph, with end points * corresponding to owner of the new pending lock and the owner of the * lock upon which it waits. In order to prevent deadlock, we only add * an edge to this graph if the new edge would not create a cycle. * * The lock owner graph is topologically sorted, i.e. if a node has * any outgoing edges, then it has an order strictly less than any * node to which it has an outgoing edge. We preserve this ordering * (and detect cycles) on edge insertion using Algorithm PK from the * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article * No. 
1.7) */ struct owner_vertex; struct owner_edge { LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */ LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */ int e_refs; /* (g) number of times added */ struct owner_vertex *e_from; /* (c) out-going from here */ struct owner_vertex *e_to; /* (c) in-coming to here */ }; LIST_HEAD(owner_edge_list, owner_edge); struct owner_vertex { TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */ uint32_t v_gen; /* (g) workspace for edge insertion */ int v_order; /* (g) order of vertex in graph */ struct owner_edge_list v_outedges;/* (g) list of out-edges */ struct owner_edge_list v_inedges; /* (g) list of in-edges */ struct lock_owner *v_owner; /* (c) corresponding lock owner */ }; TAILQ_HEAD(owner_vertex_list, owner_vertex); struct owner_graph { struct owner_vertex** g_vertices; /* (g) pointers to vertices */ int g_size; /* (g) number of vertices */ int g_space; /* (g) space allocated for vertices */ int *g_indexbuf; /* (g) workspace for loop detection */ uint32_t g_gen; /* (g) increment when re-ordering */ }; static struct sx lf_owner_graph_lock; static struct owner_graph lf_owner_graph; /* * Initialise various structures and locks. */ static void lf_init(void *dummy) { int i; sx_init(&lf_lock_states_lock, "lock states lock"); LIST_INIT(&lf_lock_states); for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) { sx_init(&lf_lock_owners[i].lock, "lock owners lock"); LIST_INIT(&lf_lock_owners[i].list); } sx_init(&lf_owner_graph_lock, "owner graph lock"); graph_init(&lf_owner_graph); } SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL); /* * Generate a hash value for a lock owner. */ static int lf_hash_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags) { uint32_t h; if (flags & F_REMOTE) { h = HASHSTEP(0, fl->l_pid); h = HASHSTEP(h, fl->l_sysid); } else if (flags & F_FLOCK) { h = ((uintptr_t) id) >> 7; } else { h = ((uintptr_t) vp) >> 7; } return (h % LOCK_OWNER_HASH_SIZE); } /* * Return true if a lock owner matches the details passed to * lf_advlock. */ static int lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl, int flags) { if (flags & F_REMOTE) { return lo->lo_pid == fl->l_pid && lo->lo_sysid == fl->l_sysid; } else { return lo->lo_id == id; } } static struct lockf_entry * lf_alloc_lock(struct lock_owner *lo) { struct lockf_entry *lf; lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO); #ifdef LOCKF_DEBUG if (lockf_debug & 4) printf("Allocated lock %p\n", lf); #endif if (lo) { sx_xlock(&lf_lock_owners[lo->lo_hash].lock); lo->lo_refs++; sx_xunlock(&lf_lock_owners[lo->lo_hash].lock); lf->lf_owner = lo; } return (lf); } static int lf_free_lock(struct lockf_entry *lock) { struct sx *chainlock; KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock)); if (--lock->lf_refs > 0) return (0); /* * Adjust the lock_owner reference count and * reclaim the entry if this is the last lock * for that owner. 
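The owner hash above buckets the three classes of lock owner differently: remote owners by the <pid, sysid> pair, flock owners by the struct file pointer passed as the id, and POSIX owners by the vnode pointer. A minimal userland sketch of that bucket selection follows; the HASHSTEP macro here is an assumption modelled on the shift-and-add step in sys/hash.h and is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define LOCK_OWNER_HASH_SIZE	256
/* Assumed shift-and-add hash step, in the spirit of sys/hash.h. */
#define HASHSTEP(x, c)		(((x) << 5) + (x) + (unsigned char)(c))

static unsigned int
owner_bucket_remote(int pid, int sysid)
{
	uint32_t h;

	h = HASHSTEP(0, pid);
	h = HASHSTEP(h, sysid);
	return (h % LOCK_OWNER_HASH_SIZE);
}

static unsigned int
owner_bucket_ptr(const void *id)
{
	/* flock owners pass the file pointer, POSIX owners the vnode. */
	return (((uintptr_t)id >> 7) % LOCK_OWNER_HASH_SIZE);
}

int
main(void)
{
	int local_object;

	printf("remote owner -> bucket %u\n", owner_bucket_remote(100, 2));
	printf("local owner  -> bucket %u\n", owner_bucket_ptr(&local_object));
	return (0);
}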
*/ struct lock_owner *lo = lock->lf_owner; if (lo) { KASSERT(LIST_EMPTY(&lock->lf_outedges), ("freeing lock with dependencies")); KASSERT(LIST_EMPTY(&lock->lf_inedges), ("freeing lock with dependants")); chainlock = &lf_lock_owners[lo->lo_hash].lock; sx_xlock(chainlock); KASSERT(lo->lo_refs > 0, ("lock owner refcount")); lo->lo_refs--; if (lo->lo_refs == 0) { #ifdef LOCKF_DEBUG if (lockf_debug & 1) printf("lf_free_lock: freeing lock owner %p\n", lo); #endif if (lo->lo_vertex) { sx_xlock(&lf_owner_graph_lock); graph_free_vertex(&lf_owner_graph, lo->lo_vertex); sx_xunlock(&lf_owner_graph_lock); } LIST_REMOVE(lo, lo_link); free(lo, M_LOCKF); #ifdef LOCKF_DEBUG if (lockf_debug & 4) printf("Freed lock owner %p\n", lo); #endif } sx_unlock(chainlock); } if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { vrele(lock->lf_vnode); lock->lf_vnode = NULL; } #ifdef LOCKF_DEBUG if (lockf_debug & 4) printf("Freed lock %p\n", lock); #endif free(lock, M_LOCKF); return (1); } /* * Advisory record locking support */ int lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, u_quad_t size) { struct lockf *state; struct flock *fl = ap->a_fl; struct lockf_entry *lock; struct vnode *vp = ap->a_vp; caddr_t id = ap->a_id; int flags = ap->a_flags; int hash; struct lock_owner *lo; off_t start, end, oadd; int error; /* * Handle the F_UNLKSYS case first - no need to mess about * creating a lock owner for this one. */ if (ap->a_op == F_UNLCKSYS) { lf_clearremotesys(fl->l_sysid); return (0); } /* * Convert the flock structure into a start and end. */ switch (fl->l_whence) { case SEEK_SET: case SEEK_CUR: /* * Caller is responsible for adding any necessary offset * when SEEK_CUR is used. */ start = fl->l_start; break; case SEEK_END: if (size > OFF_MAX || (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) return (EOVERFLOW); start = size + fl->l_start; break; default: return (EINVAL); } if (start < 0) return (EINVAL); if (fl->l_len < 0) { if (start == 0) return (EINVAL); end = start - 1; start += fl->l_len; if (start < 0) return (EINVAL); } else if (fl->l_len == 0) { end = OFF_MAX; } else { oadd = fl->l_len - 1; if (oadd > OFF_MAX - start) return (EOVERFLOW); end = start + oadd; } retry_setlock: /* * Avoid the common case of unlocking when inode has no locks. */ if (ap->a_op != F_SETLK && (*statep) == NULL) { VI_LOCK(vp); if ((*statep) == NULL) { fl->l_type = F_UNLCK; VI_UNLOCK(vp); return (0); } VI_UNLOCK(vp); } /* * Map our arguments to an existing lock owner or create one * if this is the first time we have seen this owner. */ hash = lf_hash_owner(id, vp, fl, flags); sx_xlock(&lf_lock_owners[hash].lock); LIST_FOREACH(lo, &lf_lock_owners[hash].list, lo_link) if (lf_owner_matches(lo, id, fl, flags)) break; if (!lo) { /* * We initialise the lock with a reference * count which matches the new lockf_entry * structure created below. 
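For reference, the flock-to-inclusive-range normalisation performed at the top of lf_advlockasync can be read in isolation. The sketch below mirrors the checks above (EINVAL for a bad whence or negative start, EOVERFLOW when the range cannot be represented, end = OFF_MAX for a lock to end of file); the OFF_MAX fallback exists only so the sketch builds outside the kernel.

#include <sys/types.h>
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>

#ifndef OFF_MAX
#define OFF_MAX	INT64_MAX	/* fallback for userland builds */
#endif

/* Convert a struct flock into the inclusive range [*startp, *endp]. */
static int
flock_to_range(const struct flock *fl, off_t size, off_t *startp, off_t *endp)
{
	off_t start, end, oadd;

	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* For SEEK_CUR the caller has already added the offset. */
		start = fl->l_start;
		break;
	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		/* Negative length: lock the l_len bytes ending at start-1. */
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0) {
		end = OFF_MAX;		/* lock to end of file */
	} else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
	*startp = start;
	*endp = end;
	return (0);
}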
*/ lo = malloc(sizeof(struct lock_owner), M_LOCKF, M_WAITOK|M_ZERO); #ifdef LOCKF_DEBUG if (lockf_debug & 4) printf("Allocated lock owner %p\n", lo); #endif lo->lo_refs = 1; lo->lo_flags = flags; lo->lo_id = id; lo->lo_hash = hash; if (flags & F_REMOTE) { lo->lo_pid = fl->l_pid; lo->lo_sysid = fl->l_sysid; } else if (flags & F_FLOCK) { lo->lo_pid = -1; lo->lo_sysid = 0; } else { struct proc *p = (struct proc *) id; lo->lo_pid = p->p_pid; lo->lo_sysid = 0; } lo->lo_vertex = NULL; #ifdef LOCKF_DEBUG if (lockf_debug & 1) { printf("lf_advlockasync: new lock owner %p ", lo); lf_print_owner(lo); printf("\n"); } #endif LIST_INSERT_HEAD(&lf_lock_owners[hash].list, lo, lo_link); } else { /* * We have seen this lock owner before, increase its * reference count to account for the new lockf_entry * structure we create below. */ lo->lo_refs++; } sx_xunlock(&lf_lock_owners[hash].lock); /* * Create the lockf structure. We initialise the lf_owner * field here instead of in lf_alloc_lock() to avoid paying * the lf_lock_owners_lock tax twice. */ lock = lf_alloc_lock(NULL); lock->lf_refs = 1; lock->lf_start = start; lock->lf_end = end; lock->lf_owner = lo; lock->lf_vnode = vp; if (flags & F_REMOTE) { /* * For remote locks, the caller may release its ref to * the vnode at any time - we have to ref it here to * prevent it from being recycled unexpectedly. */ vref(vp); } lock->lf_type = fl->l_type; LIST_INIT(&lock->lf_outedges); LIST_INIT(&lock->lf_inedges); lock->lf_async_task = ap->a_task; lock->lf_flags = ap->a_flags; /* * Do the requested operation. First find our state structure * and create a new one if necessary - the caller's *statep * variable and the state's ls_threads count is protected by * the vnode interlock. */ VI_LOCK(vp); if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); lf_free_lock(lock); return (ENOENT); } /* * Allocate a state structure if necessary. */ state = *statep; if (state == NULL) { struct lockf *ls; VI_UNLOCK(vp); ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO); sx_init(&ls->ls_lock, "ls_lock"); LIST_INIT(&ls->ls_active); LIST_INIT(&ls->ls_pending); ls->ls_threads = 1; sx_xlock(&lf_lock_states_lock); LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link); sx_xunlock(&lf_lock_states_lock); /* * Cope if we lost a race with some other thread while * trying to allocate memory. */ VI_LOCK(vp); if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); sx_xlock(&lf_lock_states_lock); LIST_REMOVE(ls, ls_link); sx_xunlock(&lf_lock_states_lock); sx_destroy(&ls->ls_lock); free(ls, M_LOCKF); lf_free_lock(lock); return (ENOENT); } if ((*statep) == NULL) { state = *statep = ls; VI_UNLOCK(vp); } else { state = *statep; MPASS(state->ls_threads >= 0); state->ls_threads++; VI_UNLOCK(vp); sx_xlock(&lf_lock_states_lock); LIST_REMOVE(ls, ls_link); sx_xunlock(&lf_lock_states_lock); sx_destroy(&ls->ls_lock); free(ls, M_LOCKF); } } else { MPASS(state->ls_threads >= 0); state->ls_threads++; VI_UNLOCK(vp); } sx_xlock(&state->ls_lock); /* * Recheck the doomed vnode after state->ls_lock is * locked. lf_purgelocks() requires that no new threads add * pending locks when vnode is marked by VIRF_DOOMED flag. 
*/ if (VN_IS_DOOMED(vp)) { VI_LOCK(vp); MPASS(state->ls_threads > 0); state->ls_threads--; wakeup(state); VI_UNLOCK(vp); sx_xunlock(&state->ls_lock); lf_free_lock(lock); return (ENOENT); } switch (ap->a_op) { case F_SETLK: error = lf_setlock(state, lock, vp, ap->a_cookiep); break; case F_UNLCK: error = lf_clearlock(state, lock); lf_free_lock(lock); break; case F_GETLK: error = lf_getlock(state, lock, fl); lf_free_lock(lock); break; case F_CANCEL: if (ap->a_cookiep) error = lf_cancel(state, lock, *ap->a_cookiep); else error = EINVAL; lf_free_lock(lock); break; default: lf_free_lock(lock); error = EINVAL; break; } #ifdef DIAGNOSTIC /* * Check for some can't happen stuff. In this case, the active * lock list becoming disordered or containing mutually * blocking locks. We also check the pending list for locks * which should be active (i.e. have no out-going edges). */ LIST_FOREACH(lock, &state->ls_active, lf_link) { struct lockf_entry *lf; if (LIST_NEXT(lock, lf_link)) KASSERT((lock->lf_start <= LIST_NEXT(lock, lf_link)->lf_start), ("locks disordered")); LIST_FOREACH(lf, &state->ls_active, lf_link) { if (lock == lf) break; KASSERT(!lf_blocks(lock, lf), ("two conflicting active locks")); if (lock->lf_owner == lf->lf_owner) KASSERT(!lf_overlaps(lock, lf), ("two overlapping locks from same owner")); } } LIST_FOREACH(lock, &state->ls_pending, lf_link) { KASSERT(!LIST_EMPTY(&lock->lf_outedges), ("pending lock which should be active")); } #endif sx_xunlock(&state->ls_lock); VI_LOCK(vp); MPASS(state->ls_threads > 0); state->ls_threads--; if (state->ls_threads != 0) { wakeup(state); } VI_UNLOCK(vp); if (error == EDOOFUS) { KASSERT(ap->a_op == F_SETLK, ("EDOOFUS")); goto retry_setlock; } return (error); } int lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size) { struct vop_advlockasync_args a; a.a_vp = ap->a_vp; a.a_id = ap->a_id; a.a_op = ap->a_op; a.a_fl = ap->a_fl; a.a_flags = ap->a_flags; a.a_task = NULL; a.a_cookiep = NULL; return (lf_advlockasync(&a, statep, size)); } void lf_purgelocks(struct vnode *vp, struct lockf **statep) { struct lockf *state; struct lockf_entry *lock, *nlock; /* * For this to work correctly, the caller must ensure that no * other threads enter the locking system for this vnode, * e.g. by checking VIRF_DOOMED. We wake up any threads that are * sleeping waiting for locks on this vnode and then free all * the remaining locks. */ VI_LOCK(vp); KASSERT(VN_IS_DOOMED(vp), ("lf_purgelocks: vp %p has not vgone yet", vp)); state = *statep; if (state == NULL) { VI_UNLOCK(vp); return; } *statep = NULL; if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { KASSERT(LIST_EMPTY(&state->ls_pending), ("freeing state with pending locks")); VI_UNLOCK(vp); goto out_free; } MPASS(state->ls_threads >= 0); state->ls_threads++; VI_UNLOCK(vp); sx_xlock(&state->ls_lock); sx_xlock(&lf_owner_graph_lock); LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { LIST_REMOVE(lock, lf_link); lf_remove_outgoing(lock); lf_remove_incoming(lock); /* * If its an async lock, we can just free it * here, otherwise we let the sleeping thread * free it. */ if (lock->lf_async_task) { lf_free_lock(lock); } else { lock->lf_flags |= F_INTR; wakeup(lock); } } sx_xunlock(&lf_owner_graph_lock); sx_xunlock(&state->ls_lock); /* * Wait for all other threads, sleeping and otherwise * to leave. 
*/ VI_LOCK(vp); while (state->ls_threads > 1) msleep(state, VI_MTX(vp), 0, "purgelocks", 0); VI_UNLOCK(vp); /* * We can just free all the active locks since they * will have no dependencies (we removed them all * above). We don't need to bother locking since we * are the last thread using this state structure. */ KASSERT(LIST_EMPTY(&state->ls_pending), ("lock pending for %p", state)); LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) { LIST_REMOVE(lock, lf_link); lf_free_lock(lock); } out_free: sx_xlock(&lf_lock_states_lock); LIST_REMOVE(state, ls_link); sx_xunlock(&lf_lock_states_lock); sx_destroy(&state->ls_lock); free(state, M_LOCKF); } /* * Return non-zero if locks 'x' and 'y' overlap. */ static int lf_overlaps(struct lockf_entry *x, struct lockf_entry *y) { return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); } /* * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa). */ static int lf_blocks(struct lockf_entry *x, struct lockf_entry *y) { return x->lf_owner != y->lf_owner && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) && lf_overlaps(x, y); } /* * Allocate a lock edge from the free list */ static struct lockf_edge * lf_alloc_edge(void) { return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO)); } /* * Free a lock edge. */ static void lf_free_edge(struct lockf_edge *e) { free(e, M_LOCKF); } /* * Ensure that the lock's owner has a corresponding vertex in the * owner graph. */ static void lf_alloc_vertex(struct lockf_entry *lock) { struct owner_graph *g = &lf_owner_graph; if (!lock->lf_owner->lo_vertex) lock->lf_owner->lo_vertex = graph_alloc_vertex(g, lock->lf_owner); } /* * Attempt to record an edge from lock x to lock y. Return EDEADLK if * the new edge would cause a cycle in the owner graph. */ static int lf_add_edge(struct lockf_entry *x, struct lockf_entry *y) { struct owner_graph *g = &lf_owner_graph; struct lockf_edge *e; int error; #ifdef DIAGNOSTIC LIST_FOREACH(e, &x->lf_outedges, le_outlink) KASSERT(e->le_to != y, ("adding lock edge twice")); #endif /* * Make sure the two owners have entries in the owner graph. */ lf_alloc_vertex(x); lf_alloc_vertex(y); error = graph_add_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); if (error) return (error); e = lf_alloc_edge(); LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); e->le_from = x; e->le_to = y; return (0); } /* * Remove an edge from the lock graph. */ static void lf_remove_edge(struct lockf_edge *e) { struct owner_graph *g = &lf_owner_graph; struct lockf_entry *x = e->le_from; struct lockf_entry *y = e->le_to; graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); LIST_REMOVE(e, le_outlink); LIST_REMOVE(e, le_inlink); e->le_from = NULL; e->le_to = NULL; lf_free_edge(e); } /* * Remove all out-going edges from lock x. */ static void lf_remove_outgoing(struct lockf_entry *x) { struct lockf_edge *e; while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { lf_remove_edge(e); } } /* * Remove all in-coming edges from lock x. */ static void lf_remove_incoming(struct lockf_entry *x) { struct lockf_edge *e; while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { lf_remove_edge(e); } } /* * Walk the list of locks for the file and create an out-going edge * from lock to each blocking lock. */ static int lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) { struct lockf_entry *overlap; int error; LIST_FOREACH(overlap, &state->ls_active, lf_link) { /* * We may assume that the active list is sorted by * lf_start. 
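The two predicates above, lf_overlaps and lf_blocks, carry the whole conflict model: ranges are inclusive, and a conflict requires different owners, at least one exclusive lock, and an intersection. A toy version over plain ranges, illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct mini_lock {
	long	start;		/* inclusive range */
	long	end;
	int	owner;
	bool	exclusive;	/* F_WRLCK-style */
};

static bool
mini_overlaps(const struct mini_lock *x, const struct mini_lock *y)
{
	return (x->start <= y->end && x->end >= y->start);
}

static bool
mini_blocks(const struct mini_lock *x, const struct mini_lock *y)
{
	return (x->owner != y->owner &&
	    (x->exclusive || y->exclusive) && mini_overlaps(x, y));
}

int
main(void)
{
	struct mini_lock a = { 0, 99, 1, false };	/* shared [0..99] */
	struct mini_lock b = { 50, 150, 2, true };	/* exclusive [50..150] */

	printf("conflict: %d\n", mini_blocks(&a, &b));	/* prints 1 */
	return (0);
}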
*/ if (overlap->lf_start > lock->lf_end) break; if (!lf_blocks(lock, overlap)) continue; /* * We've found a blocking lock. Add the corresponding * edge to the graphs and see if it would cause a * deadlock. */ error = lf_add_edge(lock, overlap); /* * The only error that lf_add_edge returns is EDEADLK. * Remove any edges we added and return the error. */ if (error) { lf_remove_outgoing(lock); return (error); } } /* * We also need to add edges to sleeping locks that block * us. This ensures that lf_wakeup_lock cannot grant two * mutually blocking locks simultaneously and also enforces a * 'first come, first served' fairness model. Note that this * only happens if we are blocked by at least one active lock * due to the call to lf_getblock in lf_setlock below. */ LIST_FOREACH(overlap, &state->ls_pending, lf_link) { if (!lf_blocks(lock, overlap)) continue; /* * We've found a blocking lock. Add the corresponding * edge to the graphs and see if it would cause a * deadlock. */ error = lf_add_edge(lock, overlap); /* * The only error that lf_add_edge returns is EDEADLK. * Remove any edges we added and return the error. */ if (error) { lf_remove_outgoing(lock); return (error); } } return (0); } /* * Walk the list of pending locks for the file and create an in-coming * edge from lock to each blocking lock. */ static int lf_add_incoming(struct lockf *state, struct lockf_entry *lock) { struct lockf_entry *overlap; int error; sx_assert(&state->ls_lock, SX_XLOCKED); if (LIST_EMPTY(&state->ls_pending)) return (0); error = 0; sx_xlock(&lf_owner_graph_lock); LIST_FOREACH(overlap, &state->ls_pending, lf_link) { if (!lf_blocks(lock, overlap)) continue; /* * We've found a blocking lock. Add the corresponding * edge to the graphs and see if it would cause a * deadlock. */ error = lf_add_edge(overlap, lock); /* * The only error that lf_add_edge returns is EDEADLK. * Remove any edges we added and return the error. */ if (error) { lf_remove_incoming(lock); break; } } sx_xunlock(&lf_owner_graph_lock); return (error); } /* * Insert lock into the active list, keeping list entries ordered by * increasing values of lf_start. */ static void lf_insert_lock(struct lockf *state, struct lockf_entry *lock) { struct lockf_entry *lf, *lfprev; if (LIST_EMPTY(&state->ls_active)) { LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); return; } lfprev = NULL; LIST_FOREACH(lf, &state->ls_active, lf_link) { if (lf->lf_start > lock->lf_start) { LIST_INSERT_BEFORE(lf, lock, lf_link); return; } lfprev = lf; } LIST_INSERT_AFTER(lfprev, lock, lf_link); } /* * Wake up a sleeping lock and remove it from the pending list now * that all its dependencies have been resolved. The caller should * arrange for the lock to be added to the active list, adjusting any * existing locks for the same owner as needed. */ static void lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock) { /* * Remove from ls_pending list and wake up the caller * or start the async notification, as appropriate. */ LIST_REMOVE(wakelock, lf_link); #ifdef LOCKF_DEBUG if (lockf_debug & 1) lf_print("lf_wakeup_lock: awakening", wakelock); #endif /* LOCKF_DEBUG */ if (wakelock->lf_async_task) { taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); } else { wakeup(wakelock); } } /* * Re-check all dependent locks and remove edges to locks that we no * longer block. If 'all' is non-zero, the lock has been removed and * we must remove all the dependencies, otherwise it has simply been * reduced but remains active. 
Any pending locks which have been been * unblocked are added to 'granted' */ static void lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, struct lockf_entry_list *granted) { struct lockf_edge *e, *ne; struct lockf_entry *deplock; LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { deplock = e->le_from; if (all || !lf_blocks(lock, deplock)) { sx_xlock(&lf_owner_graph_lock); lf_remove_edge(e); sx_xunlock(&lf_owner_graph_lock); if (LIST_EMPTY(&deplock->lf_outedges)) { lf_wakeup_lock(state, deplock); LIST_INSERT_HEAD(granted, deplock, lf_link); } } } } /* * Set the start of an existing active lock, updating dependencies and * adding any newly woken locks to 'granted'. */ static void lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, struct lockf_entry_list *granted) { KASSERT(new_start >= lock->lf_start, ("can't increase lock")); lock->lf_start = new_start; LIST_REMOVE(lock, lf_link); lf_insert_lock(state, lock); lf_update_dependancies(state, lock, FALSE, granted); } /* * Set the end of an existing active lock, updating dependencies and * adding any newly woken locks to 'granted'. */ static void lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, struct lockf_entry_list *granted) { KASSERT(new_end <= lock->lf_end, ("can't increase lock")); lock->lf_end = new_end; lf_update_dependancies(state, lock, FALSE, granted); } /* * Add a lock to the active list, updating or removing any current * locks owned by the same owner and processing any pending locks that * become unblocked as a result. This code is also used for unlock * since the logic for updating existing locks is identical. * * As a result of processing the new lock, we may unblock existing * pending locks as a result of downgrading/unlocking. We simply * activate the newly granted locks by looping. * * Since the new lock already has its dependencies set up, we always * add it to the list (unless its an unlock request). This may * fragment the lock list in some pathological cases but its probably * not a real problem. */ static void lf_activate_lock(struct lockf *state, struct lockf_entry *lock) { struct lockf_entry *overlap, *lf; struct lockf_entry_list granted; int ovcase; LIST_INIT(&granted); LIST_INSERT_HEAD(&granted, lock, lf_link); while (!LIST_EMPTY(&granted)) { lock = LIST_FIRST(&granted); LIST_REMOVE(lock, lf_link); /* * Skip over locks owned by other processes. Handle * any locks that overlap and are owned by ourselves. */ overlap = LIST_FIRST(&state->ls_active); for (;;) { ovcase = lf_findoverlap(&overlap, lock, SELF); #ifdef LOCKF_DEBUG if (ovcase && (lockf_debug & 2)) { printf("lf_setlock: overlap %d", ovcase); lf_print("", overlap); } #endif /* * Six cases: * 0) no overlap * 1) overlap == lock * 2) overlap contains lock * 3) lock contains overlap * 4) overlap starts before lock * 5) overlap ends after lock */ switch (ovcase) { case 0: /* no overlap */ break; case 1: /* overlap == lock */ /* * We have already setup the * dependants for the new lock, taking * into account a possible downgrade * or unlock. Remove the old lock. */ LIST_REMOVE(overlap, lf_link); lf_update_dependancies(state, overlap, TRUE, &granted); lf_free_lock(overlap); break; case 2: /* overlap contains lock */ /* * Just split the existing lock. */ lf_split(state, overlap, lock, &granted); break; case 3: /* lock contains overlap */ /* * Delete the overlap and advance to * the next entry in the list. 
*/ lf = LIST_NEXT(overlap, lf_link); LIST_REMOVE(overlap, lf_link); lf_update_dependancies(state, overlap, TRUE, &granted); lf_free_lock(overlap); overlap = lf; continue; case 4: /* overlap starts before lock */ /* * Just update the overlap end and * move on. */ lf_set_end(state, overlap, lock->lf_start - 1, &granted); overlap = LIST_NEXT(overlap, lf_link); continue; case 5: /* overlap ends after lock */ /* * Change the start of overlap and * re-insert. */ lf_set_start(state, overlap, lock->lf_end + 1, &granted); break; } break; } #ifdef LOCKF_DEBUG if (lockf_debug & 1) { if (lock->lf_type != F_UNLCK) lf_print("lf_activate_lock: activated", lock); else lf_print("lf_activate_lock: unlocked", lock); lf_printlist("lf_activate_lock", lock); } #endif /* LOCKF_DEBUG */ if (lock->lf_type != F_UNLCK) lf_insert_lock(state, lock); } } /* * Cancel a pending lock request, either as a result of a signal or a * cancel request for an async lock. */ static void lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) { struct lockf_entry_list granted; /* * Note it is theoretically possible that cancelling this lock * may allow some other pending lock to become * active. Consider this case: * * Owner Action Result Dependencies * * A: lock [0..0] succeeds * B: lock [2..2] succeeds * C: lock [1..2] blocked C->B * D: lock [0..1] blocked C->B,D->A,D->C * A: unlock [0..0] C->B,D->C * C: cancel [1..2] */ LIST_REMOVE(lock, lf_link); /* * Removing out-going edges is simple. */ sx_xlock(&lf_owner_graph_lock); lf_remove_outgoing(lock); sx_xunlock(&lf_owner_graph_lock); /* * Removing in-coming edges may allow some other lock to * become active - we use lf_update_dependancies to figure * this out. */ LIST_INIT(&granted); lf_update_dependancies(state, lock, TRUE, &granted); lf_free_lock(lock); /* * Feed any newly active locks to lf_activate_lock. */ while (!LIST_EMPTY(&granted)) { lock = LIST_FIRST(&granted); LIST_REMOVE(lock, lf_link); lf_activate_lock(state, lock); } } /* * Set a byte-range lock. */ static int lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, void **cookiep) { static char lockstr[] = "lockf"; int error, priority, stops_deferred; #ifdef LOCKF_DEBUG if (lockf_debug & 1) lf_print("lf_setlock", lock); #endif /* LOCKF_DEBUG */ /* * Set the priority */ priority = PLOCK; if (lock->lf_type == F_WRLCK) priority += 4; if (!(lock->lf_flags & F_NOINTR)) priority |= PCATCH; /* * Scan lock list for this file looking for locks that would block us. */ if (lf_getblock(state, lock)) { /* * Free the structure and return if nonblocking. */ if ((lock->lf_flags & F_WAIT) == 0 && lock->lf_async_task == NULL) { lf_free_lock(lock); error = EAGAIN; goto out; } /* * For flock type locks, we must first remove * any shared locks that we hold before we sleep * waiting for an exclusive lock. */ if ((lock->lf_flags & F_FLOCK) && lock->lf_type == F_WRLCK) { lock->lf_type = F_UNLCK; lf_activate_lock(state, lock); lock->lf_type = F_WRLCK; } /* * We are blocked. Create edges to each blocking lock, * checking for deadlock using the owner graph. For * simplicity, we run deadlock detection for all * locks, posix and otherwise. */ sx_xlock(&lf_owner_graph_lock); error = lf_add_outgoing(state, lock); sx_xunlock(&lf_owner_graph_lock); if (error) { #ifdef LOCKF_DEBUG if (lockf_debug & 1) lf_print("lf_setlock: deadlock", lock); #endif lf_free_lock(lock); goto out; } /* * We have added edges to everything that blocks * us. Sleep until they all go away. 
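From userland the two paths above look like this: a conflicting non-blocking F_SETLK request fails immediately with EAGAIN, while F_SETLKW leaves the thread asleep on the pending list until all of its out-edges are removed. A minimal sketch using the standard fcntl interface (the demo file path is arbitrary):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,		/* whole file */
	};
	int fd;

	fd = open("/tmp/lockf-demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0)
		return (1);
	if (fcntl(fd, F_SETLK, &fl) == 0)
		printf("granted without sleeping\n");
	else if (errno == EAGAIN)
		printf("conflicting lock held; F_SETLKW would sleep here\n");
	close(fd);
	return (0);
}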
*/ LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); #ifdef LOCKF_DEBUG if (lockf_debug & 1) { struct lockf_edge *e; LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { lf_print("lf_setlock: blocking on", e->le_to); lf_printlist("lf_setlock", e->le_to); } } #endif /* LOCKF_DEBUG */ if ((lock->lf_flags & F_WAIT) == 0) { /* * The caller requested async notification - * this callback happens when the blocking * lock is released, allowing the caller to * make another attempt to take the lock. */ *cookiep = (void *) lock; error = EINPROGRESS; goto out; } lock->lf_refs++; stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART); error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); sigallowstop(stops_deferred); if (lf_free_lock(lock)) { error = EDOOFUS; goto out; } /* * We may have been awakened by a signal and/or by a * debugger continuing us (in which cases we must * remove our lock graph edges) and/or by another * process releasing a lock (in which case our edges * have already been removed and we have been moved to * the active list). We may also have been woken by * lf_purgelocks which we report to the caller as * EINTR. In that case, lf_purgelocks will have * removed our lock graph edges. * * Note that it is possible to receive a signal after * we were successfully woken (and moved to the active * list) but before we resumed execution. In this * case, our lf_outedges list will be clear. We * pretend there was no error. * * Note also, if we have been sleeping long enough, we * may now have incoming edges from some newer lock * which is waiting behind us in the queue. */ if (lock->lf_flags & F_INTR) { error = EINTR; lf_free_lock(lock); goto out; } if (LIST_EMPTY(&lock->lf_outedges)) { error = 0; } else { lf_cancel_lock(state, lock); goto out; } #ifdef LOCKF_DEBUG if (lockf_debug & 1) { lf_print("lf_setlock: granted", lock); } #endif goto out; } /* * It looks like we are going to grant the lock. First add * edges from any currently pending lock that the new lock * would block. */ error = lf_add_incoming(state, lock); if (error) { #ifdef LOCKF_DEBUG if (lockf_debug & 1) lf_print("lf_setlock: deadlock", lock); #endif lf_free_lock(lock); goto out; } /* * No blocks!! Add the lock. Note that we will * downgrade or upgrade any overlapping locks this * process already owns. */ lf_activate_lock(state, lock); error = 0; out: return (error); } /* * Remove a byte-range lock on an inode. * * Generally, find the lock (or an overlap to that lock) * and remove it (or shrink it), then wakeup anyone we can. */ static int lf_clearlock(struct lockf *state, struct lockf_entry *unlock) { struct lockf_entry *overlap; overlap = LIST_FIRST(&state->ls_active); if (overlap == NOLOCKF) return (0); #ifdef LOCKF_DEBUG if (unlock->lf_type != F_UNLCK) panic("lf_clearlock: bad type"); if (lockf_debug & 1) lf_print("lf_clearlock", unlock); #endif /* LOCKF_DEBUG */ lf_activate_lock(state, unlock); return (0); } /* * Check whether there is a blocking lock, and if so return its * details in '*fl'. 
*/ static int lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) { struct lockf_entry *block; #ifdef LOCKF_DEBUG if (lockf_debug & 1) lf_print("lf_getlock", lock); #endif /* LOCKF_DEBUG */ if ((block = lf_getblock(state, lock))) { fl->l_type = block->lf_type; fl->l_whence = SEEK_SET; fl->l_start = block->lf_start; if (block->lf_end == OFF_MAX) fl->l_len = 0; else fl->l_len = block->lf_end - block->lf_start + 1; fl->l_pid = block->lf_owner->lo_pid; fl->l_sysid = block->lf_owner->lo_sysid; } else { fl->l_type = F_UNLCK; } return (0); } /* * Cancel an async lock request. */ static int lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) { struct lockf_entry *reallock; /* * We need to match this request with an existing lock * request. */ LIST_FOREACH(reallock, &state->ls_pending, lf_link) { if ((void *) reallock == cookie) { /* * Double-check that this lock looks right * (maybe use a rolling ID for the cancel * cookie instead?) */ if (!(reallock->lf_vnode == lock->lf_vnode && reallock->lf_start == lock->lf_start && reallock->lf_end == lock->lf_end)) { return (ENOENT); } /* * Make sure this lock was async and then just * remove it from its wait lists. */ if (!reallock->lf_async_task) { return (ENOENT); } /* * Note that since any other thread must take * state->ls_lock before it can possibly * trigger the async callback, we are safe * from a race with lf_wakeup_lock, i.e. we * can free the lock (actually our caller does * this). */ lf_cancel_lock(state, reallock); return (0); } } /* * We didn't find a matching lock - not much we can do here. */ return (ENOENT); } /* * Walk the list of locks for an inode and * return the first blocking lock. */ static struct lockf_entry * lf_getblock(struct lockf *state, struct lockf_entry *lock) { struct lockf_entry *overlap; LIST_FOREACH(overlap, &state->ls_active, lf_link) { /* * We may assume that the active list is sorted by * lf_start. */ if (overlap->lf_start > lock->lf_end) break; if (!lf_blocks(lock, overlap)) continue; return (overlap); } return (NOLOCKF); } /* * Walk the list of locks for an inode to find an overlapping lock (if * any) and return a classification of that overlap. * * Arguments: * *overlap The place in the lock list to start looking * lock The lock which is being tested * type Pass 'SELF' to test only locks with the same * owner as lock, or 'OTHER' to test only locks * with a different owner * * Returns one of six values: * 0) no overlap * 1) overlap == lock * 2) overlap contains lock * 3) lock contains overlap * 4) overlap starts before lock * 5) overlap ends after lock * * If there is an overlapping lock, '*overlap' is set to point at the * overlapping lock. * * NOTE: this returns only the FIRST overlapping lock. There * may be more than one. 
*/ static int lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) { struct lockf_entry *lf; off_t start, end; int res; if ((*overlap) == NOLOCKF) { return (0); } #ifdef LOCKF_DEBUG if (lockf_debug & 2) lf_print("lf_findoverlap: looking for overlap in", lock); #endif /* LOCKF_DEBUG */ start = lock->lf_start; end = lock->lf_end; res = 0; while (*overlap) { lf = *overlap; if (lf->lf_start > end) break; if (((type & SELF) && lf->lf_owner != lock->lf_owner) || ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { *overlap = LIST_NEXT(lf, lf_link); continue; } #ifdef LOCKF_DEBUG if (lockf_debug & 2) lf_print("\tchecking", lf); #endif /* LOCKF_DEBUG */ /* * OK, check for overlap * * Six cases: * 0) no overlap * 1) overlap == lock * 2) overlap contains lock * 3) lock contains overlap * 4) overlap starts before lock * 5) overlap ends after lock */ if (start > lf->lf_end) { /* Case 0 */ #ifdef LOCKF_DEBUG if (lockf_debug & 2) printf("no overlap\n"); #endif /* LOCKF_DEBUG */ *overlap = LIST_NEXT(lf, lf_link); continue; } if (lf->lf_start == start && lf->lf_end == end) { /* Case 1 */ #ifdef LOCKF_DEBUG if (lockf_debug & 2) printf("overlap == lock\n"); #endif /* LOCKF_DEBUG */ res = 1; break; } if (lf->lf_start <= start && lf->lf_end >= end) { /* Case 2 */ #ifdef LOCKF_DEBUG if (lockf_debug & 2) printf("overlap contains lock\n"); #endif /* LOCKF_DEBUG */ res = 2; break; } if (start <= lf->lf_start && end >= lf->lf_end) { /* Case 3 */ #ifdef LOCKF_DEBUG if (lockf_debug & 2) printf("lock contains overlap\n"); #endif /* LOCKF_DEBUG */ res = 3; break; } if (lf->lf_start < start && lf->lf_end >= start) { /* Case 4 */ #ifdef LOCKF_DEBUG if (lockf_debug & 2) printf("overlap starts before lock\n"); #endif /* LOCKF_DEBUG */ res = 4; break; } if (lf->lf_start > start && lf->lf_end > end) { /* Case 5 */ #ifdef LOCKF_DEBUG if (lockf_debug & 2) printf("overlap ends after lock\n"); #endif /* LOCKF_DEBUG */ res = 5; break; } panic("lf_findoverlap: default"); } return (res); } /* * Split an the existing 'lock1', based on the extent of the lock * described by 'lock2'. The existing lock should cover 'lock2' * entirely. * * Any pending locks which have been been unblocked are added to * 'granted' */ static void lf_split(struct lockf *state, struct lockf_entry *lock1, struct lockf_entry *lock2, struct lockf_entry_list *granted) { struct lockf_entry *splitlock; #ifdef LOCKF_DEBUG if (lockf_debug & 2) { lf_print("lf_split", lock1); lf_print("splitting from", lock2); } #endif /* LOCKF_DEBUG */ /* * Check to see if we don't need to split at all. */ if (lock1->lf_start == lock2->lf_start) { lf_set_start(state, lock1, lock2->lf_end + 1, granted); return; } if (lock1->lf_end == lock2->lf_end) { lf_set_end(state, lock1, lock2->lf_start - 1, granted); return; } /* * Make a new lock consisting of the last part of * the encompassing lock. */ splitlock = lf_alloc_lock(lock1->lf_owner); memcpy(splitlock, lock1, sizeof *splitlock); splitlock->lf_refs = 1; if (splitlock->lf_flags & F_REMOTE) vref(splitlock->lf_vnode); /* * This cannot cause a deadlock since any edges we would add * to splitlock already exist in lock1. We must be sure to add * necessary dependencies to splitlock before we reduce lock1 * otherwise we may accidentally grant a pending lock that * was blocked by the tail end of lock1. 
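The six return values of lf_findoverlap can be reproduced over plain inclusive ranges; the fall-through order below matches the checks in the function above. Illustrative only:

#include <stdio.h>

struct range {
	long	start;		/* inclusive */
	long	end;
};

/* 'ovl' is the existing lock, 'lk' the lock being classified. */
static int
classify_overlap(struct range ovl, struct range lk)
{
	if (ovl.start > lk.end || lk.start > ovl.end)
		return (0);	/* no overlap */
	if (ovl.start == lk.start && ovl.end == lk.end)
		return (1);	/* overlap == lock */
	if (ovl.start <= lk.start && ovl.end >= lk.end)
		return (2);	/* overlap contains lock */
	if (lk.start <= ovl.start && lk.end >= ovl.end)
		return (3);	/* lock contains overlap */
	if (ovl.start < lk.start && ovl.end >= lk.start)
		return (4);	/* overlap starts before lock */
	return (5);		/* overlap ends after lock */
}

int
main(void)
{
	struct range held = { 0, 199 };
	struct range req = { 50, 99 };

	printf("case %d\n", classify_overlap(held, req));	/* 2 */
	return (0);
}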
*/ splitlock->lf_start = lock2->lf_end + 1; LIST_INIT(&splitlock->lf_outedges); LIST_INIT(&splitlock->lf_inedges); lf_add_incoming(state, splitlock); lf_set_end(state, lock1, lock2->lf_start - 1, granted); /* * OK, now link it in */ lf_insert_lock(state, splitlock); } struct lockdesc { STAILQ_ENTRY(lockdesc) link; struct vnode *vp; struct flock fl; }; STAILQ_HEAD(lockdesclist, lockdesc); int lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg) { struct lockf *ls; struct lockf_entry *lf; struct lockdesc *ldesc; struct lockdesclist locks; int error; /* * In order to keep the locking simple, we iterate over the * active lock lists to build a list of locks that need * releasing. We then call the iterator for each one in turn. * * We take an extra reference to the vnode for the duration to * make sure it doesn't go away before we are finished. */ STAILQ_INIT(&locks); sx_xlock(&lf_lock_states_lock); LIST_FOREACH(ls, &lf_lock_states, ls_link) { sx_xlock(&ls->ls_lock); LIST_FOREACH(lf, &ls->ls_active, lf_link) { if (lf->lf_owner->lo_sysid != sysid) continue; ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, M_WAITOK); ldesc->vp = lf->lf_vnode; vref(ldesc->vp); ldesc->fl.l_start = lf->lf_start; if (lf->lf_end == OFF_MAX) ldesc->fl.l_len = 0; else ldesc->fl.l_len = lf->lf_end - lf->lf_start + 1; ldesc->fl.l_whence = SEEK_SET; ldesc->fl.l_type = F_UNLCK; ldesc->fl.l_pid = lf->lf_owner->lo_pid; ldesc->fl.l_sysid = sysid; STAILQ_INSERT_TAIL(&locks, ldesc, link); } sx_xunlock(&ls->ls_lock); } sx_xunlock(&lf_lock_states_lock); /* * Call the iterator function for each lock in turn. If the * iterator returns an error code, just free the rest of the * lockdesc structures. */ error = 0; while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { STAILQ_REMOVE_HEAD(&locks, link); if (!error) error = fn(ldesc->vp, &ldesc->fl, arg); vrele(ldesc->vp); free(ldesc, M_LOCKF); } return (error); } int lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg) { struct lockf *ls; struct lockf_entry *lf; struct lockdesc *ldesc; struct lockdesclist locks; int error; /* * In order to keep the locking simple, we iterate over the * active lock lists to build a list of locks that need * releasing. We then call the iterator for each one in turn. * * We take an extra reference to the vnode for the duration to * make sure it doesn't go away before we are finished. */ STAILQ_INIT(&locks); VI_LOCK(vp); ls = vp->v_lockf; if (!ls) { VI_UNLOCK(vp); return (0); } MPASS(ls->ls_threads >= 0); ls->ls_threads++; VI_UNLOCK(vp); sx_xlock(&ls->ls_lock); LIST_FOREACH(lf, &ls->ls_active, lf_link) { ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, M_WAITOK); ldesc->vp = lf->lf_vnode; vref(ldesc->vp); ldesc->fl.l_start = lf->lf_start; if (lf->lf_end == OFF_MAX) ldesc->fl.l_len = 0; else ldesc->fl.l_len = lf->lf_end - lf->lf_start + 1; ldesc->fl.l_whence = SEEK_SET; ldesc->fl.l_type = F_UNLCK; ldesc->fl.l_pid = lf->lf_owner->lo_pid; ldesc->fl.l_sysid = lf->lf_owner->lo_sysid; STAILQ_INSERT_TAIL(&locks, ldesc, link); } sx_xunlock(&ls->ls_lock); VI_LOCK(vp); MPASS(ls->ls_threads > 0); ls->ls_threads--; wakeup(ls); VI_UNLOCK(vp); /* * Call the iterator function for each lock in turn. If the * iterator returns an error code, just free the rest of the * lockdesc structures. 
*/ error = 0; while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { STAILQ_REMOVE_HEAD(&locks, link); if (!error) error = fn(ldesc->vp, &ldesc->fl, arg); vrele(ldesc->vp); free(ldesc, M_LOCKF); } return (error); } static int lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg) { VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE); return (0); } void lf_clearremotesys(int sysid) { KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL); } int lf_countlocks(int sysid) { int i; struct lock_owner *lo; int count; count = 0; for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) { sx_xlock(&lf_lock_owners[i].lock); LIST_FOREACH(lo, &lf_lock_owners[i].list, lo_link) if (lo->lo_sysid == sysid) count += lo->lo_refs; sx_xunlock(&lf_lock_owners[i].lock); } return (count); } #ifdef LOCKF_DEBUG /* * Return non-zero if y is reachable from x using a brute force * search. If reachable and path is non-null, return the route taken * in path. */ static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y, struct owner_vertex_list *path) { struct owner_edge *e; if (x == y) { if (path) TAILQ_INSERT_HEAD(path, x, v_link); return 1; } LIST_FOREACH(e, &x->v_outedges, e_outlink) { if (graph_reaches(e->e_to, y, path)) { if (path) TAILQ_INSERT_HEAD(path, x, v_link); return 1; } } return 0; } /* * Perform consistency checks on the graph. Make sure the values of * v_order are correct. If checkorder is non-zero, check no vertex can * reach any other vertex with a smaller order. */ static void graph_check(struct owner_graph *g, int checkorder) { int i, j; for (i = 0; i < g->g_size; i++) { if (!g->g_vertices[i]->v_owner) continue; KASSERT(g->g_vertices[i]->v_order == i, ("lock graph vertices disordered")); if (checkorder) { for (j = 0; j < i; j++) { if (!g->g_vertices[j]->v_owner) continue; KASSERT(!graph_reaches(g->g_vertices[i], g->g_vertices[j], NULL), ("lock graph vertices disordered")); } } } } static void graph_print_vertices(struct owner_vertex_list *set) { struct owner_vertex *v; printf("{ "); TAILQ_FOREACH(v, set, v_link) { printf("%d:", v->v_order); lf_print_owner(v->v_owner); if (TAILQ_NEXT(v, v_link)) printf(", "); } printf(" }\n"); } #endif /* * Calculate the sub-set of vertices v from the affected region [y..x] * where v is reachable from y. Return -1 if a loop was detected * (i.e. x is reachable from y, otherwise the number of vertices in * this subset. */ static int graph_delta_forward(struct owner_graph *g, struct owner_vertex *x, struct owner_vertex *y, struct owner_vertex_list *delta) { uint32_t gen; struct owner_vertex *v; struct owner_edge *e; int n; /* * We start with a set containing just y. Then for each vertex * v in the set so far unprocessed, we add each vertex that v * has an out-edge to and that is within the affected region * [y..x]. If we see the vertex x on our travels, stop * immediately. */ TAILQ_INIT(delta); TAILQ_INSERT_TAIL(delta, y, v_link); v = y; n = 1; gen = g->g_gen; while (v) { LIST_FOREACH(e, &v->v_outedges, e_outlink) { if (e->e_to == x) return -1; if (e->e_to->v_order < x->v_order && e->e_to->v_gen != gen) { e->e_to->v_gen = gen; TAILQ_INSERT_TAIL(delta, e->e_to, v_link); n++; } } v = TAILQ_NEXT(v, v_link); } return (n); } /* * Calculate the sub-set of vertices v from the affected region [y..x] * where v reaches x. Return the number of vertices in this subset. 
*/ static int graph_delta_backward(struct owner_graph *g, struct owner_vertex *x, struct owner_vertex *y, struct owner_vertex_list *delta) { uint32_t gen; struct owner_vertex *v; struct owner_edge *e; int n; /* * We start with a set containing just x. Then for each vertex * v in the set so far unprocessed, we add each vertex that v * has an in-edge from and that is within the affected region * [y..x]. */ TAILQ_INIT(delta); TAILQ_INSERT_TAIL(delta, x, v_link); v = x; n = 1; gen = g->g_gen; while (v) { LIST_FOREACH(e, &v->v_inedges, e_inlink) { if (e->e_from->v_order > y->v_order && e->e_from->v_gen != gen) { e->e_from->v_gen = gen; TAILQ_INSERT_HEAD(delta, e->e_from, v_link); n++; } } v = TAILQ_PREV(v, owner_vertex_list, v_link); } return (n); } static int graph_add_indices(int *indices, int n, struct owner_vertex_list *set) { struct owner_vertex *v; int i, j; TAILQ_FOREACH(v, set, v_link) { for (i = n; i > 0 && indices[i - 1] > v->v_order; i--) ; for (j = n - 1; j >= i; j--) indices[j + 1] = indices[j]; indices[i] = v->v_order; n++; } return (n); } static int graph_assign_indices(struct owner_graph *g, int *indices, int nextunused, struct owner_vertex_list *set) { struct owner_vertex *v, *vlowest; while (!TAILQ_EMPTY(set)) { vlowest = NULL; TAILQ_FOREACH(v, set, v_link) { if (!vlowest || v->v_order < vlowest->v_order) vlowest = v; } TAILQ_REMOVE(set, vlowest, v_link); vlowest->v_order = indices[nextunused]; g->g_vertices[vlowest->v_order] = vlowest; nextunused++; } return (nextunused); } static int graph_add_edge(struct owner_graph *g, struct owner_vertex *x, struct owner_vertex *y) { struct owner_edge *e; struct owner_vertex_list deltaF, deltaB; int nF, n, vi, i; int *indices; int nB __unused; sx_assert(&lf_owner_graph_lock, SX_XLOCKED); LIST_FOREACH(e, &x->v_outedges, e_outlink) { if (e->e_to == y) { e->e_refs++; return (0); } } #ifdef LOCKF_DEBUG if (lockf_debug & 8) { printf("adding edge %d:", x->v_order); lf_print_owner(x->v_owner); printf(" -> %d:", y->v_order); lf_print_owner(y->v_owner); printf("\n"); } #endif if (y->v_order < x->v_order) { /* * The new edge violates the order. First find the set * of affected vertices reachable from y (deltaF) and * the set of affect vertices affected that reach x * (deltaB), using the graph generation number to * detect whether we have visited a given vertex * already. We re-order the graph so that each vertex * in deltaB appears before each vertex in deltaF. * * If x is a member of deltaF, then the new edge would * create a cycle. Otherwise, we may assume that * deltaF and deltaB are disjoint. */ g->g_gen++; if (g->g_gen == 0) { /* * Generation wrap. */ for (vi = 0; vi < g->g_size; vi++) { g->g_vertices[vi]->v_gen = 0; } g->g_gen++; } nF = graph_delta_forward(g, x, y, &deltaF); if (nF < 0) { #ifdef LOCKF_DEBUG if (lockf_debug & 8) { struct owner_vertex_list path; printf("deadlock: "); TAILQ_INIT(&path); graph_reaches(y, x, &path); graph_print_vertices(&path); } #endif return (EDEADLK); } #ifdef LOCKF_DEBUG if (lockf_debug & 8) { printf("re-ordering graph vertices\n"); printf("deltaF = "); graph_print_vertices(&deltaF); } #endif nB = graph_delta_backward(g, x, y, &deltaB); #ifdef LOCKF_DEBUG if (lockf_debug & 8) { printf("deltaB = "); graph_print_vertices(&deltaB); } #endif /* * We first build a set of vertex indices (vertex * order values) that we may use, then we re-assign * orders first to those vertices in deltaB, then to * deltaF. 
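The index-recycling step described above can be seen in isolation: the order values already held by the affected vertices are pooled and sorted, then handed back out to deltaB first and deltaF second, preserving relative order inside each set while ensuring every deltaB vertex sorts before every deltaF vertex. A small sketch with concrete numbers, illustrative only:

#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
	return (*(const int *)a - *(const int *)b);
}

int
main(void)
{
	/* Existing topological orders of the affected vertices. */
	int deltaF[] = { 2, 5 };	/* reachable from y */
	int deltaB[] = { 7, 9 };	/* reaching x */
	int pool[4], i, n = 0;

	for (i = 0; i < 2; i++)
		pool[n++] = deltaF[i];
	for (i = 0; i < 2; i++)
		pool[n++] = deltaB[i];
	qsort(pool, n, sizeof(int), cmp_int);

	/* deltaB consumes the lowest recycled orders, then deltaF. */
	printf("deltaB -> %d %d\n", pool[0], pool[1]);	/* 2 5 */
	printf("deltaF -> %d %d\n", pool[2], pool[3]);	/* 7 9 */
	return (0);
}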
Note that the contents of deltaF and deltaB * may be partially disordered - we perform an * insertion sort while building our index set. */ indices = g->g_indexbuf; n = graph_add_indices(indices, 0, &deltaF); graph_add_indices(indices, n, &deltaB); /* * We must also be sure to maintain the relative * ordering of deltaF and deltaB when re-assigning * vertices. We do this by iteratively removing the * lowest ordered element from the set and assigning * it the next value from our new ordering. */ i = graph_assign_indices(g, indices, 0, &deltaB); graph_assign_indices(g, indices, i, &deltaF); #ifdef LOCKF_DEBUG if (lockf_debug & 8) { struct owner_vertex_list set; TAILQ_INIT(&set); for (i = 0; i < nB + nF; i++) TAILQ_INSERT_TAIL(&set, g->g_vertices[indices[i]], v_link); printf("new ordering = "); graph_print_vertices(&set); } #endif } KASSERT(x->v_order < y->v_order, ("Failed to re-order graph")); #ifdef LOCKF_DEBUG if (lockf_debug & 8) { graph_check(g, TRUE); } #endif e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK); LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink); LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink); e->e_refs = 1; e->e_from = x; e->e_to = y; return (0); } /* * Remove an edge x->y from the graph. */ static void graph_remove_edge(struct owner_graph *g, struct owner_vertex *x, struct owner_vertex *y) { struct owner_edge *e; sx_assert(&lf_owner_graph_lock, SX_XLOCKED); LIST_FOREACH(e, &x->v_outedges, e_outlink) { if (e->e_to == y) break; } KASSERT(e, ("Removing non-existent edge from deadlock graph")); e->e_refs--; if (e->e_refs == 0) { #ifdef LOCKF_DEBUG if (lockf_debug & 8) { printf("removing edge %d:", x->v_order); lf_print_owner(x->v_owner); printf(" -> %d:", y->v_order); lf_print_owner(y->v_owner); printf("\n"); } #endif LIST_REMOVE(e, e_outlink); LIST_REMOVE(e, e_inlink); free(e, M_LOCKF); } } /* * Allocate a vertex from the free list. Return ENOMEM if there are * none. */ static struct owner_vertex * graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo) { struct owner_vertex *v; sx_assert(&lf_owner_graph_lock, SX_XLOCKED); v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK); if (g->g_size == g->g_space) { g->g_vertices = realloc(g->g_vertices, 2 * g->g_space * sizeof(struct owner_vertex *), M_LOCKF, M_WAITOK); free(g->g_indexbuf, M_LOCKF); g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), M_LOCKF, M_WAITOK); g->g_space = 2 * g->g_space; } v->v_order = g->g_size; v->v_gen = g->g_gen; g->g_vertices[g->g_size] = v; g->g_size++; LIST_INIT(&v->v_outedges); LIST_INIT(&v->v_inedges); v->v_owner = lo; return (v); } static void graph_free_vertex(struct owner_graph *g, struct owner_vertex *v) { struct owner_vertex *w; int i; sx_assert(&lf_owner_graph_lock, SX_XLOCKED); KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); /* * Remove from the graph's array and close up the gap, * renumbering the other vertices. 
*/ for (i = v->v_order + 1; i < g->g_size; i++) { w = g->g_vertices[i]; w->v_order--; g->g_vertices[i - 1] = w; } g->g_size--; free(v, M_LOCKF); } static struct owner_graph * graph_init(struct owner_graph *g) { g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), M_LOCKF, M_WAITOK); g->g_size = 0; g->g_space = 10; g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); g->g_gen = 0; return (g); } +struct kinfo_lockf_linked { + struct kinfo_lockf kl; + struct vnode *vp; + STAILQ_ENTRY(kinfo_lockf_linked) link; +}; + +int +vfs_report_lockf(struct mount *mp, struct sbuf *sb) +{ + struct lockf *ls; + struct lockf_entry *lf; + struct kinfo_lockf_linked *klf; + struct vnode *vp; + struct ucred *ucred; + char *fullpath, *freepath; + struct stat stt; + fsid_t fsidx; + STAILQ_HEAD(, kinfo_lockf_linked) locks; + int error, gerror; + + STAILQ_INIT(&locks); + sx_slock(&lf_lock_states_lock); + LIST_FOREACH(ls, &lf_lock_states, ls_link) { + sx_slock(&ls->ls_lock); + LIST_FOREACH(lf, &ls->ls_active, lf_link) { + vp = lf->lf_vnode; + if (VN_IS_DOOMED(vp) || vp->v_mount != mp) + continue; + vhold(vp); + klf = malloc(sizeof(struct kinfo_lockf_linked), + M_LOCKF, M_WAITOK | M_ZERO); + klf->vp = vp; + klf->kl.kl_structsize = sizeof(struct kinfo_lockf); + klf->kl.kl_start = lf->lf_start; + klf->kl.kl_len = lf->lf_end == OFF_MAX ? 0 : + lf->lf_end - lf->lf_start + 1; + klf->kl.kl_rw = lf->lf_type == F_RDLCK ? + KLOCKF_RW_READ : KLOCKF_RW_WRITE; + if (lf->lf_owner->lo_sysid != 0) { + klf->kl.kl_pid = lf->lf_owner->lo_pid; + klf->kl.kl_sysid = lf->lf_owner->lo_sysid; + klf->kl.kl_type = KLOCKF_TYPE_REMOTE; + } else if (lf->lf_owner->lo_pid == -1) { + klf->kl.kl_pid = -1; + klf->kl.kl_sysid = 0; + klf->kl.kl_type = KLOCKF_TYPE_FLOCK; + } else { + klf->kl.kl_pid = lf->lf_owner->lo_pid; + klf->kl.kl_sysid = 0; + klf->kl.kl_type = KLOCKF_TYPE_PID; + } + STAILQ_INSERT_TAIL(&locks, klf, link); + } + sx_sunlock(&ls->ls_lock); + } + sx_sunlock(&lf_lock_states_lock); + + gerror = 0; + ucred = curthread->td_ucred; + fsidx = mp->mnt_stat.f_fsid; + while ((klf = STAILQ_FIRST(&locks)) != NULL) { + STAILQ_REMOVE_HEAD(&locks, link); + vp = klf->vp; + if (gerror == 0 && vn_lock(vp, LK_SHARED) == 0) { + error = prison_canseemount(ucred, vp->v_mount); + if (error == 0) + error = VOP_STAT(vp, &stt, ucred, NOCRED); + VOP_UNLOCK(vp); + if (error == 0) { + memcpy(&klf->kl.kl_file_fsid, &fsidx, + sizeof(fsidx)); + klf->kl.kl_file_rdev = stt.st_rdev; + klf->kl.kl_file_fileid = stt.st_ino; + freepath = NULL; + fullpath = "-"; + error = vn_fullpath(vp, &fullpath, &freepath); + if (error == 0) + strlcpy(klf->kl.kl_path, fullpath, + sizeof(klf->kl.kl_path)); + free(freepath, M_TEMP); + if (sbuf_bcat(sb, &klf->kl, + klf->kl.kl_structsize) != 0) { + gerror = sbuf_error(sb); + } + } + } + vdrop(vp); + free(klf, M_LOCKF); + } + + return (gerror); +} + +static int +sysctl_kern_lockf_run(struct sbuf *sb) +{ + struct mount *mp; + int error; + + error = 0; + mtx_lock(&mountlist_mtx); + TAILQ_FOREACH(mp, &mountlist, mnt_list) { + error = vfs_busy(mp, MBF_MNTLSTLOCK); + if (error != 0) + continue; + error = mp->mnt_op->vfs_report_lockf(mp, sb); + mtx_lock(&mountlist_mtx); + vfs_unbusy(mp); + if (error != 0) + break; + } + mtx_unlock(&mountlist_mtx); + return (error); +} + +static int +sysctl_kern_lockf(SYSCTL_HANDLER_ARGS) +{ + struct sbuf sb; + int error, error2; + + sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_lockf) * 5, req); + sbuf_clear_flags(&sb, SBUF_INCLUDENUL); + error = sysctl_kern_lockf_run(&sb); + error2 = 
sbuf_finish(&sb); + sbuf_delete(&sb); + return (error != 0 ? error : error2); +} +SYSCTL_PROC(_kern, KERN_LOCKF, lockf, + CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, + 0, 0, sysctl_kern_lockf, "S,lockf", + "Advisory locks table"); + #ifdef LOCKF_DEBUG /* * Print description of a lock owner */ static void lf_print_owner(struct lock_owner *lo) { if (lo->lo_flags & F_REMOTE) { printf("remote pid %d, system %d", lo->lo_pid, lo->lo_sysid); } else if (lo->lo_flags & F_FLOCK) { printf("file %p", lo->lo_id); } else { printf("local pid %d", lo->lo_pid); } } /* * Print out a lock. */ static void lf_print(char *tag, struct lockf_entry *lock) { printf("%s: lock %p for ", tag, (void *)lock); lf_print_owner(lock->lf_owner); printf("\nvnode %p", lock->lf_vnode); VOP_PRINT(lock->lf_vnode); printf(" %s, start %jd, end ", lock->lf_type == F_RDLCK ? "shared" : lock->lf_type == F_WRLCK ? "exclusive" : lock->lf_type == F_UNLCK ? "unlock" : "unknown", (intmax_t)lock->lf_start); if (lock->lf_end == OFF_MAX) printf("EOF"); else printf("%jd", (intmax_t)lock->lf_end); if (!LIST_EMPTY(&lock->lf_outedges)) printf(" block %p\n", (void *)LIST_FIRST(&lock->lf_outedges)->le_to); else printf("\n"); } static void lf_printlist(char *tag, struct lockf_entry *lock) { struct lockf_entry *lf, *blk; struct lockf_edge *e; printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode); LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { printf("\tlock %p for ",(void *)lf); lf_print_owner(lock->lf_owner); printf(", %s, start %jd, end %jd", lf->lf_type == F_RDLCK ? "shared" : lf->lf_type == F_WRLCK ? "exclusive" : lf->lf_type == F_UNLCK ? "unlock" : "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { blk = e->le_to; printf("\n\t\tlock request %p for ", (void *)blk); lf_print_owner(blk->lf_owner); printf(", %s, start %jd, end %jd", blk->lf_type == F_RDLCK ? "shared" : blk->lf_type == F_WRLCK ? "exclusive" : blk->lf_type == F_UNLCK ? "unlock" : "unknown", (intmax_t)blk->lf_start, (intmax_t)blk->lf_end); if (!LIST_EMPTY(&blk->lf_inedges)) panic("lf_printlist: bad list"); } printf("\n"); } } #endif /* LOCKF_DEBUG */ diff --git a/sys/kern/vfs_init.c b/sys/kern/vfs_init.c index 612cc06a0db7..d6065deb25fe 100644 --- a/sys/kern/vfs_init.c +++ b/sys/kern/vfs_init.c @@ -1,594 +1,607 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed * to Berkeley by John Heidemann of the UCLA Ficus project. * * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)vfs_init.c 8.3 (Berkeley) 1/4/94 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include static int vfs_register(struct vfsconf *); static int vfs_unregister(struct vfsconf *); MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes"); /* * The highest defined VFS number. */ int maxvfsconf = VFS_GENERIC + 1; /* * Single-linked list of configured VFSes. * New entries are added/deleted by vfs_register()/vfs_unregister() */ struct vfsconfhead vfsconf = TAILQ_HEAD_INITIALIZER(vfsconf); struct sx vfsconf_sx; SX_SYSINIT(vfsconf, &vfsconf_sx, "vfsconf"); /* * Loader.conf variable vfs.typenumhash enables setting vfc_typenum using a hash * calculation on vfc_name, so that it doesn't change when file systems are * loaded in a different order. This will avoid the NFS server file handles from * changing for file systems that use vfc_typenum in their fsid. */ static int vfs_typenumhash = 1; SYSCTL_INT(_vfs, OID_AUTO, typenumhash, CTLFLAG_RDTUN, &vfs_typenumhash, 0, "Set vfc_typenum using a hash calculation on vfc_name, so that it does not" "change when file systems are loaded in a different order."); /* * A Zen vnode attribute structure. * * Initialized when the first filesystem registers by vfs_register(). */ struct vattr va_null; /* * vfs_init.c * * Allocate and fill in operations vectors. * * An undocumented feature of this approach to defining operations is that * there can be multiple entries in vfs_opv_descs for the same operations * vector. This allows third parties to extend the set of operations * supported by another layer in a binary compatibile way. For example, * assume that NFS needed to be modified to support Ficus. NFS has an entry * (probably nfs_vnopdeop_decls) declaring all the operations NFS supports by * default. Ficus could add another entry (ficus_nfs_vnodeop_decl_entensions) * listing those new operations Ficus adds to NFS, all without modifying the * NFS code. (Of couse, the OTW NFS protocol still needs to be munged, but * that is a(whole)nother story.) This is a feature. */ /* * Routines having to do with the management of the vnode table. 
*/ static struct vfsconf * vfs_byname_locked(const char *name) { struct vfsconf *vfsp; sx_assert(&vfsconf_sx, SA_LOCKED); if (!strcmp(name, "ffs")) name = "ufs"; TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { if (!strcmp(name, vfsp->vfc_name)) return (vfsp); } return (NULL); } struct vfsconf * vfs_byname(const char *name) { struct vfsconf *vfsp; vfsconf_slock(); vfsp = vfs_byname_locked(name); vfsconf_sunlock(); return (vfsp); } struct vfsconf * vfs_byname_kld(const char *fstype, struct thread *td, int *error) { struct vfsconf *vfsp; int fileid, loaded; vfsp = vfs_byname(fstype); if (vfsp != NULL) return (vfsp); /* Try to load the respective module. */ *error = kern_kldload(td, fstype, &fileid); loaded = (*error == 0); if (*error == EEXIST) *error = 0; if (*error) return (NULL); /* Look up again to see if the VFS was loaded. */ vfsp = vfs_byname(fstype); if (vfsp == NULL) { if (loaded) (void)kern_kldunload(td, fileid, LINKER_UNLOAD_FORCE); *error = ENODEV; return (NULL); } return (vfsp); } static int vfs_mount_sigdefer(struct mount *mp) { int prev_stops, rc; TSRAW(curthread, TS_ENTER, "VFS_MOUNT", mp->mnt_vfc->vfc_name); prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_mount)(mp); sigallowstop(prev_stops); TSRAW(curthread, TS_EXIT, "VFS_MOUNT", mp->mnt_vfc->vfc_name); return (rc); } static int vfs_unmount_sigdefer(struct mount *mp, int mntflags) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_unmount)(mp, mntflags); sigallowstop(prev_stops); return (rc); } static int vfs_root_sigdefer(struct mount *mp, int flags, struct vnode **vpp) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_root)(mp, flags, vpp); sigallowstop(prev_stops); return (rc); } static int vfs_cachedroot_sigdefer(struct mount *mp, int flags, struct vnode **vpp) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_cachedroot)(mp, flags, vpp); sigallowstop(prev_stops); return (rc); } static int vfs_quotactl_sigdefer(struct mount *mp, int cmd, uid_t uid, void *arg, bool *mp_busy) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_quotactl)(mp, cmd, uid, arg, mp_busy); sigallowstop(prev_stops); return (rc); } static int vfs_statfs_sigdefer(struct mount *mp, struct statfs *sbp) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_statfs)(mp, sbp); sigallowstop(prev_stops); return (rc); } static int vfs_sync_sigdefer(struct mount *mp, int waitfor) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_sync)(mp, waitfor); sigallowstop(prev_stops); return (rc); } static int vfs_vget_sigdefer(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_vget)(mp, ino, flags, vpp); sigallowstop(prev_stops); return (rc); } static int vfs_fhtovp_sigdefer(struct mount *mp, struct fid *fidp, int flags, struct vnode **vpp) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_fhtovp)(mp, fidp, flags, vpp); sigallowstop(prev_stops); return (rc); } static int vfs_checkexp_sigdefer(struct mount *mp, struct sockaddr *nam, uint64_t *exflg, struct ucred **credp, int *numsecflavors, int *secflavors) { int prev_stops, 
rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_checkexp)(mp, nam, exflg, credp, numsecflavors, secflavors); sigallowstop(prev_stops); return (rc); } static int vfs_extattrctl_sigdefer(struct mount *mp, int cmd, struct vnode *filename_vp, int attrnamespace, const char *attrname) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_extattrctl)(mp, cmd, filename_vp, attrnamespace, attrname); sigallowstop(prev_stops); return (rc); } static int vfs_sysctl_sigdefer(struct mount *mp, fsctlop_t op, struct sysctl_req *req) { int prev_stops, rc; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_sysctl)(mp, op, req); sigallowstop(prev_stops); return (rc); } static void vfs_susp_clean_sigdefer(struct mount *mp) { int prev_stops; if (*mp->mnt_vfc->vfc_vfsops_sd->vfs_susp_clean == NULL) return; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); (*mp->mnt_vfc->vfc_vfsops_sd->vfs_susp_clean)(mp); sigallowstop(prev_stops); } static void vfs_reclaim_lowervp_sigdefer(struct mount *mp, struct vnode *vp) { int prev_stops; if (*mp->mnt_vfc->vfc_vfsops_sd->vfs_reclaim_lowervp == NULL) return; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); (*mp->mnt_vfc->vfc_vfsops_sd->vfs_reclaim_lowervp)(mp, vp); sigallowstop(prev_stops); } static void vfs_unlink_lowervp_sigdefer(struct mount *mp, struct vnode *vp) { int prev_stops; if (*mp->mnt_vfc->vfc_vfsops_sd->vfs_unlink_lowervp == NULL) return; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); (*(mp)->mnt_vfc->vfc_vfsops_sd->vfs_unlink_lowervp)(mp, vp); sigallowstop(prev_stops); } static void vfs_purge_sigdefer(struct mount *mp) { int prev_stops; prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); (*mp->mnt_vfc->vfc_vfsops_sd->vfs_purge)(mp); sigallowstop(prev_stops); } +static int +vfs_report_lockf_sigdefer(struct mount *mp, struct sbuf *sb) +{ + int prev_stops, rc; + + prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT); + rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_report_lockf)(mp, sb); + sigallowstop(prev_stops); + return (rc); +} + static struct vfsops vfsops_sigdefer = { .vfs_mount = vfs_mount_sigdefer, .vfs_unmount = vfs_unmount_sigdefer, .vfs_root = vfs_root_sigdefer, .vfs_cachedroot = vfs_cachedroot_sigdefer, .vfs_quotactl = vfs_quotactl_sigdefer, .vfs_statfs = vfs_statfs_sigdefer, .vfs_sync = vfs_sync_sigdefer, .vfs_vget = vfs_vget_sigdefer, .vfs_fhtovp = vfs_fhtovp_sigdefer, .vfs_checkexp = vfs_checkexp_sigdefer, .vfs_extattrctl = vfs_extattrctl_sigdefer, .vfs_sysctl = vfs_sysctl_sigdefer, .vfs_susp_clean = vfs_susp_clean_sigdefer, .vfs_reclaim_lowervp = vfs_reclaim_lowervp_sigdefer, .vfs_unlink_lowervp = vfs_unlink_lowervp_sigdefer, .vfs_purge = vfs_purge_sigdefer, - + .vfs_report_lockf = vfs_report_lockf_sigdefer, }; /* Register a new filesystem type in the global table */ static int vfs_register(struct vfsconf *vfc) { struct sysctl_oid *oidp; struct vfsops *vfsops; static int once; struct vfsconf *tvfc; uint32_t hashval; int secondpass; if (!once) { vattr_null(&va_null); once = 1; } if (vfc->vfc_version != VFS_VERSION) { printf("ERROR: filesystem %s, unsupported ABI version %x\n", vfc->vfc_name, vfc->vfc_version); return (EINVAL); } vfsconf_lock(); if (vfs_byname_locked(vfc->vfc_name) != NULL) { vfsconf_unlock(); return (EEXIST); } if (vfs_typenumhash != 0) { /* * Calculate a hash on vfc_name to use for vfc_typenum. 
Unless * all of 1<->255 are assigned, it is limited to 8bits since * that is what ZFS uses from vfc_typenum and is also the * preferred range for vfs_getnewfsid(). */ hashval = fnv_32_str(vfc->vfc_name, FNV1_32_INIT); hashval &= 0xff; secondpass = 0; do { /* Look for and fix any collision. */ TAILQ_FOREACH(tvfc, &vfsconf, vfc_list) { if (hashval == tvfc->vfc_typenum) { if (hashval == 255 && secondpass == 0) { hashval = 1; secondpass = 1; } else hashval++; break; } } } while (tvfc != NULL); vfc->vfc_typenum = hashval; if (vfc->vfc_typenum >= maxvfsconf) maxvfsconf = vfc->vfc_typenum + 1; } else vfc->vfc_typenum = maxvfsconf++; TAILQ_INSERT_TAIL(&vfsconf, vfc, vfc_list); /* * Initialise unused ``struct vfsops'' fields, to use * the vfs_std*() functions. Note, we need the mount * and unmount operations, at the least. The check * for vfsops available is just a debugging aid. */ KASSERT(vfc->vfc_vfsops != NULL, ("Filesystem %s has no vfsops", vfc->vfc_name)); /* * Check the mount and unmount operations. */ vfsops = vfc->vfc_vfsops; KASSERT(vfsops->vfs_mount != NULL, ("Filesystem %s has no mount op", vfc->vfc_name)); KASSERT(vfsops->vfs_unmount != NULL, ("Filesystem %s has no unmount op", vfc->vfc_name)); if (vfsops->vfs_root == NULL) /* return file system's root vnode */ vfsops->vfs_root = vfs_stdroot; if (vfsops->vfs_quotactl == NULL) /* quota control */ vfsops->vfs_quotactl = vfs_stdquotactl; if (vfsops->vfs_statfs == NULL) /* return file system's status */ vfsops->vfs_statfs = vfs_stdstatfs; if (vfsops->vfs_sync == NULL) /* * flush unwritten data (nosync) * file systems can use vfs_stdsync * explicitly by setting it in the * vfsop vector. */ vfsops->vfs_sync = vfs_stdnosync; if (vfsops->vfs_vget == NULL) /* convert an inode number to a vnode */ vfsops->vfs_vget = vfs_stdvget; if (vfsops->vfs_fhtovp == NULL) /* turn an NFS file handle into a vnode */ vfsops->vfs_fhtovp = vfs_stdfhtovp; if (vfsops->vfs_checkexp == NULL) /* check if file system is exported */ vfsops->vfs_checkexp = vfs_stdcheckexp; if (vfsops->vfs_init == NULL) /* file system specific initialisation */ vfsops->vfs_init = vfs_stdinit; if (vfsops->vfs_uninit == NULL) /* file system specific uninitialisation */ vfsops->vfs_uninit = vfs_stduninit; if (vfsops->vfs_extattrctl == NULL) /* extended attribute control */ vfsops->vfs_extattrctl = vfs_stdextattrctl; if (vfsops->vfs_sysctl == NULL) vfsops->vfs_sysctl = vfs_stdsysctl; + if (vfsops->vfs_report_lockf == NULL) + vfsops->vfs_report_lockf = vfs_report_lockf; if ((vfc->vfc_flags & VFCF_SBDRY) != 0) { vfc->vfc_vfsops_sd = vfc->vfc_vfsops; vfc->vfc_vfsops = &vfsops_sigdefer; } if (vfc->vfc_flags & VFCF_JAIL) prison_add_vfs(vfc); /* * Call init function for this VFS... */ if ((vfc->vfc_flags & VFCF_SBDRY) != 0) vfc->vfc_vfsops_sd->vfs_init(vfc); else vfc->vfc_vfsops->vfs_init(vfc); vfsconf_unlock(); /* * If this filesystem has a sysctl node under vfs * (i.e. vfs.xxfs), then change the oid number of that node to * match the filesystem's type number. This allows user code * which uses the type number to read sysctl variables defined * by the filesystem to continue working. Since the oids are * in a sorted list, we need to make sure the order is * preserved by re-registering the oid after modifying its * number. 
*/ sysctl_wlock(); SLIST_FOREACH(oidp, SYSCTL_CHILDREN(&sysctl___vfs), oid_link) { if (strcmp(oidp->oid_name, vfc->vfc_name) == 0) { sysctl_unregister_oid(oidp); oidp->oid_number = vfc->vfc_typenum; sysctl_register_oid(oidp); break; } } sysctl_wunlock(); return (0); } /* Remove registration of a filesystem type */ static int vfs_unregister(struct vfsconf *vfc) { struct vfsconf *vfsp; int error, maxtypenum; vfsconf_lock(); vfsp = vfs_byname_locked(vfc->vfc_name); if (vfsp == NULL) { vfsconf_unlock(); return (EINVAL); } if (vfsp->vfc_refcount != 0) { vfsconf_unlock(); return (EBUSY); } error = 0; if ((vfc->vfc_flags & VFCF_SBDRY) != 0) { if (vfc->vfc_vfsops_sd->vfs_uninit != NULL) error = vfc->vfc_vfsops_sd->vfs_uninit(vfsp); } else { if (vfc->vfc_vfsops->vfs_uninit != NULL) error = vfc->vfc_vfsops->vfs_uninit(vfsp); } if (error != 0) { vfsconf_unlock(); return (error); } TAILQ_REMOVE(&vfsconf, vfsp, vfc_list); maxtypenum = VFS_GENERIC; TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) if (maxtypenum < vfsp->vfc_typenum) maxtypenum = vfsp->vfc_typenum; maxvfsconf = maxtypenum + 1; vfsconf_unlock(); return (0); } /* * Standard kernel module handling code for filesystem modules. * Referenced from VFS_SET(). */ int vfs_modevent(module_t mod, int type, void *data) { struct vfsconf *vfc; int error = 0; vfc = (struct vfsconf *)data; switch (type) { case MOD_LOAD: if (vfc) error = vfs_register(vfc); break; case MOD_UNLOAD: if (vfc) error = vfs_unregister(vfc); break; default: error = EOPNOTSUPP; break; } return (error); } diff --git a/sys/sys/mount.h b/sys/sys/mount.h index 6941048656d1..3383bfe8f431 100644 --- a/sys/sys/mount.h +++ b/sys/sys/mount.h @@ -1,1194 +1,1198 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)mount.h 8.21 (Berkeley) 5/20/95 * $FreeBSD$ */ #ifndef _SYS_MOUNT_H_ #define _SYS_MOUNT_H_ #include #include #ifdef _KERNEL #include #include #include #include #include #include #endif /* * NOTE: When changing statfs structure, mount structure, MNT_* flags or * MNTK_* flags also update DDB show mount command in vfs_subr.c. */ typedef struct fsid { int32_t val[2]; } fsid_t; /* filesystem id type */ #define fsidcmp(a, b) memcmp((a), (b), sizeof(fsid_t)) /* * File identifier. * These are unique per filesystem on a single machine. * * Note that the offset of fid_data is 4 bytes, so care must be taken to avoid * undefined behavior accessing unaligned fields within an embedded struct. */ #define MAXFIDSZ 16 struct fid { u_short fid_len; /* length of data in bytes */ u_short fid_data0; /* force longword alignment */ char fid_data[MAXFIDSZ]; /* data (variable length) */ }; /* * filesystem statistics */ #define MFSNAMELEN 16 /* length of type name including null */ #define MNAMELEN 1024 /* size of on/from name bufs */ #define STATFS_VERSION 0x20140518 /* current version number */ struct statfs { uint32_t f_version; /* structure version number */ uint32_t f_type; /* type of filesystem */ uint64_t f_flags; /* copy of mount exported flags */ uint64_t f_bsize; /* filesystem fragment size */ uint64_t f_iosize; /* optimal transfer block size */ uint64_t f_blocks; /* total data blocks in filesystem */ uint64_t f_bfree; /* free blocks in filesystem */ int64_t f_bavail; /* free blocks avail to non-superuser */ uint64_t f_files; /* total file nodes in filesystem */ int64_t f_ffree; /* free nodes avail to non-superuser */ uint64_t f_syncwrites; /* count of sync writes since mount */ uint64_t f_asyncwrites; /* count of async writes since mount */ uint64_t f_syncreads; /* count of sync reads since mount */ uint64_t f_asyncreads; /* count of async reads since mount */ uint64_t f_spare[10]; /* unused spare */ uint32_t f_namemax; /* maximum filename length */ uid_t f_owner; /* user that mounted the filesystem */ fsid_t f_fsid; /* filesystem id */ char f_charspare[80]; /* spare string space */ char f_fstypename[MFSNAMELEN]; /* filesystem type name */ char f_mntfromname[MNAMELEN]; /* mounted filesystem */ char f_mntonname[MNAMELEN]; /* directory on which mounted */ }; #if defined(_WANT_FREEBSD11_STATFS) || defined(_KERNEL) #define FREEBSD11_STATFS_VERSION 0x20030518 /* current version number */ struct freebsd11_statfs { uint32_t f_version; /* structure version number */ uint32_t f_type; /* type of filesystem */ uint64_t f_flags; /* copy of mount exported flags */ uint64_t f_bsize; /* filesystem fragment size */ uint64_t f_iosize; /* optimal transfer block size */ uint64_t f_blocks; /* total data blocks in filesystem */ uint64_t f_bfree; /* free blocks in filesystem */ int64_t f_bavail; /* free blocks avail to non-superuser */ uint64_t f_files; /* total file nodes in filesystem */ int64_t f_ffree; /* free nodes avail to non-superuser */ uint64_t f_syncwrites; /* count of sync writes since mount */ uint64_t f_asyncwrites; /* count of async writes since mount */ uint64_t f_syncreads; /* count of sync reads since mount */ uint64_t f_asyncreads; /* count of async reads since mount */ uint64_t f_spare[10]; /* unused spare */ uint32_t f_namemax; /* maximum filename length */ uid_t f_owner; /* user that mounted the filesystem */ fsid_t f_fsid; /* filesystem id */ char f_charspare[80]; /* spare string space */ char f_fstypename[16]; /* filesystem type name */ char f_mntfromname[88]; /* mounted filesystem */ char 
f_mntonname[88]; /* directory on which mounted */ }; #endif /* _WANT_FREEBSD11_STATFS || _KERNEL */ #ifdef _KERNEL #define OMFSNAMELEN 16 /* length of fs type name, including null */ #define OMNAMELEN (88 - 2 * sizeof(long)) /* size of on/from name bufs */ /* XXX getfsstat.2 is out of date with write and read counter changes here. */ /* XXX statfs.2 is out of date with read counter changes here. */ struct ostatfs { long f_spare2; /* placeholder */ long f_bsize; /* fundamental filesystem block size */ long f_iosize; /* optimal transfer block size */ long f_blocks; /* total data blocks in filesystem */ long f_bfree; /* free blocks in fs */ long f_bavail; /* free blocks avail to non-superuser */ long f_files; /* total file nodes in filesystem */ long f_ffree; /* free file nodes in fs */ fsid_t f_fsid; /* filesystem id */ uid_t f_owner; /* user that mounted the filesystem */ int f_type; /* type of filesystem */ int f_flags; /* copy of mount exported flags */ long f_syncwrites; /* count of sync writes since mount */ long f_asyncwrites; /* count of async writes since mount */ char f_fstypename[OMFSNAMELEN]; /* fs type name */ char f_mntonname[OMNAMELEN]; /* directory on which mounted */ long f_syncreads; /* count of sync reads since mount */ long f_asyncreads; /* count of async reads since mount */ short f_spares1; /* unused spare */ char f_mntfromname[OMNAMELEN];/* mounted filesystem */ short f_spares2; /* unused spare */ /* * XXX on machines where longs are aligned to 8-byte boundaries, there * is an unnamed int32_t here. This spare was after the apparent end * of the struct until we bit off the read counters from f_mntonname. */ long f_spare[2]; /* unused spare */ }; #endif /* _KERNEL */ #if defined(_WANT_MOUNT) || defined(_KERNEL) TAILQ_HEAD(vnodelst, vnode); /* Mount options list */ TAILQ_HEAD(vfsoptlist, vfsopt); struct vfsopt { TAILQ_ENTRY(vfsopt) link; char *name; void *value; int len; int pos; int seen; }; struct mount_pcpu { int mntp_thread_in_ops; int mntp_ref; int mntp_lockref; int mntp_writeopcount; }; _Static_assert(sizeof(struct mount_pcpu) == 16, "the struct is allocated from pcpu 16 zone"); /* * Structure for tracking a stacked filesystem mounted above another * filesystem. This is expected to be stored in the upper FS' per-mount data. * * Lock reference: * i - lower mount interlock * c - constant from node initialization */ struct mount_upper_node { struct mount *mp; /* (c) mount object for upper FS */ TAILQ_ENTRY(mount_upper_node) mnt_upper_link; /* (i) position in uppers list */ }; /* * Structure per mounted filesystem. Each mounted filesystem has an * array of operations and an instance record. The filesystems are * put on a doubly linked list. * * Lock reference: * l - mnt_listmtx * m - mountlist_mtx * i - interlock * v - vnode freelist mutex * d - deferred unmount list mutex * * Unmarked fields are considered stable as long as a ref is held. 
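 *
 * For instance, a reader of an (i)-annotated field is expected to hold
 * the mount interlock.  A minimal sketch, using the MNT_ILOCK() and
 * MNT_IUNLOCK() macros defined further down in this header:
 *
 *	int kflag;
 *
 *	MNT_ILOCK(mp);
 *	kflag = mp->mnt_kern_flag;
 *	MNT_IUNLOCK(mp);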
* */ struct mount { int mnt_vfs_ops; /* (i) pending vfs ops */ int mnt_kern_flag; /* (i) kernel only flags */ uint64_t mnt_flag; /* (i) flags shared with user */ struct mount_pcpu *mnt_pcpu; /* per-CPU data */ struct vnode *mnt_rootvnode; struct vnode *mnt_vnodecovered; /* vnode we mounted on */ struct vfsops *mnt_op; /* operations on fs */ struct vfsconf *mnt_vfc; /* configuration info */ struct mtx __aligned(CACHE_LINE_SIZE) mnt_mtx; /* mount structure interlock */ int mnt_gen; /* struct mount generation */ #define mnt_startzero mnt_list TAILQ_ENTRY(mount) mnt_list; /* (m) mount list */ struct vnode *mnt_syncer; /* syncer vnode */ int mnt_ref; /* (i) Reference count */ struct vnodelst mnt_nvnodelist; /* (i) list of vnodes */ int mnt_nvnodelistsize; /* (i) # of vnodes */ int mnt_writeopcount; /* (i) write syscalls pending */ struct vfsoptlist *mnt_opt; /* current mount options */ struct vfsoptlist *mnt_optnew; /* new options passed to fs */ struct statfs mnt_stat; /* cache of filesystem stats */ struct ucred *mnt_cred; /* credentials of mounter */ void * mnt_data; /* private data */ time_t mnt_time; /* last time written*/ int mnt_iosize_max; /* max size for clusters, etc */ struct netexport *mnt_export; /* export list */ struct label *mnt_label; /* MAC label for the fs */ u_int mnt_hashseed; /* Random seed for vfs_hash */ int mnt_lockref; /* (i) Lock reference count */ int mnt_secondary_writes; /* (i) # of secondary writes */ int mnt_secondary_accwrites;/* (i) secondary wr. starts */ struct thread *mnt_susp_owner; /* (i) thread owning suspension */ #define mnt_endzero mnt_gjprovider char *mnt_gjprovider; /* gjournal provider name */ struct mtx mnt_listmtx; struct vnodelst mnt_lazyvnodelist; /* (l) list of lazy vnodes */ int mnt_lazyvnodelistsize; /* (l) # of lazy vnodes */ int mnt_upper_pending; /* (i) # of pending ops on mnt_uppers */ struct lock mnt_explock; /* vfs_export walkers lock */ TAILQ_HEAD(, mount_upper_node) mnt_uppers; /* (i) upper mounts over us */ TAILQ_HEAD(, mount_upper_node) mnt_notify; /* (i) upper mounts for notification */ STAILQ_ENTRY(mount) mnt_taskqueue_link; /* (d) our place in deferred unmount list */ uint64_t mnt_taskqueue_flags; /* (d) unmount flags passed from taskqueue */ unsigned int mnt_unmount_retries; /* (d) # of failed deferred unmount attempts */ }; #endif /* _WANT_MOUNT || _KERNEL */ #ifdef _KERNEL /* * Definitions for MNT_VNODE_FOREACH_ALL. */ struct vnode *__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp); struct vnode *__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp); void __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp); #define MNT_VNODE_FOREACH_ALL(vp, mp, mvp) \ for (vp = __mnt_vnode_first_all(&(mvp), (mp)); \ (vp) != NULL; vp = __mnt_vnode_next_all(&(mvp), (mp))) #define MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp) \ do { \ MNT_ILOCK(mp); \ __mnt_vnode_markerfree_all(&(mvp), (mp)); \ /* MNT_IUNLOCK(mp); -- done in above function */ \ mtx_assert(MNT_MTX(mp), MA_NOTOWNED); \ } while (0) /* * Definitions for MNT_VNODE_FOREACH_LAZY. 
*/ typedef int mnt_lazy_cb_t(struct vnode *, void *); struct vnode *__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, void *cbarg); struct vnode *__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, void *cbarg); void __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp); #define MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, cb, cbarg) \ for (vp = __mnt_vnode_first_lazy(&(mvp), (mp), (cb), (cbarg)); \ (vp) != NULL; \ vp = __mnt_vnode_next_lazy(&(mvp), (mp), (cb), (cbarg))) #define MNT_VNODE_FOREACH_LAZY_ABORT(mp, mvp) \ __mnt_vnode_markerfree_lazy(&(mvp), (mp)) #define MNT_ILOCK(mp) mtx_lock(&(mp)->mnt_mtx) #define MNT_ITRYLOCK(mp) mtx_trylock(&(mp)->mnt_mtx) #define MNT_IUNLOCK(mp) mtx_unlock(&(mp)->mnt_mtx) #define MNT_MTX(mp) (&(mp)->mnt_mtx) #define MNT_REF(mp) do { \ mtx_assert(MNT_MTX(mp), MA_OWNED); \ mp->mnt_ref++; \ } while (0) #define MNT_REL(mp) do { \ mtx_assert(MNT_MTX(mp), MA_OWNED); \ (mp)->mnt_ref--; \ if ((mp)->mnt_vfs_ops && (mp)->mnt_ref < 0) \ vfs_dump_mount_counters(mp); \ if ((mp)->mnt_ref == 0 && (mp)->mnt_vfs_ops) \ wakeup((mp)); \ } while (0) #endif /* _KERNEL */ #if defined(_WANT_MNTOPTNAMES) || defined(_KERNEL) struct mntoptnames { uint64_t o_opt; const char *o_name; }; #define MNTOPT_NAMES \ { MNT_ASYNC, "asynchronous" }, \ { MNT_EXPORTED, "NFS exported" }, \ { MNT_LOCAL, "local" }, \ { MNT_NOATIME, "noatime" }, \ { MNT_NOEXEC, "noexec" }, \ { MNT_NOSUID, "nosuid" }, \ { MNT_NOSYMFOLLOW, "nosymfollow" }, \ { MNT_QUOTA, "with quotas" }, \ { MNT_RDONLY, "read-only" }, \ { MNT_SYNCHRONOUS, "synchronous" }, \ { MNT_UNION, "union" }, \ { MNT_NOCLUSTERR, "noclusterr" }, \ { MNT_NOCLUSTERW, "noclusterw" }, \ { MNT_SUIDDIR, "suiddir" }, \ { MNT_SOFTDEP, "soft-updates" }, \ { MNT_SUJ, "journaled soft-updates" }, \ { MNT_MULTILABEL, "multilabel" }, \ { MNT_ACLS, "acls" }, \ { MNT_NFS4ACLS, "nfsv4acls" }, \ { MNT_GJOURNAL, "gjournal" }, \ { MNT_AUTOMOUNTED, "automounted" }, \ { MNT_VERIFIED, "verified" }, \ { MNT_UNTRUSTED, "untrusted" }, \ { MNT_NOCOVER, "nocover" }, \ { MNT_EMPTYDIR, "emptydir" }, \ { MNT_UPDATE, "update" }, \ { MNT_DELEXPORT, "delexport" }, \ { MNT_RELOAD, "reload" }, \ { MNT_FORCE, "force" }, \ { MNT_SNAPSHOT, "snapshot" }, \ { 0, NULL } #endif /* * User specifiable flags, stored in mnt_flag. 
*/ #define MNT_RDONLY 0x0000000000000001ULL /* read only filesystem */ #define MNT_SYNCHRONOUS 0x0000000000000002ULL /* fs written synchronously */ #define MNT_NOEXEC 0x0000000000000004ULL /* can't exec from filesystem */ #define MNT_NOSUID 0x0000000000000008ULL /* don't honor setuid fs bits */ #define MNT_NFS4ACLS 0x0000000000000010ULL /* enable NFS version 4 ACLs */ #define MNT_UNION 0x0000000000000020ULL /* union with underlying fs */ #define MNT_ASYNC 0x0000000000000040ULL /* fs written asynchronously */ #define MNT_SUIDDIR 0x0000000000100000ULL /* special SUID dir handling */ #define MNT_SOFTDEP 0x0000000000200000ULL /* using soft updates */ #define MNT_NOSYMFOLLOW 0x0000000000400000ULL /* do not follow symlinks */ #define MNT_GJOURNAL 0x0000000002000000ULL /* GEOM journal support enabled */ #define MNT_MULTILABEL 0x0000000004000000ULL /* MAC support for objects */ #define MNT_ACLS 0x0000000008000000ULL /* ACL support enabled */ #define MNT_NOATIME 0x0000000010000000ULL /* dont update file access time */ #define MNT_NOCLUSTERR 0x0000000040000000ULL /* disable cluster read */ #define MNT_NOCLUSTERW 0x0000000080000000ULL /* disable cluster write */ #define MNT_SUJ 0x0000000100000000ULL /* using journaled soft updates */ #define MNT_AUTOMOUNTED 0x0000000200000000ULL /* mounted by automountd(8) */ #define MNT_UNTRUSTED 0x0000000800000000ULL /* filesys metadata untrusted */ /* * NFS export related mount flags. */ #define MNT_EXRDONLY 0x0000000000000080ULL /* exported read only */ #define MNT_EXPORTED 0x0000000000000100ULL /* filesystem is exported */ #define MNT_DEFEXPORTED 0x0000000000000200ULL /* exported to the world */ #define MNT_EXPORTANON 0x0000000000000400ULL /* anon uid mapping for all */ #define MNT_EXKERB 0x0000000000000800ULL /* exported with Kerberos */ #define MNT_EXPUBLIC 0x0000000020000000ULL /* public export (WebNFS) */ #define MNT_EXTLS 0x0000004000000000ULL /* require TLS */ #define MNT_EXTLSCERT 0x0000008000000000ULL /* require TLS with client cert */ #define MNT_EXTLSCERTUSER 0x0000010000000000ULL /* require TLS with user cert */ /* * Flags set by internal operations, but visible to the user. */ #define MNT_LOCAL 0x0000000000001000ULL /* filesystem is stored locally */ #define MNT_QUOTA 0x0000000000002000ULL /* quotas are enabled on fs */ #define MNT_ROOTFS 0x0000000000004000ULL /* identifies the root fs */ #define MNT_USER 0x0000000000008000ULL /* mounted by a user */ #define MNT_IGNORE 0x0000000000800000ULL /* do not show entry in df */ #define MNT_VERIFIED 0x0000000400000000ULL /* filesystem is verified */ /* * Mask of flags that are visible to statfs(). * XXX I think that this could now become (~(MNT_CMDFLAGS)) * but the 'mount' program may need changing to handle this. */ #define MNT_VISFLAGMASK (MNT_RDONLY | MNT_SYNCHRONOUS | MNT_NOEXEC | \ MNT_NOSUID | MNT_UNION | MNT_SUJ | \ MNT_ASYNC | MNT_EXRDONLY | MNT_EXPORTED | \ MNT_DEFEXPORTED | MNT_EXPORTANON| MNT_EXKERB | \ MNT_LOCAL | MNT_USER | MNT_QUOTA | \ MNT_ROOTFS | MNT_NOATIME | MNT_NOCLUSTERR| \ MNT_NOCLUSTERW | MNT_SUIDDIR | MNT_SOFTDEP | \ MNT_IGNORE | MNT_EXPUBLIC | MNT_NOSYMFOLLOW | \ MNT_GJOURNAL | MNT_MULTILABEL | MNT_ACLS | \ MNT_NFS4ACLS | MNT_AUTOMOUNTED | MNT_VERIFIED | \ MNT_UNTRUSTED) /* Mask of flags that can be updated. 
*/ #define MNT_UPDATEMASK (MNT_NOSUID | MNT_NOEXEC | \ MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | \ MNT_NOATIME | \ MNT_NOSYMFOLLOW | MNT_IGNORE | \ MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR | \ MNT_ACLS | MNT_USER | MNT_NFS4ACLS | \ MNT_AUTOMOUNTED | MNT_UNTRUSTED) /* * External filesystem command modifier flags. * Unmount can use the MNT_FORCE flag. * XXX: These are not STATES and really should be somewhere else. * XXX: MNT_BYFSID and MNT_NONBUSY collide with MNT_ACLS and MNT_MULTILABEL, * but because MNT_ACLS and MNT_MULTILABEL are only used for mount(2), * and MNT_BYFSID and MNT_NONBUSY are only used for unmount(2), * it's harmless. */ #define MNT_UPDATE 0x0000000000010000ULL /* not real mount, just update */ #define MNT_DELEXPORT 0x0000000000020000ULL /* delete export host lists */ #define MNT_RELOAD 0x0000000000040000ULL /* reload filesystem data */ #define MNT_FORCE 0x0000000000080000ULL /* force unmount or readonly */ #define MNT_SNAPSHOT 0x0000000001000000ULL /* snapshot the filesystem */ #define MNT_NONBUSY 0x0000000004000000ULL /* check vnode use counts. */ #define MNT_BYFSID 0x0000000008000000ULL /* specify filesystem by ID. */ #define MNT_NOCOVER 0x0000001000000000ULL /* Do not cover a mount point */ #define MNT_EMPTYDIR 0x0000002000000000ULL /* Only mount on empty dir */ #define MNT_RECURSE 0x0000100000000000ULL /* recursively unmount uppers */ #define MNT_DEFERRED 0x0000200000000000ULL /* unmount in async context */ #define MNT_CMDFLAGS (MNT_UPDATE | MNT_DELEXPORT | MNT_RELOAD | \ MNT_FORCE | MNT_SNAPSHOT | MNT_NONBUSY | \ MNT_BYFSID | MNT_NOCOVER | MNT_EMPTYDIR | \ MNT_RECURSE | MNT_DEFERRED) /* * Internal filesystem control flags stored in mnt_kern_flag. * * MNTK_UNMOUNT locks the mount entry so that name lookup cannot * proceed past the mount point. This keeps the subtree stable during * mounts and unmounts. When non-forced unmount flushes all vnodes * from the mp queue, the MNTK_UNMOUNT flag prevents insmntque() from * queueing new vnodes. * * MNTK_UNMOUNTF permits filesystems to detect a forced unmount while * dounmount() is still waiting to lock the mountpoint. This allows * the filesystem to cancel operations that might otherwise deadlock * with the unmount attempt (used by NFS). */ #define MNTK_UNMOUNTF 0x00000001 /* forced unmount in progress */ #define MNTK_ASYNC 0x00000002 /* filtered async flag */ #define MNTK_SOFTDEP 0x00000004 /* async disabled by softdep */ #define MNTK_NOMSYNC 0x00000008 /* don't do msync */ #define MNTK_DRAINING 0x00000010 /* lock draining is happening */ #define MNTK_REFEXPIRE 0x00000020 /* refcount expiring is happening */ #define MNTK_EXTENDED_SHARED 0x00000040 /* Allow shared locking for more ops */ #define MNTK_SHARED_WRITES 0x00000080 /* Allow shared locking for writes */ #define MNTK_NO_IOPF 0x00000100 /* Disallow page faults during reads and writes. Filesystem shall properly handle i/o state on EFAULT. */ #define MNTK_RECURSE 0x00000200 /* pending recursive unmount */ #define MNTK_UPPER_WAITER 0x00000400 /* waiting to drain MNTK_UPPER_PENDING */ /* UNUSED 0x00000800 */ #define MNTK_UNLOCKED_INSMNTQUE 0x00001000 /* fs does not lock the vnode for insmntque */ #define MNTK_UNMAPPED_BUFS 0x00002000 #define MNTK_USES_BCACHE 0x00004000 /* FS uses the buffer cache. 
*/ /* UNUSED 0x00008000 */ #define MNTK_VMSETSIZE_BUG 0x00010000 #define MNTK_UNIONFS 0x00020000 /* A hack for F_ISUNIONSTACK */ #define MNTK_FPLOOKUP 0x00040000 /* fast path lookup is supported */ #define MNTK_SUSPEND_ALL 0x00080000 /* Suspended by all-fs suspension */ #define MNTK_TASKQUEUE_WAITER 0x00100000 /* Waiting on unmount taskqueue */ /* UNUSED 0x00200000 */ /* UNUSED 0x00400000 */ #define MNTK_NOASYNC 0x00800000 /* disable async */ #define MNTK_UNMOUNT 0x01000000 /* unmount in progress */ #define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish */ #define MNTK_SUSPEND 0x08000000 /* request write suspension */ #define MNTK_SUSPEND2 0x04000000 /* block secondary writes */ #define MNTK_SUSPENDED 0x10000000 /* write operations are suspended */ #define MNTK_NULL_NOCACHE 0x20000000 /* auto disable cache for nullfs mounts over this fs */ #define MNTK_LOOKUP_SHARED 0x40000000 /* FS supports shared lock lookups */ /* UNUSED 0x80000000 */ #ifdef _KERNEL static inline int MNT_SHARED_WRITES(struct mount *mp) { return (mp != NULL && (mp->mnt_kern_flag & MNTK_SHARED_WRITES) != 0); } static inline int MNT_EXTENDED_SHARED(struct mount *mp) { return (mp != NULL && (mp->mnt_kern_flag & MNTK_EXTENDED_SHARED) != 0); } #endif /* * Sysctl CTL_VFS definitions. * * Second level identifier specifies which filesystem. Second level * identifier VFS_VFSCONF returns information about all filesystems. * Second level identifier VFS_GENERIC is non-terminal. */ #define VFS_VFSCONF 0 /* get configured filesystems */ #define VFS_GENERIC 0 /* generic filesystem information */ /* * Third level identifiers for VFS_GENERIC are given below; third * level identifiers for specific filesystems are given in their * mount specific header files. */ #define VFS_MAXTYPENUM 1 /* int: highest defined filesystem type */ #define VFS_CONF 2 /* struct: vfsconf for filesystem given as next argument */ /* * Flags for various system call interfaces. * * waitfor flags to vfs_sync() and getfsstat() */ #define MNT_WAIT 1 /* synchronously wait for I/O to complete */ #define MNT_NOWAIT 2 /* start all I/O, but do not wait for it */ #define MNT_LAZY 3 /* push data not written by filesystem syncer */ #define MNT_SUSPEND 4 /* Suspend file system after sync */ /* * Generic file handle */ struct fhandle { fsid_t fh_fsid; /* Filesystem id of mount point */ struct fid fh_fid; /* Filesys specific id */ }; typedef struct fhandle fhandle_t; /* * Old export arguments without security flavor list */ struct oexport_args { int ex_flags; /* export related flags */ uid_t ex_root; /* mapping for root uid */ struct xucred ex_anon; /* mapping for anonymous user */ struct sockaddr *ex_addr; /* net address to which exported */ u_char ex_addrlen; /* and the net address length */ struct sockaddr *ex_mask; /* mask of valid bits in saddr */ u_char ex_masklen; /* and the smask length */ char *ex_indexfile; /* index file for WebNFS URLs */ }; /* * Not quite so old export arguments with 32bit ex_flags and xucred ex_anon. 
*/ #define MAXSECFLAVORS 5 struct o2export_args { int ex_flags; /* export related flags */ uid_t ex_root; /* mapping for root uid */ struct xucred ex_anon; /* mapping for anonymous user */ struct sockaddr *ex_addr; /* net address to which exported */ u_char ex_addrlen; /* and the net address length */ struct sockaddr *ex_mask; /* mask of valid bits in saddr */ u_char ex_masklen; /* and the smask length */ char *ex_indexfile; /* index file for WebNFS URLs */ int ex_numsecflavors; /* security flavor count */ int ex_secflavors[MAXSECFLAVORS]; /* list of security flavors */ }; /* * Export arguments for local filesystem mount calls. */ struct export_args { uint64_t ex_flags; /* export related flags */ uid_t ex_root; /* mapping for root uid */ uid_t ex_uid; /* mapping for anonymous user */ int ex_ngroups; gid_t *ex_groups; struct sockaddr *ex_addr; /* net address to which exported */ u_char ex_addrlen; /* and the net address length */ struct sockaddr *ex_mask; /* mask of valid bits in saddr */ u_char ex_masklen; /* and the smask length */ char *ex_indexfile; /* index file for WebNFS URLs */ int ex_numsecflavors; /* security flavor count */ int ex_secflavors[MAXSECFLAVORS]; /* list of security flavors */ }; /* * Structure holding information for a publicly exported filesystem * (WebNFS). Currently the specs allow just for one such filesystem. */ struct nfs_public { int np_valid; /* Do we hold valid information */ fhandle_t np_handle; /* Filehandle for pub fs (internal) */ struct mount *np_mount; /* Mountpoint of exported fs */ char *np_index; /* Index file */ }; /* * Filesystem configuration information. One of these exists for each * type of filesystem supported by the kernel. These are searched at * mount time to identify the requested filesystem. * * XXX: Never change the first two arguments! */ struct vfsconf { u_int vfc_version; /* ABI version number */ char vfc_name[MFSNAMELEN]; /* filesystem type name */ struct vfsops *vfc_vfsops; /* filesystem operations vector */ struct vfsops *vfc_vfsops_sd; /* ... signal-deferred */ int vfc_typenum; /* historic filesystem type number */ int vfc_refcount; /* number mounted of this type */ int vfc_flags; /* permanent flags */ int vfc_prison_flag; /* prison allow.mount.* flag */ struct vfsoptdecl *vfc_opts; /* mount options */ TAILQ_ENTRY(vfsconf) vfc_list; /* list of vfscons */ }; /* Userland version of the struct vfsconf. */ struct xvfsconf { struct vfsops *vfc_vfsops; /* filesystem operations vector */ char vfc_name[MFSNAMELEN]; /* filesystem type name */ int vfc_typenum; /* historic filesystem type number */ int vfc_refcount; /* number mounted of this type */ int vfc_flags; /* permanent flags */ struct vfsconf *vfc_next; /* next in list */ }; #ifndef BURN_BRIDGES struct ovfsconf { void *vfc_vfsops; char vfc_name[32]; int vfc_index; int vfc_refcount; int vfc_flags; }; #endif /* * NB: these flags refer to IMPLEMENTATION properties, not properties of * any actual mounts; i.e., it does not make sense to change the flags. 
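 *
 * As a purely illustrative example, a loopback filesystem that may be
 * mounted from within a jail would register itself with
 * VFS_SET(xxxfs_vfsops, xxxfs, VFCF_LOOPBACK | VFCF_JAIL), where the
 * name "xxxfs" is hypothetical and VFS_SET() is defined later in this
 * header.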
*/ #define VFCF_STATIC 0x00010000 /* statically compiled into kernel */ #define VFCF_NETWORK 0x00020000 /* may get data over the network */ #define VFCF_READONLY 0x00040000 /* writes are not implemented */ #define VFCF_SYNTHETIC 0x00080000 /* data does not represent real files */ #define VFCF_LOOPBACK 0x00100000 /* aliases some other mounted FS */ #define VFCF_UNICODE 0x00200000 /* stores file names as Unicode */ #define VFCF_JAIL 0x00400000 /* can be mounted from within a jail */ #define VFCF_DELEGADMIN 0x00800000 /* supports delegated administration */ #define VFCF_SBDRY 0x01000000 /* Stop at Boundary: defer stop requests to kernel->user (AST) transition */ typedef uint32_t fsctlop_t; struct vfsidctl { int vc_vers; /* should be VFSIDCTL_VERS1 (below) */ fsid_t vc_fsid; /* fsid to operate on */ char vc_fstypename[MFSNAMELEN]; /* type of fs 'nfs' or '*' */ fsctlop_t vc_op; /* operation VFS_CTL_* (below) */ void *vc_ptr; /* pointer to data structure */ size_t vc_len; /* sizeof said structure */ u_int32_t vc_spare[12]; /* spare (must be zero) */ }; /* vfsidctl API version. */ #define VFS_CTL_VERS1 0x01 /* * New style VFS sysctls, do not reuse/conflict with the namespace for * private sysctls. * All "global" sysctl ops have the 33rd bit set: * 0x...1.... * Private sysctl ops should have the 33rd bit unset. */ #define VFS_CTL_QUERY 0x00010001 /* anything wrong? (vfsquery) */ #define VFS_CTL_TIMEO 0x00010002 /* set timeout for vfs notification */ #define VFS_CTL_NOLOCKS 0x00010003 /* disable file locking */ struct vfsquery { u_int32_t vq_flags; u_int32_t vq_spare[31]; }; /* vfsquery flags */ #define VQ_NOTRESP 0x0001 /* server down */ #define VQ_NEEDAUTH 0x0002 /* server bad auth */ #define VQ_LOWDISK 0x0004 /* we're low on space */ #define VQ_MOUNT 0x0008 /* new filesystem arrived */ #define VQ_UNMOUNT 0x0010 /* filesystem has left */ #define VQ_DEAD 0x0020 /* filesystem is dead, needs force unmount */ #define VQ_ASSIST 0x0040 /* filesystem needs assistance from external program */ #define VQ_NOTRESPLOCK 0x0080 /* server lockd down */ #define VQ_FLAG0100 0x0100 /* placeholder */ #define VQ_FLAG0200 0x0200 /* placeholder */ #define VQ_FLAG0400 0x0400 /* placeholder */ #define VQ_FLAG0800 0x0800 /* placeholder */ #define VQ_FLAG1000 0x1000 /* placeholder */ #define VQ_FLAG2000 0x2000 /* placeholder */ #define VQ_FLAG4000 0x4000 /* placeholder */ #define VQ_FLAG8000 0x8000 /* placeholder */ #ifdef _KERNEL /* Point a sysctl request at a vfsidctl's data. */ #define VCTLTOREQ(vc, req) \ do { \ (req)->newptr = (vc)->vc_ptr; \ (req)->newlen = (vc)->vc_len; \ (req)->newidx = 0; \ } while (0) #endif struct iovec; struct uio; #ifdef _KERNEL /* * vfs_busy specific flags and mask. */ #define MBF_NOWAIT 0x01 #define MBF_MNTLSTLOCK 0x02 #define MBF_MASK (MBF_NOWAIT | MBF_MNTLSTLOCK) #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_MOUNT); MALLOC_DECLARE(M_STATFS); #endif extern int maxvfsconf; /* highest defined filesystem type */ TAILQ_HEAD(vfsconfhead, vfsconf); extern struct vfsconfhead vfsconf; /* * Operations supported on mounted filesystem. */ struct mount_args; struct nameidata; struct sysctl_req; struct mntarg; /* * N.B., vfs_cmount is the ancient vfsop invoked by the old mount(2) syscall. * The new way is vfs_mount. * * vfs_cmount implementations typically translate arguments from their * respective old per-FS structures into the key-value list supported by * nmount(2), then use kernel_mount(9) to mimic nmount(2) from kernelspace. 
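 *
 * A minimal sketch of such a translation (the filesystem name xxxfs,
 * its argument structure and the option names are hypothetical; the
 * mount_argsu(), mount_arg() and kernel_mount() helpers are declared
 * later in this header):
 *
 *	static int
 *	xxxfs_cmount(struct mntarg *ma, void *data, uint64_t flags)
 *	{
 *		struct xxxfs_args args;
 *		int error;
 *
 *		error = copyin(data, &args, sizeof(args));
 *		if (error != 0)
 *			return (error);
 *		ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
 *		ma = mount_arg(ma, "export", &args.export,
 *		    sizeof(args.export));
 *		return (kernel_mount(ma, flags));
 *	}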
* * Filesystems with mounters that use nmount(2) do not need to and should not * implement vfs_cmount. Hopefully a future cleanup can remove vfs_cmount and * mount(2) entirely. */ typedef int vfs_cmount_t(struct mntarg *ma, void *data, uint64_t flags); typedef int vfs_unmount_t(struct mount *mp, int mntflags); typedef int vfs_root_t(struct mount *mp, int flags, struct vnode **vpp); typedef int vfs_quotactl_t(struct mount *mp, int cmds, uid_t uid, void *arg, bool *mp_busy); typedef int vfs_statfs_t(struct mount *mp, struct statfs *sbp); typedef int vfs_sync_t(struct mount *mp, int waitfor); typedef int vfs_vget_t(struct mount *mp, ino_t ino, int flags, struct vnode **vpp); typedef int vfs_fhtovp_t(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp); typedef int vfs_checkexp_t(struct mount *mp, struct sockaddr *nam, uint64_t *extflagsp, struct ucred **credanonp, int *numsecflavors, int *secflavors); typedef int vfs_init_t(struct vfsconf *); typedef int vfs_uninit_t(struct vfsconf *); typedef int vfs_extattrctl_t(struct mount *mp, int cmd, struct vnode *filename_vp, int attrnamespace, const char *attrname); typedef int vfs_mount_t(struct mount *mp); typedef int vfs_sysctl_t(struct mount *mp, fsctlop_t op, struct sysctl_req *req); typedef void vfs_susp_clean_t(struct mount *mp); typedef void vfs_notify_lowervp_t(struct mount *mp, struct vnode *lowervp); typedef void vfs_purge_t(struct mount *mp); +struct sbuf; +typedef int vfs_report_lockf_t(struct mount *mp, struct sbuf *sb); struct vfsops { vfs_mount_t *vfs_mount; vfs_cmount_t *vfs_cmount; vfs_unmount_t *vfs_unmount; vfs_root_t *vfs_root; vfs_root_t *vfs_cachedroot; vfs_quotactl_t *vfs_quotactl; vfs_statfs_t *vfs_statfs; vfs_sync_t *vfs_sync; vfs_vget_t *vfs_vget; vfs_fhtovp_t *vfs_fhtovp; vfs_checkexp_t *vfs_checkexp; vfs_init_t *vfs_init; vfs_uninit_t *vfs_uninit; vfs_extattrctl_t *vfs_extattrctl; vfs_sysctl_t *vfs_sysctl; vfs_susp_clean_t *vfs_susp_clean; vfs_notify_lowervp_t *vfs_reclaim_lowervp; vfs_notify_lowervp_t *vfs_unlink_lowervp; vfs_purge_t *vfs_purge; + vfs_report_lockf_t *vfs_report_lockf; vfs_mount_t *vfs_spare[6]; /* spares for ABI compat */ }; vfs_statfs_t __vfs_statfs; #define VFS_MOUNT(MP) ({ \ int _rc; \ \ TSRAW(curthread, TS_ENTER, "VFS_MOUNT", (MP)->mnt_vfc->vfc_name);\ _rc = (*(MP)->mnt_op->vfs_mount)(MP); \ TSRAW(curthread, TS_EXIT, "VFS_MOUNT", (MP)->mnt_vfc->vfc_name);\ _rc; }) #define VFS_UNMOUNT(MP, FORCE) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_unmount)(MP, FORCE); \ _rc; }) #define VFS_ROOT(MP, FLAGS, VPP) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_root)(MP, FLAGS, VPP); \ _rc; }) #define VFS_CACHEDROOT(MP, FLAGS, VPP) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_cachedroot)(MP, FLAGS, VPP); \ _rc; }) #define VFS_QUOTACTL(MP, C, U, A, MP_BUSY) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_quotactl)(MP, C, U, A, MP_BUSY); \ _rc; }) #define VFS_STATFS(MP, SBP) ({ \ int _rc; \ \ _rc = __vfs_statfs((MP), (SBP)); \ _rc; }) #define VFS_SYNC(MP, WAIT) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_sync)(MP, WAIT); \ _rc; }) #define VFS_VGET(MP, INO, FLAGS, VPP) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_vget)(MP, INO, FLAGS, VPP); \ _rc; }) #define VFS_FHTOVP(MP, FIDP, FLAGS, VPP) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_fhtovp)(MP, FIDP, FLAGS, VPP); \ _rc; }) #define VFS_CHECKEXP(MP, NAM, EXFLG, CRED, NUMSEC, SEC) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_checkexp)(MP, NAM, EXFLG, CRED, NUMSEC,\ SEC); \ _rc; }) #define VFS_EXTATTRCTL(MP, C, FN, NS, N) ({ \ int _rc; \ \ _rc = 
(*(MP)->mnt_op->vfs_extattrctl)(MP, C, FN, NS, N); \ _rc; }) #define VFS_SYSCTL(MP, OP, REQ) ({ \ int _rc; \ \ _rc = (*(MP)->mnt_op->vfs_sysctl)(MP, OP, REQ); \ _rc; }) #define VFS_SUSP_CLEAN(MP) do { \ if (*(MP)->mnt_op->vfs_susp_clean != NULL) { \ (*(MP)->mnt_op->vfs_susp_clean)(MP); \ } \ } while (0) #define VFS_RECLAIM_LOWERVP(MP, VP) do { \ if (*(MP)->mnt_op->vfs_reclaim_lowervp != NULL) { \ (*(MP)->mnt_op->vfs_reclaim_lowervp)((MP), (VP)); \ } \ } while (0) #define VFS_UNLINK_LOWERVP(MP, VP) do { \ if (*(MP)->mnt_op->vfs_unlink_lowervp != NULL) { \ (*(MP)->mnt_op->vfs_unlink_lowervp)((MP), (VP)); \ } \ } while (0) #define VFS_PURGE(MP) do { \ if (*(MP)->mnt_op->vfs_purge != NULL) { \ (*(MP)->mnt_op->vfs_purge)(MP); \ } \ } while (0) #define VFS_KNOTE_LOCKED(vp, hint) do \ { \ VN_KNOTE((vp), (hint), KNF_LISTLOCKED); \ } while (0) #define VFS_KNOTE_UNLOCKED(vp, hint) do \ { \ VN_KNOTE((vp), (hint), 0); \ } while (0) #include /* * Version numbers. */ #define VFS_VERSION_00 0x19660120 #define VFS_VERSION_01 0x20121030 #define VFS_VERSION_02 0x20180504 #define VFS_VERSION VFS_VERSION_02 #define VFS_SET(vfsops, fsname, flags) \ static struct vfsconf fsname ## _vfsconf = { \ .vfc_version = VFS_VERSION, \ .vfc_name = #fsname, \ .vfc_vfsops = &vfsops, \ .vfc_typenum = -1, \ .vfc_flags = flags, \ }; \ static moduledata_t fsname ## _mod = { \ #fsname, \ vfs_modevent, \ & fsname ## _vfsconf \ }; \ DECLARE_MODULE(fsname, fsname ## _mod, SI_SUB_VFS, SI_ORDER_MIDDLE) enum vfs_notify_upper_type { VFS_NOTIFY_UPPER_RECLAIM, VFS_NOTIFY_UPPER_UNLINK, }; /* * exported vnode operations */ int dounmount(struct mount *, uint64_t, struct thread *); int kernel_mount(struct mntarg *ma, uint64_t flags); struct mntarg *mount_arg(struct mntarg *ma, const char *name, const void *val, int len); struct mntarg *mount_argb(struct mntarg *ma, int flag, const char *name); struct mntarg *mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...); struct mntarg *mount_argsu(struct mntarg *ma, const char *name, const void *val, int len); void statfs_scale_blocks(struct statfs *sf, long max_size); struct vfsconf *vfs_byname(const char *); struct vfsconf *vfs_byname_kld(const char *, struct thread *td, int *); void vfs_mount_destroy(struct mount *); void vfs_event_signal(fsid_t *, u_int32_t, intptr_t); void vfs_freeopts(struct vfsoptlist *opts); void vfs_deleteopt(struct vfsoptlist *opts, const char *name); int vfs_buildopts(struct uio *auio, struct vfsoptlist **options); int vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w, uint64_t val); int vfs_getopt(struct vfsoptlist *, const char *, void **, int *); int vfs_getopt_pos(struct vfsoptlist *opts, const char *name); int vfs_getopt_size(struct vfsoptlist *opts, const char *name, off_t *value); char *vfs_getopts(struct vfsoptlist *, const char *, int *error); int vfs_copyopt(struct vfsoptlist *, const char *, void *, int); int vfs_filteropt(struct vfsoptlist *, const char **legal); void vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...); int vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...); int vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len); int vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len); int vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value); int vfs_setpublicfs /* set publicly exported fs */ (struct mount *, struct netexport *, struct export_args *); void vfs_periodic(struct mount *, int); int vfs_busy(struct mount *, 
int); int vfs_export /* process mount export info */ (struct mount *, struct export_args *); void vfs_allocate_syncvnode(struct mount *); void vfs_deallocate_syncvnode(struct mount *); int vfs_donmount(struct thread *td, uint64_t fsflags, struct uio *fsoptions); void vfs_getnewfsid(struct mount *); struct mount *vfs_getvfs(fsid_t *); /* return vfs given fsid */ struct mount *vfs_busyfs(fsid_t *); int vfs_modevent(module_t, int, void *); void vfs_mount_error(struct mount *, const char *, ...); void vfs_mountroot(void); /* mount our root filesystem */ void vfs_mountedfrom(struct mount *, const char *from); void vfs_notify_upper(struct vnode *, enum vfs_notify_upper_type); struct mount *vfs_ref_from_vp(struct vnode *); void vfs_ref(struct mount *); void vfs_rel(struct mount *); struct mount *vfs_mount_alloc(struct vnode *, struct vfsconf *, const char *, struct ucred *); int vfs_suser(struct mount *, struct thread *); void vfs_unbusy(struct mount *); void vfs_unmountall(void); struct mount *vfs_register_upper_from_vp(struct vnode *, struct mount *ump, struct mount_upper_node *); void vfs_register_for_notification(struct mount *, struct mount *, struct mount_upper_node *); void vfs_unregister_for_notification(struct mount *, struct mount_upper_node *); void vfs_unregister_upper(struct mount *, struct mount_upper_node *); int vfs_remount_ro(struct mount *mp); +int vfs_report_lockf(struct mount *mp, struct sbuf *sb); extern TAILQ_HEAD(mntlist, mount) mountlist; /* mounted filesystem list */ extern struct mtx_padalign mountlist_mtx; extern struct nfs_public nfs_pub; extern struct sx vfsconf_sx; #define vfsconf_lock() sx_xlock(&vfsconf_sx) #define vfsconf_unlock() sx_xunlock(&vfsconf_sx) #define vfsconf_slock() sx_slock(&vfsconf_sx) #define vfsconf_sunlock() sx_sunlock(&vfsconf_sx) struct vnode *mntfs_allocvp(struct mount *, struct vnode *); void mntfs_freevp(struct vnode *); /* * Declarations for these vfs default operations are located in * kern/vfs_default.c. They will be automatically used to replace * null entries in VFS ops tables when registering a new filesystem * type in the global table. */ vfs_root_t vfs_stdroot; vfs_quotactl_t vfs_stdquotactl; vfs_statfs_t vfs_stdstatfs; vfs_sync_t vfs_stdsync; vfs_sync_t vfs_stdnosync; vfs_vget_t vfs_stdvget; vfs_fhtovp_t vfs_stdfhtovp; vfs_checkexp_t vfs_stdcheckexp; vfs_init_t vfs_stdinit; vfs_uninit_t vfs_stduninit; vfs_extattrctl_t vfs_stdextattrctl; vfs_sysctl_t vfs_stdsysctl; void syncer_suspend(void); void syncer_resume(void); struct vnode *vfs_cache_root_clear(struct mount *); void vfs_cache_root_set(struct mount *, struct vnode *); void vfs_op_barrier_wait(struct mount *); void vfs_op_enter(struct mount *); void vfs_op_exit_locked(struct mount *); void vfs_op_exit(struct mount *); #ifdef DIAGNOSTIC void vfs_assert_mount_counters(struct mount *); void vfs_dump_mount_counters(struct mount *); #else #define vfs_assert_mount_counters(mp) do { } while (0) #define vfs_dump_mount_counters(mp) do { } while (0) #endif enum mount_counter { MNT_COUNT_REF, MNT_COUNT_LOCKREF, MNT_COUNT_WRITEOPCOUNT }; int vfs_mount_fetch_counter(struct mount *, enum mount_counter); void suspend_all_fs(void); void resume_all_fs(void); /* * Code transitioning mnt_vfs_ops to > 0 issues IPIs until it observes * all CPUs not executing code enclosed by thread_in_ops_pcpu variable. * * This provides an invariant that by the time the last CPU is observed not * executing, everyone else entering will see the counter > 0 and exit. 
* * Note there is no barrier between vfs_ops and the rest of the code in the * section. It is not necessary as the writer has to wait for everyone to drain * before making any changes or only make changes safe while the section is * executed. */ #define vfs_mount_pcpu(mp) zpcpu_get(mp->mnt_pcpu) #define vfs_mount_pcpu_remote(mp, cpu) zpcpu_get_cpu(mp->mnt_pcpu, cpu) #define vfs_op_thread_entered(mp) ({ \ MPASS(curthread->td_critnest > 0); \ struct mount_pcpu *_mpcpu = vfs_mount_pcpu(mp); \ _mpcpu->mntp_thread_in_ops == 1; \ }) #define vfs_op_thread_enter_crit(mp, _mpcpu) ({ \ bool _retval_crit = true; \ MPASS(curthread->td_critnest > 0); \ _mpcpu = vfs_mount_pcpu(mp); \ MPASS(_mpcpu->mntp_thread_in_ops == 0); \ _mpcpu->mntp_thread_in_ops = 1; \ atomic_interrupt_fence(); \ if (__predict_false(mp->mnt_vfs_ops > 0)) { \ vfs_op_thread_exit_crit(mp, _mpcpu); \ _retval_crit = false; \ } \ _retval_crit; \ }) #define vfs_op_thread_enter(mp, _mpcpu) ({ \ bool _retval; \ critical_enter(); \ _retval = vfs_op_thread_enter_crit(mp, _mpcpu); \ if (__predict_false(!_retval)) \ critical_exit(); \ _retval; \ }) #define vfs_op_thread_exit_crit(mp, _mpcpu) do { \ MPASS(_mpcpu == vfs_mount_pcpu(mp)); \ MPASS(_mpcpu->mntp_thread_in_ops == 1); \ atomic_interrupt_fence(); \ _mpcpu->mntp_thread_in_ops = 0; \ } while (0) #define vfs_op_thread_exit(mp, _mpcpu) do { \ vfs_op_thread_exit_crit(mp, _mpcpu); \ critical_exit(); \ } while (0) #define vfs_mp_count_add_pcpu(_mpcpu, count, val) do { \ MPASS(_mpcpu->mntp_thread_in_ops == 1); \ _mpcpu->mntp_##count += val; \ } while (0) #define vfs_mp_count_sub_pcpu(_mpcpu, count, val) do { \ MPASS(_mpcpu->mntp_thread_in_ops == 1); \ _mpcpu->mntp_##count -= val; \ } while (0) #else /* !_KERNEL */ #include struct stat; __BEGIN_DECLS int fhlink(struct fhandle *, const char *); int fhlinkat(struct fhandle *, int, const char *); int fhopen(const struct fhandle *, int); int fhreadlink(struct fhandle *, char *, size_t); int fhstat(const struct fhandle *, struct stat *); int fhstatfs(const struct fhandle *, struct statfs *); int fstatfs(int, struct statfs *); int getfh(const char *, fhandle_t *); int getfhat(int, char *, struct fhandle *, int); int getfsstat(struct statfs *, long, int); int getmntinfo(struct statfs **, int); int lgetfh(const char *, fhandle_t *); int mount(const char *, const char *, int, void *); int nmount(struct iovec *, unsigned int, int); int statfs(const char *, struct statfs *); int unmount(const char *, int); /* C library stuff */ int getvfsbyname(const char *, struct xvfsconf *); __END_DECLS #endif /* _KERNEL */ #endif /* !_SYS_MOUNT_H_ */ diff --git a/sys/sys/sysctl.h b/sys/sys/sysctl.h index f25152db8215..451d83bbe125 100644 --- a/sys/sys/sysctl.h +++ b/sys/sys/sysctl.h @@ -1,1191 +1,1192 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Mike Karels at Berkeley Software Design, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3.
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)sysctl.h 8.1 (Berkeley) 6/2/93 * $FreeBSD$ */ #ifndef _SYS_SYSCTL_H_ #define _SYS_SYSCTL_H_ #ifdef _KERNEL #include #endif /* * Definitions for sysctl call. The sysctl call uses a hierarchical name * for objects that can be examined or modified. The name is expressed as * a sequence of integers. Like a file path name, the meaning of each * component depends on its place in the hierarchy. The top-level and kern * identifiers are defined here, and other identifiers are defined in the * respective subsystem header files. * * Each subsystem defined by sysctl defines a list of variables for that * subsystem. Each name is either a node with further levels defined below it, * or it is a leaf of some particular type given below. Each sysctl level * defines a set of name/type pairs to be used by sysctl(8) in manipulating the * subsystem. 
*/ #define CTL_MAXNAME 24 /* largest number of components supported */ #define CTLTYPE 0xf /* mask for the type */ #define CTLTYPE_NODE 1 /* name is a node */ #define CTLTYPE_INT 2 /* name describes an integer */ #define CTLTYPE_STRING 3 /* name describes a string */ #define CTLTYPE_S64 4 /* name describes a signed 64-bit number */ #define CTLTYPE_OPAQUE 5 /* name describes a structure */ #define CTLTYPE_STRUCT CTLTYPE_OPAQUE /* name describes a structure */ #define CTLTYPE_UINT 6 /* name describes an unsigned integer */ #define CTLTYPE_LONG 7 /* name describes a long */ #define CTLTYPE_ULONG 8 /* name describes an unsigned long */ #define CTLTYPE_U64 9 /* name describes an unsigned 64-bit number */ #define CTLTYPE_U8 0xa /* name describes an unsigned 8-bit number */ #define CTLTYPE_U16 0xb /* name describes an unsigned 16-bit number */ #define CTLTYPE_S8 0xc /* name describes a signed 8-bit number */ #define CTLTYPE_S16 0xd /* name describes a signed 16-bit number */ #define CTLTYPE_S32 0xe /* name describes a signed 32-bit number */ #define CTLTYPE_U32 0xf /* name describes an unsigned 32-bit number */ #define CTLFLAG_RD 0x80000000 /* Allow reads of variable */ #define CTLFLAG_WR 0x40000000 /* Allow writes to the variable */ #define CTLFLAG_RW (CTLFLAG_RD|CTLFLAG_WR) #define CTLFLAG_DORMANT 0x20000000 /* This sysctl is not active yet */ #define CTLFLAG_ANYBODY 0x10000000 /* All users can set this var */ #define CTLFLAG_SECURE 0x08000000 /* Permit set only if securelevel<=0 */ #define CTLFLAG_PRISON 0x04000000 /* Prisoned roots can fiddle */ #define CTLFLAG_DYN 0x02000000 /* Dynamic oid - can be freed */ #define CTLFLAG_SKIP 0x01000000 /* Skip this sysctl when listing */ #define CTLMASK_SECURE 0x00F00000 /* Secure level */ #define CTLFLAG_TUN 0x00080000 /* Default value is loaded from getenv() */ #define CTLFLAG_RDTUN (CTLFLAG_RD|CTLFLAG_TUN) #define CTLFLAG_RWTUN (CTLFLAG_RW|CTLFLAG_TUN) #define CTLFLAG_MPSAFE 0x00040000 /* Handler is MP safe */ #define CTLFLAG_VNET 0x00020000 /* Prisons with vnet can fiddle */ #define CTLFLAG_DYING 0x00010000 /* Oid is being removed */ #define CTLFLAG_CAPRD 0x00008000 /* Can be read in capability mode */ #define CTLFLAG_CAPWR 0x00004000 /* Can be written in capability mode */ #define CTLFLAG_STATS 0x00002000 /* Statistics, not a tuneable */ #define CTLFLAG_NOFETCH 0x00001000 /* Don't fetch tunable from getenv() */ #define CTLFLAG_CAPRW (CTLFLAG_CAPRD|CTLFLAG_CAPWR) /* * This is transient flag to be used until all sysctl handlers are converted * to not lock Giant. * One, and only one of CTLFLAG_MPSAFE or CTLFLAG_NEEDGIANT is required * for SYSCTL_PROC and SYSCTL_NODE. */ #define CTLFLAG_NEEDGIANT 0x00000800 /* Handler require Giant */ /* * Secure level. Note that CTLFLAG_SECURE == CTLFLAG_SECURE1. * * Secure when the securelevel is raised to at least N. */ #define CTLSHIFT_SECURE 20 #define CTLFLAG_SECURE1 (CTLFLAG_SECURE | (0 << CTLSHIFT_SECURE)) #define CTLFLAG_SECURE2 (CTLFLAG_SECURE | (1 << CTLSHIFT_SECURE)) #define CTLFLAG_SECURE3 (CTLFLAG_SECURE | (2 << CTLSHIFT_SECURE)) /* * USE THIS instead of a hardwired number from the categories below * to get dynamically assigned sysctl entries using the linker-set * technology. This is the way nearly all new sysctl variables should * be implemented. * e.g. SYSCTL_INT(_parent, OID_AUTO, name, CTLFLAG_RW, &variable, 0, ""); */ #define OID_AUTO (-1) /* * The starting number for dynamically-assigned entries. WARNING! * ALL static sysctl entries should have numbers LESS than this! 
*/ #define CTL_AUTO_START 0x100 #ifdef _KERNEL #include #ifdef KLD_MODULE /* XXX allow overspecification of type in external kernel modules */ #define SYSCTL_CT_ASSERT_MASK CTLTYPE #else #define SYSCTL_CT_ASSERT_MASK 0 #endif #define SYSCTL_HANDLER_ARGS struct sysctl_oid *oidp, void *arg1, \ intmax_t arg2, struct sysctl_req *req /* definitions for sysctl_req 'lock' member */ #define REQ_UNWIRED 1 #define REQ_WIRED 2 /* definitions for sysctl_req 'flags' member */ #ifdef COMPAT_FREEBSD32 #define SCTL_MASK32 1 /* 32 bit emulation */ #endif /* * This describes the access space for a sysctl request. This is needed * so that we can use the interface from the kernel or from user-space. */ struct thread; struct sysctl_req { struct thread *td; /* used for access checking */ int lock; /* wiring state */ void *oldptr; size_t oldlen; size_t oldidx; int (*oldfunc)(struct sysctl_req *, const void *, size_t); const void *newptr; size_t newlen; size_t newidx; int (*newfunc)(struct sysctl_req *, void *, size_t); size_t validlen; int flags; }; SLIST_HEAD(sysctl_oid_list, sysctl_oid); /* * This describes one "oid" in the MIB tree. Potentially more nodes can * be hidden behind it, expanded by the handler. */ struct sysctl_oid { struct sysctl_oid_list oid_children; struct sysctl_oid_list *oid_parent; SLIST_ENTRY(sysctl_oid) oid_link; int oid_number; u_int oid_kind; void *oid_arg1; intmax_t oid_arg2; const char *oid_name; int (*oid_handler)(SYSCTL_HANDLER_ARGS); const char *oid_fmt; int oid_refcnt; u_int oid_running; const char *oid_descr; const char *oid_label; }; #define SYSCTL_IN(r, p, l) (r->newfunc)(r, p, l) #define SYSCTL_OUT(r, p, l) (r->oldfunc)(r, p, l) #define SYSCTL_OUT_STR(r, p) (r->oldfunc)(r, p, strlen(p) + 1) int sysctl_handle_bool(SYSCTL_HANDLER_ARGS); int sysctl_handle_8(SYSCTL_HANDLER_ARGS); int sysctl_handle_16(SYSCTL_HANDLER_ARGS); int sysctl_handle_32(SYSCTL_HANDLER_ARGS); int sysctl_handle_64(SYSCTL_HANDLER_ARGS); int sysctl_handle_int(SYSCTL_HANDLER_ARGS); int sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS); int sysctl_handle_long(SYSCTL_HANDLER_ARGS); int sysctl_handle_string(SYSCTL_HANDLER_ARGS); int sysctl_handle_opaque(SYSCTL_HANDLER_ARGS); int sysctl_handle_counter_u64(SYSCTL_HANDLER_ARGS); int sysctl_handle_counter_u64_array(SYSCTL_HANDLER_ARGS); int sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS); int sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS); int sysctl_msec_to_sbintime(SYSCTL_HANDLER_ARGS); int sysctl_usec_to_sbintime(SYSCTL_HANDLER_ARGS); int sysctl_sec_to_timeval(SYSCTL_HANDLER_ARGS); int sysctl_dpcpu_int(SYSCTL_HANDLER_ARGS); int sysctl_dpcpu_long(SYSCTL_HANDLER_ARGS); int sysctl_dpcpu_quad(SYSCTL_HANDLER_ARGS); /* * These functions are used to add/remove an oid from the mib. */ void sysctl_register_oid(struct sysctl_oid *oidp); void sysctl_register_disabled_oid(struct sysctl_oid *oidp); void sysctl_enable_oid(struct sysctl_oid *oidp); void sysctl_unregister_oid(struct sysctl_oid *oidp); /* Declare a static oid to allow child oids to be added to it. */ #define SYSCTL_DECL(name) \ extern struct sysctl_oid sysctl__##name /* Hide these in macros. */ #define SYSCTL_CHILDREN(oid_ptr) (&(oid_ptr)->oid_children) #define SYSCTL_PARENT(oid_ptr) \ (((oid_ptr)->oid_parent != &sysctl__children) ? \ __containerof((oid_ptr)->oid_parent, struct sysctl_oid, \ oid_children) : (struct sysctl_oid *)NULL) #define SYSCTL_STATIC_CHILDREN(oid_name) (&sysctl__##oid_name.oid_children) /* === Structs and macros related to context handling. 
=== */ /* All dynamically created sysctls can be tracked in a context list. */ struct sysctl_ctx_entry { struct sysctl_oid *entry; TAILQ_ENTRY(sysctl_ctx_entry) link; }; TAILQ_HEAD(sysctl_ctx_list, sysctl_ctx_entry); #define SYSCTL_NODE_CHILDREN(parent, name) \ sysctl__##parent##_##name.oid_children #ifndef NO_SYSCTL_DESCR #define __DESCR(d) d #else #define __DESCR(d) "" #endif #ifdef notyet #define SYSCTL_ENFORCE_FLAGS(x) \ _Static_assert((((x) & CTLFLAG_MPSAFE) != 0) ^ (((x) & CTLFLAG_NEEDGIANT) != 0), \ "Has to be either CTLFLAG_MPSAFE or CTLFLAG_NEEDGIANT") #else #define SYSCTL_ENFORCE_FLAGS(x) #endif /* This macro is only for internal use */ #define SYSCTL_OID_RAW(id, parent_child_head, nbr, name, kind, a1, a2, handler, fmt, descr, label) \ struct sysctl_oid id = { \ .oid_parent = (parent_child_head), \ .oid_children = SLIST_HEAD_INITIALIZER(&id.oid_children), \ .oid_number = (nbr), \ .oid_kind = (kind), \ .oid_arg1 = (a1), \ .oid_arg2 = (a2), \ .oid_name = (name), \ .oid_handler = (handler), \ .oid_fmt = (fmt), \ .oid_descr = __DESCR(descr), \ .oid_label = (label), \ }; \ DATA_SET(sysctl_set, id); \ SYSCTL_ENFORCE_FLAGS(kind) /* This constructs a static "raw" MIB oid. */ #define SYSCTL_OID(parent, nbr, name, kind, a1, a2, handler, fmt, descr) \ SYSCTL_OID_WITH_LABEL(parent, nbr, name, kind, a1, a2, \ handler, fmt, descr, NULL) #define SYSCTL_OID_WITH_LABEL(parent, nbr, name, kind, a1, a2, handler, fmt, descr, label) \ static SYSCTL_OID_RAW(sysctl__##parent##_##name, \ SYSCTL_CHILDREN(&sysctl__##parent), \ nbr, #name, kind, a1, a2, handler, fmt, descr, label) /* This constructs a global "raw" MIB oid. */ #define SYSCTL_OID_GLOBAL(parent, nbr, name, kind, a1, a2, handler, fmt, descr, label) \ SYSCTL_OID_RAW(sysctl__##parent##_##name, \ SYSCTL_CHILDREN(&sysctl__##parent), \ nbr, #name, kind, a1, a2, handler, fmt, descr, label) #define SYSCTL_ADD_OID(ctx, parent, nbr, name, kind, a1, a2, handler, fmt, descr) \ ({ \ SYSCTL_ENFORCE_FLAGS(kind); \ sysctl_add_oid(ctx, parent, nbr, name, kind, a1, a2,handler, \ fmt, __DESCR(descr), NULL); \ }) /* This constructs a root node from which other nodes can hang. */ #define SYSCTL_ROOT_NODE(nbr, name, access, handler, descr) \ SYSCTL_OID_RAW(sysctl___##name, &sysctl__children, \ nbr, #name, CTLTYPE_NODE|(access), NULL, 0, \ handler, "N", descr, NULL); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_NODE) /* This constructs a node from which other oids can hang. 
*/ #define SYSCTL_NODE(parent, nbr, name, access, handler, descr) \ SYSCTL_NODE_WITH_LABEL(parent, nbr, name, access, handler, descr, NULL) #define SYSCTL_NODE_WITH_LABEL(parent, nbr, name, access, handler, descr, label) \ SYSCTL_OID_GLOBAL(parent, nbr, name, CTLTYPE_NODE|(access), \ NULL, 0, handler, "N", descr, label); \ SYSCTL_ENFORCE_FLAGS(access); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_NODE) #define SYSCTL_ADD_NODE(ctx, parent, nbr, name, access, handler, descr) \ SYSCTL_ADD_NODE_WITH_LABEL(ctx, parent, nbr, name, access, \ handler, descr, NULL) #define SYSCTL_ADD_NODE_WITH_LABEL(ctx, parent, nbr, name, access, handler, descr, label) \ ({ \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_NODE); \ SYSCTL_ENFORCE_FLAGS(access); \ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_NODE|(access), \ NULL, 0, handler, "N", __DESCR(descr), label); \ }) #define SYSCTL_ADD_ROOT_NODE(ctx, nbr, name, access, handler, descr) \ ({ \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_NODE); \ SYSCTL_ENFORCE_FLAGS(access); \ sysctl_add_oid(ctx, &sysctl__children, nbr, name, \ CTLTYPE_NODE|(access), \ NULL, 0, handler, "N", __DESCR(descr), NULL); \ }) /* Oid for a string. len can be 0 to indicate '\0' termination. */ #define SYSCTL_STRING(parent, nbr, name, access, arg, len, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_STRING | CTLFLAG_MPSAFE | (access), \ arg, len, sysctl_handle_string, "A", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_STRING) #define SYSCTL_ADD_STRING(ctx, parent, nbr, name, access, arg, len, descr) \ ({ \ char *__arg = (arg); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_STRING); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_STRING | CTLFLAG_MPSAFE | (access), \ __arg, len, sysctl_handle_string, "A", __DESCR(descr), \ NULL); \ }) /* Oid for a constant '\0' terminated string. */ #define SYSCTL_CONST_STRING(parent, nbr, name, access, arg, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_STRING | CTLFLAG_MPSAFE | (access),\ __DECONST(char *, arg), 0, sysctl_handle_string, "A", descr); \ CTASSERT(!(access & CTLFLAG_WR)); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_STRING) #define SYSCTL_ADD_CONST_STRING(ctx, parent, nbr, name, access, arg, descr) \ ({ \ char *__arg = __DECONST(char *, arg); \ CTASSERT(!(access & CTLFLAG_WR)); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_STRING); \ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_STRING | \ CTLFLAG_MPSAFE | (access), __arg, 0, sysctl_handle_string, "A",\ __DESCR(descr), NULL); \ }) /* Oid for a bool. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_BOOL_PTR ((bool *)NULL) #define SYSCTL_BOOL(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U8 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_bool, "CU", descr); \ CTASSERT(((access) & CTLTYPE) == 0 && \ sizeof(bool) == sizeof(*(ptr))) #define SYSCTL_ADD_BOOL(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ bool *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U8 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_bool, "CU", __DESCR(descr), \ NULL); \ }) /* Oid for a signed 8-bit int. If ptr is NULL, val is returned. 
*/ #define SYSCTL_NULL_S8_PTR ((int8_t *)NULL) #define SYSCTL_S8(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_S8 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_8, "C", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S8) && \ sizeof(int8_t) == sizeof(*(ptr))) #define SYSCTL_ADD_S8(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ int8_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S8); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_S8 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_8, "C", __DESCR(descr), NULL); \ }) /* Oid for an unsigned 8-bit int. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_U8_PTR ((uint8_t *)NULL) #define SYSCTL_U8(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U8 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_8, "CU", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U8) && \ sizeof(uint8_t) == sizeof(*(ptr))) #define SYSCTL_ADD_U8(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ uint8_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U8); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U8 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_8, "CU", __DESCR(descr), NULL); \ }) /* Oid for a signed 16-bit int. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_S16_PTR ((int16_t *)NULL) #define SYSCTL_S16(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_S16 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_16, "S", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S16) && \ sizeof(int16_t) == sizeof(*(ptr))) #define SYSCTL_ADD_S16(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ int16_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S16); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_S16 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_16, "S", __DESCR(descr), NULL); \ }) /* Oid for an unsigned 16-bit int. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_U16_PTR ((uint16_t *)NULL) #define SYSCTL_U16(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U16 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_16, "SU", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U16) && \ sizeof(uint16_t) == sizeof(*(ptr))) #define SYSCTL_ADD_U16(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ uint16_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U16); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U16 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_16, "SU", __DESCR(descr), NULL); \ }) /* Oid for a signed 32-bit int. If ptr is NULL, val is returned. 
*/ #define SYSCTL_NULL_S32_PTR ((int32_t *)NULL) #define SYSCTL_S32(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_S32 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_32, "I", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S32) && \ sizeof(int32_t) == sizeof(*(ptr))) #define SYSCTL_ADD_S32(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ int32_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S32); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_S32 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_32, "I", __DESCR(descr), NULL); \ }) /* Oid for an unsigned 32-bit int. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_U32_PTR ((uint32_t *)NULL) #define SYSCTL_U32(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U32 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_32, "IU", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U32) && \ sizeof(uint32_t) == sizeof(*(ptr))) #define SYSCTL_ADD_U32(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ uint32_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U32); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U32 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_32, "IU", __DESCR(descr), NULL); \ }) /* Oid for a signed 64-bit int. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_S64_PTR ((int64_t *)NULL) #define SYSCTL_S64(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_S64 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_64, "Q", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64) && \ sizeof(int64_t) == sizeof(*(ptr))) #define SYSCTL_ADD_S64(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ int64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_S64 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_64, "Q", __DESCR(descr), NULL); \ }) /* Oid for an unsigned 64-bit int. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_U64_PTR ((uint64_t *)NULL) #define SYSCTL_U64(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_64, "QU", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64) && \ sizeof(uint64_t) == sizeof(*(ptr))) #define SYSCTL_ADD_U64(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ uint64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_64, "QU", __DESCR(descr), NULL); \ }) /* Oid for an int. If ptr is SYSCTL_NULL_INT_PTR, val is returned. 
*/ #define SYSCTL_NULL_INT_PTR ((int *)NULL) #define SYSCTL_INT(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_INT_WITH_LABEL(parent, nbr, name, access, ptr, val, descr, NULL) #define SYSCTL_INT_WITH_LABEL(parent, nbr, name, access, ptr, val, descr, label) \ SYSCTL_OID_WITH_LABEL(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_int, "I", descr, label); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT) && \ sizeof(int) == sizeof(*(ptr))) #define SYSCTL_ADD_INT(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ int *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_int, "I", __DESCR(descr), NULL); \ }) /* Oid for an unsigned int. If ptr is NULL, val is returned. */ #define SYSCTL_NULL_UINT_PTR ((unsigned *)NULL) #define SYSCTL_UINT(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_UINT | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_int, "IU", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_UINT) && \ sizeof(unsigned) == sizeof(*(ptr))) #define SYSCTL_ADD_UINT(ctx, parent, nbr, name, access, ptr, val, descr) \ ({ \ unsigned *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_UINT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_UINT | CTLFLAG_MPSAFE | (access), \ __ptr, val, sysctl_handle_int, "IU", __DESCR(descr), NULL); \ }) /* Oid for a long. The pointer must be non NULL. */ #define SYSCTL_NULL_LONG_PTR ((long *)NULL) #define SYSCTL_LONG(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_LONG | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_long, "L", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_LONG) && \ sizeof(long) == sizeof(*(ptr))) #define SYSCTL_ADD_LONG(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ long *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_LONG); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_LONG | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_long, "L", __DESCR(descr), NULL); \ }) /* Oid for an unsigned long. The pointer must be non NULL. */ #define SYSCTL_NULL_ULONG_PTR ((unsigned long *)NULL) #define SYSCTL_ULONG(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_ULONG | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_long, "LU", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_ULONG) && \ sizeof(unsigned long) == sizeof(*(ptr))) #define SYSCTL_ADD_ULONG(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ unsigned long *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_ULONG); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_ULONG | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_long, "LU", __DESCR(descr), NULL); \ }) /* Oid for a quad. The pointer must be non NULL. 
*/ #define SYSCTL_NULL_QUAD_PTR ((int64_t *)NULL) #define SYSCTL_QUAD(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_S64 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_64, "Q", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64) && \ sizeof(int64_t) == sizeof(*(ptr))) #define SYSCTL_ADD_QUAD(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ int64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_S64 | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_64, "Q", __DESCR(descr), NULL); \ }) #define SYSCTL_NULL_UQUAD_PTR ((uint64_t *)NULL) #define SYSCTL_UQUAD(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ ptr, val, sysctl_handle_64, "QU", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64) && \ sizeof(uint64_t) == sizeof(*(ptr))) #define SYSCTL_ADD_UQUAD(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ uint64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_64, "QU", __DESCR(descr), NULL); \ }) /* Oid for a CPU dependent variable */ #define SYSCTL_ADD_UAUTO(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ struct sysctl_oid *__ret; \ CTASSERT((sizeof(uint64_t) == sizeof(*(ptr)) || \ sizeof(unsigned) == sizeof(*(ptr))) && \ ((access) & CTLTYPE) == 0); \ if (sizeof(uint64_t) == sizeof(*(ptr))) { \ __ret = sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | (access), \ (ptr), 0, sysctl_handle_64, "QU", \ __DESCR(descr), NULL); \ } else { \ __ret = sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_UINT | CTLFLAG_MPSAFE | (access), \ (ptr), 0, sysctl_handle_int, "IU", \ __DESCR(descr), NULL); \ } \ __ret; \ }) /* Oid for a 64-bit unsigned counter(9). The pointer must be non NULL. */ #define SYSCTL_COUNTER_U64(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_STATS | (access), \ (ptr), 0, sysctl_handle_counter_u64, "QU", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64) && \ sizeof(counter_u64_t) == sizeof(*(ptr)) && \ sizeof(uint64_t) == sizeof(**(ptr))) #define SYSCTL_ADD_COUNTER_U64(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ counter_u64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_U64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_STATS | (access), \ __ptr, 0, sysctl_handle_counter_u64, "QU", __DESCR(descr), \ NULL); \ }) /* Oid for an array of counter(9)s. The pointer and length must be non zero. 
*/ #define SYSCTL_COUNTER_U64_ARRAY(parent, nbr, name, access, ptr, len, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_STATS | (access), \ (ptr), (len), sysctl_handle_counter_u64_array, "S", descr); \ CTASSERT((((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE) && \ sizeof(counter_u64_t) == sizeof(*(ptr)) && \ sizeof(uint64_t) == sizeof(**(ptr))) #define SYSCTL_ADD_COUNTER_U64_ARRAY(ctx, parent, nbr, name, access, \ ptr, len, descr) \ ({ \ counter_u64_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_STATS | (access), \ __ptr, len, sysctl_handle_counter_u64_array, "S", \ __DESCR(descr), NULL); \ }) /* Oid for an opaque object. Specified by a pointer and a length. */ #define SYSCTL_OPAQUE(parent, nbr, name, access, ptr, len, fmt, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | (access), \ ptr, len, sysctl_handle_opaque, fmt, descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE) #define SYSCTL_ADD_OPAQUE(ctx, parent, nbr, name, access, ptr, len, fmt, descr) \ ({ \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | (access), \ ptr, len, sysctl_handle_opaque, fmt, __DESCR(descr), NULL); \ }) /* Oid for a struct. Specified by a pointer and a type. */ #define SYSCTL_STRUCT(parent, nbr, name, access, ptr, type, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | (access), \ ptr, sizeof(struct type), sysctl_handle_opaque, \ "S," #type, descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE) #define SYSCTL_ADD_STRUCT(ctx, parent, nbr, name, access, ptr, type, descr) \ ({ \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_OPAQUE); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | (access), \ (ptr), sizeof(struct type), \ sysctl_handle_opaque, "S," #type, __DESCR(descr), NULL); \ }) /* Oid for a procedure. Specified by a pointer and an arg. */ #define SYSCTL_PROC(parent, nbr, name, access, ptr, arg, handler, fmt, descr) \ SYSCTL_OID(parent, nbr, name, (access), \ ptr, arg, handler, fmt, descr); \ CTASSERT(((access) & CTLTYPE) != 0) #define SYSCTL_ADD_PROC(ctx, parent, nbr, name, access, ptr, arg, handler, fmt, descr) \ ({ \ CTASSERT(((access) & CTLTYPE) != 0); \ SYSCTL_ENFORCE_FLAGS(access); \ sysctl_add_oid(ctx, parent, nbr, name, (access), \ (ptr), (arg), (handler), (fmt), __DESCR(descr), NULL); \ }) /* Oid to handle limits on uma(9) zone specified by pointer. */ #define SYSCTL_UMA_MAX(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | (access), \ (ptr), 0, sysctl_handle_uma_zone_max, "I", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT) #define SYSCTL_ADD_UMA_MAX(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ uma_zone_t __ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | (access), \ __ptr, 0, sysctl_handle_uma_zone_max, "I", __DESCR(descr), \ NULL); \ }) /* Oid to obtain current use of uma(9) zone specified by pointer. 
*/ #define SYSCTL_UMA_CUR(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ (ptr), 0, sysctl_handle_uma_zone_cur, "I", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT) #define SYSCTL_ADD_UMA_CUR(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ uma_zone_t __ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ __ptr, 0, sysctl_handle_uma_zone_cur, "I", __DESCR(descr), \ NULL); \ }) /* OID expressing a sbintime_t as microseconds */ #define SYSCTL_SBINTIME_USEC(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ (ptr), 0, sysctl_usec_to_sbintime, "Q", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64) #define SYSCTL_ADD_SBINTIME_USEC(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ sbintime_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ __ptr, 0, sysctl_usec_to_sbintime, "Q", __DESCR(descr), \ NULL); \ }) /* OID expressing a sbintime_t as milliseconds */ #define SYSCTL_SBINTIME_MSEC(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ (ptr), 0, sysctl_msec_to_sbintime, "Q", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64) #define SYSCTL_ADD_SBINTIME_MSEC(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ sbintime_t *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_S64); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ __ptr, 0, sysctl_msec_to_sbintime, "Q", __DESCR(descr), \ NULL); \ }) /* OID expressing a struct timeval as seconds */ #define SYSCTL_TIMEVAL_SEC(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ (ptr), 0, sysctl_sec_to_timeval, "I", descr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT) #define SYSCTL_ADD_TIMEVAL_SEC(ctx, parent, nbr, name, access, ptr, descr) \ ({ \ struct timeval *__ptr = (ptr); \ CTASSERT(((access) & CTLTYPE) == 0 || \ ((access) & SYSCTL_CT_ASSERT_MASK) == CTLTYPE_INT); \ sysctl_add_oid(ctx, parent, nbr, name, \ CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD | (access), \ __ptr, 0, sysctl_sec_to_timeval, "I", __DESCR(descr), \ NULL); \ }) /* * A macro to generate a read-only sysctl to indicate the presence of optional * kernel features. 
*/ #define FEATURE(name, desc) \ SYSCTL_INT_WITH_LABEL(_kern_features, OID_AUTO, name, \ CTLFLAG_RD | CTLFLAG_CAPRD, SYSCTL_NULL_INT_PTR, 1, desc, "feature") #endif /* _KERNEL */ /* * Top-level identifiers */ #define CTL_SYSCTL 0 /* "magic" numbers */ #define CTL_KERN 1 /* "high kernel": proc, limits */ #define CTL_VM 2 /* virtual memory */ #define CTL_VFS 3 /* filesystem, mount type is next */ #define CTL_NET 4 /* network, see socket.h */ #define CTL_DEBUG 5 /* debugging parameters */ #define CTL_HW 6 /* generic cpu/io */ #define CTL_MACHDEP 7 /* machine dependent */ #define CTL_USER 8 /* user-level */ #define CTL_P1003_1B 9 /* POSIX 1003.1B */ /* * CTL_SYSCTL identifiers */ #define CTL_SYSCTL_DEBUG 0 /* printf all nodes */ #define CTL_SYSCTL_NAME 1 /* string name of OID */ #define CTL_SYSCTL_NEXT 2 /* next OID, honoring CTLFLAG_SKIP */ #define CTL_SYSCTL_NAME2OID 3 /* int array of name */ #define CTL_SYSCTL_OIDFMT 4 /* OID's kind and format */ #define CTL_SYSCTL_OIDDESCR 5 /* OID's description */ #define CTL_SYSCTL_OIDLABEL 6 /* aggregation label */ #define CTL_SYSCTL_NEXTNOSKIP 7 /* next OID, ignoring CTLFLAG_SKIP */ /* * CTL_KERN identifiers */ #define KERN_OSTYPE 1 /* string: system version */ #define KERN_OSRELEASE 2 /* string: system release */ #define KERN_OSREV 3 /* int: system revision */ #define KERN_VERSION 4 /* string: compile time info */ #define KERN_MAXVNODES 5 /* int: max vnodes */ #define KERN_MAXPROC 6 /* int: max processes */ #define KERN_MAXFILES 7 /* int: max open files */ #define KERN_ARGMAX 8 /* int: max arguments to exec */ #define KERN_SECURELVL 9 /* int: system security level */ #define KERN_HOSTNAME 10 /* string: hostname */ #define KERN_HOSTID 11 /* int: host identifier */ #define KERN_CLOCKRATE 12 /* struct: struct clockrate */ #define KERN_VNODE 13 /* struct: vnode structures */ #define KERN_PROC 14 /* struct: process entries */ #define KERN_FILE 15 /* struct: file entries */ #define KERN_PROF 16 /* node: kernel profiling info */ #define KERN_POSIX1 17 /* int: POSIX.1 version */ #define KERN_NGROUPS 18 /* int: # of supplemental group ids */ #define KERN_JOB_CONTROL 19 /* int: is job control available */ #define KERN_SAVED_IDS 20 /* int: saved set-user/group-ID */ #define KERN_BOOTTIME 21 /* struct: time kernel was booted */ #define KERN_NISDOMAINNAME 22 /* string: YP domain name */ #define KERN_UPDATEINTERVAL 23 /* int: update process sleep time */ #define KERN_OSRELDATE 24 /* int: kernel release date */ #define KERN_NTP_PLL 25 /* node: NTP PLL control */ #define KERN_BOOTFILE 26 /* string: name of booted kernel */ #define KERN_MAXFILESPERPROC 27 /* int: max open files per proc */ #define KERN_MAXPROCPERUID 28 /* int: max processes per uid */ #define KERN_DUMPDEV 29 /* struct cdev *: device to dump on */ #define KERN_IPC 30 /* node: anything related to IPC */ #define KERN_DUMMY 31 /* unused */ #define KERN_PS_STRINGS 32 /* int: address of PS_STRINGS */ #define KERN_USRSTACK 33 /* int: address of USRSTACK */ #define KERN_LOGSIGEXIT 34 /* int: do we log sigexit procs? 
*/ #define KERN_IOV_MAX 35 /* int: value of UIO_MAXIOV */ #define KERN_HOSTUUID 36 /* string: host UUID identifier */ #define KERN_ARND 37 /* int: from arc4rand() */ #define KERN_MAXPHYS 38 /* int: MAXPHYS value */ +#define KERN_LOCKF 39 /* struct: lockf reports */ /* * KERN_PROC subtypes */ #define KERN_PROC_ALL 0 /* everything */ #define KERN_PROC_PID 1 /* by process id */ #define KERN_PROC_PGRP 2 /* by process group id */ #define KERN_PROC_SESSION 3 /* by session of pid */ #define KERN_PROC_TTY 4 /* by controlling tty */ #define KERN_PROC_UID 5 /* by effective uid */ #define KERN_PROC_RUID 6 /* by real uid */ #define KERN_PROC_ARGS 7 /* get/set arguments/proctitle */ #define KERN_PROC_PROC 8 /* only return procs */ #define KERN_PROC_SV_NAME 9 /* get syscall vector name */ #define KERN_PROC_RGID 10 /* by real group id */ #define KERN_PROC_GID 11 /* by effective group id */ #define KERN_PROC_PATHNAME 12 /* path to executable */ #define KERN_PROC_OVMMAP 13 /* Old VM map entries for process */ #define KERN_PROC_OFILEDESC 14 /* Old file descriptors for process */ #define KERN_PROC_KSTACK 15 /* Kernel stacks for process */ #define KERN_PROC_INC_THREAD 0x10 /* * modifier for pid, pgrp, tty, * uid, ruid, gid, rgid and proc * This effectively uses 16-31 */ #define KERN_PROC_VMMAP 32 /* VM map entries for process */ #define KERN_PROC_FILEDESC 33 /* File descriptors for process */ #define KERN_PROC_GROUPS 34 /* process groups */ #define KERN_PROC_ENV 35 /* get environment */ #define KERN_PROC_AUXV 36 /* get ELF auxiliary vector */ #define KERN_PROC_RLIMIT 37 /* process resource limits */ #define KERN_PROC_PS_STRINGS 38 /* get ps_strings location */ #define KERN_PROC_UMASK 39 /* process umask */ #define KERN_PROC_OSREL 40 /* osreldate for process binary */ #define KERN_PROC_SIGTRAMP 41 /* signal trampoline location */ #define KERN_PROC_CWD 42 /* process current working directory */ #define KERN_PROC_NFDS 43 /* number of open file descriptors */ #define KERN_PROC_SIGFASTBLK 44 /* address of fastsigblk magic word */ #define KERN_PROC_VM_LAYOUT 45 /* virtual address space layout info */ /* * KERN_IPC identifiers */ #define KIPC_MAXSOCKBUF 1 /* int: max size of a socket buffer */ #define KIPC_SOCKBUF_WASTE 2 /* int: wastage factor in sockbuf */ #define KIPC_SOMAXCONN 3 /* int: max length of connection q */ #define KIPC_MAX_LINKHDR 4 /* int: max length of link header */ #define KIPC_MAX_PROTOHDR 5 /* int: max length of network header */ #define KIPC_MAX_HDR 6 /* int: max total length of headers */ #define KIPC_MAX_DATALEN 7 /* int: max length of data? */ /* * CTL_HW identifiers */ #define HW_MACHINE 1 /* string: machine class */ #define HW_MODEL 2 /* string: specific machine model */ #define HW_NCPU 3 /* int: number of cpus */ #define HW_BYTEORDER 4 /* int: machine byte order */ #define HW_PHYSMEM 5 /* int: total memory */ #define HW_USERMEM 6 /* int: non-kernel memory */ #define HW_PAGESIZE 7 /* int: software page size */ #define HW_DISKNAMES 8 /* strings: disk drive names */ #define HW_DISKSTATS 9 /* struct: diskstats[] */ #define HW_FLOATINGPT 10 /* int: has HW floating point? 
*/ #define HW_MACHINE_ARCH 11 /* string: machine architecture */ #define HW_REALMEM 12 /* int: 'real' memory */ /* * CTL_USER definitions */ #define USER_CS_PATH 1 /* string: _CS_PATH */ #define USER_BC_BASE_MAX 2 /* int: BC_BASE_MAX */ #define USER_BC_DIM_MAX 3 /* int: BC_DIM_MAX */ #define USER_BC_SCALE_MAX 4 /* int: BC_SCALE_MAX */ #define USER_BC_STRING_MAX 5 /* int: BC_STRING_MAX */ #define USER_COLL_WEIGHTS_MAX 6 /* int: COLL_WEIGHTS_MAX */ #define USER_EXPR_NEST_MAX 7 /* int: EXPR_NEST_MAX */ #define USER_LINE_MAX 8 /* int: LINE_MAX */ #define USER_RE_DUP_MAX 9 /* int: RE_DUP_MAX */ #define USER_POSIX2_VERSION 10 /* int: POSIX2_VERSION */ #define USER_POSIX2_C_BIND 11 /* int: POSIX2_C_BIND */ #define USER_POSIX2_C_DEV 12 /* int: POSIX2_C_DEV */ #define USER_POSIX2_CHAR_TERM 13 /* int: POSIX2_CHAR_TERM */ #define USER_POSIX2_FORT_DEV 14 /* int: POSIX2_FORT_DEV */ #define USER_POSIX2_FORT_RUN 15 /* int: POSIX2_FORT_RUN */ #define USER_POSIX2_LOCALEDEF 16 /* int: POSIX2_LOCALEDEF */ #define USER_POSIX2_SW_DEV 17 /* int: POSIX2_SW_DEV */ #define USER_POSIX2_UPE 18 /* int: POSIX2_UPE */ #define USER_STREAM_MAX 19 /* int: POSIX2_STREAM_MAX */ #define USER_TZNAME_MAX 20 /* int: POSIX2_TZNAME_MAX */ #define USER_LOCALBASE 21 /* string: _PATH_LOCALBASE */ #define CTL_P1003_1B_ASYNCHRONOUS_IO 1 /* boolean */ #define CTL_P1003_1B_MAPPED_FILES 2 /* boolean */ #define CTL_P1003_1B_MEMLOCK 3 /* boolean */ #define CTL_P1003_1B_MEMLOCK_RANGE 4 /* boolean */ #define CTL_P1003_1B_MEMORY_PROTECTION 5 /* boolean */ #define CTL_P1003_1B_MESSAGE_PASSING 6 /* boolean */ #define CTL_P1003_1B_PRIORITIZED_IO 7 /* boolean */ #define CTL_P1003_1B_PRIORITY_SCHEDULING 8 /* boolean */ #define CTL_P1003_1B_REALTIME_SIGNALS 9 /* boolean */ #define CTL_P1003_1B_SEMAPHORES 10 /* boolean */ #define CTL_P1003_1B_FSYNC 11 /* boolean */ #define CTL_P1003_1B_SHARED_MEMORY_OBJECTS 12 /* boolean */ #define CTL_P1003_1B_SYNCHRONIZED_IO 13 /* boolean */ #define CTL_P1003_1B_TIMERS 14 /* boolean */ #define CTL_P1003_1B_AIO_LISTIO_MAX 15 /* int */ #define CTL_P1003_1B_AIO_MAX 16 /* int */ #define CTL_P1003_1B_AIO_PRIO_DELTA_MAX 17 /* int */ #define CTL_P1003_1B_DELAYTIMER_MAX 18 /* int */ #define CTL_P1003_1B_MQ_OPEN_MAX 19 /* int */ #define CTL_P1003_1B_PAGESIZE 20 /* int */ #define CTL_P1003_1B_RTSIG_MAX 21 /* int */ #define CTL_P1003_1B_SEM_NSEMS_MAX 22 /* int */ #define CTL_P1003_1B_SEM_VALUE_MAX 23 /* int */ #define CTL_P1003_1B_SIGQUEUE_MAX 24 /* int */ #define CTL_P1003_1B_TIMER_MAX 25 /* int */ #ifdef _KERNEL #define CTL_P1003_1B_MAXID 26 /* * Declare some common oids. 
*/ extern struct sysctl_oid_list sysctl__children; SYSCTL_DECL(_kern); SYSCTL_DECL(_kern_features); SYSCTL_DECL(_kern_ipc); SYSCTL_DECL(_kern_proc); SYSCTL_DECL(_kern_sched); SYSCTL_DECL(_kern_sched_stats); SYSCTL_DECL(_sysctl); SYSCTL_DECL(_vm); SYSCTL_DECL(_vm_stats); SYSCTL_DECL(_vm_stats_misc); SYSCTL_DECL(_vfs); SYSCTL_DECL(_net); SYSCTL_DECL(_debug); SYSCTL_DECL(_debug_sizeof); SYSCTL_DECL(_dev); SYSCTL_DECL(_hw); SYSCTL_DECL(_hw_bus); SYSCTL_DECL(_hw_bus_devices); SYSCTL_DECL(_machdep); SYSCTL_DECL(_machdep_mitigations); SYSCTL_DECL(_user); SYSCTL_DECL(_compat); SYSCTL_DECL(_regression); SYSCTL_DECL(_security); SYSCTL_DECL(_security_bsd); extern char machine[]; extern char osrelease[]; extern char ostype[]; extern char kern_ident[]; /* Dynamic oid handling */ struct sysctl_oid *sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent, int nbr, const char *name, int kind, void *arg1, intmax_t arg2, int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr, const char *label); int sysctl_remove_name(struct sysctl_oid *parent, const char *name, int del, int recurse); void sysctl_rename_oid(struct sysctl_oid *oidp, const char *name); int sysctl_move_oid(struct sysctl_oid *oidp, struct sysctl_oid_list *parent); int sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse); int sysctl_ctx_init(struct sysctl_ctx_list *clist); int sysctl_ctx_free(struct sysctl_ctx_list *clist); struct sysctl_ctx_entry *sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp); struct sysctl_ctx_entry *sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp); int sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp); int kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags); int kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags); int userland_sysctl(struct thread *td, int *name, u_int namelen, void *old, size_t *oldlenp, int inkernel, const void *new, size_t newlen, size_t *retval, int flags); int sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid, int *nindx, struct sysctl_req *req); void sysctl_wlock(void); void sysctl_wunlock(void); int sysctl_wire_old_buffer(struct sysctl_req *req, size_t len); int kern___sysctlbyname(struct thread *td, const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags, bool inkernel); struct sbuf; struct sbuf *sbuf_new_for_sysctl(struct sbuf *, char *, int, struct sysctl_req *); #else /* !_KERNEL */ #include #include #ifndef _SIZE_T_DECLARED typedef __size_t size_t; #define _SIZE_T_DECLARED #endif __BEGIN_DECLS int sysctl(const int *, unsigned int, void *, size_t *, const void *, size_t); int sysctlbyname(const char *, void *, size_t *, const void *, size_t); int sysctlnametomib(const char *, int *, size_t *); __END_DECLS #endif /* _KERNEL */ #endif /* !_SYS_SYSCTL_H_ */
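The hunks above thread a new byte-range lock reporting hook through the VFS layer: vfs_report_lockf_t and the vfs_report_lockf member of struct vfsops on the mount.h side, plus the KERN_LOCKF identifier under CTL_KERN on the sysctl.h side. As a minimal sketch of how these pieces might fit together (illustration only, not part of the patch), the fragment below shows a hypothetical filesystem pointing the new slot at the vfs_report_lockf() routine declared in mount.h and registering itself with VFS_SET(). All examplefs_* names are invented for the example; unset vfsops members are assumed to fall back to the vfs_std* defaults described above.

/*
 * Kernel side (sketch, hypothetical filesystem): handler functions are
 * declared but not shown here.
 */
static vfs_mount_t	examplefs_mount;
static vfs_unmount_t	examplefs_unmount;
static vfs_root_t	examplefs_root;
static vfs_statfs_t	examplefs_statfs;

static struct vfsops examplefs_vfsops = {
	.vfs_mount =		examplefs_mount,
	.vfs_unmount =		examplefs_unmount,
	.vfs_root =		examplefs_root,
	.vfs_statfs =		examplefs_statfs,
	/* Export this mount's advisory locks for the KERN_LOCKF report. */
	.vfs_report_lockf =	vfs_report_lockf,
};

VFS_SET(examplefs_vfsops, examplefs, 0);

A userland consumer would presumably read the report with the usual variable-length sysctl(2) pattern: pass {CTL_KERN, KERN_LOCKF} with a NULL old pointer to learn the required size, allocate, then fetch. The record layout is defined by the lock reporting code, not by these headers, so the sketch below only retrieves raw bytes.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

static void *
fetch_lockf_report(size_t *lenp)
{
	int mib[2] = { CTL_KERN, KERN_LOCKF };
	size_t len = 0;
	void *buf;

	/* Probe the required buffer size first. */
	if (sysctl(mib, 2, NULL, &len, NULL, 0) != 0)
		return (NULL);
	if ((buf = malloc(len)) == NULL)
		return (NULL);
	/* The set of locks can grow between the two calls; a robust caller retries. */
	if (sysctl(mib, 2, buf, &len, NULL, 0) != 0) {
		free(buf);
		return (NULL);
	}
	*lenp = len;
	return (buf);
}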