Index: head/sys/geom/geom_event.c
===================================================================
--- head/sys/geom/geom_event.c	(revision 365710)
+++ head/sys/geom/geom_event.c	(revision 365711)
@@ -1,443 +1,446 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX: How do we in general know that objects referenced in events
 * have not been destroyed before we get around to handle the event ?
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

TAILQ_HEAD(event_tailq_head, g_event);

static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
static u_int g_pending_events;
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
static struct mtx g_eventlock;
static int g_wither_work;

#define G_N_EVENTREFS		20

struct g_event {
	TAILQ_ENTRY(g_event)	events;
	g_event_t		*func;
	void			*arg;
	int			flag;
	void			*ref[G_N_EVENTREFS];
};

#define EV_DONE		0x80000
#define EV_WAKEUP	0x40000
#define EV_CANCELED	0x20000
#define EV_INPROGRESS	0x10000

void
g_waitidle(void)
{

	g_topology_assert_not();

	mtx_lock(&g_eventlock);
	TSWAIT("GEOM events");
	while (!TAILQ_EMPTY(&g_events))
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidle", hz/5);
	TSUNWAIT("GEOM events");
	mtx_unlock(&g_eventlock);
	curthread->td_pflags &= ~TDP_GEOM;
}

#if 0
void
g_waitidlelock(void)
{

	g_topology_assert();
	mtx_lock(&g_eventlock);
	while (!TAILQ_EMPTY(&g_events)) {
		g_topology_unlock();
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidlel", hz/5);
		g_topology_lock();
	}
	mtx_unlock(&g_eventlock);
}
#endif

struct g_attrchanged_args {
	struct g_provider *pp;
	const char *attr;
};

static void
g_attr_changed_event(void *arg, int flag)
{
	struct g_attrchanged_args *args;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_consumer *next_cp;

	args = arg;
	pp = args->pp;

	g_topology_assert();
	if (flag != EV_CANCEL && g_shutdown == 0) {
		/*
		 * Tell all consumers of the change.
		 */
		LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
			if (cp->geom->attrchanged != NULL)
				cp->geom->attrchanged(cp, args->attr);
		}
	}
	g_free(args);
}

int
g_attr_changed(struct g_provider *pp, const char *attr, int flag)
{
	struct g_attrchanged_args *args;
	int error;

	args = g_malloc(sizeof *args, flag);
	if (args == NULL)
		return (ENOMEM);
	args->pp = pp;
	args->attr = attr;
	error = g_post_event(g_attr_changed_event, args, flag, pp, NULL);
	if (error != 0)
		g_free(args);
	return (error);
}
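
A usage sketch for the function above (not part of this revision): the
typical caller of g_attr_changed() is a provider-side driver reacting to a
hardware change.  The softc layout and function name below are
hypothetical; "GEOM::physpath" is an attribute string that real drivers
pass.

struct xyz_softc {
	struct g_provider	*xyz_provider;
	/* ... other driver state ... */
};

/*
 * Hypothetical driver callback: the device's physical path changed, so
 * notify every consumer attached to our provider.  M_NOWAIT is used
 * because the caller may be in a context that must not sleep; if the
 * event allocation fails, g_attr_changed() returns ENOMEM and the
 * notification is simply dropped.
 */
static void
xyz_physpath_changed(struct xyz_softc *sc)
{

	(void)g_attr_changed(sc->xyz_provider, "GEOM::physpath", M_NOWAIT);
}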

void
g_orphan_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp)  We likely lack topology lock */
	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	     pp, pp->name));

	pp->error = error;
	mtx_lock(&g_eventlock);
	KASSERT(!(pp->flags & G_PF_ORPHAN),
	    ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name));
	pp->flags |= G_PF_ORPHAN;
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * This function is called once on each provider which the event handler
 * finds on its g_doorstep.
 */
static void
g_orphan_register(struct g_provider *pp)
{
	struct g_consumer *cp, *cp2;
	int wf;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);

	g_cancel_event(pp);

	wf = pp->flags & G_PF_WITHER;
	pp->flags &= ~G_PF_WITHER;

	/*
	 * Tell all consumers the bad news.
	 * Don't be surprised if they self-destruct.
	 */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		KASSERT(cp->geom->orphan != NULL,
		    ("geom %s has no orphan, class %s",
		    cp->geom->name, cp->geom->class->name));
		/*
		 * XXX: g_dev_orphan method does deferred destroying
		 * and it is possible, that other event could already
		 * call the orphan method. Check consumer's flags to
		 * do not schedule it twice.
		 */
		if (cp->flags & G_CF_ORPHAN)
			continue;
		cp->flags |= G_CF_ORPHAN;
		cp->geom->orphan(cp);
	}
	if (LIST_EMPTY(&pp->consumers) && wf)
		g_destroy_provider(pp);
	else
		pp->flags |= wf;
#ifdef notyet
	cp = LIST_FIRST(&pp->consumers);
	if (cp != NULL)
		return;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_destroy_provider(pp);
#endif
}

static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	g_topology_assert();
	mtx_lock(&g_eventlock);
	pp = TAILQ_FIRST(&g_doorstep);
	if (pp != NULL) {
		G_VALID_PROVIDER(pp);
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		mtx_unlock(&g_eventlock);
		g_orphan_register(pp);
		return (1);
	}

	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		wakeup(&g_pending_events);
		return (0);
	}
	if (ep->flag & EV_INPROGRESS) {
		mtx_unlock(&g_eventlock);
		return (1);
	}
	ep->flag |= EV_INPROGRESS;
	mtx_unlock(&g_eventlock);
	g_topology_assert();
	ep->func(ep->arg, 0);
	g_topology_assert();
	mtx_lock(&g_eventlock);
	TSRELEASE("GEOM events");
	TAILQ_REMOVE(&g_events, ep, events);
	ep->flag &= ~EV_INPROGRESS;
	if (ep->flag & EV_WAKEUP) {
		ep->flag |= EV_DONE;
		mtx_unlock(&g_eventlock);
		wakeup(ep);
	} else {
		mtx_unlock(&g_eventlock);
		g_free(ep);
	}
	return (1);
}

void
g_run_events()
{

	for (;;) {
		g_topology_lock();
		while (one_event())
			;
		mtx_assert(&g_eventlock, MA_OWNED);
		if (g_wither_work) {
			g_wither_work = 0;
			mtx_unlock(&g_eventlock);
			g_wither_washer();
			g_topology_unlock();
		} else {
			g_topology_unlock();
			msleep(&g_wait_event, &g_eventlock, PRIBIO | PDROP,
			    "-", 0);
		}
	}
	/* NOTREACHED */
}

void
g_cancel_event(void *ref)
{
	struct g_event *ep, *epn;
	struct g_provider *pp;
	u_int n;

	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp != ref)
			continue;
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		break;
	}
	TAILQ_FOREACH_SAFE(ep, &g_events, events, epn) {
		if (ep->flag & EV_INPROGRESS)
			continue;
		for (n = 0; n < G_N_EVENTREFS; n++) {
			if (ep->ref[n] == NULL)
				break;
			if (ep->ref[n] != ref)
				continue;
			TSRELEASE("GEOM events");
			TAILQ_REMOVE(&g_events, ep, events);
			ep->func(ep->arg, EV_CANCEL);
			mtx_assert(&g_eventlock, MA_OWNED);
			if (ep->flag & EV_WAKEUP) {
				ep->flag |= (EV_DONE|EV_CANCELED);
				wakeup(ep);
			} else {
				g_free(ep);
			}
			break;
		}
	}
	if (TAILQ_EMPTY(&g_events))
		wakeup(&g_pending_events);
	mtx_unlock(&g_eventlock);
}

static int
g_post_event_x(g_event_t *func, void *arg, int flag, int wuflag,
    struct g_event **epp, va_list ap)
{
	struct g_event *ep;
	void *p;
	u_int n;

	g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d, %d)",
	    func, arg, flag, wuflag);
	KASSERT(wuflag == 0 || wuflag == EV_WAKEUP,
	    ("Wrong wuflag in g_post_event_x(0x%x)", wuflag));
	ep = g_malloc(sizeof *ep, flag | M_ZERO);
	if (ep == NULL)
		return (ENOMEM);
	ep->flag = wuflag;
	for (n = 0; n < G_N_EVENTREFS; n++) {
		p = va_arg(ap, void *);
		if (p == NULL)
			break;
		g_trace(G_T_TOPOLOGY, "  ref %p", p);
		ep->ref[n] = p;
	}
	KASSERT(p == NULL, ("Too many references to event"));
	ep->func = func;
	ep->arg = arg;
	mtx_lock(&g_eventlock);
	TSHOLD("GEOM events");
	TAILQ_INSERT_TAIL(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
	if (epp != NULL)
		*epp = ep;
	curthread->td_pflags |= TDP_GEOM;
+	thread_lock(curthread);
+	curthread->td_flags |= TDF_ASTPENDING;
+	thread_unlock(curthread);
	return (0);
}

int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	int i;

	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	va_start(ap, flag);
	i = g_post_event_x(func, arg, flag, 0, NULL, ap);
	va_end(ap);
	return (i);
}

void
g_do_wither()
{

	mtx_lock(&g_eventlock);
	g_wither_work = 1;
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes.  At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */
int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	struct g_event *ep;
	int error;

	g_topology_assert_not();
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	va_start(ap, flag);
	error = g_post_event_x(func, arg, flag, EV_WAKEUP, &ep, ap);
	va_end(ap);
	if (error)
		return (error);

	mtx_lock(&g_eventlock);
	while (!(ep->flag & EV_DONE))
		msleep(ep, &g_eventlock, PRIBIO, "g_waitfor_event", hz);
	if (ep->flag & EV_CANCELED)
		error = EAGAIN;
	mtx_unlock(&g_eventlock);

	g_free(ep);
	return (error);
}

void
g_event_init()
{

	mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
}
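
Before the subr_trap.c half of the diff, a sketch of the posting pattern
the file above implements (illustrative only, assuming the usual
<geom/geom.h> declarations; the handler and wrapper names are made up,
while g_post_event(), EV_CANCEL and the NULL-terminated reference list
behave exactly as shown above):

/*
 * The handler runs later on the GEOM event thread, with the topology
 * lock held; flag is EV_CANCEL if the event was retired by
 * g_cancel_event() before it could run.
 */
static void
example_task(void *arg, int flag)
{
	struct g_provider *pp = arg;

	if (flag == EV_CANCEL)
		return;
	g_topology_assert();
	/* ... operate on pp ... */
}

static int
example_post(struct g_provider *pp)
{

	/*
	 * pp is passed both as the handler argument and as a
	 * cancellation reference, so g_cancel_event(pp) can retire the
	 * event if the provider goes away first.  As of this revision,
	 * posting also sets TDF_ASTPENDING on the posting thread,
	 * forcing it through ast() -> g_waitidle() before it returns
	 * to user mode.
	 */
	return (g_post_event(example_task, pp, M_WAITOK, pp, NULL));
}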

Index: head/sys/kern/subr_trap.c
===================================================================
--- head/sys/kern/subr_trap.c	(revision 365710)
+++ head/sys/kern/subr_trap.c	(revision 365711)
@@ -1,379 +1,379 @@
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007 The FreeBSD Foundation
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <security/audit/audit.h>

#include <machine/cpu.h>

#ifdef VIMAGE
#include <net/vnet.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <geom/geom.h>

void (*softdep_ast_cleanup)(struct thread *);

/*
 * Define the code needed before returning to user mode, for trap and
 * syscall.
 */
void
userret(struct thread *td, struct trapframe *frame)
{
	struct proc *p = td->td_proc;

	CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
	    td->td_name);
	KASSERT((p->p_flag & P_WEXIT) == 0,
	    ("Exiting process returns to usermode"));
#ifdef DIAGNOSTIC
	/*
	 * Check that we called signotify() enough.  For
	 * multi-threaded processes, where signal distribution might
	 * change due to other threads changing sigmask, the check is
	 * racy and cannot be performed reliably.
	 * If current process is vfork child, indicated by P_PPWAIT, then
	 * issignal() ignores stops, so we block the check to avoid
	 * classifying pending signals.
	 */
	if (p->p_numthreads == 1) {
		PROC_LOCK(p);
		thread_lock(td);
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			if (SIGPENDING(td) && (td->td_flags &
			    (TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
			    (TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
				thread_unlock(td);
				panic(
	"failed to set signal flags for ast p %p td %p fl %x",
				    p, td, td->td_flags);
			}
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif
#ifdef KTRACE
	KTRUSERRET(td);
#endif
	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);

	/*
-	 * If this thread tickled GEOM, we need to wait for the giggling to
-	 * stop before we return to userland
-	 */
-	if (__predict_false(td->td_pflags & TDP_GEOM))
-		g_waitidle();
-
-	/*
	 * Charge system time if profiling.
	 */
	if (__predict_false(p->p_flag & P_PROFIL))
		addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);

#ifdef HWPMC_HOOKS
	if (PMC_THREAD_HAS_SAMPLES(td))
		PMC_CALL_HOOK(td, PMC_FN_THR_USERRET, NULL);
#endif
	/*
	 * Let the scheduler adjust our priority etc.
	 */
	sched_userret(td);

	/*
	 * Check for misbehavior.
	 *
	 * In case there is a callchain tracing ongoing because of
	 * hwpmc(4), skip the scheduler pinning check.
	 * hwpmc(4) subsystem, infact, will collect callchain informations
	 * at ast() checkpoint, which is past userret().
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
	KASSERT(td->td_critnest == 0,
	    ("userret: Returning in a critical section"));
	KASSERT(td->td_locks == 0,
	    ("userret: Returning with %d locks held", td->td_locks));
	KASSERT(td->td_rw_rlocks == 0,
	    ("userret: Returning with %d rwlocks held in read mode",
	    td->td_rw_rlocks));
	KASSERT(td->td_sx_slocks == 0,
	    ("userret: Returning with %d sx locks held in shared mode",
	    td->td_sx_slocks));
	KASSERT(td->td_lk_slocks == 0,
	    ("userret: Returning with %d lockmanager locks held in shared mode",
	    td->td_lk_slocks));
	KASSERT((td->td_pflags & TDP_NOFAULTING) == 0,
	    ("userret: Returning with pagefaults disabled"));
	if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
		epoch_trace_list(curthread);
#endif
		KASSERT(0, ("userret: Returning with sleep disabled"));
	}
	KASSERT(td->td_pinned == 0 || (td->td_pflags & TDP_CALLCHAIN) != 0,
	    ("userret: Returning with with pinned thread"));
	KASSERT(td->td_vp_reserved == NULL,
	    ("userret: Returning with preallocated vnode"));
	KASSERT((td->td_flags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
	    ("userret: Returning with stop signals deferred"));
	KASSERT(td->td_su == NULL,
	    ("userret: Returning with SU cleanup request not handled"));
	KASSERT(td->td_vslock_sz == 0,
	    ("userret: Returning with vslock-wired space"));
#ifdef VIMAGE
	/* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
	VNET_ASSERT(curvnet == NULL,
	    ("%s: Returning on td %p (pid %d, %s) with vnet %p set in %s",
	    __func__, td, p->p_pid, td->td_name, curvnet,
	    (td->td_vnet_lpush != NULL) ? td->td_vnet_lpush : "N/A"));
#endif
}

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	int flags, sig;

	td = curthread;
	p = td->td_proc;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
	WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
	mtx_assert(&Giant, MA_NOTOWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	td->td_frame = framep;
	td->td_pticks = 0;

	/*
	 * This updates the td_flag's for the checks below in one
	 * "atomic" operation with turning off the astpending flag.
	 * If another AST is triggered while we are handling the
	 * AST's saved in flags, the astpending flag will be set and
	 * ast() will be called again.
	 */
	thread_lock(td);
	flags = td->td_flags;
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
	    TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND);
	thread_unlock(td);
	VM_CNT_INC(v_trap);

	if (td->td_cowgen != p->p_cowgen)
		thread_cow_update(td);
	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
		addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
		td->td_profil_ticks = 0;
		td->td_pflags &= ~TDP_OWEUPC;
	}
#ifdef HWPMC_HOOKS
	/* Handle Software PMC callchain capture. */
	if (PMC_IS_PENDING_CALLCHAIN(td))
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT,
		    (void *) framep);
#endif
	if (flags & TDF_ALRMPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
	if (flags & TDF_PROFPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
#ifdef MAC
	if (flags & TDF_MACPEND)
		mac_thread_userret(td);
#endif
	if (flags & TDF_NEEDRESCHED) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(1, 1, __func__);
#endif
		thread_lock(td);
		sched_prio(td, td->td_user_pri);
		mi_switch(SW_INVOL | SWT_NEEDRESCHED);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(0, 1, __func__);
#endif
	}
+
+	/*
+	 * If this thread tickled GEOM, we need to wait for the giggling to
+	 * stop before we return to userland
+	 */
+	if (__predict_false(td->td_pflags & TDP_GEOM))
+		g_waitidle();

#ifdef DIAGNOSTIC
	if (p->p_numthreads == 1 && (flags & TDF_NEEDSIGCHK) == 0) {
		PROC_LOCK(p);
		thread_lock(td);
		/*
		 * Note that TDF_NEEDSIGCHK should be re-read from
		 * td_flags, since signal might have been delivered
		 * after we cleared td_flags above.  This is one of
		 * the reason for looping check for AST condition.
		 * See comment in userret() about P_PPWAIT.
		 */
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			if (SIGPENDING(td) && (td->td_flags &
			    (TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
			    (TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
				thread_unlock(td);	/* fix dumps */
				panic(
	"failed2 to set signal flags for ast p %p td %p fl %x %x",
				    p, td, flags, td->td_flags);
			}
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif

	/*
	 * Check for signals. Unlocked reads of p_pendingcnt or
	 * p_siglist might cause process-directed signal to be handled
	 * later.
	 */
	if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		sigfastblock_fetch(td);
		if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0 &&
		    td->td_sigblock_val != 0) {
			sigfastblock_setpend(td, true);
		} else {
			PROC_LOCK(p);
			mtx_lock(&p->p_sigacts->ps_mtx);
			while ((sig = cursig(td)) != 0) {
				KASSERT(sig >= 0, ("sig %d", sig));
				postsig(sig);
			}
			mtx_unlock(&p->p_sigacts->ps_mtx);
			PROC_UNLOCK(p);
		}
	}

	/*
	 * Handle deferred update of the fast sigblock value, after
	 * the postsig() loop was performed.
	 */
	if (td->td_pflags & TDP_SIGFASTPENDING)
		sigfastblock_setpend(td, false);

	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	if (flags & TDF_NEEDSUSPCHK) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_OLDMASK) {
		td->td_pflags &= ~TDP_OLDMASK;
		kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
	}

#ifdef RACCT
	if (__predict_false(racct_enable && p->p_throttled != 0))
		racct_proc_throttled(p);
#endif

	userret(td, framep);
}

const char *
syscallname(struct proc *p, u_int code)
{
	static const char unknown[] = "unknown";
	struct sysentvec *sv;

	sv = p->p_sysent;
	if (sv->sv_syscallnames == NULL || code >= sv->sv_size)
		return (unknown);
	return (sv->sv_syscallnames[code]);
}
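
Distilled from the two hunks above, as a reading aid rather than
additional source, the handshake this revision introduces is:

/*
 * g_post_event_x():			posting thread
 *	td_pflags |= TDP_GEOM;		   remember we queued GEOM work
 *	td_flags  |= TDF_ASTPENDING;	   force a pass through ast()
 *
 * ast():				before return to user mode
 *	if (td->td_pflags & TDP_GEOM)
 *		g_waitidle();		   sleep until g_events drains
 *
 * g_waitidle():
 *	curthread->td_pflags &= ~TDP_GEOM; handshake complete
 *
 * userret() runs unconditionally on every return to user mode, so the
 * old placement needed no extra bookkeeping; ast() runs only when an
 * AST is pending, which is why g_post_event_x() must now schedule one
 * explicitly by setting TDF_ASTPENDING.
 */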