Index: sys/kern/kern_exec.c
===================================================================
--- sys/kern/kern_exec.c
+++ sys/kern/kern_exec.c
@@ -102,6 +102,8 @@
 SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
 SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");
 
+EVENTHANDLER_DEFINE_LIST(process_exec);
+
 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
 
 int coredump_pack_fileinfo = 1;
@@ -1071,7 +1073,7 @@
 	imgp->sysent = sv;
 
 	/* May be called with Giant held */
-	EVENTHANDLER_INVOKE(process_exec, p, imgp);
+	EVENTHANDLER_INVOKE_LIST(process_exec, p, imgp);
 
 	/*
 	 * Blow away entire process VM, if address space not shared,
Index: sys/kern/kern_exit.c
===================================================================
--- sys/kern/kern_exit.c
+++ sys/kern/kern_exit.c
@@ -96,6 +96,8 @@
 SDT_PROVIDER_DECLARE(proc);
 SDT_PROBE_DEFINE1(proc, , , exit, "int");
 
+EVENTHANDLER_DEFINE_LIST(process_exit);
+
 /* Hook for NFS teardown procedure. */
 void (*nlminfo_release_p)(struct proc *p);
@@ -329,7 +331,7 @@
 	 * Event handler could change exit status.
 	 * XXX what if one of these generates an error?
 	 */
-	EVENTHANDLER_INVOKE(process_exit, p);
+	EVENTHANDLER_INVOKE_LIST(process_exit, p);
 
 	/*
 	 * If parent is waiting for us to exit or exec,
Index: sys/kern/kern_fork.c
===================================================================
--- sys/kern/kern_fork.c
+++ sys/kern/kern_fork.c
@@ -91,6 +91,8 @@
 SDT_PROVIDER_DECLARE(proc);
 SDT_PROBE_DEFINE3(proc, , , create, "struct proc *", "struct proc *", "int");
 
+EVENTHANDLER_DEFINE_LIST(process_fork);
+
 #ifndef _SYS_SYSPROTO_H_
 struct fork_args {
 	int	dummy;
@@ -699,7 +701,7 @@
 	 * Both processes are set up, now check if any loadable modules want
 	 * to adjust anything.
 	 */
-	EVENTHANDLER_INVOKE(process_fork, p1, p2, fr->fr_flags);
+	EVENTHANDLER_INVOKE_LIST(process_fork, p1, p2, fr->fr_flags);
 
 	/*
 	 * Set the child start time and mark the process as being complete.
Index: sys/kern/kern_proc.c
===================================================================
--- sys/kern/kern_proc.c
+++ sys/kern/kern_proc.c
@@ -164,6 +164,11 @@
 CTASSERT(sizeof(struct kinfo_proc32) == KINFO_PROC32_SIZE);
 #endif
 
+EVENTHANDLER_DEFINE_LIST(process_ctor);
+EVENTHANDLER_DEFINE_LIST(process_dtor);
+EVENTHANDLER_DEFINE_LIST(process_init);
+EVENTHANDLER_DEFINE_LIST(process_fini);
+
 /*
  * Initialize global process hashing structures.
  */
@@ -195,12 +200,12 @@
 
 	p = (struct proc *)mem;
 	SDT_PROBE4(proc, , ctor , entry, p, size, arg, flags);
-	EVENTHANDLER_INVOKE(process_ctor, p);
+	EVENTHANDLER_INVOKE_LIST(process_ctor, p);
 	SDT_PROBE4(proc, , ctor , return, p, size, arg, flags);
 	td = FIRST_THREAD_IN_PROC(p);
 	if (td != NULL) {
 		/* Make sure all thread constructors are executed */
-		EVENTHANDLER_INVOKE(thread_ctor, td);
+		EVENTHANDLER_INVOKE_LIST(thread_ctor, td);
 	}
 	return (0);
 }
@@ -230,9 +235,9 @@
 		MPASS(td->td_su == NULL);
 
 		/* Make sure all thread destructors are executed */
-		EVENTHANDLER_INVOKE(thread_dtor, td);
+		EVENTHANDLER_INVOKE_LIST(thread_dtor, td);
 	}
-	EVENTHANDLER_INVOKE(process_dtor, p);
+	EVENTHANDLER_INVOKE_LIST(process_dtor, p);
 	if (p->p_ksi != NULL)
 		KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
 	SDT_PROBE3(proc, , dtor, return, p, size, arg);
@@ -256,7 +261,7 @@
 	cv_init(&p->p_pwait, "ppwait");
 	cv_init(&p->p_dbgwait, "dbgwait");
 	TAILQ_INIT(&p->p_threads);		/* all threads in proc */
-	EVENTHANDLER_INVOKE(process_init, p);
+	EVENTHANDLER_INVOKE_LIST(process_init, p);
 	p->p_stats = pstats_alloc();
 	p->p_pgrp = NULL;
 	SDT_PROBE3(proc, , init, return, p, size, flags);
@@ -274,7 +279,7 @@
 	struct proc *p;
 
 	p = (struct proc *)mem;
-	EVENTHANDLER_INVOKE(process_fini, p);
+	EVENTHANDLER_INVOKE_LIST(process_fini, p);
 	pstats_free(p->p_stats);
 	thread_free(FIRST_THREAD_IN_PROC(p));
 	mtx_destroy(&p->p_mtx);
Index: sys/kern/kern_thread.c
===================================================================
--- sys/kern/kern_thread.c
+++ sys/kern/kern_thread.c
@@ -119,6 +119,11 @@
 SDT_PROVIDER_DECLARE(proc);
 SDT_PROBE_DEFINE(proc, , , lwp__exit);
 
+EVENTHANDLER_DEFINE_LIST(thread_ctor);
+EVENTHANDLER_DEFINE_LIST(thread_dtor);
+EVENTHANDLER_DEFINE_LIST(thread_init);
+EVENTHANDLER_DEFINE_LIST(thread_fini);
+
 /*
  * thread related storage.
  */
@@ -201,7 +206,7 @@
 	 */
 	td->td_critnest = 1;
 	td->td_lend_user_pri = PRI_MAX;
-	EVENTHANDLER_INVOKE(thread_ctor, td);
+	EVENTHANDLER_INVOKE_LIST(thread_ctor, td);
 #ifdef AUDIT
 	audit_thread_alloc(td);
 #endif
@@ -247,7 +252,7 @@
 	td_softdep_cleanup(td);
 	MPASS(td->td_su == NULL);
 
-	EVENTHANDLER_INVOKE(thread_dtor, td);
+	EVENTHANDLER_INVOKE_LIST(thread_dtor, td);
 	tid_free(td->td_tid);
 }
@@ -264,7 +269,7 @@
 	td->td_sleepqueue = sleepq_alloc();
 	td->td_turnstile = turnstile_alloc();
 	td->td_rlqe = NULL;
-	EVENTHANDLER_INVOKE(thread_init, td);
+	EVENTHANDLER_INVOKE_LIST(thread_init, td);
 	umtx_thread_init(td);
 	td->td_kstack = 0;
 	td->td_sel = NULL;
@@ -280,7 +285,7 @@
 	struct thread *td;
 
 	td = (struct thread *)mem;
-	EVENTHANDLER_INVOKE(thread_fini, td);
+	EVENTHANDLER_INVOKE_LIST(thread_fini, td);
 	rlqentry_free(td->td_rlqe);
 	turnstile_free(td->td_turnstile);
 	sleepq_free(td->td_sleepqueue);
Index: sys/kern/subr_eventhandler.c
===================================================================
--- sys/kern/subr_eventhandler.c
+++ sys/kern/subr_eventhandler.c
@@ -64,6 +64,49 @@
 SYSINIT(eventhandlers, SI_SUB_EVENTHANDLER, SI_ORDER_FIRST, eventhandler_init,
     NULL);
 
+static struct eventhandler_list *
+eventhandler_find_or_create_list(const char *name, bool predefined_list)
+{
+	struct eventhandler_list *list, *new_list;
+
+	/* look for a matching, existing list */
+	list = _eventhandler_find_list(name);
+
+	/* Do we need to create the list? */
+	if (list == NULL) {
+		mtx_unlock(&eventhandler_mutex);
+
+		new_list = malloc(sizeof(struct eventhandler_list) +
+		    strlen(name) + 1, M_EVENTHANDLER, M_WAITOK);
+
+		/* If someone else created it already, then use that one. */
+		mtx_lock(&eventhandler_mutex);
+		list = _eventhandler_find_list(name);
+		if (list != NULL) {
+			free(new_list, M_EVENTHANDLER);
+		} else {
+			CTR2(KTR_EVH, "%s: creating list \"%s\"", __func__, name);
+			list = new_list;
+			list->el_flags = 0;
+			list->el_runcount = 0;
+			TAILQ_INIT(&list->el_entries);
+			mtx_init(&list->el_lock, name, "eventhandler list", MTX_DEF);
+			list->el_name = (char *)list + sizeof(struct eventhandler_list);
+			strcpy(list->el_name, name);
+			/*
+			 * Dynamic lists are accessed by name search.  Predefined
+			 * lists are most often accessed directly via pointer, so
+			 * add them to the tail, leaving dynamic lists at the
+			 * front for faster searching.
+			 */
+			if (predefined_list)
+				TAILQ_INSERT_TAIL(&eventhandler_lists, list, el_link);
+			else
+				TAILQ_INSERT_HEAD(&eventhandler_lists, list, el_link);
+		}
+	}
+	return (list);
+}
+
 /*
  * Insertion is O(n) due to the priority scan, but optimises to O(1)
  * if all priorities are identical.
@@ -72,50 +115,17 @@
 eventhandler_register_internal(struct eventhandler_list *list, const char *name,
     eventhandler_tag epn)
 {
-	struct eventhandler_list *new_list;
 	struct eventhandler_entry *ep;
 
 	KASSERT(eventhandler_lists_initted, ("eventhandler registered too early"));
 	KASSERT(epn != NULL, ("%s: cannot register NULL event", __func__));
 
-	/* lock the eventhandler lists */
-	mtx_lock(&eventhandler_mutex);
-
-	/* Do we need to find/create the (slow) list? */
+	/* Do we need to find/create the list? */
 	if (list == NULL) {
-		/* look for a matching, existing list */
-		list = _eventhandler_find_list(name);
-
-		/* Do we need to create the list? */
-		if (list == NULL) {
-			mtx_unlock(&eventhandler_mutex);
-
-			new_list = malloc(sizeof(struct eventhandler_list) +
-			    strlen(name) + 1, M_EVENTHANDLER, M_WAITOK);
-
-			/* If someone else created it already, then use that one. */
-			mtx_lock(&eventhandler_mutex);
-			list = _eventhandler_find_list(name);
-			if (list != NULL) {
-				free(new_list, M_EVENTHANDLER);
-			} else {
-				CTR2(KTR_EVH, "%s: creating list \"%s\"", __func__, name);
-				list = new_list;
-				list->el_flags = 0;
-				list->el_runcount = 0;
-				bzero(&list->el_lock, sizeof(list->el_lock));
-				list->el_name = (char *)list + sizeof(struct eventhandler_list);
-				strcpy(list->el_name, name);
-				TAILQ_INSERT_HEAD(&eventhandler_lists, list, el_link);
-			}
-		}
+		mtx_lock(&eventhandler_mutex);
+		list = eventhandler_find_or_create_list(name, false);
+		mtx_unlock(&eventhandler_mutex);
 	}
-	if (!(list->el_flags & EHL_INITTED)) {
-		TAILQ_INIT(&list->el_entries);
-		mtx_init(&list->el_lock, name, "eventhandler list", MTX_DEF);
-		atomic_store_rel_int(&list->el_flags, EHL_INITTED);
-	}
-	mtx_unlock(&eventhandler_mutex);
 
 	KASSERT(epn->ee_priority != EHE_DEAD_PRIORITY,
 	    ("%s: handler for %s registered with dead priority", __func__, name));
@@ -133,6 +143,7 @@
 	}
 	if (ep == NULL)
 		TAILQ_INSERT_TAIL(&list->el_entries, epn, ee_link);
+	list->el_flags |= EHL_NONEMPTY;
 	EHL_UNLOCK(list);
 	return(epn);
 }
@@ -274,6 +285,24 @@
 }
 
 /*
+ * Create a named list.
+ */
+struct eventhandler_list *
+eventhandler_create_list(const char *name)
+{
+	struct eventhandler_list *list;
+
+	if (!eventhandler_lists_initted)
+		panic("eventhandler_create_list() called before SI_SUB_EVENTHANDLER");
+
+	mtx_lock(&eventhandler_mutex);
+	list = eventhandler_find_or_create_list(name, true);
+	mtx_unlock(&eventhandler_mutex);
+
+	return (list);
+}
+
+/*
  * Prune "dead" entries from an eventhandler list.
  */
 void
Index: sys/sys/eventhandler.h
===================================================================
--- sys/sys/eventhandler.h
+++ sys/sys/eventhandler.h
@@ -52,7 +52,7 @@
 struct eventhandler_list {
 	char				*el_name;
 	int				el_flags;
-#define EHL_INITTED	(1<<0)
+#define EHL_NONEMPTY	(1<<0)
 	u_int				el_runcount;
 	struct mtx			el_lock;
 	TAILQ_ENTRY(eventhandler_list)	el_link;
@@ -72,8 +72,6 @@
 	struct eventhandler_entry *_ep;	\
 	struct eventhandler_entry_ ## name *_t;	\
 	\
-	KASSERT((list)->el_flags & EHL_INITTED,	\
-	    ("eventhandler_invoke: running non-inited list"));	\
 	EHL_LOCK_ASSERT((list), MA_OWNED);	\
 	(list)->el_runcount++;	\
 	KASSERT((list)->el_runcount > 0,	\
@@ -98,8 +96,10 @@
 } while (0)
 
 /*
- * Slow handlers are entirely dynamic; lists are created
- * when entries are added to them, and thus have no concept of "owner",
+ * Declare and invoke "slow" handler lists.
+ *
+ * Slow handler lists are entirely dynamic; lists are created when entries are
+ * added to them, and thus have no concept of "owner".
  *
  * Slow handlers need to be declared, but do not need to be defined. The
  * declaration must be in scope wherever the handler is to be invoked.
@@ -131,6 +131,43 @@
 		_EVENTHANDLER_INVOKE(name, _el , ## __VA_ARGS__);	\
 } while (0)
 
+/*
+ * Define, declare, and invoke "fast" eventhandler lists.  Fast here refers to
+ * keeping a copy of the list pointer in a global var to avoid the cost of
+ * looking up the list by name.  Fast and slow handler lists are identical, and
+ * a fast handler list can be looked up by the regular invoke macro.
+ *
+ * Fast handler lists must be defined in exactly one module.  Usually handlers
+ * are invoked from the module containing the definition.  When the definition
+ * is in a different file than the invocation, use EVENTHANDLER_DECLARE_LIST.
+ */
+#define EVENTHANDLER_DEFINE_LIST(name)	\
+struct eventhandler_list *_ehlist_ ## name;	\
+static void name ## _ehl_init(void *ctx)	\
+{	\
+	_ehlist_ ## name = eventhandler_create_list(#name);	\
+}	\
+SYSINIT(name ## _ehl_init, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND,	\
+    name ## _ehl_init, NULL);	\
+struct __hack
+
+#define EVENTHANDLER_DECLARE_LIST(name)	\
+extern struct eventhandler_list *_ehlist_ ## name;	\
+struct __hack
+
+#define EVENTHANDLER_INVOKE_LIST(name, ...)	\
+do {	\
+	struct eventhandler_list *_el = _ehlist_ ## name;	\
+	\
+	if (_el->el_flags & EHL_NONEMPTY) {	\
+		EHL_LOCK(_el);	\
+		_EVENTHANDLER_INVOKE(name, _el , ## __VA_ARGS__);	\
+	}	\
+} while (0)
+
+/*
+ * Register and unregister handlers.
+ */
 #define EVENTHANDLER_REGISTER(name, func, arg, priority)	\
 	eventhandler_register(NULL, #name, func, arg, priority)
@@ -156,6 +193,7 @@
 	    eventhandler_tag tag);
 void	eventhandler_deregister_nowait(struct eventhandler_list *list,
 	    eventhandler_tag tag);
+struct eventhandler_list *eventhandler_create_list(const char *name);
 struct eventhandler_list *eventhandler_find_list(const char *name);
 void	eventhandler_prune_list(struct eventhandler_list *list);
@@ -258,6 +296,9 @@
 EVENTHANDLER_DECLARE(thread_init, thread_init_fn);
 EVENTHANDLER_DECLARE(thread_fini, thread_fini_fn);
 
+EVENTHANDLER_DECLARE_LIST(thread_ctor);	/* These are defined in kern_thread.c */
+EVENTHANDLER_DECLARE_LIST(thread_dtor);	/* and invoked from kern_proc.c */
+
 typedef void (*uma_zone_chfn)(void *);
 EVENTHANDLER_DECLARE(nmbclusters_change, uma_zone_chfn);
 EVENTHANDLER_DECLARE(nmbufs_change, uma_zone_chfn);
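
For reference, here is a minimal usage sketch of the interface added above. It is not part of the patch, and the event name mydev_attach, its handler, and the surrounding module are hypothetical. EVENTHANDLER_DECLARE() still supplies the handler type as for any slow list, EVENTHANDLER_DEFINE_LIST() pre-creates the list at SI_SUB_EVENTHANDLER/SI_ORDER_SECOND time and caches its pointer in _ehlist_mydev_attach, registration is unchanged, and EVENTHANDLER_INVOKE_LIST() goes through the cached pointer and returns immediately while the list has no entries (EHL_NONEMPTY clear).

/* Hypothetical consumer, for illustration only; not part of the patch. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>

/* The handler type is declared exactly as for a slow list. */
typedef void (*mydev_attach_fn)(void *arg, int unit);
EVENTHANDLER_DECLARE(mydev_attach, mydev_attach_fn);

/* Define the fast list once; this caches the pointer in _ehlist_mydev_attach. */
EVENTHANDLER_DEFINE_LIST(mydev_attach);

static eventhandler_tag mydev_attach_tag;

/* Subscribers register exactly as before. */
static void
mydev_attach_hook(void *arg, int unit)
{
	printf("mydev%d attached\n", unit);
}

static void
mydev_subscribe(void)
{
	mydev_attach_tag = EVENTHANDLER_REGISTER(mydev_attach,
	    mydev_attach_hook, NULL, EVENTHANDLER_PRI_ANY);
}

/*
 * The publisher invokes through the cached list pointer: no name lookup,
 * and nothing is locked while the list is empty.
 */
static void
mydev_announce(int unit)
{
	EVENTHANDLER_INVOKE_LIST(mydev_attach, unit);
}

Note the insertion policy in eventhandler_find_or_create_list(): pre-defined lists go to the tail of eventhandler_lists precisely because they are normally reached through the cached pointer, which keeps dynamically created lists near the head where name lookups find them sooner.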