Index: sys/kern/kern_event.c
===================================================================
--- sys/kern/kern_event.c
+++ sys/kern/kern_event.c
@@ -600,10 +600,10 @@
 			kn->kn_fflags |= NOTE_TRACKERR;
 		if (kn->kn_fop->f_event(kn, NOTE_FORK))
 			KNOTE_ACTIVATE(kn, 0);
+		list->kl_lock(list->kl_lockarg);
 		KQ_LOCK(kq);
 		kn_leave_flux(kn);
 		KQ_UNLOCK_FLUX(kq);
-		list->kl_lock(list->kl_lockarg);
 	}
 	list->kl_unlock(list->kl_lockarg);
 }
@@ -1460,8 +1460,11 @@
 					break;
 		}
 	} else {
-		if ((kev->flags & EV_ADD) == EV_ADD)
-			kqueue_expand(kq, fops, kev->ident, waitok);
+		if ((kev->flags & EV_ADD) == EV_ADD) {
+			error = kqueue_expand(kq, fops, kev->ident, waitok);
+			if (error != 0)
+				goto done;
+		}
 
 		KQ_LOCK(kq);
@@ -1697,12 +1700,12 @@
 {
 	struct klist *list, *tmp_knhash, *to_free;
 	u_long tmp_knhashmask;
-	int size;
-	int fd;
+	int error, fd, size;
 	int mflag = waitok ? M_WAITOK : M_NOWAIT;
 
 	KQ_NOTOWNED(kq);
 
+	error = 0;
 	to_free = NULL;
 	if (fops->f_isfd) {
 		fd = ident;
@@ -1714,9 +1717,11 @@
 			if (list == NULL)
 				return ENOMEM;
 			KQ_LOCK(kq);
-			if (kq->kq_knlistsize > fd) {
+			if ((kq->kq_state & KQ_CLOSING) != 0) {
+				to_free = list;
+				error = EBADF;
+			} else if (kq->kq_knlistsize > fd) {
 				to_free = list;
-				list = NULL;
 			} else {
 				if (kq->kq_knlist != NULL) {
 					bcopy(kq->kq_knlist, list,
@@ -1737,9 +1742,12 @@
 			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
 			    &tmp_knhashmask);
 			if (tmp_knhash == NULL)
-				return ENOMEM;
+				return (ENOMEM);
 			KQ_LOCK(kq);
-			if (kq->kq_knhashmask == 0) {
+			if ((kq->kq_state & KQ_CLOSING) != 0) {
+				to_free = tmp_knhash;
+				error = EBADF;
+			} else if (kq->kq_knhashmask == 0) {
 				kq->kq_knhash = tmp_knhash;
 				kq->kq_knhashmask = tmp_knhashmask;
 			} else {
@@ -1751,7 +1759,7 @@
 	free(to_free, M_KQUEUE);
 
 	KQ_NOTOWNED(kq);
-	return 0;
+	return (error);
 }
 
 static void
@@ -2600,6 +2608,8 @@
 	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
 	KQ_OWNED(kq);
 
+	if ((kq->kq_state & KQ_CLOSING) != 0)
+		return (EBADF);
 	if (kn->kn_fop->f_isfd) {
 		if (kn->kn_id >= kq->kq_knlistsize)
 			return (ENOMEM);