Changeset View
Standalone View
sys/kern/kern_intr.c
Show First 20 Lines • Show All 183 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Regenerate the full name of an interrupt event and update its priority. | * Regenerate the full name of an interrupt event and update its priority. | ||||
*/ | */ | ||||
static void | static void | ||||
intr_event_update(struct intr_event *ie) | intr_event_update(struct intr_event *ie) | ||||
{ | { | ||||
struct intr_handler *ih; | struct intr_handler *ih; | ||||
char *last; | char *last; | ||||
int missed, space; | int missed, space, flags; | ||||
/* Start off with no entropy and just the name of the event. */ | /* Start off with no entropy and just the name of the event. */ | ||||
mtx_assert(&ie->ie_lock, MA_OWNED); | mtx_assert(&ie->ie_lock, MA_OWNED); | ||||
strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); | strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); | ||||
ie->ie_hflags = 0; | flags = 0; | ||||
missed = 0; | missed = 0; | ||||
space = 1; | space = 1; | ||||
/* Run through all the handlers updating values. */ | /* Run through all the handlers updating values. */ | ||||
CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { | CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { | ||||
if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < | if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < | ||||
sizeof(ie->ie_fullname)) { | sizeof(ie->ie_fullname)) { | ||||
strcat(ie->ie_fullname, " "); | strcat(ie->ie_fullname, " "); | ||||
strcat(ie->ie_fullname, ih->ih_name); | strcat(ie->ie_fullname, ih->ih_name); | ||||
space = 0; | space = 0; | ||||
} else | } else | ||||
missed++; | missed++; | ||||
ie->ie_hflags |= ih->ih_flags; | flags |= ih->ih_flags; | ||||
} | } | ||||
ie->ie_hflags = flags; | |||||
/* | /* | ||||
* If there is only one handler and its name is too long, just copy in | * If there is only one handler and its name is too long, just copy in | ||||
* as much of the end of the name (includes the unit number) as will | * as much of the end of the name (includes the unit number) as will | ||||
* fit. Otherwise, we have multiple handlers and not all of the names | * fit. Otherwise, we have multiple handlers and not all of the names | ||||
* will fit. Add +'s to indicate missing names. If we run out of room | * will fit. Add +'s to indicate missing names. If we run out of room | ||||
* and still have +'s to add, change the last character from a + to a *. | * and still have +'s to add, change the last character from a + to a *. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 983 Lines • ▼ Show 20 Lines | |||||
ithread_loop(void *arg) | ithread_loop(void *arg) | ||||
{ | { | ||||
struct epoch_tracker et; | struct epoch_tracker et; | ||||
struct intr_thread *ithd; | struct intr_thread *ithd; | ||||
struct intr_event *ie; | struct intr_event *ie; | ||||
struct thread *td; | struct thread *td; | ||||
struct proc *p; | struct proc *p; | ||||
int wake, epoch_count; | int wake, epoch_count; | ||||
bool needs_epoch; | |||||
td = curthread; | td = curthread; | ||||
p = td->td_proc; | p = td->td_proc; | ||||
ithd = (struct intr_thread *)arg; | ithd = (struct intr_thread *)arg; | ||||
KASSERT(ithd->it_thread == td, | KASSERT(ithd->it_thread == td, | ||||
("%s: ithread and proc linkage out of sync", __func__)); | ("%s: ithread and proc linkage out of sync", __func__)); | ||||
ie = ithd->it_event; | ie = ithd->it_event; | ||||
ie->ie_count = 0; | ie->ie_count = 0; | ||||
Show All 18 Lines | for (;;) { | ||||
* Service interrupts. If another interrupt arrives while | * Service interrupts. If another interrupt arrives while | ||||
* we are running, it will set it_need to note that we | * we are running, it will set it_need to note that we | ||||
* should make another pass. | * should make another pass. | ||||
* | * | ||||
* The load_acq part of the following cmpset ensures | * The load_acq part of the following cmpset ensures | ||||
* that the load of ih_need in ithread_execute_handlers() | * that the load of ih_need in ithread_execute_handlers() | ||||
* is ordered after the load of it_need here. | * is ordered after the load of it_need here. | ||||
*/ | */ | ||||
if (ie->ie_hflags & IH_NET) { | needs_epoch = | ||||
(atomic_load_int(&ie->ie_hflags) & IH_NET) != 0; | |||||
if (needs_epoch) { | |||||
kib: Does the 'needs_epoch' calculation need to happen inside the loop ? | |||||
Done Inline ActionsNot necessarily. hselasky: Not necessarily. | |||||
Not Done Inline Actionsyea. The IH_NET bit will never turn on after this point due to another thread. It may turn off, which is what this race is fixing. However, I think this needs an atomic_load of some flavor here is safer than making ie_hflags volatile. imp: yea. The IH_NET bit will never turn on after this point due to another thread. It may turn off… | |||||
Not Done Inline ActionsWhy won't it ever turn on? What if a new network interrupt is added to the interrupt source that already has existing non-network interrupt handler? glebius: Why won't it ever turn on? What if a new network interrupt is added to the interrupt source… | |||||
epoch_count = 0; | epoch_count = 0; | ||||
NET_EPOCH_ENTER(et); | NET_EPOCH_ENTER(et); | ||||
} | } | ||||
while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) { | while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) { | ||||
ithread_execute_handlers(p, ie); | ithread_execute_handlers(p, ie); | ||||
if ((ie->ie_hflags & IH_NET) && | if (needs_epoch && | ||||
++epoch_count >= intr_epoch_batch) { | ++epoch_count >= intr_epoch_batch) { | ||||
jeffUnsubmitted Not Done Inline ActionsNot part of this review but intr_epoch_batch really looks too high to me although I am interested to know if it often increments at all. Most NICs now should be on dedicated msix interrupts where they are the only handler. This means that we only re-enter the handler if we re-enable the interrupt and it fires again while we're on the way out to de-schedule the ithread. So this is a small window. I think it is too large a value because the ithread should be polling and doing as much work as it can on each iteration, probably with some moderation, even if minimal. 1000 iterations could be hundreds of ms. This is really a long time to delay garbage collection. jeff: Not part of this review but intr_epoch_batch really looks too high to me although I am… | |||||
NET_EPOCH_EXIT(et); | NET_EPOCH_EXIT(et); | ||||
epoch_count = 0; | epoch_count = 0; | ||||
NET_EPOCH_ENTER(et); | NET_EPOCH_ENTER(et); | ||||
Not Done Inline ActionsBTW, I am very suspicious of these back-to-back epoch exits and enters. If the thread runs, I do not think that the window where the epoch is exited is large enough for the epoch_wait() sleeping thread to notice it. kib: BTW, I am very suspicious of these back-to-back epoch exits and enters. If the thread runs, I… | |||||
Done Inline ActionsEpoch uses a 32-bit counter, and should notice this. hselasky: Epoch uses a 32-bit counter, and should notice this. | |||||
Not Done Inline ActionsIt is a versioned quiescence, not a lock. jeff: It is a versioned quiescence, not a lock. | |||||
} | } | ||||
} | } | ||||
if (ie->ie_hflags & IH_NET) | if (needs_epoch) | ||||
NET_EPOCH_EXIT(et); | NET_EPOCH_EXIT(et); | ||||
WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); | WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); | ||||
mtx_assert(&Giant, MA_NOTOWNED); | mtx_assert(&Giant, MA_NOTOWNED); | ||||
/* | /* | ||||
* Processed all our interrupts. Now get the sched | * Processed all our interrupts. Now get the sched | ||||
* lock. This may take a while and it_need may get | * lock. This may take a while and it_need may get | ||||
* set again, so we have to check it again. | * set again, so we have to check it again. | ||||
*/ | */ | ||||
thread_lock(td); | thread_lock(td); | ||||
if (atomic_load_acq_int(&ithd->it_need) == 0 && | if (atomic_load_acq_int(&ithd->it_need) == 0 && | ||||
(ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) { | (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) { | ||||
TD_SET_IWAIT(td); | TD_SET_IWAIT(td); | ||||
ie->ie_count = 0; | ie->ie_count = 0; | ||||
Not Done Inline ActionsNote this access. kib: Note this access. | |||||
Done Inline ActionsCan you explain? hselasky: Can you explain? | |||||
Done Inline ActionsI think IT_WAIT flag is set in the free of "ie" case. hselasky: I think IT_WAIT flag is set in the free of "ie" case. | |||||
mi_switch(SW_VOL | SWT_IWAIT); | mi_switch(SW_VOL | SWT_IWAIT); | ||||
} else { | } else { | ||||
if (ithd->it_flags & IT_WAIT) { | if (ithd->it_flags & IT_WAIT) { | ||||
wake = 1; | wake = 1; | ||||
ithd->it_flags &= ~IT_WAIT; | ithd->it_flags &= ~IT_WAIT; | ||||
} | } | ||||
thread_unlock(td); | thread_unlock(td); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 361 Lines • Show Last 20 Lines |
Does the 'needs_epoch' calculation need to happen inside the loop?