Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c
+++ sys/kern/kern_clocksource.c
@@ -122,8 +122,9 @@
 	sbintime_t	nextprof;	/* Next profclock() event. */
 	sbintime_t	nextcall;	/* Next callout event. */
 	sbintime_t	nextcallopt;	/* Next optional callout event. */
-	int		ipi;		/* This CPU needs IPI. */
-	int		idle;		/* This CPU is in idle mode. */
+	bool		ipi;		/* This CPU needs IPI. */
+	bool		idle;		/* This CPU is in idle mode. */
+	bool		running;	/* handleevents() is running. */
 };
 
 static DPCPU_DEFINE(struct pcpu_state, timerstate);
@@ -159,7 +160,8 @@
 	struct trapframe *frame;
 	struct pcpu_state *state;
 	int usermode;
-	int done, runs;
+	int done, hardruns, profruns, statruns;
+	bool calloutready;
 
 	CTR3(KTR_SPARE2, "handle at %d: now %d.%08x",
 	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
@@ -174,53 +176,68 @@
 
 	state = DPCPU_PTR(timerstate);
 
-	runs = 0;
+	/* Acquire lock and determine what work needs to be done. */
+	ET_HW_LOCK(state);
+again:
+	hardruns = profruns = statruns = 0;
+	calloutready = false;
 	while (now >= state->nexthard) {
 		state->nexthard += tick_sbt;
-		runs++;
+		hardruns++;
 	}
-	if (runs) {
+	if (hardruns) {
 		hct = DPCPU_PTR(hardclocktime);
 		*hct = state->nexthard - tick_sbt;
-		if (fake < 2) {
-			hardclock_cnt(runs, usermode);
-			done = 1;
-		}
 	}
-	runs = 0;
 	while (now >= state->nextstat) {
 		state->nextstat += statperiod;
-		runs++;
+		statruns++;
 	}
-	if (runs && fake < 2) {
-		statclock_cnt(runs, usermode);
-		done = 1;
-	}
 	if (profiling) {
-		runs = 0;
 		while (now >= state->nextprof) {
 			state->nextprof += profperiod;
-			runs++;
+			profruns++;
 		}
-		if (runs && !fake) {
-			profclock_cnt(runs, usermode, TRAPF_PC(frame));
-			done = 1;
-		}
 	} else
 		state->nextprof = state->nextstat;
 	if (now >= state->nextcallopt || now >= state->nextcall) {
 		state->nextcall = state->nextcallopt = SBT_MAX;
-		callout_process(now);
+		calloutready = true;
 	}
+	state->running = true;
 
-	t = getnextcpuevent(0);
+	/* Drop the lock and do the work. */
+	ET_HW_UNLOCK(state);
+	if (hardruns && fake < 2) {
+		hardclock_cnt(hardruns, usermode);
+		done = 1;
+	}
+	if (statruns && fake < 2) {
+		statclock_cnt(statruns, usermode);
+		done = 1;
+	}
+	if (profruns && !fake) {
+		profclock_cnt(profruns, usermode, TRAPF_PC(frame));
+		done = 1;
+	}
+	if (calloutready)
+		callout_process(now);
+
+	/* Obtain the lock and reschedule the timer. */
 	ET_HW_LOCK(state);
+	t = getnextcpuevent(0);
+	if (!periodic) {
+		now = sbinuptime();
+		if (now >= t || now >= state->nextcallopt)
+			goto again;
+	}
 	if (!busy) {
 		state->idle = 0;
 		state->nextevent = t;
 		loadtimer(now, (fake == 2) &&
 		    (timer->et_flags & ET_FLAGS_PERCPU));
 	}
+	state->running = false;
 	ET_HW_UNLOCK(state);
 	return (done);
 }
@@ -597,6 +613,7 @@
 		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
 		state->nextcall = SBT_MAX;
 		state->nextcallopt = SBT_MAX;
+		state->ipi = state->idle = state->running = false;
 	}
 	periodic = want_periodic;
 	/* Grab requested timer or the best of present. */
@@ -842,6 +859,13 @@
 	state->nextcall = bt;
 	/* If there is some other event set earlier -- do nothing. */
 	if (bt >= state->nextevent)
+		goto done;
+	/*
+	 * If handleevents() is running, it will pick up the earlier
+	 * callout and schedule the correct execution at the correct
+	 * time.
+	 */
+	if (state->running)
 		goto done;
 	state->nextevent = bt;
 	/* If timer is periodic -- there is nothing to reprogram. */