Index: sys/kern/kern_clock.c
===================================================================
--- sys/kern/kern_clock.c
+++ sys/kern/kern_clock.c
@@ -710,8 +710,7 @@
 	td->td_incruntime += runtime;
 	PCPU_SET(switchtime, new_switchtime);
 
-	for ( ; cnt > 0; cnt--)
-		sched_clock(td);
+	sched_clock(td, cnt);
 	thread_unlock(td);
 #ifdef HWPMC_HOOKS
 	if (td->td_intr_frame != NULL)
Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c
+++ sys/kern/sched_4bsd.c
@@ -706,8 +706,8 @@
  * favor processes which haven't run much recently, and to round-robin
  * among other processes.
  */
-void
-sched_clock(struct thread *td)
+static void
+sched_clock_tick(struct thread *td)
 {
 	struct pcpuidlestat *stat;
 	struct td_sched *ts;
@@ -736,6 +736,14 @@
 	stat->idlecalls = 0;
 }
 
+void
+sched_clock(struct thread *td, int cnt)
+{
+
+	for ( ; cnt > 0; cnt--)
+		sched_clock_tick(td);
+}
+
 /*
  * Charge child's scheduling CPU usage to parent.
  */
Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c
+++ sys/kern/sched_ule.c
@@ -2415,7 +2415,7 @@
  * threads.
  */
 void
-sched_clock(struct thread *td)
+sched_clock(struct thread *td, int cnt)
 {
 	struct tdq *tdq;
 	struct td_sched *ts;
@@ -2426,8 +2426,10 @@
 	/*
 	 * We run the long term load balancer infrequently on the first cpu.
	 */
-	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0) {
-		if (balance_ticks && --balance_ticks == 0)
+	if (balance_tdq == tdq && smp_started != 0 && rebalance != 0 &&
+	    balance_ticks != 0) {
+		balance_ticks -= cnt;
+		if (balance_ticks <= 0)
 			sched_balance();
 	}
 #endif
@@ -2449,14 +2451,15 @@
 	}
 	ts = td_get_sched(td);
 	sched_pctcpu_update(ts, 1);
-	if (td->td_pri_class & PRI_FIFO_BIT)
+	if ((td->td_pri_class & PRI_FIFO_BIT) || TD_IS_IDLETHREAD(td))
 		return;
+
 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
 		/*
 		 * We used a tick; charge it to the thread so
 		 * that we can compute our interactivity.
 		 */
-		td_get_sched(td)->ts_runtime += tickincr;
+		td_get_sched(td)->ts_runtime += tickincr * cnt;
 		sched_interact_update(td);
 		sched_priority(td);
 	}
@@ -2465,7 +2468,8 @@
 	 * Force a context switch if the current thread has used up a full
 	 * time slice (default is 100ms).
 	 */
-	if (!TD_IS_IDLETHREAD(td) && ++ts->ts_slice >= tdq_slice(tdq)) {
+	ts->ts_slice += cnt;
+	if (ts->ts_slice >= tdq_slice(tdq)) {
 		ts->ts_slice = 0;
 		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
 	}
Index: sys/sys/sched.h
===================================================================
--- sys/sys/sched.h
+++ sys/sys/sched.h
@@ -135,7 +135,7 @@
  */
void	sched_add(struct thread *td, int flags);
struct thread *sched_choose(void);
-void	sched_clock(struct thread *td);
+void	sched_clock(struct thread *td, int cnt);
void	sched_idletd(void *);
void	sched_preempt(struct thread *td);
void	sched_relinquish(struct thread *td);
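
Taken together, the patch moves the per-tick loop out of statclock() and into the
scheduler interface: sched_clock() now takes the number of accumulated ticks.
4BSD keeps the old one-tick code path (renamed sched_clock_tick()) and loops
internally, while ULE charges the whole batch at once by scaling tickincr and the
slice counter by cnt; the idle-thread check correspondingly moves up into the
early return, since idle ticks no longer need to pass through the slice
accounting. The program below is a minimal standalone sketch of that batching
idea, not kernel code; struct toy_sched and its fields are illustrative
stand-ins for the td_sched state named in the diff.

/*
 * Toy model: charging "cnt" ticks in one call is equivalent to
 * calling the per-tick routine cnt times, for this accounting.
 */
#include <stdio.h>

struct toy_sched {
	int  ts_slice;		/* ticks used in the current slice */
	int  slice_len;		/* slice length, cf. tdq_slice(tdq) */
	long ts_runtime;	/* cf. ts_runtime in struct td_sched */
	int  tickincr;		/* runtime charged per tick */
};

/* Old interface: one call per statclock tick. */
static void
toy_clock_tick(struct toy_sched *ts)
{
	ts->ts_runtime += ts->tickincr;
	if (++ts->ts_slice >= ts->slice_len) {
		ts->ts_slice = 0;
		/* kernel would set TDF_NEEDRESCHED | TDF_SLICEEND here */
	}
}

/* New interface: charge cnt ticks in one call, as sched_ule.c now does. */
static void
toy_clock(struct toy_sched *ts, int cnt)
{
	ts->ts_runtime += (long)ts->tickincr * cnt;
	ts->ts_slice += cnt;
	if (ts->ts_slice >= ts->slice_len) {
		ts->ts_slice = 0;
		/* kernel would set TDF_NEEDRESCHED | TDF_SLICEEND here */
	}
}

int
main(void)
{
	struct toy_sched a = { 0, 10, 0, 1 };
	struct toy_sched b = { 0, 10, 0, 1 };
	int i;

	for (i = 0; i < 3; i++)		/* old: loop in statclock() */
		toy_clock_tick(&a);
	toy_clock(&b, 3);		/* new: one batched call */
	printf("runtime %ld vs %ld, slice %d vs %d\n",
	    a.ts_runtime, b.ts_runtime, a.ts_slice, b.ts_slice);
	return (0);
}

Note the design split the sketch mirrors: 4BSD preserves exact per-tick
semantics by looping, whereas ULE computes the batch directly (balance_ticks -=
cnt, ts_runtime += tickincr * cnt, ts_slice += cnt), trading a per-tick slice
check for a single comparison after the batch is charged.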