Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c
+++ sys/kern/sched_ule.c
@@ -302,6 +302,7 @@
 static void sched_interact_update(struct thread *);
 static void sched_interact_fork(struct thread *);
 static void sched_pctcpu_update(struct td_sched *, int);
+static int sched_random(void);
 
 /* Operations on per processor queues */
 static struct thread *tdq_choose(struct tdq *);
@@ -357,6 +358,20 @@
     "struct proc *");
 
 /*
+ * We need some randomness; a linear congruential generator (LCG)
+ * provides enough for proper operation.
+ */
+static int
+sched_random(void)
+{
+	int rnd, *rndptr;
+
+	rndptr = DPCPU_PTR(randomval);
+	rnd = (u_int)(*rndptr = *rndptr * 69069 + 5) >> 26;
+	return (rnd);
+}
+
+/*
  * Print the threads waiting on a run-queue.
  */
 static void
@@ -651,7 +666,7 @@
 	cpuset_t cpumask;
 	struct cpu_group *child;
 	struct tdq *tdq;
-	int cpu, i, hload, lload, load, total, rnd, *rndptr;
+	int cpu, i, hload, lload, load, total, rnd;
 
 	total = 0;
 	cpumask = cg->cg_mask;
@@ -700,9 +715,8 @@
 		CPU_CLR(cpu, &cpumask);
 		tdq = TDQ_CPU(cpu);
 		load = tdq->tdq_load * 256;
-		rndptr = DPCPU_PTR(randomval);
-		rnd = (*rndptr = *rndptr * 69069 + 5) >> 26;
+		rnd = sched_random();
 		if (match & CPU_SEARCH_LOWEST) {
 			if (cpu == low->cs_prefer)
 				load -= 64;
 			/* If that CPU is allowed and get data. */
@@ -861,14 +875,15 @@
 {
 	struct tdq *tdq;
 
-	/*
-	 * Select a random time between .5 * balance_interval and
-	 * 1.5 * balance_interval.
-	 */
-	balance_ticks = max(balance_interval / 2, 1);
-	balance_ticks += random() % balance_interval;
 	if (smp_started == 0 || rebalance == 0)
 		return;
+
+	/*
+	 * Select a random time between .5 * balance_interval and
+	 * 1.5 * balance_interval.
+	 */
+	balance_ticks = max(balance_interval / 2, 1) +
+	    (sched_random() % balance_interval);
 	tdq = TDQ_SELF();
 	TDQ_UNLOCK(tdq);
 	sched_balance_group(cpu_top);
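
Note on the generator, for review context: the recurrence x_{n+1} = 69069 * x_n + 5
(mod 2^32) is a classic linear congruential generator using the well-known 69069
multiplier, not an LFSR. The low-order bits of an LCG are the weakest, so the code
keeps only the upper 6 bits of the state (>> 26). Below is a minimal userland sketch
of the same step, not kernel code: the names are hypothetical, a plain static seed
stands in for the per-CPU DPCPU(randomval) state (kept per CPU in the kernel to avoid
cross-CPU contention), and uint32_t makes the mod-2^32 wraparound explicit and keeps
the shift non-negative, which a plain signed int would not guarantee.

#include <stdint.h>
#include <stdio.h>

static uint32_t seed = 1;		/* stand-in for the per-CPU randomval */

static int
lcg_step(void)
{
	seed = seed * 69069 + 5;	/* x = (69069 * x + 5) mod 2^32 */
	return (seed >> 26);		/* keep the top 6 bits: 0..63 */
}

int
main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("%d\n", lcg_step());
	return (0);
}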
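
Note on the rebalance timer: moving the computation below the early return means
balance_ticks is only written when rebalancing is actually enabled, and the new
expression preserves the documented range: max(balance_interval / 2, 1) plus a value
in [0, balance_interval - 1] lands between roughly 0.5 and 1.5 times balance_interval.
This does rely on sched_random() % balance_interval being non-negative, hence the
unsigned shift in sched_random() above. A small standalone check of the bounds
(illustrative values, not kernel code):

#include <assert.h>

#define	max(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	int balance_interval = 128;	/* illustrative; a tunable in the kernel */
	int rnd, ticks;

	for (rnd = 0; rnd < balance_interval; rnd++) {
		ticks = max(balance_interval / 2, 1) +
		    (rnd % balance_interval);
		assert(ticks >= balance_interval / 2);			/* >= .5x */
		assert(ticks < balance_interval / 2 + balance_interval);	/* < 1.5x */
	}
	return (0);
}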