diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -72,6 +72,13 @@
 #define _RANDOM_HARVEST_UMA_OFF (1u << RANDOM_UMA)
 #endif
 
+/*
+ * Note that random_sources_feed() will also use this to try and split up
+ * entropy into a subset of pools per iteration with the goal of feeding
+ * HARVESTSIZE into every pool at least once per second.
+ */
+#define	RANDOM_KTHREAD_HZ	10
+
 static void random_kthread(void);
 static void random_sources_feed(void);
 
@@ -199,7 +206,8 @@
 			}
 		}
 		/* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */
-		tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-", SBT_1S/10, 0, C_PREL(1));
+		tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-",
+		    SBT_1S/RANDOM_KTHREAD_HZ, 0, C_PREL(1));
 	}
 	random_kthread_control = -1;
 	wakeup(&harvest_context.hc_kthread_proc);
@@ -229,11 +237,24 @@
 	uint32_t entropy[HARVESTSIZE];
 	struct epoch_tracker et;
 	struct random_sources *rrs;
-	u_int i, n;
+	u_int i, n, npools;
 	bool rse_warm;
 
 	rse_warm = epoch_inited;
 
+	/*
+	 * Evenly-ish distribute pool population across the second based on how
+	 * frequently random_kthread iterates.
+	 *
+	 * For Fortuna, the math currently works out as such:
+	 *
+	 * 64 bits * 4 pools = 256 bits per iteration
+	 * 256 bits * 10 Hz = 2560 bits per second, 320 B/s
+	 *
+	 */
+	npools = roundup(p_random_alg_context->ra_poolcount, RANDOM_KTHREAD_HZ);
+	npools /= RANDOM_KTHREAD_HZ;
+
 	/*
 	 * Step over all of live entropy sources, and feed their output
 	 * to the system-wide RNG.
@@ -241,7 +262,7 @@
 	if (rse_warm)
 		epoch_enter_preempt(rs_epoch, &et);
 	CK_LIST_FOREACH(rrs, &source_list, rrs_entries) {
-		for (i = 0; i < p_random_alg_context->ra_poolcount; i++) {
+		for (i = 0; i < npools; i++) {
 			n = rrs->rrs_source->rs_read(entropy, sizeof(entropy));
 			KASSERT((n <= sizeof(entropy)), ("%s: rs_read returned too much data (%u > %zu)", __func__, n, sizeof(entropy)));
 			/*
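
For reference, a minimal standalone sketch of the npools arithmetic the new comment describes. The pool count (32, per Fortuna's example figures in the comment) and the 64-bit read size are assumptions used only for illustration; in the kernel these come from p_random_alg_context->ra_poolcount and HARVESTSIZE, and POOLCOUNT/READ_BITS below are hypothetical stand-ins.

/*
 * Sketch of the pool-scheduling math added in this patch: feed a subset of
 * pools each iteration so every pool is visited about once per second.
 */
#include <stdio.h>

#define	RANDOM_KTHREAD_HZ	10	/* random_kthread iterations per second */
#define	POOLCOUNT		32	/* assumed Fortuna pool count */
#define	READ_BITS		64	/* assumed bits returned per rs_read() call */

/* Same rounding helper as roundup() in sys/param.h. */
#define	roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	unsigned int npools, bits_per_iter, bits_per_sec;

	/* Pools fed per iteration, rounded up as in the patch. */
	npools = roundup(POOLCOUNT, RANDOM_KTHREAD_HZ);
	npools /= RANDOM_KTHREAD_HZ;				/* 4 */

	bits_per_iter = npools * READ_BITS;			/* 256 */
	bits_per_sec = bits_per_iter * RANDOM_KTHREAD_HZ;	/* 2560, i.e. 320 B/s */

	printf("npools=%u, %u bits/iteration, %u bits/s (%u B/s)\n",
	    npools, bits_per_iter, bits_per_sec, bits_per_sec / 8);
	return (0);
}

Running this prints "npools=4, 256 bits/iteration, 2560 bits/s (320 B/s)", matching the figures in the patch's comment.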