Index: FreeBSD/sys/contrib/ck/src/ck_epoch.c =================================================================== --- FreeBSD/sys/contrib/ck/src/ck_epoch.c +++ FreeBSD/sys/contrib/ck/src/ck_epoch.c @@ -137,6 +137,14 @@ CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, ck_epoch_entry_container) +/* + * CK_EPOCH_LENGTH must be a power-of-2 (because (CK_EPOCH_LENGTH - 1) is used + * as a mask, and it must be at least 3 (see comments above)). + */ +#if (CK_EPOCH_LENGTH < 3 || (CK_EPOCH_LENGTH & (CK_EPOCH_LENGTH - 1)) != 0) +#error "CK_EPOCH_LENGTH must be a power of 2 and >= 3" +#endif + #define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1) bool @@ -552,22 +560,36 @@ * There are cases where it will fail to reclaim as early as it could. If this * becomes a problem, we could actually use a heap for epoch buckets but that * is far from ideal too. + * + * Note that this code assumes that nothing will modify the record while the + * code is running. */ bool ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred) { bool active; - unsigned int epoch; + unsigned int epoch, startpend; struct ck_epoch_record *cr = NULL; struct ck_epoch *global = record->global; + startpend = record->n_pending; epoch = ck_pr_load_uint(&global->epoch); /* Serialize epoch snapshots with respect to global epoch. */ ck_pr_fence_memory(); + + /* + * At this point, epoch is the current global epoch value. + * There may or may not be active threads which observed epoch - 1. + * (ck_epoch_scan() will tell us that.) However, there should be + * no active threads which observed epoch - 2. (Note that checking + * epoch - 2 is necessary, as race conditions can allow another + * thread to increment the global epoch before this thread runs.) + */ + ck_epoch_dispatch(record, epoch - 2, deferred); cr = ck_epoch_scan(global, cr, epoch, &active); if (cr != NULL) - return false; + goto done; /* We are at a grace period if all threads are inactive. 
*/ if (active == false) { @@ -575,14 +597,21 @@ for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++) ck_epoch_dispatch(record, epoch, deferred); - return true; + goto done; } - /* If an active thread exists, rely on epoch observation. */ + /* + * If an active thread exists, rely on epoch observation. + * + * All the active threads entered the epoch section during + * the current epoch. Therefore, we can now run the handlers + * for the immediately preceding epoch and advance the epoch. + */ (void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1); + ck_epoch_dispatch(record, epoch - 1, deferred); - ck_epoch_dispatch(record, epoch + 1, deferred); - return true; +done: + return (record->n_pending != startpend); } bool