Changeset View
Standalone View
sys/kern/subr_epoch.c
Show First 20 Lines • Show All 80 Lines • ▼ Show 20 Lines | struct epoch { | ||||
epoch_record_t e_pcpu_record; | epoch_record_t e_pcpu_record; | ||||
int e_idx; | int e_idx; | ||||
int e_flags; | int e_flags; | ||||
struct sx e_drain_sx; | struct sx e_drain_sx; | ||||
struct mtx e_drain_mtx; | struct mtx e_drain_mtx; | ||||
volatile int e_drain_count; | volatile int e_drain_count; | ||||
const char *e_name; | const char *e_name; | ||||
}; | }; | ||||
markj: Why is it defined like this? I can't see the purpose of `val`. | |||||
Done Inline ActionsThe atomic functions don't accept "const char *ptr", instead of casting it in the code, I make a cast in the union ... hselasky: The atomic functions don't accept "const char *ptr", instead of casting it in the code, I make… | |||||
Done Inline ActionsI don't object but personally I'd prefer the ugliness of the casts to the union. atomic_(load|store)_ptr were changed to not require casts recently, it's only the MD atomic_(load|store)_(acq|rel)_* macros that still need it for now. Please add a comment explaining why it's a union. markj: I don't object but personally I'd prefer the ugliness of the casts to the union. atomic_… | |||||
/* arbitrary --- needs benchmarking */ | /* arbitrary --- needs benchmarking */ | ||||
#define MAX_ADAPTIVE_SPIN 100 | #define MAX_ADAPTIVE_SPIN 100 | ||||
#define MAX_EPOCHS 64 | #define MAX_EPOCHS 64 | ||||
CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context)); | CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context)); | ||||
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, | SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, | ||||
"epoch information"); | "epoch information"); | ||||
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, | SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, | ||||
▲ Show 20 Lines • Show All 219 Lines • ▼ Show 20 Lines | |||||
/*
 * Raise or lower the scheduling priority of thread "td" to "prio".
 * The thread lock must be held across sched_prio() to serialize
 * against concurrent priority changes.
 */
epoch_adjust_prio(struct thread *td, u_char prio)
{
	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}
/* | |||||
* NOTE: epoch_alloc() and epoch_free() should only be called from | |||||
* SYSINIT() and SYSUNINIT(), due to internal serialization | |||||
* requirements. | |||||
markjUnsubmitted Done Inline ActionsCan't we just use a global sx to synchronize epoch allocation and free? markj: Can't we just use a global `sx` to synchronize epoch allocation and free? | |||||
*/ | |||||
epoch_t | epoch_t | ||||
epoch_alloc(const char *name, int flags) | epoch_alloc(const char *name, int flags) | ||||
{ | { | ||||
epoch_t epoch; | epoch_t epoch; | ||||
int i; | |||||
if (__predict_false(!inited)) | if (__predict_false(!inited)) | ||||
panic("%s called too early in boot", __func__); | panic("%s called too early in boot", __func__); | ||||
epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK); | epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK); | ||||
Done Inline ActionsThis blank line is not needed. kib: This blank line is not needed. | |||||
ck_epoch_init(&epoch->e_epoch); | ck_epoch_init(&epoch->e_epoch); | ||||
epoch_ctor(epoch); | epoch_ctor(epoch); | ||||
MPASS(epoch_count < MAX_EPOCHS - 2); | |||||
for (i = 0; i != epoch_count; i++) { | |||||
if (allepochs[i] == NULL) | |||||
Done Inline ActionsComments should be a proper sentences. kib: Comments should be a proper sentences. | |||||
break; | |||||
Done Inline ActionsI do not quite understand how this loop works. You only loop up to the top allocated index, not to MAX_EPOCH, so if there is no holes, you cannot allocate anything. kib: I do not quite understand how this loop works. You only loop up to the top allocated index… | |||||
Done Inline ActionsSee the epoch_count++ below. hselasky: See the epoch_count++ below. | |||||
} | |||||
MPASS(i < MAX_EPOCHS - 2); | |||||
epoch->e_flags = flags; | epoch->e_flags = flags; | ||||
epoch->e_idx = epoch_count; | epoch->e_idx = i; | ||||
epoch->e_name = name; | epoch->e_name = name; | ||||
sx_init(&epoch->e_drain_sx, "epoch-drain-sx"); | sx_init(&epoch->e_drain_sx, "epoch-drain-sx"); | ||||
mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF); | mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF); | ||||
allepochs[epoch_count++] = epoch; | allepochs[i++] = epoch; | ||||
if (i > epoch_count) | |||||
epoch_count = i; | |||||
return (epoch); | return (epoch); | ||||
kibUnsubmitted Done Inline ActionsMay be we should bite the bullet and return NULL if there is no space in the array ? kib: May be we should bite the bullet and return NULL if there is no space in the array ? | |||||
Done Inline ActionsI suggest renaming this variable into something like epoch_array_max_slot. It is not epoch_count. kib: I suggest renaming this variable into something like epoch_array_max_slot. It is not… | |||||
} | } | ||||
void | void | ||||
Not Done Inline ActionsPlease use atomic(9) instead of volatile. markj: Please use atomic(9) instead of `volatile`. | |||||
epoch_free(epoch_t epoch) | epoch_free(epoch_t epoch) | ||||
{ | { | ||||
epoch_drain_callbacks(epoch); | epoch_drain_callbacks(epoch); | ||||
allepochs[epoch->e_idx] = NULL; | allepochs[epoch->e_idx] = NULL; | ||||
epoch_wait(global_epoch); | epoch_wait(global_epoch); | ||||
uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record); | uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record); | ||||
mtx_destroy(&epoch->e_drain_mtx); | mtx_destroy(&epoch->e_drain_mtx); | ||||
▲ Show 20 Lines • Show All 347 Lines • ▼ Show 20 Lines | epoch_call_task(void *arg __unused) | ||||
epoch_t epoch; | epoch_t epoch; | ||||
ck_stack_t cb_stack; | ck_stack_t cb_stack; | ||||
int i, npending, total; | int i, npending, total; | ||||
ck_stack_init(&cb_stack); | ck_stack_init(&cb_stack); | ||||
critical_enter(); | critical_enter(); | ||||
epoch_enter(global_epoch); | epoch_enter(global_epoch); | ||||
for (total = i = 0; i < epoch_count; i++) { | for (total = i = 0; i < epoch_count; i++) { | ||||
if (__predict_false((epoch = allepochs[i]) == NULL)) | if (__predict_false((epoch = allepochs[i]) == NULL)) | ||||
Done Inline ActionsIs it safe to iterate to epoch_array_max_count instead of MAX_EPOCHS? Could we miss an update to max_count there and then skip some epoch? kib: Is it safe to iterate to epoch_array_max_count instead of MAX_EPOCHS? Could we miss an update… | |||||
Done Inline Actionse_name is protected by global_epoch. hselasky: e_name is protected by global_epoch. | |||||
Done Inline ActionsFrom what I see, global epoch ensures that e_name is not cleared until epoch_call_task() ends its global section. I asked about a different situation, where the update from epoch_alloc (most likely, to epoch_array_max_count) is missed by epoch_call_task and then the corresponding epoch is not drained by the task. But I see that you changed the loop limit to MAX_EPOCHS already. kib: From what I see, global epoch ensures that e_name is not cleared until epoch_call_task() ends… | |||||
continue; | continue; | ||||
er = epoch_currecord(epoch); | er = epoch_currecord(epoch); | ||||
record = &er->er_record; | record = &er->er_record; | ||||
if ((npending = record->n_pending) == 0) | if ((npending = record->n_pending) == 0) | ||||
continue; | continue; | ||||
ck_epoch_poll_deferred(record, &cb_stack); | ck_epoch_poll_deferred(record, &cb_stack); | ||||
total += npending - record->n_pending; | total += npending - record->n_pending; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 131 Lines • Show Last 20 Lines |
Why is it defined like this? I can't see the purpose of val.