Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/subr_epoch.c
/*- | /*- | ||||
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD | * SPDX-License-Identifier: BSD-2-Clause-FreeBSD | ||||
* | * | ||||
* Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org> | * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org> | ||||
* Copyright (c) 2017-2021, Hans Petter Selasky <hselasky@freebsd.org> | |||||
* | * | ||||
* Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without | ||||
* modification, are permitted provided that the following conditions | * modification, are permitted provided that the following conditions | ||||
* are met: | * are met: | ||||
* 1. Redistributions of source code must retain the above copyright | * 1. Redistributions of source code must retain the above copyright | ||||
* notice, this list of conditions and the following disclaimer. | * notice, this list of conditions and the following disclaimer. | ||||
* 2. Redistributions in binary form must reproduce the above copyright | * 2. Redistributions in binary form must reproduce the above copyright | ||||
* notice, this list of conditions and the following disclaimer in the | * notice, this list of conditions and the following disclaimer in the | ||||
▲ Show 20 Lines • Show All 120 Lines • ▼ Show 20 Lines | |||||
static struct epoch epoch_array[MAX_EPOCHS]; | static struct epoch epoch_array[MAX_EPOCHS]; | ||||
DPCPU_DEFINE(struct grouptask, epoch_cb_task); | DPCPU_DEFINE(struct grouptask, epoch_cb_task); | ||||
DPCPU_DEFINE(int, epoch_cb_count); | DPCPU_DEFINE(int, epoch_cb_count); | ||||
static __read_mostly int inited; | static __read_mostly int inited; | ||||
__read_mostly epoch_t global_epoch; | __read_mostly epoch_t global_epoch; | ||||
__read_mostly epoch_t global_epoch_preempt; | __read_mostly epoch_t global_epoch_preempt; | ||||
__read_mostly epoch_t global_epoch_sleepable; | |||||
static void epoch_call_task(void *context __unused); | static void epoch_call_task(void *context __unused); | ||||
static uma_zone_t pcpu_zone_record; | static uma_zone_t pcpu_zone_record; | ||||
static struct sx epoch_sx; | static struct sx epoch_sx; | ||||
#define EPOCH_LOCK() sx_xlock(&epoch_sx) | #define EPOCH_LOCK() sx_xlock(&epoch_sx) | ||||
#define EPOCH_UNLOCK() sx_xunlock(&epoch_sx) | #define EPOCH_UNLOCK() sx_xunlock(&epoch_sx) | ||||
▲ Show 20 Lines • Show All 137 Lines • ▼ Show 20 Lines | taskqgroup_attach_cpu(qgroup_softirq, | ||||
DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL, | DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL, | ||||
"epoch call task"); | "epoch call task"); | ||||
} | } | ||||
#ifdef EPOCH_TRACE | #ifdef EPOCH_TRACE | ||||
SLIST_INIT(&thread0.td_epochs); | SLIST_INIT(&thread0.td_epochs); | ||||
#endif | #endif | ||||
sx_init(&epoch_sx, "epoch-sx"); | sx_init(&epoch_sx, "epoch-sx"); | ||||
inited = 1; | inited = 1; | ||||
global_epoch = epoch_alloc("Global", 0); | global_epoch = epoch_alloc("Global critical", EPOCH_CRITICAL); | ||||
global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT); | global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT); | ||||
global_epoch_sleepable = epoch_alloc("Global sleepable", EPOCH_SLEEPABLE); | |||||
} | } | ||||
SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL); | SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL); | ||||
#if !defined(EARLY_AP_STARTUP) | #if !defined(EARLY_AP_STARTUP) | ||||
static void | static void | ||||
epoch_init_smp(void *dummy __unused) | epoch_init_smp(void *dummy __unused) | ||||
{ | { | ||||
inited = 2; | inited = 2; | ||||
Show All 29 Lines | |||||
epoch_t | epoch_t | ||||
epoch_alloc(const char *name, int flags) | epoch_alloc(const char *name, int flags) | ||||
{ | { | ||||
epoch_t epoch; | epoch_t epoch; | ||||
int i; | int i; | ||||
MPASS(name != NULL); | MPASS(name != NULL); | ||||
MPASS((flags & EPOCH_TYPE_MASK) != EPOCH_RESERVED); | |||||
if (__predict_false(!inited)) | if (__predict_false(!inited)) | ||||
panic("%s called too early in boot", __func__); | panic("%s called too early in boot", __func__); | ||||
EPOCH_LOCK(); | EPOCH_LOCK(); | ||||
/* | /* | ||||
* Find a free index in the epoch array. If no free index is | * Find a free index in the epoch array. If no free index is | ||||
▲ Show 20 Lines • Show All 92 Lines • ▼ Show 20 Lines | _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE) | ||||
struct epoch_record *er; | struct epoch_record *er; | ||||
struct thread *td; | struct thread *td; | ||||
MPASS(cold || epoch != NULL); | MPASS(cold || epoch != NULL); | ||||
td = curthread; | td = curthread; | ||||
MPASS((vm_offset_t)et >= td->td_kstack && | MPASS((vm_offset_t)et >= td->td_kstack && | ||||
(vm_offset_t)et + sizeof(struct epoch_tracker) <= | (vm_offset_t)et + sizeof(struct epoch_tracker) <= | ||||
td->td_kstack + td->td_kstack_pages * PAGE_SIZE); | td->td_kstack + td->td_kstack_pages * PAGE_SIZE); | ||||
INIT_CHECK(epoch); | INIT_CHECK(epoch); | ||||
MPASS(epoch->e_flags & EPOCH_PREEMPT); | MPASS((epoch->e_flags & EPOCH_TYPE_MASK) == EPOCH_PREEMPT); | ||||
#ifdef EPOCH_TRACE | #ifdef EPOCH_TRACE | ||||
epoch_trace_enter(td, epoch, et, file, line); | epoch_trace_enter(td, epoch, et, file, line); | ||||
#endif | #endif | ||||
et->et_td = td; | et->et_td = td; | ||||
THREAD_NO_SLEEPING(); | THREAD_NO_SLEEPING(); | ||||
critical_enter(); | critical_enter(); | ||||
sched_pin(); | sched_pin(); | ||||
et->et_old_priority = td->td_priority; | et->et_old_priority = td->td_priority; | ||||
er = epoch_currecord(epoch); | er = epoch_currecord(epoch); | ||||
/* Record-level tracking is reserved for non-preemptible epochs. */ | /* Record-level tracking is reserved for non-preemptible epochs. */ | ||||
MPASS(er->er_td == NULL); | MPASS(er->er_td == NULL); | ||||
TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link); | TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link); | ||||
ck_epoch_begin(&er->er_record, &et->et_section); | ck_epoch_begin(&er->er_record, &et->et_section); | ||||
critical_exit(); | critical_exit(); | ||||
} | } | ||||
/*
 * Enter a sleepable epoch section using the caller-supplied,
 * stack-allocated tracker 'et'.  Unlike _epoch_enter_preempt() this
 * variant does not set THREAD_NO_SLEEPING(), so the caller may sleep
 * while inside the section.  Must be paired with
 * _epoch_exit_sleepable() on the same tracker.
 */
void
_epoch_enter_sleepable(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;
	MPASS(cold || epoch != NULL);
	td = curthread;
	/* The tracker must live on the calling thread's kernel stack. */
	MPASS((vm_offset_t)et >= td->td_kstack &&
	    (vm_offset_t)et + sizeof(struct epoch_tracker) <=
	    td->td_kstack + td->td_kstack_pages * PAGE_SIZE);
	INIT_CHECK(epoch);
	MPASS((epoch->e_flags & EPOCH_TYPE_MASK) == EPOCH_SLEEPABLE);
#ifdef EPOCH_TRACE
	epoch_trace_enter(td, epoch, et, file, line);
#endif
	et->et_td = td;
	/* Priority tracking is only used by the preemptible variant. */
	et->et_old_priority = 0;	/* not used */
	critical_enter();
	/*
	 * Pin the thread so the per-CPU record looked up below stays
	 * ours.  NOTE(review): the pin appears to be held across any
	 * sleep inside the section and dropped only by
	 * _epoch_exit_sleepable() — confirm against sched_pin() rules.
	 */
	sched_pin();
	er = epoch_currecord(epoch);
	/* Record-level tracking is reserved for non-preemptible epochs. */
	MPASS(er->er_td == NULL);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}
void | |||||
epoch_enter(epoch_t epoch) | epoch_enter(epoch_t epoch) | ||||
{ | { | ||||
epoch_record_t er; | epoch_record_t er; | ||||
MPASS(cold || epoch != NULL); | MPASS(cold || epoch != NULL); | ||||
INIT_CHECK(epoch); | INIT_CHECK(epoch); | ||||
critical_enter(); | critical_enter(); | ||||
er = epoch_currecord(epoch); | er = epoch_currecord(epoch); | ||||
Show All 16 Lines | _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE) | ||||
struct thread *td; | struct thread *td; | ||||
INIT_CHECK(epoch); | INIT_CHECK(epoch); | ||||
td = curthread; | td = curthread; | ||||
critical_enter(); | critical_enter(); | ||||
sched_unpin(); | sched_unpin(); | ||||
THREAD_SLEEPING_OK(); | THREAD_SLEEPING_OK(); | ||||
er = epoch_currecord(epoch); | er = epoch_currecord(epoch); | ||||
MPASS(epoch->e_flags & EPOCH_PREEMPT); | MPASS((epoch->e_flags & EPOCH_TYPE_MASK) == EPOCH_PREEMPT); | ||||
MPASS(et != NULL); | MPASS(et != NULL); | ||||
MPASS(et->et_td == td); | MPASS(et->et_td == td); | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
et->et_td = (void*)0xDEADBEEF; | et->et_td = (void*)0xDEADBEEF; | ||||
/* Record-level tracking is reserved for non-preemptible epochs. */ | /* Record-level tracking is reserved for non-preemptible epochs. */ | ||||
MPASS(er->er_td == NULL); | MPASS(er->er_td == NULL); | ||||
#endif | #endif | ||||
ck_epoch_end(&er->er_record, &et->et_section); | ck_epoch_end(&er->er_record, &et->et_section); | ||||
TAILQ_REMOVE(&er->er_tdlist, et, et_link); | TAILQ_REMOVE(&er->er_tdlist, et, et_link); | ||||
er->er_gen++; | er->er_gen++; | ||||
if (__predict_false(et->et_old_priority != td->td_priority)) | if (__predict_false(et->et_old_priority != td->td_priority)) | ||||
epoch_adjust_prio(td, et->et_old_priority); | epoch_adjust_prio(td, et->et_old_priority); | ||||
critical_exit(); | critical_exit(); | ||||
#ifdef EPOCH_TRACE | #ifdef EPOCH_TRACE | ||||
epoch_trace_exit(td, epoch, et, file, line); | epoch_trace_exit(td, epoch, et, file, line); | ||||
#endif | #endif | ||||
} | } | ||||
/*
 * Leave a sleepable epoch section previously entered with
 * _epoch_enter_sleepable() using the same tracker 'et'.
 */
void
_epoch_exit_sleepable(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;
	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	/* Drop the CPU pin taken at section entry. */
	sched_unpin();
	er = epoch_currecord(epoch);
	MPASS((epoch->e_flags & EPOCH_TYPE_MASK) == EPOCH_SLEEPABLE);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef INVARIANTS
	/* Poison the tracker to catch use-after-exit. */
	et->et_td = (void*)0xDEADBEEF;
	/* The sleepable variant never records a priority at entry. */
	MPASS(et->et_old_priority == 0);
	/* Record-level tracking is reserved for non-preemptible epochs. */
	MPASS(er->er_td == NULL);
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	/*
	 * Bump the record generation; presumably consulted by the
	 * drain/wait machinery (not visible here) — verify.
	 */
	er->er_gen++;
	critical_exit();
#ifdef EPOCH_TRACE
	epoch_trace_exit(td, epoch, et, file, line);
#endif
}
void | |||||
epoch_exit(epoch_t epoch) | epoch_exit(epoch_t epoch) | ||||
{ | { | ||||
epoch_record_t er; | epoch_record_t er; | ||||
INIT_CHECK(epoch); | INIT_CHECK(epoch); | ||||
er = epoch_currecord(epoch); | er = epoch_currecord(epoch); | ||||
ck_epoch_end(&er->er_record, NULL); | ck_epoch_end(&er->er_record, NULL); | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
▲ Show 20 Lines • Show All 156 Lines • ▼ Show 20 Lines | epoch_wait_preempt(epoch_t epoch) | ||||
u_char old_prio; | u_char old_prio; | ||||
int locks __unused; | int locks __unused; | ||||
MPASS(cold || epoch != NULL); | MPASS(cold || epoch != NULL); | ||||
INIT_CHECK(epoch); | INIT_CHECK(epoch); | ||||
td = curthread; | td = curthread; | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
locks = curthread->td_locks; | locks = curthread->td_locks; | ||||
MPASS(epoch->e_flags & EPOCH_PREEMPT); | MPASS((epoch->e_flags & EPOCH_TYPE_MASK) == EPOCH_PREEMPT); | ||||
if ((epoch->e_flags & EPOCH_LOCKED) == 0) | if ((epoch->e_flags & EPOCH_LOCKED) == 0) | ||||
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | ||||
"epoch_wait() can be long running"); | "epoch_wait() can be long running"); | ||||
KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle " | KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle " | ||||
"of an epoch section of the same epoch")); | "of an epoch section of the same epoch")); | ||||
#endif | #endif | ||||
DROP_GIANT(); | DROP_GIANT(); | ||||
thread_lock(td); | thread_lock(td); | ||||
Show All 24 Lines | #endif | ||||
/* restore thread priority */ | /* restore thread priority */ | ||||
sched_prio(td, old_prio); | sched_prio(td, old_prio); | ||||
thread_unlock(td); | thread_unlock(td); | ||||
PICKUP_GIANT(); | PICKUP_GIANT(); | ||||
KASSERT(td->td_locks == locks, | KASSERT(td->td_locks == locks, | ||||
("%d residual locks held", td->td_locks - locks)); | ("%d residual locks held", td->td_locks - locks)); | ||||
} | } | ||||
/* | |||||
* epoch_block_handler_sleepable() is a callback from the CK code when another | |||||
* thread is currently in an epoch section. | |||||
*/ | |||||
static void | static void | ||||
epoch_block_handler_sleepable(struct ck_epoch *global __unused, | |||||
ck_epoch_record_t *cr, void *arg __unused) | |||||
{ | |||||
epoch_record_t record; | |||||
struct thread *td; | |||||
struct epoch_tracker *tdwait; | |||||
record = __containerof(cr, struct epoch_record, er_record); | |||||
td = curthread; | |||||
counter_u64_add(block_count, 1); | |||||
/* | |||||
* We lost a race and there's no longer any threads | |||||
* on the CPU in an epoch section. | |||||
*/ | |||||
if (TAILQ_EMPTY(&record->er_tdlist)) | |||||
return; | |||||
if (record->er_cpuid == curcpu) { | |||||
bool is_sleeping = 0; | |||||
uint8_t prio = 0; | |||||
/* | |||||
* Find the lowest priority or sleeping thread which | |||||
* is blocking synchronization on this CPU core. All | |||||
* the threads in the queue are CPU-pinned and cannot | |||||
* go anywhere while the current thread is locked. | |||||
*/ | |||||
TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) { | |||||
if (tdwait->et_td->td_priority > prio) | |||||
prio = tdwait->et_td->td_priority; | |||||
is_sleeping |= (tdwait->et_td->td_inhibitors != 0); | |||||
} | |||||
if (is_sleeping) { | |||||
/* | |||||
* Wait one tick. Performance is not critical | |||||
* for sleepable EPOCHs. | |||||
*/ | |||||
thread_unlock(td); | |||||
pause("W", 1); | |||||
thread_lock(td); | |||||
} else { | |||||
/* set new thread priority */ | |||||
sched_prio(td, prio); | |||||
/* task switch */ | |||||
mi_switch(SW_VOL | SWT_RELINQUISH); | |||||
/* | |||||
* It is important the thread lock is dropped | |||||
* while yielding to allow other threads to | |||||
* acquire the lock pointed to by | |||||
* TDQ_LOCKPTR(td). Currently mi_switch() will | |||||
* unlock the thread lock before | |||||
* returning. Else a deadlock like situation | |||||
* might happen. | |||||
*/ | |||||
thread_lock(td); | |||||
} | |||||
} else { | |||||
/* | |||||
* To avoid spinning move execution to the other CPU | |||||
* which is blocking synchronization. Set highest | |||||
* thread priority so that code gets run. The thread | |||||
* priority will be restored later. | |||||
*/ | |||||
sched_prio(td, 0); | |||||
sched_bind(td, record->er_cpuid); | |||||
} | |||||
} | |||||
void | |||||
epoch_wait_sleepable(epoch_t epoch) | |||||
{ | |||||
struct thread *td; | |||||
int was_bound; | |||||
int old_cpu; | |||||
int old_pinned; | |||||
u_char old_prio; | |||||
MPASS(cold || epoch != NULL); | |||||
INIT_CHECK(epoch); | |||||
td = curthread; | |||||
#ifdef INVARIANTS | |||||
MPASS((epoch->e_flags & EPOCH_TYPE_MASK) == EPOCH_SLEEPABLE); | |||||
if ((epoch->e_flags & EPOCH_LOCKED) == 0) | |||||
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, | |||||
"epoch_wait() can be long running"); | |||||
KASSERT(!in_epoch(epoch), ("epoch_wait_sleepable() called in the middle " | |||||
"of an epoch section of the same epoch")); | |||||
#endif | |||||
DROP_GIANT(); | |||||
thread_lock(td); | |||||
old_cpu = PCPU_GET(cpuid); | |||||
old_pinned = td->td_pinned; | |||||
old_prio = td->td_priority; | |||||
was_bound = sched_is_bound(td); | |||||
sched_unbind(td); | |||||
td->td_pinned = 0; | |||||
sched_bind(td, old_cpu); | |||||
ck_epoch_synchronize_wait(&epoch->e_epoch, | |||||
epoch_block_handler_sleepable, NULL); | |||||
/* restore CPU binding, if any */ | |||||
if (was_bound != 0) { | |||||
sched_bind(td, old_cpu); | |||||
} else { | |||||
/* get thread back to initial CPU, if any */ | |||||
if (old_pinned != 0) | |||||
sched_bind(td, old_cpu); | |||||
sched_unbind(td); | |||||
} | |||||
/* restore pinned after bind */ | |||||
td->td_pinned = old_pinned; | |||||
/* restore thread priority */ | |||||
sched_prio(td, old_prio); | |||||
thread_unlock(td); | |||||
PICKUP_GIANT(); | |||||
} | |||||
static void | |||||
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused, | epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused, | ||||
void *arg __unused) | void *arg __unused) | ||||
{ | { | ||||
cpu_spinwait(); | cpu_spinwait(); | ||||
} | } | ||||
void | void | ||||
epoch_wait(epoch_t epoch) | epoch_wait(epoch_t epoch) | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | |||||
static int | static int | ||||
in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail) | in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail) | ||||
{ | { | ||||
epoch_record_t er; | epoch_record_t er; | ||||
struct epoch_tracker *tdwait; | struct epoch_tracker *tdwait; | ||||
struct thread *td; | struct thread *td; | ||||
MPASS(epoch != NULL); | MPASS(epoch != NULL); | ||||
MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0); | MPASS((epoch->e_flags & EPOCH_TYPE_MASK) == EPOCH_PREEMPT); | ||||
td = curthread; | td = curthread; | ||||
if (THREAD_CAN_SLEEP()) | if (THREAD_CAN_SLEEP()) | ||||
return (0); | return (0); | ||||
critical_enter(); | critical_enter(); | ||||
er = epoch_currecord(epoch); | er = epoch_currecord(epoch); | ||||
TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) | TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) | ||||
if (tdwait->et_td == td) { | if (tdwait->et_td == td) { | ||||
critical_exit(); | critical_exit(); | ||||
return (1); | return (1); | ||||
} | } | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
if (dump_onfail) { | if (dump_onfail) { | ||||
MPASS(td->td_pinned); | MPASS(td->td_pinned); | ||||
printf("cpu: %d id: %d\n", curcpu, td->td_tid); | printf("cpu: %d id: %d\n", curcpu, td->td_tid); | ||||
TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) | TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) | ||||
printf("td_tid: %d ", tdwait->et_td->td_tid); | printf("td_tid: %d ", tdwait->et_td->td_tid); | ||||
printf("\n"); | printf("\n"); | ||||
} | } | ||||
#endif | #endif | ||||
critical_exit(); | critical_exit(); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | |||||
in_epoch_verbose_sleepable(epoch_t epoch, int dump_onfail) | |||||
{ | |||||
epoch_record_t er; | |||||
struct epoch_tracker *tdwait; | |||||
struct thread *td; | |||||
MPASS(epoch != NULL); | |||||
MPASS((epoch->e_flags & EPOCH_TYPE_MASK) == EPOCH_SLEEPABLE); | |||||
td = curthread; | |||||
critical_enter(); | |||||
er = epoch_currecord(epoch); | |||||
TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) { | |||||
if (tdwait->et_td != td) | |||||
continue; | |||||
critical_exit(); | |||||
return (1); | |||||
} | |||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
if (dump_onfail) { | |||||
MPASS(td->td_pinned); | |||||
printf("cpu: %d id: %d\n", curcpu, td->td_tid); | |||||
TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) | |||||
printf("td_tid: %d ", tdwait->et_td->td_tid); | |||||
printf("\n"); | |||||
} | |||||
#endif | |||||
critical_exit(); | |||||
return (0); | |||||
} | |||||
#ifdef INVARIANTS | |||||
static void | static void | ||||
epoch_assert_nocpu(epoch_t epoch, struct thread *td) | epoch_assert_nocpu(epoch_t epoch, struct thread *td) | ||||
{ | { | ||||
epoch_record_t er; | epoch_record_t er; | ||||
int cpu; | int cpu; | ||||
bool crit; | bool crit; | ||||
crit = td->td_critnest > 0; | crit = td->td_critnest > 0; | ||||
Show All 11 Lines | |||||
#endif | #endif | ||||
int | int | ||||
in_epoch_verbose(epoch_t epoch, int dump_onfail) | in_epoch_verbose(epoch_t epoch, int dump_onfail) | ||||
{ | { | ||||
epoch_record_t er; | epoch_record_t er; | ||||
struct thread *td; | struct thread *td; | ||||
if (__predict_false((epoch) == NULL)) | if (__predict_false(epoch == NULL)) | ||||
return (0); | return (0); | ||||
if ((epoch->e_flags & EPOCH_PREEMPT) != 0) | |||||
switch (epoch->e_flags & EPOCH_TYPE_MASK) { | |||||
case EPOCH_CRITICAL: | |||||
break; | |||||
case EPOCH_PREEMPT: | |||||
return (in_epoch_verbose_preempt(epoch, dump_onfail)); | return (in_epoch_verbose_preempt(epoch, dump_onfail)); | ||||
case EPOCH_SLEEPABLE: | |||||
return (in_epoch_verbose_sleepable(epoch, dump_onfail)); | |||||
default: | |||||
panic("in_epoch_verbose: Invalid EPOCH type."); | |||||
} | |||||
/* | /* | ||||
* The thread being in a critical section is a necessary | * The thread being in a critical section is a necessary | ||||
* condition to be correctly inside a non-preemptible epoch, | * condition to be correctly inside a non-preemptible epoch, | ||||
* so it's definitely not in this epoch. | * so it's definitely not in this epoch. | ||||
*/ | */ | ||||
td = curthread; | td = curthread; | ||||
if (td->td_critnest == 0) { | if (td->td_critnest == 0) { | ||||
▲ Show 20 Lines • Show All 102 Lines • Show Last 20 Lines |