Changeset View
Changeset View
Standalone View
Standalone View
sys/contrib/openzfs/module/zfs/txg.c
Show First 20 Lines • Show All 286 Lines • ▼ Show 20 Lines | txg_sync_stop(dsl_pool_t *dp) | ||||
while (tx->tx_threads != 0) | while (tx->tx_threads != 0) | ||||
cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock); | cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock); | ||||
tx->tx_exiting = 0; | tx->tx_exiting = 0; | ||||
mutex_exit(&tx->tx_sync_lock); | mutex_exit(&tx->tx_sync_lock); | ||||
} | } | ||||
/*
 * Get a handle on the currently open txg and keep it open.
 *
 * The txg is guaranteed to stay open until txg_rele_to_quiesce() is called for
 * the handle. Once txg_rele_to_quiesce() has been called, the txg stays
 * in quiescing state until txg_rele_to_sync() is called for the handle.
 *
 * It is guaranteed that subsequent calls return monotonically increasing
 * txgs for the same dsl_pool_t. Of course this is not strong monotonicity,
 * because the same txg can be returned multiple times in a row. This
 * guarantee holds both for subsequent calls from one thread and for multiple
 * threads. For example, it is impossible to observe the following sequence
 * of events:
 *
 *          Thread 1                            Thread 2
 *
 *          1 <- txg_hold_open(P, ...)
 *                                              2 <- txg_hold_open(P, ...)
 *          1 <- txg_hold_open(P, ...)
 *
 */
uint64_t | uint64_t | ||||
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th) | txg_hold_open(dsl_pool_t *dp, txg_handle_t *th) | ||||
{ | { | ||||
tx_state_t *tx = &dp->dp_tx; | tx_state_t *tx = &dp->dp_tx; | ||||
tx_cpu_t *tc; | tx_cpu_t *tc; | ||||
uint64_t txg; | uint64_t txg; | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 85 Lines • ▼ Show 20 Lines | txg_quiesce(dsl_pool_t *dp, uint64_t txg) | ||||
*/ | */ | ||||
for (c = 0; c < max_ncpus; c++) | for (c = 0; c < max_ncpus; c++) | ||||
mutex_exit(&tx->tx_cpu[c].tc_open_lock); | mutex_exit(&tx->tx_cpu[c].tc_open_lock); | ||||
spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time); | spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time); | ||||
spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time); | spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time); | ||||
	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync() for their open transaction handles.
	 */
for (c = 0; c < max_ncpus; c++) { | for (c = 0; c < max_ncpus; c++) { | ||||
tx_cpu_t *tc = &tx->tx_cpu[c]; | tx_cpu_t *tc = &tx->tx_cpu[c]; | ||||
mutex_enter(&tc->tc_lock); | mutex_enter(&tc->tc_lock); | ||||
while (tc->tc_count[g] != 0) | while (tc->tc_count[g] != 0) | ||||
cv_wait(&tc->tc_cv[g], &tc->tc_lock); | cv_wait(&tc->tc_cv[g], &tc->tc_lock); | ||||
mutex_exit(&tc->tc_lock); | mutex_exit(&tc->tc_lock); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 650 Lines • Show Last 20 Lines |