D36239.id109440.diff

Index: sys/dev/wg/if_wg.c
===================================================================
--- sys/dev/wg/if_wg.c
+++ sys/dev/wg/if_wg.c
@@ -187,13 +187,13 @@
bool p_enabled;
bool p_need_another_keepalive;
uint16_t p_persistent_keepalive_interval;
+ struct mtx p_timers_mtx;
struct callout p_new_handshake;
struct callout p_send_keepalive;
struct callout p_retry_handshake;
struct callout p_zero_key_material;
struct callout p_persistent_keepalive;
- struct mtx p_handshake_mtx;
struct timespec p_handshake_complete; /* nanotime */
int p_handshake_retries;
@@ -419,13 +419,13 @@
peer->p_enabled = false;
peer->p_need_another_keepalive = false;
peer->p_persistent_keepalive_interval = 0;
- callout_init(&peer->p_new_handshake, true);
- callout_init(&peer->p_send_keepalive, true);
- callout_init(&peer->p_retry_handshake, true);
- callout_init(&peer->p_persistent_keepalive, true);
- callout_init(&peer->p_zero_key_material, true);
+ mtx_init(&peer->p_timers_mtx, "peer timers", NULL, MTX_DEF | MTX_RECURSE);
+ callout_init_mtx(&peer->p_new_handshake, &peer->p_timers_mtx, 0);
+ callout_init_mtx(&peer->p_send_keepalive, &peer->p_timers_mtx, 0);
+ callout_init_mtx(&peer->p_retry_handshake, &peer->p_timers_mtx, 0);
+ callout_init_mtx(&peer->p_persistent_keepalive, &peer->p_timers_mtx, 0);
+ callout_init_mtx(&peer->p_zero_key_material, &peer->p_timers_mtx, 0);
- mtx_init(&peer->p_handshake_mtx, "peer handshake", NULL, MTX_DEF);
bzero(&peer->p_handshake_complete, sizeof(peer->p_handshake_complete));
peer->p_handshake_retries = 0;
@@ -457,10 +457,15 @@
wg_queue_deinit(&peer->p_encrypt_serial);
wg_queue_deinit(&peer->p_stage_queue);
+ callout_drain(&peer->p_new_handshake);
+ callout_drain(&peer->p_send_keepalive);
+ callout_drain(&peer->p_retry_handshake);
+ callout_drain(&peer->p_persistent_keepalive);
+ callout_drain(&peer->p_zero_key_material);
counter_u64_free(peer->p_tx_bytes);
counter_u64_free(peer->p_rx_bytes);
rw_destroy(&peer->p_endpoint_lock);
- mtx_destroy(&peer->p_handshake_mtx);
+ mtx_destroy(&peer->p_timers_mtx);
cookie_maker_free(&peer->p_cookie);
@@ -944,162 +949,152 @@
static void
wg_timers_enable(struct wg_peer *peer)
{
- ck_pr_store_bool(&peer->p_enabled, true);
+ mtx_lock(&peer->p_timers_mtx);
+ peer->p_enabled = true;
wg_timers_run_persistent_keepalive(peer);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_disable(struct wg_peer *peer)
{
- /* By setting p_enabled = false, then calling NET_EPOCH_WAIT, we can be
- * sure no new handshakes are created after the wait. This is because
- * all callout_resets (scheduling the callout) are guarded by
- * p_enabled. We can be sure all sections that read p_enabled and then
- * optionally call callout_reset are finished as they are surrounded by
- * NET_EPOCH_{ENTER,EXIT}.
- *
- * However, as new callouts may be scheduled during NET_EPOCH_WAIT (but
- * not after), we stop all callouts leaving no callouts active.
- *
- * We should also pull NET_EPOCH_WAIT out of the FOREACH(peer) loops, but the
- * performance impact is acceptable for the time being. */
- ck_pr_store_bool(&peer->p_enabled, false);
- NET_EPOCH_WAIT();
- ck_pr_store_bool(&peer->p_need_another_keepalive, false);
+ mtx_lock(&peer->p_timers_mtx);
+ peer->p_enabled = false;
+ peer->p_need_another_keepalive = false;
callout_stop(&peer->p_new_handshake);
callout_stop(&peer->p_send_keepalive);
callout_stop(&peer->p_retry_handshake);
callout_stop(&peer->p_persistent_keepalive);
callout_stop(&peer->p_zero_key_material);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_set_persistent_keepalive(struct wg_peer *peer, uint16_t interval)
{
- struct epoch_tracker et;
+ mtx_lock(&peer->p_timers_mtx);
if (interval != peer->p_persistent_keepalive_interval) {
- ck_pr_store_16(&peer->p_persistent_keepalive_interval, interval);
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled))
+ peer->p_persistent_keepalive_interval = interval;
+ if (peer->p_enabled)
wg_timers_run_persistent_keepalive(peer);
- NET_EPOCH_EXIT(et);
}
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_get_last_handshake(struct wg_peer *peer, struct wg_timespec64 *time)
{
- mtx_lock(&peer->p_handshake_mtx);
+ mtx_lock(&peer->p_timers_mtx);
time->tv_sec = peer->p_handshake_complete.tv_sec;
time->tv_nsec = peer->p_handshake_complete.tv_nsec;
- mtx_unlock(&peer->p_handshake_mtx);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_data_sent(struct wg_peer *peer)
{
- struct epoch_tracker et;
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake))
+ mtx_lock(&peer->p_timers_mtx);
+ if (peer->p_enabled && !callout_pending(&peer->p_new_handshake))
callout_reset(&peer->p_new_handshake, MSEC_2_TICKS(
NEW_HANDSHAKE_TIMEOUT * 1000 +
arc4random_uniform(REKEY_TIMEOUT_JITTER)),
wg_timers_run_new_handshake, peer);
- NET_EPOCH_EXIT(et);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_data_received(struct wg_peer *peer)
{
- struct epoch_tracker et;
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled)) {
+ mtx_lock(&peer->p_timers_mtx);
+ if (peer->p_enabled) {
if (!callout_pending(&peer->p_send_keepalive))
callout_reset(&peer->p_send_keepalive,
MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000),
wg_timers_run_send_keepalive, peer);
else
- ck_pr_store_bool(&peer->p_need_another_keepalive, true);
+ peer->p_need_another_keepalive = true;
}
- NET_EPOCH_EXIT(et);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_any_authenticated_packet_sent(struct wg_peer *peer)
{
+ mtx_lock(&peer->p_timers_mtx);
callout_stop(&peer->p_send_keepalive);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_any_authenticated_packet_received(struct wg_peer *peer)
{
+ mtx_lock(&peer->p_timers_mtx);
callout_stop(&peer->p_new_handshake);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_any_authenticated_packet_traversal(struct wg_peer *peer)
{
- struct epoch_tracker et;
uint16_t interval;
- NET_EPOCH_ENTER(et);
- interval = ck_pr_load_16(&peer->p_persistent_keepalive_interval);
- if (ck_pr_load_bool(&peer->p_enabled) && interval > 0)
+
+ /* Optimize for the default case of keepalives disabled. */
+ if (peer->p_persistent_keepalive_interval == 0)
+ return;
+
+ mtx_lock(&peer->p_timers_mtx);
+ interval = peer->p_persistent_keepalive_interval;
+ if (peer->p_enabled && interval > 0)
callout_reset(&peer->p_persistent_keepalive,
MSEC_2_TICKS(interval * 1000),
wg_timers_run_persistent_keepalive, peer);
- NET_EPOCH_EXIT(et);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_handshake_initiated(struct wg_peer *peer)
{
- struct epoch_tracker et;
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled))
+ mtx_lock(&peer->p_timers_mtx);
+ if (peer->p_enabled)
callout_reset(&peer->p_retry_handshake, MSEC_2_TICKS(
REKEY_TIMEOUT * 1000 +
arc4random_uniform(REKEY_TIMEOUT_JITTER)),
wg_timers_run_retry_handshake, peer);
- NET_EPOCH_EXIT(et);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_handshake_complete(struct wg_peer *peer)
{
- struct epoch_tracker et;
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled)) {
- mtx_lock(&peer->p_handshake_mtx);
+ mtx_lock(&peer->p_timers_mtx);
+ if (peer->p_enabled) {
callout_stop(&peer->p_retry_handshake);
peer->p_handshake_retries = 0;
getnanotime(&peer->p_handshake_complete);
- mtx_unlock(&peer->p_handshake_mtx);
wg_timers_run_send_keepalive(peer);
}
- NET_EPOCH_EXIT(et);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_session_derived(struct wg_peer *peer)
{
- struct epoch_tracker et;
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled))
+ mtx_lock(&peer->p_timers_mtx);
+ if (peer->p_enabled)
callout_reset(&peer->p_zero_key_material,
MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000),
wg_timers_run_zero_key_material, peer);
- NET_EPOCH_EXIT(et);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
wg_timers_event_want_initiation(struct wg_peer *peer)
{
- struct epoch_tracker et;
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled))
+ mtx_lock(&peer->p_timers_mtx);
+ if (peer->p_enabled)
wg_timers_run_send_initiation(peer, false);
- NET_EPOCH_EXIT(et);
+ mtx_unlock(&peer->p_timers_mtx);
}
static void
@@ -1114,13 +1109,12 @@
static void
wg_timers_run_retry_handshake(void *_peer)
{
- struct epoch_tracker et;
struct wg_peer *peer = _peer;
- mtx_lock(&peer->p_handshake_mtx);
+ mtx_assert(&peer->p_timers_mtx, MA_OWNED);
+
if (peer->p_handshake_retries <= MAX_TIMER_HANDSHAKES) {
peer->p_handshake_retries++;
- mtx_unlock(&peer->p_handshake_mtx);
DPRINTF(peer->p_sc, "Handshake for peer %" PRIu64 " did not complete "
"after %d seconds, retrying (try %d)\n", peer->p_id,
@@ -1128,40 +1122,33 @@
wg_peer_clear_src(peer);
wg_timers_run_send_initiation(peer, true);
} else {
- mtx_unlock(&peer->p_handshake_mtx);
-
DPRINTF(peer->p_sc, "Handshake for peer %" PRIu64 " did not complete "
"after %d retries, giving up\n", peer->p_id,
MAX_TIMER_HANDSHAKES + 2);
callout_stop(&peer->p_send_keepalive);
wg_queue_purge(&peer->p_stage_queue);
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled) &&
+ if (peer->p_enabled &&
!callout_pending(&peer->p_zero_key_material))
callout_reset(&peer->p_zero_key_material,
MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000),
wg_timers_run_zero_key_material, peer);
- NET_EPOCH_EXIT(et);
}
}
static void
wg_timers_run_send_keepalive(void *_peer)
{
- struct epoch_tracker et;
struct wg_peer *peer = _peer;
+ mtx_assert(&peer->p_timers_mtx, MA_OWNED);
wg_send_keepalive(peer);
- NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled) &&
- ck_pr_load_bool(&peer->p_need_another_keepalive)) {
- ck_pr_store_bool(&peer->p_need_another_keepalive, false);
+ if (peer->p_enabled && peer->p_need_another_keepalive) {
+ peer->p_need_another_keepalive = false;
callout_reset(&peer->p_send_keepalive,
MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000),
wg_timers_run_send_keepalive, peer);
}
- NET_EPOCH_EXIT(et);
}
static void
@@ -1173,6 +1160,7 @@
"stopped hearing back after %d seconds\n",
peer->p_id, NEW_HANDSHAKE_TIMEOUT);
+ mtx_assert(&peer->p_timers_mtx, MA_OWNED);
wg_peer_clear_src(peer);
wg_timers_run_send_initiation(peer, false);
}
@@ -1185,6 +1173,7 @@
DPRINTF(peer->p_sc, "Zeroing out keys for peer %" PRIu64 ", since we "
"haven't received a new one in %d seconds\n",
peer->p_id, REJECT_AFTER_TIME * 3);
+ mtx_assert(&peer->p_timers_mtx, MA_OWNED);
noise_remote_keypairs_clear(peer->p_remote);
}
@@ -1193,7 +1182,8 @@
{
struct wg_peer *peer = _peer;
- if (ck_pr_load_16(&peer->p_persistent_keepalive_interval) > 0)
+ mtx_assert(&peer->p_timers_mtx, MA_OWNED);
+ if (peer->p_persistent_keepalive_interval > 0)
wg_send_keepalive(peer);
}

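Note (not part of the patch): the hunks above replace the epoch/ck_pr-based timer synchronization with a single per-peer mutex to which every callout is bound via callout_init_mtx(9). The sketch below shows that locking discipline in isolation, under the assumption that it mirrors what the patch does; the struct and function names (example_timers, example_timeout, and so on) are hypothetical and exist only for illustration. The handler runs with the mutex held, callouts are stopped under the lock, and callout_drain() is called before mtx_destroy(), as in the peer free path above.

/*
 * Minimal sketch of the callout_init_mtx() pattern adopted by this
 * revision.  All names here are illustrative, not part of if_wg.c.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct example_timers {
	struct mtx	t_mtx;
	struct callout	t_callout;
	bool		t_enabled;
};

static void
example_timeout(void *arg)
{
	struct example_timers *t = arg;

	/* callout_init_mtx() guarantees the handler runs with t_mtx held. */
	mtx_assert(&t->t_mtx, MA_OWNED);
	if (t->t_enabled)
		callout_reset(&t->t_callout, hz, example_timeout, t);
}

static void
example_timers_init(struct example_timers *t)
{
	/* MTX_RECURSE lets a handler call code that takes t_mtx again. */
	mtx_init(&t->t_mtx, "example timers", NULL, MTX_DEF | MTX_RECURSE);
	callout_init_mtx(&t->t_callout, &t->t_mtx, 0);
	t->t_enabled = true;
	mtx_lock(&t->t_mtx);
	callout_reset(&t->t_callout, hz, example_timeout, t);
	mtx_unlock(&t->t_mtx);
}

static void
example_timers_destroy(struct example_timers *t)
{
	mtx_lock(&t->t_mtx);
	t->t_enabled = false;
	callout_stop(&t->t_callout);
	mtx_unlock(&t->t_mtx);

	/* Wait for any in-flight handler before freeing the mutex. */
	callout_drain(&t->t_callout);
	mtx_destroy(&t->t_mtx);
}

The MTX_RECURSE flag matches the p_timers_mtx initialization in the patch, which appears to accommodate timer handlers that re-enter the wg_timers_event_*() paths (for example via wg_send_keepalive()) while the lock is already held.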