diff --git a/sys/dev/wg/if_wg.c b/sys/dev/wg/if_wg.c --- a/sys/dev/wg/if_wg.c +++ b/sys/dev/wg/if_wg.c @@ -945,7 +945,7 @@ static void wg_timers_enable(struct wg_peer *peer) { - ck_pr_store_bool(&peer->p_enabled, true); + atomic_store_bool(&peer->p_enabled, true); wg_timers_run_persistent_keepalive(peer); } @@ -964,9 +964,9 @@ * * We should also pull NET_EPOCH_WAIT out of the FOREACH(peer) loops, but the * performance impact is acceptable for the time being. */ - ck_pr_store_bool(&peer->p_enabled, false); + atomic_store_bool(&peer->p_enabled, false); NET_EPOCH_WAIT(); - ck_pr_store_bool(&peer->p_need_another_keepalive, false); + atomic_store_bool(&peer->p_need_another_keepalive, false); callout_stop(&peer->p_new_handshake); callout_stop(&peer->p_send_keepalive); @@ -980,9 +980,9 @@ { struct epoch_tracker et; if (interval != peer->p_persistent_keepalive_interval) { - ck_pr_store_16(&peer->p_persistent_keepalive_interval, interval); + atomic_store_16(&peer->p_persistent_keepalive_interval, interval); NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) + if (atomic_load_bool(&peer->p_enabled)) wg_timers_run_persistent_keepalive(peer); NET_EPOCH_EXIT(et); } @@ -1002,7 +1002,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake)) + if (atomic_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake)) callout_reset(&peer->p_new_handshake, MSEC_2_TICKS( NEW_HANDSHAKE_TIMEOUT * 1000 + arc4random_uniform(REKEY_TIMEOUT_JITTER)), @@ -1015,13 +1015,13 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) { + if (atomic_load_bool(&peer->p_enabled)) { if (!callout_pending(&peer->p_send_keepalive)) callout_reset(&peer->p_send_keepalive, MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000), wg_timers_run_send_keepalive, peer); else - ck_pr_store_bool(&peer->p_need_another_keepalive, true); + atomic_store_bool(&peer->p_need_another_keepalive, true); } 
NET_EPOCH_EXIT(et); } @@ -1044,8 +1044,8 @@ struct epoch_tracker et; uint16_t interval; NET_EPOCH_ENTER(et); - interval = ck_pr_load_16(&peer->p_persistent_keepalive_interval); - if (ck_pr_load_bool(&peer->p_enabled) && interval > 0) + interval = atomic_load_16(&peer->p_persistent_keepalive_interval); + if (atomic_load_bool(&peer->p_enabled) && interval > 0) callout_reset(&peer->p_persistent_keepalive, MSEC_2_TICKS(interval * 1000), wg_timers_run_persistent_keepalive, peer); @@ -1057,7 +1057,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) + if (atomic_load_bool(&peer->p_enabled)) callout_reset(&peer->p_retry_handshake, MSEC_2_TICKS( REKEY_TIMEOUT * 1000 + arc4random_uniform(REKEY_TIMEOUT_JITTER)), @@ -1070,7 +1070,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) { + if (atomic_load_bool(&peer->p_enabled)) { mtx_lock(&peer->p_handshake_mtx); callout_stop(&peer->p_retry_handshake); peer->p_handshake_retries = 0; @@ -1086,7 +1086,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) + if (atomic_load_bool(&peer->p_enabled)) callout_reset(&peer->p_zero_key_material, MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000), wg_timers_run_zero_key_material, peer); @@ -1098,7 +1098,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) + if (atomic_load_bool(&peer->p_enabled)) wg_timers_run_send_initiation(peer, false); NET_EPOCH_EXIT(et); } @@ -1138,7 +1138,7 @@ callout_stop(&peer->p_send_keepalive); wg_queue_purge(&peer->p_stage_queue); NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled) && + if (atomic_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_zero_key_material)) callout_reset(&peer->p_zero_key_material, MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000), @@ -1155,9 +1155,9 @@ wg_send_keepalive(peer); NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled) && - 
ck_pr_load_bool(&peer->p_need_another_keepalive)) { - ck_pr_store_bool(&peer->p_need_another_keepalive, false); + if (atomic_load_bool(&peer->p_enabled) && + atomic_load_bool(&peer->p_need_another_keepalive)) { + atomic_store_bool(&peer->p_need_another_keepalive, false); callout_reset(&peer->p_send_keepalive, MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000), wg_timers_run_send_keepalive, peer); @@ -1194,7 +1194,7 @@ { struct wg_peer *peer = _peer; - if (ck_pr_load_16(&peer->p_persistent_keepalive_interval) > 0) + if (atomic_load_16(&peer->p_persistent_keepalive_interval) > 0) wg_send_keepalive(peer); } diff --git a/sys/dev/wg/wg_noise.c b/sys/dev/wg/wg_noise.c --- a/sys/dev/wg/wg_noise.c +++ b/sys/dev/wg/wg_noise.c @@ -586,12 +586,12 @@ noise_remote_handshake_clear(r); NET_EPOCH_ENTER(et); - kp = ck_pr_load_ptr(&r->r_next); + kp = atomic_load_ptr(&r->r_next); if (kp != NULL) - ck_pr_store_bool(&kp->kp_can_send, false); - kp = ck_pr_load_ptr(&r->r_current); + atomic_store_bool(&kp->kp_can_send, false); + kp = atomic_load_ptr(&r->r_current); if (kp != NULL) - ck_pr_store_bool(&kp->kp_can_send, false); + atomic_store_bool(&kp->kp_can_send, false); NET_EPOCH_EXIT(et); } @@ -703,10 +703,10 @@ struct noise_keypair *kp, *ret = NULL; NET_EPOCH_ENTER(et); - kp = ck_pr_load_ptr(&r->r_current); - if (kp != NULL && ck_pr_load_bool(&kp->kp_can_send)) { + kp = atomic_load_ptr(&r->r_current); + if (kp != NULL && atomic_load_bool(&kp->kp_can_send)) { if (noise_timer_expired(kp->kp_birthdate, REJECT_AFTER_TIME, 0)) - ck_pr_store_bool(&kp->kp_can_send, false); + atomic_store_bool(&kp->kp_can_send, false); else if (refcount_acquire_if_not_zero(&kp->kp_refcnt)) ret = kp; } @@ -792,11 +792,11 @@ int noise_keypair_nonce_next(struct noise_keypair *kp, uint64_t *send) { - if (!ck_pr_load_bool(&kp->kp_can_send)) + if (!atomic_load_bool(&kp->kp_can_send)) return (EINVAL); #ifdef __LP64__ - *send = ck_pr_faa_64(&kp->kp_nonce_send, 1); + *send = atomic_fetchadd_64(&kp->kp_nonce_send, 1); #else 
rw_wlock(&kp->kp_nonce_lock); *send = kp->kp_nonce_send++; @@ -804,7 +804,7 @@ #endif if (*send < REJECT_AFTER_MESSAGES) return (0); - ck_pr_store_bool(&kp->kp_can_send, false); + atomic_store_bool(&kp->kp_can_send, false); return (EINVAL); } @@ -835,7 +835,7 @@ (i + index_current) & ((COUNTER_BITS_TOTAL / COUNTER_BITS) - 1)] = 0; #ifdef __LP64__ - ck_pr_store_64(&kp->kp_nonce_recv, recv); + atomic_store_64(&kp->kp_nonce_recv, recv); #else kp->kp_nonce_recv = recv; #endif @@ -862,12 +862,12 @@ uint64_t nonce; NET_EPOCH_ENTER(et); - current = ck_pr_load_ptr(&r->r_current); - keep_key_fresh = current != NULL && ck_pr_load_bool(&current->kp_can_send); + current = atomic_load_ptr(&r->r_current); + keep_key_fresh = current != NULL && atomic_load_bool(&current->kp_can_send); if (!keep_key_fresh) goto out; #ifdef __LP64__ - nonce = ck_pr_load_64(&current->kp_nonce_send); + nonce = atomic_load_64(&current->kp_nonce_send); #else rw_rlock(&current->kp_nonce_lock); nonce = current->kp_nonce_send; @@ -891,8 +891,8 @@ int keep_key_fresh; NET_EPOCH_ENTER(et); - current = ck_pr_load_ptr(&r->r_current); - keep_key_fresh = current != NULL && ck_pr_load_bool(&current->kp_can_send) && + current = atomic_load_ptr(&r->r_current); + keep_key_fresh = current != NULL && atomic_load_bool(&current->kp_can_send) && current->kp_is_initiator && noise_timer_expired(current->kp_birthdate, REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT, 0); NET_EPOCH_EXIT(et); @@ -920,7 +920,7 @@ int ret; #ifdef __LP64__ - cur_nonce = ck_pr_load_64(&kp->kp_nonce_recv); + cur_nonce = atomic_load_64(&kp->kp_nonce_recv); #else rw_rlock(&kp->kp_nonce_lock); cur_nonce = kp->kp_nonce_recv;