diff --git a/sys/dev/wg/if_wg.c b/sys/dev/wg/if_wg.c --- a/sys/dev/wg/if_wg.c +++ b/sys/dev/wg/if_wg.c @@ -786,10 +786,10 @@ sx_assert(&sc->sc_lock, SX_XLOCKED); - so4 = ck_pr_load_ptr(&so->so_so4); - so6 = ck_pr_load_ptr(&so->so_so6); - ck_pr_store_ptr(&so->so_so4, new_so4); - ck_pr_store_ptr(&so->so_so6, new_so6); + so4 = atomic_load_ptr(&so->so_so4); + so6 = atomic_load_ptr(&so->so_so6); + atomic_store_ptr(&so->so_so4, new_so4); + atomic_store_ptr(&so->so_so6, new_so6); if (!so4 && !so6) return; @@ -894,8 +894,8 @@ sa = &e->e_remote.r_sa; NET_EPOCH_ENTER(et); - so4 = ck_pr_load_ptr(&so->so_so4); - so6 = ck_pr_load_ptr(&so->so_so6); + so4 = atomic_load_ptr(&so->so_so4); + so6 = atomic_load_ptr(&so->so_so6); if (e->e_remote.r_sa.sa_family == AF_INET && so4 != NULL) ret = sosend(so4, sa, NULL, m, control, 0, curthread); else if (e->e_remote.r_sa.sa_family == AF_INET6 && so6 != NULL) @@ -948,7 +948,7 @@ static void wg_timers_enable(struct wg_peer *peer) { - ck_pr_store_bool(&peer->p_enabled, true); + atomic_store_bool(&peer->p_enabled, true); wg_timers_run_persistent_keepalive(peer); } @@ -967,9 +967,9 @@ * * We should also pull NET_EPOCH_WAIT out of the FOREACH(peer) loops, but the * performance impact is acceptable for the time being. 
*/ - ck_pr_store_bool(&peer->p_enabled, false); + atomic_store_bool(&peer->p_enabled, false); NET_EPOCH_WAIT(); - ck_pr_store_bool(&peer->p_need_another_keepalive, false); + atomic_store_bool(&peer->p_need_another_keepalive, false); callout_stop(&peer->p_new_handshake); callout_stop(&peer->p_send_keepalive); @@ -983,9 +983,9 @@ { struct epoch_tracker et; if (interval != peer->p_persistent_keepalive_interval) { - ck_pr_store_16(&peer->p_persistent_keepalive_interval, interval); + atomic_store_16(&peer->p_persistent_keepalive_interval, interval); NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) + if (atomic_load_bool(&peer->p_enabled)) wg_timers_run_persistent_keepalive(peer); NET_EPOCH_EXIT(et); } @@ -1005,7 +1005,8 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake)) + if (atomic_load_bool(&peer->p_enabled) && + !callout_pending(&peer->p_new_handshake)) callout_reset(&peer->p_new_handshake, MSEC_2_TICKS( NEW_HANDSHAKE_TIMEOUT * 1000 + arc4random_uniform(REKEY_TIMEOUT_JITTER)), @@ -1018,13 +1019,14 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) { + if (atomic_load_bool(&peer->p_enabled)) { if (!callout_pending(&peer->p_send_keepalive)) callout_reset(&peer->p_send_keepalive, MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000), wg_timers_run_send_keepalive, peer); else - ck_pr_store_bool(&peer->p_need_another_keepalive, true); + atomic_store_bool(&peer->p_need_another_keepalive, + true); } NET_EPOCH_EXIT(et); } @@ -1047,8 +1049,8 @@ struct epoch_tracker et; uint16_t interval; NET_EPOCH_ENTER(et); - interval = ck_pr_load_16(&peer->p_persistent_keepalive_interval); - if (ck_pr_load_bool(&peer->p_enabled) && interval > 0) + interval = atomic_load_16(&peer->p_persistent_keepalive_interval); + if (atomic_load_bool(&peer->p_enabled) && interval > 0) callout_reset(&peer->p_persistent_keepalive, MSEC_2_TICKS(interval * 1000), 
wg_timers_run_persistent_keepalive, peer); @@ -1060,7 +1062,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) + if (atomic_load_bool(&peer->p_enabled)) callout_reset(&peer->p_retry_handshake, MSEC_2_TICKS( REKEY_TIMEOUT * 1000 + arc4random_uniform(REKEY_TIMEOUT_JITTER)), @@ -1073,7 +1075,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) { + if (atomic_load_bool(&peer->p_enabled)) { mtx_lock(&peer->p_handshake_mtx); callout_stop(&peer->p_retry_handshake); peer->p_handshake_retries = 0; @@ -1089,7 +1091,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) + if (atomic_load_bool(&peer->p_enabled)) callout_reset(&peer->p_zero_key_material, MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000), wg_timers_run_zero_key_material, peer); @@ -1101,7 +1103,7 @@ { struct epoch_tracker et; NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled)) + if (atomic_load_bool(&peer->p_enabled)) wg_timers_run_send_initiation(peer, false); NET_EPOCH_EXIT(et); } @@ -1141,7 +1143,7 @@ callout_stop(&peer->p_send_keepalive); wg_queue_purge(&peer->p_stage_queue); NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled) && + if (atomic_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_zero_key_material)) callout_reset(&peer->p_zero_key_material, MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000), @@ -1158,9 +1160,9 @@ wg_send_keepalive(peer); NET_EPOCH_ENTER(et); - if (ck_pr_load_bool(&peer->p_enabled) && - ck_pr_load_bool(&peer->p_need_another_keepalive)) { - ck_pr_store_bool(&peer->p_need_another_keepalive, false); + if (atomic_load_bool(&peer->p_enabled) && + atomic_load_bool(&peer->p_need_another_keepalive)) { + atomic_store_bool(&peer->p_need_another_keepalive, false); callout_reset(&peer->p_send_keepalive, MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000), wg_timers_run_send_keepalive, peer); @@ -1197,7 +1199,7 @@ { struct wg_peer *peer = _peer; - if 
(ck_pr_load_16(&peer->p_persistent_keepalive_interval) > 0) + if (atomic_load_16(&peer->p_persistent_keepalive_interval) > 0) wg_send_keepalive(peer); } diff --git a/sys/dev/wg/wg_noise.c b/sys/dev/wg/wg_noise.c --- a/sys/dev/wg/wg_noise.c +++ b/sys/dev/wg/wg_noise.c @@ -563,16 +563,16 @@ struct noise_keypair *kp; mtx_lock(&r->r_keypair_mtx); - kp = ck_pr_load_ptr(&r->r_next); - ck_pr_store_ptr(&r->r_next, NULL); + kp = atomic_load_ptr(&r->r_next); + atomic_store_ptr(&r->r_next, NULL); noise_keypair_drop(kp); - kp = ck_pr_load_ptr(&r->r_current); - ck_pr_store_ptr(&r->r_current, NULL); + kp = atomic_load_ptr(&r->r_current); + atomic_store_ptr(&r->r_current, NULL); noise_keypair_drop(kp); - kp = ck_pr_load_ptr(&r->r_previous); - ck_pr_store_ptr(&r->r_previous, NULL); + kp = atomic_load_ptr(&r->r_previous); + atomic_store_ptr(&r->r_previous, NULL); noise_keypair_drop(kp); mtx_unlock(&r->r_keypair_mtx); } @@ -586,12 +586,12 @@ noise_remote_handshake_clear(r); NET_EPOCH_ENTER(et); - kp = ck_pr_load_ptr(&r->r_next); + kp = atomic_load_ptr(&r->r_next); if (kp != NULL) - ck_pr_store_bool(&kp->kp_can_send, false); - kp = ck_pr_load_ptr(&r->r_current); + atomic_store_bool(&kp->kp_can_send, false); + kp = atomic_load_ptr(&r->r_current); if (kp != NULL) - ck_pr_store_bool(&kp->kp_can_send, false); + atomic_store_bool(&kp->kp_can_send, false); NET_EPOCH_EXIT(et); } @@ -605,24 +605,24 @@ /* Insert into the keypair table */ mtx_lock(&r->r_keypair_mtx); - next = ck_pr_load_ptr(&r->r_next); - current = ck_pr_load_ptr(&r->r_current); - previous = ck_pr_load_ptr(&r->r_previous); + next = atomic_load_ptr(&r->r_next); + current = atomic_load_ptr(&r->r_current); + previous = atomic_load_ptr(&r->r_previous); if (kp->kp_is_initiator) { if (next != NULL) { - ck_pr_store_ptr(&r->r_next, NULL); - ck_pr_store_ptr(&r->r_previous, next); + atomic_store_ptr(&r->r_next, NULL); + atomic_store_ptr(&r->r_previous, next); noise_keypair_drop(current); } else { - ck_pr_store_ptr(&r->r_previous, 
current); + atomic_store_ptr(&r->r_previous, current); } noise_keypair_drop(previous); - ck_pr_store_ptr(&r->r_current, kp); + atomic_store_ptr(&r->r_current, kp); } else { - ck_pr_store_ptr(&r->r_next, kp); + atomic_store_ptr(&r->r_next, kp); noise_keypair_drop(next); - ck_pr_store_ptr(&r->r_previous, NULL); + atomic_store_ptr(&r->r_previous, NULL); noise_keypair_drop(previous); } @@ -703,10 +703,10 @@ struct noise_keypair *kp, *ret = NULL; NET_EPOCH_ENTER(et); - kp = ck_pr_load_ptr(&r->r_current); - if (kp != NULL && ck_pr_load_bool(&kp->kp_can_send)) { + kp = atomic_load_ptr(&r->r_current); + if (kp != NULL && atomic_load_bool(&kp->kp_can_send)) { if (noise_timer_expired(kp->kp_birthdate, REJECT_AFTER_TIME, 0)) - ck_pr_store_bool(&kp->kp_can_send, false); + atomic_store_bool(&kp->kp_can_send, false); else if (refcount_acquire_if_not_zero(&kp->kp_refcnt)) ret = kp; } @@ -727,20 +727,20 @@ struct noise_keypair *old; struct noise_remote *r = kp->kp_remote; - if (kp != ck_pr_load_ptr(&r->r_next)) + if (kp != atomic_load_ptr(&r->r_next)) return (0); mtx_lock(&r->r_keypair_mtx); - if (kp != ck_pr_load_ptr(&r->r_next)) { + if (kp != atomic_load_ptr(&r->r_next)) { mtx_unlock(&r->r_keypair_mtx); return (0); } - old = ck_pr_load_ptr(&r->r_previous); - ck_pr_store_ptr(&r->r_previous, ck_pr_load_ptr(&r->r_current)); + old = atomic_load_ptr(&r->r_previous); + atomic_store_ptr(&r->r_previous, atomic_load_ptr(&r->r_current)); noise_keypair_drop(old); - ck_pr_store_ptr(&r->r_current, kp); - ck_pr_store_ptr(&r->r_next, NULL); + atomic_store_ptr(&r->r_current, kp); + atomic_store_ptr(&r->r_next, NULL); mtx_unlock(&r->r_keypair_mtx); return (ECONNRESET); @@ -792,11 +792,11 @@ int noise_keypair_nonce_next(struct noise_keypair *kp, uint64_t *send) { - if (!ck_pr_load_bool(&kp->kp_can_send)) + if (!atomic_load_bool(&kp->kp_can_send)) return (EINVAL); #ifdef __LP64__ - *send = ck_pr_faa_64(&kp->kp_nonce_send, 1); + *send = atomic_fetchadd_64(&kp->kp_nonce_send, 1); #else 
rw_wlock(&kp->kp_nonce_lock); *send = kp->kp_nonce_send++; @@ -804,7 +804,7 @@ #endif if (*send < REJECT_AFTER_MESSAGES) return (0); - ck_pr_store_bool(&kp->kp_can_send, false); + atomic_store_bool(&kp->kp_can_send, false); return (EINVAL); } @@ -835,7 +835,7 @@ (i + index_current) & ((COUNTER_BITS_TOTAL / COUNTER_BITS) - 1)] = 0; #ifdef __LP64__ - ck_pr_store_64(&kp->kp_nonce_recv, recv); + atomic_store_64(&kp->kp_nonce_recv, recv); #else kp->kp_nonce_recv = recv; #endif @@ -862,12 +862,12 @@ uint64_t nonce; NET_EPOCH_ENTER(et); - current = ck_pr_load_ptr(&r->r_current); - keep_key_fresh = current != NULL && ck_pr_load_bool(&current->kp_can_send); + current = atomic_load_ptr(&r->r_current); + keep_key_fresh = current != NULL && atomic_load_bool(&current->kp_can_send); if (!keep_key_fresh) goto out; #ifdef __LP64__ - nonce = ck_pr_load_64(&current->kp_nonce_send); + nonce = atomic_load_64(&current->kp_nonce_send); #else rw_rlock(&current->kp_nonce_lock); nonce = current->kp_nonce_send; @@ -891,8 +891,8 @@ int keep_key_fresh; NET_EPOCH_ENTER(et); - current = ck_pr_load_ptr(&r->r_current); - keep_key_fresh = current != NULL && ck_pr_load_bool(&current->kp_can_send) && + current = atomic_load_ptr(&r->r_current); + keep_key_fresh = current != NULL && atomic_load_bool(&current->kp_can_send) && current->kp_is_initiator && noise_timer_expired(current->kp_birthdate, REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT, 0); NET_EPOCH_EXIT(et); @@ -920,7 +920,7 @@ int ret; #ifdef __LP64__ - cur_nonce = ck_pr_load_64(&kp->kp_nonce_recv); + cur_nonce = atomic_load_64(&kp->kp_nonce_recv); #else rw_rlock(&kp->kp_nonce_lock); cur_nonce = kp->kp_nonce_recv;