Index: sys/netinet/cc/cc.h =================================================================== --- sys/netinet/cc/cc.h +++ sys/netinet/cc/cc.h @@ -72,6 +72,15 @@ /* Define the new net.inet.tcp.cc sysctl tree. */ SYSCTL_DECL(_net_inet_tcp_cc); +/* For CC modules that use hystart++ */ +extern uint32_t hystart_lowcwnd; +extern uint32_t hystart_minrtt_thresh; +extern uint32_t hystart_maxrtt_thresh; +extern uint32_t hystart_n_rttsamples; +extern uint32_t hystart_css_growth_div; +extern uint32_t hystart_css_rounds; +extern uint32_t hystart_bblogs; + /* CC housekeeping functions. */ int cc_register_algo(struct cc_algo *add_cc); int cc_deregister_algo(struct cc_algo *remove_cc); @@ -106,6 +115,9 @@ #define CCF_CHG_MAX_CWND 0x0080 /* Cubic max_cwnd changed, for K */ #define CCF_USR_IWND 0x0100 /* User specified initial window */ #define CCF_USR_IWND_INIT_NSEG 0x0200 /* Convert segs to bytes on conn init */ +#define CCF_HYSTART_ALLOWED 0x0400 /* If the CC supports it Hystart is allowed */ +#define CCF_HYSTART_CAN_SH_CWND 0x0800 /* Can hystart when going CSS -> CA slam the cwnd */ +#define CCF_HYSTART_CONS_SSTH 0x1000 /* Should hystart use the more conservative ssthresh */ /* ACK types passed to the ack_received() hook. */ #define CC_ACK 0x0001 /* Regular in sequence ACK. */ Index: sys/netinet/cc/cc.c =================================================================== --- sys/netinet/cc/cc.c +++ sys/netinet/cc/cc.c @@ -84,6 +84,14 @@ #define CC_DEFAULT "newreno" #endif +uint32_t hystart_lowcwnd = 16; +uint32_t hystart_minrtt_thresh = 4000; +uint32_t hystart_maxrtt_thresh = 16000; +uint32_t hystart_n_rttsamples = 8; +uint32_t hystart_css_growth_div = 4; +uint32_t hystart_css_rounds = 5; +uint32_t hystart_bblogs = 0; + MALLOC_DEFINE(M_CC_MEM, "CC Mem", "Congestion Control State memory"); /* @@ -583,6 +591,45 @@ NULL, 0, cc_list_available, "A", "List available congestion control algorithms"); +SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, hystartplusplus, + CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, + "New Reno related HyStart++ settings"); + +SYSCTL_UINT(_net_inet_tcp_cc_hystartplusplus, OID_AUTO, lowcwnd, + CTLFLAG_RW, + &hystart_lowcwnd, 16, + "The number of MSS in the CWND before HyStart++ is active"); + +SYSCTL_UINT(_net_inet_tcp_cc_hystartplusplus, OID_AUTO, minrtt_thresh, + CTLFLAG_RW, + &hystart_minrtt_thresh, 4000, + "HyStarts++ minimum RTT thresh used in clamp (in microseconds)"); + +SYSCTL_UINT(_net_inet_tcp_cc_hystartplusplus, OID_AUTO, maxrtt_thresh, + CTLFLAG_RW, + &hystart_maxrtt_thresh, 16000, + "HyStarts++ maximum RTT thresh used in clamp (in microseconds)"); + +SYSCTL_UINT(_net_inet_tcp_cc_hystartplusplus, OID_AUTO, n_rttsamples, + CTLFLAG_RW, + &hystart_n_rttsamples, 8, + "The number of RTT samples that must be seen to consider HyStart++"); + +SYSCTL_UINT(_net_inet_tcp_cc_hystartplusplus, OID_AUTO, css_growth_div, + CTLFLAG_RW, + &hystart_css_growth_div, 4, + "The divisor to the growth when in Hystart++ CSS"); + +SYSCTL_UINT(_net_inet_tcp_cc_hystartplusplus, OID_AUTO, css_rounds, + CTLFLAG_RW, + &hystart_css_rounds, 5, + "The number of rounds HyStart++ lasts in CSS before falling to CA"); + +SYSCTL_UINT(_net_inet_tcp_cc_hystartplusplus, OID_AUTO, bblogs, + CTLFLAG_RW, + &hystart_bblogs, 0, + "Do we enable HyStart++ Black Box logs to be generated if BB logging is on"); + VNET_DEFINE(int, cc_do_abe) = 0; SYSCTL_INT(_net_inet_tcp_cc, OID_AUTO, abe, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(cc_do_abe), 0, Index: sys/netinet/cc/cc_cubic.h =================================================================== 
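The sysctl defaults above feed the round-by-round exit test that the CUBIC and NewReno hooks perform later in this patch: once hystart_n_rttsamples RTT measurements have been taken in a round and the cwnd has grown past hystart_lowcwnd segments, the current round's minimum RTT is compared against last round's minimum plus a threshold clamped between minrtt_thresh and maxrtt_thresh. The following is a minimal userland sketch of that check, not kernel code; the function name and parameters are illustrative.

/*
 * Userland sketch of the HyStart++ CSS-entry test added in
 * cubic_does_slow_start() / newreno_ack_received().  The defaults mirror
 * the new net.inet.tcp.cc.hystartplusplus sysctls above.
 */
#include <stdbool.h>
#include <stdint.h>

#define HYSTART_LOWCWND		16	/* MSS in cwnd before HyStart++ is active */
#define HYSTART_MINRTT_THRESH	4000	/* microseconds */
#define HYSTART_MAXRTT_THRESH	16000	/* microseconds */
#define HYSTART_N_RTTSAMPLES	8

bool
hystart_should_enter_css(uint32_t cwnd, uint32_t maxseg,
    uint32_t rttsample_count, uint32_t lastround_minrtt,
    uint32_t currentround_minrtt)
{
	uint32_t rtt_thresh;

	/* Not enough samples yet, or cwnd still below the activation point. */
	if (rttsample_count < HYSTART_N_RTTSAMPLES ||
	    cwnd <= HYSTART_LOWCWND * maxseg)
		return (false);

	/* Clamp(minrtt_thresh, lastround_minrtt / 8, maxrtt_thresh). */
	rtt_thresh = lastround_minrtt >> 3;
	if (rtt_thresh < HYSTART_MINRTT_THRESH)
		rtt_thresh = HYSTART_MINRTT_THRESH;
	if (rtt_thresh > HYSTART_MAXRTT_THRESH)
		rtt_thresh = HYSTART_MAXRTT_THRESH;

	/* RTT has grown by at least the threshold: leave slow start for CSS. */
	return (currentround_minrtt >= lastround_minrtt + rtt_thresh);
}

With the defaults, a connection whose previous-round minimum RTT was 20 ms (20000 usec) computes 20000 / 8 = 2500 usec, clamps that up to 4000 usec, and therefore enters CSS only if the current round's minimum RTT reaches 24 ms.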
--- sys/netinet/cc/cc_cubic.h +++ sys/netinet/cc/cc_cubic.h @@ -78,6 +78,55 @@ */ #define CUBED_ROOT_MAX_ULONG 448845 +/* Flags used in the cubic structure */ +#define CUBICFLAG_CONG_EVENT 0x00000001 /* congestion experienced */ +#define CUBICFLAG_IN_SLOWSTART 0x00000002 /* in slow start */ +#define CUBICFLAG_IN_APPLIMIT 0x00000004 /* application limited */ +#define CUBICFLAG_RTO_EVENT 0x00000008 /* RTO experienced */ +#define CUBICFLAG_HYSTART_ENABLED 0x00000010 /* Hystart++ is enabled */ +#define CUBICFLAG_HYSTART_IN_CSS 0x00000020 /* We are in Hystart++ CSS */ + +/* Kernel only bits */ +#ifdef _KERNEL +struct cubic { + /* Cubic K in fixed point form with CUBIC_SHIFT worth of precision. */ + int64_t K; + /* Sum of RTT samples across an epoch in ticks. */ + int64_t sum_rtt_ticks; + /* cwnd at the most recent congestion event. */ + unsigned long max_cwnd; + /* cwnd at the previous congestion event. */ + unsigned long prev_max_cwnd; + /* A copy of prev_max_cwnd. Used for CC_RTO_ERR */ + unsigned long prev_max_cwnd_cp; + /* various flags */ + uint32_t flags; + /* Minimum observed rtt in ticks. */ + int min_rtt_ticks; + /* Mean observed rtt between congestion epochs. */ + int mean_rtt_ticks; + /* ACKs since last congestion event. */ + int epoch_ack_count; + /* Timestamp (in ticks) of arriving in congestion avoidance from last + * congestion event. + */ + int t_last_cong; + /* Timestamp (in ticks) of a previous congestion event. Used for + * CC_RTO_ERR. + */ + int t_last_cong_prev; + uint32_t css_baseline_minrtt; + uint32_t css_current_round_minrtt; + uint32_t css_lastround_minrtt; + uint32_t css_rttsample_count; + uint32_t css_entered_at_round; + uint32_t css_current_round; + uint32_t css_fas_at_css_entry; + uint32_t css_lowrtt_fas; + uint32_t css_last_fas; +}; +#endif + /* Userland only bits. */ #ifndef _KERNEL Index: sys/netinet/cc/cc_cubic.c =================================================================== --- sys/netinet/cc/cc_cubic.c +++ sys/netinet/cc/cc_cubic.c @@ -70,6 +70,8 @@ #include #include #include +#include +#include #include #include #include @@ -85,39 +87,9 @@ static void cubic_ssthresh_update(struct cc_var *ccv, uint32_t maxseg); static void cubic_after_idle(struct cc_var *ccv); static size_t cubic_data_sz(void); - -struct cubic { - /* Cubic K in fixed point form with CUBIC_SHIFT worth of precision. */ - int64_t K; - /* Sum of RTT samples across an epoch in ticks. */ - int64_t sum_rtt_ticks; - /* cwnd at the most recent congestion event. */ - unsigned long max_cwnd; - /* cwnd at the previous congestion event. */ - unsigned long prev_max_cwnd; - /* A copy of prev_max_cwnd. Used for CC_RTO_ERR */ - unsigned long prev_max_cwnd_cp; - /* various flags */ - uint32_t flags; -#define CUBICFLAG_CONG_EVENT 0x00000001 /* congestion experienced */ -#define CUBICFLAG_IN_SLOWSTART 0x00000002 /* in slow start */ -#define CUBICFLAG_IN_APPLIMIT 0x00000004 /* application limited */ -#define CUBICFLAG_RTO_EVENT 0x00000008 /* RTO experienced */ - /* Minimum observed rtt in ticks. */ - int min_rtt_ticks; - /* Mean observed rtt between congestion epochs. */ - int mean_rtt_ticks; - /* ACKs since last congestion event. */ - int epoch_ack_count; - /* Timestamp (in ticks) of arriving in congestion avoidance from last - * congestion event. - */ - int t_last_cong; - /* Timestamp (in ticks) of a previous congestion event. Used for - * CC_RTO_ERR. 
- */ - int t_last_cong_prev; -}; +static void cubic_newround(struct cc_var *ccv, uint32_t round_cnt); +static void cubic_rttsample(struct cc_var *ccv, uint32_t usec_rtt, + uint32_t rxtcnt, uint32_t fas); struct cc_algo cubic_cc_algo = { .name = "cubic", @@ -129,9 +101,132 @@ .mod_init = cubic_mod_init, .post_recovery = cubic_post_recovery, .after_idle = cubic_after_idle, - .cc_data_sz = cubic_data_sz + .cc_data_sz = cubic_data_sz, + .rttsample = cubic_rttsample, + .newround = cubic_newround }; +static void +cubic_log_hystart_event(struct cc_var *ccv, struct cubic *cubicd, uint8_t mod, uint32_t flex1) +{ + /* + * Types of logs (mod value) + * 1 - rtt_thresh in flex1, checking to see if RTT is to great. + * 2 - rtt is too great, rtt_thresh in flex1. + * 3 - CSS is active incr in flex1 + * 4 - A new round is beginning flex1 is round count + * 5 - A new RTT measurement flex1 is the new measurement. + * 6 - We enter CA ssthresh is also in flex1. + * 7 - Socket option to change hystart executed opt.val in flex1. + * 8 - Back out of CSS into SS, flex1 is the css_baseline_minrtt + * 9 - We enter CA, via an ECN mark. + * 10 - We enter CA, via a loss. + * 11 - We have slipped out of SS into CA via cwnd growth. + * 12 - After idle has re-enabled hystart++ + */ + struct tcpcb *tp; + + if (hystart_bblogs == 0) + return; + tp = ccv->ccvc.tcp; + if (tp->t_logstate != TCP_LOG_STATE_OFF) { + union tcp_log_stackspecific log; + struct timeval tv; + + memset(&log, 0, sizeof(log)); + log.u_bbr.flex1 = flex1; + log.u_bbr.flex2 = cubicd->css_current_round_minrtt; + log.u_bbr.flex3 = cubicd->css_lastround_minrtt; + log.u_bbr.flex4 = cubicd->css_rttsample_count; + log.u_bbr.flex5 = cubicd->css_entered_at_round; + log.u_bbr.flex6 = cubicd->css_baseline_minrtt; + /* We only need bottom 16 bits of flags */ + log.u_bbr.flex7 = cubicd->flags & 0x0000ffff; + log.u_bbr.flex8 = mod; + log.u_bbr.epoch = cubicd->css_current_round; + log.u_bbr.timeStamp = tcp_get_usecs(&tv); + log.u_bbr.lt_epoch = cubicd->css_fas_at_css_entry; + log.u_bbr.pkts_out = cubicd->css_last_fas; + log.u_bbr.delivered = cubicd->css_lowrtt_fas; + log.u_bbr.pkt_epoch = ccv->flags; + TCP_LOG_EVENTP(tp, NULL, + &tp->t_inpcb->inp_socket->so_rcv, + &tp->t_inpcb->inp_socket->so_snd, + TCP_HYSTART, 0, + 0, &log, false, &tv); + } +} + +static void +cubic_does_slow_start(struct cc_var *ccv, struct cubic *cubicd) +{ + /* + * In slow-start with ABC enabled and no RTO in sight? + * (Must not use abc_l_var > 1 if slow starting after + * an RTO. On RTO, snd_nxt = snd_una, so the + * snd_nxt == snd_max check is sufficient to + * handle this). + * + * XXXLAS: Find a way to signal SS after RTO that + * doesn't rely on tcpcb vars. + */ + u_int cw = CCV(ccv, snd_cwnd); + u_int incr = CCV(ccv, t_maxseg); + uint16_t abc_val; + + cubicd->flags |= CUBICFLAG_IN_SLOWSTART; + if (ccv->flags & CCF_USE_LOCAL_ABC) + abc_val = ccv->labc; + else + abc_val = V_tcp_abc_l_var; + if ((ccv->flags & CCF_HYSTART_ALLOWED) && + (cubicd->flags & CUBICFLAG_HYSTART_ENABLED) && + ((cubicd->flags & CUBICFLAG_HYSTART_IN_CSS) == 0)) { + /* + * Hystart is allowed and still enabled and we are not yet + * in CSS. Lets check to see if we can make a decision on + * if we need to go into CSS. 
+ */ + if ((cubicd->css_rttsample_count >= hystart_n_rttsamples) && + (CCV(ccv, snd_cwnd) > + (hystart_lowcwnd * tcp_fixed_maxseg(ccv->ccvc.tcp)))) { + uint32_t rtt_thresh; + + /* Clamp (minrtt_thresh, lastround/8, maxrtt_thresh) */ + rtt_thresh = (cubicd->css_lastround_minrtt >> 3); + if (rtt_thresh < hystart_minrtt_thresh) + rtt_thresh = hystart_minrtt_thresh; + if (rtt_thresh > hystart_maxrtt_thresh) + rtt_thresh = hystart_maxrtt_thresh; + cubic_log_hystart_event(ccv, cubicd, 1, rtt_thresh); + if (cubicd->css_current_round_minrtt >= (cubicd->css_lastround_minrtt + rtt_thresh)) { + /* Enter CSS */ + cubicd->flags |= CUBICFLAG_HYSTART_IN_CSS; + cubicd->css_fas_at_css_entry = cubicd->css_lowrtt_fas; + cubicd->css_baseline_minrtt = cubicd->css_current_round_minrtt; + cubicd->css_entered_at_round = cubicd->css_current_round; + cubic_log_hystart_event(ccv, cubicd, 2, rtt_thresh); + } + } + } + if (CCV(ccv, snd_nxt) == CCV(ccv, snd_max)) + incr = min(ccv->bytes_this_ack, + ccv->nsegs * abc_val * + CCV(ccv, t_maxseg)); + else + incr = min(ccv->bytes_this_ack, CCV(ccv, t_maxseg)); + + /* Only if Hystart is enabled will the flag get set */ + if (cubicd->flags & CUBICFLAG_HYSTART_IN_CSS) { + incr /= hystart_css_growth_div; + cubic_log_hystart_event(ccv, cubicd, 3, incr); + } + /* ABC is on by default, so incr equals 0 frequently. */ + if (incr > 0) + CCV(ccv, snd_cwnd) = min((cw + incr), + TCP_MAXWIN << CCV(ccv, snd_scale)); +} + static void cubic_ack_received(struct cc_var *ccv, uint16_t type) { @@ -151,9 +246,19 @@ /* Use the logic in NewReno ack_received() for slow start. */ if (CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) || cubic_data->min_rtt_ticks == TCPTV_SRTTBASE) { - cubic_data->flags |= CUBICFLAG_IN_SLOWSTART; - newreno_cc_ack_received(ccv, type); + cubic_does_slow_start(ccv, cubic_data); } else { + if (cubic_data->flags & CUBICFLAG_HYSTART_IN_CSS) { + /* + * We have slipped into CA with + * CSS active. Deactivate all. + */ + /* Turn off the CSS flag */ + cubic_data->flags &= ~CUBICFLAG_HYSTART_IN_CSS; + /* Disable use of CSS in the future except long idle */ + cubic_data->flags &= ~CUBICFLAG_HYSTART_ENABLED; + cubic_log_hystart_event(ccv, cubic_data, 11, CCV(ccv, snd_ssthresh)); + } if ((cubic_data->flags & CUBICFLAG_RTO_EVENT) && (cubic_data->flags & CUBICFLAG_IN_SLOWSTART)) { /* RFC8312 Section 4.7 */ @@ -245,7 +350,17 @@ cubic_data->max_cwnd = ulmax(cubic_data->max_cwnd, CCV(ccv, snd_cwnd)); cubic_data->K = cubic_k(cubic_data->max_cwnd / CCV(ccv, t_maxseg)); - + if ((cubic_data->flags & CUBICFLAG_HYSTART_ENABLED) == 0) { + if (CCV(ccv, snd_cwnd) <= (hystart_lowcwnd * tcp_fixed_maxseg(ccv->ccvc.tcp))) { + /* + * Re-enable hystart if our cwnd has fallen below + * the hystart lowcwnd point. 
+ */ + cubic_data->flags &= ~CUBICFLAG_HYSTART_IN_CSS; + cubic_data->flags |= CUBICFLAG_HYSTART_ENABLED; + cubic_log_hystart_event(ccv, cubic_data, 12, CCV(ccv, snd_ssthresh)); + } + } newreno_cc_after_idle(ccv); cubic_data->t_last_cong = ticks; } @@ -281,6 +396,17 @@ cubic_data->mean_rtt_ticks = 1; ccv->cc_data = cubic_data; + cubic_data->flags = CUBICFLAG_HYSTART_ENABLED; + /* At init set both to infinity */ + cubic_data->css_lastround_minrtt = 0xffffffff; + cubic_data->css_current_round_minrtt = 0xffffffff; + cubic_data->css_current_round = 0; + cubic_data->css_baseline_minrtt = 0xffffffff; + cubic_data->css_rttsample_count = 0; + cubic_data->css_entered_at_round = 0; + cubic_data->css_fas_at_css_entry = 0; + cubic_data->css_lowrtt_fas = 0; + cubic_data->css_last_fas = 0; return (0); } @@ -299,6 +425,12 @@ switch (type) { case CC_NDUPACK: + if (cubic_data->flags & CUBICFLAG_HYSTART_ENABLED) { + /* Make sure the flags are all off we had a loss */ + cubic_data->flags &= ~CUBICFLAG_HYSTART_ENABLED; + cubic_data->flags &= ~CUBICFLAG_HYSTART_IN_CSS; + cubic_log_hystart_event(ccv, cubic_data, 10, CCV(ccv, snd_ssthresh)); + } if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) { if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) { cubic_ssthresh_update(ccv, mss); @@ -311,6 +443,12 @@ break; case CC_ECN: + if (cubic_data->flags & CUBICFLAG_HYSTART_ENABLED) { + /* Make sure the flags are all off we had a loss */ + cubic_data->flags &= ~CUBICFLAG_HYSTART_ENABLED; + cubic_data->flags &= ~CUBICFLAG_HYSTART_IN_CSS; + cubic_log_hystart_event(ccv, cubic_data, 9, CCV(ccv, snd_ssthresh)); + } if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) { cubic_ssthresh_update(ccv, mss); cubic_data->flags |= CUBICFLAG_CONG_EVENT; @@ -495,5 +633,79 @@ CCV(ccv, snd_ssthresh) = max(ssthresh, 2 * maxseg); } +static void +cubic_rttsample(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas) +{ + struct cubic *cubicd; + + cubicd = ccv->cc_data; + if (rxtcnt > 1) { + /* + * Only look at RTT's that are non-ambiguous. + */ + return; + } + cubicd->css_rttsample_count++; + cubicd->css_last_fas = fas; + if (cubicd->css_current_round_minrtt > usec_rtt) { + cubicd->css_current_round_minrtt = usec_rtt; + cubicd->css_lowrtt_fas = cubicd->css_last_fas; + } + if ((cubicd->flags & CUBICFLAG_HYSTART_IN_CSS) && + (cubicd->css_rttsample_count >= hystart_n_rttsamples) && + (cubicd->css_baseline_minrtt > cubicd->css_current_round_minrtt)) { + /* + * We were in CSS and the RTT is now less, we + * entered CSS erroneously. + */ + cubicd->flags &= ~CUBICFLAG_HYSTART_IN_CSS; + cubic_log_hystart_event(ccv, cubicd, 8, cubicd->css_baseline_minrtt); + cubicd->css_baseline_minrtt = 0xffffffff; + } + if (cubicd->flags & CUBICFLAG_HYSTART_ENABLED) + cubic_log_hystart_event(ccv, cubicd, 5, usec_rtt); +} + +static void +cubic_newround(struct cc_var *ccv, uint32_t round_cnt) +{ + struct cubic *cubicd; + + cubicd = ccv->cc_data; + /* We have entered a new round */ + cubicd->css_lastround_minrtt = cubicd->css_current_round_minrtt; + cubicd->css_current_round_minrtt = 0xffffffff; + cubicd->css_rttsample_count = 0; + cubicd->css_current_round = round_cnt; + if ((cubicd->flags & CUBICFLAG_HYSTART_IN_CSS) && + ((round_cnt - cubicd->css_entered_at_round) >= hystart_css_rounds)) { + /* Enter CA */ + if (ccv->flags & CCF_HYSTART_CAN_SH_CWND) { + /* + * We engage more than snd_ssthresh, engage + * the brakes!! Though we will stay in SS to + * creep back up again, so lets leave CSS active + * and give us hystart_css_rounds more rounds. 
+ */ + if (ccv->flags & CCF_HYSTART_CONS_SSTH) { + CCV(ccv, snd_ssthresh) = ((cubicd->css_lowrtt_fas + cubicd->css_fas_at_css_entry) / 2); + } else { + CCV(ccv, snd_ssthresh) = cubicd->css_lowrtt_fas; + } + CCV(ccv, snd_cwnd) = cubicd->css_fas_at_css_entry; + cubicd->css_entered_at_round = round_cnt; + } else { + CCV(ccv, snd_ssthresh) = CCV(ccv, snd_cwnd); + /* Turn off the CSS flag */ + cubicd->flags &= ~CUBICFLAG_HYSTART_IN_CSS; + /* Disable use of CSS in the future except long idle */ + cubicd->flags &= ~CUBICFLAG_HYSTART_ENABLED; + } + cubic_log_hystart_event(ccv, cubicd, 6, CCV(ccv, snd_ssthresh)); + } + if (cubicd->flags & CUBICFLAG_HYSTART_ENABLED) + cubic_log_hystart_event(ccv, cubicd, 4, round_cnt); +} + DECLARE_CC_MODULE(cubic, &cubic_cc_algo); MODULE_VERSION(cubic, 2); Index: sys/netinet/cc/cc_newreno.h =================================================================== --- sys/netinet/cc/cc_newreno.h +++ sys/netinet/cc/cc_newreno.h @@ -53,13 +53,9 @@ #define CC_NEWRENO_BETA 1 /* Beta for normal DUP-ACK/Sack recovery */ #define CC_NEWRENO_BETA_ECN 2 /* ECN Beta for Abe */ -#define CC_NEWRENO_ENABLE_HYSTART 3 /* Enable hystart */ /* Flags values */ -#define CC_NEWRENO_HYSTART_ALLOWED 0x0001 /* Does the tcp connection allow hystart? */ #define CC_NEWRENO_HYSTART_ENABLED 0x0002 /* We can do hystart, a loss removes this flag */ #define CC_NEWRENO_HYSTART_IN_CSS 0x0004 /* If we enter hystart CSS this flag is set */ -#define CC_NEWRENO_HYSTART_CAN_SH_CWND 0x0008 /* Can hystart when going CSS -> CA slam the cwnd */ -#define CC_NEWRENO_HYSTART_CONS_SSTH 0x0010 /* Should hystart use the more conservative sstrhesh */ #define CC_NEWRENO_BETA_ECN_ENABLED 0x0020 #endif /* _CC_NEWRENO_H */ Index: sys/netinet/cc/cc_newreno.c =================================================================== --- sys/netinet/cc/cc_newreno.c +++ sys/netinet/cc/cc_newreno.c @@ -116,14 +116,6 @@ .cc_data_sz = newreno_data_sz, }; -static uint32_t hystart_lowcwnd = 16; -static uint32_t hystart_minrtt_thresh = 4000; -static uint32_t hystart_maxrtt_thresh = 16000; -static uint32_t hystart_n_rttsamples = 8; -static uint32_t hystart_css_growth_div = 4; -static uint32_t hystart_css_rounds = 5; -static uint32_t hystart_bblogs = 0; - static void newreno_log_hystart_event(struct cc_var *ccv, struct newreno *nreno, uint8_t mod, uint32_t flex1) { @@ -137,6 +129,10 @@ * 6 - We enter CA ssthresh is also in flex1. * 7 - Socket option to change hystart executed opt.val in flex1. * 8 - Back out of CSS into SS, flex1 is the css_baseline_minrtt + * 9 - We enter CA, via an ECN mark. + * 10 - We enter CA, via a loss. + * 11 - We have slipped out of SS into CA via cwnd growth. 
+ * 12 - After idle has re-enabled hystart++ */ struct tcpcb *tp; @@ -162,6 +158,7 @@ log.u_bbr.lt_epoch = nreno->css_fas_at_css_entry; log.u_bbr.pkts_out = nreno->css_last_fas; log.u_bbr.delivered = nreno->css_lowrtt_fas; + log.u_bbr.pkt_epoch = ccv->flags; TCP_LOG_EVENTP(tp, NULL, &tp->t_inpcb->inp_socket->so_rcv, &tp->t_inpcb->inp_socket->so_snd, @@ -265,6 +262,7 @@ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS; /* Disable use of CSS in the future except long idle */ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED; + newreno_log_hystart_event(ccv, nreno, 11, CCV(ccv, snd_ssthresh)); } if (V_tcp_do_rfc3465) { if (ccv->flags & CCF_ABC_SENTAWND) @@ -290,7 +288,7 @@ abc_val = ccv->labc; else abc_val = V_tcp_abc_l_var; - if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_ALLOWED) && + if ((ccv->flags & CCF_HYSTART_ALLOWED) && (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) && ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) == 0)) { /* @@ -355,6 +353,7 @@ */ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS; nreno->newreno_flags |= CC_NEWRENO_HYSTART_ENABLED; + newreno_log_hystart_event(ccv, nreno, 12, CCV(ccv, snd_ssthresh)); } } } @@ -400,6 +399,7 @@ /* Make sure the flags are all off we had a loss */ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED; nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS; + newreno_log_hystart_event(ccv, nreno, 10, CCV(ccv, snd_ssthresh)); } if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) { if (IN_CONGRECOVERY(CCV(ccv, t_flags) && @@ -418,6 +418,7 @@ /* Make sure the flags are all off we had a loss */ nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED; nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS; + newreno_log_hystart_event(ccv, nreno, 9, CCV(ccv, snd_ssthresh)); } if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) { CCV(ccv, snd_ssthresh) = cwin; @@ -460,18 +461,6 @@ nreno->beta_ecn = opt->val; nreno->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; break; - case CC_NEWRENO_ENABLE_HYSTART: - /* Allow hystart on this connection */ - if (opt->val != 0) { - nreno->newreno_flags |= CC_NEWRENO_HYSTART_ALLOWED; - if (opt->val > 1) - nreno->newreno_flags |= CC_NEWRENO_HYSTART_CAN_SH_CWND; - if (opt->val > 2) - nreno->newreno_flags |= CC_NEWRENO_HYSTART_CONS_SSTH; - } else - nreno->newreno_flags &= ~(CC_NEWRENO_HYSTART_ALLOWED|CC_NEWRENO_HYSTART_CAN_SH_CWND|CC_NEWRENO_HYSTART_CONS_SSTH); - newreno_log_hystart_event(ccv, nreno, 7, opt->val); - break; default: return (ENOPROTOOPT); } @@ -486,17 +475,6 @@ opt->val = (nreno == NULL) ? V_newreno_beta_ecn : nreno->beta_ecn; break; - case CC_NEWRENO_ENABLE_HYSTART: - if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ALLOWED) { - if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH) - opt->val = 3; - else if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CAN_SH_CWND) - opt->val = 2; - else - opt->val = 1; - } else - opt->val = 0; - break; default: return (ENOPROTOOPT); } @@ -542,14 +520,14 @@ if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) && ((round_cnt - nreno->css_entered_at_round) >= hystart_css_rounds)) { /* Enter CA */ - if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CAN_SH_CWND) { + if (ccv->flags & CCF_HYSTART_CAN_SH_CWND) { /* * We engage more than snd_ssthresh, engage * the brakes!! Though we will stay in SS to * creep back up again, so lets leave CSS active * and give us hystart_css_rounds more rounds. 
*/ - if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH) { + if (ccv->flags & CCF_HYSTART_CONS_SSTH) { CCV(ccv, snd_ssthresh) = ((nreno->css_lowrtt_fas + nreno->css_fas_at_css_entry) / 2); } else { CCV(ccv, snd_ssthresh) = nreno->css_lowrtt_fas; @@ -565,7 +543,8 @@ } newreno_log_hystart_event(ccv, nreno, 6, CCV(ccv, snd_ssthresh)); } - newreno_log_hystart_event(ccv, nreno, 4, round_cnt); + if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) + newreno_log_hystart_event(ccv, nreno, 4, round_cnt); } static void @@ -597,7 +576,8 @@ newreno_log_hystart_event(ccv, nreno, 8, nreno->css_baseline_minrtt); nreno->css_baseline_minrtt = 0xffffffff; } - newreno_log_hystart_event(ccv, nreno, 5, usec_rtt); + if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) + newreno_log_hystart_event(ccv, nreno, 5, usec_rtt); } SYSCTL_DECL(_net_inet_tcp_cc_newreno); @@ -615,45 +595,5 @@ &VNET_NAME(newreno_beta_ecn), 3, &newreno_beta_handler, "IU", "New Reno beta ecn, specified as number between 1 and 100"); -SYSCTL_NODE(_net_inet_tcp_cc_newreno, OID_AUTO, hystartplusplus, - CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, - "New Reno related HyStart++ settings"); - -SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, lowcwnd, - CTLFLAG_RW, - &hystart_lowcwnd, 16, - "The number of MSS in the CWND before HyStart++ is active"); - -SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, minrtt_thresh, - CTLFLAG_RW, - &hystart_minrtt_thresh, 4000, - "HyStarts++ minimum RTT thresh used in clamp (in microseconds)"); - -SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, maxrtt_thresh, - CTLFLAG_RW, - &hystart_maxrtt_thresh, 16000, - "HyStarts++ maximum RTT thresh used in clamp (in microseconds)"); - -SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, n_rttsamples, - CTLFLAG_RW, - &hystart_n_rttsamples, 8, - "The number of RTT samples that must be seen to consider HyStart++"); - -SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, css_growth_div, - CTLFLAG_RW, - &hystart_css_growth_div, 4, - "The divisor to the growth when in Hystart++ CSS"); - -SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, css_rounds, - CTLFLAG_RW, - &hystart_css_rounds, 5, - "The number of rounds HyStart++ lasts in CSS before falling to CA"); - -SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, bblogs, - CTLFLAG_RW, - &hystart_bblogs, 0, - "Do we enable HyStart++ Black Box logs to be generated if BB logging is on"); - - DECLARE_CC_MODULE(newreno, &newreno_cc_algo); MODULE_VERSION(newreno, 2); Index: sys/netinet/tcp_stacks/rack.c =================================================================== --- sys/netinet/tcp_stacks/rack.c +++ sys/netinet/tcp_stacks/rack.c @@ -12853,15 +12853,11 @@ rack_convert_rtts(tp); tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); if (rack_do_hystart) { - struct sockopt sopt; - struct cc_newreno_opts opt; - - sopt.sopt_valsize = sizeof(struct cc_newreno_opts); - sopt.sopt_dir = SOPT_SET; - opt.name = CC_NEWRENO_ENABLE_HYSTART; - opt.val = rack_do_hystart; - if (CC_ALGO(tp)->ctl_output != NULL) - (void)CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); + tp->ccv->flags |= CCF_HYSTART_ALLOWED; + if (rack_do_hystart > 1) + tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; + if (rack_do_hystart > 2) + tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; } if (rack_def_profile) rack_set_profile(rack, rack_def_profile); @@ -13506,7 +13502,6 @@ } } - static int rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) { 
@@ -13776,6 +13771,20 @@ (((ae->ack - high_seq) + segsiz - 1) / segsiz)); #endif high_seq = ae->ack; + if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { + union tcp_log_stackspecific log; + struct timeval tv; + + memset(&log.u_bbr, 0, sizeof(log.u_bbr)); + log.u_bbr.timeStamp = tcp_get_usecs(&tv); + log.u_bbr.flex1 = high_seq; + log.u_bbr.flex2 = rack->r_ctl.roundends; + log.u_bbr.flex3 = rack->r_ctl.current_round; + log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; + log.u_bbr.flex8 = 8; + tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, + 0, &log, false, NULL, NULL, 0, &tv); + } if (SEQ_GEQ(high_seq, rack->r_ctl.roundends)) { rack->r_ctl.current_round++; rack->r_ctl.roundends = tp->snd_max; @@ -14200,7 +14209,7 @@ * us_cts - is the time that LRO or hardware actually got the packet in microseconds. */ uint32_t cts, us_cts, ms_cts; - uint32_t tiwin; + uint32_t tiwin, high_seq; struct timespec ts; struct tcpopt to; struct tcp_rack *rack; @@ -14307,6 +14316,7 @@ tp->t_flags &= ~TF_GPUTINPROG; } } + high_seq = th->th_ack; if (tp->t_logstate != TCP_LOG_STATE_OFF) { union tcp_log_stackspecific log; struct timeval ltv; @@ -14663,6 +14673,20 @@ rack_free_trim(rack); } /* Update any rounds needed */ + if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { + union tcp_log_stackspecific log; + struct timeval tv; + + memset(&log.u_bbr, 0, sizeof(log.u_bbr)); + log.u_bbr.timeStamp = tcp_get_usecs(&tv); + log.u_bbr.flex1 = high_seq; + log.u_bbr.flex2 = rack->r_ctl.roundends; + log.u_bbr.flex3 = rack->r_ctl.current_round; + log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; + log.u_bbr.flex8 = 9; + tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, + 0, &log, false, NULL, NULL, 0, &tv); + } if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends)) { rack->r_ctl.current_round++; rack->r_ctl.roundends = tp->snd_max; @@ -20244,17 +20268,15 @@ break; case TCP_RACK_ENABLE_HYSTART: { - struct sockopt sopt; - struct cc_newreno_opts opt; - - sopt.sopt_valsize = sizeof(struct cc_newreno_opts); - sopt.sopt_dir = SOPT_SET; - opt.name = CC_NEWRENO_ENABLE_HYSTART; - opt.val = optval; - if (CC_ALGO(tp)->ctl_output != NULL) - error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); - else - error = EINVAL; + if (optval) { + tp->ccv->flags |= CCF_HYSTART_ALLOWED; + if (rack_do_hystart > RACK_HYSTART_ON) + tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; + if (rack_do_hystart > RACK_HYSTART_ON_W_SC) + tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; + } else { + tp->ccv->flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); + } } break; case TCP_RACK_REORD_THRESH: @@ -20702,17 +20724,15 @@ break; case TCP_RACK_ENABLE_HYSTART: { - struct sockopt sopt; - struct cc_newreno_opts opt; - - sopt.sopt_valsize = sizeof(struct cc_newreno_opts); - sopt.sopt_dir = SOPT_GET; - opt.name = CC_NEWRENO_ENABLE_HYSTART; - if (CC_ALGO(tp)->ctl_output != NULL) - error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); - else - error = EINVAL; - optval = opt.val; + if (tp->ccv->flags & CCF_HYSTART_ALLOWED) { + optval = RACK_HYSTART_ON; + if (tp->ccv->flags & CCF_HYSTART_CAN_SH_CWND) + optval = RACK_HYSTART_ON_W_SC; + if (tp->ccv->flags & CCF_HYSTART_CONS_SSTH) + optval = RACK_HYSTART_ON_W_SC_C; + } else { + optval = RACK_HYSTART_OFF; + } } break; case TCP_FAST_RSM_HACK: Index: sys/netinet/tcp_stacks/tcp_rack.h =================================================================== --- sys/netinet/tcp_stacks/tcp_rack.h +++ sys/netinet/tcp_stacks/tcp_rack.h @@ -525,6 +525,14 @@ #define 
 RACK_TIMELY_CNT_BOOST 5 /* At 5th increase boost */
 #define RACK_MINRTT_FILTER_TIM 10 /* Seconds */
+#define RACK_HYSTART_OFF 0
+#define RACK_HYSTART_ON 1 /* hystart++ on */
+#define RACK_HYSTART_ON_W_SC 2 /* hystart++ on +Slam Cwnd */
+#define RACK_HYSTART_ON_W_SC_C 3 /* hystart++ on,
+ * Conservative ssthresh and
+ * +Slam cwnd
+ */
+
 #ifdef _KERNEL
 struct tcp_rack {
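For reference, the CSS exit logic that cubic_newround() and newreno_newround() now share works as follows: after hystart_css_rounds rounds in CSS, a connection whose stack set CCF_HYSTART_CAN_SH_CWND has its cwnd pulled back to the flight size recorded at CSS entry, with ssthresh taken from the lowest-RTT flight size (or the average of the two when CCF_HYSTART_CONS_SSTH is set), and CSS stays armed for another hystart_css_rounds rounds; otherwise ssthresh is simply frozen at the current cwnd and HyStart++ is disabled until a long idle period re-enables it. The sketch below restates that decision with illustrative userland types and names; it is not part of the patch.

/*
 * Standalone sketch of the CSS -> CA decision made in the newround() hooks
 * once hystart_css_rounds rounds have elapsed since CSS entry.
 */
#include <stdbool.h>
#include <stdint.h>

struct css_state {
	uint32_t fas_at_css_entry;	/* flight size when CSS was entered */
	uint32_t lowrtt_fas;		/* flight size at the lowest RTT seen */
	uint32_t entered_at_round;	/* round count at CSS entry */
	bool	 in_css;		/* currently in CSS */
	bool	 hystart_enabled;	/* HyStart++ still usable */
};

void
css_exit_to_ca(struct css_state *css, uint32_t round_cnt, uint32_t *cwnd,
    uint32_t *ssthresh, bool can_slam_cwnd, bool conservative_ssthresh)
{
	if (can_slam_cwnd) {
		/*
		 * Pull cwnd back to the flight size seen at CSS entry and
		 * derive ssthresh from the low-RTT flight size; the
		 * conservative variant averages the two.  CSS remains
		 * active for another hystart_css_rounds rounds.
		 */
		if (conservative_ssthresh)
			*ssthresh = (css->lowrtt_fas +
			    css->fas_at_css_entry) / 2;
		else
			*ssthresh = css->lowrtt_fas;
		*cwnd = css->fas_at_css_entry;
		css->entered_at_round = round_cnt;
	} else {
		/* Freeze ssthresh at the current cwnd and fall into CA. */
		*ssthresh = *cwnd;
		css->in_css = false;
		css->hystart_enabled = false;
	}
}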