diff --git a/sys/netinet/cc/cc.h b/sys/netinet/cc/cc.h
--- a/sys/netinet/cc/cc.h
+++ b/sys/netinet/cc/cc.h
@@ -163,6 +163,15 @@
 	/* Called for an additional ECN processing apart from RFC3168. */
 	void	(*ecnpkt_handler)(struct cc_var *ccv);
 
+	/* Called when a new "round" begins, if the transport is tracking rounds. */
+	void	(*newround)(struct cc_var *ccv, uint32_t round_cnt);
+
+	/*
+	 * Called when an RTT sample is made (fas = flight at send; if the
+	 * caller does not track it, pass the cwnd in instead).
+	 */
+	void	(*rttsample)(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas);
+
 	/* Called for {get|set}sockopt() on a TCP socket with TCP_CCALGOOPT. */
 	int	(*ctl_output)(struct cc_var *, struct sockopt *, void *);
 
diff --git a/sys/netinet/cc/cc_newreno.h b/sys/netinet/cc/cc_newreno.h
--- a/sys/netinet/cc/cc_newreno.h
+++ b/sys/netinet/cc/cc_newreno.h
@@ -35,6 +35,15 @@
 	uint32_t beta;
 	uint32_t beta_ecn;
 	uint32_t newreno_flags;
+	uint32_t css_baseline_minrtt;
+	uint32_t css_current_round_minrtt;
+	uint32_t css_lastround_minrtt;
+	uint32_t css_rttsample_count;
+	uint32_t css_entered_at_round;
+	uint32_t css_current_round;
+	uint32_t css_fas_at_css_entry;
+	uint32_t css_lowrtt_fas;
+	uint32_t css_last_fas;
 };
 
 struct cc_newreno_opts {
@@ -42,6 +51,15 @@
 	uint32_t val;
 };
 
-#define CC_NEWRENO_BETA		1	/* Beta for normal DUP-ACK/Sack recovery */
-#define CC_NEWRENO_BETA_ECN	2	/* ECN Beta for Abe */
+#define CC_NEWRENO_BETA		1	/* Beta for normal DUP-ACK/SACK recovery */
+#define CC_NEWRENO_BETA_ECN	2	/* ECN beta for ABE */
+#define CC_NEWRENO_ENABLE_HYSTART 3	/* Enable HyStart++ */
+
+/* Flag values */
+#define CC_NEWRENO_HYSTART_ALLOWED	0x0001	/* Does the TCP connection allow HyStart++? */
+#define CC_NEWRENO_HYSTART_ENABLED	0x0002	/* We can do HyStart++; a loss removes this flag. */
+#define CC_NEWRENO_HYSTART_IN_CSS	0x0004	/* Set while HyStart++ is in CSS. */
+#define CC_NEWRENO_HYSTART_CAN_SH_CWND	0x0008	/* May HyStart++ slam the cwnd when going CSS -> CA? */
+#define CC_NEWRENO_HYSTART_CONS_SSTH	0x0010	/* Should HyStart++ use the more conservative ssthresh? */
+#define CC_NEWRENO_BETA_ECN_ENABLED	0x0020
 
 #endif /* _CC_NEWRENO_H */
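Both hooks are optional: a cc module that does not implement them leaves the pointers NULL, and a transport that does not track rounds never calls them. For orientation, a minimal sketch of a caller, modeled on the rack.c changes later in this patch; the helper and its round-state parameters are illustrative, not part of the change:

    /*
     * Hypothetical ACK-path helper showing how a transport might drive
     * the two new hooks. round_cnt/roundends mirror the per-connection
     * state rack keeps in r_ctl; nothing here is from the patch itself.
     */
    static void
    example_on_ack(struct tcpcb *tp, tcp_seq ack, uint32_t usec_rtt,
        uint32_t rexmit_cnt, uint32_t flight_at_send,
        uint32_t *round_cnt, tcp_seq *roundends)
    {
            /* Offer every usable RTT sample to the CC module. */
            if (CC_ALGO(tp)->rttsample != NULL)
                    CC_ALGO(tp)->rttsample(tp->ccv, usec_rtt, rexmit_cnt,
                        flight_at_send);
            /* A round ends once the ACK covers snd_max as of round start. */
            if (SEQ_GEQ(ack, *roundends)) {
                    (*round_cnt)++;
                    *roundends = tp->snd_max;
                    if (CC_ALGO(tp)->newround != NULL)
                            CC_ALGO(tp)->newround(tp->ccv, *round_cnt);
            }
    }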
diff --git a/sys/netinet/cc/cc_newreno.c b/sys/netinet/cc/cc_newreno.c
--- a/sys/netinet/cc/cc_newreno.c
+++ b/sys/netinet/cc/cc_newreno.c
@@ -63,15 +63,21 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -85,6 +91,9 @@
 static void	newreno_cong_signal(struct cc_var *ccv, uint32_t type);
 static void	newreno_post_recovery(struct cc_var *ccv);
 static int	newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf);
+static void	newreno_newround(struct cc_var *ccv, uint32_t round_cnt);
+static void	newreno_rttsample(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas);
+static int	newreno_cb_init(struct cc_var *ccv);
 
 VNET_DEFINE(uint32_t, newreno_beta) = 50;
 VNET_DEFINE(uint32_t, newreno_beta_ecn) = 80;
@@ -99,23 +108,95 @@
 	.cong_signal = newreno_cong_signal,
 	.post_recovery = newreno_post_recovery,
 	.ctl_output = newreno_ctl_output,
+	.newround = newreno_newround,
+	.rttsample = newreno_rttsample,
+	.cb_init = newreno_cb_init,
 };
 
-static inline struct newreno *
-newreno_malloc(struct cc_var *ccv)
-{
-	struct newreno *nreno;
+static uint32_t hystart_lowcwnd = 16;
+static uint32_t hystart_minrtt_thresh = 4000;
+static uint32_t hystart_maxrtt_thresh = 16000;
+static uint32_t hystart_n_rttsamples = 8;
+static uint32_t hystart_css_growth_div = 4;
+static uint32_t hystart_css_rounds = 5;
+static uint32_t hystart_bblogs = 0;
 
-	nreno = malloc(sizeof(struct newreno), M_NEWRENO, M_NOWAIT);
-	if (nreno != NULL) {
-		/* NB: nreno is not zeroed, so initialise all fields. */
-		nreno->beta = V_newreno_beta;
-		nreno->beta_ecn = V_newreno_beta_ecn;
-		nreno->newreno_flags = 0;
-		ccv->cc_data = nreno;
+static void
+newreno_log_hystart_event(struct cc_var *ccv, struct newreno *nreno, uint8_t mod, uint32_t flex1)
+{
+	/*
+	 * Types of logs (mod value)
+	 * 1 - rtt_thresh in flex1, checking to see if the RTT is too great.
+	 * 2 - RTT is too great, rtt_thresh in flex1.
+	 * 3 - CSS is active, incr in flex1.
+	 * 4 - A new round is beginning, flex1 is the round count.
+	 * 5 - A new RTT measurement, flex1 is the new measurement.
+	 * 6 - We enter CA, ssthresh is also in flex1.
+	 * 7 - Socket option to change HyStart++ executed, opt.val in flex1.
+	 * 8 - Back out of CSS into SS, flex1 is the css_baseline_minrtt.
+	 */
+	struct tcpcb *tp;
+
+	if (hystart_bblogs == 0)
+		return;
+	tp = ccv->ccvc.tcp;
+	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
+		union tcp_log_stackspecific log;
+		struct timeval tv;
+
+		memset(&log, 0, sizeof(log));
+		log.u_bbr.flex1 = flex1;
+		log.u_bbr.flex2 = nreno->css_current_round_minrtt;
+		log.u_bbr.flex3 = nreno->css_lastround_minrtt;
+		log.u_bbr.flex4 = nreno->css_rttsample_count;
+		log.u_bbr.flex5 = nreno->css_entered_at_round;
+		log.u_bbr.flex6 = nreno->css_baseline_minrtt;
+		/* We only need the bottom 16 bits of flags */
+		log.u_bbr.flex7 = nreno->newreno_flags & 0x0000ffff;
+		log.u_bbr.flex8 = mod;
+		log.u_bbr.epoch = nreno->css_current_round;
+		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
+		log.u_bbr.lt_epoch = nreno->css_fas_at_css_entry;
+		log.u_bbr.pkts_out = nreno->css_last_fas;
+		log.u_bbr.delivered = nreno->css_lowrtt_fas;
+		TCP_LOG_EVENTP(tp, NULL,
+		    &tp->t_inpcb->inp_socket->so_rcv,
+		    &tp->t_inpcb->inp_socket->so_snd,
+		    TCP_HYSTART, 0,
+		    0, &log, false, &tv);
 	}
+}
+
+static int
+newreno_cb_init(struct cc_var *ccv)
+{
+	struct newreno *nreno;
 
-	return (nreno);
+	ccv->cc_data = NULL;
+	ccv->cc_data = malloc(sizeof(struct newreno), M_NEWRENO, M_NOWAIT);
+	if (ccv->cc_data == NULL)
+		return (ENOMEM);
+	nreno = (struct newreno *)ccv->cc_data;
+	/* NB: nreno is not zeroed, so initialise all fields. */
+	nreno->beta = V_newreno_beta;
+	nreno->beta_ecn = V_newreno_beta_ecn;
+	/*
+	 * We set the enabled flag up front so that HyStart++ can take
+	 * effect if the socket option gets strobed and we have not yet
+	 * hit a loss.
+	 */
+	nreno->newreno_flags = CC_NEWRENO_HYSTART_ENABLED;
+	/* At init set both to infinity */
+	nreno->css_lastround_minrtt = 0xffffffff;
+	nreno->css_current_round_minrtt = 0xffffffff;
+	nreno->css_current_round = 0;
+	nreno->css_baseline_minrtt = 0xffffffff;
+	nreno->css_rttsample_count = 0;
+	nreno->css_entered_at_round = 0;
+	nreno->css_fas_at_css_entry = 0;
+	nreno->css_lowrtt_fas = 0;
+	nreno->css_last_fas = 0;
+	return (0);
 }
 
 static void
@@ -127,6 +208,9 @@
 static void
 newreno_ack_received(struct cc_var *ccv, uint16_t type)
 {
+	struct newreno *nreno;
+
+	nreno = (struct newreno *)ccv->cc_data;
 	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
 	    (ccv->flags & CCF_CWND_LIMITED)) {
 		u_int cw = CCV(ccv, snd_cwnd);
@@ -160,6 +244,16 @@
 		 * avoid capping cwnd.
 		 */
 		if (cw > CCV(ccv, snd_ssthresh)) {
+			if (nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) {
+				/*
+				 * We have slipped into CA with
+				 * CSS active. Deactivate all.
+				 */
+				/* Turn off the CSS flag */
+				nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+				/* Disable use of CSS in the future except long idle */
+				nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
+			}
 			if (V_tcp_do_rfc3465) {
 				if (ccv->flags & CCF_ABC_SENTAWND)
 					ccv->flags &= ~CCF_ABC_SENTAWND;
@@ -184,12 +278,48 @@
 				abc_val = ccv->labc;
 			else
 				abc_val = V_tcp_abc_l_var;
+			if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_ALLOWED) &&
+			    (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) &&
+			    ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) == 0)) {
+				/*
+				 * HyStart++ is allowed and still enabled and we
+				 * are not yet in CSS. Let's check to see if we
+				 * can decide whether we need to go into CSS.
+				 */
+				if ((nreno->css_rttsample_count >= hystart_n_rttsamples) &&
+				    (CCV(ccv, snd_cwnd) >
+				     (hystart_lowcwnd * tcp_fixed_maxseg(ccv->ccvc.tcp)))) {
+					uint32_t rtt_thresh;
+
+					/* Clamp (minrtt_thresh, lastround/8, maxrtt_thresh) */
+					rtt_thresh = (nreno->css_lastround_minrtt >> 3);
+					if (rtt_thresh < hystart_minrtt_thresh)
+						rtt_thresh = hystart_minrtt_thresh;
+					if (rtt_thresh > hystart_maxrtt_thresh)
+						rtt_thresh = hystart_maxrtt_thresh;
+					newreno_log_hystart_event(ccv, nreno, 1, rtt_thresh);
+					if (nreno->css_current_round_minrtt >= (nreno->css_lastround_minrtt + rtt_thresh)) {
+						/* Enter CSS */
+						nreno->newreno_flags |= CC_NEWRENO_HYSTART_IN_CSS;
+						nreno->css_fas_at_css_entry = nreno->css_lowrtt_fas;
+						nreno->css_baseline_minrtt = nreno->css_current_round_minrtt;
+						nreno->css_entered_at_round = nreno->css_current_round;
+						newreno_log_hystart_event(ccv, nreno, 2, rtt_thresh);
+					}
+				}
+			}
 			if (CCV(ccv, snd_nxt) == CCV(ccv, snd_max))
 				incr = min(ccv->bytes_this_ack,
 				    ccv->nsegs * abc_val *
 				    CCV(ccv, t_maxseg));
 			else
 				incr = min(ccv->bytes_this_ack, CCV(ccv, t_maxseg));
+
+			/* Only if HyStart++ is enabled will the flag get set */
+			if (nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) {
+				incr /= hystart_css_growth_div;
+				newreno_log_hystart_event(ccv, nreno, 3, incr);
+			}
 		}
 		/* ABC is on by default, so incr equals 0 frequently. */
 		if (incr > 0)
@@ -201,8 +331,10 @@
 static void
 newreno_after_idle(struct cc_var *ccv)
 {
+	struct newreno *nreno;
 	uint32_t rw;
 
+	nreno = (struct newreno *)ccv->cc_data;
 	/*
 	 * If we've been idle for more than one retransmit timeout the old
 	 * congestion window is no longer current and we have to reduce it to
@@ -226,6 +358,16 @@
 		    CCV(ccv, snd_cwnd)-(CCV(ccv, snd_cwnd)>>2));
 
 	CCV(ccv, snd_cwnd) = min(rw, CCV(ccv, snd_cwnd));
+	if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) == 0) {
+		if (CCV(ccv, snd_cwnd) <= (hystart_lowcwnd * tcp_fixed_maxseg(ccv->ccvc.tcp))) {
+			/*
+			 * Re-enable HyStart++ if our cwnd has fallen below
+			 * the HyStart++ lowcwnd point.
+			 */
+			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+			nreno->newreno_flags |= CC_NEWRENO_HYSTART_ENABLED;
+		}
+	}
 }
 
 /*
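The block added to newreno_ack_received() above is the HyStart++ exit-from-slow-start test (draft-ietf-tcpm-hystartplusplus): once enough RTT samples have been seen and the cwnd is above lowcwnd, the connection enters CSS when this round's minimum RTT exceeds last round's minimum by clamp(minrtt_thresh, lastround_minrtt/8, maxrtt_thresh). Pulled out of the dense ack path, the decision reads as follows; this is an illustrative restatement only, not code from the patch:

    /* Should a HyStart++ sender leave SS for CSS this round? */
    static inline int
    hystartpp_enter_css(uint32_t lastround_minrtt, uint32_t currentround_minrtt)
    {
            uint32_t rtt_thresh;

            /* clamp(minrtt_thresh, lastround_minrtt / 8, maxrtt_thresh) */
            rtt_thresh = lastround_minrtt >> 3;
            if (rtt_thresh < hystart_minrtt_thresh)
                    rtt_thresh = hystart_minrtt_thresh;
            if (rtt_thresh > hystart_maxrtt_thresh)
                    rtt_thresh = hystart_maxrtt_thresh;
            /* Enter CSS when the round's min RTT grew by the threshold. */
            return (currentround_minrtt >= lastround_minrtt + rtt_thresh);
    }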
@@ -240,15 +382,9 @@
 	cwin = CCV(ccv, snd_cwnd);
 	mss = tcp_fixed_maxseg(ccv->ccvc.tcp);
-	/*
-	 * Other TCP congestion controls use newreno_cong_signal(), but
-	 * with their own private cc_data. Make sure the cc_data is used
-	 * correctly.
-	 */
-	nreno = (CC_ALGO(ccv->ccvc.tcp) == &newreno_cc_algo) ? ccv->cc_data : NULL;
-	beta = (nreno == NULL) ? V_newreno_beta : nreno->beta;
-	beta_ecn = (nreno == NULL) ? V_newreno_beta_ecn : nreno->beta_ecn;
-
+	nreno = (struct newreno *)ccv->cc_data;
+	beta = nreno->beta;
+	beta_ecn = nreno->beta_ecn;
 	/*
 	 * Note that we only change the backoff for ECN if the
 	 * global sysctl V_cc_do_abe is set the stack itself
@@ -257,7 +393,7 @@
 	 */
 	if ((type == CC_ECN) &&
 	    (V_cc_do_abe ||
-	    ((nreno != NULL) && (nreno->newreno_flags & CC_NEWRENO_BETA_ECN))))
+	    ((nreno != NULL) && (nreno->newreno_flags & CC_NEWRENO_BETA_ECN_ENABLED))))
 		factor = beta_ecn;
 	else
 		factor = beta;
@@ -271,6 +407,11 @@
 
 	switch (type) {
 	case CC_NDUPACK:
+		if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) {
+			/* Make sure the flags are all off, we had a loss */
+			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
+			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+		}
 		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
 			if (IN_CONGRECOVERY(CCV(ccv, t_flags) &&
 			    V_cc_do_abe && V_cc_abe_frlossreduce)) {
@@ -284,6 +425,11 @@
 		}
 		break;
 	case CC_ECN:
+		if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) {
+			/* Make sure the flags are all off, we had a loss */
+			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
+			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+		}
 		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
 			CCV(ccv, snd_ssthresh) = cwin;
 			CCV(ccv, snd_cwnd) = cwin;
@@ -346,17 +492,10 @@
 	if (CC_ALGO(ccv->ccvc.tcp) != &newreno_cc_algo)
 		return (ENOPROTOOPT);
 
-	nreno = ccv->cc_data;
+	nreno = (struct newreno *)ccv->cc_data;
 	opt = buf;
-
 	switch (sopt->sopt_dir) {
 	case SOPT_SET:
-		/* We cannot set without cc_data memory. */
-		if (nreno == NULL) {
-			nreno = newreno_malloc(ccv);
-			if (nreno == NULL)
-				return (ENOMEM);
-		}
 		switch (opt->name) {
 		case CC_NEWRENO_BETA:
 			nreno->beta = opt->val;
@@ -365,6 +504,19 @@
 			if ((!V_cc_do_abe) && ((nreno->newreno_flags & CC_NEWRENO_BETA_ECN) == 0))
 				return (EACCES);
 			nreno->beta_ecn = opt->val;
+			nreno->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
+			break;
+		case CC_NEWRENO_ENABLE_HYSTART:
+			/* Allow HyStart++ on this connection */
+			if (opt->val != 0) {
+				nreno->newreno_flags |= CC_NEWRENO_HYSTART_ALLOWED;
+				if (opt->val > 1)
+					nreno->newreno_flags |= CC_NEWRENO_HYSTART_CAN_SH_CWND;
+				if (opt->val > 2)
+					nreno->newreno_flags |= CC_NEWRENO_HYSTART_CONS_SSTH;
+			} else
+				nreno->newreno_flags &= ~(CC_NEWRENO_HYSTART_ALLOWED|CC_NEWRENO_HYSTART_CAN_SH_CWND|CC_NEWRENO_HYSTART_CONS_SSTH);
+			newreno_log_hystart_event(ccv, nreno, 7, opt->val);
 			break;
 		default:
 			return (ENOPROTOOPT);
@@ -380,6 +532,17 @@
 			opt->val = (nreno == NULL) ?
 			    V_newreno_beta_ecn : nreno->beta_ecn;
 			break;
+		case CC_NEWRENO_ENABLE_HYSTART:
+			if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ALLOWED) {
+				if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH)
+					opt->val = 3;
+				else if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CAN_SH_CWND)
+					opt->val = 2;
+				else
+					opt->val = 1;
+			} else
+				opt->val = 0;
+			break;
 		default:
 			return (ENOPROTOOPT);
 		}
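For sockets using NewReno directly, the new CC_NEWRENO_ENABLE_HYSTART option is reached through the existing TCP_CCALGOOPT socket option, exactly like the beta options above. A userspace sketch, assuming the cc_newreno.h definitions are visible to the program and omitting error handling:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <netinet/cc/cc_newreno.h>

    /*
     * level: 0 disables; 1 allows HyStart++; 2 also permits the
     * CSS -> CA cwnd slam; 3 additionally selects the conservative
     * ssthresh. Mirrors the flag decoding in newreno_ctl_output().
     */
    static int
    enable_hystart(int fd, uint32_t level)
    {
            struct cc_newreno_opts opt = {
                    .name = CC_NEWRENO_ENABLE_HYSTART,
                    .val = level,
            };

            return (setsockopt(fd, IPPROTO_TCP, TCP_CCALGOOPT,
                &opt, sizeof(opt)));
    }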
@@ -411,6 +574,78 @@
 	return (error);
 }
 
+static void
+newreno_newround(struct cc_var *ccv, uint32_t round_cnt)
+{
+	struct newreno *nreno;
+
+	nreno = (struct newreno *)ccv->cc_data;
+	/* We have entered a new round */
+	nreno->css_lastround_minrtt = nreno->css_current_round_minrtt;
+	nreno->css_current_round_minrtt = 0xffffffff;
+	nreno->css_rttsample_count = 0;
+	nreno->css_current_round = round_cnt;
+	if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) &&
+	    ((round_cnt - nreno->css_entered_at_round) >= hystart_css_rounds)) {
+		/* Enter CA */
+		if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CAN_SH_CWND) {
+			/*
+			 * We went beyond snd_ssthresh, engage the
+			 * brakes! Though we will stay in SS to creep
+			 * back up again, so let's leave CSS active
+			 * and give us hystart_css_rounds more rounds.
+			 */
+			if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH) {
+				CCV(ccv, snd_ssthresh) = ((nreno->css_lowrtt_fas + nreno->css_fas_at_css_entry) / 2);
+			} else {
+				CCV(ccv, snd_ssthresh) = nreno->css_lowrtt_fas;
+			}
+			CCV(ccv, snd_cwnd) = nreno->css_fas_at_css_entry;
+			nreno->css_entered_at_round = round_cnt;
+		} else {
+			CCV(ccv, snd_ssthresh) = CCV(ccv, snd_cwnd);
+			/* Turn off the CSS flag */
+			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+			/* Disable use of CSS in the future except long idle */
+			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
+		}
+		newreno_log_hystart_event(ccv, nreno, 6, CCV(ccv, snd_ssthresh));
+	}
+	newreno_log_hystart_event(ccv, nreno, 4, round_cnt);
+}
+
+static void
+newreno_rttsample(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas)
+{
+	struct newreno *nreno;
+
+	nreno = (struct newreno *)ccv->cc_data;
+	if (rxtcnt > 1) {
+		/*
+		 * Only look at RTTs that are non-ambiguous.
+		 */
+		return;
+	}
+	nreno->css_rttsample_count++;
+	nreno->css_last_fas = fas;
+	if (nreno->css_current_round_minrtt > usec_rtt) {
+		nreno->css_current_round_minrtt = usec_rtt;
+		nreno->css_lowrtt_fas = nreno->css_last_fas;
+	}
+	if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) &&
+	    (nreno->css_rttsample_count >= hystart_n_rttsamples) &&
+	    (nreno->css_baseline_minrtt > nreno->css_current_round_minrtt)) {
+		/*
+		 * We were in CSS and the RTT is now lower; we
+		 * entered CSS erroneously.
+		 */
+		nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
+		newreno_log_hystart_event(ccv, nreno, 8, nreno->css_baseline_minrtt);
+		nreno->css_baseline_minrtt = 0xffffffff;
+	}
+	newreno_log_hystart_event(ccv, nreno, 5, usec_rtt);
+}
+
 SYSCTL_DECL(_net_inet_tcp_cc_newreno);
 SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, newreno,
     CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
@@ -426,5 +661,45 @@
     &VNET_NAME(newreno_beta_ecn), 3, &newreno_beta_handler, "IU",
     "New Reno beta ecn, specified as number between 1 and 100");
 
+SYSCTL_NODE(_net_inet_tcp_cc_newreno, OID_AUTO, hystartplusplus,
+    CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
+    "New Reno related HyStart++ settings");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, lowcwnd,
+    CTLFLAG_RW,
+    &hystart_lowcwnd, 16,
+    "The number of MSS in the CWND before HyStart++ is active");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, minrtt_thresh,
+    CTLFLAG_RW,
+    &hystart_minrtt_thresh, 4000,
+    "HyStart++ minimum RTT thresh used in the clamp (in microseconds)");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, maxrtt_thresh,
+    CTLFLAG_RW,
+    &hystart_maxrtt_thresh, 16000,
+    "HyStart++ maximum RTT thresh used in the clamp (in microseconds)");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, n_rttsamples,
+    CTLFLAG_RW,
+    &hystart_n_rttsamples, 8,
+    "The number of RTT samples that must be seen to consider HyStart++");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, css_growth_div,
+    CTLFLAG_RW,
+    &hystart_css_growth_div, 4,
+    "The divisor applied to growth when in HyStart++ CSS");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, css_rounds,
+    CTLFLAG_RW,
+    &hystart_css_rounds, 5,
+    "The number of rounds HyStart++ lasts in CSS before falling to CA");
+
+SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, bblogs,
+    CTLFLAG_RW,
+    &hystart_bblogs, 0,
+    "Should HyStart++ black box logs be generated if BB logging is on");
+
 DECLARE_CC_MODULE(newreno, &newreno_cc_algo);
 MODULE_VERSION(newreno, 1);
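When CSS has lasted hystart_css_rounds rounds, newreno_newround() above falls into CA and, if cwnd slamming is permitted, picks one of two ssthresh values from the recorded flight sizes. Isolated for readability; a sketch of the same arithmetic, not a separate helper in the patch:

    static inline uint32_t
    css_exit_ssthresh(const struct newreno *nreno)
    {
            /* Conservative: midpoint of CSS-entry flight and lowest-RTT flight. */
            if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH)
                    return ((nreno->css_lowrtt_fas +
                        nreno->css_fas_at_css_entry) / 2);
            /* Default: pin ssthresh at the flight seen at the lowest RTT. */
            return (nreno->css_lowrtt_fas);
    }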
diff --git a/sys/netinet/tcp.h b/sys/netinet/tcp.h
--- a/sys/netinet/tcp.h
+++ b/sys/netinet/tcp.h
@@ -310,7 +310,7 @@
 #define TCP_RACK_PACING_BETA_ECN 1139	/* Changing the beta for ecn with pacing */
 #define TCP_RACK_TIMER_SLOP	1140	/* Set or get the timer slop used */
 #define TCP_RACK_DSACK_OPT	1141	/* How do we setup rack timer DSACK options bit 1/2 */
-
+#define TCP_RACK_ENABLE_HYSTART 1142	/* Do we allow hystart in the CC modules */
 /* Start of reserved space for third-party user-settable options. */
 #define TCP_VENDOR	SO_VENDOR
 
diff --git a/sys/netinet/tcp_log_buf.h b/sys/netinet/tcp_log_buf.h
--- a/sys/netinet/tcp_log_buf.h
+++ b/sys/netinet/tcp_log_buf.h
@@ -235,7 +235,8 @@
 	TCP_LOG_ACCOUNTING,	/* Log of TCP Accounting data 62 */
 	TCP_LOG_FSB,		/* FSB information 63 */
 	RACK_DSACK_HANDLING,	/* Handling of DSACK in rack for reordering window 64 */
-	TCP_LOG_END		/* End (keep at end) 65 */
+	TCP_HYSTART,		/* TCP Hystart logging 65 */
+	TCP_LOG_END		/* End (keep at end) 66 */
 };
 
 enum tcp_log_states {
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -204,6 +204,7 @@
 static int32_t rack_hw_up_only = 1;
 static int32_t rack_stats_gets_ms_rtt = 1;
 static int32_t rack_prr_addbackmax = 2;
+static int32_t rack_do_hystart = 0;
 static int32_t rack_pkt_delay = 1000;
 static int32_t rack_send_a_lot_in_prr = 1;
@@ -624,7 +625,7 @@
 	 * Hack alert we need to set in our newreno_flags
 	 * so that Abe behavior is also applied.
 	 */
-	((struct newreno *)tp->ccv->cc_data)->newreno_flags = CC_NEWRENO_BETA_ECN;
+	((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
 	opt.name = CC_NEWRENO_BETA_ECN;
 	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
 	error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
@@ -835,6 +836,7 @@
 	struct sysctl_oid *rack_timers;
 	struct sysctl_oid *rack_tlp;
 	struct sysctl_oid *rack_misc;
+	struct sysctl_oid *rack_features;
 	struct sysctl_oid *rack_measure;
 	struct sysctl_oid *rack_probertt;
 	struct sysctl_oid *rack_hw_pacing;
@@ -1362,6 +1364,43 @@
 	    OID_AUTO, "min_measure_tim", CTLFLAG_RW,
 	    &rack_min_measure_usec, 0,
 	    "What is the Minimum time time for a measurement if 0, this is off");
+	/* Features */
+	rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
+	    SYSCTL_CHILDREN(rack_sysctl_root),
+	    OID_AUTO,
+	    "features",
+	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+	    "Feature controls");
+	SYSCTL_ADD_S32(&rack_sysctl_ctx,
+	    SYSCTL_CHILDREN(rack_features),
+	    OID_AUTO, "cmpack", CTLFLAG_RW,
+	    &rack_use_cmp_acks, 1,
+	    "Should RACK have LRO send compressed acks");
+	SYSCTL_ADD_S32(&rack_sysctl_ctx,
+	    SYSCTL_CHILDREN(rack_features),
+	    OID_AUTO, "fsb", CTLFLAG_RW,
+	    &rack_use_fsb, 1,
+	    "Should RACK use the fast send block?");
+	SYSCTL_ADD_S32(&rack_sysctl_ctx,
+	    SYSCTL_CHILDREN(rack_features),
+	    OID_AUTO, "rfo", CTLFLAG_RW,
+	    &rack_use_rfo, 1,
+	    "Should RACK use rack_fast_output()?");
+	SYSCTL_ADD_S32(&rack_sysctl_ctx,
+	    SYSCTL_CHILDREN(rack_features),
+	    OID_AUTO, "rsmrfo", CTLFLAG_RW,
+	    &rack_use_rsm_rfo, 1,
+	    "Should RACK use rack_fast_rsm_output()?");
+	SYSCTL_ADD_S32(&rack_sysctl_ctx,
+	    SYSCTL_CHILDREN(rack_features),
+	    OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
+	    &rack_enable_mqueue_for_nonpaced, 0,
+	    "Should RACK use mbuf queuing for non-paced connections");
+	SYSCTL_ADD_S32(&rack_sysctl_ctx,
+	    SYSCTL_CHILDREN(rack_features),
+	    OID_AUTO, "hystartplusplus", CTLFLAG_RW,
+	    &rack_do_hystart, 0,
+	    "Should RACK enable HyStart++ on connections?");
 	/* Misc rack controls */
 	rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
@@ -1376,7 +1415,6 @@
 	    &rack_tcp_accounting, 0,
 	    "Should we turn on TCP accounting for all rack sessions?");
 #endif
-
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW,
@@ -1402,26 +1440,6 @@
 	    OID_AUTO, "defprofile", CTLFLAG_RW,
 	    &rack_def_profile, 0,
 	    "Should RACK use a default profile (0=no, num == profile num)?");
-	SYSCTL_ADD_S32(&rack_sysctl_ctx,
-	    SYSCTL_CHILDREN(rack_misc),
-	    OID_AUTO, "cmpack", CTLFLAG_RW,
-	    &rack_use_cmp_acks, 1,
-	    "Should RACK have LRO send compressed acks");
-	SYSCTL_ADD_S32(&rack_sysctl_ctx,
-	    SYSCTL_CHILDREN(rack_misc),
-	    OID_AUTO, "fsb", CTLFLAG_RW,
-	    &rack_use_fsb, 1,
-	    "Should RACK use the fast send block?");
-	SYSCTL_ADD_S32(&rack_sysctl_ctx,
-	    SYSCTL_CHILDREN(rack_misc),
-	    OID_AUTO, "rfo", CTLFLAG_RW,
-	    &rack_use_rfo, 1,
-	    "Should RACK use rack_fast_output()?");
-	SYSCTL_ADD_S32(&rack_sysctl_ctx,
-	    SYSCTL_CHILDREN(rack_misc),
-	    OID_AUTO, "rsmrfo", CTLFLAG_RW,
-	    &rack_use_rsm_rfo, 1,
-	    "Should RACK use rack_fast_rsm_output()?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "shared_cwnd", CTLFLAG_RW,
@@ -1432,11 +1450,6 @@
 	    OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
 	    &rack_limits_scwnd, 1,
 	    "Should RACK place low end time limits on the shared cwnd feature");
-	SYSCTL_ADD_S32(&rack_sysctl_ctx,
-	    SYSCTL_CHILDREN(rack_misc),
-	    OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
-	    &rack_enable_mqueue_for_nonpaced, 0,
-	    "Should RACK use mbuf queuing for non-paced connections");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "iMac_dack", CTLFLAG_RW,
@@ -6139,6 +6152,7 @@
 	nrsm->r_dupack = rsm->r_dupack;
 	nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
 	nrsm->r_rtr_bytes = 0;
+	nrsm->r_fas = rsm->r_fas;
 	rsm->r_end = nrsm->r_start;
 	nrsm->r_just_ret = rsm->r_just_ret;
 	for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
@@ -7260,6 +7274,12 @@
 	}
 	idx = rsm->r_rtr_cnt - 1;
 	rsm->r_tim_lastsent[idx] = ts;
+	/*
+	 * Here we don't add in the len of send, since it's already
+	 * counted in snd_una <-> snd_max.
+	 */
+	rsm->r_fas = ctf_flight_size(rack->rc_tp,
+	    rack->r_ctl.rc_sacked);
 	stripped_flags = rsm->r_flags & ~(RACK_SENT_SP|RACK_SENT_FP);
 	if (rsm->r_flags & RACK_ACKED) {
 		/* Problably MTU discovery messing with us */
@@ -7479,6 +7499,13 @@
 	 */
 	rsm->m = s_mb;
 	rsm->soff = s_moff;
+	/*
+	 * Here we do add in the len of send, since it's not yet
+	 * reflected in snd_una <-> snd_max.
+	 */
+	rsm->r_fas = (ctf_flight_size(rack->rc_tp,
+	    rack->r_ctl.rc_sacked) +
+	    (rsm->r_end - rsm->r_start));
 	/* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
 	if (rsm->m) {
 		if (rsm->m->m_len <= rsm->soff) {
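The two r_fas assignments above differ only in whether the bytes of the current send are already reflected in the snd_una..snd_max flight: a retransmission is, a first transmission is not. Condensed into one helper purely for illustration (the patch inlines both cases):

    static inline uint32_t
    fas_at_send(struct tcp_rack *rack, struct rack_sendmap *rsm,
        int is_retransmission)
    {
            uint32_t fas;

            fas = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
            if (!is_retransmission) {
                    /* A first send is not yet counted in snd_una..snd_max. */
                    fas += (rsm->r_end - rsm->r_start);
            }
            return (fas);
    }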
@@ -7927,6 +7954,7 @@
 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
     struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts,
     int32_t ack_type, tcp_seq th_ack)
 {
+	uint32_t us_rtt;
 	int32_t i, all;
 	uint32_t t, len_acked;
@@ -7951,7 +7979,6 @@
 		all = 0;
 	}
 	if (rsm->r_rtr_cnt == 1) {
-		uint32_t us_rtt;
 
 		t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
 		if ((int)t <= 0)
 			t = 1;
@@ -7971,6 +7998,10 @@
 			us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
 		if (us_rtt == 0)
 			us_rtt = 1;
+		if (CC_ALGO(tp)->rttsample != NULL) {
+			/* Kick the RTT to the CC */
+			CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas);
+		}
 		rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
 		if (ack_type == SACKED) {
 			rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
@@ -8057,12 +8088,29 @@
 			t = cts - (uint32_t)rsm->r_tim_lastsent[i];
 			if ((int)t <= 0)
 				t = 1;
+			if (CC_ALGO(tp)->rttsample != NULL) {
+				/*
+				 * Kick the RTT to the CC. Here we lie
+				 * a bit: we treat the sample as valid
+				 * even though this was a retransmission,
+				 * because we matched the timestamps.
+				 */
+				if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
+					us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
+				else
+					us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
+				CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas);
+			}
 			if ((i + 1) < rsm->r_rtr_cnt) {
 				/*
 				 * The peer ack'd from our previous
 				 * transmission. We have a spurious
 				 * retransmission and thus we dont
 				 * want to update our rack_rtt.
+				 *
+				 * Hmm, should there be a CC revert here?
+				 *
 				 */
 				return (0);
 			}
@@ -12548,10 +12596,11 @@
 	rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
 	rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
 	/* We want abe like behavior as well */
-	rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN;
+	rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
 	rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
 	rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
 	rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
+	rack->r_ctl.roundends = tp->snd_max;
 	if (use_rack_rr)
 		rack->use_rack_rr = 1;
 	if (V_tcp_delack_enabled)
@@ -12730,6 +12779,17 @@
 	 */
 	rack_convert_rtts(tp);
 	tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
+	if (rack_do_hystart) {
+		struct sockopt sopt;
+		struct cc_newreno_opts opt;
+
+		sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
+		sopt.sopt_dir = SOPT_SET;
+		opt.name = CC_NEWRENO_ENABLE_HYSTART;
+		opt.val = rack_do_hystart;
+		if (CC_ALGO(tp)->ctl_output != NULL)
+			(void)CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
+	}
 	if (rack_def_profile)
 		rack_set_profile(rack, rack_def_profile);
 	/* Cancel the GP measurement in progress */
@@ -13576,6 +13636,13 @@
 		    (((ae->ack - high_seq) + segsiz - 1) / segsiz));
 #endif
 		high_seq = ae->ack;
+		if (SEQ_GEQ(high_seq, rack->r_ctl.roundends)) {
+			rack->r_ctl.current_round++;
+			rack->r_ctl.roundends = tp->snd_max;
+			if (CC_ALGO(tp)->newround != NULL) {
+				CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round);
+			}
+		}
 		/* Setup our act_rcv_time */
 		if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
 			ts.tv_sec = ae->timestamp / 1000000000;
@@ -14464,6 +14531,14 @@
 		rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
 		rack_free_trim(rack);
 	}
+	/* Update any rounds needed */
+	if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends)) {
+		rack->r_ctl.current_round++;
+		rack->r_ctl.roundends = tp->snd_max;
+		if (CC_ALGO(tp)->newround != NULL) {
+			CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round);
+		}
+	}
 	if ((nxt_pkt == 0) &&
 	    ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
 	    (SEQ_GT(tp->snd_max, tp->snd_una) ||
@@ -16936,7 +17011,6 @@
 		goto just_return_nolock;
 	}
 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
-	KASSERT(rsm != NULL, ("rsm is NULL rack:%p r_must_retran set", rack));
 	if (rsm == NULL) {
 		/* TSNH */
 		rack->r_must_retran = 0;
@@ -19565,7 +19639,7 @@
 			 * rack pcb storage.
 			 */
 			rack->r_ctl.rc_saved_beta.beta_ecn = optval;
-			rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN;
+			rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
 		}
 		break;
 	case TCP_DEFER_OPTIONS:
@@ -19998,6 +20072,21 @@
 		RACK_OPTS_INC(tcp_rack_early_seg);
 		rack->r_ctl.rc_early_recovery_segs = optval;
 		break;
+	case TCP_RACK_ENABLE_HYSTART:
+	{
+		struct sockopt sopt;
+		struct cc_newreno_opts opt;
+
+		sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
+		sopt.sopt_dir = SOPT_SET;
+		opt.name = CC_NEWRENO_ENABLE_HYSTART;
+		opt.val = optval;
+		if (CC_ALGO(tp)->ctl_output != NULL)
+			error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
+		else
+			error = EINVAL;
+	}
+	break;
 	case TCP_RACK_REORD_THRESH:
 		/* RACK reorder threshold (shift amount) */
 		RACK_OPTS_INC(tcp_rack_reord_thresh);
@@ -20210,6 +20299,7 @@
 	case TCP_RACK_PACING_BETA:		/* URL:pacing_beta */
 	case TCP_RACK_PACING_BETA_ECN:		/* URL:pacing_beta_ecn */
 	case TCP_RACK_TIMER_SLOP:		/* URL:timer_slop */
+	case TCP_RACK_ENABLE_HYSTART:		/* URL:hystart */
 		break;
 	default:
 		/* Filter off all unknown options to the base stack */
@@ -20394,6 +20484,21 @@
 			optval |= 2;
 		}
 		break;
+	case TCP_RACK_ENABLE_HYSTART:
+	{
+		struct sockopt sopt;
+		struct cc_newreno_opts opt;
+
+		sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
+		sopt.sopt_dir = SOPT_GET;
+		opt.name = CC_NEWRENO_ENABLE_HYSTART;
+		if (CC_ALGO(tp)->ctl_output != NULL)
+			error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
+		else
+			error = EINVAL;
+		optval = opt.val;
+	}
+	break;
 	case TCP_FAST_RSM_HACK:
 		optval = rack->fast_rsm_hack;
 		break;
diff --git a/sys/netinet/tcp_stacks/tcp_rack.h b/sys/netinet/tcp_stacks/tcp_rack.h
--- a/sys/netinet/tcp_stacks/tcp_rack.h
+++ b/sys/netinet/tcp_stacks/tcp_rack.h
@@ -73,6 +73,7 @@
 	uint64_t r_tim_lastsent[RACK_NUM_OF_RETRANS];
 	uint64_t r_ack_arrival;	/* This is the time of ack-arrival (if SACK'd) */
 	RB_ENTRY(rack_sendmap) r_next;	/* RB Tree next */
+	uint32_t r_fas;		/* Flight at send */
 };
 
 struct deferred_opt_list {
@@ -465,6 +466,8 @@
 	uint32_t rc_loss_at_start;	/* At measurement window where was our lost value */
 
 	uint32_t dsack_round_end;	/* In a round of seeing a DSACK */
+	uint32_t current_round;		/* Starting at zero */
+	uint32_t roundends;		/* acked value above which round ends */
 	uint32_t num_dsack;		/* Count of dsack's seen (1 per window)*/
 	uint32_t forced_ack_ts;
 	uint32_t rc_lower_rtt_us_cts;	/* Time our GP rtt was last lowered */
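With the rack stack selected, the same three-level control is exposed directly as the TCP_RACK_ENABLE_HYSTART socket option; rack simply forwards the value to the CC module through ctl_output(), as the set/get cases above show. A userspace sketch (error handling omitted):

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    /*
     * Same levels as CC_NEWRENO_ENABLE_HYSTART: 0 off, 1..3 for
     * increasingly aggressive HyStart++ behavior.
     */
    static int
    rack_enable_hystart(int fd, uint32_t level)
    {
            return (setsockopt(fd, IPPROTO_TCP, TCP_RACK_ENABLE_HYSTART,
                &level, sizeof(level)));
    }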