Index: sys/netinet/tcp_stacks/bbr.c
===================================================================
--- sys/netinet/tcp_stacks/bbr.c
+++ sys/netinet/tcp_stacks/bbr.c
@@ -443,80 +443,105 @@
 static uint32_t
 bbr_get_pacing_length(struct tcp_bbr *bbr, uint16_t gain, uint32_t useconds_time, uint64_t bw);
+
 static uint32_t
 bbr_get_a_state_target(struct tcp_bbr *bbr, uint32_t gain);
+
 static void
 bbr_set_state(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t win);
+
 static void
 bbr_set_probebw_gains(struct tcp_bbr *bbr, uint32_t cts, uint32_t losses);
+
 static void
 bbr_substate_change(struct tcp_bbr *bbr, uint32_t cts, int line, int dolog);
+
 static uint32_t
 bbr_get_target_cwnd(struct tcp_bbr *bbr, uint64_t bw, uint32_t gain);
+
 static void
 bbr_state_change(struct tcp_bbr *bbr, uint32_t cts, int32_t epoch, int32_t pkt_epoch, uint32_t losses);
+
+static uint32_t
+bbr_calc_thresh_rack(struct tcp_bbr *bbr, uint32_t srtt, uint32_t cts,
+    struct bbr_sendmap *rsm);
+
 static uint32_t
-bbr_calc_thresh_rack(struct tcp_bbr *bbr, uint32_t srtt, uint32_t cts, struct bbr_sendmap *rsm);
-static uint32_t bbr_initial_cwnd(struct tcp_bbr *bbr, struct tcpcb *tp);
+bbr_initial_cwnd(struct tcp_bbr *bbr, struct tcpcb *tp);
+
 static uint32_t
 bbr_calc_thresh_tlp(struct tcpcb *tp, struct tcp_bbr *bbr,
-		    struct bbr_sendmap *rsm, uint32_t srtt,
-		    uint32_t cts);
+    struct bbr_sendmap *rsm, uint32_t srtt, uint32_t cts);
+
 static void
 bbr_exit_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts,
-		 int32_t line);
+    int32_t line);
+
 static void
-     bbr_set_state_target(struct tcp_bbr *bbr, int line);
+bbr_set_state_target(struct tcp_bbr *bbr, int line);
+
 static void
-     bbr_enter_probe_rtt(struct tcp_bbr *bbr, uint32_t cts, int32_t line);
+bbr_enter_probe_rtt(struct tcp_bbr *bbr, uint32_t cts, int32_t line);
 
 static void
-     bbr_log_progress_event(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t tick, int event, int line);
+bbr_log_progress_event(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t tick,
+    int event, int line);
 
 static void
-     tcp_bbr_tso_size_check(struct tcp_bbr *bbr, uint32_t cts);
+tcp_bbr_tso_size_check(struct tcp_bbr *bbr, uint32_t cts);
 
 static void
-     bbr_setup_red_bw(struct tcp_bbr *bbr, uint32_t cts);
+bbr_setup_red_bw(struct tcp_bbr *bbr, uint32_t cts);
 
 static void
-     bbr_log_rtt_shrinks(struct tcp_bbr *bbr, uint32_t cts, uint32_t applied, uint32_t rtt,
-			 uint32_t line, uint8_t is_start, uint16_t set);
+bbr_log_rtt_shrinks(struct tcp_bbr *bbr, uint32_t cts, uint32_t applied,
+    uint32_t rtt, uint32_t line, uint8_t is_start, uint16_t set);
 
 static struct bbr_sendmap *
-     bbr_find_lowest_rsm(struct tcp_bbr *bbr);
+bbr_find_lowest_rsm(struct tcp_bbr *bbr);
+
 static __inline uint32_t
 bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type);
+
 static void
-     bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, uint8_t which);
+bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot,
+    uint8_t which);
 
 static void
-bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts, uint32_t time_since_sent, uint32_t srtt,
-		  uint32_t thresh, uint32_t to);
+bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts,
+    uint32_t time_since_sent, uint32_t srtt,
+    uint32_t thresh, uint32_t to);
+
 static void
-     bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag);
+bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag);
 
 static void
 bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot,
-		    uint32_t del_by, uint32_t cts, uint32_t sloton, uint32_t prev_delay);
+    uint32_t del_by, uint32_t cts, uint32_t sloton,
+    uint32_t prev_delay);
 
 static void
-bbr_enter_persist(struct tcpcb *tp, struct tcp_bbr *bbr,
-		  uint32_t cts, int32_t line);
+bbr_enter_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts,
+    int32_t line);
+
 static void
 bbr_stop_all_timers(struct tcpcb *tp);
+
 static void
 bbr_exit_probe_rtt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts);
+
 static void
 bbr_check_probe_rtt_limits(struct tcp_bbr *bbr, uint32_t cts);
+
 static void
 bbr_timer_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts);
 
 static void
 bbr_log_pacing_delay_calc(struct tcp_bbr *bbr, uint16_t gain, uint32_t len,
-			  uint32_t cts, uint32_t usecs, uint64_t bw, uint32_t override, int mod);
+    uint32_t cts, uint32_t usecs, uint64_t bw,
+    uint32_t override, int mod);
 
 static inline uint8_t
 bbr_state_val(struct tcp_bbr *bbr)
@@ -1615,8 +1640,7 @@
 	    OID_AUTO, "drop_limit", CTLFLAG_RW,
 	    &bbr_drop_limit, 0,
 	    "Number of segments limit for drop (0=use min_cwnd w/flight)?");
-
-        /* Timeout controls */
+	/* Timeout controls */
 	bbr_timeout = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
@@ -5742,7 +5766,7 @@
 	 * seg = goal_tso / mss
 	 * tso = seg * mss
 	 * else
-	 * tso = mss 
+	 * tso = mss
 	 * if (tso > per-tcb-max)
 	 * tso = per-tcb-max
 	 * else if ( bw > 512Mbps)
@@ -6787,7 +6811,7 @@
 	else
 		bbr->rc_ack_is_cumack = 0;
 	old_rttprop = bbr_get_rtt(bbr, BBR_RTT_PROP);
-	/* 
+	/*
 	 * Note the following code differs to the original
 	 * BBR spec. It calls for <= not <. However after a
 	 * long discussion in email with Neal, he acknowledged
@@ -7807,7 +7831,7 @@
 		if (bbr->rc_in_persist)
 			tp->t_rxtshift = 0;
 		if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd))
-		        bbr_strike_dupack(bbr);
+			bbr_strike_dupack(bbr);
 		sack_changed = bbr_log_ack(tp, to, th, &prev_acked);
 	}
 	bbr_lt_bw_sampling(bbr, bbr->r_ctl.rc_rcvtime, (bbr->r_ctl.rc_lost > lost));
@@ -11606,18 +11630,18 @@
 	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
 		retval = 0;
 		m_freem(m);
-	        goto done_with_input;
-        }
-        /*
-         * If a segment with the ACK-bit set arrives in the SYN-SENT state
-         * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
-         */
-        if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
-            (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
+		goto done_with_input;
+	}
+	/*
+	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
+	 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
+	 */
+	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
+	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
-	        return (1);
-        }
+		return (1);
+	}
 	in_recovery = IN_RECOVERY(tp->t_flags);
 	if (tiwin > bbr->r_ctl.rc_high_rwnd)
 		bbr->r_ctl.rc_high_rwnd = tiwin;
Index: sys/netinet/tcp_stacks/rack.c
===================================================================
--- sys/netinet/tcp_stacks/rack.c
+++ sys/netinet/tcp_stacks/rack.c
@@ -2334,7 +2334,7 @@
 	len = bw * srtt;
 	len /= (uint64_t)HPTS_USEC_IN_SEC;
 	len *= max(1, rack_goal_bdp);
-	/* Now we need to round up to the nearest MSS */ 
+	/* Now we need to round up to the nearest MSS */
 	len = roundup(len, segsiz);
 	if (rack_min_measure_usec) {
 		/* Now calculate our min length for this b/w */
@@ -8686,41 +8686,41 @@
 		if ((tp->t_flags & TF_SACK_PERMIT) &&
 		    (save_tlen > 0) &&
 		    TCPS_HAVEESTABLISHED(tp->t_state)) {
-                        if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
-                                /*
-                                 * DSACK actually handled in the fastpath
-                                 * above.
-                                 */
+			if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
+				/*
+				 * DSACK actually handled in the fastpath
+				 * above.
+				 */
 				RACK_OPTS_INC(tcp_sack_path_1);
-                                tcp_update_sack_list(tp, save_start,
-                                    save_start + save_tlen);
-                        } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
-                                if ((tp->rcv_numsacks >= 1) &&
-                                    (tp->sackblks[0].end == save_start)) {
-                                        /*
-                                         * Partial overlap, recorded at todrop
-                                         * above.
-                                         */
+				tcp_update_sack_list(tp, save_start,
+				    save_start + save_tlen);
+			} else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
+				if ((tp->rcv_numsacks >= 1) &&
+				    (tp->sackblks[0].end == save_start)) {
+					/*
+					 * Partial overlap, recorded at todrop
+					 * above.
+					 */
 					RACK_OPTS_INC(tcp_sack_path_2a);
-                                        tcp_update_sack_list(tp,
-                                            tp->sackblks[0].start,
-                                            tp->sackblks[0].end);
-                                } else {
+					tcp_update_sack_list(tp,
+					    tp->sackblks[0].start,
+					    tp->sackblks[0].end);
+				} else {
 					RACK_OPTS_INC(tcp_sack_path_2b);
-                                        tcp_update_dsack_list(tp, save_start,
-                                            save_start + save_tlen);
-                                }
-                        } else if (tlen >= save_tlen) {
-                                /* Update of sackblks. */
+					tcp_update_dsack_list(tp, save_start,
+					    save_start + save_tlen);
+				}
+			} else if (tlen >= save_tlen) {
+				/* Update of sackblks. */
 				RACK_OPTS_INC(tcp_sack_path_3);
-                                tcp_update_dsack_list(tp, save_start,
-                                    save_start + save_tlen);
-                        } else if (tlen > 0) {
+				tcp_update_dsack_list(tp, save_start,
+				    save_start + save_tlen);
+			} else if (tlen > 0) {
 				RACK_OPTS_INC(tcp_sack_path_4);
-                                tcp_update_dsack_list(tp, save_start,
-                                    save_start + tlen);
-                        }
-                }
+				tcp_update_dsack_list(tp, save_start,
+				    save_start + tlen);
+			}
+		}
 	} else {
 		m_freem(m);
 		thflags &= ~TH_FIN;
@@ -13154,7 +13154,7 @@
 				len -= moff;
 			}
 		}
-		/* 
+		/*
 		 * In case there are too many small fragments don't
 		 * use TSO:
 		 */
@@ -14113,7 +14113,7 @@
 	case TCP_RACK_TLP_REDUCE:		/* URL:tlp_reduce */
 	case TCP_RACK_EARLY_RECOV:		/* URL:early_recov */
 	case TCP_RACK_PACE_REDUCE:		/* Not used */
-        /* Pacing related ones */
+	/* Pacing related ones */
 	case TCP_RACK_PACE_ALWAYS:		/* URL:pace_always */
 	case TCP_BBR_RACK_INIT_RATE:		/* URL:irate */
 	case TCP_BBR_IWINTSO:			/* URL:tso_iwin */
@@ -14127,7 +14127,7 @@
 	case TCP_RACK_GP_INCREASE_REC:		/* URL:gp_inc_rec */
 	case TCP_RACK_RR_CONF:			/* URL:rrr_conf */
 	case TCP_BBR_HDWR_PACE:			/* URL:hdwrpace */
-        /* End pacing related */
+	/* End pacing related */
 	case TCP_DELACK:
 	case TCP_RACK_PRR_SENDALOT:		/* URL:prr_sendalot */
 	case TCP_RACK_MIN_TO:			/* URL:min_to */
@@ -14334,7 +14334,7 @@
 		rack->r_ctl.rc_early_recovery = optval;
 		break;
 
-        /* Pacing related ones */
+	/* Pacing related ones */
 	case TCP_RACK_PACE_ALWAYS:
 		/*
 		 * zero is old rack method, 1 is new
@@ -14545,7 +14545,7 @@
 #endif
 		}
 		break;
-        /* End Pacing related ones */
+	/* End Pacing related ones */
 	case TCP_RACK_PRR_SENDALOT:
 		/* Allow PRR to send more than one seg */
 		RACK_OPTS_INC(tcp_rack_prr_sendalot);
Index: sys/netinet/tcp_stacks/tcp_bbr.h
===================================================================
--- sys/netinet/tcp_stacks/tcp_bbr.h
+++ sys/netinet/tcp_stacks/tcp_bbr.h
@@ -588,9 +588,9 @@
 	uint32_t rc_reorder_ts;	/* Last time we saw reordering Lock(a) */
 	uint32_t rc_init_rwnd;	/* Initial rwnd when we transitioned */
-	/*- --- 
+	/*- ---
 	 * used only initial and close
-	 */ 
+	 */
 	uint32_t rc_high_rwnd;	/* Highest rwnd seen */
 	uint32_t rc_lowest_rtt;	/* Smallest RTT we have seen */