diff --git a/sys/netinet/cc/cc_cubic.h b/sys/netinet/cc/cc_cubic.h
--- a/sys/netinet/cc/cc_cubic.h
+++ b/sys/netinet/cc/cc_cubic.h
@@ -83,6 +83,7 @@
 #define CUBICFLAG_RTO_EVENT		0x00000008	/* RTO experienced */
 #define CUBICFLAG_HYSTART_ENABLED	0x00000010	/* Hystart++ is enabled */
 #define CUBICFLAG_HYSTART_IN_CSS	0x00000020	/* We are in Hystart++ CSS */
+#define CUBICFLAG_IN_TF			0x00000040	/* We are in TCP friendly region */
 
 /* Kernel only bits */
 #ifdef _KERNEL
@@ -288,22 +289,13 @@
 }
 
 /*
- * Compute an approximation of the "TCP friendly" cwnd some number of usecs
- * after a congestion event that is designed to yield the same average cwnd as
- * NewReno while using CUBIC's beta of 0.7. RTT should be the average RTT
- * estimate for the path measured over the previous congestion epoch and wmax is
- * the value of cwnd at the last congestion event.
+ * Compute the "TCP friendly" cwnd as newreno would in congestion avoidance.
  */
 static __inline unsigned long
-tf_cwnd(int usecs_since_epoch, int rtt_usecs, unsigned long wmax,
-    uint32_t smss)
+tf_cwnd(struct cc_var *ccv)
 {
-
-	/* Equation 4 of I-D. */
-	return (((wmax * CUBIC_BETA) +
-	    (((THREE_X_PT3 * (unsigned long)usecs_since_epoch *
-	    (unsigned long)smss) << CUBIC_SHIFT) / (TWO_SUB_PT3 * rtt_usecs)))
-	    >> CUBIC_SHIFT);
+	/* newreno is "TCP friendly" */
+	return (newreno_cc_cwnd_in_cong_avoid(ccv));
 }
 
 #endif /* _NETINET_CC_CUBIC_H_ */
diff --git a/sys/netinet/cc/cc_cubic.c b/sys/netinet/cc/cc_cubic.c
--- a/sys/netinet/cc/cc_cubic.c
+++ b/sys/netinet/cc/cc_cubic.c
@@ -289,31 +289,26 @@
 			usecs_since_epoch = INT_MAX;
 			cubic_data->t_epoch = ticks - INT_MAX;
 		}
+
+		W_est = tf_cwnd(ccv);
+
 		/*
 		 * The mean RTT is used to best reflect the equations in
-		 * the I-D. Using min_rtt in the tf_cwnd calculation
-		 * causes W_est to grow much faster than it should if the
-		 * RTT is dominated by network buffering rather than
-		 * propagation delay.
+		 * the I-D.
 		 */
-		W_est = tf_cwnd(usecs_since_epoch, cubic_data->mean_rtt_usecs,
-		    cubic_data->W_max, CCV(ccv, t_maxseg));
-
 		W_cubic = cubic_cwnd(usecs_since_epoch +
 		    cubic_data->mean_rtt_usecs,
 		    cubic_data->W_max,
 		    CCV(ccv, t_maxseg),
 		    cubic_data->K);
-
 		ccv->flags &= ~CCF_ABC_SENTAWND;
-
 		if (W_cubic < W_est) {
 			/*
 			 * TCP-friendly region, follow tf
 			 * cwnd growth.
 			 */
-			if (CCV(ccv, snd_cwnd) < W_est)
-				CCV(ccv, snd_cwnd) = ulmin(W_est, INT_MAX);
+			CCV(ccv, snd_cwnd) = ulmin(W_est, INT_MAX);
+			cubic_data->flags |= CUBICFLAG_IN_TF;
 		} else if (CCV(ccv, snd_cwnd) < W_cubic) {
 			/*
 			 * Concave or convex region, follow CUBIC
@@ -321,6 +316,7 @@
 			 * Only update snd_cwnd, if it doesn't shrink.
 			 */
 			CCV(ccv, snd_cwnd) = ulmin(W_cubic, INT_MAX);
+			cubic_data->flags &= ~CUBICFLAG_IN_TF;
 		}
 
 		/*
@@ -637,19 +633,23 @@
 	cubic_data->undo_W_max = cubic_data->W_max;
 	cubic_data->W_max = cwnd;
 
-	/*
-	 * On the first congestion event, set ssthresh to cwnd * 0.5
-	 * and reduce W_max to cwnd * beta. This aligns the cubic concave
-	 * region appropriately. On subsequent congestion events, set
-	 * ssthresh to cwnd * beta.
-	 */
-	if ((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) {
+	if (cubic_data->flags & CUBICFLAG_IN_TF) {
+		/* If in the TCP friendly region, follow what newreno does. */
+		ssthresh = newreno_cc_cwnd_on_multiplicative_decrease(ccv, maxseg);
+
+	} else if ((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) {
+		/*
+		 * On the first congestion event, set ssthresh to cwnd * 0.5
+		 * and reduce W_max to cwnd * beta. This aligns the cubic
+		 * concave region appropriately.
+		 */
 		ssthresh = cwnd >> 1;
-		cubic_data->W_max = ((uint64_t)cwnd *
-		    CUBIC_BETA) >> CUBIC_SHIFT;
+		cubic_data->W_max = ((uint64_t)cwnd * CUBIC_BETA) >> CUBIC_SHIFT;
 	} else {
-		ssthresh = ((uint64_t)cwnd *
-		    CUBIC_BETA) >> CUBIC_SHIFT;
+		/*
+		 * On subsequent congestion events, set ssthresh to cwnd * beta.
+		 */
+		ssthresh = ((uint64_t)cwnd * CUBIC_BETA) >> CUBIC_SHIFT;
 	}
 	CCV(ccv, snd_ssthresh) = max(ssthresh, 2 * maxseg);
 }
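Note: the removed tf_cwnd() implemented Equation 4 of the CUBIC I-D, W_est = W_max * beta + [3 * (1 - beta) / (1 + beta)] * (t / RTT) segments with beta = 0.7; the patch replaces that estimate with newreno's actual congestion avoidance growth and, via the new CUBICFLAG_IN_TF flag, newreno's multiplicative decrease on congestion. As a rough illustration of the behaviour being delegated to, here is a minimal userland sketch, not the kernel code: the model_* names are invented for illustration, and it assumes classic NewReno per RFC 5681, i.e. about one MSS of cwnd growth per RTT in congestion avoidance and cwnd halved on a congestion event with a floor of two segments, mirroring the max(ssthresh, 2 * maxseg) clamp above.

/*
 * Simplified model of the NewReno-style growth and decrease that
 * tf_cwnd() and cubic_ssthresh() now delegate to via the kernel helpers
 * newreno_cc_cwnd_in_cong_avoid() and
 * newreno_cc_cwnd_on_multiplicative_decrease().
 */
#include <stdio.h>
#include <stdint.h>

static unsigned long
model_tf_growth(unsigned long cwnd, uint32_t mss)
{
	/* One MSS of growth per RTT: the "TCP friendly" slope. */
	return (cwnd + mss);
}

static unsigned long
model_tf_decrease(unsigned long cwnd, uint32_t mss)
{
	/* Halve cwnd (NewReno beta = 0.5), keep at least two segments. */
	unsigned long ssthresh = cwnd / 2;

	return (ssthresh > 2UL * mss ? ssthresh : 2UL * mss);
}

int
main(void)
{
	uint32_t mss = 1460;
	unsigned long cwnd = 10UL * mss;
	int rtt;

	for (rtt = 1; rtt <= 5; rtt++) {
		cwnd = model_tf_growth(cwnd, mss);
		printf("RTT %d: cwnd = %lu\n", rtt, cwnd);
	}
	cwnd = model_tf_decrease(cwnd, mss);
	printf("after congestion event: cwnd = %lu\n", cwnd);
	return (0);
}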