diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c
index aa78e02e39d9..5f7c6125c1f0 100644
--- a/sys/netinet/tcp_stacks/bbr.c
+++ b/sys/netinet/tcp_stacks/bbr.c
@@ -1,14798 +1,14801 @@
 /*-
  * Copyright (c) 2016-2020 Netflix, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 /**
  * Author: Randall Stewart <rrs@netflix.com>
  * This work is based on the ACM Queue paper
  * BBR - Congestion Based Congestion Control
  * and also numerous discussions with Neal, Yuchung and Van.
  */
 
 #include <sys/cdefs.h>
 #include "opt_inet.h"
 #include "opt_inet6.h"
 #include "opt_ipsec.h"
 #include "opt_ratelimit.h"
 #include <sys/param.h>
 #include <sys/arb.h>
 #include <sys/module.h>
 #include <sys/kernel.h>
 #include <sys/libkern.h>
 #ifdef TCP_HHOOK
 #include <sys/hhook.h>
 #endif
 #include <sys/malloc.h>
 #include <sys/mbuf.h>
 #include <sys/proc.h>
 #include <sys/socket.h>
 #include <sys/socketvar.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #ifdef STATS
 #include <sys/qmath.h>
 #include <sys/tree.h>
 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
 #endif
 #include <sys/refcount.h>
 #include <sys/queue.h>
 #include <sys/eventhandler.h>
 #include <sys/smp.h>
 #include <sys/kthread.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/tim_filter.h>
 #include <sys/time.h>
 #include <sys/protosw.h>
 #include <vm/uma.h>
 #include <sys/kern_prefetch.h>
 
 #include <net/route.h>
 #include <net/route/nhop.h>
 #include <net/vnet.h>
 
 #define TCPSTATES		/* for logging */
 
 #include <netinet/in.h>
 #include <netinet/in_kdtrace.h>
 #include <netinet/in_pcb.h>
 #include <netinet/ip.h>
 #include <netinet/ip_icmp.h>	/* required for icmp_var.h */
 #include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
 #include <netinet/ip_var.h>
 #include <netinet/ip6.h>
 #include <netinet6/in6_pcb.h>
 #include <netinet6/ip6_var.h>
 #define	TCPOUTFLAGS
 #include <netinet/tcp.h>
 #include <netinet/tcp_fsm.h>
 #include <netinet/tcp_seq.h>
 #include <netinet/tcp_timer.h>
 #include <netinet/tcp_var.h>
 #include <netinet/tcpip.h>
 #include <netinet/tcp_hpts.h>
 #include <netinet/cc/cc.h>
 #include <netinet/tcp_log_buf.h>
 #include <netinet/tcp_ratelimit.h>
 #include <netinet/tcp_lro.h>
 #ifdef TCP_OFFLOAD
 #include <netinet/tcp_offload.h>
 #endif
 #ifdef INET6
 #include <netinet6/tcp6_var.h>
 #endif
 #include <netinet/tcp_fastopen.h>
 
 #include <netipsec/ipsec_support.h>
 #include <net/if.h>
 #include <net/if_var.h>
 #include <net/ethernet.h>
 
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 #include <netipsec/ipsec.h>
 #include <netipsec/ipsec6.h>
 #endif				/* IPSEC */
 
 #include <netinet/udp.h>
 #include <netinet/udp_var.h>
 #include <machine/in_cksum.h>
 
 #ifdef MAC
 #include <security/mac/mac_framework.h>
 #endif
 
 #include "sack_filter.h"
 #include "tcp_bbr.h"
 #include "rack_bbr_common.h"
 uma_zone_t bbr_zone;
 uma_zone_t bbr_pcb_zone;
 
 struct sysctl_ctx_list bbr_sysctl_ctx;
 struct sysctl_oid *bbr_sysctl_root;
 
 #define	TCPT_RANGESET_NOSLOP(tv, value, tvmin, tvmax) do { \
 	(tv) = (value); \
 	if ((u_long)(tv) < (u_long)(tvmin)) \
 		(tv) = (tvmin); \
 	if ((u_long)(tv) > (u_long)(tvmax)) \
 		(tv) = (tvmax); \
 } while(0)
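 /*
  * A quick worked example of the clamp above, using the persist bounds
  * declared below (bbr_persist_min = 250000, bbr_persist_max = 1000000):
  *
  *   TCPT_RANGESET_NOSLOP(tv, 5000, bbr_persist_min, bbr_persist_max);
  *     -> tv == 250000 (5ms is raised to the 250ms floor)
  *   TCPT_RANGESET_NOSLOP(tv, 2000000, bbr_persist_min, bbr_persist_max);
  *     -> tv == 1000000 (2s is capped at the 1s ceiling)
  */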
 
 /*#define BBR_INVARIANTS 1*/
 
 /*
  * initial window
  */
 static uint32_t bbr_def_init_win = 10;
 static int32_t bbr_persist_min = 250000;	/* 250ms */
 static int32_t bbr_persist_max = 1000000;	/* 1 Second */
 static int32_t bbr_cwnd_may_shrink = 0;
 static int32_t bbr_cwndtarget_rtt_touse = BBR_RTT_PROP;
 static int32_t bbr_num_pktepo_for_del_limit = BBR_NUM_RTTS_FOR_DEL_LIMIT;
 static int32_t bbr_hardware_pacing_limit = 8000;
 static int32_t bbr_quanta = 3;	/* How much extra quanta do we get? */
 static int32_t bbr_no_retran = 0;
 
 static int32_t bbr_error_base_paceout = 10000; /* usec to pace */
 static int32_t bbr_max_net_error_cnt = 10;
 /* Should the following be dynamic too -- loss wise */
 static int32_t bbr_rtt_gain_thresh = 0;
 /* Measurement controls */
 static int32_t bbr_use_google_algo = 1;
 static int32_t bbr_ts_limiting = 1;
 static int32_t bbr_ts_can_raise = 0;
 static int32_t bbr_do_red = 600;
 static int32_t bbr_red_scale = 20000;
 static int32_t bbr_red_mul = 1;
 static int32_t bbr_red_div = 2;
 static int32_t bbr_red_growth_restrict = 1;
 static int32_t  bbr_target_is_bbunit = 0;
 static int32_t bbr_drop_limit = 0;
 /*
  * How much gain do we need to see to
  * stay in startup?
  */
 static int32_t bbr_marks_rxt_sack_passed = 0;
 static int32_t bbr_start_exit = 25;
 static int32_t bbr_low_start_exit = 25;	/* When we are in reduced gain */
 static int32_t bbr_startup_loss_thresh = 2000;	/* 20.00% loss */
 static int32_t bbr_hptsi_max_mul = 1;	/* These two mul/div assure a min pacing */
 static int32_t bbr_hptsi_max_div = 2;	/* time, 0 means turned off. We need this
 					 * if we go back ever to where the pacer
 					 * has priority over timers.
 					 */
 static int32_t bbr_policer_call_from_rack_to = 0;
 static int32_t bbr_policer_detection_enabled = 1;
 static int32_t bbr_min_measurements_req = 1;	/* We need at least 2
 						 * measurements before we are
 						 * "good" note that 2 == 1.
 						 * This is because we use a >
 						 * comparison. This means if
 						 * min_measure was 0, it takes
 						 * num-measures > min(0) and
 						 * you get 1 measurement and
 						 * you are good. Set to 1, you
 						 * have to have two
 						 * measurements (this is done
 						 * to prevent it from being ok
 						 * to have no measurements). */
 static int32_t bbr_no_pacing_until = 4;
 
 static int32_t bbr_min_usec_delta = 20000;	/* 20,000 usecs */
 static int32_t bbr_min_peer_delta = 20;		/* 20 units */
 static int32_t bbr_delta_percent = 150;		/* 15.0 % */
 
 static int32_t bbr_target_cwnd_mult_limit = 8;
 /*
  * bbr_cwnd_min_val is the number of
  * segments we hold to in the RTT probe
  * state typically 4.
  */
 static int32_t bbr_cwnd_min_val = BBR_PROBERTT_NUM_MSS;
 
 static int32_t bbr_cwnd_min_val_hs = BBR_HIGHSPEED_NUM_MSS;
 
 static int32_t bbr_gain_to_target = 1;
 static int32_t bbr_gain_gets_extra_too = 1;
 /*
  * bbr_high_gain is the 2/ln(2) value we need
  * to double the sending rate in startup. This
  * is used for both cwnd and hptsi gains.
  */
 static int32_t bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;
 static int32_t bbr_startup_lower = BBR_UNIT * 1500 / 1000 + 1;
 static int32_t bbr_use_lower_gain_in_startup = 1;
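 /*
  * For reference on the fixed-point scaling above (treating BBR_UNIT as a
  * gain of 1.0): 2/ln(2) ~= 2.885, so bbr_high_gain works out to just over
  * 2.885 * BBR_UNIT (the "+ 1" rounds the integer math up), and
  * bbr_startup_lower to roughly 1.5 * BBR_UNIT.
  */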
 
 /* thresholds for reduction on drain in sub-states/drain */
 static int32_t bbr_drain_rtt = BBR_SRTT;
 static int32_t bbr_drain_floor = 88;
 static int32_t google_allow_early_out = 1;
 static int32_t google_consider_lost = 1;
 static int32_t bbr_drain_drop_mul = 4;
 static int32_t bbr_drain_drop_div = 5;
 static int32_t bbr_rand_ot = 50;
 static int32_t bbr_can_force_probertt = 0;
 static int32_t bbr_can_adjust_probertt = 1;
 static int32_t bbr_probertt_sets_rtt = 0;
 static int32_t bbr_can_use_ts_for_rtt = 1;
 static int32_t bbr_is_ratio = 0;
 static int32_t bbr_sub_drain_app_limit = 1;
 static int32_t bbr_prtt_slam_cwnd = 1;
 static int32_t bbr_sub_drain_slam_cwnd = 1;
 static int32_t bbr_slam_cwnd_in_main_drain = 1;
 static int32_t bbr_filter_len_sec = 6;	/* How long does the rttProp filter
 					 * hold */
 static uint32_t bbr_rtt_probe_limit = (USECS_IN_SECOND * 4);
 /*
  * bbr_drain_gain is the reverse of the high_gain
  * designed to drain back out the standing queue
  * that is formed in startup by causing a larger
  * hptsi gain and thus draining the packets
  * in flight.
  */
 static int32_t bbr_drain_gain = BBR_UNIT * 1000 / 2885;
 static int32_t bbr_rttprobe_gain = 192;
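 /*
  * Numerically, bbr_drain_gain is the reciprocal of bbr_high_gain above:
  * 1000/2885 ~= 0.3466 in BBR_UNIT terms, and 2.885 * 0.3466 ~= 1.0, which
  * is what lets the drain gain undo the standing queue that the high gain
  * built up during startup.
  */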
 
 /*
  * The cwnd_gain is the default cwnd gain applied when
  * calculating a target cwnd. Note that the cwnd is
  * a secondary factor in the way BBR works (see the
  * paper and think about it, it will take some time).
  * Basically the hptsi_gain spreads the packets out
  * so you never get more than BDP to the peer even
  * if the cwnd is high. In our implementation that
  * means in non-recovery/retransmission scenarios
  * cwnd will never be reached by the flight-size.
  */
 static int32_t bbr_cwnd_gain = BBR_UNIT * 2;
 static int32_t bbr_tlp_type_to_use = BBR_SRTT;
 static int32_t bbr_delack_time = 100000;	/* 100ms in useconds */
 static int32_t bbr_sack_not_required = 0;	/* set to one to allow non-sack to use bbr */
 static int32_t bbr_initial_bw_bps = 62500;	/* 500kbps in bytes ps */
 static int32_t bbr_ignore_data_after_close = 1;
 static int16_t bbr_hptsi_gain[] = {
 	(BBR_UNIT *5 / 4),
 	(BBR_UNIT * 3 / 4),
 	BBR_UNIT,
 	BBR_UNIT,
 	BBR_UNIT,
 	BBR_UNIT,
 	BBR_UNIT,
 	BBR_UNIT
 };
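 /*
  * The table above is the familiar BBR probe_bw pacing-gain cycle: one
  * 5/4 "probe up" phase, one 3/4 "drain" phase, then six cruise phases at
  * a gain of 1.0.  bbr_state_val() below returns the current substate,
  * which is presumably what indexes this table while in probe_bw.
  */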
 int32_t bbr_use_rack_resend_cheat = 1;
 int32_t bbr_sends_full_iwnd = 1;
 
 #define BBR_HPTSI_GAIN_MAX 8
 /*
  * The BBR module incorporates a number of
  * TCP ideas that have been put out into the IETF
  * over the last few years:
  * - Yuchung Cheng's RACK TCP (for which it is named) that
  *    will stop us using the number of dup acks and instead
  *    use time as the gauge of when we retransmit.
  * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
  *    of Dukkipati et al.
  * - Van Jacobson et al.'s BBR.
  *
  * RACK depends on SACK, so if an endpoint arrives that
  * cannot do SACK the state machine below will shuttle the
  * connection back to using the "default" TCP stack that is
  * in FreeBSD.
  *
  * To implement BBR and RACK the original TCP stack was first decomposed
  * into a functional state machine with individual states
  * for each of the possible TCP connection states. The do_segment
  * function's role in life is to mandate that the connection supports SACK
  * initially and then assure that the RACK state matches the connection
  * state before calling the states do_segment function. Data processing
  * of inbound segments also now happens in the hpts_do_segment in general
  * with only one exception. This is so we can keep the connection on
  * a single CPU.
  *
  * Each state is simplified due to the fact that the original do_segment
  * has been decomposed and we *know* what state we are in (no
  * switches on the state) and all tests for SACK are gone. This
  * greatly simplifies what each state does.
  *
  * TCP output is also over-written with a new version since it
  * must maintain the new rack scoreboard and has had hptsi
  * integrated as a requirement. Still todo is to eliminate the
  * use of the callout_() system and use the hpts for all
  * timers as well.
  */
 static uint32_t bbr_rtt_probe_time = 200000;	/* 200ms in micro seconds */
 static uint32_t bbr_rtt_probe_cwndtarg = 4;	/* How many mss's outstanding */
 static const int32_t bbr_min_req_free = 2;	/* The min we must have on the
 						 * free list */
 static int32_t bbr_tlp_thresh = 1;
 static int32_t bbr_reorder_thresh = 2;
 static int32_t bbr_reorder_fade = 60000000;	/* 0 - never fade, def
 						 * 60,000,000 - 60 seconds */
 static int32_t bbr_pkt_delay = 1000;
 static int32_t bbr_min_to = 1000;	/* Number of usec's minimum timeout */
 static int32_t bbr_incr_timers = 1;
 
 static int32_t bbr_tlp_min = 10000;	/* 10ms in usecs */
 static int32_t bbr_delayed_ack_time = 200000;	/* 200ms in usecs */
 static int32_t bbr_exit_startup_at_loss = 1;
 
 /*
  * bbr_lt_bw_ratio is 1/8th
  * bbr_lt_bw_diff is  < 4 Kbit/sec
  */
 static uint64_t bbr_lt_bw_diff = 4000 / 8;	/* In bytes per second */
 static uint64_t bbr_lt_bw_ratio = 8;	/* For 1/8th */
 static uint32_t bbr_lt_bw_max_rtts = 48;	/* How many rtt's do we use
 						 * the lt_bw for */
 static uint32_t bbr_lt_intvl_min_rtts = 4;	/* Min num of RTT's to measure
 						 * lt_bw */
 static int32_t bbr_lt_intvl_fp = 0;		/* False positive epoch diff */
 static int32_t bbr_lt_loss_thresh = 196;	/* Lost vs delivered % */
 static int32_t bbr_lt_fd_thresh = 100;		/* false detection % */
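 /*
  * A concrete reading of the defaults above: 4000/8 = 500 bytes per
  * second, i.e. two lt_bw interval measurements are presumably treated
  * as consistent when they differ by less than 1/8th of the rate or by
  * less than 500 bytes/sec while the long-term (policer) bandwidth is
  * being sampled.
  */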
 
 static int32_t bbr_verbose_logging = 0;
 /*
  * Currently regular tcp has a rto_min of 30ms;
  * the backoff goes 12 times, so that ends up
  * being a total of 122.850 seconds before a
  * connection is killed.
  */
 static int32_t bbr_rto_min_ms = 30;	/* 30ms same as main freebsd */
 static int32_t bbr_rto_max_sec = 4;	/* 4 seconds */
 
 /****************************************************/
 /* DEFAULT TSO SIZING  (cpu performance impacting)  */
 /****************************************************/
 /* What amount is our formula using to get TSO size */
 static int32_t bbr_hptsi_per_second = 1000;
 
 /*
  * For hptsi on connections under bbr_cross_over, a delay
  * target of 7ms (in usec) combined with a seg_max of 2
  * gets us close to identical google behavior in
  * TSO size selection (possibly more 1MSS sends).
  */
 static int32_t bbr_hptsi_segments_delay_tar = 7000;
 
 /* Does pacing delay include overheads in its time calculations? */
 static int32_t bbr_include_enet_oh = 0;
 static int32_t bbr_include_ip_oh = 1;
 static int32_t bbr_include_tcp_oh = 1;
 static int32_t bbr_google_discount = 10;
 
 /* Do we use (nf mode) pkt-epoch to drive us or rttProp? */
 static int32_t bbr_state_is_pkt_epoch = 0;
 static int32_t bbr_state_drain_2_tar = 1;
 /* What is the max the 0 - bbr_cross_over MBPS TSO target
  * can reach using our delay target. Note that this
  * value becomes the floor for the cross over
  * algorithm.
  */
 static int32_t bbr_hptsi_segments_max = 2;
 static int32_t bbr_hptsi_segments_floor = 1;
 static int32_t bbr_hptsi_utter_max = 0;
 
 /* What is the min the 0 - bbr_cross_over MBPS TSO target can be */
 static int32_t bbr_hptsi_bytes_min = 1460;
 static int32_t bbr_all_get_min = 0;
 
 /* Cross over point from algo-a to algo-b */
 static uint32_t bbr_cross_over = TWENTY_THREE_MBPS;
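 /*
  * To put the delay target above in perspective (pure arithmetic, with an
  * assumed 1448-byte payload per segment): at 10 Mbit/s a 7000 usec delay
  * target corresponds to about 1.25e6 * 0.007 ~= 8750 bytes, i.e. roughly
  * 6 segments worth of data per pacing interval, which is then clamped by
  * bbr_hptsi_segments_max/bbr_hptsi_segments_floor below the cross-over.
  */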
 
 /* Do we deal with our restart state? */
 static int32_t bbr_uses_idle_restart = 0;
 static int32_t bbr_idle_restart_threshold = 100000;	/* 100ms in useconds */
 
 /* Do we allow hardware pacing? */
 static int32_t bbr_allow_hdwr_pacing = 0;
 static int32_t bbr_hdwr_pace_adjust = 2;	/* multiplier when we calc the tso size */
 static int32_t bbr_hdwr_pace_floor = 1;
 static int32_t bbr_hdwr_pacing_delay_cnt = 10;
 
 /****************************************************/
 static int32_t bbr_resends_use_tso = 0;
 static int32_t bbr_tlp_max_resend = 2;
 static int32_t bbr_sack_block_limit = 128;
 
 #define  BBR_MAX_STAT 19
 counter_u64_t bbr_state_time[BBR_MAX_STAT];
 counter_u64_t bbr_state_lost[BBR_MAX_STAT];
 counter_u64_t bbr_state_resend[BBR_MAX_STAT];
 counter_u64_t bbr_stat_arry[BBR_STAT_SIZE];
 counter_u64_t bbr_opts_arry[BBR_OPTS_SIZE];
 counter_u64_t bbr_out_size[TCP_MSS_ACCT_SIZE];
 counter_u64_t bbr_flows_whdwr_pacing;
 counter_u64_t bbr_flows_nohdwr_pacing;
 
 counter_u64_t bbr_nohdwr_pacing_enobuf;
 counter_u64_t bbr_hdwr_pacing_enobuf;
 
 static inline uint64_t bbr_get_bw(struct tcp_bbr *bbr);
 
 /*
  * Static definitions we need for forward declarations.
  */
 static uint32_t
 bbr_get_pacing_length(struct tcp_bbr *bbr, uint16_t gain,
 		      uint32_t useconds_time, uint64_t bw);
 static uint32_t
 bbr_get_a_state_target(struct tcp_bbr *bbr, uint32_t gain);
 static void
 bbr_set_state(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t win);
 static void
 bbr_set_probebw_gains(struct tcp_bbr *bbr,  uint32_t cts, uint32_t losses);
 static void
 bbr_substate_change(struct tcp_bbr *bbr, uint32_t cts, int line,
 		    int dolog);
 static uint32_t
 bbr_get_target_cwnd(struct tcp_bbr *bbr, uint64_t bw, uint32_t gain);
 static void
 bbr_state_change(struct tcp_bbr *bbr, uint32_t cts, int32_t epoch,
 		 int32_t pkt_epoch, uint32_t losses);
 static uint32_t
 bbr_calc_thresh_rack(struct tcp_bbr *bbr, uint32_t srtt, uint32_t cts,
 		     struct bbr_sendmap *rsm);
 static uint32_t
 bbr_initial_cwnd(struct tcp_bbr *bbr, struct tcpcb *tp);
 static uint32_t
 bbr_calc_thresh_tlp(struct tcpcb *tp, struct tcp_bbr *bbr,
 		    struct bbr_sendmap *rsm, uint32_t srtt, uint32_t cts);
 static void
 bbr_exit_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts,
 		 int32_t line);
 static void
 bbr_set_state_target(struct tcp_bbr *bbr, int line);
 static void
 bbr_enter_probe_rtt(struct tcp_bbr *bbr, uint32_t cts, int32_t line);
 static void
 bbr_log_progress_event(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t tick,
 		       int event, int line);
 static void
 tcp_bbr_tso_size_check(struct tcp_bbr *bbr, uint32_t cts);
 static void
 bbr_setup_red_bw(struct tcp_bbr *bbr, uint32_t cts);
 static void
 bbr_log_rtt_shrinks(struct tcp_bbr *bbr, uint32_t cts, uint32_t applied,
 		    uint32_t rtt, uint32_t line, uint8_t is_start,
 		    uint16_t set);
 static struct bbr_sendmap *
 bbr_find_lowest_rsm(struct tcp_bbr *bbr);
 static __inline uint32_t
 bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type);
 static void
 bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot,
 		 uint8_t which);
 static void
 bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts,
 		  uint32_t time_since_sent, uint32_t srtt,
 		  uint32_t thresh, uint32_t to);
 static void
 bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag);
 static void
 bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot,
 		    uint32_t del_by, uint32_t cts, uint32_t sloton,
 		    uint32_t prev_delay);
 static void
 bbr_enter_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts,
 		  int32_t line);
 static void
 bbr_stop_all_timers(struct tcpcb *tp, struct tcp_bbr *bbr);
 static void
 bbr_exit_probe_rtt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts);
 static void
 bbr_check_probe_rtt_limits(struct tcp_bbr *bbr, uint32_t cts);
 static void
 bbr_timer_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts);
 static void
 bbr_log_pacing_delay_calc(struct tcp_bbr *bbr, uint16_t gain, uint32_t len,
 			  uint32_t cts, uint32_t usecs, uint64_t bw,
 			  uint32_t override, int mod);
 static int bbr_ctloutput(struct tcpcb *tp, struct sockopt *sopt);
 
 static inline uint8_t
 bbr_state_val(struct tcp_bbr *bbr)
 {
 	return(bbr->rc_bbr_substate);
 }
 
 static inline uint32_t
 get_min_cwnd(struct tcp_bbr *bbr)
 {
 	int mss;
 
 	mss = min((bbr->rc_tp->t_maxseg - bbr->rc_last_options),
 		  bbr->r_ctl.rc_pace_max_segs);
 	if (bbr_get_rtt(bbr, BBR_RTT_PROP) < BBR_HIGH_SPEED)
 		return (bbr_cwnd_min_val_hs * mss);
 	else
 		return (bbr_cwnd_min_val * mss);
 }
 
 static uint32_t
 bbr_get_persists_timer_val(struct tcpcb *tp, struct tcp_bbr *bbr)
 {
 	uint64_t srtt, var;
 	uint64_t ret_val;
 
 	bbr->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
 	if (tp->t_srtt == 0) {
 		srtt = (uint64_t)BBR_INITIAL_RTO;
 		var = 0;
 	} else {
 		srtt = ((uint64_t)TICKS_2_USEC(tp->t_srtt) >> TCP_RTT_SHIFT);
 		var = ((uint64_t)TICKS_2_USEC(tp->t_rttvar) >> TCP_RTT_SHIFT);
 	}
 	TCPT_RANGESET_NOSLOP(ret_val, ((srtt + var) * tcp_backoff[tp->t_rxtshift]),
 	    bbr_persist_min, bbr_persist_max);
 	return ((uint32_t)ret_val);
 }
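 /*
  * A rough worked example of the persist timeout above (values are purely
  * illustrative): once the TICKS_2_USEC()/TCP_RTT_SHIFT scaling yields,
  * say, srtt ~= 40000 usec and var ~= 10000 usec, then with t_rxtshift of
  * 2 (tcp_backoff[2] == 4) the raw value is (40000 + 10000) * 4 = 200000
  * usec, which TCPT_RANGESET_NOSLOP raises to bbr_persist_min (250000).
  */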
 
 static uint32_t
 bbr_timer_start(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	/*
 	 * Start the FR timer, we do this based on getting the first one in
 	 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
 	 * events we need to stop the running timer (if it's running) before
 	 * starting the new one.
 	 */
 	uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
 	int32_t idx;
 	int32_t is_tlp_timer = 0;
 	struct bbr_sendmap *rsm;
 
 	if (bbr->rc_all_timers_stopped) {
 		/* All timers have been stopped none are to run */
 		return (0);
 	}
 	if (bbr->rc_in_persist) {
 		/* We can't start any timer in persists */
 		return (bbr_get_persists_timer_val(tp, bbr));
 	}
 	rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap);
 	if ((rsm == NULL) ||
 	    ((tp->t_flags & TF_SACK_PERMIT) == 0) ||
 	    (tp->t_state < TCPS_ESTABLISHED)) {
 		/* Nothing on the send map */
 activate_rxt:
 		if (SEQ_LT(tp->snd_una, tp->snd_max) ||
 		    sbavail(&tptosocket(tp)->so_snd)) {
 			uint64_t tov;
 
 			time_since_sent = 0;
 			rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap);
 			if (rsm) {
 				idx = rsm->r_rtr_cnt - 1;
 				if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], bbr->r_ctl.rc_tlp_rxt_last_time))
 					tstmp_touse = rsm->r_tim_lastsent[idx];
 				else
 					tstmp_touse = bbr->r_ctl.rc_tlp_rxt_last_time;
 				if (TSTMP_GT(tstmp_touse, cts))
 				    time_since_sent = cts - tstmp_touse;
 			}
 			bbr->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
 			if (tp->t_srtt == 0)
 				tov = BBR_INITIAL_RTO;
 			else
 				tov = ((uint64_t)(TICKS_2_USEC(tp->t_srtt) +
 				    ((uint64_t)TICKS_2_USEC(tp->t_rttvar) * (uint64_t)4)) >> TCP_RTT_SHIFT);
 			if (tp->t_rxtshift)
 				tov *= tcp_backoff[tp->t_rxtshift];
 			if (tov > time_since_sent)
 				tov -= time_since_sent;
 			else
 				tov = bbr->r_ctl.rc_min_to;
 			TCPT_RANGESET_NOSLOP(to, tov,
 			    (bbr->r_ctl.rc_min_rto_ms * MS_IN_USEC),
 			    (bbr->rc_max_rto_sec * USECS_IN_SECOND));
 			bbr_log_timer_var(bbr, 2, cts, 0, srtt, 0, to);
 			return (to);
 		}
 		return (0);
 	}
 	if (rsm->r_flags & BBR_ACKED) {
 		rsm = bbr_find_lowest_rsm(bbr);
 		if (rsm == NULL) {
 			/* No lowest? */
 			goto activate_rxt;
 		}
 	}
 	/* Convert from ms to usecs */
 	if (rsm->r_flags & BBR_SACK_PASSED) {
 		if ((tp->t_flags & TF_SENTFIN) &&
 		    ((tp->snd_max - tp->snd_una) == 1) &&
 		    (rsm->r_flags & BBR_HAS_FIN)) {
 			/*
 			 * We don't start a bbr rack timer if all we have is
 			 * a FIN outstanding.
 			 */
 			goto activate_rxt;
 		}
 		srtt = bbr_get_rtt(bbr, BBR_RTT_RACK);
 		thresh = bbr_calc_thresh_rack(bbr, srtt, cts, rsm);
 		idx = rsm->r_rtr_cnt - 1;
 		exp = rsm->r_tim_lastsent[idx] + thresh;
 		if (SEQ_GEQ(exp, cts)) {
 			to = exp - cts;
 			if (to < bbr->r_ctl.rc_min_to) {
 				to = bbr->r_ctl.rc_min_to;
 			}
 		} else {
 			to = bbr->r_ctl.rc_min_to;
 		}
 	} else {
 		/* Ok we need to do a TLP not RACK */
 		if (bbr->rc_tlp_in_progress != 0) {
 			/*
 			 * The previous send was a TLP.
 			 */
 			goto activate_rxt;
 		}
 		rsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_tmap, bbr_sendmap, r_tnext);
 		if (rsm == NULL) {
 			/* We found no rsm to TLP with. */
 			goto activate_rxt;
 		}
 		if (rsm->r_flags & BBR_HAS_FIN) {
 			/* If its a FIN we don't do TLP */
 			rsm = NULL;
 			goto activate_rxt;
 		}
 		time_since_sent = 0;
 		idx = rsm->r_rtr_cnt - 1;
 		if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], bbr->r_ctl.rc_tlp_rxt_last_time))
 			tstmp_touse = rsm->r_tim_lastsent[idx];
 		else
 			tstmp_touse = bbr->r_ctl.rc_tlp_rxt_last_time;
 		if (TSTMP_GT(tstmp_touse, cts))
 		    time_since_sent = cts - tstmp_touse;
 		is_tlp_timer = 1;
 		srtt = bbr_get_rtt(bbr, bbr_tlp_type_to_use);
 		thresh = bbr_calc_thresh_tlp(tp, bbr, rsm, srtt, cts);
 		if (thresh > time_since_sent)
 			to = thresh - time_since_sent;
 		else
 			to = bbr->r_ctl.rc_min_to;
 		if (to > (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND)) {
 			/*
 			 * If the TLP time works out to larger than the max
 			 * RTO lets not do TLP.. just RTO.
 			 */
 			goto activate_rxt;
 		}
 		if ((bbr->rc_tlp_rtx_out == 1) &&
 		    (rsm->r_start == bbr->r_ctl.rc_last_tlp_seq)) {
 			/*
 			 * Second retransmit of the same TLP
 			 * lets not.
 			 */
 			bbr->rc_tlp_rtx_out = 0;
 			goto activate_rxt;
 		}
 		if (rsm->r_start != bbr->r_ctl.rc_last_tlp_seq) {
 			/*
 			 * The tail is no longer the last one I did a probe
 			 * on
 			 */
 			bbr->r_ctl.rc_tlp_seg_send_cnt = 0;
 			bbr->r_ctl.rc_last_tlp_seq = rsm->r_start;
 		}
 	}
 	if (is_tlp_timer == 0) {
 		BBR_STAT_INC(bbr_to_arm_rack);
 		bbr->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
 	} else {
 		bbr_log_timer_var(bbr, 1, cts, time_since_sent, srtt, thresh, to);
 		if (bbr->r_ctl.rc_tlp_seg_send_cnt > bbr_tlp_max_resend) {
 			/*
 			 * We have exceeded how many times we can retran the
 			 * current TLP timer, switch to the RTO timer.
 			 */
 			goto activate_rxt;
 		} else {
 			BBR_STAT_INC(bbr_to_arm_tlp);
 			bbr->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
 		}
 	}
 	return (to);
 }
 
 static inline int32_t
 bbr_minseg(struct tcp_bbr *bbr)
 {
 	return (bbr->r_ctl.rc_pace_min_segs - bbr->rc_last_options);
 }
 
 static void
 bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_t frm, int32_t slot, uint32_t tot_len)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct hpts_diag diag;
 	uint32_t delayed_ack = 0;
 	uint32_t left = 0;
 	uint32_t hpts_timeout;
 	uint8_t stopped;
 	int32_t delay_calc = 0;
 	uint32_t prev_delay = 0;
 
 	if (tcp_in_hpts(tp)) {
 		/* A previous call is already set up */
 		return;
 	}
 	if ((tp->t_state == TCPS_CLOSED) ||
 	    (tp->t_state == TCPS_LISTEN)) {
 		return;
 	}
 	stopped = bbr->rc_tmr_stopped;
 	if (stopped && TSTMP_GT(bbr->r_ctl.rc_timer_exp, cts)) {
 		left = bbr->r_ctl.rc_timer_exp - cts;
 	}
 	bbr->r_ctl.rc_hpts_flags = 0;
 	bbr->r_ctl.rc_timer_exp = 0;
 	prev_delay = bbr->r_ctl.rc_last_delay_val;
 	if (bbr->r_ctl.rc_last_delay_val &&
 	    (slot == 0)) {
 		/*
 		 * If a previous pacer delay was in place we
 		 * are not coming from the output side (where
 		 * we calculate a delay, more likely a timer).
 		 */
 		slot = bbr->r_ctl.rc_last_delay_val;
 		if (TSTMP_GT(cts, bbr->rc_pacer_started)) {
 			/* Compensate for time passed  */
 			delay_calc = cts - bbr->rc_pacer_started;
 			if (delay_calc <= slot)
 				slot -= delay_calc;
 		}
 	}
 	/* Do we have accumulated earliness to make up for by pushing out the pacing time? */
 	if (bbr->r_agg_early_set) {
 		bbr_log_pacing_delay_calc(bbr, 0, bbr->r_ctl.rc_agg_early, cts, slot, 0, bbr->r_agg_early_set, 2);
 		slot += bbr->r_ctl.rc_agg_early;
 		bbr->r_ctl.rc_agg_early = 0;
 		bbr->r_agg_early_set = 0;
 	}
 	/* Are we running a total debt that needs to be compensated for? */
 	if (bbr->r_ctl.rc_hptsi_agg_delay) {
 		if (slot > bbr->r_ctl.rc_hptsi_agg_delay) {
 			/* We nuke the delay */
 			slot -= bbr->r_ctl.rc_hptsi_agg_delay;
 			bbr->r_ctl.rc_hptsi_agg_delay = 0;
 		} else {
 			/* We nuke some of the delay, put in a minimal 100usecs  */
 			bbr->r_ctl.rc_hptsi_agg_delay -= slot;
 			bbr->r_ctl.rc_last_delay_val = slot = 100;
 		}
 	}
 	bbr->r_ctl.rc_last_delay_val = slot;
 	hpts_timeout = bbr_timer_start(tp, bbr, cts);
 	if (tp->t_flags & TF_DELACK) {
 		if (bbr->rc_in_persist == 0) {
 			delayed_ack = bbr_delack_time;
 		} else {
 			/*
 			 * We are in persists and have
 			 * gotten a new data element.
 			 */
 			if (hpts_timeout > bbr_delack_time) {
 				/*
 				 * Lets make the persists timer (which acks)
 				 * be the smaller of hpts_timeout and bbr_delack_time.
 				 */
 				hpts_timeout = bbr_delack_time;
 			}
 		}
 	}
 	if (delayed_ack &&
 	    ((hpts_timeout == 0) ||
 	     (delayed_ack < hpts_timeout))) {
 		/* We need a Delayed ack timer */
 		bbr->r_ctl.rc_hpts_flags = PACE_TMR_DELACK;
 		hpts_timeout = delayed_ack;
 	}
 	if (slot) {
 		/* Mark that we have a pacing timer up */
 		BBR_STAT_INC(bbr_paced_segments);
 		bbr->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
 	}
 	/*
 	 * If no timers are going to run and we will fall off the hptsi
 	 * wheel, we resort to a keep-alive timer if it's configured.
 	 */
 	if ((hpts_timeout == 0) &&
 	    (slot == 0)) {
 		if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
 		    (tp->t_state <= TCPS_CLOSING)) {
 			/*
 			 * Ok we have no timer (persists, rack, tlp, rxt  or
 			 * del-ack), we don't have segments being paced. So
 			 * all that is left is the keepalive timer.
 			 */
 			if (TCPS_HAVEESTABLISHED(tp->t_state)) {
 				hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
 			} else {
 				hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
 			}
 			bbr->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
 		}
 	}
 	if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
 	    (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
 		/*
 		 * RACK, TLP, persists and RXT timers all are restartable
 		 * based on input actions, i.e. we received a packet (ack
 		 * or sack) and that changes things (rw, or snd_una etc).
 		 * Thus we can restart them with a new value. For
 		 * keep-alive, delayed_ack we keep track of what was left
 		 * and restart the timer with a smaller value.
 		 */
 		if (left < hpts_timeout)
 			hpts_timeout = left;
 	}
 	if (bbr->r_ctl.rc_incr_tmrs && slot &&
 	    (bbr->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
 		/*
 		 * If configured to do so, and the timer is either
 		 * the TLP or RXT timer, we need to increase the timeout
 		 * by the pacing time. Consider the bottleneck at my
 		 * machine as an example, we are sending something
 		 * to start a TLP on. The last packet won't be emitted
 		 * fully until the pacing time (the bottleneck will hold
 		 * the data in place). Once the packet is emitted that
 		 * is when we want to start waiting for the TLP. This
 		 * is most evident with hardware pacing (where the nic
 		 * is holding the packet(s) before emitting). But it
 		 * can also show up in the network so we do it for all
 		 * cases. Technically we would take off one packet from
 		 * this extra delay but this is easier and being more
 		 * conservative is probably better.
 		 */
 		hpts_timeout += slot;
 	}
 	if (hpts_timeout) {
 		/*
 		 * Hack alert for now we can't time-out over 2147 seconds (a
 		 * bit more than 35min)
 		 */
 		if (hpts_timeout > 0x7ffffffe)
 			hpts_timeout = 0x7ffffffe;
 		bbr->r_ctl.rc_timer_exp = cts + hpts_timeout;
 	} else
 		bbr->r_ctl.rc_timer_exp = 0;
 	if ((slot) &&
 	    (bbr->rc_use_google ||
 	     bbr->output_error_seen ||
 	     (slot <= hpts_timeout))  ) {
 		/*
 		 * Tell LRO that it can queue packets while
 		 * we pace.
 		 */
 		bbr->rc_tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
 		if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
 		    (bbr->rc_cwnd_limited == 0)) {
 			/*
 			 * If we are not cwnd limited and we
 			 * are running a rack timer we put on
 			 * the do-not-disturb flag even for sacks.
 			 */
 			tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
 		} else
 			tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
 		bbr->rc_pacer_started = cts;
 
 		(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot),
 					   __LINE__, &diag);
 		bbr->rc_timer_first = 0;
 		bbr->bbr_timer_src = frm;
 		bbr_log_to_start(bbr, cts, hpts_timeout, slot, 1);
 		bbr_log_hpts_diag(bbr, cts, &diag);
 	} else if (hpts_timeout) {
 		(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout),
 					   __LINE__, &diag);
 		/*
 		 * We add the flag here as well if the slot is set,
 		 * since hpts will call in to clear the queue first before
 		 * calling the output routine (which does our timers).
 		 * We don't want to set the flag if it's just a timer,
 		 * else the arrival of data (that causes us
 		 * to send more) might get delayed. Imagine being
 		 * on a keep-alive timer and a request comes in for
 		 * more data.
 		 */
 		if (slot)
 			bbr->rc_pacer_started = cts;
 		if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
 		    (bbr->rc_cwnd_limited == 0)) {
 			/*
 			 * For a rack timer, don't wake us even
 			 * if a sack arrives as long as we are
 			 * not cwnd limited.
 			 */
 			tp->t_flags2 |= (TF2_MBUF_QUEUE_READY |
 			    TF2_DONT_SACK_QUEUE);
 		} else {
 			/* All other timers wake us up */
 			tp->t_flags2 &= ~(TF2_MBUF_QUEUE_READY |
 			    TF2_DONT_SACK_QUEUE);
 		}
 		bbr->bbr_timer_src = frm;
 		bbr_log_to_start(bbr, cts, hpts_timeout, slot, 0);
 		bbr_log_hpts_diag(bbr, cts, &diag);
 		bbr->rc_timer_first = 1;
 	}
 	bbr->rc_tmr_stopped = 0;
 	bbr_log_type_bbrsnd(bbr, tot_len, slot, delay_calc, cts, frm, prev_delay);
 }
 
 static void
 bbr_timer_audit(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, struct sockbuf *sb)
 {
 	/*
 	 * We received an ack, and then did not call send or were bounced
 	 * out because the hpts was running. Now a timer is up as well; is it
 	 * the right timer?
 	 */
 	struct inpcb *inp;
 	struct bbr_sendmap *rsm;
 	uint32_t hpts_timeout;
 	int tmr_up;
 
 	tmr_up = bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
 	if (bbr->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
 		return;
 	rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap);
 	if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
 	    (tmr_up == PACE_TMR_RXT)) {
 		/* Should be an RXT */
 		return;
 	}
 	inp = bbr->rc_inp;
 	if (rsm == NULL) {
 		/* Nothing outstanding? */
 		if (tp->t_flags & TF_DELACK) {
 			if (tmr_up == PACE_TMR_DELACK)
 				/*
 				 * We are supposed to have delayed ack up
 				 * and we do
 				 */
 				return;
 		} else if (sbavail(&inp->inp_socket->so_snd) &&
 		    (tmr_up == PACE_TMR_RXT)) {
 			/*
 			 * if we hit enobufs then we would expect the
 			 * possibility of nothing outstanding and the RXT up
 			 * (and the hptsi timer).
 			 */
 			return;
 		} else if (((V_tcp_always_keepalive ||
 			    inp->inp_socket->so_options & SO_KEEPALIVE) &&
 			    (tp->t_state <= TCPS_CLOSING)) &&
 			    (tmr_up == PACE_TMR_KEEP) &&
 		    (tp->snd_max == tp->snd_una)) {
 			/* We should have keep alive up and we do */
 			return;
 		}
 	}
 	if (rsm && (rsm->r_flags & BBR_SACK_PASSED)) {
 		if ((tp->t_flags & TF_SENTFIN) &&
 		    ((tp->snd_max - tp->snd_una) == 1) &&
 		    (rsm->r_flags & BBR_HAS_FIN)) {
 			/* needs to be a RXT */
 			if (tmr_up == PACE_TMR_RXT)
 				return;
 			else
 				goto wrong_timer;
 		} else if (tmr_up == PACE_TMR_RACK)
 			return;
 		else
 			goto wrong_timer;
 	} else if (rsm && (tmr_up == PACE_TMR_RACK)) {
 		/* Rack timer has priority if we have data out */
 		return;
 	} else if (SEQ_GT(tp->snd_max, tp->snd_una) &&
 		    ((tmr_up == PACE_TMR_TLP) ||
 	    (tmr_up == PACE_TMR_RXT))) {
 		/*
 		 * Either a TLP or RXT is fine if no sack-passed is in place
 		 * and data is outstanding.
 		 */
 		return;
 	} else if (tmr_up == PACE_TMR_DELACK) {
 		/*
 		 * If the delayed ack was going to go off before the
 		 * rtx/tlp/rack timer were going to expire, then that would
 		 * be the timer in control. Note we don't check the time
 		 * here trusting the code is correct.
 		 */
 		return;
 	}
 	if (SEQ_GT(tp->snd_max, tp->snd_una) &&
 	    ((tmr_up == PACE_TMR_RXT) ||
 	     (tmr_up == PACE_TMR_TLP) ||
 	     (tmr_up == PACE_TMR_RACK))) {
 		/*
 		 * We have outstanding data and
 		 * we *do* have a RACK, TLP or RXT
 		 * timer running. We won't restart
 		 * anything here since that's probably ok; we
 		 * will get called with some timer here shortly.
 		 */
 		return;
 	}
 	/*
 	 * Ok the timer originally started is not what we want now. We will
 	 * force the hpts to be stopped if any, and restart with the slot
 	 * set to what was in the saved slot.
 	 */
 wrong_timer:
 	if ((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) {
 		if (tcp_in_hpts(tp))
 			tcp_hpts_remove(tp);
 		bbr_timer_cancel(bbr, __LINE__, cts);
 		bbr_start_hpts_timer(bbr, tp, cts, 1, bbr->r_ctl.rc_last_delay_val,
 		    0);
 	} else {
 		/*
 		 * Output is hptsi so we just need to switch the type of
 		 * timer. We don't bother with keep-alive, since when we
 		 * jump through the output, it will start the keep-alive if
 		 * nothing is sent.
 		 *
 		 * We only need a delayed-ack added and or the hpts_timeout.
 		 */
 		hpts_timeout = bbr_timer_start(tp, bbr, cts);
 		if (tp->t_flags & TF_DELACK) {
 			if (hpts_timeout == 0) {
 				hpts_timeout = bbr_delack_time;
 				bbr->r_ctl.rc_hpts_flags = PACE_TMR_DELACK;
 			}
 			else if (hpts_timeout > bbr_delack_time) {
 				hpts_timeout = bbr_delack_time;
 				bbr->r_ctl.rc_hpts_flags = PACE_TMR_DELACK;
 			}
 		}
 		if (hpts_timeout) {
 			if (hpts_timeout > 0x7ffffffe)
 				hpts_timeout = 0x7ffffffe;
 			bbr->r_ctl.rc_timer_exp = cts + hpts_timeout;
 		}
 	}
 }
 
 int32_t bbr_clear_lost = 0;
 
 /*
  * Considers the two time values now (cts) and earlier.
  * If cts is smaller than earlier, we could have
  * had a sequence wrap (our counter wraps every
  * 70 min or so) or it could be just clock skew
  * getting us two different time values. Clock skew
  * will show up within 10ms or so. So in such
  * a case (where cts is behind earlier time by
  * less than 10ms) we return 0. Otherwise we
  * return the true difference between them.
  */
 static inline uint32_t
 bbr_calc_time(uint32_t cts, uint32_t earlier_time) {
 	/*
 	 * Given two timestamps, the current time stamp cts, and some other
 	 * time-stamp taken in theory earlier, return the difference. The
 	 * trick here is that sometimes locking will get the other timestamp
 	 * after the cts. If this occurs we need to return 0.
 	 */
 	if (TSTMP_GEQ(cts, earlier_time))
 		return (cts - earlier_time);
 	/*
 	 * cts is behind earlier_time; if it's less than 10ms consider it 0.
 	 * If it's more than a 10ms difference then we had a time wrap. Else
 	 * it's just the normal locking foo. I wonder if we should not go to
 	 * 64bit TS and get rid of this issue.
 	 */
 	if (TSTMP_GEQ((cts + 10000), earlier_time))
 		return (0);
 	/*
 	 * Ok the time must have wrapped. So we need to answer a large
 	 * amount of time, which the normal subtraction should do.
 	 */
 	return (cts - earlier_time);
 }
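 /*
  * Concretely (illustrative values): bbr_calc_time(1000050, 1000000)
  * returns 50 (normal forward difference); bbr_calc_time(1000000, 1000004)
  * returns 0 (cts is a few usec behind due to locking/skew, within the
  * 10ms allowance); and bbr_calc_time(1000000, 2000000) falls through to
  * the final return, yielding the very large unsigned difference the
  * wrap comment above describes.
  */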
 
 static int
 sysctl_bbr_clear_lost(SYSCTL_HANDLER_ARGS)
 {
 	uint32_t stat;
 	int32_t error;
 
 	error = SYSCTL_OUT(req, &bbr_clear_lost, sizeof(uint32_t));
 	if (error || req->newptr == NULL)
 		return error;
 
 	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
 	if (error)
 		return (error);
 	if (stat == 1) {
 #ifdef BBR_INVARIANTS
 		printf("Clearing BBR lost counters\n");
 #endif
 		COUNTER_ARRAY_ZERO(bbr_state_lost, BBR_MAX_STAT);
 		COUNTER_ARRAY_ZERO(bbr_state_time, BBR_MAX_STAT);
 		COUNTER_ARRAY_ZERO(bbr_state_resend, BBR_MAX_STAT);
 	} else if (stat == 2) {
 #ifdef BBR_INVARIANTS
 		printf("Clearing BBR option counters\n");
 #endif
 		COUNTER_ARRAY_ZERO(bbr_opts_arry, BBR_OPTS_SIZE);
 	} else if (stat == 3) {
 #ifdef BBR_INVARIANTS
 		printf("Clearing BBR stats counters\n");
 #endif
 		COUNTER_ARRAY_ZERO(bbr_stat_arry, BBR_STAT_SIZE);
 	} else if (stat == 4) {
 #ifdef BBR_INVARIANTS
 		printf("Clearing BBR out-size counters\n");
 #endif
 		COUNTER_ARRAY_ZERO(bbr_out_size, TCP_MSS_ACCT_SIZE);
 	}
 	bbr_clear_lost = 0;
 	return (0);
 }
 
 static void
 bbr_init_sysctls(void)
 {
 	struct sysctl_oid *bbr_probertt;
 	struct sysctl_oid *bbr_hptsi;
 	struct sysctl_oid *bbr_measure;
 	struct sysctl_oid *bbr_cwnd;
 	struct sysctl_oid *bbr_timeout;
 	struct sysctl_oid *bbr_states;
 	struct sysctl_oid *bbr_startup;
 	struct sysctl_oid *bbr_policer;
 
 	/* Probe rtt controls */
 	bbr_probertt = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
 	    "probertt",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "gain", CTLFLAG_RW,
 	    &bbr_rttprobe_gain, 192,
 	    "What is the filter gain drop in probe_rtt (0=disable)?");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "cwnd", CTLFLAG_RW,
 	    &bbr_rtt_probe_cwndtarg, 4,
 	    "How many mss's are outstanding during probe-rtt");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "int", CTLFLAG_RW,
 	    &bbr_rtt_probe_limit, 4000000,
 	    "If RTT has not shrunk in this many micro-seconds, enter probe-rtt");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "mintime", CTLFLAG_RW,
 	    &bbr_rtt_probe_time, 200000,
 	    "How many microseconds in probe-rtt");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "filter_len_sec", CTLFLAG_RW,
 	    &bbr_filter_len_sec, 6,
 	    "How long in seconds does the rttProp filter run?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "drain_rtt", CTLFLAG_RW,
 	    &bbr_drain_rtt, BBR_SRTT,
 	    "What is the drain rtt to use in probeRTT (rtt_prop=0, rtt_rack=1, rtt_pkt=2, rtt_srtt=3)?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "can_force", CTLFLAG_RW,
 	    &bbr_can_force_probertt, 0,
 	    "If we keep setting new low rtt's but delay going into probe-rtt, can we force it in?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "enter_sets_force", CTLFLAG_RW,
 	    &bbr_probertt_sets_rtt, 0,
 	    "In NF mode, do we imitate google_mode and set the rttProp on entry to probe-rtt?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "can_adjust", CTLFLAG_RW,
 	    &bbr_can_adjust_probertt, 1,
 	    "Can we dynamically adjust the probe-rtt limits and times?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "is_ratio", CTLFLAG_RW,
 	    &bbr_is_ratio, 0,
 	    "is the limit to filter a ratio?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "use_cwnd", CTLFLAG_RW,
 	    &bbr_prtt_slam_cwnd, 0,
 	    "Should we set/recover cwnd?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_probertt),
 	    OID_AUTO, "can_use_ts", CTLFLAG_RW,
 	    &bbr_can_use_ts_for_rtt, 1,
 	    "Can we use the ms timestamp if available for retransmitted rtt calculations?");
 
 	/* Pacing controls */
 	bbr_hptsi = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
 	    "pacing",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "hw_pacing", CTLFLAG_RW,
 	    &bbr_allow_hdwr_pacing, 1,
 	    "Do we allow hardware pacing?");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "hw_pacing_limit", CTLFLAG_RW,
 	    &bbr_hardware_pacing_limit, 4000,
 	    "Do we have a limited number of connections for pacing chelsio (0=no limit)?");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "hw_pacing_adj", CTLFLAG_RW,
 	    &bbr_hdwr_pace_adjust, 2,
 	    "Multiplier to the calculated tso size?");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "hw_pacing_floor", CTLFLAG_RW,
 	    &bbr_hdwr_pace_floor, 1,
 	    "Do we invoke the hardware pacing floor?");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "hw_pacing_delay_cnt", CTLFLAG_RW,
 	    &bbr_hdwr_pacing_delay_cnt, 10,
 	    "How many packets must be sent after hdwr pacing is enabled");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "bw_cross", CTLFLAG_RW,
 	    &bbr_cross_over, 3000000,
 	    "What is the point where we cross over to a linux-like TSO size selection");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "seg_deltarg", CTLFLAG_RW,
 	    &bbr_hptsi_segments_delay_tar, 7000,
 	    "What is the worst case delay target for hptsi < 48Mbps connections");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "enet_oh", CTLFLAG_RW,
 	    &bbr_include_enet_oh, 0,
 	    "Do we include the ethernet overhead in calculating pacing delay?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "ip_oh", CTLFLAG_RW,
 	    &bbr_include_ip_oh, 1,
 	    "Do we include the IP overhead in calculating pacing delay?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "tcp_oh", CTLFLAG_RW,
 	    &bbr_include_tcp_oh, 0,
 	    "Do we include the TCP overhead in calculating pacing delay?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "google_discount", CTLFLAG_RW,
 	    &bbr_google_discount, 10,
 	    "What is the default google discount percentage wise for pacing (11 = 1.1%%)?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "all_get_min", CTLFLAG_RW,
 	    &bbr_all_get_min, 0,
 	    "If you are less than an MSS do you just get the min?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "tso_min", CTLFLAG_RW,
 	    &bbr_hptsi_bytes_min, 1460,
 	    "For 0 -> 24Mbps what is floor number of segments for TSO");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "seg_tso_max", CTLFLAG_RW,
 	    &bbr_hptsi_segments_max, 6,
 	    "For 0 -> 24Mbps what is top number of segments for TSO");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "seg_floor", CTLFLAG_RW,
 	    &bbr_hptsi_segments_floor, 1,
 	    "Minimum TSO size we will fall to, in segments");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "utter_max", CTLFLAG_RW,
 	    &bbr_hptsi_utter_max, 0,
 	    "The absolute maximum that any pacing (outside of hardware) can be");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "seg_divisor", CTLFLAG_RW,
 	    &bbr_hptsi_per_second, 100,
 	    "What is the divisor in our hptsi TSO calculation (24Mbps < X < 512Mbps)?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "srtt_mul", CTLFLAG_RW,
 	    &bbr_hptsi_max_mul, 1,
 	    "The multiplier for pace len max");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_hptsi),
 	    OID_AUTO, "srtt_div", CTLFLAG_RW,
 	    &bbr_hptsi_max_div, 2,
 	    "The divisor for pace len max");
 	/* Measurement controls */
 	bbr_measure = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
 	    "measure",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Measurement controls");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "min_i_bw", CTLFLAG_RW,
 	    &bbr_initial_bw_bps, 62500,
 	    "Minimum initial b/w in bytes per second");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "no_sack_needed", CTLFLAG_RW,
 	    &bbr_sack_not_required, 0,
 	    "Do we allow bbr to run on connections not supporting SACK?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "use_google", CTLFLAG_RW,
 	    &bbr_use_google_algo, 0,
 	    "Use as close to google V1.0 as possible?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "ts_limiting", CTLFLAG_RW,
 	    &bbr_ts_limiting, 1,
 	    "Do we attempt to use the peer's timestamp to limit b/w calculations?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "ts_can_raise", CTLFLAG_RW,
 	    &bbr_ts_can_raise, 0,
 	    "Can we raise the b/w via timestamp b/w calculation?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "ts_delta", CTLFLAG_RW,
 	    &bbr_min_usec_delta, 20000,
 	    "How long in usec between ts of our sends in ts validation code?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "ts_peer_delta", CTLFLAG_RW,
 	    &bbr_min_peer_delta, 20,
 	    "What min numerical value should be between the peer deltas?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "ts_delta_percent", CTLFLAG_RW,
 	    &bbr_delta_percent, 150,
 	    "What percentage (150 = 15.0) do we allow variance for?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "min_measure_good_bw", CTLFLAG_RW,
 	    &bbr_min_measurements_req, 1,
 	    "What is the minimum measurement count we need before we switch to our b/w estimate");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "min_measure_before_pace", CTLFLAG_RW,
 	    &bbr_no_pacing_until, 4,
 	    "How many pkt-epoch's (0 is off) do we need before pacing is on?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "quanta", CTLFLAG_RW,
 	    &bbr_quanta, 2,
 	    "Extra quanta to add when calculating the target (ID section 4.2.3.2).");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_measure),
 	    OID_AUTO, "noretran", CTLFLAG_RW,
 	    &bbr_no_retran, 0,
 	    "Should google mode not use retransmission measurements for the b/w estimation?");
 	/* State controls */
 	bbr_states = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
 	    "states",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "State controls");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "idle_restart", CTLFLAG_RW,
 	    &bbr_uses_idle_restart, 0,
 	    "Do we use a new special idle_restart state to ramp back up quickly?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "idle_restart_threshold", CTLFLAG_RW,
 	    &bbr_idle_restart_threshold, 100000,
 	    "How long must we be idle before we restart??");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "use_pkt_epoch", CTLFLAG_RW,
 	    &bbr_state_is_pkt_epoch, 0,
 	    "Do we use a pkt-epoch for substate, or rttProp if 0?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "startup_rtt_gain", CTLFLAG_RW,
 	    &bbr_rtt_gain_thresh, 0,
 	    "What increase in RTT triggers us to stop ignoring no-loss and possibly exit startup?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "drain_floor", CTLFLAG_RW,
 	    &bbr_drain_floor, 88,
 	    "What is the lowest we can drain (pg) to?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "drain_2_target", CTLFLAG_RW,
 	    &bbr_state_drain_2_tar, 1,
 	    "Do we drain to target in drain substate?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "gain_2_target", CTLFLAG_RW,
 	    &bbr_gain_to_target, 1,
 	    "Does probe bw gain to target??");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "gain_extra_time", CTLFLAG_RW,
 	    &bbr_gain_gets_extra_too, 1,
 	    "Does probe bw gain get the extra time too?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "ld_div", CTLFLAG_RW,
 	    &bbr_drain_drop_div, 5,
 	    "Long drain drop divider?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "ld_mul", CTLFLAG_RW,
 	    &bbr_drain_drop_mul, 4,
 	    "Long drain drop multiplier?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "rand_ot_disc", CTLFLAG_RW,
 	    &bbr_rand_ot, 50,
 	    "Random discount of the ot?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "dr_filter_life", CTLFLAG_RW,
 	    &bbr_num_pktepo_for_del_limit, BBR_NUM_RTTS_FOR_DEL_LIMIT,
 	    "How many packet-epochs does the b/w delivery rate last?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "subdrain_applimited", CTLFLAG_RW,
 	    &bbr_sub_drain_app_limit, 0,
 	    "Does our sub-state drain invoke app limited if it's long?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "use_cwnd_subdrain", CTLFLAG_RW,
 	    &bbr_sub_drain_slam_cwnd, 0,
 	    "Should we set/recover cwnd for sub-state drain?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "use_cwnd_maindrain", CTLFLAG_RW,
 	    &bbr_slam_cwnd_in_main_drain, 0,
 	    "Should we set/recover cwnd for main-state drain?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "google_gets_earlyout", CTLFLAG_RW,
 	    &google_allow_early_out, 1,
 	    "Should we allow google probe-bw/drain to exit early at flight target?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_states),
 	    OID_AUTO, "google_exit_loss", CTLFLAG_RW,
 	    &google_consider_lost, 1,
 	    "Should we have losses exit gain of probebw in google mode??");
 	/* Startup controls */
 	bbr_startup = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
 	    "startup",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Startup controls");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_startup),
 	    OID_AUTO, "cheat_iwnd", CTLFLAG_RW,
 	    &bbr_sends_full_iwnd, 1,
 	    "Do we not pace but burst out the initial window as our TSO size?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_startup),
 	    OID_AUTO, "loss_threshold", CTLFLAG_RW,
 	    &bbr_startup_loss_thresh, 2000,
 	    "In startup, what is the loss threshold in a pkt-epoch that will exit us from startup?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_startup),
 	    OID_AUTO, "use_lowerpg", CTLFLAG_RW,
 	    &bbr_use_lower_gain_in_startup, 1,
 	    "Should we use a lower hptsi gain if we see loss in startup?");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_startup),
 	    OID_AUTO, "gain", CTLFLAG_RW,
 	    &bbr_start_exit, 25,
 	    "What gain percent do we need to see to stay in startup??");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_startup),
 	    OID_AUTO, "low_gain", CTLFLAG_RW,
 	    &bbr_low_start_exit, 15,
 	    "What gain percent do we need to see to stay in the lower gain startup??");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_startup),
 	    OID_AUTO, "loss_exit", CTLFLAG_RW,
 	    &bbr_exit_startup_at_loss, 1,
 	    "Should we exit startup at loss in an epoch if we are not gaining?");
 	/* CWND controls */
 	bbr_cwnd = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
 	    "cwnd",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Cwnd controls");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "tar_rtt", CTLFLAG_RW,
 	    &bbr_cwndtarget_rtt_touse, 0,
 	    "Target cwnd rtt measurement to use (0=rtt_prop, 1=rtt_rack, 2=pkt_rtt, 3=srtt)?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "may_shrink", CTLFLAG_RW,
 	    &bbr_cwnd_may_shrink, 0,
 	    "Can the cwnd shrink if it would grow to more than the target?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "max_target_limit", CTLFLAG_RW,
 	    &bbr_target_cwnd_mult_limit, 8,
 	    "Do we limit the cwnd to some multiple of the cwnd target if cwnd can't shrink 0=no?");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "highspeed_min", CTLFLAG_RW,
 	    &bbr_cwnd_min_val_hs, BBR_HIGHSPEED_NUM_MSS,
 	    "What is the high-speed min cwnd (rttProp under 1ms)");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "lowspeed_min", CTLFLAG_RW,
 	    &bbr_cwnd_min_val, BBR_PROBERTT_NUM_MSS,
 	    "What is the min cwnd (rttProp > 1ms)");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "initwin", CTLFLAG_RW,
 	    &bbr_def_init_win, 10,
 	    "What is the BBR initial window, if 0 use tcp version");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "do_loss_red", CTLFLAG_RW,
 	    &bbr_do_red, 600,
 	    "Do we reduce the b/w at exit from recovery based on ratio of prop/srtt (800=80.0, 0=off)?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "red_scale", CTLFLAG_RW,
 	    &bbr_red_scale, 20000,
 	    "What RTT do we scale with?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "red_growslow", CTLFLAG_RW,
 	    &bbr_red_growth_restrict, 1,
 	    "Do we restrict cwnd growth for what's in flight?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "red_div", CTLFLAG_RW,
 	    &bbr_red_div, 2,
 	    "If we reduce, what's the divisor?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "red_mul", CTLFLAG_RW,
 	    &bbr_red_mul, 1,
 	    "If we reduce whats the mulitiplier?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "target_is_unit", CTLFLAG_RW,
 	    &bbr_target_is_bbunit, 0,
 	    "Is the state target the pacing_gain or BBR_UNIT?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_cwnd),
 	    OID_AUTO, "drop_limit", CTLFLAG_RW,
 	    &bbr_drop_limit, 0,
 	    "Number of segments limit for drop (0=use min_cwnd w/flight)?");
 
 	/* Timeout controls */
 	bbr_timeout = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
 	    "timeout",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Time out controls");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "delack", CTLFLAG_RW,
 	    &bbr_delack_time, 100000,
 	    "BBR's delayed ack time");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "tlp_uses", CTLFLAG_RW,
 	    &bbr_tlp_type_to_use, 3,
 	    "RTT that TLP uses in its calculations, 0=rttProp, 1=Rack_rtt, 2=pkt_rtt and 3=srtt");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "persmin", CTLFLAG_RW,
 	    &bbr_persist_min, 250000,
 	    "What is the minimum time in microseconds between persists");
 	SYSCTL_ADD_U32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "persmax", CTLFLAG_RW,
 	    &bbr_persist_max, 1000000,
 	    "What is the largest delay in microseconds between persists");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "tlp_minto", CTLFLAG_RW,
 	    &bbr_tlp_min, 10000,
 	    "TLP Min timeout in usecs");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "tlp_dack_time", CTLFLAG_RW,
 	    &bbr_delayed_ack_time, 200000,
 	    "TLP delayed ack compensation value");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "minrto", CTLFLAG_RW,
 	    &bbr_rto_min_ms, 30,
 	    "Minimum RTO in ms");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "maxrto", CTLFLAG_RW,
 	    &bbr_rto_max_sec, 4,
 	    "Maximum RTO in seconds -- should be at least as large as min_rto");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "tlp_retry", CTLFLAG_RW,
 	    &bbr_tlp_max_resend, 2,
 	    "How many times does TLP retry a single segment or multiple with no ACK");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "minto", CTLFLAG_RW,
 	    &bbr_min_to, 1000,
 	    "Minimum rack timeout in useconds");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "pktdelay", CTLFLAG_RW,
 	    &bbr_pkt_delay, 1000,
 	    "Extra RACK time (in useconds) besides reordering thresh");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "incr_tmrs", CTLFLAG_RW,
 	    &bbr_incr_timers, 1,
 	    "Increase the RXT/TLP timer by the pacing time used?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_timeout),
 	    OID_AUTO, "rxtmark_sackpassed", CTLFLAG_RW,
 	    &bbr_marks_rxt_sack_passed, 0,
 	    "Mark sack passed on all those not ack'd when a RXT hits?");
 	/* Policer controls */
 	bbr_policer = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO,
 	    "policer",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Policer controls");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_policer),
 	    OID_AUTO, "detect_enable", CTLFLAG_RW,
 	    &bbr_policer_detection_enabled, 1,
 	    "Is policer detection enabled??");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_policer),
 	    OID_AUTO, "min_pes", CTLFLAG_RW,
 	    &bbr_lt_intvl_min_rtts, 4,
 	    "Minimum number of PE's?");
 	SYSCTL_ADD_U64(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_policer),
 	    OID_AUTO, "bwdiff", CTLFLAG_RW,
 	    &bbr_lt_bw_diff, (4000/8),
 	    "Minimal bw diff?");
 	SYSCTL_ADD_U64(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_policer),
 	    OID_AUTO, "bwratio", CTLFLAG_RW,
 	    &bbr_lt_bw_ratio, 8,
 	    "Minimal bw diff?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_policer),
 	    OID_AUTO, "from_rack_rxt", CTLFLAG_RW,
 	    &bbr_policer_call_from_rack_to, 0,
 	    "Do we call the policer detection code from a rack-timeout?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_policer),
 	    OID_AUTO, "false_postive", CTLFLAG_RW,
 	    &bbr_lt_intvl_fp, 0,
 	    "What packet epoch do we do false-positive detection at (0=no)?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_policer),
 	    OID_AUTO, "loss_thresh", CTLFLAG_RW,
 	    &bbr_lt_loss_thresh, 196,
 	    "Loss threshold 196 = 19.6%?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_policer),
 	    OID_AUTO, "false_postive_thresh", CTLFLAG_RW,
 	    &bbr_lt_fd_thresh, 100,
 	    "What percentage is the false detection threshold (150=15.0)?");
 	/* All the rest */
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "cheat_rxt", CTLFLAG_RW,
 	    &bbr_use_rack_resend_cheat, 0,
 	    "Do we burst 1ms between sends on retransmissions (like rack)?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "error_paceout", CTLFLAG_RW,
 	    &bbr_error_base_paceout, 10000,
 	    "When we hit an error what is the min to pace out in usec's?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "kill_paceout", CTLFLAG_RW,
 	    &bbr_max_net_error_cnt, 10,
 	    "When we hit this many errors in a row, kill the session?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "data_after_close", CTLFLAG_RW,
 	    &bbr_ignore_data_after_close, 1,
 	    "Do we hold off sending a RST until all pending data is ack'd");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "resend_use_tso", CTLFLAG_RW,
 	    &bbr_resends_use_tso, 0,
 	    "Can resends use TSO?");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "sblklimit", CTLFLAG_RW,
 	    &bbr_sack_block_limit, 128,
 	    "When do we start ignoring small sack blocks");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "bb_verbose", CTLFLAG_RW,
 	    &bbr_verbose_logging, 0,
 	    "Should BBR black box logging be verbose");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "reorder_thresh", CTLFLAG_RW,
 	    &bbr_reorder_thresh, 2,
 	    "What factor for rack will be added when seeing reordering (shift right)");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "reorder_fade", CTLFLAG_RW,
 	    &bbr_reorder_fade, 0,
 	    "Does reorder detection fade, if so how many ms (0 means never)");
 	SYSCTL_ADD_S32(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
 	    &bbr_tlp_thresh, 1,
 	    "what divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
 	/* Stats and counters */
 	/* The pacing counters for hdwr/software can't be in the array */
 	bbr_nohdwr_pacing_enobuf = counter_u64_alloc(M_WAITOK);
 	bbr_hdwr_pacing_enobuf = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "enob_hdwr_pacing", CTLFLAG_RD,
 	    &bbr_hdwr_pacing_enobuf,
 	    "Total number of enobufs for hardware paced flows");
 	SYSCTL_ADD_COUNTER_U64(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "enob_no_hdwr_pacing", CTLFLAG_RD,
 	    &bbr_nohdwr_pacing_enobuf,
 	    "Total number of enobufs for non-hardware paced flows");
 
 	bbr_flows_whdwr_pacing = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "hdwr_pacing", CTLFLAG_RD,
 	    &bbr_flows_whdwr_pacing,
 	    "Total number of hardware paced flows");
 	bbr_flows_nohdwr_pacing = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "software_pacing", CTLFLAG_RD,
 	    &bbr_flows_nohdwr_pacing,
 	    "Total number of software paced flows");
 	COUNTER_ARRAY_ALLOC(bbr_stat_arry, BBR_STAT_SIZE, M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "stats", CTLFLAG_RD,
 	    bbr_stat_arry, BBR_STAT_SIZE, "BBR Stats");
 	COUNTER_ARRAY_ALLOC(bbr_opts_arry, BBR_OPTS_SIZE, M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "opts", CTLFLAG_RD,
 	    bbr_opts_arry, BBR_OPTS_SIZE, "BBR Option Stats");
 	COUNTER_ARRAY_ALLOC(bbr_state_lost, BBR_MAX_STAT, M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "lost", CTLFLAG_RD,
 	    bbr_state_lost, BBR_MAX_STAT, "Stats of when losses occur");
 	COUNTER_ARRAY_ALLOC(bbr_state_resend, BBR_MAX_STAT, M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "stateresend", CTLFLAG_RD,
 	    bbr_state_resend, BBR_MAX_STAT, "Stats of what states resend");
 	COUNTER_ARRAY_ALLOC(bbr_state_time, BBR_MAX_STAT, M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "statetime", CTLFLAG_RD,
 	    bbr_state_time, BBR_MAX_STAT, "Stats of time spent in the states");
 	COUNTER_ARRAY_ALLOC(bbr_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "outsize", CTLFLAG_RD,
 	    bbr_out_size, TCP_MSS_ACCT_SIZE, "Size of output calls");
 	SYSCTL_ADD_PROC(&bbr_sysctl_ctx,
 	    SYSCTL_CHILDREN(bbr_sysctl_root),
 	    OID_AUTO, "clrlost", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 	    &bbr_clear_lost, 0, sysctl_bbr_clear_lost, "IU", "Clear lost counters");
 }
 
 static void
 bbr_counter_destroy(void)
 {
 	COUNTER_ARRAY_FREE(bbr_stat_arry, BBR_STAT_SIZE);
 	COUNTER_ARRAY_FREE(bbr_opts_arry, BBR_OPTS_SIZE);
 	COUNTER_ARRAY_FREE(bbr_out_size, TCP_MSS_ACCT_SIZE);
 	COUNTER_ARRAY_FREE(bbr_state_lost, BBR_MAX_STAT);
 	COUNTER_ARRAY_FREE(bbr_state_time, BBR_MAX_STAT);
 	COUNTER_ARRAY_FREE(bbr_state_resend, BBR_MAX_STAT);
 	counter_u64_free(bbr_nohdwr_pacing_enobuf);
 	counter_u64_free(bbr_hdwr_pacing_enobuf);
 	counter_u64_free(bbr_flows_whdwr_pacing);
 	counter_u64_free(bbr_flows_nohdwr_pacing);
 
 }
 
 static __inline void
 bbr_fill_in_logging_data(struct tcp_bbr *bbr, struct tcp_log_bbr *l, uint32_t cts)
 {
 	memset(l, 0, sizeof(union tcp_log_stackspecific));
 	l->cur_del_rate = bbr->r_ctl.rc_bbr_cur_del_rate;
 	l->delRate = get_filter_value(&bbr->r_ctl.rc_delrate);
 	l->rttProp = get_filter_value_small(&bbr->r_ctl.rc_rttprop);
 	l->bw_inuse = bbr_get_bw(bbr);
 	l->inflight = ctf_flight_size(bbr->rc_tp,
 			  (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 	l->applimited = bbr->r_ctl.r_app_limited_until;
 	l->delivered = bbr->r_ctl.rc_delivered;
 	l->timeStamp = cts;
 	l->lost = bbr->r_ctl.rc_lost;
 	l->bbr_state = bbr->rc_bbr_state;
 	l->bbr_substate = bbr_state_val(bbr);
 	l->epoch = bbr->r_ctl.rc_rtt_epoch;
 	l->lt_epoch = bbr->r_ctl.rc_lt_epoch;
 	l->pacing_gain = bbr->r_ctl.rc_bbr_hptsi_gain;
 	l->cwnd_gain = bbr->r_ctl.rc_bbr_cwnd_gain;
 	l->inhpts = tcp_in_hpts(bbr->rc_tp);
 	l->use_lt_bw = bbr->rc_lt_use_bw;
 	l->pkts_out = bbr->r_ctl.rc_flight_at_input;
 	l->pkt_epoch = bbr->r_ctl.rc_pkt_epoch;
 }
 
 static void
 bbr_log_type_bw_reduce(struct tcp_bbr *bbr, int reason)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = 0;
 		log.u_bbr.flex2 = 0;
 		log.u_bbr.flex5 = 0;
 		log.u_bbr.flex3 = 0;
 		log.u_bbr.flex4 = bbr->r_ctl.rc_pkt_epoch_loss_rate;
 		log.u_bbr.flex7 = reason;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_bbr_enters_probertt;
 		log.u_bbr.flex8 = 0;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BW_RED_EV, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_rwnd_collapse(struct tcp_bbr *bbr, int seq, int mode, uint32_t count)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = seq;
 		log.u_bbr.flex2 = count;
 		log.u_bbr.flex8 = mode;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_LOWGAIN, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_just_return(struct tcp_bbr *bbr, uint32_t cts, uint32_t tlen, uint8_t hpts_calling,
     uint8_t reason, uint32_t p_maxseg, int len)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = p_maxseg;
 		log.u_bbr.flex2 = bbr->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_timer_exp;
 		log.u_bbr.flex4 = reason;
 		log.u_bbr.flex5 = bbr->rc_in_persist;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_last_delay_val;
 		log.u_bbr.flex7 = p_maxseg;
 		log.u_bbr.flex8 = bbr->rc_in_persist;
 		log.u_bbr.pkts_out = 0;
 		log.u_bbr.applimited = len;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_JUSTRET, 0,
 		    tlen, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_enter_rec(struct tcp_bbr *bbr, uint32_t seq)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = seq;
 		log.u_bbr.flex2 = bbr->r_ctl.rc_cwnd_on_ent;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_recovery_start;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_ENTREC, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_msgsize_fail(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t len, uint32_t maxseg, uint32_t mtu, int32_t csum_flags, int32_t tso, uint32_t cts)
 {
 	if (tcp_bblogging_on(tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = tso;
 		log.u_bbr.flex2 = maxseg;
 		log.u_bbr.flex3 = mtu;
 		log.u_bbr.flex4 = csum_flags;
 		TCP_LOG_EVENTP(tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_MSGSIZE, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_flowend(struct tcp_bbr *bbr)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct sockbuf *r, *s;
 		struct timeval tv;
 
 		if (bbr->rc_inp->inp_socket) {
 			r = &bbr->rc_inp->inp_socket->so_rcv;
 			s = &bbr->rc_inp->inp_socket->so_snd;
 		} else {
 			r = s = NULL;
 		}
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, tcp_get_usecs(&tv));
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    r, s,
 		    TCP_LOG_FLOWEND, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 bbr_log_pkt_epoch(struct tcp_bbr *bbr, uint32_t cts, uint32_t line,
     uint32_t lost, uint32_t del)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = lost;
 		log.u_bbr.flex2 = del;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_bbr_lastbtlbw;
 		log.u_bbr.flex4 = bbr->r_ctl.rc_pkt_epoch_rtt;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_bbr_last_startup_epoch;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_lost_at_startup;
 		log.u_bbr.flex7 = line;
 		log.u_bbr.flex8 = 0;
 		log.u_bbr.inflight = bbr->r_ctl.r_measurement_count;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_PKT_EPOCH, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_time_epoch(struct tcp_bbr *bbr, uint32_t cts, uint32_t line, uint32_t epoch_time)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = bbr->r_ctl.rc_lost;
 		log.u_bbr.flex2 = bbr->rc_inp->inp_socket->so_snd.sb_lowat;
 		log.u_bbr.flex3 = bbr->rc_inp->inp_socket->so_snd.sb_hiwat;
 		log.u_bbr.flex7 = line;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TIME_EPOCH, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_set_of_state_target(struct tcp_bbr *bbr, uint32_t new_tar, int line, int meth)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = bbr->r_ctl.rc_target_at_state;
 		log.u_bbr.flex2 = new_tar;
 		log.u_bbr.flex3 = line;
 		log.u_bbr.flex4 = bbr->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex5 = bbr_quanta;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_pace_min_segs;
 		log.u_bbr.flex7 = bbr->rc_last_options;
 		log.u_bbr.flex8 = meth;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_STATE_TARGET, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 
 }
 
 static void
 bbr_log_type_statechange(struct tcp_bbr *bbr, uint32_t cts, int32_t line)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = bbr->r_ctl.rc_rtt_shrinks;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_probertt_int;
 		if (bbr_state_is_pkt_epoch)
 			log.u_bbr.flex4 = bbr_get_rtt(bbr, BBR_RTT_PKTRTT);
 		else
 			log.u_bbr.flex4 = bbr_get_rtt(bbr, BBR_RTT_PROP);
 		log.u_bbr.flex5 = bbr->r_ctl.rc_bbr_last_startup_epoch;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_lost_at_startup;
 		log.u_bbr.flex7 = (bbr->r_ctl.rc_target_at_state/1000);
 		log.u_bbr.lt_epoch = bbr->r_ctl.rc_level_state_extra;
 		log.u_bbr.pkts_out = bbr->r_ctl.rc_target_at_state;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_STATE, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_rtt_shrinks(struct tcp_bbr *bbr, uint32_t cts, uint32_t applied,
 		    uint32_t rtt, uint32_t line, uint8_t reas, uint16_t cond)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = bbr->r_ctl.rc_rtt_shrinks;
 		log.u_bbr.flex3 = bbr->r_ctl.last_in_probertt;
 		log.u_bbr.flex4 = applied;
 		log.u_bbr.flex5 = rtt;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_target_at_state;
 		log.u_bbr.flex7 = cond;
 		log.u_bbr.flex8 = reas;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_RTT_SHRINKS, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_exit_rec(struct tcp_bbr *bbr)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = bbr->r_ctl.rc_recovery_start;
 		log.u_bbr.flex2 = bbr->r_ctl.rc_cwnd_on_ent;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_EXITREC, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_cwndupd(struct tcp_bbr *bbr, uint32_t bytes_this_ack, uint32_t chg,
     uint32_t prev_acked, int32_t meth, uint32_t target, uint32_t th_ack, int32_t line)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = prev_acked;
 		log.u_bbr.flex3 = bytes_this_ack;
 		log.u_bbr.flex4 = chg;
 		log.u_bbr.flex5 = th_ack;
 		log.u_bbr.flex6 = target;
 		log.u_bbr.flex8 = meth;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_CWND, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_rtt_sample(struct tcp_bbr *bbr, uint32_t rtt, uint32_t tsin)
 {
 	/*
 	 * Log the rtt sample we are applying to the srtt algorithm in
 	 * useconds.
 	 */
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = rtt;
 		log.u_bbr.flex2 = bbr->r_ctl.rc_bbr_state_time;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_ack_hdwr_delay;
 		log.u_bbr.flex4 = bbr->rc_tp->ts_offset;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state;
 		log.u_bbr.pkts_out = tcp_tv_to_mssectick(&bbr->rc_tv);
 		log.u_bbr.flex6 = tsin;
 		log.u_bbr.flex7 = 0;
 		log.u_bbr.flex8 = bbr->rc_ack_was_delayed;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    TCP_LOG_RTT, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_pesist(struct tcp_bbr *bbr, uint32_t cts, uint32_t time_in, int32_t line, uint8_t enter_exit)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = time_in;
 		log.u_bbr.flex2 = line;
 		log.u_bbr.flex8 = enter_exit;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_PERSIST, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_ack_clear(struct tcp_bbr *bbr, uint32_t cts)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = bbr->rc_tp->ts_recent_age;
 		log.u_bbr.flex2 = bbr->r_ctl.rc_rtt_shrinks;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_probertt_int;
 		log.u_bbr.flex4 = bbr->r_ctl.rc_went_idle_time;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_ACKCLEAR, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_ack_event(struct tcp_bbr *bbr, struct tcphdr *th, struct tcpopt *to, uint32_t tlen,
 		  uint16_t nsegs, uint32_t cts, int32_t nxt_pkt, struct mbuf *m)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = nsegs;
 		log.u_bbr.flex2 = bbr->r_ctl.rc_lost_bytes;
 		if (m) {
 			struct timespec ts;
 
 			log.u_bbr.flex3 = m->m_flags;
 			if (m->m_flags & M_TSTMP) {
 				mbuf_tstmp2timespec(m, &ts);
 				tv.tv_sec = ts.tv_sec;
 				tv.tv_usec = ts.tv_nsec / 1000;
 				log.u_bbr.lt_epoch = tcp_tv_to_usectick(&tv);
 			} else {
 				log.u_bbr.lt_epoch = 0;
 			}
 			if (m->m_flags & M_TSTMP_LRO) {
 				mbuf_tstmp2timeval(m, &tv);
 				log.u_bbr.flex5 = tcp_tv_to_usectick(&tv);
 			} else {
 				/* No arrival timestamp */
 				log.u_bbr.flex5 = 0;
 			}
 
 			log.u_bbr.pkts_out = tcp_get_usecs(&tv);
 		} else {
 			log.u_bbr.flex3 = 0;
 			log.u_bbr.flex5 = 0;
 			log.u_bbr.flex6 = 0;
 			log.u_bbr.pkts_out = 0;
 		}
 		log.u_bbr.flex4 = bbr->r_ctl.rc_target_at_state;
 		log.u_bbr.flex7 = bbr->r_wanted_output;
 		log.u_bbr.flex8 = bbr->rc_in_persist;
 		TCP_LOG_EVENTP(bbr->rc_tp, th,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    TCP_LOG_IN, 0,
 		    tlen, &log, true, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_doseg_done(struct tcp_bbr *bbr, uint32_t cts, int32_t nxt_pkt, int32_t did_out)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = did_out;
 		log.u_bbr.flex2 = nxt_pkt;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_last_delay_val;
 		log.u_bbr.flex4 = bbr->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_timer_exp;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_lost_bytes;
 		log.u_bbr.flex7 = bbr->r_wanted_output;
 		log.u_bbr.flex8 = bbr->rc_in_persist;
 		log.u_bbr.pkts_out = bbr->r_ctl.highest_hdwr_delay;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_DOSEG_DONE, 0,
 		    0, &log, true, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_enobuf_jmp(struct tcp_bbr *bbr, uint32_t len, uint32_t cts,
     int32_t line, uint32_t o_len, uint32_t segcnt, uint32_t segsiz)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = o_len;
 		log.u_bbr.flex3 = segcnt;
 		log.u_bbr.flex4 = segsiz;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_ENOBUF_JMP, ENOBUFS,
 		    len, &log, true, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_to_processing(struct tcp_bbr *bbr, uint32_t cts, int32_t ret, int32_t timers, uint8_t hpts_calling)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = timers;
 		log.u_bbr.flex2 = ret;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_timer_exp;
 		log.u_bbr.flex4 = bbr->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex5 = cts;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_target_at_state;
 		log.u_bbr.flex8 = hpts_calling;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TO_PROCESS, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_to_event(struct tcp_bbr *bbr, uint32_t cts, int32_t to_num)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 		uint64_t ar;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = bbr->bbr_timer_src;
 		log.u_bbr.flex2 = 0;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags;
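 		/*
 		 * The rc_resend pointer is logged as two 32-bit halves:
 		 * the high word goes in flex4, the low word in flex5.
 		 */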
 		ar = (uint64_t)(bbr->r_ctl.rc_resend);
 		ar >>= 32;
 		ar &= 0x00000000ffffffff;
 		log.u_bbr.flex4 = (uint32_t)ar;
 		ar = (uint64_t)bbr->r_ctl.rc_resend;
 		ar &= 0x00000000ffffffff;
 		log.u_bbr.flex5 = (uint32_t)ar;
 		log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
 		log.u_bbr.flex8 = to_num;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_RTO, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_startup_event(struct tcp_bbr *bbr, uint32_t cts, uint32_t flex1, uint32_t flex2, uint32_t flex3, uint8_t reason)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = flex1;
 		log.u_bbr.flex2 = flex2;
 		log.u_bbr.flex3 = flex3;
 		log.u_bbr.flex4 = 0;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_lost_at_startup;
 		log.u_bbr.flex8 = reason;
 		log.u_bbr.cur_del_rate = bbr->r_ctl.rc_bbr_lastbtlbw;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_REDUCE, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = diag->p_nxt_slot;
 		log.u_bbr.flex2 = diag->p_cur_slot;
 		log.u_bbr.flex3 = diag->slot_req;
 		log.u_bbr.flex4 = diag->inp_hptsslot;
 		log.u_bbr.flex5 = diag->slot_remaining;
 		log.u_bbr.flex6 = diag->need_new_to;
 		log.u_bbr.flex7 = diag->p_hpts_active;
 		log.u_bbr.flex8 = diag->p_on_min_sleep;
 		/* Hijack other fields as needed  */
 		log.u_bbr.epoch = diag->have_slept;
 		log.u_bbr.lt_epoch = diag->yet_to_sleep;
 		log.u_bbr.pkts_out = diag->co_ret;
 		log.u_bbr.applimited = diag->hpts_sleep_time;
 		log.u_bbr.delivered = diag->p_prev_slot;
 		log.u_bbr.inflight = diag->p_runningslot;
 		log.u_bbr.bw_inuse = diag->wheel_slot;
 		log.u_bbr.rttProp = diag->wheel_cts;
 		log.u_bbr.delRate = diag->maxslots;
 		log.u_bbr.cur_del_rate = diag->p_curtick;
 		log.u_bbr.cur_del_rate <<= 32;
 		log.u_bbr.cur_del_rate |= diag->p_lasttick;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_HPTSDIAG, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts, uint32_t time_since_sent, uint32_t srtt,
     uint32_t thresh, uint32_t to)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = bbr->rc_tp->t_rttvar;
 		log.u_bbr.flex2 = time_since_sent;
 		log.u_bbr.flex3 = srtt;
 		log.u_bbr.flex4 = thresh;
 		log.u_bbr.flex5 = to;
 		log.u_bbr.flex6 = bbr->rc_tp->t_srtt;
 		log.u_bbr.flex8 = mode;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TIMERPREP, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_pacing_delay_calc(struct tcp_bbr *bbr, uint16_t gain, uint32_t len,
     uint32_t cts, uint32_t usecs, uint64_t bw, uint32_t override, int mod)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = usecs;
 		log.u_bbr.flex2 = len;
 		log.u_bbr.flex3 = (uint32_t)((bw >> 32) & 0x00000000ffffffff);
 		log.u_bbr.flex4 = (uint32_t)(bw & 0x00000000ffffffff);
 		if (override)
 			log.u_bbr.flex5 = (1 << 2);
 		else
 			log.u_bbr.flex5 = 0;
 		log.u_bbr.flex6 = override;
 		log.u_bbr.flex7 = gain;
 		log.u_bbr.flex8 = mod;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_HPTSI_CALC, 0,
 		    len, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 
 		log.u_bbr.flex1 = bbr->bbr_timer_src;
 		log.u_bbr.flex2 = to;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex4 = slot;
 		log.u_bbr.flex5 = bbr->rc_tp->t_hpts_slot;
 		log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
 		log.u_bbr.pkts_out = bbr->rc_tp->t_flags2;
 		log.u_bbr.flex8 = which;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TIMERSTAR, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_thresh_choice(struct tcp_bbr *bbr, uint32_t cts, uint32_t thresh, uint32_t lro, uint32_t srtt, struct bbr_sendmap *rsm, uint8_t frm)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = thresh;
 		log.u_bbr.flex2 = lro;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_reorder_ts;
 		log.u_bbr.flex4 = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
 		log.u_bbr.flex5 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
 		log.u_bbr.flex6 = srtt;
 		log.u_bbr.flex7 = bbr->r_ctl.rc_reorder_shift;
 		log.u_bbr.flex8 = frm;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_THRESH_CALC, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_to_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts, uint8_t hpts_removed)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = bbr->bbr_timer_src;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex4 = bbr->rc_in_persist;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state;
 		log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
 		log.u_bbr.flex8 = hpts_removed;
 		log.u_bbr.pkts_out = bbr->rc_pacer_started;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TIMERCANC, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_tstmp_validation(struct tcp_bbr *bbr, uint64_t peer_delta, uint64_t delta)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = bbr->r_ctl.bbr_peer_tsratio;
 		log.u_bbr.flex2 = (peer_delta >> 32);
 		log.u_bbr.flex3 = (peer_delta & 0x00000000ffffffff);
 		log.u_bbr.flex4 = (delta >> 32);
 		log.u_bbr.flex5 = (delta & 0x00000000ffffffff);
 		log.u_bbr.flex7 = bbr->rc_ts_clock_set;
 		log.u_bbr.flex8 = bbr->rc_ts_cant_be_used;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TSTMP_VAL, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_tsosize(struct tcp_bbr *bbr, uint32_t cts, uint32_t tsosz, uint32_t tls, uint32_t old_val, uint32_t maxseg, int hdwr)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = tsosz;
 		log.u_bbr.flex2 = tls;
 		log.u_bbr.flex3 = tcp_min_hptsi_time;
 		log.u_bbr.flex4 = bbr->r_ctl.bbr_hptsi_bytes_min;
 		log.u_bbr.flex5 = old_val;
 		log.u_bbr.flex6 = maxseg;
 		log.u_bbr.flex7 = bbr->rc_no_pacing;
 		log.u_bbr.flex7 <<= 1;
 		log.u_bbr.flex7 |= bbr->rc_past_init_win;
 		if (hdwr)
 			log.u_bbr.flex8 = 0x80 | bbr->rc_use_google;
 		else
 			log.u_bbr.flex8 = bbr->rc_use_google;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BBRTSO, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_rsmclear(struct tcp_bbr *bbr, uint32_t cts, struct bbr_sendmap *rsm,
 		      uint32_t flags, uint32_t line)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = rsm->r_start;
 		log.u_bbr.flex3 = rsm->r_end;
 		log.u_bbr.flex4 = rsm->r_delivered;
 		log.u_bbr.flex5 = rsm->r_rtr_cnt;
 		log.u_bbr.flex6 = rsm->r_dupack;
 		log.u_bbr.flex7 = rsm->r_tim_lastsent[0];
 		log.u_bbr.flex8 = rsm->r_flags;
 		/* Hijack the applimited field */
 		log.u_bbr.applimited = flags;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_RSM_CLEARED, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_bbrupd(struct tcp_bbr *bbr, uint8_t flex8, uint32_t cts,
     uint32_t flex3, uint32_t flex2, uint32_t flex5,
     uint32_t flex6, uint32_t pkts_out, int flex7,
     uint32_t flex4, uint32_t flex1)
 {
 
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = flex1;
 		log.u_bbr.flex2 = flex2;
 		log.u_bbr.flex3 = flex3;
 		log.u_bbr.flex4 = flex4;
 		log.u_bbr.flex5 = flex5;
 		log.u_bbr.flex6 = flex6;
 		log.u_bbr.flex7 = flex7;
 		/* Hijack the pkts_out field */
 		log.u_bbr.pkts_out = pkts_out;
 		log.u_bbr.flex8 = flex8;
 		if (bbr->rc_ack_was_delayed)
 			log.u_bbr.epoch = bbr->r_ctl.rc_ack_hdwr_delay;
 		else
 			log.u_bbr.epoch = 0;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BBRUPD, 0,
 		    flex2, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_ltbw(struct tcp_bbr *bbr, uint32_t cts, int32_t reason,
 	uint32_t newbw, uint32_t obw, uint32_t diff,
 	uint32_t tim)
 {
 	if (/*bbr_verbose_logging && */tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = reason;
 		log.u_bbr.flex2 = newbw;
 		log.u_bbr.flex3 = obw;
 		log.u_bbr.flex4 = diff;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_lt_lost;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_lt_del;
 		log.u_bbr.flex7 = bbr->rc_lt_is_sampling;
 		log.u_bbr.pkts_out = tim;
 		log.u_bbr.bw_inuse = bbr->r_ctl.rc_lt_bw;
 		if (bbr->rc_lt_use_bw == 0)
 			log.u_bbr.epoch = bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_lt_epoch;
 		else
 			log.u_bbr.epoch = bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_lt_epoch_use;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BWSAMP, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static inline void
 bbr_log_progress_event(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t tick, int event, int line)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = tick;
 		log.u_bbr.flex3 = tp->t_maxunacktime;
 		log.u_bbr.flex4 = tp->t_acktime;
 		log.u_bbr.flex8 = event;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_PROGRESS, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_type_log_hdwr_pacing(struct tcp_bbr *bbr, const struct ifnet *ifp,
 			 uint64_t rate, uint64_t hw_rate, int line, uint32_t cts,
 			 int error)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
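 		/*
 		 * Both the 64-bit hardware rate and the ifnet pointer are
 		 * split across pairs of 32-bit flex fields for the log record.
 		 */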
 		log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
 		log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
 		log.u_bbr.flex3 = (((uint64_t)ifp  >> 32) & 0x00000000ffffffff);
 		log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
 		log.u_bbr.bw_inuse = rate;
 		log.u_bbr.flex5 = line;
 		log.u_bbr.flex6 = error;
 		log.u_bbr.flex8 = bbr->skip_gain;
 		log.u_bbr.flex8 <<= 1;
 		log.u_bbr.flex8 |= bbr->gain_is_limited;
 		log.u_bbr.flex8 <<= 1;
 		log.u_bbr.flex8 |= bbr->bbr_hdrw_pacing;
 		log.u_bbr.pkts_out = bbr->rc_tp->t_maxseg;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_HDWR_PACE, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot, uint32_t del_by, uint32_t cts, uint32_t line, uint32_t prev_delay)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = slot;
 		log.u_bbr.flex2 = del_by;
 		log.u_bbr.flex3 = prev_delay;
 		log.u_bbr.flex4 = line;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_last_delay_val;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_hptsi_agg_delay;
 		log.u_bbr.flex7 = (0x0000ffff & bbr->r_ctl.rc_hpts_flags);
 		log.u_bbr.flex8 = bbr->rc_in_persist;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BBRSND, 0,
 		    len, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_type_bbrrttprop(struct tcp_bbr *bbr, uint32_t t, uint32_t end, uint32_t tsconv, uint32_t cts, int32_t match, uint32_t seq, uint8_t flags)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = bbr->r_ctl.rc_delivered;
 		log.u_bbr.flex2 = 0;
 		log.u_bbr.flex3 = bbr->r_ctl.rc_lowest_rtt;
 		log.u_bbr.flex4 = end;
 		log.u_bbr.flex5 = seq;
 		log.u_bbr.flex6 = t;
 		log.u_bbr.flex7 = match;
 		log.u_bbr.flex8 = flags;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BBRRTT, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_exit_gain(struct tcp_bbr *bbr, uint32_t cts, int32_t entry_method)
 {
 	if (tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		log.u_bbr.flex1 = bbr->r_ctl.rc_target_at_state;
 		log.u_bbr.flex2 = (bbr->rc_tp->t_maxseg - bbr->rc_last_options);
 		log.u_bbr.flex3 = bbr->r_ctl.gain_epoch;
 		log.u_bbr.flex4 = bbr->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex5 = bbr->r_ctl.rc_pace_min_segs;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_bbr_state_atflight;
 		log.u_bbr.flex7 = 0;
 		log.u_bbr.flex8 = entry_method;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_EXIT_GAIN, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 static void
 bbr_log_settings_change(struct tcp_bbr *bbr, int settings_desired)
 {
 	if (bbr_verbose_logging && tcp_bblogging_on(bbr->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime);
 		/* R-HU */
 		log.u_bbr.flex1 = 0;
 		log.u_bbr.flex2 = 0;
 		log.u_bbr.flex3 = 0;
 		log.u_bbr.flex4 = 0;
 		log.u_bbr.flex7 = 0;
 		log.u_bbr.flex8 = settings_desired;
 
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
 		    &bbr->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_SETTINGS_CHG, 0,
 		    0, &log, false, &bbr->rc_tv);
 	}
 }
 
 /*
  * Returns the bw from our b/w filter.
  */
 static inline uint64_t
 bbr_get_full_bw(struct tcp_bbr *bbr)
 {
 	uint64_t bw;
 
 	bw = get_filter_value(&bbr->r_ctl.rc_delrate);
 
 	return (bw);
 }
 
 static inline void
 bbr_set_pktepoch(struct tcp_bbr *bbr, uint32_t cts, int32_t line)
 {
 	uint64_t calclr;
 	uint32_t lost, del;
 
 	if (bbr->r_ctl.rc_lost > bbr->r_ctl.rc_lost_at_pktepoch)
 		lost = bbr->r_ctl.rc_lost - bbr->r_ctl.rc_lost_at_pktepoch;
 	else
 		lost = 0;
 	del = bbr->r_ctl.rc_delivered - bbr->r_ctl.rc_pkt_epoch_del;
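 	/*
 	 * The loss rate is expressed in tenths of a percent,
 	 * i.e. lost * 1000 / delivered, so 1000 means 100.0% loss.
 	 */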
 	if (lost == 0)  {
 		calclr = 0;
 	} else if (del) {
 		calclr = lost;
 		calclr *= (uint64_t)1000;
 		calclr /= (uint64_t)del;
 	} else {
 		/* Nothing delivered? 100.0% loss */
 		calclr = 1000;
 	}
 	bbr->r_ctl.rc_pkt_epoch_loss_rate =  (uint32_t)calclr;
 	if (IN_RECOVERY(bbr->rc_tp->t_flags))
 		bbr->r_ctl.recovery_lr += (uint32_t)calclr;
 	bbr->r_ctl.rc_pkt_epoch++;
 	if (bbr->rc_no_pacing &&
 	    (bbr->r_ctl.rc_pkt_epoch >= bbr->no_pacing_until)) {
 		bbr->rc_no_pacing = 0;
 		tcp_bbr_tso_size_check(bbr, cts);
 	}
 	bbr->r_ctl.rc_pkt_epoch_rtt = bbr_calc_time(cts, bbr->r_ctl.rc_pkt_epoch_time);
 	bbr->r_ctl.rc_pkt_epoch_time = cts;
 	/* What was our loss rate */
 	bbr_log_pkt_epoch(bbr, cts, line, lost, del);
 	bbr->r_ctl.rc_pkt_epoch_del = bbr->r_ctl.rc_delivered;
 	bbr->r_ctl.rc_lost_at_pktepoch = bbr->r_ctl.rc_lost;
 }
 
 static inline void
 bbr_set_epoch(struct tcp_bbr *bbr, uint32_t cts, int32_t line)
 {
 	uint32_t epoch_time;
 
 	/* Tick the RTT clock */
 	bbr->r_ctl.rc_rtt_epoch++;
 	epoch_time = cts - bbr->r_ctl.rc_rcv_epoch_start;
 	bbr_log_time_epoch(bbr, cts, line, epoch_time);
 	bbr->r_ctl.rc_rcv_epoch_start = cts;
 }
 
 static inline void
 bbr_isit_a_pkt_epoch(struct tcp_bbr *bbr, uint32_t cts, struct bbr_sendmap *rsm, int32_t line, int32_t cum_acked)
 {
 	if (SEQ_GEQ(rsm->r_delivered, bbr->r_ctl.rc_pkt_epoch_del)) {
 		bbr->rc_is_pkt_epoch_now = 1;
 	}
 }
 
 /*
  * Returns the bw from either the b/w filter
  * or from the lt_bw (if the connection is being
  * policed).
  */
 static inline uint64_t
 __bbr_get_bw(struct tcp_bbr *bbr)
 {
 	uint64_t bw, min_bw;
 	uint64_t rtt;
 	int gm_measure_cnt = 1;
 
 	/*
 	 * For startup we compute, like google, a
 	 * minimum b/w. This is generated from the
 	 * IW and the rttProp. We fall back to srtt
 	 * if for some reason (initial handshake) we
 	 * don't have an rttProp. In the worst case we
 	 * fall back to the configured min_bw
 	 * (rc_initial_hptsi_bw).
 	 */
 	if (bbr->rc_bbr_state == BBR_STATE_STARTUP) {
 		/* Attempt first to use rttProp */
 		rtt = (uint64_t)get_filter_value_small(&bbr->r_ctl.rc_rttprop);
 		if (rtt && (rtt < 0xffffffff)) {
 measure:
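 			/*
 			 * Bytes/sec estimate: initial window (bytes) times
 			 * 1,000,000 usec/sec, divided by the rtt in usec.
 			 */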
 			min_bw = (uint64_t)(bbr_initial_cwnd(bbr, bbr->rc_tp)) *
 				((uint64_t)1000000);
 			min_bw /= rtt;
 			if (min_bw < bbr->r_ctl.rc_initial_hptsi_bw) {
 				min_bw = bbr->r_ctl.rc_initial_hptsi_bw;
 			}
 
 		} else if (bbr->rc_tp->t_srtt != 0) {
 			/* No rttProp, use srtt? */
 			rtt = bbr_get_rtt(bbr, BBR_SRTT);
 			goto measure;
 		} else {
 			min_bw = bbr->r_ctl.rc_initial_hptsi_bw;
 		}
 	} else
 		min_bw = 0;
 
 	if ((bbr->rc_past_init_win == 0) &&
 	    (bbr->r_ctl.rc_delivered > bbr_initial_cwnd(bbr, bbr->rc_tp)))
 		bbr->rc_past_init_win = 1;
 	if ((bbr->rc_use_google)  && (bbr->r_ctl.r_measurement_count >= 1))
 		gm_measure_cnt = 0;
 	if (gm_measure_cnt &&
 	    ((bbr->r_ctl.r_measurement_count < bbr_min_measurements_req) ||
 	     (bbr->rc_past_init_win == 0))) {
 		/* For google we use our guess rate until we get 1 measurement */
 
 use_initial_window:
 		rtt = (uint64_t)get_filter_value_small(&bbr->r_ctl.rc_rttprop);
 		if (rtt && (rtt < 0xffffffff)) {
 			/*
 			 * We have an RTT measurement. Use that in
 			 * combination with our initial window to calculate
 			 * a b/w.
 			 */
 			bw = (uint64_t)(bbr_initial_cwnd(bbr, bbr->rc_tp)) *
 				((uint64_t)1000000);
 			bw /= rtt;
 			if (bw < bbr->r_ctl.rc_initial_hptsi_bw) {
 				bw = bbr->r_ctl.rc_initial_hptsi_bw;
 			}
 		} else {
 			/* Drop back to the 40 and punt to a default */
 			bw = bbr->r_ctl.rc_initial_hptsi_bw;
 		}
 		if (bw < 1)
 			/* Probably should panic */
 			bw = 1;
 		if (bw > min_bw)
 			return (bw);
 		else
 			return (min_bw);
 	}
 	if (bbr->rc_lt_use_bw)
 		bw = bbr->r_ctl.rc_lt_bw;
 	else if (bbr->r_recovery_bw && (bbr->rc_use_google == 0))
 		bw = bbr->r_ctl.red_bw;
 	else
 		bw = get_filter_value(&bbr->r_ctl.rc_delrate);
 	if (bw == 0) {
 		/* We should not be at 0, go to the initial window then  */
 		goto use_initial_window;
 	}
 	if (bw < 1)
 		/* Probably should panic */
 		bw = 1;
 	if (bw < min_bw)
 		bw = min_bw;
 	return (bw);
 }
 
 static inline uint64_t
 bbr_get_bw(struct tcp_bbr *bbr)
 {
 	uint64_t bw;
 
 	bw = __bbr_get_bw(bbr);
 	return (bw);
 }
 
 static inline void
 bbr_reset_lt_bw_interval(struct tcp_bbr *bbr, uint32_t cts)
 {
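 	/*
 	 * Snapshot the current packet epoch, delivery time, delivered
 	 * count and lost count as the start of a new long-term (lt)
 	 * bw sampling interval.
 	 */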
 	bbr->r_ctl.rc_lt_epoch = bbr->r_ctl.rc_pkt_epoch;
 	bbr->r_ctl.rc_lt_time = bbr->r_ctl.rc_del_time;
 	bbr->r_ctl.rc_lt_del = bbr->r_ctl.rc_delivered;
 	bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost;
 }
 
 static inline void
 bbr_reset_lt_bw_sampling(struct tcp_bbr *bbr, uint32_t cts)
 {
 	bbr->rc_lt_is_sampling = 0;
 	bbr->rc_lt_use_bw = 0;
 	bbr->r_ctl.rc_lt_bw = 0;
 	bbr_reset_lt_bw_interval(bbr, cts);
 }
 
 static inline void
 bbr_lt_bw_samp_done(struct tcp_bbr *bbr, uint64_t bw, uint32_t cts, uint32_t timin)
 {
 	uint64_t diff;
 
 	/* Do we have a previous sample? */
 	if (bbr->r_ctl.rc_lt_bw) {
 		/* Get the diff in bytes per second */
 		if (bbr->r_ctl.rc_lt_bw > bw)
 			diff = bbr->r_ctl.rc_lt_bw - bw;
 		else
 			diff = bw - bbr->r_ctl.rc_lt_bw;
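 		/*
 		 * Two back-to-back samples within either the absolute
 		 * (bbr_lt_bw_diff) or relative (1/bbr_lt_bw_ratio)
 		 * tolerance suggest a policer is capping our rate.
 		 */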
 		if ((diff <= bbr_lt_bw_diff) ||
 		    (diff <= (bbr->r_ctl.rc_lt_bw / bbr_lt_bw_ratio))) {
 			/* Consider us policed */
 			uint32_t saved_bw;
 
 			saved_bw = (uint32_t)bbr->r_ctl.rc_lt_bw;
 			bbr->r_ctl.rc_lt_bw = (bw + bbr->r_ctl.rc_lt_bw) / 2;	/* average of two */
 			bbr->rc_lt_use_bw = 1;
 			bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT;
 			/*
 			 * Use the pkt-based epoch for measuring how long
 			 * the policer has been in effect.
 			 */
 			bbr->r_ctl.rc_lt_epoch_use = bbr->r_ctl.rc_pkt_epoch;
 			/*
 			 * reason 4 is that we need to start considering
 			 * ourselves policed
 			 */
 			bbr_log_type_ltbw(bbr, cts, 4, (uint32_t)bw, saved_bw, (uint32_t)diff, timin);
 			return;
 		}
 	}
 	bbr->r_ctl.rc_lt_bw = bw;
 	bbr_reset_lt_bw_interval(bbr, cts);
 	bbr_log_type_ltbw(bbr, cts, 5, 0, (uint32_t)bw, 0, timin);
 }
 
 static void
 bbr_randomize_extra_state_time(struct tcp_bbr *bbr)
 {
 	uint32_t ran, deduct;
 
 	ran = arc4random_uniform(bbr_rand_ot);
 	if (ran) {
 		deduct = bbr->r_ctl.rc_level_state_extra / ran;
 		bbr->r_ctl.rc_level_state_extra -= deduct;
 	}
 }
 
 /*
  * Return randomly the starting state
  * to use in probebw.
  */
 static uint8_t
 bbr_pick_probebw_substate(struct tcp_bbr *bbr, uint32_t cts)
 {
 	uint32_t ran;
 	uint8_t ret_val;
 
 	/* Initialize the offset to 0 */
 	bbr->r_ctl.rc_exta_time_gd = 0;
 	bbr->rc_hit_state_1 = 0;
 	bbr->r_ctl.rc_level_state_extra = 0;
 	ran = arc4random_uniform((BBR_SUBSTATE_COUNT-1));
 	/*
 	 * The math works funny here :) the return value is used to set the
 	 * substate and then the state change is called which increments by
 	 * one. So if we return 1 (DRAIN) we will increment to 2 (LEVEL1) when
 	 * we fully enter the state. Note that the (8 - 1 - ran) assures that
 	 * we return 1 - 7, so we don't return 0 and end up starting in
 	 * state 1 (DRAIN).
 	 */
 	ret_val = BBR_SUBSTATE_COUNT - 1 - ran;
 	/* Set an epoch */
 	if ((cts - bbr->r_ctl.rc_rcv_epoch_start) >= bbr_get_rtt(bbr, BBR_RTT_PROP))
 		bbr_set_epoch(bbr, cts, __LINE__);
 
 	bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost;
 	return (ret_val);
 }
 
 static void
 bbr_lt_bw_sampling(struct tcp_bbr *bbr, uint32_t cts, int32_t loss_detected)
 {
 	uint32_t diff, d_time;
 	uint64_t del_time, bw, lost, delivered;
 
 	if (bbr->r_use_policer == 0)
 		return;
 	if (bbr->rc_lt_use_bw) {
 		/* We are using lt bw do we stop yet? */
 		diff = bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_lt_epoch_use;
 		if (diff > bbr_lt_bw_max_rtts) {
 			/* Reset it all */
 reset_all:
 			bbr_reset_lt_bw_sampling(bbr, cts);
 			if (bbr->rc_filled_pipe) {
 				bbr_set_epoch(bbr, cts, __LINE__);
 				bbr->rc_bbr_substate = bbr_pick_probebw_substate(bbr, cts);
 				bbr_substate_change(bbr, cts, __LINE__, 0);
 				bbr->rc_bbr_state = BBR_STATE_PROBE_BW;
 				bbr_log_type_statechange(bbr, cts, __LINE__);
 			} else {
 				/*
 				 * This should not happen really
 				 * unless we remove the startup/drain
 				 * restrictions above.
 				 */
 				bbr->rc_bbr_state = BBR_STATE_STARTUP;
 				bbr_set_epoch(bbr, cts, __LINE__);
 				bbr->r_ctl.rc_bbr_state_time = cts;
 				bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost;
 				bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.rc_startup_pg;
 				bbr->r_ctl.rc_bbr_cwnd_gain = bbr->r_ctl.rc_startup_pg;
 				bbr_set_state_target(bbr, __LINE__);
 				bbr_log_type_statechange(bbr, cts, __LINE__);
 			}
 			/* reason 0 is to stop using lt-bw */
 			bbr_log_type_ltbw(bbr, cts, 0, 0, 0, 0, 0);
 			return;
 		}
 		if (bbr_lt_intvl_fp == 0) {
 			/* Not doing false-positive detection */
 			return;
 		}
 		/* False positive detection */
 		if (diff == bbr_lt_intvl_fp) {
 			/* At bbr_lt_intvl_fp we record the lost */
 			bbr->r_ctl.rc_lt_del = bbr->r_ctl.rc_delivered;
 			bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost;
 		} else if (diff > (bbr_lt_intvl_min_rtts + bbr_lt_intvl_fp)) {
 			/* Now is our loss rate still high? */
 			lost = bbr->r_ctl.rc_lost - bbr->r_ctl.rc_lt_lost;
 			delivered = bbr->r_ctl.rc_delivered - bbr->r_ctl.rc_lt_del;
 			if ((delivered == 0) ||
 			    (((lost * 1000)/delivered) < bbr_lt_fd_thresh)) {
 				/* No still below our threshold */
 				bbr_log_type_ltbw(bbr, cts, 7, lost, delivered, 0, 0);
 			} else {
 				/* Yikes its still high, it must be a false positive */
 				bbr_log_type_ltbw(bbr, cts, 8, lost, delivered, 0, 0);
 				goto reset_all;
 			}
 		}
 		return;
 	}
 	/*
 	 * Wait for the first loss before sampling, to let the policer
 	 * exhaust its tokens and estimate the steady-state rate allowed by
 	 * the policer. Starting samples earlier includes bursts that
 	 * over-estimate the bw.
 	 */
 	if (bbr->rc_lt_is_sampling == 0) {
 		/* reason 1 is to begin doing the sampling  */
 		if (loss_detected == 0)
 			return;
 		bbr_reset_lt_bw_interval(bbr, cts);
 		bbr->rc_lt_is_sampling = 1;
 		bbr_log_type_ltbw(bbr, cts, 1, 0, 0, 0, 0);
 		return;
 	}
 	/* Now, how long were we delivering in this long-term interval? */
 	if (TSTMP_GEQ(bbr->r_ctl.rc_del_time, bbr->r_ctl.rc_lt_time))
 		d_time = bbr->r_ctl.rc_del_time - bbr->r_ctl.rc_lt_time;
 	else
 		d_time = 0;
 
 	/* To avoid underestimates, reset sampling if we run out of data. */
 	if (bbr->r_ctl.r_app_limited_until) {
 		/* Can not measure in app-limited state */
 		bbr_reset_lt_bw_sampling(bbr, cts);
 		/* reason 2 is to reset sampling due to app limits  */
 		bbr_log_type_ltbw(bbr, cts, 2, 0, 0, 0, d_time);
 		return;
 	}
 	diff = bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_lt_epoch;
 	if (diff < bbr_lt_intvl_min_rtts) {
 		/*
 		 * need more samples (we don't
 		 * start on a round like linux so
 		 * we need 1 more).
 		 */
 		/* 6 is not_enough time or no-loss */
 		bbr_log_type_ltbw(bbr, cts, 6, 0, 0, 0, d_time);
 		return;
 	}
 	if (diff > (4 * bbr_lt_intvl_min_rtts)) {
 		/*
 	 * For now, if we wait too long, reset all sampling. We need
 	 * to do some research here; it's possible that we should
 	 * base this on how much loss has occurred, e.g. if it's under
 	 * 10% (or some threshold) reset all, otherwise don't. That's
 	 * for phase II, I guess.
 		 */
 		bbr_reset_lt_bw_sampling(bbr, cts);
 		/* reason 3 is to reset sampling due to sampling for too long */
 		bbr_log_type_ltbw(bbr, cts, 3, 0, 0, 0, d_time);
 		return;
 	}
 	/*
 	 * End sampling interval when a packet is lost, so we estimate the
 	 * policer tokens were exhausted. Stopping the sampling before the
 	 * tokens are exhausted under-estimates the policed rate.
 	 */
 	if (loss_detected == 0) {
 		/* 6 is not_enough time or no-loss */
 		bbr_log_type_ltbw(bbr, cts, 6, 0, 0, 0, d_time);
 		return;
 	}
 	/* Calculate packets lost and delivered in sampling interval. */
 	lost = bbr->r_ctl.rc_lost - bbr->r_ctl.rc_lt_lost;
 	delivered = bbr->r_ctl.rc_delivered - bbr->r_ctl.rc_lt_del;
 	if ((delivered == 0) ||
 	    (((lost * 1000)/delivered) < bbr_lt_loss_thresh)) {
 		bbr_log_type_ltbw(bbr, cts, 6, lost, delivered, 0, d_time);
 		return;
 	}
 	if (d_time < 1000) {
 		/* Not enough time; wait. */
 		/* 6 is not_enough time or no-loss */
 		bbr_log_type_ltbw(bbr, cts, 6, 0, 0, 0, d_time);
 		return;
 	}
 	if (d_time >= (0xffffffff / USECS_IN_MSEC)) {
 		/* Too long */
 		bbr_reset_lt_bw_sampling(bbr, cts);
 		/* reason 3 is to reset sampling due to sampling for too long */
 		bbr_log_type_ltbw(bbr, cts, 3, 0, 0, 0, d_time);
 		return;
 	}
 	del_time = d_time;
 	bw = delivered;
 	bw *= (uint64_t)USECS_IN_SECOND;
 	bw /= del_time;
 	bbr_lt_bw_samp_done(bbr, bw, cts, d_time);
 }
 
 /*
  * Allocate a sendmap from our zone.
  */
 static struct bbr_sendmap *
 bbr_alloc(struct tcp_bbr *bbr)
 {
 	struct bbr_sendmap *rsm;
 
 	BBR_STAT_INC(bbr_to_alloc);
 	rsm = uma_zalloc(bbr_zone, (M_NOWAIT | M_ZERO));
 	if (rsm) {
 		bbr->r_ctl.rc_num_maps_alloced++;
 		return (rsm);
 	}
 	if (bbr->r_ctl.rc_free_cnt) {
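 		/* The zone allocation failed; fall back to our local free list. */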
 		BBR_STAT_INC(bbr_to_alloc_emerg);
 		rsm = TAILQ_FIRST(&bbr->r_ctl.rc_free);
 		TAILQ_REMOVE(&bbr->r_ctl.rc_free, rsm, r_next);
 		bbr->r_ctl.rc_free_cnt--;
 		return (rsm);
 	}
 	BBR_STAT_INC(bbr_to_alloc_failed);
 	return (NULL);
 }
 
 static struct bbr_sendmap *
 bbr_alloc_full_limit(struct tcp_bbr *bbr)
 {
 	if ((V_tcp_map_entries_limit > 0) &&
 	    (bbr->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
 		BBR_STAT_INC(bbr_alloc_limited);
 		if (!bbr->alloc_limit_reported) {
 			bbr->alloc_limit_reported = 1;
 			BBR_STAT_INC(bbr_alloc_limited_conns);
 		}
 		return (NULL);
 	}
 	return (bbr_alloc(bbr));
 }
 
 /* wrapper to allocate a sendmap entry, subject to a specific limit */
 static struct bbr_sendmap *
 bbr_alloc_limit(struct tcp_bbr *bbr, uint8_t limit_type)
 {
 	struct bbr_sendmap *rsm;
 
 	if (limit_type) {
 		/* currently there is only one limit type */
 		if (V_tcp_map_split_limit > 0 &&
 		    bbr->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) {
 			BBR_STAT_INC(bbr_split_limited);
 			if (!bbr->alloc_limit_reported) {
 				bbr->alloc_limit_reported = 1;
 				BBR_STAT_INC(bbr_alloc_limited_conns);
 			}
 			return (NULL);
 		}
 	}
 
 	/* allocate and mark in the limit type, if set */
 	rsm = bbr_alloc(bbr);
 	if (rsm != NULL && limit_type) {
 		rsm->r_limit_type = limit_type;
 		bbr->r_ctl.rc_num_split_allocs++;
 	}
 	return (rsm);
 }
 
 static void
 bbr_free(struct tcp_bbr *bbr, struct bbr_sendmap *rsm)
 {
 	if (rsm->r_limit_type) {
 		/* currently there is only one limit type */
 		bbr->r_ctl.rc_num_split_allocs--;
 	}
 	if (rsm->r_is_smallmap)
 		bbr->r_ctl.rc_num_small_maps_alloced--;
 	if (bbr->r_ctl.rc_tlp_send == rsm)
 		bbr->r_ctl.rc_tlp_send = NULL;
 	if (bbr->r_ctl.rc_resend == rsm) {
 		bbr->r_ctl.rc_resend = NULL;
 	}
 	if (bbr->r_ctl.rc_next == rsm)
 		bbr->r_ctl.rc_next = NULL;
 	if (bbr->r_ctl.rc_sacklast == rsm)
 		bbr->r_ctl.rc_sacklast = NULL;
 	if (bbr->r_ctl.rc_free_cnt < bbr_min_req_free) {
 		memset(rsm, 0, sizeof(struct bbr_sendmap));
 		TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_free, rsm, r_next);
 		rsm->r_limit_type = 0;
 		bbr->r_ctl.rc_free_cnt++;
 		return;
 	}
 	bbr->r_ctl.rc_num_maps_alloced--;
 	uma_zfree(bbr_zone, rsm);
 }
 
 /*
  * Returns the BDP.
  */
 static uint64_t
 bbr_get_bw_delay_prod(uint64_t rtt, uint64_t bw) {
 	/*
 	 * Calculate the bytes in flight needed given the bw (in bytes per
 	 * second) and the specified rtt in useconds. We need to put out the
 	 * returned value per RTT to match that rate. Gain will normally
 	 * raise it up from there.
 	 *
 	 * The product rtt * bw must stay below 2**64 to avoid overflow; for
 	 * example, a bandwidth below 1 TByte per second (bw < 10**12, about
 	 * 2**40) is safe for rtts up to roughly 2**24 usec (~16 seconds).
 	 */
 	uint64_t usec_per_sec;
 
 	usec_per_sec = USECS_IN_SECOND;
 	return ((rtt * bw) / usec_per_sec);
 }
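 
 /*
  * Illustrative example of the BDP calculation above, using assumed round
  * numbers (not values from this stack):
  *
  *   bw  = 12,500,000 bytes/sec   (100 Mbit/s)
  *   rtt = 40,000 usec            (40 ms)
  *
  *   BDP = (rtt * bw) / USECS_IN_SECOND
  *       = (40,000 * 12,500,000) / 1,000,000
  *       = 500,000 bytes          (~342 segments of 1460 bytes)
  *
  * This is the amount of data that must be in flight each RTT to fill the
  * path at that rate, before any gain is applied.
  */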
 
 /*
  * Return the initial cwnd.
  */
 static uint32_t
 bbr_initial_cwnd(struct tcp_bbr *bbr, struct tcpcb *tp)
 {
 	uint32_t i_cwnd;
 
 	if (bbr->rc_init_win) {
 		i_cwnd = bbr->rc_init_win * tp->t_maxseg;
 	} else if (V_tcp_initcwnd_segments)
 		i_cwnd = min((V_tcp_initcwnd_segments * tp->t_maxseg),
 		    max(2 * tp->t_maxseg, 14600));
 	else if (V_tcp_do_rfc3390)
 		i_cwnd = min(4 * tp->t_maxseg,
 		    max(2 * tp->t_maxseg, 4380));
 	else {
 		/* Per RFC5681 Section 3.1 */
 		if (tp->t_maxseg > 2190)
 			i_cwnd = 2 * tp->t_maxseg;
 		else if (tp->t_maxseg > 1095)
 			i_cwnd = 3 * tp->t_maxseg;
 		else
 			i_cwnd = 4 * tp->t_maxseg;
 	}
 	return (i_cwnd);
 }
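 
 /*
  * Worked example of the initial cwnd selection above, assuming
  * t_maxseg = 1460. The sysctl values shown are assumptions for the
  * example only, not documented defaults:
  *
  *   rc_init_win = 10             -> i_cwnd = 10 * 1460               = 14,600
  *   V_tcp_initcwnd_segments = 10 -> min(10 * 1460, max(2920, 14600)) = 14,600
  *   V_tcp_do_rfc3390             -> min(4 * 1460, max(2920, 4380))   =  4,380
  *   RFC5681 fallback             -> 1460 is in (1095, 2190], so 3 * 1460 = 4,380
  */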
 
 /*
  * Given a specified gain, return the target
  * cwnd based on that gain.
  */
 static uint32_t
 bbr_get_raw_target_cwnd(struct tcp_bbr *bbr, uint32_t gain, uint64_t bw)
 {
 	uint64_t bdp, rtt;
 	uint32_t cwnd;
 
 	if ((get_filter_value_small(&bbr->r_ctl.rc_rttprop) == 0xffffffff) ||
 	    (bbr_get_full_bw(bbr) == 0)) {
 		/* No measurements yet */
 		return (bbr_initial_cwnd(bbr, bbr->rc_tp));
 	}
 	/*
 	 * Get bytes per RTT needed (rttProp is normally in
 	 * bbr_cwndtarget_rtt_touse)
 	 */
 	rtt = bbr_get_rtt(bbr, bbr_cwndtarget_rtt_touse);
 	/* Get the bdp from the two values */
 	bdp = bbr_get_bw_delay_prod(rtt, bw);
 	/* Now apply the gain */
 	cwnd = (uint32_t)(((bdp * ((uint64_t)gain)) + (uint64_t)(BBR_UNIT - 1)) / ((uint64_t)BBR_UNIT));
 
 	return (cwnd);
 }
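 
 /*
  * Illustrative example of the gain application above. Gains are expressed
  * in units of BBR_UNIT, so gain == 2 * BBR_UNIT means 2x the BDP no matter
  * what the constant's actual value is (assumed numbers):
  *
  *   bdp  = 500,000 bytes (from the BDP example above)
  *   gain = 2 * BBR_UNIT
  *   cwnd = ceil(bdp * gain / BBR_UNIT) = 1,000,000 bytes
  *
  * The (BBR_UNIT - 1) term makes the division round up, so the target is
  * never truncated below the gained BDP.
  */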
 
 static uint32_t
 bbr_get_target_cwnd(struct tcp_bbr *bbr, uint64_t bw, uint32_t gain)
 {
 	uint32_t cwnd, mss;
 
 	mss = min((bbr->rc_tp->t_maxseg - bbr->rc_last_options), bbr->r_ctl.rc_pace_max_segs);
 	/* Get the base cwnd with gain rounded to a mss */
 	cwnd = roundup(bbr_get_raw_target_cwnd(bbr, bw, gain), mss);
 	/*
 	 * Add in N quanta (default 2, since we do not have an
 	 * fq layer to trap packets in) per the I-D
 	 * section 4.2.3.2 quanta adjustment.
 	 */
 	cwnd += (bbr_quanta * bbr->r_ctl.rc_pace_max_segs);
 	if (bbr->rc_use_google) {
 		if ((bbr->rc_bbr_state == BBR_STATE_PROBE_BW) &&
 		   (bbr_state_val(bbr) == BBR_SUB_GAIN)) {
 			/*
 			 * The linux implementation adds
 			 * an extra 2 x mss in the gain cycle which
 			 * is documented nowhere except in the code.
 			 * So we add the same to match that undocumented behavior.
 			 */
 			cwnd += 2 * mss;
 		}
 		if ((cwnd / mss) & 0x1) {
 			/* Round up if cwnd is an odd number of mss */
 			cwnd += mss;
 		}
 	}
 	/* Are we below the min cwnd? */
 	if (cwnd < get_min_cwnd(bbr))
 		return (get_min_cwnd(bbr));
 	return (cwnd);
 }
 
 static uint16_t
 bbr_gain_adjust(struct tcp_bbr *bbr, uint16_t gain)
 {
 	if (gain < 1)
 		gain = 1;
 	return (gain);
 }
 
 static uint32_t
 bbr_get_header_oh(struct tcp_bbr *bbr)
 {
 	int seg_oh;
 
 	seg_oh = 0;
 	if (bbr->r_ctl.rc_inc_tcp_oh) {
 		/* Do we include TCP overhead? */
 		seg_oh = (bbr->rc_last_options + sizeof(struct tcphdr));
 	}
 	if (bbr->r_ctl.rc_inc_ip_oh) {
 		/* Do we include IP overhead? */
 #ifdef INET6
 		if (bbr->r_is_v6) {
 			seg_oh += sizeof(struct ip6_hdr);
 		} else
 #endif
 		{
 
 #ifdef INET
 			seg_oh += sizeof(struct ip);
 #endif
 		}
 	}
 	if (bbr->r_ctl.rc_inc_enet_oh) {
 		/* Do we include the ethernet overhead?  */
 		seg_oh += sizeof(struct ether_header);
 	}
 	return(seg_oh);
 }
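 
 /*
  * Illustrative example of the per-segment overhead computed above, assuming
  * 12 bytes of TCP options in rc_last_options (e.g. timestamps; an assumed
  * value) and all three overhead knobs enabled:
  *
  *   TCP:      sizeof(struct tcphdr) + 12  = 20 + 12 = 32
  *   IPv4:     sizeof(struct ip)           = 20      (IPv6 would be 40)
  *   Ethernet: sizeof(struct ether_header) = 14
  *
  *   seg_oh = 32 + 20 + 14 = 66 bytes per segment (86 for IPv6)
  *
  * bbr_get_pacing_delay() adds this per segment (in non-google mode) so the
  * pacing rate approximates the on-the-wire rate rather than the payload rate.
  */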
 
 static uint32_t
 bbr_get_pacing_length(struct tcp_bbr *bbr, uint16_t gain, uint32_t useconds_time, uint64_t bw)
 {
 	uint64_t divisor, res, tim;
 
 	if (useconds_time == 0)
 		return (0);
 	gain = bbr_gain_adjust(bbr, gain);
 	divisor = (uint64_t)USECS_IN_SECOND * (uint64_t)BBR_UNIT;
 	tim = useconds_time;
 	res = (tim * bw * gain) / divisor;
 	if (res == 0)
 		res = 1;
 	return ((uint32_t)res);
 }
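 
 /*
  * Illustrative example of the length calculation above: how many bytes
  * should go out in a given time slice at a gained rate. With unity gain
  * (gain == BBR_UNIT) the BBR_UNIT factors cancel (assumed numbers):
  *
  *   bw            = 12,500,000 bytes/sec
  *   useconds_time = 10,000 usec (10 ms)
  *   res = (10,000 * 12,500,000 * BBR_UNIT) / (1,000,000 * BBR_UNIT)
  *       = 125,000 bytes
  *
  * This is the inverse of bbr_get_pacing_delay() below, which turns a
  * length back into a pacing time.
  */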
 
 /*
  * Given a gain and a length return the delay in useconds that
  * should be used to evenly space out packets
  * on the connection (based on the gain factor).
  */
 static uint32_t
 bbr_get_pacing_delay(struct tcp_bbr *bbr, uint16_t gain, int32_t len, uint32_t cts, int nolog)
 {
 	uint64_t bw, lentim, res;
 	uint32_t usecs, srtt, over = 0;
 	uint32_t seg_oh, num_segs, maxseg;
 
 	if (len == 0)
 		return (0);
 
 	maxseg = bbr->rc_tp->t_maxseg - bbr->rc_last_options;
 	num_segs = (len + maxseg - 1) / maxseg;
 	if (bbr->rc_use_google == 0) {
 		seg_oh = bbr_get_header_oh(bbr);
 		len += (num_segs * seg_oh);
 	}
 	gain = bbr_gain_adjust(bbr, gain);
 	bw = bbr_get_bw(bbr);
 	if (bbr->rc_use_google) {
 		uint64_t cbw;
 
 		/*
 		 * Reduce the b/w by the google discount
 		 * factor 10 = 1%.
 		 */
 		cbw = bw *  (uint64_t)(1000 - bbr->r_ctl.bbr_google_discount);
 		cbw /= (uint64_t)1000;
 		/* We don't apply a discount if it results in 0 */
 		if (cbw > 0)
 			bw = cbw;
 	}
 	lentim = ((uint64_t)len *
 		  (uint64_t)USECS_IN_SECOND *
 		  (uint64_t)BBR_UNIT);
 	res = lentim / ((uint64_t)gain * bw);
 	if (res == 0)
 		res = 1;
 	usecs = (uint32_t)res;
 	srtt = bbr_get_rtt(bbr, BBR_SRTT);
 	if (bbr_hptsi_max_mul && bbr_hptsi_max_div &&
 	    (bbr->rc_use_google == 0) &&
 	    (usecs > ((srtt * bbr_hptsi_max_mul) / bbr_hptsi_max_div))) {
 		/*
 		 * We cannot let the delay be more than 1/2 the srtt time.
 		 * Otherwise we cannot pace out or send properly.
 		 */
 		over = usecs = (srtt * bbr_hptsi_max_mul) / bbr_hptsi_max_div;
 		BBR_STAT_INC(bbr_hpts_min_time);
 	}
 	if (!nolog)
 		bbr_log_pacing_delay_calc(bbr, gain, len, cts, usecs, bw, over, 1);
 	return (usecs);
 }
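 
 /*
  * Illustrative example of the pacing delay above, ignoring the header
  * overhead and google-mode discount for simplicity (assumed numbers):
  *
  *   len  = 2,896 bytes (two 1,448 byte segments)
  *   bw   = 12,500,000 bytes/sec
  *   gain = BBR_UNIT (unity)
  *
  *   usecs = (len * USECS_IN_SECOND * BBR_UNIT) / (gain * bw)
  *         = (2,896 * 1,000,000) / 12,500,000
  *         = 231 usec before the next send
  *
  * With gain = 2 * BBR_UNIT the same send would be paced at ~115 usec, i.e.
  * twice the estimated bottleneck rate. The srtt-based clamp above then caps
  * the delay so a single slot never exceeds a fraction of the srtt.
  */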
 
 static void
 bbr_ack_received(struct tcpcb *tp, struct tcp_bbr *bbr, struct tcphdr *th, uint32_t bytes_this_ack,
 		 uint32_t sack_changed, uint32_t prev_acked, int32_t line, uint32_t losses)
 {
 	uint64_t bw;
 	uint32_t cwnd, target_cwnd, saved_bytes, maxseg;
 	int32_t meth;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 #ifdef STATS
 	if ((tp->t_flags & TF_GPUTINPROG) &&
 	    SEQ_GEQ(th->th_ack, tp->gput_ack)) {
 		/*
 		 * Stretch acks and compressed acks will cause this to
 		 * oscillate, but we are doing it the same way as the main
 		 * stack so it will be comparable (though possibly not
 		 * ideal).
 		 */
 		int32_t cgput;
 		int64_t gput, time_stamp;
 
 		gput = (int64_t) (th->th_ack - tp->gput_seq) * 8;
 		time_stamp = max(1, ((bbr->r_ctl.rc_rcvtime - tp->gput_ts) / 1000));
 		cgput = gput / time_stamp;
 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
 					 cgput);
 		if (tp->t_stats_gput_prev > 0)
 			stats_voi_update_abs_s32(tp->t_stats,
 						 VOI_TCP_GPUT_ND,
 						 ((gput - tp->t_stats_gput_prev) * 100) /
 						 tp->t_stats_gput_prev);
 		tp->t_flags &= ~TF_GPUTINPROG;
 		tp->t_stats_gput_prev = cgput;
 	}
 #endif
 	if ((bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) &&
 	    ((bbr->r_ctl.bbr_rttprobe_gain_val == 0) || bbr->rc_use_google)) {
 		/* We don't change anything in probe-rtt */
 		return;
 	}
 	maxseg = tp->t_maxseg - bbr->rc_last_options;
 	saved_bytes = bytes_this_ack;
 	bytes_this_ack += sack_changed;
 	if (bytes_this_ack > prev_acked) {
 		bytes_this_ack -= prev_acked;
 		/*
 		 * Any ack'd byte is credited as a full mss
 		 * to be like linux, i.e. they count packets.
 		 */
 		if ((bytes_this_ack < maxseg) && bbr->rc_use_google)
 			bytes_this_ack = maxseg;
 	} else {
 		/* Unlikely */
 		bytes_this_ack = 0;
 	}
 	cwnd = tp->snd_cwnd;
 	bw = get_filter_value(&bbr->r_ctl.rc_delrate);
 	if (bw)
 		target_cwnd = bbr_get_target_cwnd(bbr,
 						  bw,
 						  (uint32_t)bbr->r_ctl.rc_bbr_cwnd_gain);
 	else
 		target_cwnd = bbr_initial_cwnd(bbr, bbr->rc_tp);
 	if (IN_RECOVERY(tp->t_flags) &&
 	    (bbr->bbr_prev_in_rec == 0)) {
 		/*
 		 * We are entering recovery and
 		 * thus packet conservation.
 		 */
 		bbr->pkt_conservation = 1;
 		bbr->r_ctl.rc_recovery_start = bbr->r_ctl.rc_rcvtime;
 		cwnd = ctf_flight_size(tp,
 				       (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) +
 			bytes_this_ack;
 	}
 	if (IN_RECOVERY(tp->t_flags)) {
 		uint32_t flight;
 
 		bbr->bbr_prev_in_rec = 1;
 		if (cwnd > losses) {
 			cwnd -= losses;
 			if (cwnd < maxseg)
 				cwnd = maxseg;
 		} else
 			cwnd = maxseg;
 		flight = ctf_flight_size(tp,
 					 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 		bbr_log_type_cwndupd(bbr, flight, 0,
 				     losses, 10, 0, 0, line);
 		if (bbr->pkt_conservation) {
 			uint32_t time_in;
 
 			if (TSTMP_GEQ(bbr->r_ctl.rc_rcvtime, bbr->r_ctl.rc_recovery_start))
 				time_in = bbr->r_ctl.rc_rcvtime - bbr->r_ctl.rc_recovery_start;
 			else
 				time_in = 0;
 
 			if (time_in >= bbr_get_rtt(bbr, BBR_RTT_PROP)) {
 				/* Clear packet conservation after an rttProp */
 				bbr->pkt_conservation = 0;
 			} else {
 				if ((flight + bytes_this_ack) > cwnd)
 					cwnd = flight + bytes_this_ack;
 				if (cwnd < get_min_cwnd(bbr))
 					cwnd = get_min_cwnd(bbr);
 				tp->snd_cwnd = cwnd;
 				bbr_log_type_cwndupd(bbr, saved_bytes, sack_changed,
 						     prev_acked, 1, target_cwnd, th->th_ack, line);
 				return;
 			}
 		}
 	} else
 		bbr->bbr_prev_in_rec = 0;
 	if ((bbr->rc_use_google == 0) && bbr->r_ctl.restrict_growth) {
 		bbr->r_ctl.restrict_growth--;
 		if (bytes_this_ack > maxseg)
 			bytes_this_ack = maxseg;
 	}
 	if (bbr->rc_filled_pipe) {
 		/*
 		 * Here we have exited startup and filled the pipe. We will
 		 * thus allow the cwnd to shrink to the target. This is the
 		 * path we hit most of the time.
 		 */
 		uint32_t s_cwnd;
 
 		meth = 2;
 		s_cwnd = min((cwnd + bytes_this_ack), target_cwnd);
 		if (s_cwnd > cwnd)
 			cwnd = s_cwnd;
 		else if (bbr_cwnd_may_shrink || bbr->rc_use_google || bbr->rc_no_pacing)
 			cwnd = s_cwnd;
 	} else {
 		/*
 		 * Here we are still in startup; we increase cwnd by what
 		 * has been acked.
 		 */
 		if ((cwnd < target_cwnd) ||
 		    (bbr->rc_past_init_win == 0)) {
 			meth = 3;
 			cwnd += bytes_this_ack;
 		} else {
 			/*
 			 * Method 4 means we are at target so no gain in
 			 * startup and past the initial window.
 			 */
 			meth = 4;
 		}
 	}
 	tp->snd_cwnd = max(cwnd, get_min_cwnd(bbr));
 	bbr_log_type_cwndupd(bbr, saved_bytes, sack_changed, prev_acked, meth, target_cwnd, th->th_ack, line);
 }
 
 static void
 tcp_bbr_partialack(struct tcpcb *tp)
 {
 	struct tcp_bbr *bbr;
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	if (ctf_flight_size(tp,
 		(bbr->r_ctl.rc_sacked  + bbr->r_ctl.rc_lost_bytes)) <=
 	    tp->snd_cwnd) {
 		bbr->r_wanted_output = 1;
 	}
 }
 
 static void
 bbr_post_recovery(struct tcpcb *tp)
 {
 	struct tcp_bbr *bbr;
 	uint32_t  flight;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	/*
 	 * Here we just exit recovery.
 	 */
 	EXIT_RECOVERY(tp->t_flags);
 	/* Lock in our b/w reduction for the specified number of pkt-epochs */
 	bbr->r_recovery_bw = 0;
 	tp->snd_recover = tp->snd_una;
 	tcp_bbr_tso_size_check(bbr, bbr->r_ctl.rc_rcvtime);
 	bbr->pkt_conservation = 0;
 	if (bbr->rc_use_google == 0) {
 		/*
 		 * For non-google mode let's
 		 * go ahead and make sure we clear
 		 * the recovery state, so that if we
 		 * bounce back into recovery we
 		 * will do PC (packet conservation).
 		 */
 		bbr->bbr_prev_in_rec = 0;
 	}
 	bbr_log_type_exit_rec(bbr);
 	if (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT) {
 		tp->snd_cwnd = max(tp->snd_cwnd, bbr->r_ctl.rc_cwnd_on_ent);
 		bbr_log_type_cwndupd(bbr, 0, 0, 0, 15, 0, 0, __LINE__);
 	} else {
 		/* For probe-rtt case lets fix up its saved_cwnd */
 		if (bbr->r_ctl.rc_saved_cwnd < bbr->r_ctl.rc_cwnd_on_ent) {
 			bbr->r_ctl.rc_saved_cwnd = bbr->r_ctl.rc_cwnd_on_ent;
 			bbr_log_type_cwndupd(bbr, 0, 0, 0, 16, 0, 0, __LINE__);
 		}
 	}
 	flight = ctf_flight_size(tp,
 		     (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 	if ((bbr->rc_use_google == 0) &&
 	    bbr_do_red) {
 		uint64_t val, lr2use;
 		uint32_t maxseg, newcwnd, acks_inflight, ratio, cwnd;
 		uint32_t *cwnd_p;
 
 		if (bbr_get_rtt(bbr, BBR_SRTT)) {
 			val = ((uint64_t)bbr_get_rtt(bbr, BBR_RTT_PROP) * (uint64_t)1000);
 			val /= bbr_get_rtt(bbr, BBR_SRTT);
 			ratio = (uint32_t)val;
 		} else
 			ratio = 1000;
 
 		bbr_log_type_cwndupd(bbr, bbr_red_mul, bbr_red_div,
 				     bbr->r_ctl.recovery_lr, 21,
 				     ratio,
 				     bbr->r_ctl.rc_red_cwnd_pe,
 				     __LINE__);
 		if ((ratio < bbr_do_red) || (bbr_do_red == 0))
 			goto done;
 		if (((bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) &&
 		     bbr_prtt_slam_cwnd) ||
 		    (bbr_sub_drain_slam_cwnd &&
 		     (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) &&
 		     bbr->rc_hit_state_1 &&
 		     (bbr_state_val(bbr) == BBR_SUB_DRAIN)) ||
 		    ((bbr->rc_bbr_state == BBR_STATE_DRAIN) &&
 		     bbr_slam_cwnd_in_main_drain)) {
 			/*
 			 * Here we must poke at the saved cwnd
 			 * as well as the cwnd.
 			 */
 			cwnd = bbr->r_ctl.rc_saved_cwnd;
 			cwnd_p = &bbr->r_ctl.rc_saved_cwnd;
 		} else {
 			cwnd = tp->snd_cwnd;
 			cwnd_p = &tp->snd_cwnd;
 		}
 		maxseg = tp->t_maxseg - bbr->rc_last_options;
 		/* Add the overall lr with the recovery lr */
 		if (bbr->r_ctl.rc_lost == 0)
 			lr2use = 0;
 		else if (bbr->r_ctl.rc_delivered == 0)
 			lr2use = 1000;
 		else {
 			lr2use = bbr->r_ctl.rc_lost * 1000;
 			lr2use /= bbr->r_ctl.rc_delivered;
 		}
 		lr2use += bbr->r_ctl.recovery_lr;
 		acks_inflight = (flight / (maxseg * 2));
 		if (bbr_red_scale) {
 			lr2use *= bbr_get_rtt(bbr, BBR_SRTT);
 			lr2use /= bbr_red_scale;
 			if ((bbr_red_growth_restrict) &&
 			    ((bbr_get_rtt(bbr, BBR_SRTT)/bbr_red_scale) > 1))
 			    bbr->r_ctl.restrict_growth += acks_inflight;
 		}
 		if (lr2use) {
 			val = (uint64_t)cwnd * lr2use;
 			val /= 1000;
 			if (cwnd > val)
 				newcwnd = roundup((cwnd - val), maxseg);
 			else
 				newcwnd = maxseg;
 		} else {
 			val = (uint64_t)cwnd * (uint64_t)bbr_red_mul;
 			val /= (uint64_t)bbr_red_div;
 			newcwnd = roundup((uint32_t)val, maxseg);
 		}
 		/* with standard delayed acks how many acks can I expect? */
 		if (bbr_drop_limit == 0) {
 			/*
 			 * Anticipate how much we will
 			 * raise the cwnd based on the acks.
 			 */
 			if ((newcwnd + (acks_inflight * maxseg)) < get_min_cwnd(bbr)) {
 				/* We do enforce the min (with the acks) */
 				newcwnd = (get_min_cwnd(bbr) - acks_inflight);
 			}
 		} else {
 			/*
 			 * A strict drop limit of N is in place
 			 */
 			if (newcwnd < (bbr_drop_limit * maxseg)) {
 				newcwnd = bbr_drop_limit * maxseg;
 			}
 		}
 		/* For the next N acks do we restrict the growth */
 		*cwnd_p = newcwnd;
 		if (tp->snd_cwnd > newcwnd)
 			tp->snd_cwnd = newcwnd;
 		bbr_log_type_cwndupd(bbr, bbr_red_mul, bbr_red_div, val, 22,
 				     (uint32_t)lr2use,
 				     bbr_get_rtt(bbr, BBR_SRTT), __LINE__);
 		bbr->r_ctl.rc_red_cwnd_pe = bbr->r_ctl.rc_pkt_epoch;
 	}
 done:
 	bbr->r_ctl.recovery_lr = 0;
 	if (flight <= tp->snd_cwnd) {
 		bbr->r_wanted_output = 1;
 	}
 	tcp_bbr_tso_size_check(bbr, bbr->r_ctl.rc_rcvtime);
 }
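 
 /*
  * Worked example of the RED-style cwnd reduction in bbr_post_recovery()
  * above, ignoring the optional bbr_red_scale adjustment and using assumed
  * numbers:
  *
  *   rc_lost     = 5,000 bytes, rc_delivered = 100,000 bytes
  *     -> lifetime loss rate = 5,000 * 1000 / 100,000 = 50 (5.0%)
  *   recovery_lr = 20 (2.0% loss in this recovery episode)
  *     -> lr2use = 50 + 20 = 70
  *
  *   cwnd = 100,000, maxseg = 1,448
  *   newcwnd = roundup(cwnd - cwnd * lr2use / 1000, maxseg)
  *           = roundup(100,000 - 7,000, 1,448)
  *           = roundup(93,000, 1,448) = 94,120 bytes (65 segments)
  *
  * The result is then floored by either the anticipated-ack calculation or
  * the strict bbr_drop_limit, whichever mode is configured.
  */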
 
 static void
 bbr_setup_red_bw(struct tcp_bbr *bbr, uint32_t cts)
 {
 	bbr->r_ctl.red_bw = get_filter_value(&bbr->r_ctl.rc_delrate);
 	/* Limit the drop in b/w to 1/2 our current filter. */
 	if (bbr->r_ctl.red_bw > bbr->r_ctl.rc_bbr_cur_del_rate)
 		bbr->r_ctl.red_bw = bbr->r_ctl.rc_bbr_cur_del_rate;
 	if (bbr->r_ctl.red_bw < (get_filter_value(&bbr->r_ctl.rc_delrate) / 2))
 		bbr->r_ctl.red_bw = get_filter_value(&bbr->r_ctl.rc_delrate) / 2;
 	tcp_bbr_tso_size_check(bbr, cts);
 }
 
 static void
 bbr_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type, struct bbr_sendmap *rsm)
 {
 	struct tcp_bbr *bbr;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 #ifdef STATS
 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
 #endif
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	switch (type) {
 	case CC_NDUPACK:
 		if (!IN_RECOVERY(tp->t_flags)) {
 			tp->snd_recover = tp->snd_max;
 			/* Start a new epoch */
 			bbr_set_pktepoch(bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
 			if (bbr->rc_lt_is_sampling || bbr->rc_lt_use_bw) {
 				/*
 				 * Move forward the lt epoch
 				 * so it won't count the truncated
 				 * epoch.
 				 */
 				bbr->r_ctl.rc_lt_epoch++;
 			}
 			if (bbr->rc_bbr_state == BBR_STATE_STARTUP) {
 				/*
 				 * Just like the policer detection code
 				 * if we are in startup we must push
 				 * forward the last startup epoch
 				 * to hide the truncated PE.
 				 */
 				bbr->r_ctl.rc_bbr_last_startup_epoch++;
 			}
 			bbr->r_ctl.rc_cwnd_on_ent = tp->snd_cwnd;
 			ENTER_RECOVERY(tp->t_flags);
 			bbr->rc_tlp_rtx_out = 0;
 			bbr->r_ctl.recovery_lr = bbr->r_ctl.rc_pkt_epoch_loss_rate;
 			tcp_bbr_tso_size_check(bbr, bbr->r_ctl.rc_rcvtime);
 			if (tcp_in_hpts(bbr->rc_tp) &&
 			    ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) == 0)) {
 				/*
 				 * When we enter recovery, we need to restart
 				 * any timers. This may mean we gain an agg
 				 * early, which will be made up for at the last
 				 * rxt out.
 				 */
 				bbr->rc_timer_first = 1;
 				bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime);
 			}
 			/*
 			 * Calculate a new cwnd based on the current
 			 * delivery rate with no gain. We get the bdp
 			 * without gaining it up like we normally would and
 			 * we use the last cur_del_rate.
 			 */
 			if ((bbr->rc_use_google == 0) &&
 			    (bbr->r_ctl.bbr_rttprobe_gain_val ||
 			     (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT))) {
 				tp->snd_cwnd = ctf_flight_size(tp,
 					           (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) +
 					(tp->t_maxseg - bbr->rc_last_options);
 				if (tp->snd_cwnd < get_min_cwnd(bbr)) {
 					/* We always gate to min cwnd */
 					tp->snd_cwnd = get_min_cwnd(bbr);
 				}
 				bbr_log_type_cwndupd(bbr, 0, 0, 0, 14, 0, 0, __LINE__);
 			}
 			bbr_log_type_enter_rec(bbr, rsm->r_start);
 		}
 		break;
 	case CC_RTO_ERR:
 		KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
 		/* RTO was unnecessary, so reset everything. */
 		bbr_reset_lt_bw_sampling(bbr, bbr->r_ctl.rc_rcvtime);
 		if (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT) {
 			tp->snd_cwnd = tp->snd_cwnd_prev;
 			tp->snd_ssthresh = tp->snd_ssthresh_prev;
 			tp->snd_recover = tp->snd_recover_prev;
 			tp->snd_cwnd = max(tp->snd_cwnd, bbr->r_ctl.rc_cwnd_on_ent);
 			bbr_log_type_cwndupd(bbr, 0, 0, 0, 13, 0, 0, __LINE__);
 		}
 		tp->t_badrxtwin = 0;
 		break;
 	}
 }
 
 /*
  * Indicate whether this ack should be delayed.  We can delay the ack if
  * following conditions are met:
  *	- There is no delayed ack timer in progress.
  *	- Our last ack wasn't a 0-sized window. We never want to delay
  *	  the ack that opens up a 0-sized window.
  *	- LRO wasn't used for this segment. We make sure by checking that the
  *	  segment size is not larger than the MSS.
  *	- Delayed acks are enabled or this is a half-synchronized T/TCP
  *	  connection.
 *	- The data being acked is less than a full segment (a stretch ack
 *        of more than a segment should be acked immediately).
 *      - nsegs is 1 (if it's more than that we received more than 1 ack).
  */
 #define DELAY_ACK(tp, bbr, nsegs)				\
 	(((tp->t_flags & TF_RXWIN0SENT) == 0) &&		\
 	 ((tp->t_flags & TF_DELACK) == 0) && 		 	\
 	 ((bbr->bbr_segs_rcvd + nsegs) < tp->t_delayed_ack) &&	\
 	 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
 
 /*
  * Return the lowest RSM in the map of
  * packets still in flight that is not acked.
 * This should normally succeed on the first entry
  * since we remove packets from the send
  * map after they are marked ACKED.
  */
 static struct bbr_sendmap *
 bbr_find_lowest_rsm(struct tcp_bbr *bbr)
 {
 	struct bbr_sendmap *rsm;
 
 	/*
 	 * Walk the time-order transmitted list looking for an rsm that is
 	 * not acked. This will be the one that was sent the longest time
 	 * ago that is still outstanding.
 	 */
 	TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_tmap, r_tnext) {
 		if (rsm->r_flags & BBR_ACKED) {
 			continue;
 		}
 		goto finish;
 	}
 finish:
 	return (rsm);
 }
 
 static struct bbr_sendmap *
 bbr_find_high_nonack(struct tcp_bbr *bbr, struct bbr_sendmap *rsm)
 {
 	struct bbr_sendmap *prsm;
 
 	/*
 	 * Walk the sequence-ordered list backward until we arrive at
 	 * the highest seq not acked. In theory, when this is called, that
 	 * should be the last segment (though it was not, hence the walk).
 	 */
 	prsm = rsm;
 	TAILQ_FOREACH_REVERSE_FROM(prsm, &bbr->r_ctl.rc_map, bbr_head, r_next) {
 		if (prsm->r_flags & (BBR_ACKED | BBR_HAS_FIN)) {
 			continue;
 		}
 		return (prsm);
 	}
 	return (NULL);
 }
 
 /*
  * Returns to the caller the number of microseconds that
  * the packet can be outstanding before we think we
  * should have had an ack returned.
  */
 static uint32_t
 bbr_calc_thresh_rack(struct tcp_bbr *bbr, uint32_t srtt, uint32_t cts, struct bbr_sendmap *rsm)
 {
 	/*
 	 * lro is the flag we use to determine if we have seen reordering.
 	 * If it gets set we have seen reordering. The reorder logic either
 	 * works in one of two ways:
 	 *
 	 * If reorder-fade is configured, then we track the last time we saw
 	 * re-ordering occur. If we reach the point where enough time has
 	 * passed, we no longer consider reordering to be occurring.
 	 *
 	 * Or if reorder-fade is 0, then once we see reordering we consider
 	 * the connection to always be subject to reordering and just set lro
 	 * to 1.
 	 *
 	 * In the end if lro is non-zero we add the extra time for
 	 * reordering in.
 	 */
 	int32_t lro;
 	uint32_t thresh, t_rxtcur;
 
 	if (srtt == 0)
 		srtt = 1;
 	if (bbr->r_ctl.rc_reorder_ts) {
 		if (bbr->r_ctl.rc_reorder_fade) {
 			if (SEQ_GEQ(cts, bbr->r_ctl.rc_reorder_ts)) {
 				lro = cts - bbr->r_ctl.rc_reorder_ts;
 				if (lro == 0) {
 					/*
 					 * No time has passed since the last
 					 * reorder; mark it as reordering.
 					 */
 					lro = 1;
 				}
 			} else {
 				/* Negative time? */
 				lro = 0;
 			}
 			if (lro > bbr->r_ctl.rc_reorder_fade) {
 				/* Turn off reordering seen too */
 				bbr->r_ctl.rc_reorder_ts = 0;
 				lro = 0;
 			}
 		} else {
 			/* Reordering does not fade */
 			lro = 1;
 		}
 	} else {
 		lro = 0;
 	}
 	thresh = srtt + bbr->r_ctl.rc_pkt_delay;
 	if (lro) {
 		/* Use the configured shift; if it is not set you get 1/4 rtt */
 		if (bbr->r_ctl.rc_reorder_shift)
 			thresh += (srtt >> bbr->r_ctl.rc_reorder_shift);
 		else
 			thresh += (srtt >> 2);
 	} else {
 		thresh += 1000;
 	}
 	/* We don't let the rack timeout be above a RTO */
 	if ((bbr->rc_tp)->t_srtt == 0)
 		t_rxtcur = BBR_INITIAL_RTO;
 	else
 		t_rxtcur = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
 	if (thresh > t_rxtcur) {
 		thresh = t_rxtcur;
 	}
 	/* And we don't want it above the RTO max either */
 	if (thresh > (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND)) {
 		thresh = (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND);
 	}
 	bbr_log_thresh_choice(bbr, cts, thresh, lro, srtt, rsm, BBR_TO_FRM_RACK);
 	return (thresh);
 }
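 
 /*
  * Illustrative example of the RACK threshold above, with assumed values
  * srtt = 40,000 usec and rc_pkt_delay = 1,000 usec:
  *
  *   No reordering seen:   thresh = 40,000 + 1,000 + 1,000  = 42,000 usec
  *   Reordering seen and
  *   rc_reorder_shift = 0: thresh = 40,000 + 1,000 + 10,000 = 51,000 usec
  *                         (srtt >> 2 is the fallback 1/4 srtt pad)
  *
  * The result is then clamped so it never exceeds the current RTO
  * (t_rxtcur) nor rc_max_rto_sec.
  */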
 
 /*
 * Return to the caller the amount of time in microseconds
  * that should be used for the TLP timer from the last
  * send time of this packet.
  */
 static uint32_t
 bbr_calc_thresh_tlp(struct tcpcb *tp, struct tcp_bbr *bbr,
     struct bbr_sendmap *rsm, uint32_t srtt,
     uint32_t cts)
 {
 	uint32_t thresh, len, maxseg, t_rxtcur;
 	struct bbr_sendmap *prsm;
 
 	if (srtt == 0)
 		srtt = 1;
 	if (bbr->rc_tlp_threshold)
 		thresh = srtt + (srtt / bbr->rc_tlp_threshold);
 	else
 		thresh = (srtt * 2);
 	maxseg = tp->t_maxseg - bbr->rc_last_options;
 	/* Get the previously sent packet, if any */
 	len = rsm->r_end - rsm->r_start;
 
 	/* 2.1 behavior */
 	prsm = TAILQ_PREV(rsm, bbr_head, r_tnext);
 	if (prsm && (len <= maxseg)) {
 		/*
 		 * Two packets outstanding, thresh should be (2*srtt) +
 		 * possible inter-packet delay (if any).
 		 */
 		uint32_t inter_gap = 0;
 		int idx, nidx;
 
 		idx = rsm->r_rtr_cnt - 1;
 		nidx = prsm->r_rtr_cnt - 1;
 		if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], prsm->r_tim_lastsent[nidx])) {
 			/* Yes it was sent later (or at the same time) */
 			inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
 		}
 		thresh += inter_gap;
 	} else if (len <= maxseg) {
 		/*
 		 * Possibly compensate for delayed-ack.
 		 */
 		uint32_t alt_thresh;
 
 		alt_thresh = srtt + (srtt / 2) + bbr_delayed_ack_time;
 		if (alt_thresh > thresh)
 			thresh = alt_thresh;
 	}
 	/* Not above the current  RTO */
 	if (tp->t_srtt == 0)
 		t_rxtcur = BBR_INITIAL_RTO;
 	else
 		t_rxtcur = TICKS_2_USEC(tp->t_rxtcur);
 
 	bbr_log_thresh_choice(bbr, cts, thresh, t_rxtcur, srtt, rsm, BBR_TO_FRM_TLP);
 	/* Not above an RTO */
 	if (thresh > t_rxtcur) {
 		thresh = t_rxtcur;
 	}
 	/* Not above a RTO max */
 	if (thresh > (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND)) {
 		thresh = (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND);
 	}
 	/* And now apply the user TLP min */
 	if (thresh < bbr_tlp_min) {
 		thresh = bbr_tlp_min;
 	}
 	return (thresh);
 }
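 
 /*
  * Illustrative example of the TLP threshold above, with assumed values
  * srtt = 40,000 usec and rc_tlp_threshold unset (so the base is 2 * srtt):
  *
  *   Two small packets outstanding, sent 5,000 usec apart:
  *     thresh = 2 * 40,000 + 5,000 = 85,000 usec
  *   A single small packet with no prior send:
  *     alt_thresh = 40,000 + 20,000 + bbr_delayed_ack_time
  *     and the larger of thresh/alt_thresh is used, so the peer's delayed
  *     ack has a chance to arrive before we probe.
  *
  * The result is clamped to the current RTO and rc_max_rto_sec, and floored
  * at bbr_tlp_min.
  */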
 
 /*
  * Return one of three RTTs to use (in microseconds).
  */
 static __inline uint32_t
 bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type)
 {
 	uint32_t f_rtt;
 	uint32_t srtt;
 
 	f_rtt = get_filter_value_small(&bbr->r_ctl.rc_rttprop);
 	if (get_filter_value_small(&bbr->r_ctl.rc_rttprop) == 0xffffffff) {
 		/* We have no rtt at all */
 		if (bbr->rc_tp->t_srtt == 0)
 			f_rtt = BBR_INITIAL_RTO;
 		else
 			f_rtt = (TICKS_2_USEC(bbr->rc_tp->t_srtt) >> TCP_RTT_SHIFT);
 		/*
 		 * Since we don't know how good the rtt is apply a
 		 * delayed-ack min
 		 */
 		if (f_rtt < bbr_delayed_ack_time) {
 			f_rtt = bbr_delayed_ack_time;
 		}
 	}
 	/* Take the filter version or last measured pkt-rtt */
 	if (rtt_type == BBR_RTT_PROP) {
 		srtt = f_rtt;
 	} else if (rtt_type == BBR_RTT_PKTRTT) {
 		if (bbr->r_ctl.rc_pkt_epoch_rtt) {
 			srtt = bbr->r_ctl.rc_pkt_epoch_rtt;
 		} else {
 			/* No pkt rtt yet */
 			srtt = f_rtt;
 		}
 	} else if (rtt_type == BBR_RTT_RACK) {
 		srtt = bbr->r_ctl.rc_last_rtt;
 		/* We need to add in any internal delay for our timer */
 		if (bbr->rc_ack_was_delayed)
 			srtt += bbr->r_ctl.rc_ack_hdwr_delay;
 	} else if (rtt_type == BBR_SRTT) {
 		srtt = (TICKS_2_USEC(bbr->rc_tp->t_srtt) >> TCP_RTT_SHIFT);
 	} else {
 		/* TSNH */
 		srtt = f_rtt;
 #ifdef BBR_INVARIANTS
 		panic("Unknown rtt request type %d", rtt_type);
 #endif
 	}
 	return (srtt);
 }
 
 static int
 bbr_is_lost(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, uint32_t cts)
 {
 	uint32_t thresh;
 
 	thresh = bbr_calc_thresh_rack(bbr, bbr_get_rtt(bbr, BBR_RTT_RACK),
 				      cts, rsm);
 	if ((cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) >= thresh) {
 		/* It is lost (past time) */
 		return (1);
 	}
 	return (0);
 }
 
 /*
  * Return a sendmap if we need to retransmit something.
  */
 static struct bbr_sendmap *
 bbr_check_recovery_mode(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	/*
 	 * Check to see whether we need to fall into recovery. We will
 	 * need to do so if our oldest transmit is past the time we should
 	 * have had an ack.
 	 */
 
 	struct bbr_sendmap *rsm;
 	int32_t idx;
 
 	if (TAILQ_EMPTY(&bbr->r_ctl.rc_map)) {
 		/* Nothing outstanding that we know of */
 		return (NULL);
 	}
 	rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap);
 	if (rsm == NULL) {
 		/* Nothing in the transmit map */
 		return (NULL);
 	}
 	if (tp->t_flags & TF_SENTFIN) {
 		/* Fin restricted, don't find anything once a fin is sent */
 		return (NULL);
 	}
 	if (rsm->r_flags & BBR_ACKED) {
 		/*
 		 * Ok the first one is acked (this really should not happen
 		 * since we remove them from the tmap once they are acked)
 		 */
 		rsm = bbr_find_lowest_rsm(bbr);
 		if (rsm == NULL)
 			return (NULL);
 	}
 	idx = rsm->r_rtr_cnt - 1;
 	if (SEQ_LEQ(cts, rsm->r_tim_lastsent[idx])) {
 		/* Sent at or after cts? Then it can't have timed out yet */
 		return (NULL);
 	}
 	/* Get our RTT time */
 	if (bbr_is_lost(bbr, rsm, cts) &&
 	    ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
 	     (rsm->r_flags & BBR_SACK_PASSED))) {
 		if ((rsm->r_flags & BBR_MARKED_LOST) == 0) {
 			rsm->r_flags |= BBR_MARKED_LOST;
 			bbr->r_ctl.rc_lost += rsm->r_end - rsm->r_start;
 			bbr->r_ctl.rc_lost_bytes += rsm->r_end - rsm->r_start;
 		}
 		bbr_cong_signal(tp, NULL, CC_NDUPACK, rsm);
 #ifdef BBR_INVARIANTS
 		if ((rsm->r_end - rsm->r_start) == 0)
 			panic("tp:%p bbr:%p rsm:%p length is 0?", tp, bbr, rsm);
 #endif
 		return (rsm);
 	}
 	return (NULL);
 }
 
 /*
 * RACK Timer, here we simply do logging and housekeeping.
 * The normal bbr_output_wtime() function will call the
 * appropriate thing to check if we need to do a RACK retransmit.
 * We return 1, saying don't proceed with bbr_output_wtime, only
  * when all timers have been stopped (destroyed PCB?).
  */
 static int
 bbr_timeout_rack(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	/*
 	 * This timer simply provides an internal trigger to send out data.
 	 * The check_recovery_mode call will see if there are needed
 	 * retransmissions, if so we will enter fast-recovery. The output
 	 * call may or may not do the same thing depending on sysctl
 	 * settings.
 	 */
 	uint32_t lost;
 
 	if (bbr->rc_all_timers_stopped) {
 		return (1);
 	}
 	if (TSTMP_LT(cts, bbr->r_ctl.rc_timer_exp)) {
 		/* It's not time yet */
 		return (0);
 	}
 	BBR_STAT_INC(bbr_to_tot);
 	lost = bbr->r_ctl.rc_lost;
 	if (bbr->r_state && (bbr->r_state != tp->t_state))
 		bbr_set_state(tp, bbr, 0);
 	bbr_log_to_event(bbr, cts, BBR_TO_FRM_RACK);
 	if (bbr->r_ctl.rc_resend == NULL) {
 		/* Let's do the check here */
 		bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts);
 	}
 	if (bbr_policer_call_from_rack_to)
 		bbr_lt_bw_sampling(bbr, cts, (bbr->r_ctl.rc_lost > lost));
 	bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
 	return (0);
 }
 
 static __inline void
 bbr_clone_rsm(struct tcp_bbr *bbr, struct bbr_sendmap *nrsm, struct bbr_sendmap *rsm, uint32_t start)
 {
 	int idx;
 
 	nrsm->r_start = start;
 	nrsm->r_end = rsm->r_end;
 	nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
 	nrsm->r_rtt_not_allowed = rsm->r_rtt_not_allowed;
 	nrsm->r_flags = rsm->r_flags;
 	/* We don't transfer forward the SYN flag */
 	nrsm->r_flags &= ~BBR_HAS_SYN;
 	/* We move forward the FIN flag, not that this should happen */
 	rsm->r_flags &= ~BBR_HAS_FIN;
 	nrsm->r_dupack = rsm->r_dupack;
 	nrsm->r_rtr_bytes = 0;
 	nrsm->r_is_gain = rsm->r_is_gain;
 	nrsm->r_is_drain = rsm->r_is_drain;
 	nrsm->r_delivered = rsm->r_delivered;
 	nrsm->r_ts_valid = rsm->r_ts_valid;
 	nrsm->r_del_ack_ts = rsm->r_del_ack_ts;
 	nrsm->r_del_time = rsm->r_del_time;
 	nrsm->r_app_limited = rsm->r_app_limited;
 	nrsm->r_first_sent_time = rsm->r_first_sent_time;
 	nrsm->r_flight_at_send = rsm->r_flight_at_send;
 	/* We split a piece; the lower section loses any just_ret flag. */
 	nrsm->r_bbr_state = rsm->r_bbr_state;
 	for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
 		nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
 	}
 	rsm->r_end = nrsm->r_start;
 	idx = min((bbr->rc_tp->t_maxseg - bbr->rc_last_options), bbr->r_ctl.rc_pace_max_segs);
 	idx /= 8;
 	/* Check if we got too small */
 	if ((rsm->r_is_smallmap == 0) &&
 	    ((rsm->r_end - rsm->r_start) <= idx)) {
 		bbr->r_ctl.rc_num_small_maps_alloced++;
 		rsm->r_is_smallmap = 1;
 	}
 	/* Check the new one as well */
 	if ((nrsm->r_end - nrsm->r_start) <= idx) {
 		bbr->r_ctl.rc_num_small_maps_alloced++;
 		nrsm->r_is_smallmap = 1;
 	}
 }
 
 static int
 bbr_sack_mergable(struct bbr_sendmap *at,
 		  uint32_t start, uint32_t end)
 {
 	/*
 	 * Given a sack block defined by
 	 * start and end, and a current position
 	 * at, return 1 if either side of at
 	 * would show that the block is mergeable
 	 * to that side. A block to be mergeable
 	 * must have overlap with the start/end
 	 * and be in the SACK'd state.
 	 */
 	struct bbr_sendmap *l_rsm;
 	struct bbr_sendmap *r_rsm;
 
 	/* first get the either side blocks */
 	l_rsm = TAILQ_PREV(at, bbr_head, r_next);
 	r_rsm = TAILQ_NEXT(at, r_next);
 	if (l_rsm && (l_rsm->r_flags & BBR_ACKED)) {
 		/* Potentially mergeable */
 		if ((l_rsm->r_end == start) ||
 		    (SEQ_LT(start, l_rsm->r_end) &&
 		     SEQ_GT(end, l_rsm->r_end))) {
 			    /*
 			     * map blk   |------|
 			     * sack blk         |------|
 			     * <or>
 			     * map blk   |------|
 			     * sack blk      |------|
 			     */
 			    return (1);
 		    }
 	}
 	if (r_rsm && (r_rsm->r_flags & BBR_ACKED)) {
 		/* Potentially mergeable */
 		if ((r_rsm->r_start == end) ||
 		    (SEQ_LT(start, r_rsm->r_start) &&
 		     SEQ_GT(end, r_rsm->r_start))) {
 			/*
 			 * map blk          |---------|
 			 * sack blk    |----|
 			 * <or>
 			 * map blk          |---------|
 			 * sack blk    |-------|
 			 */
 			return (1);
 		}
 	}
 	return (0);
 }
 
 static struct bbr_sendmap *
 bbr_merge_rsm(struct tcp_bbr *bbr,
 	      struct bbr_sendmap *l_rsm,
 	      struct bbr_sendmap *r_rsm)
 {
 	/*
 	 * We are merging two ack'd RSM's,
 	 * the l_rsm is on the left (lower seq
 	 * values) and the r_rsm is on the right
 	 * (higher seq value). The simplest way
 	 * to merge these is to move the right
 	 * one into the left. I don't think there
 	 * is any reason we need to try to find
 	 * the oldest (or last oldest retransmitted).
 	 */
 	l_rsm->r_end = r_rsm->r_end;
 	if (l_rsm->r_dupack < r_rsm->r_dupack)
 		l_rsm->r_dupack = r_rsm->r_dupack;
 	if (r_rsm->r_rtr_bytes)
 		l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
 	if (r_rsm->r_in_tmap) {
 		/* This really should not happen */
 		TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, r_rsm, r_tnext);
 	}
 	if (r_rsm->r_app_limited)
 		l_rsm->r_app_limited = r_rsm->r_app_limited;
 	/* Now the flags */
 	if (r_rsm->r_flags & BBR_HAS_FIN)
 		l_rsm->r_flags |= BBR_HAS_FIN;
 	if (r_rsm->r_flags & BBR_TLP)
 		l_rsm->r_flags |= BBR_TLP;
 	if (r_rsm->r_flags & BBR_RWND_COLLAPSED)
 		l_rsm->r_flags |= BBR_RWND_COLLAPSED;
 	if (r_rsm->r_flags & BBR_MARKED_LOST) {
 		/* This really should not happen */
 		bbr->r_ctl.rc_lost_bytes -= r_rsm->r_end - r_rsm->r_start;
 	}
 	TAILQ_REMOVE(&bbr->r_ctl.rc_map, r_rsm, r_next);
 	if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
 		/* Transfer the split limit to the map we free */
 		r_rsm->r_limit_type = l_rsm->r_limit_type;
 		l_rsm->r_limit_type = 0;
 	}
 	bbr_free(bbr, r_rsm);
 	return(l_rsm);
 }
 
 /*
 * TLP Timer, here we simply set up what segment we want to
 * have the TLP expire on; the normal bbr_output_wtime() will then
  * send it out.
  *
  * We return 1, saying don't proceed with bbr_output_wtime only
  * when all timers have been stopped (destroyed PCB?).
  */
 static int
 bbr_timeout_tlp(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	/*
 	 * Tail Loss Probe.
 	 */
 	struct bbr_sendmap *rsm = NULL;
 	struct socket *so;
 	uint32_t amm;
 	uint32_t out, avail;
 	uint32_t maxseg;
 	int collapsed_win = 0;
 
 	if (bbr->rc_all_timers_stopped) {
 		return (1);
 	}
 	if (TSTMP_LT(cts, bbr->r_ctl.rc_timer_exp)) {
 		/* It's not time yet */
 		return (0);
 	}
 	if (ctf_progress_timeout_check(tp, true)) {
 		bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 		return (-ETIMEDOUT);	/* tcp_drop() */
 	}
 	/* Did we somehow get into persists? */
 	if (bbr->rc_in_persist) {
 		return (0);
 	}
 	if (bbr->r_state && (bbr->r_state != tp->t_state))
 		bbr_set_state(tp, bbr, 0);
 	BBR_STAT_INC(bbr_tlp_tot);
 	maxseg = tp->t_maxseg - bbr->rc_last_options;
 	/*
 	 * A TLP timer has expired. We have been idle for 2 rtts. So we now
 	 * need to figure out how to force a full MSS segment out.
 	 */
 	so = tptosocket(tp);
 	avail = sbavail(&so->so_snd);
 	out = ctf_outstanding(tp);
 	if (out > tp->snd_wnd) {
 		/* special case, we need a retransmission */
 		collapsed_win = 1;
 		goto need_retran;
 	}
 	if (avail > out) {
 		/* New data is available */
 		amm = avail - out;
 		if (amm > maxseg) {
 			amm = maxseg;
 		} else if ((amm < maxseg) && ((tp->t_flags & TF_NODELAY) == 0)) {
 			/* not enough to fill an MTU and no-delay is off */
 			goto need_retran;
 		}
 		/* Set the send-new override */
 		if ((out + amm) <= tp->snd_wnd) {
 			bbr->rc_tlp_new_data = 1;
 		} else {
 			goto need_retran;
 		}
 		bbr->r_ctl.rc_tlp_seg_send_cnt = 0;
 		bbr->r_ctl.rc_last_tlp_seq = tp->snd_max;
 		bbr->r_ctl.rc_tlp_send = NULL;
 		/* cap any slots */
 		BBR_STAT_INC(bbr_tlp_newdata);
 		goto send;
 	}
 need_retran:
 	/*
 	 * Ok we need to arrange the last un-acked segment to be re-sent, or
 	 * optionally the first un-acked segment.
 	 */
 	if (collapsed_win == 0) {
 		rsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_map, bbr_sendmap, r_next);
 		if (rsm && (rsm->r_flags & (BBR_ACKED | BBR_HAS_FIN))) {
 			rsm = bbr_find_high_nonack(bbr, rsm);
 		}
 		if (rsm == NULL) {
 			goto restore;
 		}
 	} else {
 		/*
 		 * We must find the last segment
 		 * that was acceptable by the client.
 		 */
 		TAILQ_FOREACH_REVERSE(rsm, &bbr->r_ctl.rc_map, bbr_head, r_next) {
 			if ((rsm->r_flags & BBR_RWND_COLLAPSED) == 0) {
 				/* Found one */
 				break;
 			}
 		}
 		if (rsm == NULL) {
 			/* None? if so send the first */
 			rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 			if (rsm == NULL)
 				goto restore;
 		}
 	}
 	if ((rsm->r_end - rsm->r_start) > maxseg) {
 		/*
 		 * We need to split this, the last segment, in two.
 		 */
 		struct bbr_sendmap *nrsm;
 
 		nrsm = bbr_alloc_full_limit(bbr);
 		if (nrsm == NULL) {
 			/*
 			 * We can't get memory to split it, so we can either
 			 * not split it, or retransmit the whole piece; let's
 			 * do the large send (BTLP :-) ).
 			 */
 			goto go_for_it;
 		}
 		bbr_clone_rsm(bbr, nrsm, rsm, (rsm->r_end - maxseg));
 		TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next);
 		if (rsm->r_in_tmap) {
 			TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 			nrsm->r_in_tmap = 1;
 		}
 		rsm->r_flags &= (~BBR_HAS_FIN);
 		rsm = nrsm;
 	}
 go_for_it:
 	bbr->r_ctl.rc_tlp_send = rsm;
 	bbr->rc_tlp_rtx_out = 1;
 	if (rsm->r_start == bbr->r_ctl.rc_last_tlp_seq) {
 		bbr->r_ctl.rc_tlp_seg_send_cnt++;
 		tp->t_rxtshift++;
 	} else {
 		bbr->r_ctl.rc_last_tlp_seq = rsm->r_start;
 		bbr->r_ctl.rc_tlp_seg_send_cnt = 1;
 	}
 send:
 	if (bbr->r_ctl.rc_tlp_seg_send_cnt > bbr_tlp_max_resend) {
 		/*
 		 * Can't [re]/transmit a segment we have retransmitted the
 		 * max times. We need the retransmit timer to take over.
 		 */
 restore:
 		bbr->rc_tlp_new_data = 0;
 		bbr->r_ctl.rc_tlp_send = NULL;
 		if (rsm)
 			rsm->r_flags &= ~BBR_TLP;
 		BBR_STAT_INC(bbr_tlp_retran_fail);
 		return (0);
 	} else if (rsm) {
 		rsm->r_flags |= BBR_TLP;
 	}
 	if (rsm && (rsm->r_start == bbr->r_ctl.rc_last_tlp_seq) &&
 	    (bbr->r_ctl.rc_tlp_seg_send_cnt > bbr_tlp_max_resend)) {
 		/*
 		 * We have retransmitted too many times for TLP. Switch to
 		 * the regular RTO timer
 		 */
 		goto restore;
 	}
 	bbr_log_to_event(bbr, cts, BBR_TO_FRM_TLP);
 	bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
 	return (0);
 }
 
 /*
  * Delayed ack Timer, here we simply need to setup the
  * ACK_NOW flag and remove the DELACK flag. From there
  * the output routine will send the ack out.
  *
  * We only return 1, saying don't proceed, if all timers
  * are stopped (destroyed PCB?).
  */
 static int
 bbr_timeout_delack(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	if (bbr->rc_all_timers_stopped) {
 		return (1);
 	}
 	bbr_log_to_event(bbr, cts, BBR_TO_FRM_DELACK);
 	tp->t_flags &= ~TF_DELACK;
 	tp->t_flags |= TF_ACKNOW;
 	KMOD_TCPSTAT_INC(tcps_delack);
 	bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
 	return (0);
 }
 
 /*
  * Here we send a KEEP-ALIVE like probe to the
  * peer, we do not send data.
  *
  * We only return 1, saying don't proceed, if all timers
  * are stopped (destroyed PCB?).
  */
 static int
 bbr_timeout_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	struct tcptemp *t_template;
 	int32_t retval = 1;
 
 	if (bbr->rc_all_timers_stopped) {
 		return (1);
 	}
 	if (bbr->rc_in_persist == 0)
 		return (0);
 
 	/*
 	 * Persistence timer into zero window. Force a byte to be output, if
 	 * possible.
 	 */
 	bbr_log_to_event(bbr, cts, BBR_TO_FRM_PERSIST);
 	bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
 	KMOD_TCPSTAT_INC(tcps_persisttimeo);
 	/*
 	 * Have we exceeded the user specified progress time?
 	 */
 	if (ctf_progress_timeout_check(tp, true)) {
 		bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 		return (-ETIMEDOUT);	/* tcp_drop() */
 	}
 	/*
 	 * Hack: if the peer is dead/unreachable, we do not time out if the
 	 * window is closed.  After a full backoff, drop the connection if
 	 * the idle time (no responses to probes) reaches the maximum
 	 * backoff that we would use if retransmitting.
 	 */
 	if (tp->t_rxtshift >= V_tcp_retries &&
 	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
 	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
 		KMOD_TCPSTAT_INC(tcps_persistdrop);
 		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
 		return (-ETIMEDOUT);	/* tcp_drop() */
 	}
 	if ((sbavail(&bbr->rc_inp->inp_socket->so_snd) == 0) &&
 	    tp->snd_una == tp->snd_max) {
 		bbr_exit_persist(tp, bbr, cts, __LINE__);
 		retval = 0;
 		goto out;
 	}
 	/*
 	 * If the user has closed the socket then drop a persisting
 	 * connection after a much reduced timeout.
 	 */
 	if (tp->t_state > TCPS_CLOSE_WAIT &&
 	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
 		KMOD_TCPSTAT_INC(tcps_persistdrop);
 		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
 		return (-ETIMEDOUT);	/* tcp_drop() */
 	}
 	t_template = tcpip_maketemplate(bbr->rc_inp);
 	if (t_template) {
 		tcp_respond(tp, t_template->tt_ipgen,
 			    &t_template->tt_t, (struct mbuf *)NULL,
 			    tp->rcv_nxt, tp->snd_una - 1, 0);
 		/* This sends an ack */
 		if (tp->t_flags & TF_DELACK)
 			tp->t_flags &= ~TF_DELACK;
 		free(t_template, M_TEMP);
 	}
 	if (tp->t_rxtshift < V_tcp_retries)
 		tp->t_rxtshift++;
 	bbr_start_hpts_timer(bbr, tp, cts, 3, 0, 0);
 out:
 	return (retval);
 }
 
 /*
  * If a keepalive goes off, we had no other timers
  * happening. We always return 1 here since this
  * routine either drops the connection or sends
 * out a probe segment via tcp_respond().
  */
 static int
 bbr_timeout_keepalive(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	struct tcptemp *t_template;
 	struct inpcb *inp = tptoinpcb(tp);
 
 	if (bbr->rc_all_timers_stopped) {
 		return (1);
 	}
 	bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
 	bbr_log_to_event(bbr, cts, BBR_TO_FRM_KEEP);
 	/*
 	 * Keep-alive timer went off; send something or drop connection if
 	 * idle for too long.
 	 */
 	KMOD_TCPSTAT_INC(tcps_keeptimeo);
 	if (tp->t_state < TCPS_ESTABLISHED)
 		goto dropit;
 	if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
 	    tp->t_state <= TCPS_CLOSING) {
 		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
 			goto dropit;
 		/*
 		 * Send a packet designed to force a response if the peer is
 		 * up and reachable: either an ACK if the connection is
 		 * still alive, or an RST if the peer has closed the
 		 * connection due to timeout or reboot. Using sequence
 		 * number tp->snd_una-1 causes the transmitted zero-length
 		 * segment to lie outside the receive window; by the
 		 * protocol spec, this requires the correspondent TCP to
 		 * respond.
 		 */
 		KMOD_TCPSTAT_INC(tcps_keepprobe);
 		t_template = tcpip_maketemplate(inp);
 		if (t_template) {
 			tcp_respond(tp, t_template->tt_ipgen,
 			    &t_template->tt_t, (struct mbuf *)NULL,
 			    tp->rcv_nxt, tp->snd_una - 1, 0);
 			free(t_template, M_TEMP);
 		}
 	}
 	bbr_start_hpts_timer(bbr, tp, cts, 4, 0, 0);
 	return (1);
 dropit:
 	KMOD_TCPSTAT_INC(tcps_keepdrops);
 	tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
 	return (-ETIMEDOUT);	/* tcp_drop() */
 }
 
 /*
 * Retransmit helper function, clears up all the ack
 * flags and takes care of important bookkeeping.
  */
 static void
 bbr_remxt_tmr(struct tcpcb *tp)
 {
 	/*
 	 * The retransmit timer went off, all sack'd blocks must be
 	 * un-acked.
 	 */
 	struct bbr_sendmap *rsm, *trsm = NULL;
 	struct tcp_bbr *bbr;
 	uint32_t cts, lost;
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	cts = tcp_get_usecs(&bbr->rc_tv);
 	lost = bbr->r_ctl.rc_lost;
 	if (bbr->r_state && (bbr->r_state != tp->t_state))
 		bbr_set_state(tp, bbr, 0);
 
 	TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) {
 		if (rsm->r_flags & BBR_ACKED) {
 			uint32_t old_flags;
 
 			rsm->r_dupack = 0;
 			if (rsm->r_in_tmap == 0) {
 				/* We must re-add it to the tlist */
 				if (trsm == NULL) {
 					TAILQ_INSERT_HEAD(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 				} else {
 					TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, trsm, rsm, r_tnext);
 				}
 				rsm->r_in_tmap = 1;
 			}
 			old_flags = rsm->r_flags;
 			rsm->r_flags |= BBR_RXT_CLEARED;
 			rsm->r_flags &= ~(BBR_ACKED | BBR_SACK_PASSED | BBR_WAS_SACKPASS);
 			bbr_log_type_rsmclear(bbr, cts, rsm, old_flags, __LINE__);
 		} else {
 			if ((tp->t_state < TCPS_ESTABLISHED) &&
 			    (rsm->r_start == tp->snd_una)) {
 				/*
 				 * Special case for TCP FO, where
 				 * we sent more data beyond the snd_max.
 				 * We don't mark that as lost; stop here.
 				 */
 				break;
 			}
 			if ((rsm->r_flags & BBR_MARKED_LOST) == 0) {
 				bbr->r_ctl.rc_lost += rsm->r_end - rsm->r_start;
 				bbr->r_ctl.rc_lost_bytes += rsm->r_end - rsm->r_start;
 			}
 			if (bbr_marks_rxt_sack_passed) {
 				/*
 				 * With this option, we will rack out
 				 * in 1ms increments the rest of the packets.
 				 */
 				rsm->r_flags |= BBR_SACK_PASSED | BBR_MARKED_LOST;
 				rsm->r_flags &= ~BBR_WAS_SACKPASS;
 			} else {
 				/*
 				 * With this option we only mark them lost
 				 * and remove all sack'd markings. We will run
 				 * another RXT or a TLP. This will cause
 				 * us to eventually send more based on what
 				 * ack's come in.
 				 */
 				rsm->r_flags |= BBR_MARKED_LOST;
 				rsm->r_flags &= ~BBR_WAS_SACKPASS;
 				rsm->r_flags &= ~BBR_SACK_PASSED;
 			}
 		}
 		trsm = rsm;
 	}
 	bbr->r_ctl.rc_resend = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 	/* Clear the count (we just un-acked them) */
 	bbr_log_to_event(bbr, cts, BBR_TO_FRM_TMR);
 	bbr->rc_tlp_new_data = 0;
 	bbr->r_ctl.rc_tlp_seg_send_cnt = 0;
 	/* zap the behindness on a rxt */
 	bbr->r_ctl.rc_hptsi_agg_delay = 0;
 	bbr->r_agg_early_set = 0;
 	bbr->r_ctl.rc_agg_early = 0;
 	bbr->rc_tlp_rtx_out = 0;
 	bbr->r_ctl.rc_sacked = 0;
 	bbr->r_ctl.rc_sacklast = NULL;
 	bbr->r_timer_override = 1;
 	bbr_lt_bw_sampling(bbr, cts, (bbr->r_ctl.rc_lost > lost));
 }
 
 /*
  * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
  * we will setup to retransmit the lowest seq number outstanding.
  */
 static int
 bbr_timeout_rxt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	int32_t rexmt;
 	int32_t retval = 0;
 	bool isipv6;
 
 	bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
 	if (bbr->rc_all_timers_stopped) {
 		return (1);
 	}
 	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    (tp->snd_una == tp->snd_max)) {
 		/* Nothing outstanding .. nothing to do */
 		return (0);
 	}
 	/*
 	 * Retransmission timer went off.  Message has not been acked within
 	 * retransmit interval.  Back off to a longer retransmit interval
 	 * and retransmit one segment.
 	 */
 	if (ctf_progress_timeout_check(tp, true)) {
 		bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 		return (-ETIMEDOUT);	/* tcp_drop() */
 	}
 	bbr_remxt_tmr(tp);
 	if ((bbr->r_ctl.rc_resend == NULL) ||
 	    ((bbr->r_ctl.rc_resend->r_flags & BBR_RWND_COLLAPSED) == 0)) {
 		/*
 		 * If the rwnd collapsed on
 		 * the one we are retransmitting
 		 * it does not count against the
 		 * rxt count.
 		 */
 		tp->t_rxtshift++;
 	}
 	if (tp->t_rxtshift > V_tcp_retries) {
 		tp->t_rxtshift = V_tcp_retries;
 		KMOD_TCPSTAT_INC(tcps_timeoutdrop);
 		tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
 		/* XXXGL: previously t_softerror was casted to uint16_t */
 		MPASS(tp->t_softerror >= 0);
 		retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT;
 		return (retval);	/* tcp_drop() */
 	}
 	if (tp->t_state == TCPS_SYN_SENT) {
 		/*
 		 * If the SYN was retransmitted, indicate CWND to be limited
 		 * to 1 segment in cc_conn_init().
 		 */
 		tp->snd_cwnd = 1;
 	} else if (tp->t_rxtshift == 1) {
 		/*
 		 * first retransmit; record ssthresh and cwnd so they can be
 		 * recovered if this turns out to be a "bad" retransmit. A
 		 * retransmit is considered "bad" if an ACK for this segment
 		 * is received within RTT/2 interval; the assumption here is
 		 * that the ACK was already in flight.  See "On Estimating
 		 * End-to-End Network Path Properties" by Allman and Paxson
 		 * for more details.
 		 */
 		tp->snd_cwnd = tp->t_maxseg - bbr->rc_last_options;
 		if (!IN_RECOVERY(tp->t_flags)) {
 			tp->snd_cwnd_prev = tp->snd_cwnd;
 			tp->snd_ssthresh_prev = tp->snd_ssthresh;
 			tp->snd_recover_prev = tp->snd_recover;
 			tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
 			tp->t_flags |= TF_PREVVALID;
 		} else {
 			tp->t_flags &= ~TF_PREVVALID;
 		}
 		tp->snd_cwnd = tp->t_maxseg - bbr->rc_last_options;
 	} else {
 		tp->snd_cwnd = tp->t_maxseg - bbr->rc_last_options;
 		tp->t_flags &= ~TF_PREVVALID;
 	}
 	KMOD_TCPSTAT_INC(tcps_rexmttimeo);
 	if ((tp->t_state == TCPS_SYN_SENT) ||
 	    (tp->t_state == TCPS_SYN_RECEIVED))
 		rexmt = USEC_2_TICKS(BBR_INITIAL_RTO) * tcp_backoff[tp->t_rxtshift];
 	else
 		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
 	TCPT_RANGESET(tp->t_rxtcur, rexmt,
 	    MSEC_2_TICKS(bbr->r_ctl.rc_min_rto_ms),
 	    MSEC_2_TICKS(((uint32_t)bbr->rc_max_rto_sec) * 1000));
 	/*
 	 * We enter the path for PLMTUD if the connection is established or
 	 * in FIN_WAIT_1 status; the reason for the latter is that if the
 	 * amount of data we send is very small, we could send it in a couple
 	 * of packets and proceed straight to FIN. In that case we won't
 	 * catch the ESTABLISHED state.
 	 */
 #ifdef INET6
 	isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
 #else
 	isipv6 = false;
 #endif
 	if (((V_tcp_pmtud_blackhole_detect == 1) ||
 	    (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
 	    (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
 	    ((tp->t_state == TCPS_ESTABLISHED) ||
 	    (tp->t_state == TCPS_FIN_WAIT_1))) {
 		/*
 		 * The idea here is that each stage of the mtu probe (usually,
 		 * 1448 -> 1188 -> 524) should be given 2 chances to recover
 		 * before further clamping down. 'tp->t_rxtshift % 2 == 0'
 		 * should take care of that.
 		 */
 		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
 		    (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
 		    (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
 		    tp->t_rxtshift % 2 == 0)) {
 			/*
 			 * Enter Path MTU Black-hole Detection mechanism: -
 			 * Disable Path MTU Discovery (IP "DF" bit). -
 			 * Reduce MTU to lower value than what we negotiated
 			 * with peer.
 			 */
 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
 				/*
 				 * Record that we may have found a black
 				 * hole.
 				 */
 				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
 				/* Keep track of previous MSS. */
 				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
 			}
 			/*
 			 * Reduce the MSS to blackhole value or to the
 			 * default in an attempt to retransmit.
 			 */
 #ifdef INET6
 			isipv6 = bbr->r_is_v6;
 			if (isipv6 &&
 			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
 				/* Use the sysctl tuneable blackhole MSS. */
 				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
 			} else if (isipv6) {
 				/* Use the default MSS. */
 				tp->t_maxseg = V_tcp_v6mssdflt;
 				/*
 				 * Disable Path MTU Discovery when we switch
 				 * to minmss.
 				 */
 				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
 			}
 #endif
 #if defined(INET6) && defined(INET)
 			else
 #endif
 #ifdef INET
 			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
 				/* Use the sysctl tuneable blackhole MSS. */
 				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
 			} else {
 				/* Use the default MSS. */
 				tp->t_maxseg = V_tcp_mssdflt;
 				/*
 				 * Disable Path MTU Discovery when we switch
 				 * to minmss.
 				 */
 				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
 			}
 #endif
 		} else {
 			/*
 			 * If further retransmissions are still unsuccessful
 			 * with a lowered MTU, maybe this isn't a blackhole
 			 * and we restore the previous MSS and blackhole
 			 * detection flags. The limit '6' is determined by
 			 * giving each probe stage (1448, 1188, 524) 2
 			 * chances to recover.
 			 */
 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
 			    (tp->t_rxtshift >= 6)) {
 				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
 				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
 			}
 		}
 	}
 	/*
 	 * Disable RFC1323 and SACK if we haven't got any response to our
 	 * third SYN to work-around some broken terminal servers (most of
 	 * which have hopefully been retired) that have bad VJ header
 	 * compression code which trashes TCP segments containing
 	 * unknown-to-them TCP options.
 	 */
 	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
 	    (tp->t_rxtshift == 3))
 		tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_SACK_PERMIT);
 	/*
 	 * If we backed off this far, our srtt estimate is probably bogus.
 	 * Clobber it so we'll take the next rtt measurement as our srtt;
 	 * move the current srtt into rttvar to keep the current retransmit
 	 * times until then.
 	 */
 	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
 #ifdef INET6
 		if (bbr->r_is_v6)
 			in6_losing(inp);
 		else
 #endif
 			in_losing(inp);
 		tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
 		tp->t_srtt = 0;
 	}
 	sack_filter_clear(&bbr->r_ctl.bbr_sf, tp->snd_una);
 	tp->snd_recover = tp->snd_max;
 	tp->t_flags |= TF_ACKNOW;
 	tp->t_rtttime = 0;
 
 	return (retval);
 }
 
 static int
 bbr_process_timers(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, uint8_t hpts_calling)
 {
 	int32_t ret = 0;
 	int32_t timers = (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
 
 	if (timers == 0) {
 		return (0);
 	}
 	if (tp->t_state == TCPS_LISTEN) {
 		/* no timers on listen sockets */
 		if (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
 			return (0);
 		return (1);
 	}
 	if (TSTMP_LT(cts, bbr->r_ctl.rc_timer_exp)) {
 		uint32_t left;
 
 		if (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
 			ret = -1;
 			bbr_log_to_processing(bbr, cts, ret, 0, hpts_calling);
 			return (0);
 		}
 		if (hpts_calling == 0) {
 			ret = -2;
 			bbr_log_to_processing(bbr, cts, ret, 0, hpts_calling);
 			return (0);
 		}
 		/*
 		 * Ok, our timer went off early and we are not being paced;
 		 * false alarm, go back to sleep.
 		 */
 		left = bbr->r_ctl.rc_timer_exp - cts;
 		ret = -3;
 		bbr_log_to_processing(bbr, cts, ret, left, hpts_calling);
 		tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(left));
 		return (1);
 	}
 	bbr->rc_tmr_stopped = 0;
 	bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
 	if (timers & PACE_TMR_DELACK) {
 		ret = bbr_timeout_delack(tp, bbr, cts);
 	} else if (timers & PACE_TMR_PERSIT) {
 		ret = bbr_timeout_persist(tp, bbr, cts);
 	} else if (timers & PACE_TMR_RACK) {
 		bbr->r_ctl.rc_tlp_rxt_last_time = cts;
 		ret = bbr_timeout_rack(tp, bbr, cts);
 	} else if (timers & PACE_TMR_TLP) {
 		bbr->r_ctl.rc_tlp_rxt_last_time = cts;
 		ret = bbr_timeout_tlp(tp, bbr, cts);
 	} else if (timers & PACE_TMR_RXT) {
 		bbr->r_ctl.rc_tlp_rxt_last_time = cts;
 		ret = bbr_timeout_rxt(tp, bbr, cts);
 	} else if (timers & PACE_TMR_KEEP) {
 		ret = bbr_timeout_keepalive(tp, bbr, cts);
 	}
 	bbr_log_to_processing(bbr, cts, ret, timers, hpts_calling);
 	return (ret);
 }
 
 static void
 bbr_timer_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts)
 {
 	if (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 		uint8_t hpts_removed = 0;
 
 		if (tcp_in_hpts(bbr->rc_tp) &&
 		    (bbr->rc_timer_first == 1)) {
 			/*
 			 * If we are canceling timers while the timer is ahead
 			 * of the output being paced, we must also remove
 			 * ourselves from the hpts.
 			 */
 			hpts_removed = 1;
 			tcp_hpts_remove(bbr->rc_tp);
 			if (bbr->r_ctl.rc_last_delay_val) {
 				/* Update the last hptsi delay too */
 				uint32_t time_since_send;
 
 				if (TSTMP_GT(cts, bbr->rc_pacer_started))
 					time_since_send = cts - bbr->rc_pacer_started;
 				else
 					time_since_send = 0;
 				if (bbr->r_ctl.rc_last_delay_val > time_since_send) {
 					/* Cut down our slot time */
 					bbr->r_ctl.rc_last_delay_val -= time_since_send;
 				} else {
 					bbr->r_ctl.rc_last_delay_val = 0;
 				}
 				bbr->rc_pacer_started = cts;
 			}
 		}
 		bbr->rc_timer_first = 0;
 		bbr_log_to_cancel(bbr, line, cts, hpts_removed);
 		bbr->rc_tmr_stopped = bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
 		bbr->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
 	}
 }
 
 static int
 bbr_stopall(struct tcpcb *tp)
 {
 	struct tcp_bbr *bbr;
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	bbr->rc_all_timers_stopped = 1;
+
+	tcp_hpts_remove(tp);
+
 	return (0);
 }
 
 static uint32_t
 bbr_get_earliest_send_outstanding(struct tcp_bbr *bbr, struct bbr_sendmap *u_rsm, uint32_t cts)
 {
 	struct bbr_sendmap *rsm;
 
 	rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap);
 	if ((rsm == NULL) || (u_rsm == rsm))
 		return (cts);
 	return(rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
 }
 
 static void
 bbr_update_rsm(struct tcpcb *tp, struct tcp_bbr *bbr,
      struct bbr_sendmap *rsm, uint32_t cts, uint32_t pacing_time)
 {
 	int32_t idx;
 
 	rsm->r_rtr_cnt++;
 	rsm->r_dupack = 0;
 	if (rsm->r_rtr_cnt > BBR_NUM_OF_RETRANS) {
 		rsm->r_rtr_cnt = BBR_NUM_OF_RETRANS;
 		rsm->r_flags |= BBR_OVERMAX;
 	}
 	if (rsm->r_flags & BBR_RWND_COLLAPSED) {
 		/* Take off the collapsed flag at rxt */
 		rsm->r_flags &= ~BBR_RWND_COLLAPSED;
 	}
 	if (rsm->r_flags & BBR_MARKED_LOST) {
 		/* We have retransmitted, it's no longer lost */
 		rsm->r_flags &= ~BBR_MARKED_LOST;
 		bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start;
 	}
 	if (rsm->r_flags & BBR_RXT_CLEARED) {
 		/*
 		 * We hit an RXT timer on it and
 		 * we cleared the "acked" flag.
 		 * It is now going back into
 		 * flight, so we can remove the cleared
 		 * flag and possibly do accounting on
 		 * this piece.
 		 */
 		rsm->r_flags &= ~BBR_RXT_CLEARED;
 	}
 	if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & BBR_TLP) == 0)) {
 		bbr->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
 		rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
 	}
 	idx = rsm->r_rtr_cnt - 1;
 	rsm->r_tim_lastsent[idx] = cts;
 	rsm->r_pacing_delay = pacing_time;
 	rsm->r_delivered = bbr->r_ctl.rc_delivered;
 	rsm->r_ts_valid = bbr->rc_ts_valid;
 	if (bbr->rc_ts_valid)
 		rsm->r_del_ack_ts = bbr->r_ctl.last_inbound_ts;
 	if (bbr->r_ctl.r_app_limited_until)
 		rsm->r_app_limited = 1;
 	else
 		rsm->r_app_limited = 0;
 	if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW)
 		rsm->r_bbr_state = bbr_state_val(bbr);
 	else
 		rsm->r_bbr_state = 8;
 	if (rsm->r_flags & BBR_ACKED) {
 		/* Probably MTU discovery messing with us */
 		uint32_t old_flags;
 
 		old_flags = rsm->r_flags;
 		rsm->r_flags &= ~BBR_ACKED;
 		bbr_log_type_rsmclear(bbr, cts, rsm, old_flags, __LINE__);
 		bbr->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
 		if (bbr->r_ctl.rc_sacked == 0)
 			bbr->r_ctl.rc_sacklast = NULL;
 	}
 	if (rsm->r_in_tmap) {
 		TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 	}
 	TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 	rsm->r_in_tmap = 1;
 	if (rsm->r_flags & BBR_SACK_PASSED) {
 		/* We have retransmitted due to the SACK pass */
 		rsm->r_flags &= ~BBR_SACK_PASSED;
 		rsm->r_flags |= BBR_WAS_SACKPASS;
 	}
 	rsm->r_first_sent_time = bbr_get_earliest_send_outstanding(bbr, rsm, cts);
 	rsm->r_flight_at_send = ctf_flight_size(bbr->rc_tp,
 						(bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 	bbr->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next);
 	if (bbr->r_ctl.rc_bbr_hptsi_gain > BBR_UNIT) {
 		rsm->r_is_gain = 1;
 		rsm->r_is_drain = 0;
 	} else if (bbr->r_ctl.rc_bbr_hptsi_gain < BBR_UNIT) {
 		rsm->r_is_drain = 1;
 		rsm->r_is_gain = 0;
 	} else {
 		rsm->r_is_drain = 0;
 		rsm->r_is_gain = 0;
 	}
 	rsm->r_del_time = bbr->r_ctl.rc_del_time; /* TEMP GOOGLE CODE */
 }
 
 /*
  * Returns 0, or the sequence where we stopped
  * updating. We also update the lenp to be the amount
  * of data left.
  */
 
 static uint32_t
 bbr_update_entry(struct tcpcb *tp, struct tcp_bbr *bbr,
     struct bbr_sendmap *rsm, uint32_t cts, int32_t *lenp, uint32_t pacing_time)
 {
 	/*
 	 * We (re-)transmitted starting at rsm->r_start for some length
 	 * (possibly less than r_end).
 	 */
 	struct bbr_sendmap *nrsm;
 	uint32_t c_end;
 	int32_t len;
 
 	len = *lenp;
 	c_end = rsm->r_start + len;
 	if (SEQ_GEQ(c_end, rsm->r_end)) {
 		/*
 		 * We retransmitted the whole piece, or more than the
 		 * whole piece, slopping over into the next rsm.
 		 */
 		bbr_update_rsm(tp, bbr, rsm, cts, pacing_time);
 		if (c_end == rsm->r_end) {
 			*lenp = 0;
 			return (0);
 		} else {
 			int32_t act_len;
 
 			/* Hangs over the end; return what's left */
 			act_len = rsm->r_end - rsm->r_start;
 			*lenp = (len - act_len);
 			return (rsm->r_end);
 		}
 		/* We don't get out of this block. */
 	}
 	/*
 	 * Here we retransmitted less than the whole thing which means we
 	 * have to split this into what was transmitted and what was not.
 	 */
 	nrsm = bbr_alloc_full_limit(bbr);
 	if (nrsm == NULL) {
 		*lenp = 0;
 		return (0);
 	}
 	/*
 	 * So here we are going to take the original rsm and make it what we
 	 * retransmitted. nrsm will be the tail portion we did not
 	 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
 	 * we retransmitted 5 bytes (sequences 1 through 5). The original piece shrinks to
 	 * 1, 6 and the new piece will be 6, 11.
 	 */
 	bbr_clone_rsm(bbr, nrsm, rsm, c_end);
 	TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next);
 	nrsm->r_dupack = 0;
 	if (rsm->r_in_tmap) {
 		TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 		nrsm->r_in_tmap = 1;
 	}
 	rsm->r_flags &= (~BBR_HAS_FIN);
 	bbr_update_rsm(tp, bbr, rsm, cts, pacing_time);
 	*lenp = 0;
 	return (0);
 }
 
 static uint64_t
 bbr_get_hardware_rate(struct tcp_bbr *bbr)
 {
 	uint64_t bw;
 
 	bw = bbr_get_bw(bbr);
 	bw *= (uint64_t)bbr_hptsi_gain[BBR_SUB_GAIN];
 	bw /= (uint64_t)BBR_UNIT;
 	return(bw);
 }
 
 static void
 bbr_setup_less_of_rate(struct tcp_bbr *bbr, uint32_t cts,
 		       uint64_t act_rate, uint64_t rate_wanted)
 {
 	/*
 	 * We could not get a full gain's worth
 	 * of rate.
 	 */
 	if (get_filter_value(&bbr->r_ctl.rc_delrate) >= act_rate) {
 		/* we can't even get the real rate */
 		uint64_t red;
 
 		bbr->skip_gain = 1;
 		bbr->gain_is_limited = 0;
 		red = get_filter_value(&bbr->r_ctl.rc_delrate) - act_rate;
 		if (red)
 			filter_reduce_by(&bbr->r_ctl.rc_delrate, red, cts);
 	} else {
 		/* We can use a lower gain */
 		bbr->skip_gain = 0;
 		bbr->gain_is_limited = 1;
 	}
 }
 
 static void
 bbr_update_hardware_pacing_rate(struct tcp_bbr *bbr, uint32_t cts)
 {
 	const struct tcp_hwrate_limit_table *nrte;
 	int error, rate = -1;
 
 	if (bbr->r_ctl.crte == NULL)
 		return;
 	if ((bbr->rc_inp->inp_route.ro_nh == NULL) ||
 	    (bbr->rc_inp->inp_route.ro_nh->nh_ifp == NULL)) {
 		/* Lost our routes? */
 		/* Clear the way for a re-attempt */
 		bbr->bbr_attempt_hdwr_pace = 0;
 lost_rate:
 		bbr->gain_is_limited = 0;
 		bbr->skip_gain = 0;
 		bbr->bbr_hdrw_pacing = 0;
 		counter_u64_add(bbr_flows_whdwr_pacing, -1);
 		counter_u64_add(bbr_flows_nohdwr_pacing, 1);
 		tcp_bbr_tso_size_check(bbr, cts);
 		return;
 	}
 	rate = bbr_get_hardware_rate(bbr);
 	nrte = tcp_chg_pacing_rate(bbr->r_ctl.crte,
 				   bbr->rc_tp,
 				   bbr->rc_inp->inp_route.ro_nh->nh_ifp,
 				   rate,
 				   (RS_PACING_GEQ|RS_PACING_SUB_OK),
 				   &error, NULL);
 	if (nrte == NULL) {
 		goto lost_rate;
 	}
 	if (nrte != bbr->r_ctl.crte) {
 		bbr->r_ctl.crte = nrte;
 		if (error == 0)  {
 			BBR_STAT_INC(bbr_hdwr_rl_mod_ok);
 			if (bbr->r_ctl.crte->rate < rate) {
 				/* We have a problem */
 				bbr_setup_less_of_rate(bbr, cts,
 						       bbr->r_ctl.crte->rate, rate);
 			} else {
 				/* We are good */
 				bbr->gain_is_limited = 0;
 				bbr->skip_gain = 0;
 			}
 		} else {
 			/* A failure should release the tag */
 			BBR_STAT_INC(bbr_hdwr_rl_mod_fail);
 			bbr->gain_is_limited = 0;
 			bbr->skip_gain = 0;
 			bbr->bbr_hdrw_pacing = 0;
 		}
 		bbr_type_log_hdwr_pacing(bbr,
 					 bbr->r_ctl.crte->ptbl->rs_ifp,
 					 rate,
 					 ((bbr->r_ctl.crte == NULL) ? 0 : bbr->r_ctl.crte->rate),
 					 __LINE__,
 					 cts,
 					 error);
 	}
 }
 
 static void
 bbr_adjust_for_hw_pacing(struct tcp_bbr *bbr, uint32_t cts)
 {
 	/*
 	 * If we have hardware pacing support
 	 * we need to factor that in for our
 	 * TSO size.
 	 */
 	const struct tcp_hwrate_limit_table *rlp;
 	uint32_t cur_delay, seg_sz, maxseg, new_tso, delta, hdwr_delay;
 
 	if ((bbr->bbr_hdrw_pacing == 0) ||
 	    (IN_RECOVERY(bbr->rc_tp->t_flags)) ||
 	    (bbr->r_ctl.crte == NULL))
 		return;
 	if (bbr->hw_pacing_set == 0) {
 		/* Not yet past the hdwr pacing count delay */
 		return;
 	}
 	if (bbr_hdwr_pace_adjust == 0) {
 		/* No adjustment */
 		return;
 	}
 	rlp = bbr->r_ctl.crte;
 	if (bbr->rc_tp->t_maxseg > bbr->rc_last_options)
 		maxseg = bbr->rc_tp->t_maxseg - bbr->rc_last_options;
 	else
 		maxseg = BBR_MIN_SEG - bbr->rc_last_options;
 	/*
 	 * So let's first get the time we will take between
 	 * TSO-sized sends currently, without hardware help.
 	 */
 	cur_delay = bbr_get_pacing_delay(bbr, BBR_UNIT,
 		        bbr->r_ctl.rc_pace_max_segs, cts, 1);
 	hdwr_delay = bbr->r_ctl.rc_pace_max_segs / maxseg;
 	hdwr_delay *= rlp->time_between;
 	if (cur_delay > hdwr_delay)
 		delta = cur_delay - hdwr_delay;
 	else
 		delta = 0;
 	bbr_log_type_tsosize(bbr, cts, delta, cur_delay, hdwr_delay,
 			     (bbr->r_ctl.rc_pace_max_segs / maxseg),
 			     1);
 	if (delta &&
 	    (delta < (max(rlp->time_between,
 			  bbr->r_ctl.bbr_hptsi_segments_delay_tar)))) {
 		/*
 		 * Now let's divide by the pacing time between each
 		 * segment the hardware sends, rounding up, and derive
 		 * a byte count from that. We multiply that by
 		 * bbr_hdwr_pace_adjust to get more bang for our buck.
 		 *
 		 * The goal is to have the software pacer waiting no
 		 * more than an additional pacing delay if we can
 		 * (without the compensation i.e. x bbr_hdwr_pace_adjust).
 		 */
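 		/*
 		 * Illustrative example (assumed numbers, not defaults):
 		 * with cur_delay = 120 usec, rlp->time_between = 20 usec,
 		 * rc_pace_max_segs/maxseg = 4 and bbr_hdwr_pace_adjust = 2,
 		 * seg_sz = max((120 + 20) / 20, 4) = 7, then 7 * 2 = 14
 		 * segments, i.e. seg_sz becomes 14 * maxseg bytes.
 		 */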
 		seg_sz = max(((cur_delay + rlp->time_between)/rlp->time_between),
 			     (bbr->r_ctl.rc_pace_max_segs/maxseg));
 		seg_sz *= bbr_hdwr_pace_adjust;
 		if (bbr_hdwr_pace_floor &&
 		    (seg_sz < bbr->r_ctl.crte->ptbl->rs_min_seg)) {
 			/* Currently hardware paces
 			 * out rs_min_seg segments at a time.
 			 * We need to make sure we always send at least
 			 * a full burst of bbr_hdwr_pace_floor down.
 			 */
 			seg_sz = bbr->r_ctl.crte->ptbl->rs_min_seg;
 		}
 		seg_sz *= maxseg;
 	} else if (delta == 0) {
 		/*
 		 * The highest pacing rate is above our gained b/w.
 		 * This means we are probably going quite fast at the
 		 * hardware's highest rate. Let's just multiply the
 		 * calculated TSO size by the multiplier factor (it's
 		 * probably 4 segments in the default config for mlx).
 		 */
 		seg_sz = bbr->r_ctl.rc_pace_max_segs * bbr_hdwr_pace_adjust;
 		if (bbr_hdwr_pace_floor &&
 		    (seg_sz < bbr->r_ctl.crte->ptbl->rs_min_seg)) {
 			/* Currently hardware paces
 			 * out rs_min_seg segments at a time.
 			 * We need to make sure we always send at least
 			 * a full burst of bbr_hdwr_pace_floor down.
 			 */
 			seg_sz = bbr->r_ctl.crte->ptbl->rs_min_seg;
 		}
 	} else {
 		/*
 		 * The pacing time difference is so big that the
 		 * hardware will pace out more rapidly than we really
 		 * want, and then we will have a long delay. Let's just
 		 * keep the same TSO size so it's as if we were not
 		 * using hdwr pacing (we just gain a bit of spacing
 		 * from the hardware if seg_sz > 1).
 		 */
 		seg_sz = bbr->r_ctl.rc_pace_max_segs;
 	}
 	if (seg_sz > bbr->r_ctl.rc_pace_max_segs)
 		new_tso = seg_sz;
 	else
 		new_tso = bbr->r_ctl.rc_pace_max_segs;
 	if (new_tso >= (PACE_MAX_IP_BYTES-maxseg))
 		new_tso = PACE_MAX_IP_BYTES - maxseg;
 
 	if (new_tso != bbr->r_ctl.rc_pace_max_segs) {
 		bbr_log_type_tsosize(bbr, cts, new_tso, 0, bbr->r_ctl.rc_pace_max_segs, maxseg, 0);
 		bbr->r_ctl.rc_pace_max_segs = new_tso;
 	}
 }
 
 static void
 tcp_bbr_tso_size_check(struct tcp_bbr *bbr, uint32_t cts)
 {
 	uint64_t bw;
 	uint32_t old_tso = 0, new_tso;
 	uint32_t maxseg, bytes;
 	uint32_t tls_seg=0;
 	/*
 	 * Google/linux uses the following algorithm to determine
 	 * the TSO size based on the b/w of the link (from Neal Cardwell email 9/27/18):
 	 *
 	 *  bytes = bw_in_bytes_per_second / 1000
 	 *  bytes = min(bytes, 64k)
 	 *  tso_segs = bytes / MSS
 	 *  if (bw < 1.2Mbs)
 	 *      min_tso_segs = 1
 	 *  else
 	 *	min_tso_segs = 2
 	 * tso_segs = max(tso_segs, min_tso_segs)
 	 *
 	 * * Note we apply a device-specific limit (we do this in
 	 *   tcp_m_copym).
 	 * Note that before the initial measurement is made google bursts out
 	 * a full iwnd just like new-reno/cubic.
 	 *
 	 * We do not use this algorithm. Instead we
 	 * use a two phased approach:
 	 *
 	 *  if ( bw <= per-tcb-cross-over)
 	 *     goal_tso =  calculate how much with this bw we
 	 *                 can send in goal-time seconds.
 	 *     if (goal_tso > mss)
 	 *         seg = goal_tso / mss
 	 *         tso = seg * mss
 	 *     else
 	 *         tso = mss
 	 *     if (tso > per-tcb-max)
 	 *         tso = per-tcb-max
 	 *  else if ( bw > 512Mbps)
 	 *     tso = max-tso (64k/mss)
 	 *  else
 	 *     goal_tso = bw / per-tcb-divsor
 	 *     seg = (goal_tso + mss-1)/mss
 	 *     tso = seg * mss
 	 *
 	 * if (tso < per-tcb-floor)
 	 *    tso = per-tcb-floor
 	 * if (tso > per-tcb-utter_max)
 	 *    tso = per-tcb-utter_max
 	 *
 	 * Note the default per-tcb-divisor is 1000 (same as google).
 	 * The goal cross-over is 30Mbps however. To recreate google's
 	 * algorithm you need to set:
 	 *
 	 * cross-over = 23,168,000 bps
 	 * goal-time = 18000
 	 * per-tcb-max = 2
 	 * per-tcb-divisor = 1000
 	 * per-tcb-floor = 1
 	 *
 	 * This will get you "google bbr" behavior with respect to tso size.
 	 *
 	 * Note we do not set any TSO size until we are past the initial
 	 * window. Before that we generally use either a single MSS
 	 * or we use the full IW size (so we burst an IW at a time).
 	 */
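 	/*
 	 * Illustrative example of the middle branch above (assumed
 	 * numbers): with bw = 12,500,000 bytes/sec (100Mbps), a
 	 * per-tcb-divisor of 1000 and an mss of 1448, goal_tso is
 	 * 12,500 bytes, seg = (12,500 + 1447) / 1448 = 9, and the
 	 * resulting tso is 9 * 1448 = 13,032 bytes (before the floor
 	 * and utter-max clamps are applied).
 	 */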
 
 	if (bbr->rc_tp->t_maxseg > bbr->rc_last_options) {
 		maxseg = bbr->rc_tp->t_maxseg - bbr->rc_last_options;
 	} else {
 		maxseg = BBR_MIN_SEG - bbr->rc_last_options;
 	}
 	old_tso = bbr->r_ctl.rc_pace_max_segs;
 	if (bbr->rc_past_init_win == 0) {
 		/*
 		 * Not enough data has been acknowledged to make a
 		 * judgement. Set up the initial TSO based on whether we
 		 * are sending a full IW at once or not.
 		 */
 		if (bbr->rc_use_google)
 			bbr->r_ctl.rc_pace_max_segs = ((bbr->rc_tp->t_maxseg - bbr->rc_last_options) * 2);
 		else if (bbr->bbr_init_win_cheat)
 			bbr->r_ctl.rc_pace_max_segs = bbr_initial_cwnd(bbr, bbr->rc_tp);
 		else
 			bbr->r_ctl.rc_pace_max_segs = bbr->rc_tp->t_maxseg - bbr->rc_last_options;
 		if (bbr->r_ctl.rc_pace_min_segs != bbr->rc_tp->t_maxseg)
 			bbr->r_ctl.rc_pace_min_segs = bbr->rc_tp->t_maxseg;
 		if (bbr->r_ctl.rc_pace_max_segs == 0) {
 			bbr->r_ctl.rc_pace_max_segs = maxseg;
 		}
 		bbr_log_type_tsosize(bbr, cts, bbr->r_ctl.rc_pace_max_segs, tls_seg, old_tso, maxseg, 0);
 		bbr_adjust_for_hw_pacing(bbr, cts);
 		return;
 	}
 	/**
 	 * Now let's set the TSO goal based on our delivery rate in
 	 * bytes per second. Note we only do this if
 	 * we have acked at least the initial cwnd worth of data.
 	 */
 	bw = bbr_get_bw(bbr);
 	if (IN_RECOVERY(bbr->rc_tp->t_flags) &&
 	     (bbr->rc_use_google == 0)) {
 		/* We clamp to one MSS in recovery */
 		new_tso = maxseg;
 	} else if (bbr->rc_use_google) {
 		int min_tso_segs;
 
 		/* Google considers the gain too */
 		if (bbr->r_ctl.rc_bbr_hptsi_gain != BBR_UNIT) {
 			bw *= bbr->r_ctl.rc_bbr_hptsi_gain;
 			bw /= BBR_UNIT;
 		}
 		bytes = bw / 1024;
 		if (bytes > (64 * 1024))
 			bytes = 64 * 1024;
 		new_tso = bytes / maxseg;
 		if (bw < ONE_POINT_TWO_MEG)
 			min_tso_segs = 1;
 		else
 			min_tso_segs = 2;
 		if (new_tso < min_tso_segs)
 			new_tso = min_tso_segs;
 		new_tso *= maxseg;
 	} else if (bbr->rc_no_pacing) {
 		new_tso = (PACE_MAX_IP_BYTES / maxseg) * maxseg;
 	} else if (bw <= bbr->r_ctl.bbr_cross_over) {
 		/*
 		 * Calculate the worst-case b/w TSO if we are inserting no
 		 * more than a delay_target number of TSOs.
 		 */
 		uint32_t tso_len, min_tso;
 
 		tso_len = bbr_get_pacing_length(bbr, BBR_UNIT, bbr->r_ctl.bbr_hptsi_segments_delay_tar, bw);
 		if (tso_len > maxseg) {
 			new_tso = tso_len / maxseg;
 			if (new_tso > bbr->r_ctl.bbr_hptsi_segments_max)
 				new_tso = bbr->r_ctl.bbr_hptsi_segments_max;
 			new_tso *= maxseg;
 		} else {
 			/*
 			 * Less than a full-sized frame, yikes.. long rtt or
 			 * low bw?
 			 */
 			min_tso = bbr_minseg(bbr);
 			if ((tso_len > min_tso) && (bbr_all_get_min == 0))
 				new_tso = rounddown(tso_len, min_tso);
 			else
 				new_tso = min_tso;
 		}
 	} else if (bw > FIVETWELVE_MBPS) {
 		/*
 		 * This guy is so fast b/w-wise that we can use TSO
 		 * segments as large as the NIC will allow.
 		 */
 		new_tso = rounddown(PACE_MAX_IP_BYTES, maxseg);
 	} else {
 		/*
 		 * This formula is based on attempting to send a segment or
 		 * more every bbr_hptsi_per_second. The default is 1000
 		 * which means you are targeting what you can send every 1ms
 		 * based on the peer's bw.
 		 *
 		 * If the number drops to say 500, then you are looking more
 		 * at 2ms and you will raise how much we send in a single
 		 * TSO thus saving CPU (fewer bbr_output_wtime() calls). The
 		 * trade-off of course is you will send more at once and
 		 * thus tend to clump up the sends into larger "bursts"
 		 * building a queue.
 		 */
 		bw /= bbr->r_ctl.bbr_hptsi_per_second;
 		new_tso = roundup(bw, (uint64_t)maxseg);
 		/*
 		 * Gate the floor to match what our lower than 48Mbps
 		 * algorithm does. The ceiling (bbr_hptsi_segments_max) thus
 		 * becomes the floor for this calculation.
 		 */
 		if (new_tso < (bbr->r_ctl.bbr_hptsi_segments_max * maxseg))
 			new_tso = (bbr->r_ctl.bbr_hptsi_segments_max * maxseg);
 	}
 	if (bbr->r_ctl.bbr_hptsi_segments_floor && (new_tso < (maxseg * bbr->r_ctl.bbr_hptsi_segments_floor)))
 		new_tso = maxseg * bbr->r_ctl.bbr_hptsi_segments_floor;
 	if (new_tso > PACE_MAX_IP_BYTES)
 		new_tso = rounddown(PACE_MAX_IP_BYTES, maxseg);
 	/* Enforce an utter maximum. */
 	if (bbr->r_ctl.bbr_utter_max && (new_tso > (bbr->r_ctl.bbr_utter_max * maxseg))) {
 		new_tso = bbr->r_ctl.bbr_utter_max * maxseg;
 	}
 	if (old_tso != new_tso) {
 		/* Only log changes */
 		bbr_log_type_tsosize(bbr, cts, new_tso, tls_seg, old_tso, maxseg, 0);
 		bbr->r_ctl.rc_pace_max_segs = new_tso;
 	}
 	/* We have hardware pacing! */
 	bbr_adjust_for_hw_pacing(bbr, cts);
 }
 
 static void
 bbr_log_output(struct tcp_bbr *bbr, struct tcpcb *tp, struct tcpopt *to, int32_t len,
     uint32_t seq_out, uint16_t th_flags, int32_t err, uint32_t cts,
     struct mbuf *mb, int32_t * abandon, struct bbr_sendmap *hintrsm, uint32_t delay_calc,
     struct sockbuf *sb)
 {
 
 	struct bbr_sendmap *rsm, *nrsm;
 	register uint32_t snd_max, snd_una;
 	uint32_t pacing_time;
 	/*
 	 * Add to the RACK log of packets in flight or retransmitted. If
 	 * there is a TS option we will use the TS echoed, if not we will
 	 * grab a TS.
 	 *
 	 * Retransmissions will increment the count and move the ts to its
 	 * proper place. Note that if options do not include TS's then we
 	 * won't be able to effectively use the ACK for an RTT on a retran.
 	 *
 	 * Notes about r_start and r_end. Lets consider a send starting at
 	 * sequence 1 for 10 bytes. In such an example the r_start would be
 	 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
 	 * This means that r_end is actually the first sequence for the next
 	 * slot (11).
 	 *
 	 */
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	if (err) {
 		/*
 		 * We don't log errors -- we could but snd_max does not
 		 * advance in this case either.
 		 */
 		return;
 	}
 	if (th_flags & TH_RST) {
 		/*
 		 * We don't log resets and we return immediately from
 		 * sending
 		 */
 		*abandon = 1;
 		return;
 	}
 	snd_una = tp->snd_una;
 	if (th_flags & (TH_SYN | TH_FIN) && (hintrsm == NULL)) {
 		/*
 		 * The call to bbr_log_output is made before bumping
 		 * snd_max. This means we can record one extra byte on a SYN
 		 * or FIN if seq_out is adding more on and a FIN is present
 		 * (and we are not resending).
 		 */
 		if ((th_flags & TH_SYN) && (tp->iss == seq_out))
 			len++;
 		if (th_flags & TH_FIN)
 			len++;
 	}
 	if (SEQ_LEQ((seq_out + len), snd_una)) {
 		/* Are we sending an old segment to induce an ack (keep-alive)? */
 		return;
 	}
 	if (SEQ_LT(seq_out, snd_una)) {
 		/* huh? should we panic? */
 		uint32_t end;
 
 		end = seq_out + len;
 		seq_out = snd_una;
 		len = end - seq_out;
 	}
 	snd_max = tp->snd_max;
 	if (len == 0) {
 		/* We don't log zero window probes */
 		return;
 	}
 	pacing_time = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, len, cts, 1);
 	/* First question is it a retransmission? */
 	if (seq_out == snd_max) {
 again:
 		rsm = bbr_alloc(bbr);
 		if (rsm == NULL) {
 			return;
 		}
 		rsm->r_flags = 0;
 		if (th_flags & TH_SYN)
 			rsm->r_flags |= BBR_HAS_SYN;
 		if (th_flags & TH_FIN)
 			rsm->r_flags |= BBR_HAS_FIN;
 		rsm->r_tim_lastsent[0] = cts;
 		rsm->r_rtr_cnt = 1;
 		rsm->r_rtr_bytes = 0;
 		rsm->r_start = seq_out;
 		rsm->r_end = rsm->r_start + len;
 		rsm->r_dupack = 0;
 		rsm->r_delivered = bbr->r_ctl.rc_delivered;
 		rsm->r_pacing_delay = pacing_time;
 		rsm->r_ts_valid = bbr->rc_ts_valid;
 		if (bbr->rc_ts_valid)
 			rsm->r_del_ack_ts = bbr->r_ctl.last_inbound_ts;
 		rsm->r_del_time = bbr->r_ctl.rc_del_time;
 		if (bbr->r_ctl.r_app_limited_until)
 			rsm->r_app_limited = 1;
 		else
 			rsm->r_app_limited = 0;
 		rsm->r_first_sent_time = bbr_get_earliest_send_outstanding(bbr, rsm, cts);
 		rsm->r_flight_at_send = ctf_flight_size(bbr->rc_tp,
 						(bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 		/*
 		 * Here we must also add in this rsm since snd_max
 		 * is updated after we return from a new send.
 		 */
 		rsm->r_flight_at_send += len;
 		TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_map, rsm, r_next);
 		TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 		rsm->r_in_tmap = 1;
 		if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW)
 			rsm->r_bbr_state = bbr_state_val(bbr);
 		else
 			rsm->r_bbr_state = 8;
 		if (bbr->r_ctl.rc_bbr_hptsi_gain > BBR_UNIT) {
 			rsm->r_is_gain = 1;
 			rsm->r_is_drain = 0;
 		} else if (bbr->r_ctl.rc_bbr_hptsi_gain < BBR_UNIT) {
 			rsm->r_is_drain = 1;
 			rsm->r_is_gain = 0;
 		} else {
 			rsm->r_is_drain = 0;
 			rsm->r_is_gain = 0;
 		}
 		return;
 	}
 	/*
 	 * If we reach here its a retransmission and we need to find it.
 	 */
 more:
 	if (hintrsm && (hintrsm->r_start == seq_out)) {
 		rsm = hintrsm;
 		hintrsm = NULL;
 	} else if (bbr->r_ctl.rc_next) {
 		/* We have a hint from a previous run */
 		rsm = bbr->r_ctl.rc_next;
 	} else {
 		/* No hints sorry */
 		rsm = NULL;
 	}
 	if ((rsm) && (rsm->r_start == seq_out)) {
 		/*
 		 * We used rc_next or hintrsm to retransmit, hopefully the
 		 * likely case.
 		 */
 		seq_out = bbr_update_entry(tp, bbr, rsm, cts, &len, pacing_time);
 		if (len == 0) {
 			return;
 		} else {
 			goto more;
 		}
 	}
 	/* Ok, it was not the last pointer; go through the map the hard way. */
 	TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) {
 		if (rsm->r_start == seq_out) {
 			seq_out = bbr_update_entry(tp, bbr, rsm, cts, &len, pacing_time);
 			bbr->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next);
 			if (len == 0) {
 				return;
 			} else {
 				continue;
 			}
 		}
 		if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
 			/* Transmitted within this piece */
 			/*
 			 * Ok we must split off the front and then let the
 			 * update do the rest
 			 */
 			nrsm = bbr_alloc_full_limit(bbr);
 			if (nrsm == NULL) {
 				bbr_update_rsm(tp, bbr, rsm, cts, pacing_time);
 				return;
 			}
 			/*
 			 * copy rsm to nrsm and then trim the front of rsm
 			 * to not include this part.
 			 */
 			bbr_clone_rsm(bbr, nrsm, rsm, seq_out);
 			TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next);
 			if (rsm->r_in_tmap) {
 				TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 				nrsm->r_in_tmap = 1;
 			}
 			rsm->r_flags &= (~BBR_HAS_FIN);
 			seq_out = bbr_update_entry(tp, bbr, nrsm, cts, &len, pacing_time);
 			if (len == 0) {
 				return;
 			}
 		}
 	}
 	/*
 	 * Hmm, not found in the map; did they retransmit both old data
 	 * and on into the new?
 	 */
 	if (seq_out == tp->snd_max) {
 		goto again;
 	} else if (SEQ_LT(seq_out, tp->snd_max)) {
 #ifdef BBR_INVARIANTS
 		printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
 		    seq_out, len, tp->snd_una, tp->snd_max);
 		printf("Starting Dump of all rack entries\n");
 		TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) {
 			printf("rsm:%p start:%u end:%u\n",
 			    rsm, rsm->r_start, rsm->r_end);
 		}
 		printf("Dump complete\n");
 		panic("seq_out not found rack:%p tp:%p",
 		    bbr, tp);
 #endif
 	} else {
 #ifdef BBR_INVARIANTS
 		/*
 		 * Hmm beyond sndmax? (only if we are using the new rtt-pack
 		 * flag)
 		 */
 		panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
 		    seq_out, len, tp->snd_max, tp);
 #endif
 	}
 }
 
 static void
 bbr_collapse_rtt(struct tcpcb *tp, struct tcp_bbr *bbr, int32_t rtt)
 {
 	/*
 	 * Collapse the timeout back since the cum-ack moved.
 	 */
 	tp->t_rxtshift = 0;
 	tp->t_softerror = 0;
 }
 
 static void
 tcp_bbr_xmit_timer(struct tcp_bbr *bbr, uint32_t rtt_usecs, uint32_t rsm_send_time, uint32_t r_start, uint32_t tsin)
 {
 	bbr->rtt_valid = 1;
 	bbr->r_ctl.cur_rtt = rtt_usecs;
 	bbr->r_ctl.ts_in = tsin;
 	if (rsm_send_time)
 		bbr->r_ctl.cur_rtt_send_time = rsm_send_time;
 }
 
 static void
 bbr_make_timestamp_determination(struct tcp_bbr *bbr)
 {
 	/**
 	 * We have in our bbr control:
 	 * 1) The timestamp we started observing cum-acks (bbr->r_ctl.bbr_ts_check_tstmp).
 	 * 2) Our timestamp indicating when we sent that packet (bbr->r_ctl.bbr_ts_check_our_cts).
 	 * 3) The current timestamp that just came in (bbr->r_ctl.last_inbound_ts)
 	 * 4) The time that the packet that generated that ack was sent (bbr->r_ctl.cur_rtt_send_time)
 	 *
 	 * Now we can calculate the time between the sends by doing:
 	 *
 	 * delta = bbr->r_ctl.cur_rtt_send_time - bbr->r_ctl.bbr_ts_check_our_cts
 	 *
 	 * And the peer's time between receiving them by doing:
 	 *
 	 * peer_delta = bbr->r_ctl.last_inbound_ts - bbr->r_ctl.bbr_ts_check_tstmp
 	 *
 	 * We want to figure out if the timestamp values are in msec, 10msec or usec.
 	 * We also may find that we can't use the timestamps if say we see
 	 * that the peer_delta indicates that though we may have taken 10ms to
 	 * pace out the data, it only saw 1ms between the two packets. This would
 	 * indicate that somewhere on the path is a batching entity that is giving
 	 * out time-slices of the actual b/w. This would mean we could not use
 	 * reliably the peers timestamps.
 	 *
 	 * We expect delta > peer_delta initially, until we figure out the
 	 * timestamp ratio, which we will store in bbr->r_ctl.bbr_peer_tsratio.
 	 * If we place 1000 there then it's a ms clock vs our usec. If we place
 	 * 10000 there then it's a 10ms clock vs our usec. If the peer is running
 	 * a usec clock we would put a 1 there. If the peer's value is faster than
 	 * ours, we will disable the use of timestamps (though we could revisit
 	 * this later if we find it to be not just an isolated one or two flows).
 	 *
 	 * To detect the batching middle boxes we will come up with our compensation and
 	 * if with it in place, we find the peer is drastically off (by some margin) in
 	 * the smaller direction, then we will assume the worst case and disable use of timestamps.
 	 *
 	 */
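 	/*
 	 * Illustrative example (assumed numbers): if our sends were
 	 * delta = 20,000 usec apart and the peer's echoed timestamps
 	 * moved by peer_delta = 20, then peer_delta * 1000 = 20,000,
 	 * which matches delta within the tolerance below, so we would
 	 * set bbr_peer_tsratio to 1000 (a millisecond peer clock).
 	 */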
 	uint64_t delta, peer_delta, delta_up;
 
 	delta = bbr->r_ctl.cur_rtt_send_time - bbr->r_ctl.bbr_ts_check_our_cts;
 	if (delta < bbr_min_usec_delta) {
 		/*
 		 * We have not seen a minimum amount of time
 		 * between our send times, so we can't yet
 		 * make a determination about the timestamp.
 		 */
 		return;
 	}
 	peer_delta = bbr->r_ctl.last_inbound_ts - bbr->r_ctl.bbr_ts_check_tstmp;
 	if (peer_delta < bbr_min_peer_delta) {
 		/*
 		 * We may have enough in the form of our delta, but
 		 * the peer's number has not changed that much. It
 		 * could be that its clock ratio is such that we need
 		 * more data (10ms tick), or there may be other
 		 * compression scenarios going on. In any event we
 		 * need the spread to be larger.
 		 */
 		return;
 	}
 	/* Ok lets first see which way our delta is going */
 	if (peer_delta > delta) {
 		/* Very unlikely; the peer, without
 		 * compensation, shows that it saw
 		 * the two sends arrive further apart
 		 * than we saw them in microseconds.
 		 */
 		if (peer_delta < (delta + ((delta * (uint64_t)1000)/ (uint64_t)bbr_delta_percent))) {
 			/* Well, it looks like the peer is using a microsecond clock. */
 			bbr->rc_ts_clock_set = 1;
 			bbr->r_ctl.bbr_peer_tsratio = 1;
 		} else {
 			bbr->rc_ts_cant_be_used = 1;
 			bbr->rc_ts_clock_set = 1;
 		}
 		return;
 	}
 	/* Ok we know that the peer_delta is smaller than our send distance */
 	bbr->rc_ts_clock_set = 1;
 	/* First question: is it within the percentage indicating they are using usec time? */
 	delta_up = (peer_delta * 1000) / (uint64_t)bbr_delta_percent;
 	if ((peer_delta + delta_up) >= delta) {
 		/* Its a usec clock */
 		bbr->r_ctl.bbr_peer_tsratio = 1;
 		bbr_log_tstmp_validation(bbr, peer_delta, delta);
 		return;
 	}
 	/* Ok if not usec, what about 10usec (though unlikely)? */
 	delta_up = (peer_delta * 1000 * 10) / (uint64_t)bbr_delta_percent;
 	if (((peer_delta * 10) + delta_up) >= delta) {
 		bbr->r_ctl.bbr_peer_tsratio = 10;
 		bbr_log_tstmp_validation(bbr, peer_delta, delta);
 		return;
 	}
 	/* And what about 100usec (though again unlikely)? */
 	delta_up = (peer_delta * 1000 * 100) / (uint64_t)bbr_delta_percent;
 	if (((peer_delta * 100) + delta_up) >= delta) {
 		bbr->r_ctl.bbr_peer_tsratio = 100;
 		bbr_log_tstmp_validation(bbr, peer_delta, delta);
 		return;
 	}
 	/* And how about 1 msec (the most likely one)? */
 	delta_up = (peer_delta * 1000 * 1000) / (uint64_t)bbr_delta_percent;
 	if (((peer_delta * 1000) + delta_up) >= delta) {
 		bbr->r_ctl.bbr_peer_tsratio = 1000;
 		bbr_log_tstmp_validation(bbr, peer_delta, delta);
 		return;
 	}
 	/* Ok if not msec could it be 10 msec? */
 	delta_up = (peer_delta * 1000 * 10000) / (uint64_t)bbr_delta_percent;
 	if (((peer_delta * 10000) + delta_up) >= delta) {
 		bbr->r_ctl.bbr_peer_tsratio = 10000;
 		return;
 	}
 	/* If we fall down here, the clock ticks so slowly we can't use it */
 	bbr->rc_ts_cant_be_used = 1;
 	bbr->r_ctl.bbr_peer_tsratio = 0;
 	bbr_log_tstmp_validation(bbr, peer_delta, delta);
 }
 
 /*
  * Collect new round-trip time estimate
  * and update averages and current timeout.
  */
 static void
 tcp_bbr_xmit_timer_commit(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts)
 {
 	int32_t delta;
 	uint32_t rtt, tsin;
 	int32_t rtt_ticks;
 
 	if (bbr->rtt_valid == 0)
 		/* No valid sample */
 		return;
 
 	rtt = bbr->r_ctl.cur_rtt;
 	tsin = bbr->r_ctl.ts_in;
 	if (bbr->rc_prtt_set_ts) {
 		/*
 		 * We are to force feed the rttProp filter due
 		 * to an entry into PROBE_RTT. This assures
 		 * that the times are sync'd between when we
 		 * go into PROBE_RTT and the filter expiration.
 		 *
 		 * Google does not use a true filter, so they do
 		 * this implicitly since they only keep one value
 		 * and when they enter probe-rtt they update the
 		 * value to the newest rtt.
 		 */
 		uint32_t rtt_prop;
 
 		bbr->rc_prtt_set_ts = 0;
 		rtt_prop = get_filter_value_small(&bbr->r_ctl.rc_rttprop);
 		if (rtt > rtt_prop)
 			filter_increase_by_small(&bbr->r_ctl.rc_rttprop, (rtt - rtt_prop), cts);
 		else
 			apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, cts);
 	}
 #ifdef STATS
 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rtt));
 #endif
 	if (bbr->rc_ack_was_delayed)
 		rtt += bbr->r_ctl.rc_ack_hdwr_delay;
 
 	if (rtt < bbr->r_ctl.rc_lowest_rtt)
 		bbr->r_ctl.rc_lowest_rtt = rtt;
 	bbr_log_rtt_sample(bbr, rtt, tsin);
 	if (bbr->r_init_rtt) {
 		/*
 		 * The initial rtt is not trusted; nuke it and let's get
 		 * our first valid measurement in.
 		 */
 		bbr->r_init_rtt = 0;
 		tp->t_srtt = 0;
 	}
 	if ((bbr->rc_ts_clock_set == 0) && bbr->rc_ts_valid) {
 		/*
 		 * So we have not yet figured out
 		 * what the peer's TSTMP value is
 		 * in (most likely ms). We need a
 		 * series of cum-ack's to determine
 		 * this reliably.
 		 */
 		if (bbr->rc_ack_is_cumack) {
 			if (bbr->rc_ts_data_set) {
 				/* Lets attempt to determine the timestamp granularity. */
 				bbr_make_timestamp_determination(bbr);
 			} else {
 				bbr->rc_ts_data_set = 1;
 				bbr->r_ctl.bbr_ts_check_tstmp = bbr->r_ctl.last_inbound_ts;
 				bbr->r_ctl.bbr_ts_check_our_cts = bbr->r_ctl.cur_rtt_send_time;
 			}
 		} else {
 			/*
 			 * We have to have consecutive cum-acks;
 			 * reset any "filled" state to none.
 			 */
 			bbr->rc_ts_data_set = 0;
 		}
 	}
 	/* Round it up */
 	rtt_ticks = USEC_2_TICKS((rtt + (USECS_IN_MSEC - 1)));
 	if (rtt_ticks == 0)
 		rtt_ticks = 1;
 	if (tp->t_srtt != 0) {
 		/*
 		 * srtt is stored as fixed point with 5 bits after the
 		 * binary point (i.e., scaled by 8).  The following magic is
 		 * equivalent to the smoothing algorithm in rfc793 with an
 		 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point).
 		 * Adjust rtt to origin 0.
 		 */
 
 		delta = ((rtt_ticks - 1) << TCP_DELTA_SHIFT)
 		    - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
 
 		tp->t_srtt += delta;
 		if (tp->t_srtt <= 0)
 			tp->t_srtt = 1;
 
 		/*
 		 * We accumulate a smoothed rtt variance (actually, a
 		 * smoothed mean difference), then set the retransmit timer
 		 * to smoothed rtt + 4 times the smoothed variance. rttvar
 		 * is stored as fixed point with 4 bits after the binary
 		 * point (scaled by 16).  The following is equivalent to
 		 * rfc793 smoothing with an alpha of .75 (rttvar =
 		 * rttvar*3/4 + |delta| / 4).  This replaces rfc793's
 		 * wired-in beta.
 		 */
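 		/*
 		 * In un-scaled terms the two updates here are roughly:
 		 *   srtt   = srtt + (rtt - srtt) / 8
 		 *   rttvar = rttvar + (|rtt - srtt| - rttvar) / 4
 		 * (shown for illustration only; the code keeps both
 		 * values in fixed point as described above).
 		 */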
 		if (delta < 0)
 			delta = -delta;
 		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
 		tp->t_rttvar += delta;
 		if (tp->t_rttvar <= 0)
 			tp->t_rttvar = 1;
 	} else {
 		/*
 		 * No rtt measurement yet - use the unsmoothed rtt. Set the
 		 * variance to half the rtt (so our first retransmit happens
 		 * at 3*rtt).
 		 */
 		tp->t_srtt = rtt_ticks << TCP_RTT_SHIFT;
 		tp->t_rttvar = rtt_ticks << (TCP_RTTVAR_SHIFT - 1);
 	}
 	KMOD_TCPSTAT_INC(tcps_rttupdated);
 	if (tp->t_rttupdated < UCHAR_MAX)
 		tp->t_rttupdated++;
 #ifdef STATS
 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt_ticks));
 #endif
 	/*
 	 * the retransmit should happen at rtt + 4 * rttvar. Because of the
 	 * way we do the smoothing, srtt and rttvar will each average +1/2
 	 * tick of bias.  When we compute the retransmit timer, we want 1/2
 	 * tick of rounding and 1 extra tick because of +-1/2 tick
 	 * uncertainty in the firing of the timer.  The bias will give us
 	 * exactly the 1.5 tick we need.  But, because the bias is
 	 * statistical, we have to test that we don't drop below the minimum
 	 * feasible timer (which is 2 ticks).
 	 */
 	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
 	    max(MSEC_2_TICKS(bbr->r_ctl.rc_min_rto_ms), rtt_ticks + 2),
 	    MSEC_2_TICKS(((uint32_t)bbr->rc_max_rto_sec) * 1000));
 
 	/*
 	 * We received an ack for a packet that wasn't retransmitted; it is
 	 * probably safe to discard any error indications we've received
 	 * recently.  This isn't quite right, but close enough for now (a
 	 * route might have failed after we sent a segment, and the return
 	 * path might not be symmetrical).
 	 */
 	tp->t_softerror = 0;
 	rtt = (TICKS_2_USEC(bbr->rc_tp->t_srtt) >> TCP_RTT_SHIFT);
 	if (bbr->r_ctl.bbr_smallest_srtt_this_state > rtt)
 		bbr->r_ctl.bbr_smallest_srtt_this_state = rtt;
 }
 
 static void
 bbr_set_reduced_rtt(struct tcp_bbr *bbr, uint32_t cts, uint32_t line)
 {
 	bbr->r_ctl.rc_rtt_shrinks = cts;
 	if (bbr_can_force_probertt &&
 	    (TSTMP_GT(cts, bbr->r_ctl.last_in_probertt)) &&
 	    ((cts - bbr->r_ctl.last_in_probertt) > bbr->r_ctl.rc_probertt_int)) {
 		/*
 		 * We should enter probe-rtt; it's been too long
 		 * since we have been there.
 		 */
 		bbr_enter_probe_rtt(bbr, cts, __LINE__);
 	} else
 		bbr_check_probe_rtt_limits(bbr, cts);
 }
 
 static void
 tcp_bbr_commit_bw(struct tcp_bbr *bbr, uint32_t cts)
 {
 	uint64_t orig_bw;
 
 	if (bbr->r_ctl.rc_bbr_cur_del_rate == 0) {
 		/* We never apply a zero measurement */
 		bbr_log_type_bbrupd(bbr, 20, cts, 0, 0,
 				    0, 0, 0, 0, 0, 0);
 		return;
 	}
 	if (bbr->r_ctl.r_measurement_count < 0xffffffff)
 		bbr->r_ctl.r_measurement_count++;
 	orig_bw = get_filter_value(&bbr->r_ctl.rc_delrate);
 	apply_filter_max(&bbr->r_ctl.rc_delrate, bbr->r_ctl.rc_bbr_cur_del_rate, bbr->r_ctl.rc_pkt_epoch);
 	bbr_log_type_bbrupd(bbr, 21, cts, (uint32_t)orig_bw,
 			    (uint32_t)get_filter_value(&bbr->r_ctl.rc_delrate),
 			    0, 0, 0, 0, 0, 0);
 	if (orig_bw &&
 	    (orig_bw != get_filter_value(&bbr->r_ctl.rc_delrate))) {
 		if (bbr->bbr_hdrw_pacing) {
 			/*
 			 * Apply a new rate to the hardware
 			 * possibly.
 			 */
 			bbr_update_hardware_pacing_rate(bbr, cts);
 		}
 		bbr_set_state_target(bbr, __LINE__);
 		tcp_bbr_tso_size_check(bbr, cts);
 		if (bbr->r_recovery_bw)  {
 			bbr_setup_red_bw(bbr, cts);
 			bbr_log_type_bw_reduce(bbr, BBR_RED_BW_USELRBW);
 		}
 	} else if ((orig_bw == 0) && get_filter_value(&bbr->r_ctl.rc_delrate))
 		tcp_bbr_tso_size_check(bbr, cts);
 }
 
 static void
 bbr_nf_measurement(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, uint32_t rtt, uint32_t cts)
 {
 	if (bbr->rc_in_persist == 0) {
 		/* We log only when not in persist */
 		/* Translate to a Bytes Per Second */
 		uint64_t tim, bw, ts_diff, ts_bw;
 		uint32_t delivered;
 
 		if (TSTMP_GT(bbr->r_ctl.rc_del_time, rsm->r_del_time))
 			tim = (uint64_t)(bbr->r_ctl.rc_del_time - rsm->r_del_time);
 		else
 			tim = 1;
 		/*
 		 * Now that we have processed the tim (skipping the sample
 		 * or possibly updating the time), go ahead and
 		 * calculate the cdr.
 		 */
 		delivered = (bbr->r_ctl.rc_delivered - rsm->r_delivered);
 		bw = (uint64_t)delivered;
 		bw *= (uint64_t)USECS_IN_SECOND;
 		bw /= tim;
 		if (bw == 0) {
 			/* We must have a calculable amount */
 			return;
 		}
 		/*
 		 * If we are using this b/w, shove it in now so we
 		 * can see in the trace viewer if it gets overridden.
 		 */
 		if (rsm->r_ts_valid &&
 		    bbr->rc_ts_valid &&
 		    bbr->rc_ts_clock_set &&
 		    (bbr->rc_ts_cant_be_used == 0) &&
 		    bbr->rc_use_ts_limit) {
 			ts_diff = max((bbr->r_ctl.last_inbound_ts - rsm->r_del_ack_ts), 1);
 			ts_diff *= bbr->r_ctl.bbr_peer_tsratio;
 			if ((delivered == 0) ||
 			    (rtt < 1000)) {
 				/* Can't use the ts */
 				bbr_log_type_bbrupd(bbr, 61, cts,
 						    ts_diff,
 						    bbr->r_ctl.last_inbound_ts,
 						    rsm->r_del_ack_ts, 0,
 						    0, 0, 0, delivered);
 			} else {
 				ts_bw = (uint64_t)delivered;
 				ts_bw *= (uint64_t)USECS_IN_SECOND;
 				ts_bw /= ts_diff;
 				bbr_log_type_bbrupd(bbr, 62, cts,
 						    (ts_bw >> 32),
 						    (ts_bw & 0xffffffff), 0, 0,
 						    0, 0, ts_diff, delivered);
 				if ((bbr->ts_can_raise) &&
 				    (ts_bw > bw)) {
 					bbr_log_type_bbrupd(bbr, 8, cts,
 							    delivered,
 							    ts_diff,
 							    (bw >> 32),
 							    (bw & 0x00000000ffffffff),
 							    0, 0, 0, 0);
 					bw = ts_bw;
 				} else if (ts_bw && (ts_bw < bw)) {
 					bbr_log_type_bbrupd(bbr, 7, cts,
 							    delivered,
 							    ts_diff,
 							    (bw >> 32),
 							    (bw & 0x00000000ffffffff),
 							    0, 0, 0, 0);
 					bw = ts_bw;
 				}
 			}
 		}
 		if (rsm->r_first_sent_time &&
 		    TSTMP_GT(rsm->r_tim_lastsent[(rsm->r_rtr_cnt -1)],rsm->r_first_sent_time)) {
 			uint64_t sbw, sti;
 			/*
 			 * We use what was in flight at the time of our
 			 * send and the size of this send to figure
 			 * out what rate we have been sending at (amount).
 			 * For the time, we take the span from the send
 			 * of the first outstanding send until this send,
 			 * plus this send's pacing time. This gives us a
 			 * good calculation of the rate we have been
 			 * sending at.
 			 */
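 			/*
 			 * Illustrative example (assumed numbers): with
 			 * r_flight_at_send = 50,000 bytes and sti = 10,000
 			 * usec (send spread plus pacing delay), sbw works
 			 * out to 50,000 * 1,000,000 / 10,000 = 5,000,000
 			 * bytes/sec.
 			 */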
 
 			sbw = (uint64_t)(rsm->r_flight_at_send);
 			sbw *= (uint64_t)USECS_IN_SECOND;
 			sti = rsm->r_tim_lastsent[(rsm->r_rtr_cnt -1)] - rsm->r_first_sent_time;
 			sti += rsm->r_pacing_delay;
 			sbw /= sti;
 			if (sbw < bw) {
 				bbr_log_type_bbrupd(bbr, 6, cts,
 						    delivered,
 						    (uint32_t)sti,
 						    (bw >> 32),
 						    (uint32_t)bw,
 						    rsm->r_first_sent_time, 0, (sbw >> 32),
 						    (uint32_t)sbw);
 				bw = sbw;
 			}
 		}
 		/* Use the google algorithm for b/w measurements */
 		bbr->r_ctl.rc_bbr_cur_del_rate = bw;
 		if ((rsm->r_app_limited == 0) ||
 		    (bw > get_filter_value(&bbr->r_ctl.rc_delrate))) {
 			tcp_bbr_commit_bw(bbr, cts);
 			bbr_log_type_bbrupd(bbr, 10, cts, (uint32_t)tim, delivered,
 					    0, 0, 0, 0,  bbr->r_ctl.rc_del_time,  rsm->r_del_time);
 		}
 	}
 }
 
 static void
 bbr_google_measurement(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, uint32_t rtt, uint32_t cts)
 {
 	if (bbr->rc_in_persist == 0) {
 		/* We log only when not in persist */
 		/* Translate to a Bytes Per Second */
 		uint64_t tim, bw;
 		uint32_t delivered;
 		int no_apply = 0;
 
 		if (TSTMP_GT(bbr->r_ctl.rc_del_time, rsm->r_del_time))
 			tim = (uint64_t)(bbr->r_ctl.rc_del_time - rsm->r_del_time);
 		else
 			tim = 1;
 		/*
 		 * Now that we have processed the tim (skipping the sample
 		 * or possibly updating the time), go ahead and
 		 * calculate the cdr.
 		 */
 		delivered = (bbr->r_ctl.rc_delivered - rsm->r_delivered);
 		bw = (uint64_t)delivered;
 		bw *= (uint64_t)USECS_IN_SECOND;
 		bw /= tim;
 		if (tim < bbr->r_ctl.rc_lowest_rtt) {
 			bbr_log_type_bbrupd(bbr, 99, cts, (uint32_t)tim, delivered,
 					    tim, bbr->r_ctl.rc_lowest_rtt, 0, 0, 0, 0);
 
 			no_apply = 1;
 		}
 		/*
 		 * If we are using this b/w, shove it in now so we
 		 * can see in the trace viewer if it gets overridden.
 		 */
 		bbr->r_ctl.rc_bbr_cur_del_rate = bw;
 		/* Gate by the sending rate */
 		if (rsm->r_first_sent_time &&
 		    TSTMP_GT(rsm->r_tim_lastsent[(rsm->r_rtr_cnt -1)],rsm->r_first_sent_time)) {
 			uint64_t sbw, sti;
 			/*
 			 * We use what was in flight at the time of our
 			 * send and the size of this send to figure
 			 * out what rate we have been sending at (amount).
 			 * For the time, we take the span from the send
 			 * of the first outstanding send until this send,
 			 * plus this send's pacing time. This gives us a
 			 * good calculation of the rate we have been
 			 * sending at.
 			 */
 
 			sbw = (uint64_t)(rsm->r_flight_at_send);
 			sbw *= (uint64_t)USECS_IN_SECOND;
 			sti = rsm->r_tim_lastsent[(rsm->r_rtr_cnt -1)] - rsm->r_first_sent_time;
 			sti += rsm->r_pacing_delay;
 			sbw /= sti;
 			if (sbw < bw) {
 				bbr_log_type_bbrupd(bbr, 6, cts,
 						    delivered,
 						    (uint32_t)sti,
 						    (bw >> 32),
 						    (uint32_t)bw,
 						    rsm->r_first_sent_time, 0, (sbw >> 32),
 						    (uint32_t)sbw);
 				bw = sbw;
 			}
 			if ((sti > tim) &&
 			    (sti < bbr->r_ctl.rc_lowest_rtt)) {
 				bbr_log_type_bbrupd(bbr, 99, cts, (uint32_t)tim, delivered,
 						    (uint32_t)sti, bbr->r_ctl.rc_lowest_rtt, 0, 0, 0, 0);
 				no_apply = 1;
 			} else
 				no_apply = 0;
 		}
 		bbr->r_ctl.rc_bbr_cur_del_rate = bw;
 		if ((no_apply == 0) &&
 		    ((rsm->r_app_limited == 0) ||
 		     (bw > get_filter_value(&bbr->r_ctl.rc_delrate)))) {
 			tcp_bbr_commit_bw(bbr, cts);
 			bbr_log_type_bbrupd(bbr, 10, cts, (uint32_t)tim, delivered,
 					    0, 0, 0, 0, bbr->r_ctl.rc_del_time,  rsm->r_del_time);
 		}
 	}
 }
 
 static void
 bbr_update_bbr_info(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, uint32_t rtt, uint32_t cts, uint32_t tsin,
     uint32_t uts, int32_t match, uint32_t rsm_send_time, int32_t ack_type, struct tcpopt *to)
 {
 	uint64_t old_rttprop;
 
 	/* Update our delivery time and amount */
 	bbr->r_ctl.rc_delivered += (rsm->r_end - rsm->r_start);
 	bbr->r_ctl.rc_del_time = cts;
 	if (rtt == 0) {
 		/*
 		 * 0 means it's a retransmit; for now we don't use these for
 		 * the rest of BBR.
 		 */
 		return;
 	}
 	if ((bbr->rc_use_google == 0) &&
 	    (match != BBR_RTT_BY_EXACTMATCH) &&
 	    (match != BBR_RTT_BY_TIMESTAMP)){
 		/*
 		 * We get a lot of rtt updates; let's not pay attention to
 		 * any that are not an exact match. That way we don't have
 		 * to worry about timestamps and the whole nonsense of being
 		 * unsure if it's a retransmission etc (if we ever had the
 		 * timestamp fixed to always have the last thing sent this
 		 * would not be an issue).
 		 */
 		return;
 	}
 	if ((bbr_no_retran && bbr->rc_use_google) &&
 	    (match != BBR_RTT_BY_EXACTMATCH) &&
 	    (match != BBR_RTT_BY_TIMESTAMP)){
 		/*
 		 * We only do measurements in google mode
 		 * with bbr_no_retran on for sure things.
 		 */
 		return;
 	}
 	/* Only update srtt if we know by exact match */
 	tcp_bbr_xmit_timer(bbr, rtt, rsm_send_time, rsm->r_start, tsin);
 	if (ack_type == BBR_CUM_ACKED)
 		bbr->rc_ack_is_cumack = 1;
 	else
 		bbr->rc_ack_is_cumack = 0;
 	old_rttprop = bbr_get_rtt(bbr, BBR_RTT_PROP);
 	/*
 	 * Note the following code differs from the original
 	 * BBR spec. The spec calls for <=, not <. However, after a
 	 * long discussion in email with Neal, he acknowledged
 	 * that it should be < so that we will have flows
 	 * going into probe-rtt (we were seeing cases where that
 	 * did not happen and caused ugly things to occur). We
 	 * have added this agreed-upon fix to our code base.
 	 */
 	if (rtt < old_rttprop) {
 		/* Update when we last saw a rtt drop */
 		bbr_log_rtt_shrinks(bbr, cts, 0, rtt, __LINE__, BBR_RTTS_NEWRTT, 0);
 		bbr_set_reduced_rtt(bbr, cts, __LINE__);
 	}
 	bbr_log_type_bbrrttprop(bbr, rtt, (rsm ? rsm->r_end : 0), uts, cts,
 	    match, rsm->r_start, rsm->r_flags);
 	apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, cts);
 	if (old_rttprop != bbr_get_rtt(bbr, BBR_RTT_PROP)) {
 		/*
 		 * The RTT-prop moved, reset the target (may be a
 		 * nop for some states).
 		 */
 		bbr_set_state_target(bbr, __LINE__);
 		if (bbr->rc_bbr_state == BBR_STATE_PROBE_RTT)
 			bbr_log_rtt_shrinks(bbr, cts, 0, 0,
 					    __LINE__, BBR_RTTS_NEW_TARGET, 0);
 		else if (old_rttprop < bbr_get_rtt(bbr, BBR_RTT_PROP))
 			/* It went up */
 			bbr_check_probe_rtt_limits(bbr, cts);
 	}
 	if ((bbr->rc_use_google == 0) &&
 	    (match == BBR_RTT_BY_TIMESTAMP)) {
 		/*
 		 * We don't do b/w updates with
 		 * these since they are not really
 		 * reliable.
 		 */
 		return;
 	}
 	if (bbr->r_ctl.r_app_limited_until &&
 	    (bbr->r_ctl.rc_delivered >= bbr->r_ctl.r_app_limited_until)) {
 		/* We are no longer app-limited */
 		bbr->r_ctl.r_app_limited_until = 0;
 	}
 	if (bbr->rc_use_google) {
 		bbr_google_measurement(bbr, rsm, rtt, cts);
 	} else {
 		bbr_nf_measurement(bbr, rsm, rtt, cts);
 	}
 }
 
 /*
  * Convert a timestamp that the main stack
  * uses (milliseconds) into one that bbr uses
  * (microseconds). Return that converted timestamp.
  */
 static uint32_t
 bbr_ts_convert(uint32_t cts) {
 	uint32_t sec, msec;
 
 	sec = cts / MS_IN_USEC;
 	msec = cts - (MS_IN_USEC * sec);
 	return ((sec * USECS_IN_SECOND) + (msec * MS_IN_USEC));
 }
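 /*
  * For illustration (assuming MS_IN_USEC is 1000 and USECS_IN_SECOND is
  * 1000000): bbr_ts_convert(1234) splits 1234 ms into sec = 1 and
  * msec = 234 and returns 1,000,000 + 234,000 = 1,234,000 usec.
  */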
 
 /*
  * Return 0 if we did not update the RTT time, return
  * 1 if we did.
  */
 static int
 bbr_update_rtt(struct tcpcb *tp, struct tcp_bbr *bbr,
     struct bbr_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, uint32_t th_ack)
 {
 	int32_t i;
 	uint32_t t, uts = 0;
 
 	if ((rsm->r_flags & BBR_ACKED) ||
 	    (rsm->r_flags & BBR_WAS_RENEGED) ||
 	    (rsm->r_flags & BBR_RXT_CLEARED)) {
 		/* Already done */
 		return (0);
 	}
 	if (rsm->r_rtt_not_allowed) {
 		/* Not allowed */
 		return (0);
 	}
 	if (rsm->r_rtr_cnt == 1) {
 		/*
 		 * Only one transmit. Hopefully the normal case.
 		 */
 		if (TSTMP_GT(cts, rsm->r_tim_lastsent[0]))
 			t = cts - rsm->r_tim_lastsent[0];
 		else
 			t = 1;
 		if ((int)t <= 0)
 			t = 1;
 		bbr->r_ctl.rc_last_rtt = t;
 		bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, 0,
 				    BBR_RTT_BY_EXACTMATCH, rsm->r_tim_lastsent[0], ack_type, to);
 		return (1);
 	}
 	/* Convert to usecs */
 	if ((bbr_can_use_ts_for_rtt == 1) &&
 	    (bbr->rc_use_google == 1) &&
 	    (ack_type == BBR_CUM_ACKED) &&
 	    (to->to_flags & TOF_TS) &&
 	    (to->to_tsecr != 0)) {
 		t = tcp_tv_to_mssectick(&bbr->rc_tv) - to->to_tsecr;
 		if (t < 1)
 			t = 1;
 		t *= MS_IN_USEC;
 		bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, 0,
 				    BBR_RTT_BY_TIMESTAMP,
 				    rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)],
 				    ack_type, to);
 		return (1);
 	}
 	uts = bbr_ts_convert(to->to_tsecr);
 	if ((to->to_flags & TOF_TS) &&
 	    (to->to_tsecr != 0) &&
 	    (ack_type == BBR_CUM_ACKED) &&
 	    ((rsm->r_flags & BBR_OVERMAX) == 0)) {
 		/*
 		 * Now which timestamp does it match? In this block the ACK
 		 * may be coming from a previous transmission.
 		 */
 		uint32_t fudge;
 
 		fudge = BBR_TIMER_FUDGE;
 		for (i = 0; i < rsm->r_rtr_cnt; i++) {
 			if ((SEQ_GEQ(uts, (rsm->r_tim_lastsent[i] - fudge))) &&
 			    (SEQ_LEQ(uts, (rsm->r_tim_lastsent[i] + fudge)))) {
 				if (TSTMP_GT(cts, rsm->r_tim_lastsent[i]))
 					t = cts - rsm->r_tim_lastsent[i];
 				else
 					t = 1;
 				if ((int)t <= 0)
 					t = 1;
 				bbr->r_ctl.rc_last_rtt = t;
 				bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, uts, BBR_RTT_BY_TSMATCHING,
 						    rsm->r_tim_lastsent[i], ack_type, to);
 				if ((i + 1) < rsm->r_rtr_cnt) {
 					/* Likely */
 					return (0);
 				} else if (rsm->r_flags & BBR_TLP) {
 					bbr->rc_tlp_rtx_out = 0;
 				}
 				return (1);
 			}
 		}
 		/* Fall through if we can't find a matching timestamp */
 	}
 	/*
 	 * Ok, it's a SACK block that we retransmitted, or a Windows
 	 * machine without timestamps. We can tell nothing from the
 	 * timestamp since it's not there, or it is the time the peer last
 	 * received a segment that moved forward its cum-ack point.
 	 *
 	 * Let's look at the last retransmit and see what we can tell
 	 * (with BBR, to save space, we only keep 2; note we have to keep
 	 * at least 2 so the map can not be condensed more).
 	 */
 	i = rsm->r_rtr_cnt - 1;
 	if (TSTMP_GT(cts, rsm->r_tim_lastsent[i]))
 		t = cts - rsm->r_tim_lastsent[i];
 	else
 		goto not_sure;
 	if (t < bbr->r_ctl.rc_lowest_rtt) {
 		/*
 		 * We retransmitted and the ack came back in less
 		 * than the smallest rtt we have observed in the
 		 * windowed rtt. We most likely did an improper
 		 * retransmit as outlined in 4.2 Step 3 point 2 in
 		 * the rack-draft.
 		 *
 		 * Use the prior transmission to update all the
 		 * information as long as there is only one prior
 		 * transmission.
 		 */
 		if ((rsm->r_flags & BBR_OVERMAX) == 0) {
 #ifdef BBR_INVARIANTS
 			if (rsm->r_rtr_cnt == 1)
 				panic("rsm:%p bbr:%p rsm has overmax and only 1 retranmit flags:%x?", rsm, bbr, rsm->r_flags);
 #endif
 			i = rsm->r_rtr_cnt - 2;
 			if (TSTMP_GT(cts, rsm->r_tim_lastsent[i]))
 				t = cts - rsm->r_tim_lastsent[i];
 			else
 				t = 1;
 			bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, uts, BBR_RTT_BY_EARLIER_RET,
 					    rsm->r_tim_lastsent[i], ack_type, to);
 			return (0);
 		} else {
 			/*
 			 * Too many prior transmissions; just
 			 * update BBR delivered
 			 */
 not_sure:
 			bbr_update_bbr_info(bbr, rsm, 0, cts, to->to_tsecr, uts,
 					    BBR_RTT_BY_SOME_RETRAN, 0, ack_type, to);
 		}
 	} else {
 		/*
 		 * We retransmitted it and the retransmit did the
 		 * job.
 		 */
 		if (rsm->r_flags & BBR_TLP)
 			bbr->rc_tlp_rtx_out = 0;
 		if ((rsm->r_flags & BBR_OVERMAX) == 0)
 			bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, uts,
 					    BBR_RTT_BY_THIS_RETRAN, 0, ack_type, to);
 		else
 			bbr_update_bbr_info(bbr, rsm, 0, cts, to->to_tsecr, uts,
 					    BBR_RTT_BY_SOME_RETRAN, 0, ack_type, to);
 		return (1);
 	}
 	return (0);
 }
 
 /*
  * Mark the SACK_PASSED flag on all entries prior to rsm send wise.
  */
 static void
 bbr_log_sack_passed(struct tcpcb *tp,
     struct tcp_bbr *bbr, struct bbr_sendmap *rsm)
 {
 	struct bbr_sendmap *nrsm;
 
 	nrsm = rsm;
 	TAILQ_FOREACH_REVERSE_FROM(nrsm, &bbr->r_ctl.rc_tmap,
 	    bbr_head, r_tnext) {
 		if (nrsm == rsm) {
 			/* Skip the original segment, it is acked */
 			continue;
 		}
 		if (nrsm->r_flags & BBR_ACKED) {
 			/* Skip ack'd segments */
 			continue;
 		}
 		if (nrsm->r_flags & BBR_SACK_PASSED) {
 			/*
 			 * We found one that is already marked
 			 * passed, we have been here before and
 			 * so all others below this are marked.
 			 */
 			break;
 		}
 		BBR_STAT_INC(bbr_sack_passed);
 		nrsm->r_flags |= BBR_SACK_PASSED;
 		if (((nrsm->r_flags & BBR_MARKED_LOST) == 0) &&
 		    bbr_is_lost(bbr, nrsm, bbr->r_ctl.rc_rcvtime)) {
 			bbr->r_ctl.rc_lost += nrsm->r_end - nrsm->r_start;
 			bbr->r_ctl.rc_lost_bytes += nrsm->r_end - nrsm->r_start;
 			nrsm->r_flags |= BBR_MARKED_LOST;
 		}
 		nrsm->r_flags &= ~BBR_WAS_SACKPASS;
 	}
 }
 
 /*
  * Returns the number of bytes that were
  * newly ack'd by sack blocks.
  */
 static uint32_t
 bbr_proc_sack_blk(struct tcpcb *tp, struct tcp_bbr *bbr, struct sackblk *sack,
     struct tcpopt *to, struct bbr_sendmap **prsm, uint32_t cts)
 {
 	int32_t times = 0;
 	uint32_t start, end, changed = 0;
 	struct bbr_sendmap *rsm, *nrsm;
 	int32_t used_ref = 1;
 	uint8_t went_back = 0, went_fwd = 0;
 
 	start = sack->start;
 	end = sack->end;
 	rsm = *prsm;
 	if (rsm == NULL)
 		used_ref = 0;
 
 	/* Do we locate the block behind where we last were? */
 	if (rsm && SEQ_LT(start, rsm->r_start)) {
 		went_back = 1;
 		TAILQ_FOREACH_REVERSE_FROM(rsm, &bbr->r_ctl.rc_map, bbr_head, r_next) {
 			if (SEQ_GEQ(start, rsm->r_start) &&
 			    SEQ_LT(start, rsm->r_end)) {
 				goto do_rest_ofb;
 			}
 		}
 	}
 start_at_beginning:
 	went_fwd = 1;
 	/*
 	 * Ok, let's locate the block where this guy is, forward from rsm
 	 * (if it's set).
 	 */
 	TAILQ_FOREACH_FROM(rsm, &bbr->r_ctl.rc_map, r_next) {
 		if (SEQ_GEQ(start, rsm->r_start) &&
 		    SEQ_LT(start, rsm->r_end)) {
 			break;
 		}
 	}
 do_rest_ofb:
 	if (rsm == NULL) {
 		/*
 		 * This happens when we get duplicate sack blocks with the
 		 * same end. For example SACK 4: 100, SACK 3: 100. The sort
 		 * will not change their location, so we would just start at
 		 * the end of the first one and get lost.
 		 */
 		if (tp->t_flags & TF_SENTFIN) {
 			/*
 			 * Check to see if we have not logged the FIN that
 			 * went out.
 			 */
 			nrsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_map, bbr_sendmap, r_next);
 			if (nrsm && (nrsm->r_end + 1) == tp->snd_max) {
 				/*
 				 * Ok we did not get the FIN logged.
 				 */
 				nrsm->r_end++;
 				rsm = nrsm;
 				goto do_rest_ofb;
 			}
 		}
 		if (times == 1) {
 #ifdef BBR_INVARIANTS
 			panic("tp:%p bbr:%p sack:%p to:%p prsm:%p",
 			    tp, bbr, sack, to, prsm);
 #else
 			goto out;
 #endif
 		}
 		times++;
 		BBR_STAT_INC(bbr_sack_proc_restart);
 		rsm = NULL;
 		goto start_at_beginning;
 	}
 	/* Ok we have an ACK for some piece of rsm */
 	if (rsm->r_start != start) {
 		/*
 		 * Need to split this into two pieces, the before and after.
 		 */
 		if (bbr_sack_mergable(rsm, start, end))
 			nrsm = bbr_alloc_full_limit(bbr);
 		else
 			nrsm = bbr_alloc_limit(bbr, BBR_LIMIT_TYPE_SPLIT);
 		if (nrsm == NULL) {
 			/* We could not allocate; ignore the sack */
 			struct sackblk blk;
 
 			blk.start = start;
 			blk.end = end;
 			sack_filter_reject(&bbr->r_ctl.bbr_sf, &blk);
 			goto out;
 		}
 		bbr_clone_rsm(bbr, nrsm, rsm, start);
 		TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next);
 		if (rsm->r_in_tmap) {
 			TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 			nrsm->r_in_tmap = 1;
 		}
 		rsm->r_flags &= (~BBR_HAS_FIN);
 		rsm = nrsm;
 	}
 	if (SEQ_GEQ(end, rsm->r_end)) {
 		/*
 		 * The end of this block is either beyond this guy or right
 		 * at this guy.
 		 */
 		if ((rsm->r_flags & BBR_ACKED) == 0) {
 			bbr_update_rtt(tp, bbr, rsm, to, cts, BBR_SACKED, 0);
 			changed += (rsm->r_end - rsm->r_start);
 			bbr->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
 			bbr_log_sack_passed(tp, bbr, rsm);
 			if (rsm->r_flags & BBR_MARKED_LOST) {
 				bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start;
 			}
			/* Is reordering occurring? */
 			if (rsm->r_flags & BBR_SACK_PASSED) {
 				BBR_STAT_INC(bbr_reorder_seen);
 				bbr->r_ctl.rc_reorder_ts = cts;
 				if (rsm->r_flags & BBR_MARKED_LOST) {
 					bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start;
 					if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost))
 						/* LT sampling also needs adjustment */
 						bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost;
 				}
 			}
 			rsm->r_flags |= BBR_ACKED;
 			rsm->r_flags &= ~(BBR_TLP|BBR_WAS_RENEGED|BBR_RXT_CLEARED|BBR_MARKED_LOST);
 			if (rsm->r_in_tmap) {
 				TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 				rsm->r_in_tmap = 0;
 			}
 		}
 		bbr_isit_a_pkt_epoch(bbr, cts, rsm, __LINE__, BBR_SACKED);
 		if (end == rsm->r_end) {
 			/* This block only - done */
 			goto out;
 		}
		/* There is more not covered by this rsm, move on */
 		start = rsm->r_end;
 		nrsm = TAILQ_NEXT(rsm, r_next);
 		rsm = nrsm;
 		times = 0;
 		goto do_rest_ofb;
 	}
 	if (rsm->r_flags & BBR_ACKED) {
 		/* Been here done that */
 		goto out;
 	}
 	/* Ok we need to split off this one at the tail */
 	if (bbr_sack_mergable(rsm, start, end))
 		nrsm = bbr_alloc_full_limit(bbr);
 	else
 		nrsm = bbr_alloc_limit(bbr, BBR_LIMIT_TYPE_SPLIT);
 	if (nrsm == NULL) {
		/* failed XXXrrs what can we do but lose the sack info? */
 		struct sackblk blk;
 
 		blk.start = start;
 		blk.end = end;
 		sack_filter_reject(&bbr->r_ctl.bbr_sf, &blk);
 		goto out;
 	}
 	/* Clone it */
 	bbr_clone_rsm(bbr, nrsm, rsm, end);
 	/* The sack block does not cover this guy fully */
 	rsm->r_flags &= (~BBR_HAS_FIN);
 	TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next);
 	if (rsm->r_in_tmap) {
 		TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 		nrsm->r_in_tmap = 1;
 	}
 	nrsm->r_dupack = 0;
 	bbr_update_rtt(tp, bbr, rsm, to, cts, BBR_SACKED, 0);
 	bbr_isit_a_pkt_epoch(bbr, cts, rsm, __LINE__, BBR_SACKED);
 	changed += (rsm->r_end - rsm->r_start);
 	bbr->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
 	bbr_log_sack_passed(tp, bbr, rsm);
	if (rsm->r_flags & BBR_MARKED_LOST) {
		bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start;
	}
	/* Is reordering occurring? */
	if (rsm->r_flags & BBR_SACK_PASSED) {
 		BBR_STAT_INC(bbr_reorder_seen);
 		bbr->r_ctl.rc_reorder_ts = cts;
 		if (rsm->r_flags & BBR_MARKED_LOST) {
 			bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start;
 			if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost))
 				/* LT sampling also needs adjustment */
 				bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost;
 		}
 	}
 	rsm->r_flags &= ~(BBR_TLP|BBR_WAS_RENEGED|BBR_RXT_CLEARED|BBR_MARKED_LOST);
 	rsm->r_flags |= BBR_ACKED;
 	if (rsm->r_in_tmap) {
 		TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 		rsm->r_in_tmap = 0;
 	}
 out:
 	if (rsm && (rsm->r_flags & BBR_ACKED)) {
 		/*
 		 * Now can we merge this newly acked
 		 * block with either the previous or
 		 * next block?
 		 */
 		nrsm = TAILQ_NEXT(rsm, r_next);
 		if (nrsm &&
 		    (nrsm->r_flags & BBR_ACKED)) {
 			/* yep this and next can be merged */
 			rsm = bbr_merge_rsm(bbr, rsm, nrsm);
 		}
 		/* Now what about the previous? */
 		nrsm = TAILQ_PREV(rsm, bbr_head, r_next);
 		if (nrsm &&
 		    (nrsm->r_flags & BBR_ACKED)) {
 			/* yep the previous and this can be merged */
 			rsm = bbr_merge_rsm(bbr, nrsm, rsm);
 		}
 	}
 	if (used_ref == 0) {
 		BBR_STAT_INC(bbr_sack_proc_all);
 	} else {
 		BBR_STAT_INC(bbr_sack_proc_short);
 	}
 	if (went_fwd && went_back) {
 		BBR_STAT_INC(bbr_sack_search_both);
 	} else if (went_fwd) {
 		BBR_STAT_INC(bbr_sack_search_fwd);
 	} else if (went_back) {
 		BBR_STAT_INC(bbr_sack_search_back);
 	}
 	/* Save off where the next seq is */
 	if (rsm)
 		bbr->r_ctl.rc_sacklast = TAILQ_NEXT(rsm, r_next);
 	else
 		bbr->r_ctl.rc_sacklast = NULL;
 	*prsm = rsm;
 	return (changed);
 }
 
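/*
 * The peer has reneged on data it previously SACKed. Walk forward from
 * rsm, clearing the SACK related state on each entry, putting the entries
 * back on the transmit map in order, and finally clear the SACK filter at
 * th_ack so SACKs covering this area are recognized again.
 */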
 static void inline
 bbr_peer_reneges(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, tcp_seq th_ack)
 {
 	struct bbr_sendmap *tmap;
 
 	BBR_STAT_INC(bbr_reneges_seen);
 	tmap = NULL;
 	while (rsm && (rsm->r_flags & BBR_ACKED)) {
		/* It's no longer sacked, mark it so */
 		uint32_t oflags;
 		bbr->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
 #ifdef BBR_INVARIANTS
 		if (rsm->r_in_tmap) {
 			panic("bbr:%p rsm:%p flags:0x%x in tmap?",
 			    bbr, rsm, rsm->r_flags);
 		}
 #endif
 		oflags = rsm->r_flags;
 		if (rsm->r_flags & BBR_MARKED_LOST) {
 			bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start;
 			bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start;
 			if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost))
 				/* LT sampling also needs adjustment */
 				bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost;
 		}
 		rsm->r_flags &= ~(BBR_ACKED | BBR_SACK_PASSED | BBR_WAS_SACKPASS | BBR_MARKED_LOST);
 		rsm->r_flags |= BBR_WAS_RENEGED;
 		rsm->r_flags |= BBR_RXT_CLEARED;
 		bbr_log_type_rsmclear(bbr, bbr->r_ctl.rc_rcvtime, rsm, oflags, __LINE__);
 		/* Rebuild it into our tmap */
 		if (tmap == NULL) {
 			TAILQ_INSERT_HEAD(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 			tmap = rsm;
 		} else {
 			TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, tmap, rsm, r_tnext);
 			tmap = rsm;
 		}
 		tmap->r_in_tmap = 1;
 		/*
 		 * XXXrrs Delivered? Should we do anything here?
 		 *
		 * Of course we don't on an rxt timeout so maybe it's ok that
		 * we don't?
		 *
		 * For now let's not.
 		 */
 		rsm = TAILQ_NEXT(rsm, r_next);
 	}
 	/*
	 * Now let's possibly clear the sack filter so we start recognizing
 	 * sacks that cover this area.
 	 */
 	sack_filter_clear(&bbr->r_ctl.bbr_sf, th_ack);
 }
 
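/*
 * The SYN has been acknowledged. If the send map entry covers only the
 * SYN, take an RTT sample (when options are available) and free it;
 * otherwise (a TFO SYN carrying data) just strip the SYN bit and advance
 * r_start past it.
 */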
 static void
 bbr_log_syn(struct tcpcb *tp, struct tcpopt *to)
 {
 	struct tcp_bbr *bbr;
 	struct bbr_sendmap *rsm;
 	uint32_t cts;
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	cts = bbr->r_ctl.rc_rcvtime;
 	rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 	if (rsm && (rsm->r_flags & BBR_HAS_SYN)) {
 		if ((rsm->r_end - rsm->r_start) <= 1) {
 			/* Log out the SYN completely */
 			bbr->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
 			rsm->r_rtr_bytes = 0;
 			TAILQ_REMOVE(&bbr->r_ctl.rc_map, rsm, r_next);
 			if (rsm->r_in_tmap) {
 				TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 				rsm->r_in_tmap = 0;
 			}
 			if (bbr->r_ctl.rc_next == rsm) {
 				/* scoot along the marker */
 				bbr->r_ctl.rc_next = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 			}
 			if (to != NULL)
 				bbr_update_rtt(tp, bbr, rsm, to, cts, BBR_CUM_ACKED, 0);
 			bbr_free(bbr, rsm);
 		} else {
			/* There is more (Fast Open), strip out the SYN. */
 			rsm->r_flags &= ~BBR_HAS_SYN;
 			rsm->r_start++;
 		}
 	}
 }
 
 /*
  * Returns the number of bytes that were
  * acknowledged by SACK blocks.
  */
 
 static uint32_t
 bbr_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
     uint32_t *prev_acked)
 {
 	uint32_t changed, last_seq, entered_recovery = 0;
 	struct tcp_bbr *bbr;
 	struct bbr_sendmap *rsm;
 	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
 	register uint32_t th_ack;
 	int32_t i, j, k, new_sb, num_sack_blks = 0;
 	uint32_t cts, acked, ack_point, sack_changed = 0;
 	uint32_t p_maxseg, maxseg, p_acked = 0;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	if (tcp_get_flags(th) & TH_RST) {
 		/* We don't log resets */
 		return (0);
 	}
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	cts = bbr->r_ctl.rc_rcvtime;
 
 	rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 	changed = 0;
 	maxseg = tp->t_maxseg - bbr->rc_last_options;
 	p_maxseg = min(bbr->r_ctl.rc_pace_max_segs, maxseg);
 	th_ack = th->th_ack;
 	if (SEQ_GT(th_ack, tp->snd_una)) {
 		acked = th_ack - tp->snd_una;
 		bbr_log_progress_event(bbr, tp, ticks, PROGRESS_UPDATE, __LINE__);
 		bbr->rc_tp->t_acktime = ticks;
 	} else
 		acked = 0;
 	if (SEQ_LEQ(th_ack, tp->snd_una)) {
 		/* Only sent here for sack processing */
 		goto proc_sack;
 	}
 	if (rsm && SEQ_GT(th_ack, rsm->r_start)) {
 		changed = th_ack - rsm->r_start;
 	} else if ((rsm == NULL) && ((th_ack - 1) == tp->iss)) {
 		/*
 		 * For the SYN incoming case we will not have called
 		 * tcp_output for the sending of the SYN, so there will be
 		 * no map. All other cases should probably be a panic.
 		 */
 		if ((to->to_flags & TOF_TS) && (to->to_tsecr != 0)) {
 			/*
 			 * We have a timestamp that can be used to generate
 			 * an initial RTT.
 			 */
 			uint32_t ts, now, rtt;
 
 			ts = bbr_ts_convert(to->to_tsecr);
 			now = bbr_ts_convert(tcp_tv_to_mssectick(&bbr->rc_tv));
 			rtt = now - ts;
 			if (rtt < 1)
 				rtt = 1;
 			bbr_log_type_bbrrttprop(bbr, rtt,
 						tp->iss, 0, cts,
 						BBR_RTT_BY_TIMESTAMP, tp->iss, 0);
 			apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, cts);
 			changed = 1;
 			bbr->r_wanted_output = 1;
 			goto out;
 		}
 		goto proc_sack;
 	} else if (rsm == NULL) {
 		goto out;
 	}
 	if (changed) {
 		/*
 		 * The ACK point is advancing to th_ack, we must drop off
		 * the packets in the rack log and calculate any eligible
 		 * RTT's.
 		 */
 		bbr->r_wanted_output = 1;
 more:
 		if (rsm == NULL) {
 			if (tp->t_flags & TF_SENTFIN) {
				/* if we sent a FIN we will not have a map */
 				goto proc_sack;
 			}
 #ifdef BBR_INVARIANTS
 			panic("No rack map tp:%p for th:%p state:%d bbr:%p snd_una:%u snd_max:%u chg:%d\n",
 			    tp,
 			    th, tp->t_state, bbr,
 			    tp->snd_una, tp->snd_max, changed);
 #endif
 			goto proc_sack;
 		}
 	}
 	if (SEQ_LT(th_ack, rsm->r_start)) {
 		/* Huh map is missing this */
 #ifdef BBR_INVARIANTS
 		printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d bbr:%p\n",
 		    rsm->r_start,
 		    th_ack, tp->t_state,
 		    bbr->r_state, bbr);
 		panic("th-ack is bad bbr:%p tp:%p", bbr, tp);
 #endif
 		goto proc_sack;
 	} else if (th_ack == rsm->r_start) {
 		/* None here to ack */
 		goto proc_sack;
 	}
 	/*
 	 * Clear the dup ack counter, it will
 	 * either be freed or if there is some
 	 * remaining we need to start it at zero.
 	 */
 	rsm->r_dupack = 0;
 	/* Now do we consume the whole thing? */
 	if (SEQ_GEQ(th_ack, rsm->r_end)) {
		/* It's all consumed. */
 		uint32_t left;
 
 		if (rsm->r_flags & BBR_ACKED) {
 			/*
 			 * It was acked on the scoreboard -- remove it from
 			 * total
 			 */
 			p_acked += (rsm->r_end - rsm->r_start);
 			bbr->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
 			if (bbr->r_ctl.rc_sacked == 0)
 				bbr->r_ctl.rc_sacklast = NULL;
 		} else {
 			bbr_update_rtt(tp, bbr, rsm, to, cts, BBR_CUM_ACKED, th_ack);
 			if (rsm->r_flags & BBR_MARKED_LOST) {
 				bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start;
 			}
 			if (rsm->r_flags & BBR_SACK_PASSED) {
 				/*
				 * There are segments ACKED on the
				 * scoreboard further up; we are seeing
				 * reordering.
 				 */
 				BBR_STAT_INC(bbr_reorder_seen);
 				bbr->r_ctl.rc_reorder_ts = cts;
 				if (rsm->r_flags & BBR_MARKED_LOST) {
 					bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start;
 					if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost))
 						/* LT sampling also needs adjustment */
 						bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost;
 				}
 			}
 			rsm->r_flags &= ~BBR_MARKED_LOST;
 		}
 		bbr->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
 		rsm->r_rtr_bytes = 0;
 		TAILQ_REMOVE(&bbr->r_ctl.rc_map, rsm, r_next);
 		if (rsm->r_in_tmap) {
 			TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 			rsm->r_in_tmap = 0;
 		}
 		if (bbr->r_ctl.rc_next == rsm) {
 			/* scoot along the marker */
 			bbr->r_ctl.rc_next = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 		}
 		bbr_isit_a_pkt_epoch(bbr, cts, rsm, __LINE__, BBR_CUM_ACKED);
 		/* Adjust the packet counts */
 		left = th_ack - rsm->r_end;
 		/* Free back to zone */
 		bbr_free(bbr, rsm);
 		if (left) {
 			rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 			goto more;
 		}
 		goto proc_sack;
 	}
 	if (rsm->r_flags & BBR_ACKED) {
 		/*
 		 * It was acked on the scoreboard -- remove it from total
 		 * for the part being cum-acked.
 		 */
 		p_acked += (rsm->r_end - rsm->r_start);
 		bbr->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
 		if (bbr->r_ctl.rc_sacked == 0)
 			bbr->r_ctl.rc_sacklast = NULL;
 	} else {
 		/*
		 * It was acked up to the th_ack point for the first time.
 		 */
 		struct bbr_sendmap lrsm;
 
 		memcpy(&lrsm, rsm, sizeof(struct bbr_sendmap));
 		lrsm.r_end = th_ack;
 		bbr_update_rtt(tp, bbr, &lrsm, to, cts, BBR_CUM_ACKED, th_ack);
 	}
 	if ((rsm->r_flags & BBR_MARKED_LOST) &&
 	    ((rsm->r_flags & BBR_ACKED) == 0)) {
 		/*
 		 * It was marked lost and partly ack'd now
 		 * for the first time. We lower the rc_lost_bytes
 		 * and still leave it MARKED.
 		 */
 		bbr->r_ctl.rc_lost_bytes -= th_ack - rsm->r_start;
 	}
 	bbr_isit_a_pkt_epoch(bbr, cts, rsm, __LINE__, BBR_CUM_ACKED);
 	bbr->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
 	rsm->r_rtr_bytes = 0;
 	/* adjust packet count */
 	rsm->r_start = th_ack;
 proc_sack:
 	/* Check for reneging */
 	rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 	if (rsm && (rsm->r_flags & BBR_ACKED) && (th_ack == rsm->r_start)) {
 		/*
 		 * The peer has moved snd_una up to the edge of this send,
		 * i.e. one that it had previously acked. The only way that
		 * can be true is if the peer threw away data (space issues)
		 * that it had previously sacked (else it would have given
		 * us snd_una up to rsm->r_end). We need to undo the acked
		 * markings here.
 		 *
 		 * Note we have to look to make sure th_ack is our
 		 * rsm->r_start in case we get an old ack where th_ack is
 		 * behind snd_una.
 		 */
 		bbr_peer_reneges(bbr, rsm, th->th_ack);
 	}
 	if ((to->to_flags & TOF_SACK) == 0) {
		/* We are done, nothing left to log */
 		goto out;
 	}
 	rsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_map, bbr_sendmap, r_next);
 	if (rsm) {
 		last_seq = rsm->r_end;
 	} else {
 		last_seq = tp->snd_max;
 	}
 	/* Sack block processing */
 	if (SEQ_GT(th_ack, tp->snd_una))
 		ack_point = th_ack;
 	else
 		ack_point = tp->snd_una;
 	for (i = 0; i < to->to_nsacks; i++) {
 		bcopy((to->to_sacks + i * TCPOLEN_SACK),
 		    &sack, sizeof(sack));
 		sack.start = ntohl(sack.start);
 		sack.end = ntohl(sack.end);
 		if (SEQ_GT(sack.end, sack.start) &&
 		    SEQ_GT(sack.start, ack_point) &&
 		    SEQ_LT(sack.start, tp->snd_max) &&
 		    SEQ_GT(sack.end, ack_point) &&
 		    SEQ_LEQ(sack.end, tp->snd_max)) {
 			if ((bbr->r_ctl.rc_num_small_maps_alloced > bbr_sack_block_limit) &&
 			    (SEQ_LT(sack.end, last_seq)) &&
 			    ((sack.end - sack.start) < (p_maxseg / 8))) {
 				/*
				 * Not the last piece and it's smaller than
 				 * 1/8th of a p_maxseg. We ignore this.
 				 */
 				BBR_STAT_INC(bbr_runt_sacks);
 				continue;
 			}
 			sack_blocks[num_sack_blks] = sack;
 			num_sack_blks++;
 		} else if (SEQ_LEQ(sack.start, th_ack) &&
 		    SEQ_LEQ(sack.end, th_ack)) {
 			/*
			 * It's a D-SACK block.
 			 */
 			tcp_record_dsack(tp, sack.start, sack.end, 0);
 		}
 	}
 	if (num_sack_blks == 0)
 		goto out;
 	/*
 	 * Sort the SACK blocks so we can update the rack scoreboard with
 	 * just one pass.
 	 */
 	new_sb = sack_filter_blks(&bbr->r_ctl.bbr_sf, sack_blocks,
 				  num_sack_blks, th->th_ack);
 	ctf_log_sack_filter(bbr->rc_tp, new_sb, sack_blocks);
 	BBR_STAT_ADD(bbr_sack_blocks, num_sack_blks);
 	BBR_STAT_ADD(bbr_sack_blocks_skip, (num_sack_blks - new_sb));
 	num_sack_blks = new_sb;
 	if (num_sack_blks < 2) {
 		goto do_sack_work;
 	}
 	/* Sort the sacks */
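	/*
	 * A simple O(n^2) exchange sort on the end sequence numbers is
	 * fine here since num_sack_blks is at most TCP_MAX_SACK + 1.
	 */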
 	for (i = 0; i < num_sack_blks; i++) {
 		for (j = i + 1; j < num_sack_blks; j++) {
 			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
 				sack = sack_blocks[i];
 				sack_blocks[i] = sack_blocks[j];
 				sack_blocks[j] = sack;
 			}
 		}
 	}
 	/*
	 * Now are any of the sack block ends the same (yes, some
	 * implementations send these)?
 	 */
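	/*
	 * For example, blocks [100, 200) and [150, 200) share an end; we
	 * keep the wider [100, 200) and collapse out the other.
	 */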
 again:
 	if (num_sack_blks > 1) {
 		for (i = 0; i < num_sack_blks; i++) {
 			for (j = i + 1; j < num_sack_blks; j++) {
 				if (sack_blocks[i].end == sack_blocks[j].end) {
 					/*
					 * Ok these two have the same end; we
					 * keep the one with the smaller start
					 * (it covers more), throw away the
					 * other and start again.
 					 */
 					if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
 						/*
 						 * The second block covers
						 * more area, use its start.
 						 */
 						sack_blocks[i].start = sack_blocks[j].start;
 					}
 					/*
 					 * Now collapse out the dup-sack and
 					 * lower the count
 					 */
 					for (k = (j + 1); k < num_sack_blks; k++) {
 						sack_blocks[j].start = sack_blocks[k].start;
 						sack_blocks[j].end = sack_blocks[k].end;
 						j++;
 					}
 					num_sack_blks--;
 					goto again;
 				}
 			}
 		}
 	}
 do_sack_work:
 	rsm = bbr->r_ctl.rc_sacklast;
 	for (i = 0; i < num_sack_blks; i++) {
 		acked = bbr_proc_sack_blk(tp, bbr, &sack_blocks[i], to, &rsm, cts);
 		if (acked) {
 			bbr->r_wanted_output = 1;
 			changed += acked;
 			sack_changed += acked;
 		}
 	}
 out:
 	*prev_acked = p_acked;
 	if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) {
 		/*
		 * Ok we have a high probability that we need to go into
 		 * recovery since we have data sack'd
 		 */
 		struct bbr_sendmap *rsm;
 
 		rsm = bbr_check_recovery_mode(tp, bbr, cts);
 		if (rsm) {
 			/* Enter recovery */
 			entered_recovery = 1;
 			bbr->r_wanted_output = 1;
 			/*
 			 * When we enter recovery we need to assure we send
 			 * one packet.
 			 */
 			if (bbr->r_ctl.rc_resend == NULL) {
 				bbr->r_ctl.rc_resend = rsm;
 			}
 		}
 	}
 	if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) {
 		/*
		 * See if we need to rack-retransmit anything; if so, set it
		 * up as the thing to resend, assuming something else is not
		 * already in that position.
 		 */
 		if (bbr->r_ctl.rc_resend == NULL) {
 			bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts);
 		}
 	}
 	/*
	 * We return the amount that changed via sack; this is used by the
	 * ack-received code to augment what was changed between th_ack <->
	 * snd_una.
 	 */
 	return (sack_changed);
 }
 
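/*
 * Count a duplicate ACK against the oldest entry still on the transmit
 * map; once its count reaches DUP_ACK_THRESHOLD we request an output
 * pass so a retransmission can be considered.
 */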
 static void
 bbr_strike_dupack(struct tcp_bbr *bbr)
 {
 	struct bbr_sendmap *rsm;
 
 	rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap);
 	if (rsm && (rsm->r_dupack < 0xff)) {
 		rsm->r_dupack++;
 		if (rsm->r_dupack >= DUP_ACK_THRESHOLD)
 			bbr->r_wanted_output = 1;
 	}
 }
 
 /*
 * Return value of 1: we do not need to call bbr_process_data().
 * Return value of 0: bbr_process_data() can be called.
 * For ret_val: if it's 0 the TCB is locked and valid; if it's non-zero
 * it's unlocked and probably unsafe to touch the TCB.
  */
 static int
 bbr_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to,
     uint32_t tiwin, int32_t tlen,
     int32_t * ofia, int32_t thflags, int32_t * ret_val)
 {
 	int32_t ourfinisacked = 0;
 	int32_t acked_amount;
 	uint16_t nsegs;
 	int32_t acked;
 	uint32_t lost, sack_changed = 0;
 	struct mbuf *mfree;
 	struct tcp_bbr *bbr;
 	uint32_t prev_acked = 0;
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	lost = bbr->r_ctl.rc_lost;
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
 	if (SEQ_GT(th->th_ack, tp->snd_max)) {
 		ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val);
 		bbr->r_wanted_output = 1;
 		return (1);
 	}
 	if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
 		/* Process the ack */
 		if (bbr->rc_in_persist)
 			tp->t_rxtshift = 0;
 		if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd))
 			bbr_strike_dupack(bbr);
 		sack_changed = bbr_log_ack(tp, to, th, &prev_acked);
 	}
 	bbr_lt_bw_sampling(bbr, bbr->r_ctl.rc_rcvtime, (bbr->r_ctl.rc_lost > lost));
 	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
 		/*
 		 * Old ack, behind the last one rcv'd or a duplicate ack
 		 * with SACK info.
 		 */
 		if (th->th_ack == tp->snd_una) {
 			bbr_ack_received(tp, bbr, th, 0, sack_changed, prev_acked, __LINE__, 0);
 			if (bbr->r_state == TCPS_SYN_SENT) {
 				/*
				 * Special case where we sent a SYN. When
 				 * the SYN-ACK is processed in syn_sent
 				 * state it bumps the snd_una. This causes
 				 * us to hit here even though we did ack 1
 				 * byte.
 				 *
 				 * Go through the nothing left case so we
 				 * send data.
 				 */
 				goto nothing_left;
 			}
 		}
 		return (0);
 	}
 	/*
 	 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
 	 * something we sent.
 	 */
 	if (tp->t_flags & TF_NEEDSYN) {
 		/*
 		 * T/TCP: Connection was half-synchronized, and our SYN has
 		 * been ACK'd (so connection is now fully synchronized).  Go
 		 * to non-starred state, increment snd_una for ACK of SYN,
 		 * and check if we can do window scaling.
 		 */
 		tp->t_flags &= ~TF_NEEDSYN;
 		tp->snd_una++;
 		/* Do window scaling? */
 		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
 		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
 			tp->rcv_scale = tp->request_r_scale;
 			/* Send window already scaled. */
 		}
 	}
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	acked = BYTES_THIS_ACK(tp, th);
 	KMOD_TCPSTAT_ADD(tcps_rcvackpack, (int)nsegs);
 	KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
 
 	/*
 	 * If we just performed our first retransmit, and the ACK arrives
 	 * within our recovery window, then it was a mistake to do the
 	 * retransmit in the first place.  Recover our original cwnd and
 	 * ssthresh, and proceed to transmit where we left off.
 	 */
 	if (tp->t_flags & TF_PREVVALID) {
 		tp->t_flags &= ~TF_PREVVALID;
 		if (tp->t_rxtshift == 1 &&
 		    (int)(ticks - tp->t_badrxtwin) < 0)
 			bbr_cong_signal(tp, th, CC_RTO_ERR, NULL);
 	}
 	SOCKBUF_LOCK(&so->so_snd);
 	acked_amount = min(acked, (int)sbavail(&so->so_snd));
 	tp->snd_wnd -= acked_amount;
 	mfree = sbcut_locked(&so->so_snd, acked_amount);
 	/* NB: sowwakeup_locked() does an implicit unlock. */
 	sowwakeup_locked(so);
 	m_freem(mfree);
 	if (SEQ_GT(th->th_ack, tp->snd_una)) {
 		bbr_collapse_rtt(tp, bbr, TCP_REXMTVAL(tp));
 	}
 	tp->snd_una = th->th_ack;
 	bbr_ack_received(tp, bbr, th, acked, sack_changed, prev_acked, __LINE__, (bbr->r_ctl.rc_lost - lost));
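	/*
	 * If we are in recovery, a partial ACK (below snd_recover) keeps
	 * us there; otherwise the cumulative ACK takes us out of recovery.
	 */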
 	if (IN_RECOVERY(tp->t_flags)) {
 		if (SEQ_LT(th->th_ack, tp->snd_recover) &&
 		    (SEQ_LT(th->th_ack, tp->snd_max))) {
 			tcp_bbr_partialack(tp);
 		} else {
 			bbr_post_recovery(tp);
 		}
 	}
 	if (SEQ_GT(tp->snd_una, tp->snd_recover)) {
 		tp->snd_recover = tp->snd_una;
 	}
 	if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
 		tp->snd_nxt = tp->snd_max;
 	}
 	if (tp->snd_una == tp->snd_max) {
 		/* Nothing left outstanding */
 nothing_left:
 		bbr_log_progress_event(bbr, tp, ticks, PROGRESS_CLEAR, __LINE__);
 		if (sbavail(&so->so_snd) == 0)
 			bbr->rc_tp->t_acktime = 0;
 		if ((sbused(&so->so_snd) == 0) &&
 		    (tp->t_flags & TF_SENTFIN)) {
 			ourfinisacked = 1;
 		}
 		bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime);
 		if (bbr->rc_in_persist == 0) {
 			bbr->r_ctl.rc_went_idle_time = bbr->r_ctl.rc_rcvtime;
 		}
 		sack_filter_clear(&bbr->r_ctl.bbr_sf, tp->snd_una);
 		bbr_log_ack_clear(bbr, bbr->r_ctl.rc_rcvtime);
 		/*
 		 * We invalidate the last ack here since we
 		 * don't want to transfer forward the time
 		 * for our sum's calculations.
 		 */
 		if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
 		    (sbavail(&so->so_snd) == 0) &&
 		    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
 			/*
 			 * The socket was gone and the peer sent data, time
 			 * to reset him.
 			 */
 			*ret_val = 1;
 			tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
 			/* tcp_close will kill the inp pre-log the Reset */
 			tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
 			tp = tcp_close(tp);
 			ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
 			BBR_STAT_INC(bbr_dropped_af_data);
 			return (1);
 		}
 		/* Set need output so persist might get set */
 		bbr->r_wanted_output = 1;
 	}
 	if (ofia)
 		*ofia = ourfinisacked;
 	return (0);
 }
 
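/*
 * Enter persist state: cancel any pending timer, remember when we went
 * idle and, since time freezes for the BBR state while persisting,
 * charge the elapsed time to the current state's counter now.
 */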
 static void
 bbr_enter_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, int32_t line)
 {
 	if (bbr->rc_in_persist == 0) {
 		bbr_timer_cancel(bbr, __LINE__, cts);
 		bbr->r_ctl.rc_last_delay_val = 0;
 		tp->t_rxtshift = 0;
 		bbr->rc_in_persist = 1;
 		bbr->r_ctl.rc_went_idle_time = cts;
 		/* We should be capped when rw went to 0 but just in case */
 		bbr_log_type_pesist(bbr, cts, 0, line, 1);
 		/* Time freezes for the state, so do the accounting now */
 		if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) {
 			uint32_t time_in;
 
 			time_in = cts - bbr->r_ctl.rc_bbr_state_time;
 			if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) {
 				int32_t idx;
 
 				idx = bbr_state_val(bbr);
 				counter_u64_add(bbr_state_time[(idx + 5)], time_in);
 			} else {
 				counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in);
 			}
 		}
 		bbr->r_ctl.rc_bbr_state_time = cts;
 	}
 }
 
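/*
 * Used when coming out of idle (e.g. exiting persists). If we were idle
 * long enough and idle-restart is enabled, ramp back toward a full pipe
 * using the startup gains; otherwise, if in probe_bw, just pick up a
 * fresh substate.
 */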
 static void
 bbr_restart_after_idle(struct tcp_bbr *bbr, uint32_t cts, uint32_t idle_time)
 {
 	/*
 	 * Note that if idle time does not exceed our
	 * threshold, we do nothing and continue the state
 	 * transitions we were last walking through.
 	 */
 	if (idle_time >= bbr_idle_restart_threshold) {
 		if (bbr->rc_use_idle_restart) {
 			bbr->rc_bbr_state = BBR_STATE_IDLE_EXIT;
 			/*
 			 * Set our target using BBR_UNIT, so
 			 * we increase at a dramatic rate but
 			 * we stop when we get the pipe
 			 * full again for our current b/w estimate.
 			 */
 			bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT;
 			bbr->r_ctl.rc_bbr_cwnd_gain = BBR_UNIT;
 			bbr_set_state_target(bbr, __LINE__);
 			/* Now setup our gains to ramp up */
 			bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.rc_startup_pg;
 			bbr->r_ctl.rc_bbr_cwnd_gain = bbr->r_ctl.rc_startup_pg;
 			bbr_log_type_statechange(bbr, cts, __LINE__);
 		} else if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) {
 			bbr_substate_change(bbr, cts, __LINE__, 1);
 		}
 	}
 }
 
 static void
 bbr_exit_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, int32_t line)
 {
 	uint32_t idle_time;
 
 	if (bbr->rc_in_persist == 0)
 		return;
 	idle_time = bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time);
 	bbr->rc_in_persist = 0;
 	bbr->rc_hit_state_1 = 0;
 	bbr->r_ctl.rc_del_time = cts;
 	/*
 	 * We invalidate the last ack here since we
 	 * don't want to transfer forward the time
 	 * for our sum's calculations.
 	 */
 	if (tcp_in_hpts(bbr->rc_tp)) {
 		tcp_hpts_remove(bbr->rc_tp);
 		bbr->rc_timer_first = 0;
 		bbr->r_ctl.rc_hpts_flags = 0;
 		bbr->r_ctl.rc_last_delay_val = 0;
 		bbr->r_ctl.rc_hptsi_agg_delay = 0;
 		bbr->r_agg_early_set = 0;
 		bbr->r_ctl.rc_agg_early = 0;
 	}
 	bbr_log_type_pesist(bbr, cts, idle_time, line, 0);
 	if (idle_time >= bbr_rtt_probe_time) {
 		/*
		 * This qualifies as an RTT_PROBE session since we dropped the
		 * outstanding data to nothing and waited more than
		 * bbr_rtt_probe_time.
 		 */
 		bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_PERSIST, 0);
 		bbr->r_ctl.last_in_probertt = bbr->r_ctl.rc_rtt_shrinks = cts;
 	}
 	tp->t_rxtshift = 0;
 	/*
	 * If in probeBW and we have persisted more than an RTT let's do
 	 * special handling.
 	 */
 	/* Force a time based epoch */
 	bbr_set_epoch(bbr, cts, __LINE__);
 	/*
	 * Set up the lost accounting so we don't count anything against the
	 * peer we have been stuck with during persists.
 	 */
 	bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost;
 	/* Time un-freezes for the state */
 	bbr->r_ctl.rc_bbr_state_time = cts;
 	if ((bbr->rc_bbr_state == BBR_STATE_PROBE_BW) ||
 	    (bbr->rc_bbr_state == BBR_STATE_PROBE_RTT)) {
 		/*
 		 * If we are going back to probe-bw
		 * or probe_rtt, we may need to
 		 * do a fast restart.
 		 */
 		bbr_restart_after_idle(bbr, cts, idle_time);
 	}
 }
 
 static void
 bbr_collapsed_window(struct tcp_bbr *bbr)
 {
 	/*
 	 * Now we must walk the
 	 * send map and divide the
 	 * ones left stranded. These
 	 * guys can't cause us to abort
 	 * the connection and are really
 	 * "unsent". However if a buggy
	 * client actually did keep some
	 * of the data, i.e. collapsed the win
	 * and refused to ack and then opened
	 * the win and acked that data, we would
	 * get into an ack war, so the simpler
	 * method of just pretending we
	 * did not send those segments
	 * won't work.
 	 */
 	struct bbr_sendmap *rsm, *nrsm;
 	tcp_seq max_seq;
 	uint32_t maxseg;
 	int can_split = 0;
 	int fnd = 0;
 
 	maxseg = bbr->rc_tp->t_maxseg - bbr->rc_last_options;
 	max_seq = bbr->rc_tp->snd_una + bbr->rc_tp->snd_wnd;
 	bbr_log_type_rwnd_collapse(bbr, max_seq, 1, 0);
 	TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) {
 		/* Find the first seq past or at maxseq */
 		if (rsm->r_flags & BBR_RWND_COLLAPSED)
 			rsm->r_flags &= ~BBR_RWND_COLLAPSED;
 		if (SEQ_GEQ(max_seq, rsm->r_start) &&
 		    SEQ_GEQ(rsm->r_end, max_seq)) {
 			fnd = 1;
 			break;
 		}
 	}
 	bbr->rc_has_collapsed = 0;
 	if (!fnd) {
		/* Nothing to do, strange */
 		return;
 	}
 	/*
 	 * Now can we split?
 	 *
	 * We don't want to split if splitting
	 * would generate too many small segments,
	 * lest we let an attacker fragment our
	 * send_map and run us out of memory.
 	 */
 	if ((max_seq != rsm->r_start) &&
 	    (max_seq != rsm->r_end)){
 		/* can we split? */
 		int res1, res2;
 
 		res1 = max_seq - rsm->r_start;
 		res2 = rsm->r_end - max_seq;
 		if ((res1 >= (maxseg/8)) &&
 		    (res2 >= (maxseg/8))) {
 			/* No small pieces here */
 			can_split = 1;
 		} else if (bbr->r_ctl.rc_num_small_maps_alloced < bbr_sack_block_limit) {
 			/* We are under the limit */
 			can_split = 1;
 		}
 	}
 	/* Ok do we need to split this rsm? */
 	if (max_seq == rsm->r_start) {
		/* It's this guy, no split required */
 		nrsm = rsm;
 	} else if (max_seq == rsm->r_end) {
 		/* It's the next one no split required. */
 		nrsm = TAILQ_NEXT(rsm, r_next);
 		if (nrsm == NULL) {
 			/* Huh? */
 			return;
 		}
 	} else if (can_split && SEQ_LT(max_seq, rsm->r_end)) {
 		/* yep we need to split it */
 		nrsm = bbr_alloc_limit(bbr, BBR_LIMIT_TYPE_SPLIT);
 		if (nrsm == NULL) {
			/* failed XXXrrs what can we do but mark the whole rsm? */
 			nrsm = rsm;
 			goto no_split;
 		}
 		/* Clone it */
 		bbr_log_type_rwnd_collapse(bbr, max_seq, 3, 0);
 		bbr_clone_rsm(bbr, nrsm, rsm, max_seq);
 		TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next);
 		if (rsm->r_in_tmap) {
 			TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 			nrsm->r_in_tmap = 1;
 		}
 	} else {
 		/*
		 * Split not allowed, just start here and
		 * use this guy.
 		 */
 		nrsm = rsm;
 	}
 no_split:
 	BBR_STAT_INC(bbr_collapsed_win);
 	/* reuse fnd as a count */
 	fnd = 0;
 	TAILQ_FOREACH_FROM(nrsm, &bbr->r_ctl.rc_map, r_next) {
 		nrsm->r_flags |= BBR_RWND_COLLAPSED;
 		fnd++;
 		bbr->rc_has_collapsed = 1;
 	}
 	bbr_log_type_rwnd_collapse(bbr, max_seq, 4, fnd);
 }
 
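/*
 * The peer's window is open again; walk the send map from the tail and
 * clear the BBR_RWND_COLLAPSED marking that bbr_collapsed_window() set.
 */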
 static void
 bbr_un_collapse_window(struct tcp_bbr *bbr)
 {
 	struct bbr_sendmap *rsm;
 	int cleared = 0;
 
 	TAILQ_FOREACH_REVERSE(rsm, &bbr->r_ctl.rc_map, bbr_head, r_next) {
 		if (rsm->r_flags & BBR_RWND_COLLAPSED) {
 			/* Clear the flag */
 			rsm->r_flags &= ~BBR_RWND_COLLAPSED;
 			cleared++;
 		} else
 			break;
 	}
 	bbr_log_type_rwnd_collapse(bbr,
 				   (bbr->rc_tp->snd_una + bbr->rc_tp->snd_wnd), 0, cleared);
 	bbr->rc_has_collapsed = 0;
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCB is still
  * locked.
  */
 static int
 bbr_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
 {
 	/*
 	 * Update window information. Don't look at window if no ACK: TAC's
 	 * send garbage on first SYN.
 	 */
 	uint16_t nsegs;
 	int32_t tfo_syn;
 	struct tcp_bbr *bbr;
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
 	if ((thflags & TH_ACK) &&
 	    (SEQ_LT(tp->snd_wl1, th->th_seq) ||
 	    (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
 	    (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
 		/* keep track of pure window updates */
 		if (tlen == 0 &&
 		    tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
 			KMOD_TCPSTAT_INC(tcps_rcvwinupd);
 		tp->snd_wnd = tiwin;
 		tp->snd_wl1 = th->th_seq;
 		tp->snd_wl2 = th->th_ack;
 		if (tp->snd_wnd > tp->max_sndwnd)
 			tp->max_sndwnd = tp->snd_wnd;
 		bbr->r_wanted_output = 1;
 	} else if (thflags & TH_ACK) {
 		if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
 			tp->snd_wnd = tiwin;
 			tp->snd_wl1 = th->th_seq;
 			tp->snd_wl2 = th->th_ack;
 		}
 	}
 	if (tp->snd_wnd < ctf_outstanding(tp))
 		/* The peer collapsed its window on us */
 		bbr_collapsed_window(bbr);
  	else if (bbr->rc_has_collapsed)
 		bbr_un_collapse_window(bbr);
 	/* Was persist timer active and now we have window space? */
 	if ((bbr->rc_in_persist != 0) &&
 	    (tp->snd_wnd >= min((bbr->r_ctl.rc_high_rwnd/2),
 				bbr_minseg(bbr)))) {
 		/*
 		 * Make the rate persist at end of persist mode if idle long
 		 * enough
 		 */
 		bbr_exit_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
 
 		/* Make sure we output to start the timer */
 		bbr->r_wanted_output = 1;
 	}
 	/* Do we need to enter persist? */
 	if ((bbr->rc_in_persist == 0) &&
 	    (tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) &&
 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    (tp->snd_max == tp->snd_una) &&
 	    sbavail(&so->so_snd) &&
 	    (sbavail(&so->so_snd) > tp->snd_wnd)) {
 		/* No send window.. we must enter persist */
 		bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
 	}
 	if (tp->t_flags2 & TF2_DROP_AF_DATA) {
 		m_freem(m);
 		return (0);
 	}
 	/*
 	 * We don't support urgent data but
	 * drag along the urgent pointer just to make sure
	 * that if there is a stack switch no one
	 * is surprised.
 	 */
 	tp->rcv_up = tp->rcv_nxt;
 
 	/*
 	 * Process the segment text, merging it into the TCP sequencing
 	 * queue, and arranging for acknowledgment of receipt if necessary.
 	 * This process logically involves adjusting tp->rcv_wnd as data is
 	 * presented to the user (this happens in tcp_usrreq.c, case
 	 * PRU_RCVD).  If a FIN has already been received on this connection
 	 * then we just ignore the text.
 	 */
 	tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
 		   IS_FASTOPEN(tp->t_flags));
 	if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
 	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
 		tcp_seq save_start = th->th_seq;
 		tcp_seq save_rnxt  = tp->rcv_nxt;
 		int     save_tlen  = tlen;
 
 		m_adj(m, drop_hdrlen);	/* delayed header drop */
 		/*
 		 * Insert segment which includes th into TCP reassembly
 		 * queue with control block tp.  Set thflags to whether
 		 * reassembly now includes a segment with FIN.  This handles
 		 * the common case inline (segment is the next to be
 		 * received on an established connection, and the queue is
 		 * empty), avoiding linkage into and removal from the queue
 		 * and repetition of various conversions. Set DELACK for
 		 * segments received in order, but ack immediately when
 		 * segments are out of order (so fast retransmit can work).
 		 */
 		if (th->th_seq == tp->rcv_nxt &&
 		    SEGQ_EMPTY(tp) &&
 		    (TCPS_HAVEESTABLISHED(tp->t_state) ||
 		    tfo_syn)) {
 #ifdef NETFLIX_SB_LIMITS
 			u_int mcnt, appended;
 
 			if (so->so_rcv.sb_shlim) {
 				mcnt = m_memcnt(m);
 				appended = 0;
 				if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
 				    CFO_NOSLEEP, NULL) == false) {
 					counter_u64_add(tcp_sb_shlim_fails, 1);
 					m_freem(m);
 					return (0);
 				}
 			}
 
 #endif
 			if (DELAY_ACK(tp, bbr, nsegs) || tfo_syn) {
 				bbr->bbr_segs_rcvd += max(1, nsegs);
 				tp->t_flags |= TF_DELACK;
 				bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime);
 			} else {
 				bbr->r_wanted_output = 1;
 				tp->t_flags |= TF_ACKNOW;
 			}
 			tp->rcv_nxt += tlen;
 			if (tlen &&
 			    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
 			    (tp->t_fbyte_in == 0)) {
 				tp->t_fbyte_in = ticks;
 				if (tp->t_fbyte_in == 0)
 					tp->t_fbyte_in = 1;
 				if (tp->t_fbyte_out && tp->t_fbyte_in)
 					tp->t_flags2 |= TF2_FBYTES_COMPLETE;
 			}
 			thflags = tcp_get_flags(th) & TH_FIN;
 			KMOD_TCPSTAT_ADD(tcps_rcvpack, (int)nsegs);
 			KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
 			SOCKBUF_LOCK(&so->so_rcv);
 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
 				m_freem(m);
 			else
 #ifdef NETFLIX_SB_LIMITS
 				appended =
 #endif
 					sbappendstream_locked(&so->so_rcv, m, 0);
 			/* NB: sorwakeup_locked() does an implicit unlock. */
 			sorwakeup_locked(so);
 #ifdef NETFLIX_SB_LIMITS
 			if (so->so_rcv.sb_shlim && appended != mcnt)
 				counter_fo_release(so->so_rcv.sb_shlim,
 				    mcnt - appended);
 #endif
 
 		} else {
 			/*
 			 * XXX: Due to the header drop above "th" is
 			 * theoretically invalid by now.  Fortunately
			 * m_adj() doesn't actually free any mbufs when
 			 * trimming from the head.
 			 */
 			tcp_seq temp = save_start;
 
 			thflags = tcp_reass(tp, th, &temp, &tlen, m);
 			tp->t_flags |= TF_ACKNOW;
 			if (tp->t_flags & TF_WAKESOR) {
 				tp->t_flags &= ~TF_WAKESOR;
 				/* NB: sorwakeup_locked() does an implicit unlock. */
 				sorwakeup_locked(so);
 			}
 		}
 		if ((tp->t_flags & TF_SACK_PERMIT) &&
 		    (save_tlen > 0) &&
 		    TCPS_HAVEESTABLISHED(tp->t_state)) {
 			if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
 				/*
 				 * DSACK actually handled in the fastpath
 				 * above.
 				 */
 				tcp_update_sack_list(tp, save_start,
 				    save_start + save_tlen);
 			} else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
 				if ((tp->rcv_numsacks >= 1) &&
 				    (tp->sackblks[0].end == save_start)) {
 					/*
 					 * Partial overlap, recorded at todrop
 					 * above.
 					 */
 					tcp_update_sack_list(tp,
 					    tp->sackblks[0].start,
 					    tp->sackblks[0].end);
 				} else {
 					tcp_update_dsack_list(tp, save_start,
 					    save_start + save_tlen);
 				}
 			} else if (tlen >= save_tlen) {
 				/* Update of sackblks. */
 				tcp_update_dsack_list(tp, save_start,
 				    save_start + save_tlen);
 			} else if (tlen > 0) {
 				tcp_update_dsack_list(tp, save_start,
 				    save_start + tlen);
 			}
 		}
 	} else {
 		m_freem(m);
 		thflags &= ~TH_FIN;
 	}
 
 	/*
 	 * If FIN is received ACK the FIN and let the user know that the
 	 * connection is closing.
 	 */
 	if (thflags & TH_FIN) {
 		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
 			/* The socket upcall is handled by socantrcvmore. */
 			socantrcvmore(so);
 			/*
 			 * If connection is half-synchronized (ie NEEDSYN
 			 * flag on) then delay ACK, so it may be piggybacked
 			 * when SYN is sent. Otherwise, since we received a
 			 * FIN then no more input can be expected, send ACK
 			 * now.
 			 */
 			if (tp->t_flags & TF_NEEDSYN) {
 				tp->t_flags |= TF_DELACK;
 				bbr_timer_cancel(bbr,
 				    __LINE__, bbr->r_ctl.rc_rcvtime);
 			} else {
 				tp->t_flags |= TF_ACKNOW;
 			}
 			tp->rcv_nxt++;
 		}
 		switch (tp->t_state) {
 			/*
 			 * In SYN_RECEIVED and ESTABLISHED STATES enter the
 			 * CLOSE_WAIT state.
 			 */
 		case TCPS_SYN_RECEIVED:
 			tp->t_starttime = ticks;
 			/* FALLTHROUGH */
 		case TCPS_ESTABLISHED:
 			tcp_state_change(tp, TCPS_CLOSE_WAIT);
 			break;
 
 			/*
 			 * If still in FIN_WAIT_1 STATE FIN has not been
 			 * acked so enter the CLOSING state.
 			 */
 		case TCPS_FIN_WAIT_1:
 			tcp_state_change(tp, TCPS_CLOSING);
 			break;
 
 			/*
 			 * In FIN_WAIT_2 state enter the TIME_WAIT state,
 			 * starting the time-wait timer, turning off the
 			 * other standard timers.
 			 */
 		case TCPS_FIN_WAIT_2:
 			bbr->rc_timer_first = 1;
 			bbr_timer_cancel(bbr,
 			    __LINE__, bbr->r_ctl.rc_rcvtime);
 			tcp_twstart(tp);
 			return (1);
 		}
 	}
 	/*
 	 * Return any desired output.
 	 */
 	if ((tp->t_flags & TF_ACKNOW) ||
 	    (sbavail(&so->so_snd) > ctf_outstanding(tp))) {
 		bbr->r_wanted_output = 1;
 	}
 	return (0);
 }
 
 /*
 * Here nothing is really faster, it's just that we
 * have broken out the fast-data path also, just like
 * the fast-ack. Return 1 if we processed the packet,
 * return 0 if you need to take the "slow-path".
  */
 static int
 bbr_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t nxt_pkt)
 {
 	uint16_t nsegs;
 	int32_t newsize = 0;	/* automatic sockbuf scaling */
 	struct tcp_bbr *bbr;
 #ifdef NETFLIX_SB_LIMITS
 	u_int mcnt, appended;
 #endif
 
 	/* On the hpts and we would have called output */
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * the timestamp. NOTE that the test is modified according to the
 	 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
 	 */
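	/*
	 * Punt to the slow path if a retransmit is pending, the window
	 * changed, a SYN/FIN is still needed, the timestamp is old, the
	 * ACK is not exactly snd_una, or the data will not fit in the
	 * receive buffer.
	 */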
 	if (bbr->r_ctl.rc_resend != NULL) {
 		return (0);
 	}
 	if (tiwin && tiwin != tp->snd_wnd) {
 		return (0);
 	}
 	if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
 		return (0);
 	}
 	if (__predict_false((to->to_flags & TOF_TS) &&
 	    (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
 		return (0);
 	}
 	if (__predict_false((th->th_ack != tp->snd_una))) {
 		return (0);
 	}
 	if (__predict_false(tlen > sbspace(&so->so_rcv))) {
 		return (0);
 	}
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
 		tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * This is a pure, in-sequence data packet with nothing on the
 	 * reassembly queue and we have enough buffer space to take it.
 	 */
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
 
 #ifdef NETFLIX_SB_LIMITS
 	if (so->so_rcv.sb_shlim) {
 		mcnt = m_memcnt(m);
 		appended = 0;
 		if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
 		    CFO_NOSLEEP, NULL) == false) {
 			counter_u64_add(tcp_sb_shlim_fails, 1);
 			m_freem(m);
 			return (1);
 		}
 	}
 #endif
 	/* Clean receiver SACK report if present */
 	if (tp->rcv_numsacks)
 		tcp_clean_sackreport(tp);
 	KMOD_TCPSTAT_INC(tcps_preddat);
 	tp->rcv_nxt += tlen;
 	if (tlen &&
 	    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
 	    (tp->t_fbyte_in == 0)) {
 		tp->t_fbyte_in = ticks;
 		if (tp->t_fbyte_in == 0)
 			tp->t_fbyte_in = 1;
 		if (tp->t_fbyte_out && tp->t_fbyte_in)
 			tp->t_flags2 |= TF2_FBYTES_COMPLETE;
 	}
 	/*
 	 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
 	 */
 	tp->snd_wl1 = th->th_seq;
 	/*
 	 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
 	 */
 	tp->rcv_up = tp->rcv_nxt;
 	KMOD_TCPSTAT_ADD(tcps_rcvpack, (int)nsegs);
 	KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
 	newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
 
 	/* Add data to socket buffer. */
 	SOCKBUF_LOCK(&so->so_rcv);
 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 		m_freem(m);
 	} else {
 		/*
 		 * Set new socket buffer size. Give up when limit is
 		 * reached.
 		 */
 		if (newsize)
 			if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
 				so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
 		m_adj(m, drop_hdrlen);	/* delayed header drop */
 
 #ifdef NETFLIX_SB_LIMITS
 		appended =
 #endif
 			sbappendstream_locked(&so->so_rcv, m, 0);
 		ctf_calc_rwin(so, tp);
 	}
 	/* NB: sorwakeup_locked() does an implicit unlock. */
 	sorwakeup_locked(so);
 #ifdef NETFLIX_SB_LIMITS
 	if (so->so_rcv.sb_shlim && mcnt != appended)
 		counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
 #endif
 	if (DELAY_ACK(tp, bbr, nsegs)) {
 		bbr->bbr_segs_rcvd += max(1, nsegs);
 		tp->t_flags |= TF_DELACK;
 		bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime);
 	} else {
 		bbr->r_wanted_output = 1;
 		tp->t_flags |= TF_ACKNOW;
 	}
 	return (1);
 }
 
 /*
  * This subfunction is used to try to highly optimize the
  * fast path. We again allow window updates that are
  * in sequence to remain in the fast-path. We also add
  * in the __predict's to attempt to help the compiler.
  * Note that if we return a 0, then we can *not* process
  * it and the caller should push the packet into the
  * slow-path. If we return 1, then all is well and
  * the packet is fully processed.
  */
 static int
 bbr_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t acked;
 	uint16_t nsegs;
 	uint32_t sack_changed;
 	uint32_t prev_acked = 0;
 	struct tcp_bbr *bbr;
 
 	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
 		/* Old ack, behind (or duplicate to) the last one rcv'd */
 		return (0);
 	}
 	if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
 		/* Above what we have sent? */
 		return (0);
 	}
 	if (__predict_false(tiwin == 0)) {
 		/* zero window */
 		return (0);
 	}
 	if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
 		/* We need a SYN or a FIN, unlikely.. */
 		return (0);
 	}
 	if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
 		/* Timestamp is behind .. old ack with seq wrap? */
 		return (0);
 	}
 	if (__predict_false(IN_RECOVERY(tp->t_flags))) {
 		/* Still recovering */
 		return (0);
 	}
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	if (__predict_false(bbr->r_ctl.rc_resend != NULL)) {
 		/* We are retransmitting */
 		return (0);
 	}
 	if (__predict_false(bbr->rc_in_persist != 0)) {
 		/* In persist mode */
 		return (0);
 	}
 	if (bbr->r_ctl.rc_sacked) {
 		/* We have sack holes on our scoreboard */
 		return (0);
 	}
 	/* Ok if we reach here, we can process a fast-ack */
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
 	sack_changed = bbr_log_ack(tp, to, th, &prev_acked);
 	/*
 	 * We never detect loss in fast ack [we can't
 	 * have a sack and can't be in recovery so
 	 * we always pass 0 (nothing detected)].
 	 */
 	bbr_lt_bw_sampling(bbr, bbr->r_ctl.rc_rcvtime, 0);
 	/* Did the window get updated? */
 	if (tiwin != tp->snd_wnd) {
 		tp->snd_wnd = tiwin;
 		tp->snd_wl1 = th->th_seq;
 		if (tp->snd_wnd > tp->max_sndwnd)
 			tp->max_sndwnd = tp->snd_wnd;
 	}
 	/* Do we need to exit persists? */
 	if ((bbr->rc_in_persist != 0) &&
 	    (tp->snd_wnd >= min((bbr->r_ctl.rc_high_rwnd/2),
 			       bbr_minseg(bbr)))) {
 		bbr_exit_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
 		bbr->r_wanted_output = 1;
 	}
 	/* Do we need to enter persists? */
 	if ((bbr->rc_in_persist == 0) &&
 	    (tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) &&
 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    (tp->snd_max == tp->snd_una) &&
 	    sbavail(&so->so_snd) &&
 	    (sbavail(&so->so_snd) > tp->snd_wnd)) {
 		/* No send window.. we must enter persist */
 		bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
 	}
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * the timestamp. NOTE that the test is modified according to the
 	 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
 		tp->ts_recent_age = bbr->r_ctl.rc_rcvtime;
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * This is a pure ack for outstanding data.
 	 */
 	KMOD_TCPSTAT_INC(tcps_predack);
 
 	/*
 	 * "bad retransmit" recovery.
 	 */
 	if (tp->t_flags & TF_PREVVALID) {
 		tp->t_flags &= ~TF_PREVVALID;
 		if (tp->t_rxtshift == 1 &&
 		    (int)(ticks - tp->t_badrxtwin) < 0)
 			bbr_cong_signal(tp, th, CC_RTO_ERR, NULL);
 	}
 	/*
 	 * Recalculate the transmit timer / rtt.
 	 *
 	 * Some boxes send broken timestamp replies during the SYN+ACK
 	 * phase, ignore timestamps of 0 or we could calculate a huge RTT
 	 * and blow up the retransmit timer.
 	 */
 	acked = BYTES_THIS_ACK(tp, th);
 
 #ifdef TCP_HHOOK
 	/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
 	hhook_run_tcp_est_in(tp, th, to);
 #endif
 
 	KMOD_TCPSTAT_ADD(tcps_rcvackpack, (int)nsegs);
 	KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
 	sbdrop(&so->so_snd, acked);
 
 	if (SEQ_GT(th->th_ack, tp->snd_una))
 		bbr_collapse_rtt(tp, bbr, TCP_REXMTVAL(tp));
 	tp->snd_una = th->th_ack;
 	if (tp->snd_wnd < ctf_outstanding(tp))
 		/* The peer collapsed its window on us */
 		bbr_collapsed_window(bbr);
 	else if (bbr->rc_has_collapsed)
 		bbr_un_collapse_window(bbr);
 
 	if (SEQ_GT(tp->snd_una, tp->snd_recover)) {
 		tp->snd_recover = tp->snd_una;
 	}
 	bbr_ack_received(tp, bbr, th, acked, sack_changed, prev_acked, __LINE__, 0);
 	/*
 	 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
 	 */
 	tp->snd_wl2 = th->th_ack;
 	m_freem(m);
 	/*
 	 * If all outstanding data are acked, stop retransmit timer,
 	 * otherwise restart timer using current (possibly backed-off)
 	 * value. If process is waiting for space, wakeup/selwakeup/signal.
 	 * If data are ready to send, let tcp_output decide between more
 	 * output or persist.
 	 * Wake up the socket if we have room to write more.
 	 */
 	sowwakeup(so);
 	if (tp->snd_una == tp->snd_max) {
 		/* Nothing left outstanding */
 		bbr_log_progress_event(bbr, tp, ticks, PROGRESS_CLEAR, __LINE__);
 		if (sbavail(&so->so_snd) == 0)
 			bbr->rc_tp->t_acktime = 0;
 		bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime);
 		if (bbr->rc_in_persist == 0) {
 			bbr->r_ctl.rc_went_idle_time = bbr->r_ctl.rc_rcvtime;
 		}
 		sack_filter_clear(&bbr->r_ctl.bbr_sf, tp->snd_una);
 		bbr_log_ack_clear(bbr, bbr->r_ctl.rc_rcvtime);
 		/*
 		 * We invalidate the last ack here since we
 		 * don't want to transfer forward the time
 		 * for our sum's calculations.
 		 */
 		bbr->r_wanted_output = 1;
 	}
 	if (sbavail(&so->so_snd)) {
 		bbr->r_wanted_output = 1;
 	}
 	return (1);
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCB is still
  * locked.
  */
 static int
 bbr_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t todrop;
 	int32_t ourfinisacked = 0;
 	struct tcp_bbr *bbr;
 	int32_t ret_val = 0;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	/*
	 * If the state is SYN_SENT: if seg contains an ACK, but not for our
	 * SYN, drop the input; if seg contains an RST, then drop the
	 * connection; if seg does not contain a SYN, then drop it. Otherwise
	 * this is an acceptable SYN segment: initialize tp->rcv_nxt and
	 * tp->irs; if seg contains an ack then advance tp->snd_una. BBR does
	 * not support ECN so we will not say we are capable. If the SYN has
	 * been acked change to ESTABLISHED, else to the SYN_RCVD state;
	 * arrange for the segment to be acked (eventually) and continue
	 * processing the rest of data/controls, beginning with URG.
 	 */
 	if ((thflags & TH_ACK) &&
 	    (SEQ_LEQ(th->th_ack, tp->iss) ||
 	    SEQ_GT(th->th_ack, tp->snd_max))) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 		return (1);
 	}
 	if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
 		TCP_PROBE5(connect__refused, NULL, tp,
 		    mtod(m, const char *), tp, th);
 		tp = tcp_drop(tp, ECONNREFUSED);
 		ctf_do_drop(m, tp);
 		return (1);
 	}
 	if (thflags & TH_RST) {
 		ctf_do_drop(m, tp);
 		return (1);
 	}
 	if (!(thflags & TH_SYN)) {
 		ctf_do_drop(m, tp);
 		return (1);
 	}
 	tp->irs = th->th_seq;
 	tcp_rcvseqinit(tp);
 	if (thflags & TH_ACK) {
 		int tfo_partial = 0;
 
 		KMOD_TCPSTAT_INC(tcps_connects);
 		soisconnected(so);
 #ifdef MAC
 		mac_socketpeer_set_from_mbuf(m, so);
 #endif
 		/* Do window scaling on this connection? */
 		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
 		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
 			tp->rcv_scale = tp->request_r_scale;
 		}
 		tp->rcv_adv += min(tp->rcv_wnd,
 		    TCP_MAXWIN << tp->rcv_scale);
 		/*
 		 * If not all the data that was sent in the TFO SYN
 		 * has been acked, resend the remainder right away.
 		 */
 		if (IS_FASTOPEN(tp->t_flags) &&
 		    (tp->snd_una != tp->snd_max)) {
 			tp->snd_nxt = th->th_ack;
 			tfo_partial = 1;
 		}
 		/*
 		 * If there's data, delay ACK; if there's also a FIN ACKNOW
 		 * will be turned on later.
 		 */
 		if (DELAY_ACK(tp, bbr, 1) && tlen != 0 && !tfo_partial) {
 			bbr->bbr_segs_rcvd += 1;
 			tp->t_flags |= TF_DELACK;
 			bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime);
 		} else {
 			bbr->r_wanted_output = 1;
 			tp->t_flags |= TF_ACKNOW;
 		}
 		if (SEQ_GT(th->th_ack, tp->iss)) {
 			/*
			 * The SYN is acked,
			 * handle it specially.
 			 */
 			bbr_log_syn(tp, to);
 		}
 		if (SEQ_GT(th->th_ack, tp->snd_una)) {
 			/*
 			 * We advance snd_una for the
 			 * fast open case. If th_ack is
 			 * acknowledging data beyond
 			 * snd_una we can't just call
 			 * ack-processing since the
 			 * data stream in our send-map
 			 * will start at snd_una + 1 (one
			 * beyond the SYN). If it's just
 			 * equal we don't need to do that
 			 * and there is no send_map.
 			 */
 			tp->snd_una++;
 		}
 		/*
 		 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
 		 * SYN_SENT  --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
 		 */
 		tp->t_starttime = ticks;
 		if (tp->t_flags & TF_NEEDFIN) {
 			tcp_state_change(tp, TCPS_FIN_WAIT_1);
 			tp->t_flags &= ~TF_NEEDFIN;
 			thflags &= ~TH_SYN;
 		} else {
 			tcp_state_change(tp, TCPS_ESTABLISHED);
 			TCP_PROBE5(connect__established, NULL, tp,
 			    mtod(m, const char *), tp, th);
 			cc_conn_init(tp);
 		}
 	} else {
 		/*
 		 * Received initial SYN in SYN-SENT[*] state => simultaneous
 		 * open.  If segment contains CC option and there is a
 		 * cached CC, apply TAO test. If it succeeds, connection is *
 		 * half-synchronized. Otherwise, do 3-way handshake:
 		 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
 		 * there was no CC option, clear cached CC value.
 		 */
 		tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
 		tcp_state_change(tp, TCPS_SYN_RECEIVED);
 	}
 	/*
 	 * Advance th->th_seq to correspond to first data byte. If data,
 	 * trim to stay within window, dropping FIN if necessary.
 	 */
 	th->th_seq++;
 	if (tlen > tp->rcv_wnd) {
 		todrop = tlen - tp->rcv_wnd;
 		m_adj(m, -todrop);
 		tlen = tp->rcv_wnd;
 		thflags &= ~TH_FIN;
 		KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
 		KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
 	}
 	tp->snd_wl1 = th->th_seq - 1;
 	tp->rcv_up = th->th_seq;
 	/*
 	 * Client side of transaction: already sent SYN and data. If the
 	 * remote host used T/TCP to validate the SYN, our data will be
 	 * ACK'd; if so, enter normal data segment processing in the middle
 	 * of step 5, ack processing. Otherwise, goto step 6.
 	 */
 	if (thflags & TH_ACK) {
 		if ((to->to_flags & TOF_TS) != 0) {
 			uint32_t t, rtt;
 
 			t = tcp_tv_to_mssectick(&bbr->rc_tv);
 			if (TSTMP_GEQ(t, to->to_tsecr)) {
 				rtt = t - to->to_tsecr;
 				if (rtt == 0) {
 					rtt = 1;
 				}
 				rtt *= MS_IN_USEC;
 				tcp_bbr_xmit_timer(bbr, rtt, 0, 0, 0);
 				apply_filter_min_small(&bbr->r_ctl.rc_rttprop,
 						       rtt, bbr->r_ctl.rc_rcvtime);
 			}
 		}
 		if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
 			return (ret_val);
 		/* We may have changed to FIN_WAIT_1 above */
 		if (tp->t_state == TCPS_FIN_WAIT_1) {
 			/*
 			 * In FIN_WAIT_1 STATE in addition to the processing
 			 * for the ESTABLISHED state if our FIN is now
 			 * acknowledged then enter FIN_WAIT_2.
 			 */
 			if (ourfinisacked) {
 				/*
 				 * If we can't receive any more data, then
 				 * closing user can proceed. Starting the
 				 * timer is contrary to the specification,
 				 * but if we don't get a FIN we'll hang
 				 * forever.
 				 *
 				 * XXXjl: we should release the tp also, and
 				 * use a compressed state.
 				 */
 				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 					soisdisconnected(so);
 					tcp_timer_activate(tp, TT_2MSL,
 					    (tcp_fast_finwait2_recycle ?
 					    tcp_finwait2_timeout :
 					    TP_MAXIDLE(tp)));
 				}
 				tcp_state_change(tp, TCPS_FIN_WAIT_2);
 			}
 		}
 	}
 	return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCB is still
  * locked.
  */
 static int
 bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
 		struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
 		uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ourfinisacked = 0;
 	int32_t ret_val;
 	struct tcp_bbr *bbr;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (ctf_process_rst(m, th, so, tp));
 	if ((thflags & TH_ACK) &&
 	    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
 	     SEQ_GT(th->th_ack, tp->snd_max))) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 		return (1);
 	}
 	if (IS_FASTOPEN(tp->t_flags)) {
 		/*
 		 * When a TFO connection is in SYN_RECEIVED, the only valid
 		 * packets are the initial SYN, a retransmit/copy of the
 		 * initial SYN (possibly with a subset of the original
 		 * data), a valid ACK, a FIN, or a RST.
 		 */
 		if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
 			tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		} else if (thflags & TH_SYN) {
 			/* non-initial SYN is ignored */
 			if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
 			    (bbr->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
 			    (bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
 				ctf_do_drop(m, NULL);
 				return (0);
 			}
 		} else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	/*
 	 * In the SYN-RECEIVED state, validate that the packet belongs to
 	 * this connection before trimming the data to fit the receive
 	 * window.  Check the sequence number versus IRS since we know the
 	 * sequence numbers haven't wrapped.  This is a partial fix for the
 	 * "LAND" DoS attack.
 	 */
 	if (SEQ_LT(th->th_seq, tp->irs)) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 		return (1);
 	}
 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
 		return (ret_val);
 	}
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 		    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 		tp->ts_recent = to->to_tsval;
 	}
 	tp->snd_wnd = tiwin;
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (IS_FASTOPEN(tp->t_flags)) {
 			cc_conn_init(tp);
 		}
 		return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 					 tiwin, thflags, nxt_pkt));
 	}
 	KMOD_TCPSTAT_INC(tcps_connects);
 	if (tp->t_flags & TF_SONOTCONN) {
 		tp->t_flags &= ~TF_SONOTCONN;
 		soisconnected(so);
 	}
 	/* Do window scaling? */
 	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
 	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
 		tp->rcv_scale = tp->request_r_scale;
 	}
 	/*
	 * OK, for the first time in, let's see if we can use the timestamp
	 * to figure out what the initial RTT was.
 	 */
 	if ((to->to_flags & TOF_TS) != 0) {
 		uint32_t t, rtt;
 
 		t = tcp_tv_to_mssectick(&bbr->rc_tv);
 		if (TSTMP_GEQ(t, to->to_tsecr)) {
 			rtt = t - to->to_tsecr;
 			if (rtt == 0) {
 				rtt = 1;
 			}
 			rtt *= MS_IN_USEC;
 			tcp_bbr_xmit_timer(bbr, rtt, 0, 0, 0);
 			apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, bbr->r_ctl.rc_rcvtime);
 		}
 	}
 	/* Drop off any SYN in the send map (probably not there)  */
 	if (thflags & TH_ACK)
 		bbr_log_syn(tp, to);
 	if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
 		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
 		tp->t_tfo_pending = NULL;
 	}
 	/*
	 * Make transitions:
	 *   SYN-RECEIVED  -> ESTABLISHED
	 *   SYN-RECEIVED* -> FIN-WAIT-1
 	 */
 	tp->t_starttime = ticks;
 	if (tp->t_flags & TF_NEEDFIN) {
 		tcp_state_change(tp, TCPS_FIN_WAIT_1);
 		tp->t_flags &= ~TF_NEEDFIN;
 	} else {
 		tcp_state_change(tp, TCPS_ESTABLISHED);
 		TCP_PROBE5(accept__established, NULL, tp,
 			   mtod(m, const char *), tp, th);
 		/*
 		 * TFO connections call cc_conn_init() during SYN
 		 * processing.  Calling it again here for such connections
 		 * is not harmless as it would undo the snd_cwnd reduction
 		 * that occurs when a TFO SYN|ACK is retransmitted.
 		 */
 		if (!IS_FASTOPEN(tp->t_flags))
 			cc_conn_init(tp);
 	}
 	/*
 	 * Account for the ACK of our SYN prior to
 	 * regular ACK processing below, except for
 	 * simultaneous SYN, which is handled later.
 	 */
 	if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
 		tp->snd_una++;
 	/*
 	 * If segment contains data or ACK, will call tcp_reass() later; if
 	 * not, do so now to pass queued data to user.
 	 */
 	if (tlen == 0 && (thflags & TH_FIN) == 0) {
 		(void)tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
 			(struct mbuf *)0);
 		if (tp->t_flags & TF_WAKESOR) {
 			tp->t_flags &= ~TF_WAKESOR;
 			/* NB: sorwakeup_locked() does an implicit unlock. */
 			sorwakeup_locked(so);
 		}
 	}
 	tp->snd_wl1 = th->th_seq - 1;
 	if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (tp->t_state == TCPS_FIN_WAIT_1) {
		/* We could have gone to FIN_WAIT_1 (or EST) above */
		/*
		 * In FIN_WAIT_1 state, in addition to the processing for the
		 * ESTABLISHED state, if our FIN is now acknowledged then
		 * enter FIN_WAIT_2.
 		 */
 		if (ourfinisacked) {
 			/*
 			 * If we can't receive any more data, then closing
 			 * user can proceed. Starting the timer is contrary
 			 * to the specification, but if we don't get a FIN
 			 * we'll hang forever.
 			 *
 			 * XXXjl: we should release the tp also, and use a
 			 * compressed state.
 			 */
 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 				soisdisconnected(so);
 				tcp_timer_activate(tp, TT_2MSL,
 						   (tcp_fast_finwait2_recycle ?
 						    tcp_finwait2_timeout :
 						    TP_MAXIDLE(tp)));
 			}
 			tcp_state_change(tp, TCPS_FIN_WAIT_2);
 		}
 	}
 	return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 				 tiwin, thflags, nxt_pkt));
 }
 
 /*
 * A return value of 1 means the TCB is unlocked and most
 * likely gone; a return value of 0 means the TCB is still
 * locked.
  */
 static int
 bbr_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	struct tcp_bbr *bbr;
 	int32_t ret_val;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	/*
 	 * Header prediction: check for the two common cases of a
 	 * uni-directional data xfer.  If the packet has no control flags,
 	 * is in-sequence, the window didn't change and we're not
 	 * retransmitting, it's a candidate.  If the length is zero and the
 	 * ack moved forward, we're the sender side of the xfer.  Just free
 	 * the data acked & wake any higher level process that was blocked
 	 * waiting for space.  If the length is non-zero and the ack didn't
 	 * move, we're the receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to the socket
	 * buffer and note that we need a delayed ack. Make sure that the
	 * hidden state-flags are also off. Since we check for
	 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
 	 */
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	if (bbr->r_ctl.rc_delivered < (4 * tp->t_maxseg)) {
 		/*
		 * If we have delivered under 4 segments, increase the initial
		 * window if it was raised by the peer. We use this to determine
 		 * dynamic and static rwnd's at the end of a connection.
 		 */
 		bbr->r_ctl.rc_init_rwnd = max(tiwin, tp->snd_wnd);
 	}
 	if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
 	    __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) &&
 	    __predict_true(SEGQ_EMPTY(tp)) &&
 	    __predict_true(th->th_seq == tp->rcv_nxt)) {
 		if (tlen == 0) {
 			if (bbr_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
 			    tiwin, nxt_pkt, iptos)) {
 				return (0);
 			}
 		} else {
 			if (bbr_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
 			    tiwin, nxt_pkt)) {
 				return (0);
 			}
 		}
 	}
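	/*
	 * Neither fast path consumed the segment (e.g. SACK blocks are
	 * present, other flags are set, the reassembly queue is non-empty,
	 * or the segment is out of order), so fall through to the full
	 * slow-path processing below.
	 */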
 	ctf_calc_rwin(so, tp);
 
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (ctf_process_rst(m, th, so, tp));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
 		return (ret_val);
 	}
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			bbr->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	/* State changes only happen in bbr_process_data() */
 	return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
 * A return value of 1 means the TCB is unlocked and most
 * likely gone; a return value of 0 means the TCB is still
 * locked.
  */
 static int
 bbr_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	struct tcp_bbr *bbr;
 	int32_t ret_val;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (ctf_process_rst(m, th, so, tp));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
 		return (ret_val);
 	}
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			bbr->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
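/*
 * Handle a segment that carries new data after the user has closed the
 * socket. Returns 1 if the connection was torn down (tcp_close() plus a
 * RST), in which case the caller must not touch the tcb; returns 0 if the
 * data is silently swallowed and TF2_DROP_AF_DATA is set so the reset is
 * deferred until our remaining data has been ACKed.
 */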
 static int
 bbr_check_data_after_close(struct mbuf *m, struct tcp_bbr *bbr,
     struct tcpcb *tp, int32_t * tlen, struct tcphdr *th, struct socket *so)
 {
 
 	if (bbr->rc_allow_data_af_clo == 0) {
 close_now:
 		tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
 		/* tcp_close will kill the inp pre-log the Reset */
 		tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
 		tp = tcp_close(tp);
 		KMOD_TCPSTAT_INC(tcps_rcvafterclose);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
 		return (1);
 	}
 	if (sbavail(&so->so_snd) == 0)
 		goto close_now;
	/* Ok, we allow data; it is ignored and a follow-up reset is set up */
 	tp->rcv_nxt = th->th_seq + *tlen;
 	tp->t_flags2 |= TF2_DROP_AF_DATA;
 	bbr->r_wanted_output = 1;
 	*tlen = 0;
 	return (0);
 }
 
 /*
 * A return value of 1 means the TCB is unlocked and most
 * likely gone; a return value of 0 means the TCB is still
 * locked.
  */
 static int
 bbr_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ourfinisacked = 0;
 	int32_t ret_val;
 	struct tcp_bbr *bbr;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (ctf_process_rst(m, th, so, tp));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
 		return (ret_val);
 	}
 	/*
 	 * If new data are received on a connection after the user processes
 	 * are gone, then RST the other end.
	 * We call a separate function now so we might continue and set up
	 * to send the reset once all outstanding data has been ACKed.
 	 */
 	if ((tp->t_flags & TF_CLOSED) && tlen &&
 	    bbr_check_data_after_close(m, bbr, tp, &tlen, th, so))
 		return (1);
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			bbr->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (ourfinisacked) {
 		/*
 		 * If we can't receive any more data, then closing user can
 		 * proceed. Starting the timer is contrary to the
 		 * specification, but if we don't get a FIN we'll hang
 		 * forever.
 		 *
 		 * XXXjl: we should release the tp also, and use a
 		 * compressed state.
 		 */
 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 			soisdisconnected(so);
 			tcp_timer_activate(tp, TT_2MSL,
 			    (tcp_fast_finwait2_recycle ?
 			    tcp_finwait2_timeout :
 			    TP_MAXIDLE(tp)));
 		}
 		tcp_state_change(tp, TCPS_FIN_WAIT_2);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
 * A return value of 1 means the TCB is unlocked and most
 * likely gone; a return value of 0 means the TCB is still
 * locked.
  */
 static int
 bbr_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ourfinisacked = 0;
 	int32_t ret_val;
 	struct tcp_bbr *bbr;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (ctf_process_rst(m, th, so, tp));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
 		return (ret_val);
 	}
 	/*
 	 * If new data are received on a connection after the user processes
 	 * are gone, then RST the other end.
	 * We call a separate function now so we might continue and set up
	 * to send the reset once all outstanding data has been ACKed.
 	 */
 	if ((tp->t_flags & TF_CLOSED) && tlen &&
 	    bbr_check_data_after_close(m, bbr, tp, &tlen, th, so))
 		return (1);
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			bbr->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (ourfinisacked) {
 		tcp_twstart(tp);
 		m_freem(m);
 		return (1);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
 * A return value of 1 means the TCB is unlocked and most
 * likely gone; a return value of 0 means the TCB is still
 * locked.
  */
 static int
 bbr_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ourfinisacked = 0;
 	int32_t ret_val;
 	struct tcp_bbr *bbr;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (ctf_process_rst(m, th, so, tp));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
 		return (ret_val);
 	}
 	/*
 	 * If new data are received on a connection after the user processes
 	 * are gone, then RST the other end.
	 * We call a separate function now so we might continue and set up
	 * to send the reset once all outstanding data has been ACKed.
 	 */
 	if ((tp->t_flags & TF_CLOSED) && tlen &&
 	    bbr_check_data_after_close(m, bbr, tp, &tlen, th, so))
 		return (1);
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			bbr->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * case TCPS_LAST_ACK: Ack processing.
 	 */
 	if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (ourfinisacked) {
 		tp = tcp_close(tp);
 		ctf_do_drop(m, tp);
 		return (1);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
 * A return value of 1 means the TCB is unlocked and most
 * likely gone; a return value of 0 means the TCB is still
 * locked.
  */
 static int
 bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ourfinisacked = 0;
 	int32_t ret_val;
 	struct tcp_bbr *bbr;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	/* Reset receive buffer auto scaling when not in bulk receive mode. */
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (ctf_process_rst(m, th, so, tp));
 
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
 		return (ret_val);
 	}
 	/*
 	 * If new data are received on a connection after the user processes
 	 * are gone, then we may RST the other end depending on the outcome
 	 * of bbr_check_data_after_close.
	 * We call a separate function now so we might continue and set up
	 * to send the reset once all outstanding data has been ACKed.
 	 */
 	if ((tp->t_flags & TF_CLOSED) && tlen &&
 	    bbr_check_data_after_close(m, bbr, tp, &tlen, th, so))
 		return (1);
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			bbr->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 static void
 bbr_stop_all_timers(struct tcpcb *tp, struct tcp_bbr *bbr)
 {
 	/*
	 * Ensure no timers are running.
 	 */
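	/*
	 * Note: the only work here is to remember whether we were sitting
	 * in persist and to pull the connection off the hpts pacing wheel,
	 * which is where this stack's timers are driven from.
	 */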
 	if (tcp_timer_active(tp, TT_PERSIST)) {
 		/* We enter in persists, set the flag appropriately */
 		bbr->rc_in_persist = 1;
 	}
 	if (tcp_in_hpts(bbr->rc_tp)) {
 		tcp_hpts_remove(bbr->rc_tp);
 	}
 }
 
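/*
 * Switch this flow into "google mode": force pacing on, apply the google
 * discount and policer-detection settings, use a 10 second probe-rtt
 * interval and an 11 second rttprop filter window, and turn off the
 * rack-style retransmit cheat, the timer increases and the TCP/IP/Ethernet
 * overhead accounting that are specific to this implementation. The TSO
 * sizing is then recomputed.
 */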
 static void
 bbr_google_mode_on(struct tcp_bbr *bbr)
 {
 	bbr->rc_use_google = 1;
 	bbr->rc_no_pacing = 0;
 	bbr->r_ctl.bbr_google_discount = bbr_google_discount;
 	bbr->r_use_policer = bbr_policer_detection_enabled;
 	bbr->r_ctl.rc_probertt_int = (USECS_IN_SECOND * 10);
 	bbr->bbr_use_rack_cheat = 0;
 	bbr->r_ctl.rc_incr_tmrs = 0;
 	bbr->r_ctl.rc_inc_tcp_oh = 0;
 	bbr->r_ctl.rc_inc_ip_oh = 0;
 	bbr->r_ctl.rc_inc_enet_oh = 0;
 	reset_time(&bbr->r_ctl.rc_delrate,
 		   BBR_NUM_RTTS_FOR_GOOG_DEL_LIMIT);
 	reset_time_small(&bbr->r_ctl.rc_rttprop,
 			 (11 * USECS_IN_SECOND));
 	tcp_bbr_tso_size_check(bbr, tcp_get_usecs(&bbr->rc_tv));
 }
 
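/*
 * Leave "google mode" and restore this stack's native behaviour from the
 * sysctl-controlled defaults (pacing hold-off, rack-style retransmit cheat,
 * timer increases, header overhead accounting, probe-rtt interval and
 * rttprop filter length), then recompute the TSO sizing.
 */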
 static void
 bbr_google_mode_off(struct tcp_bbr *bbr)
 {
 	bbr->rc_use_google = 0;
 	bbr->r_ctl.bbr_google_discount = 0;
 	bbr->no_pacing_until = bbr_no_pacing_until;
 	bbr->r_use_policer = 0;
 	if (bbr->no_pacing_until)
 		bbr->rc_no_pacing = 1;
 	else
 		bbr->rc_no_pacing = 0;
 	if (bbr_use_rack_resend_cheat)
 		bbr->bbr_use_rack_cheat = 1;
 	else
 		bbr->bbr_use_rack_cheat = 0;
 	if (bbr_incr_timers)
 		bbr->r_ctl.rc_incr_tmrs = 1;
 	else
 		bbr->r_ctl.rc_incr_tmrs = 0;
 	if (bbr_include_tcp_oh)
 		bbr->r_ctl.rc_inc_tcp_oh = 1;
 	else
 		bbr->r_ctl.rc_inc_tcp_oh = 0;
 	if (bbr_include_ip_oh)
 		bbr->r_ctl.rc_inc_ip_oh = 1;
 	else
 		bbr->r_ctl.rc_inc_ip_oh = 0;
 	if (bbr_include_enet_oh)
 		bbr->r_ctl.rc_inc_enet_oh = 1;
 	else
 		bbr->r_ctl.rc_inc_enet_oh = 0;
 	bbr->r_ctl.rc_probertt_int = bbr_rtt_probe_limit;
 	reset_time(&bbr->r_ctl.rc_delrate,
 		   bbr_num_pktepo_for_del_limit);
 	reset_time_small(&bbr->r_ctl.rc_rttprop,
 			 (bbr_filter_len_sec * USECS_IN_SECOND));
 	tcp_bbr_tso_size_check(bbr, tcp_get_usecs(&bbr->rc_tv));
 }
 /*
  * Return 0 on success, non-zero on failure
  * which indicates the error (usually no memory).
  */
 static int
 bbr_init(struct tcpcb *tp, void **ptr)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct tcp_bbr *bbr = NULL;
 	uint32_t cts;
 
 	tcp_hpts_init(tp);
 
 	*ptr = uma_zalloc(bbr_pcb_zone, (M_NOWAIT | M_ZERO));
 	if (*ptr == NULL) {
 		/*
		 * We need to allocate memory but can't. The INP and INP_INFO
		 * locks are held and they are recursive (this happens during
		 * setup), so a scheme to drop the locks and retry fails. :(
 		 *
 		 */
 		return (ENOMEM);
 	}
 	bbr = (struct tcp_bbr *)*ptr;
 	bbr->rtt_valid = 0;
 	tp->t_flags2 |= TF2_CANNOT_DO_ECN;
 	tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	/* Take off any undesired flags */
 	tp->t_flags2 &= ~TF2_MBUF_QUEUE_READY;
 	tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
 	tp->t_flags2 &= ~TF2_MBUF_ACKCMP;
 	tp->t_flags2 &= ~TF2_MBUF_L_ACKS;
 
 	TAILQ_INIT(&bbr->r_ctl.rc_map);
 	TAILQ_INIT(&bbr->r_ctl.rc_free);
 	TAILQ_INIT(&bbr->r_ctl.rc_tmap);
 	bbr->rc_tp = tp;
 	bbr->rc_inp = inp;
 	cts = tcp_get_usecs(&bbr->rc_tv);
 	tp->t_acktime = 0;
 	bbr->rc_allow_data_af_clo = bbr_ignore_data_after_close;
 	bbr->r_ctl.rc_reorder_fade = bbr_reorder_fade;
 	bbr->rc_tlp_threshold = bbr_tlp_thresh;
 	bbr->r_ctl.rc_reorder_shift = bbr_reorder_thresh;
 	bbr->r_ctl.rc_pkt_delay = bbr_pkt_delay;
 	bbr->r_ctl.rc_min_to = bbr_min_to;
 	bbr->rc_bbr_state = BBR_STATE_STARTUP;
 	bbr->r_ctl.bbr_lost_at_state = 0;
 	bbr->r_ctl.rc_lost_at_startup = 0;
 	bbr->rc_all_timers_stopped = 0;
 	bbr->r_ctl.rc_bbr_lastbtlbw = 0;
 	bbr->r_ctl.rc_pkt_epoch_del = 0;
 	bbr->r_ctl.rc_pkt_epoch = 0;
 	bbr->r_ctl.rc_lowest_rtt = 0xffffffff;
 	bbr->r_ctl.rc_bbr_hptsi_gain = bbr_high_gain;
 	bbr->r_ctl.rc_bbr_cwnd_gain = bbr_high_gain;
 	bbr->r_ctl.rc_went_idle_time = cts;
 	bbr->rc_pacer_started = cts;
 	bbr->r_ctl.rc_pkt_epoch_time = cts;
 	bbr->r_ctl.rc_rcvtime = cts;
 	bbr->r_ctl.rc_bbr_state_time = cts;
 	bbr->r_ctl.rc_del_time = cts;
 	bbr->r_ctl.rc_tlp_rxt_last_time = cts;
 	bbr->r_ctl.last_in_probertt = cts;
 	bbr->skip_gain = 0;
 	bbr->gain_is_limited = 0;
 	bbr->no_pacing_until = bbr_no_pacing_until;
 	if (bbr->no_pacing_until)
 		bbr->rc_no_pacing = 1;
 	if (bbr_use_google_algo) {
 		bbr->rc_no_pacing = 0;
 		bbr->rc_use_google = 1;
 		bbr->r_ctl.bbr_google_discount = bbr_google_discount;
 		bbr->r_use_policer = bbr_policer_detection_enabled;
 	} else {
 		bbr->rc_use_google = 0;
 		bbr->r_ctl.bbr_google_discount = 0;
 		bbr->r_use_policer = 0;
 	}
 	if (bbr_ts_limiting)
 		bbr->rc_use_ts_limit = 1;
 	else
 		bbr->rc_use_ts_limit = 0;
 	if (bbr_ts_can_raise)
 		bbr->ts_can_raise = 1;
 	else
 		bbr->ts_can_raise = 0;
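	/*
	 * Map V_tcp_delack_enabled (net.inet.tcp.delayed_ack) onto
	 * t_delayed_ack: 0 disables delayed ACKs, 1 selects the default of
	 * ACKing every other segment (2), 2-99 is taken as an explicit
	 * segment count, and anything larger falls back to the default.
	 */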
 	if (V_tcp_delack_enabled == 1)
 		tp->t_delayed_ack = 2;
 	else if (V_tcp_delack_enabled == 0)
 		tp->t_delayed_ack = 0;
 	else if (V_tcp_delack_enabled < 100)
 		tp->t_delayed_ack = V_tcp_delack_enabled;
 	else
 		tp->t_delayed_ack = 2;
 	if (bbr->rc_use_google == 0)
 		bbr->r_ctl.rc_probertt_int = bbr_rtt_probe_limit;
 	else
 		bbr->r_ctl.rc_probertt_int = (USECS_IN_SECOND * 10);
 	bbr->r_ctl.rc_min_rto_ms = bbr_rto_min_ms;
 	bbr->rc_max_rto_sec = bbr_rto_max_sec;
 	bbr->rc_init_win = bbr_def_init_win;
 	if (tp->t_flags & TF_REQ_TSTMP)
 		bbr->rc_last_options = TCP_TS_OVERHEAD;
 	bbr->r_ctl.rc_pace_max_segs = tp->t_maxseg - bbr->rc_last_options;
 	bbr->r_ctl.rc_high_rwnd = tp->snd_wnd;
 	bbr->r_init_rtt = 1;
 
 	counter_u64_add(bbr_flows_nohdwr_pacing, 1);
 	if (bbr_allow_hdwr_pacing)
 		bbr->bbr_hdw_pace_ena = 1;
 	else
 		bbr->bbr_hdw_pace_ena = 0;
 	if (bbr_sends_full_iwnd)
 		bbr->bbr_init_win_cheat = 1;
 	else
 		bbr->bbr_init_win_cheat = 0;
 	bbr->r_ctl.bbr_utter_max = bbr_hptsi_utter_max;
 	bbr->r_ctl.rc_drain_pg = bbr_drain_gain;
 	bbr->r_ctl.rc_startup_pg = bbr_high_gain;
 	bbr->rc_loss_exit = bbr_exit_startup_at_loss;
 	bbr->r_ctl.bbr_rttprobe_gain_val = bbr_rttprobe_gain;
 	bbr->r_ctl.bbr_hptsi_per_second = bbr_hptsi_per_second;
 	bbr->r_ctl.bbr_hptsi_segments_delay_tar = bbr_hptsi_segments_delay_tar;
 	bbr->r_ctl.bbr_hptsi_segments_max = bbr_hptsi_segments_max;
 	bbr->r_ctl.bbr_hptsi_segments_floor = bbr_hptsi_segments_floor;
 	bbr->r_ctl.bbr_hptsi_bytes_min = bbr_hptsi_bytes_min;
 	bbr->r_ctl.bbr_cross_over = bbr_cross_over;
 	bbr->r_ctl.rc_rtt_shrinks = cts;
 	if (bbr->rc_use_google) {
 		setup_time_filter(&bbr->r_ctl.rc_delrate,
 				  FILTER_TYPE_MAX,
 				  BBR_NUM_RTTS_FOR_GOOG_DEL_LIMIT);
 		setup_time_filter_small(&bbr->r_ctl.rc_rttprop,
 					FILTER_TYPE_MIN, (11 * USECS_IN_SECOND));
 	} else {
 		setup_time_filter(&bbr->r_ctl.rc_delrate,
 				  FILTER_TYPE_MAX,
 				  bbr_num_pktepo_for_del_limit);
 		setup_time_filter_small(&bbr->r_ctl.rc_rttprop,
 					FILTER_TYPE_MIN, (bbr_filter_len_sec * USECS_IN_SECOND));
 	}
 	bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_INIT, 0);
 	if (bbr_uses_idle_restart)
 		bbr->rc_use_idle_restart = 1;
 	else
 		bbr->rc_use_idle_restart = 0;
 	bbr->r_ctl.rc_bbr_cur_del_rate = 0;
 	bbr->r_ctl.rc_initial_hptsi_bw = bbr_initial_bw_bps;
 	if (bbr_resends_use_tso)
 		bbr->rc_resends_use_tso = 1;
 	if (tp->snd_una != tp->snd_max) {
 		/* Create a send map for the current outstanding data */
 		struct bbr_sendmap *rsm;
 
 		rsm = bbr_alloc(bbr);
 		if (rsm == NULL) {
 			uma_zfree(bbr_pcb_zone, *ptr);
 			*ptr = NULL;
 			return (ENOMEM);
 		}
 		rsm->r_rtt_not_allowed = 1;
 		rsm->r_tim_lastsent[0] = cts;
 		rsm->r_rtr_cnt = 1;
 		rsm->r_rtr_bytes = 0;
 		rsm->r_start = tp->snd_una;
 		rsm->r_end = tp->snd_max;
 		rsm->r_dupack = 0;
 		rsm->r_delivered = bbr->r_ctl.rc_delivered;
 		rsm->r_ts_valid = 0;
 		rsm->r_del_ack_ts = tp->ts_recent;
 		rsm->r_del_time = cts;
 		if (bbr->r_ctl.r_app_limited_until)
 			rsm->r_app_limited = 1;
 		else
 			rsm->r_app_limited = 0;
 		TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_map, rsm, r_next);
 		TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_tmap, rsm, r_tnext);
 		rsm->r_in_tmap = 1;
 		if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW)
 			rsm->r_bbr_state = bbr_state_val(bbr);
 		else
 			rsm->r_bbr_state = 8;
 	}
 	if (bbr_use_rack_resend_cheat && (bbr->rc_use_google == 0))
 		bbr->bbr_use_rack_cheat = 1;
 	if (bbr_incr_timers && (bbr->rc_use_google == 0))
 		bbr->r_ctl.rc_incr_tmrs = 1;
 	if (bbr_include_tcp_oh && (bbr->rc_use_google == 0))
 		bbr->r_ctl.rc_inc_tcp_oh = 1;
 	if (bbr_include_ip_oh && (bbr->rc_use_google == 0))
 		bbr->r_ctl.rc_inc_ip_oh = 1;
 	if (bbr_include_enet_oh && (bbr->rc_use_google == 0))
 		bbr->r_ctl.rc_inc_enet_oh = 1;
 
 	bbr_log_type_statechange(bbr, cts, __LINE__);
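	/*
	 * If we are taking over a connection that is already established
	 * and has an srtt estimate, seed the rttprop filter with it; t_srtt
	 * is kept in ticks shifted left by TCP_RTT_SHIFT, hence the
	 * conversion to usec and the shift down before applying it.
	 */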
 	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    (tp->t_srtt)) {
 		uint32_t rtt;
 
 		rtt = (TICKS_2_USEC(tp->t_srtt) >> TCP_RTT_SHIFT);
 		apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, cts);
 	}
 	/* announce the settings and state */
 	bbr_log_settings_change(bbr, BBR_RECOVERY_LOWRTT);
 	tcp_bbr_tso_size_check(bbr, cts);
 	/*
 	 * Now call the generic function to start a timer. This will place
 	 * the TCB on the hptsi wheel if a timer is needed with appropriate
 	 * flags.
 	 */
 	bbr_stop_all_timers(tp, bbr);
 	/* 
	 * Validate that the timers are not in usec; if they are, convert them.
	 * BBR should in theory move to USEC and get rid of a
	 * lot of the TICKS_2 calls... but for now we stay
	 * with tick timers.
 	 */
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
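	/*
	 * Seed the retransmit timeout from the current srtt/rttvar
	 * estimate, clamped between t_rttmin and TCPTV_REXMTMAX.
	 */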
 	TCPT_RANGESET(tp->t_rxtcur,
 	    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
 	    tp->t_rttmin, TCPTV_REXMTMAX);
 	bbr_start_hpts_timer(bbr, tp, cts, 5, 0, 0);
 	return (0);
 }
 
 /*
 * Return 0 if we can accept the connection. Return
 * non-zero if we can't handle the connection. An EAGAIN
 * means you need to wait until the connection is up.
 * An EINVAL means we can never handle the connection
 * (e.g. no SACK, or a FIN has already been sent).
  */
 static int
 bbr_handoff_ok(struct tcpcb *tp)
 {
 	if ((tp->t_state == TCPS_CLOSED) ||
 	    (tp->t_state == TCPS_LISTEN)) {
 		/* Sure no problem though it may not stick */
 		return (0);
 	}
 	if ((tp->t_state == TCPS_SYN_SENT) ||
 	    (tp->t_state == TCPS_SYN_RECEIVED)) {
 		/*
		 * We really don't know; you have to get to ESTAB or beyond
		 * to tell.
 		 */
 		return (EAGAIN);
 	}
 	if (tp->t_flags & TF_SENTFIN)
 		return (EINVAL);
 	if ((tp->t_flags & TF_SACK_PERMIT) || bbr_sack_not_required) {
 		return (0);
 	}
 	/*
	 * If we reach here we don't do SACK on this connection, so we can
	 * never use this stack.
 	 */
 	return (EINVAL);
 }
 
 static void
 bbr_fini(struct tcpcb *tp, int32_t tcb_is_purged)
 {
 	if (tp->t_fb_ptr) {
 		uint32_t calc;
 		struct tcp_bbr *bbr;
 		struct bbr_sendmap *rsm;
 
 		bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 		if (bbr->r_ctl.crte)
 			tcp_rel_pacing_rate(bbr->r_ctl.crte, bbr->rc_tp);
 		bbr_log_flowend(bbr);
 		bbr->rc_tp = NULL;
 		if (bbr->bbr_hdrw_pacing)
 			counter_u64_add(bbr_flows_whdwr_pacing, -1);
 		else
 			counter_u64_add(bbr_flows_nohdwr_pacing, -1);
 		if (bbr->r_ctl.crte != NULL) {
 			tcp_rel_pacing_rate(bbr->r_ctl.crte, tp);
 			bbr->r_ctl.crte = NULL;
 		}
 		rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 		while (rsm) {
 			TAILQ_REMOVE(&bbr->r_ctl.rc_map, rsm, r_next);
 			uma_zfree(bbr_zone, rsm);
 			rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 		}
 		rsm = TAILQ_FIRST(&bbr->r_ctl.rc_free);
 		while (rsm) {
 			TAILQ_REMOVE(&bbr->r_ctl.rc_free, rsm, r_next);
 			uma_zfree(bbr_zone, rsm);
 			rsm = TAILQ_FIRST(&bbr->r_ctl.rc_free);
 		}
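		/*
		 * calc is how far the peer's advertised window grew beyond
		 * the value we recorded early in the connection; growth of
		 * more than 10% of the initial rwnd is accounted as a
		 * dynamically sized receive window, otherwise as static.
		 */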
 		calc = bbr->r_ctl.rc_high_rwnd - bbr->r_ctl.rc_init_rwnd;
 		if (calc > (bbr->r_ctl.rc_init_rwnd / 10))
 			BBR_STAT_INC(bbr_dynamic_rwnd);
 		else
 			BBR_STAT_INC(bbr_static_rwnd);
 		bbr->r_ctl.rc_free_cnt = 0;
 		uma_zfree(bbr_pcb_zone, tp->t_fb_ptr);
 		tp->t_fb_ptr = NULL;
 	}
 	/* Make sure snd_nxt is correctly set */
 	tp->snd_nxt = tp->snd_max;
 }
 
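/*
 * Map the current TCP state onto the matching bbr_do_* handler;
 * bbr->r_substate is the function the input path dispatches incoming
 * segments through.
 */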
 static void
 bbr_set_state(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t win)
 {
 	switch (tp->t_state) {
 	case TCPS_SYN_SENT:
 		bbr->r_state = TCPS_SYN_SENT;
 		bbr->r_substate = bbr_do_syn_sent;
 		break;
 	case TCPS_SYN_RECEIVED:
 		bbr->r_state = TCPS_SYN_RECEIVED;
 		bbr->r_substate = bbr_do_syn_recv;
 		break;
 	case TCPS_ESTABLISHED:
 		bbr->r_ctl.rc_init_rwnd = max(win, bbr->rc_tp->snd_wnd);
 		bbr->r_state = TCPS_ESTABLISHED;
 		bbr->r_substate = bbr_do_established;
 		break;
 	case TCPS_CLOSE_WAIT:
 		bbr->r_state = TCPS_CLOSE_WAIT;
 		bbr->r_substate = bbr_do_close_wait;
 		break;
 	case TCPS_FIN_WAIT_1:
 		bbr->r_state = TCPS_FIN_WAIT_1;
 		bbr->r_substate = bbr_do_fin_wait_1;
 		break;
 	case TCPS_CLOSING:
 		bbr->r_state = TCPS_CLOSING;
 		bbr->r_substate = bbr_do_closing;
 		break;
 	case TCPS_LAST_ACK:
 		bbr->r_state = TCPS_LAST_ACK;
 		bbr->r_substate = bbr_do_lastack;
 		break;
 	case TCPS_FIN_WAIT_2:
 		bbr->r_state = TCPS_FIN_WAIT_2;
 		bbr->r_substate = bbr_do_fin_wait_2;
 		break;
 	case TCPS_LISTEN:
 	case TCPS_CLOSED:
 	case TCPS_TIME_WAIT:
 	default:
 		break;
 	};
 }
 
 static void
 bbr_substate_change(struct tcp_bbr *bbr, uint32_t cts, int32_t line, int dolog)
 {
 	/*
	 * What sub-state are we going into now? Are there adjustments
	 * needed?
 	 */
 	int32_t old_state;
 
 	old_state = bbr_state_val(bbr);
 	if (bbr_state_val(bbr) == BBR_SUB_LEVEL1) {
 		/* Save the lowest srtt we saw in our end of the sub-state */
 		bbr->rc_hit_state_1 = 0;
 		if (bbr->r_ctl.bbr_smallest_srtt_this_state != 0xffffffff)
 			bbr->r_ctl.bbr_smallest_srtt_state2 = bbr->r_ctl.bbr_smallest_srtt_this_state;
 	}
 	bbr->rc_bbr_substate++;
 	if (bbr->rc_bbr_substate >= BBR_SUBSTATE_COUNT) {
 		/* Cycle back to first state-> gain */
 		bbr->rc_bbr_substate = 0;
 	}
 	if (bbr_state_val(bbr) == BBR_SUB_GAIN) {
 		/*
 		 * We enter the gain(5/4) cycle (possibly less if
 		 * shallow buffer detection is enabled)
 		 */
 		if (bbr->skip_gain) {
 			/*
			 * Hardware pacing has set our rate to
			 * the max and limited our b/w; just
			 * run level, i.e. no gain.
 			 */
 			bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[BBR_SUB_LEVEL1];
 		} else if (bbr->gain_is_limited &&
 			   bbr->bbr_hdrw_pacing &&
 			   bbr->r_ctl.crte) {
 			/*
			 * We can't gain above the hardware pacing
			 * rate, which is less than our rate + the gain;
			 * calculate the gain needed to reach the hardware
			 * pacing rate.
 			 */
 			uint64_t bw, rate, gain_calc;
 
 			bw = bbr_get_bw(bbr);
 			rate = bbr->r_ctl.crte->rate;
 			if ((rate > bw) &&
 			    (((bw *  (uint64_t)bbr_hptsi_gain[BBR_SUB_GAIN]) / (uint64_t)BBR_UNIT) > rate)) {
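				/*
				 * gain_calc is expressed in BBR_UNIT fixed
				 * point (BBR_UNIT == a gain of 1.0), so e.g.
				 * a hardware rate 10% above the b/w estimate
				 * yields a pacing gain of roughly 1.1 instead
				 * of the full GAIN cycle gain.
				 */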
 				gain_calc = (rate * BBR_UNIT) / bw;
 				if (gain_calc < BBR_UNIT)
 					gain_calc = BBR_UNIT;
 				bbr->r_ctl.rc_bbr_hptsi_gain = (uint16_t)gain_calc;
 			} else {
 				bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[BBR_SUB_GAIN];
 			}
 		} else
 			bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[BBR_SUB_GAIN];
 		if ((bbr->rc_use_google == 0) && (bbr_gain_to_target == 0)) {
 			bbr->r_ctl.rc_bbr_state_atflight = cts;
 		} else
 			bbr->r_ctl.rc_bbr_state_atflight = 0;
 	} else if (bbr_state_val(bbr) == BBR_SUB_DRAIN) {
 		bbr->rc_hit_state_1 = 1;
 		bbr->r_ctl.rc_exta_time_gd = 0;
 		bbr->r_ctl.flightsize_at_drain = ctf_flight_size(bbr->rc_tp,
 						     (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 		if (bbr_state_drain_2_tar) {
 			bbr->r_ctl.rc_bbr_state_atflight = 0;
 		} else
 			bbr->r_ctl.rc_bbr_state_atflight = cts;
 		bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[BBR_SUB_DRAIN];
 	} else {
 		/* All other cycles hit here 2-7 */
 		if ((old_state == BBR_SUB_DRAIN) && bbr->rc_hit_state_1) {
 			if (bbr_sub_drain_slam_cwnd &&
 			    (bbr->rc_use_google == 0) &&
 			    (bbr->rc_tp->snd_cwnd < bbr->r_ctl.rc_saved_cwnd)) {
 				bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_saved_cwnd;
 				bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 			}
 			if ((cts - bbr->r_ctl.rc_bbr_state_time) > bbr_get_rtt(bbr, BBR_RTT_PROP))
 				bbr->r_ctl.rc_exta_time_gd += ((cts - bbr->r_ctl.rc_bbr_state_time) -
 							       bbr_get_rtt(bbr, BBR_RTT_PROP));
 			else
 				bbr->r_ctl.rc_exta_time_gd = 0;
 			if (bbr->r_ctl.rc_exta_time_gd) {
 				bbr->r_ctl.rc_level_state_extra = bbr->r_ctl.rc_exta_time_gd;
 				/* Now chop up the time for each state (div by 7) */
 				bbr->r_ctl.rc_level_state_extra /= 7;
 				if (bbr_rand_ot && bbr->r_ctl.rc_level_state_extra) {
 					/* Add a randomization */
 					bbr_randomize_extra_state_time(bbr);
 				}
 			}
 		}
 		bbr->r_ctl.rc_bbr_state_atflight = max(1, cts);
 		bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[bbr_state_val(bbr)];
 	}
 	if (bbr->rc_use_google) {
 		bbr->r_ctl.rc_bbr_state_atflight = max(1, cts);
 	}
 	bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost;
 	bbr->r_ctl.rc_bbr_cwnd_gain = bbr_cwnd_gain;
 	if (dolog)
 		bbr_log_type_statechange(bbr, cts, line);
 
 	if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) {
 		uint32_t time_in;
 
 		time_in = cts - bbr->r_ctl.rc_bbr_state_time;
 		if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) {
 			counter_u64_add(bbr_state_time[(old_state + 5)], time_in);
 		} else {
 			counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in);
 		}
 	}
 	bbr->r_ctl.bbr_smallest_srtt_this_state = 0xffffffff;
 	bbr_set_state_target(bbr, __LINE__);
 	if (bbr_sub_drain_slam_cwnd &&
 	    (bbr->rc_use_google == 0) &&
 	    (bbr_state_val(bbr) == BBR_SUB_DRAIN)) {
 		/* Slam down the cwnd */
 		bbr->r_ctl.rc_saved_cwnd = bbr->rc_tp->snd_cwnd;
 		bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state;
 		if (bbr_sub_drain_app_limit) {
 			/* Go app limited if we are on a long drain */
 			bbr->r_ctl.r_app_limited_until = (bbr->r_ctl.rc_delivered +
 							  ctf_flight_size(bbr->rc_tp,
 							      (bbr->r_ctl.rc_sacked +
 							       bbr->r_ctl.rc_lost_bytes)));
 		}
 		bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 	}
 	if (bbr->rc_lt_use_bw) {
 		/* In policed mode we clamp pacing_gain to BBR_UNIT */
 		bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT;
 	}
 	/* Google changes TSO size every cycle */
 	if (bbr->rc_use_google)
 		tcp_bbr_tso_size_check(bbr, cts);
 	bbr->r_ctl.gain_epoch = cts;
 	bbr->r_ctl.rc_bbr_state_time = cts;
 	bbr->r_ctl.substate_pe = bbr->r_ctl.rc_pkt_epoch;
 }
 
 static void
 bbr_set_probebw_google_gains(struct tcp_bbr *bbr, uint32_t cts, uint32_t losses)
 {
 	if ((bbr_state_val(bbr) == BBR_SUB_DRAIN) &&
 	    (google_allow_early_out == 1) &&
 	    (bbr->r_ctl.rc_flight_at_input <= bbr->r_ctl.rc_target_at_state)) {
		/* We have reached our target flight size, possibly early */
 		goto change_state;
 	}
 	if (TSTMP_LT(cts, bbr->r_ctl.rc_bbr_state_time)) {
 		return;
 	}
 	if ((cts - bbr->r_ctl.rc_bbr_state_time) < bbr_get_rtt(bbr, BBR_RTT_PROP)) {
 		/*
		 * At least one rttProp must have passed before
		 * we can change states.
 		 */
 		return;
 	}
 	if (bbr_state_val(bbr) == BBR_SUB_GAIN) {
 		/*
		 * The needed time has passed, but for
		 * the gain cycle extra rules apply:
		 * 1) If we have seen loss, we exit.
		 * 2) If we have not reached the target,
		 *    we stay in GAIN (gain-to-target).
 		 */
 		if (google_consider_lost && losses)
 			goto change_state;
 		if (bbr->r_ctl.rc_target_at_state > bbr->r_ctl.rc_flight_at_input) {
 			return;
 		}
 	}
 change_state:
 	/* For gain we must reach our target, all others last 1 rttProp */
 	bbr_substate_change(bbr, cts, __LINE__, 1);
 }
 
 static void
 bbr_set_probebw_gains(struct tcp_bbr *bbr, uint32_t cts, uint32_t losses)
 {
 	uint32_t flight, bbr_cur_cycle_time;
 
 	if (bbr->rc_use_google) {
 		bbr_set_probebw_google_gains(bbr, cts, losses);
 		return;
 	}
 	if (cts == 0) {
 		/*
		 * Never allow cts to be 0; we
		 * do this so we can judge whether
		 * we have set a timestamp.
 		 */
 		cts = 1;
 	}
 	if (bbr_state_is_pkt_epoch)
 		bbr_cur_cycle_time = bbr_get_rtt(bbr, BBR_RTT_PKTRTT);
 	else
 		bbr_cur_cycle_time = bbr_get_rtt(bbr, BBR_RTT_PROP);
 
 	if (bbr->r_ctl.rc_bbr_state_atflight == 0) {
 		if (bbr_state_val(bbr) == BBR_SUB_DRAIN) {
 			flight = ctf_flight_size(bbr->rc_tp,
 				     (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 			if (bbr_sub_drain_slam_cwnd && bbr->rc_hit_state_1) {
				/* Keep it slammed down */
 				if (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state) {
 					bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state;
 					bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 				}
 				if (bbr_sub_drain_app_limit) {
 					/* Go app limited if we are on a long drain */
 					bbr->r_ctl.r_app_limited_until = (bbr->r_ctl.rc_delivered + flight);
 				}
 			}
 			if (TSTMP_GT(cts, bbr->r_ctl.gain_epoch) &&
 			    (((cts - bbr->r_ctl.gain_epoch) > bbr_get_rtt(bbr, BBR_RTT_PROP)) ||
 			     (flight >= bbr->r_ctl.flightsize_at_drain))) {
 				/*
				 * Still here after the same time as
				 * the gain. We need to drain harder
				 * for the next srtt. Reduce by a set amount;
				 * the gain drop is capped at the DRAIN state's
				 * value (88).
 				 */
 				bbr->r_ctl.flightsize_at_drain = flight;
 				if (bbr_drain_drop_mul &&
 				    bbr_drain_drop_div &&
 				    (bbr_drain_drop_mul < bbr_drain_drop_div)) {
					/* Use the configured drop value (default 4/5 = 20%) */
 					bbr->r_ctl.rc_bbr_hptsi_gain *= bbr_drain_drop_mul;
 					bbr->r_ctl.rc_bbr_hptsi_gain /= bbr_drain_drop_div;
 				} else {
					/* Default: a drop of 20% */
 					bbr->r_ctl.rc_bbr_hptsi_gain *= 4;
 					bbr->r_ctl.rc_bbr_hptsi_gain /= 5;
 				}
 				if (bbr->r_ctl.rc_bbr_hptsi_gain <= bbr_drain_floor) {
 					/* Reduce our gain again to the bottom  */
 					bbr->r_ctl.rc_bbr_hptsi_gain = max(bbr_drain_floor, 1);
 				}
 				bbr_log_exit_gain(bbr, cts, 4);
 				/*
 				 * Extend out so we wait another
 				 * epoch before dropping again.
 				 */
 				bbr->r_ctl.gain_epoch = cts;
 			}
 			if (flight <= bbr->r_ctl.rc_target_at_state) {
 				if (bbr_sub_drain_slam_cwnd &&
 				    (bbr->rc_use_google == 0) &&
 				    (bbr->rc_tp->snd_cwnd < bbr->r_ctl.rc_saved_cwnd)) {
 					bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_saved_cwnd;
 					bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 				}
 				bbr->r_ctl.rc_bbr_state_atflight = max(cts, 1);
 				bbr_log_exit_gain(bbr, cts, 3);
 			}
 		} else {
 			/* Its a gain  */
 			if (bbr->r_ctl.rc_lost > bbr->r_ctl.bbr_lost_at_state) {
 				bbr->r_ctl.rc_bbr_state_atflight = max(cts, 1);
 				goto change_state;
 			}
 			if ((ctf_outstanding(bbr->rc_tp) >= bbr->r_ctl.rc_target_at_state) ||
 			    ((ctf_outstanding(bbr->rc_tp) +  bbr->rc_tp->t_maxseg - 1) >=
 			     bbr->rc_tp->snd_wnd)) {
 				bbr->r_ctl.rc_bbr_state_atflight = max(cts, 1);
 				bbr_log_exit_gain(bbr, cts, 2);
 			}
 		}
 		/**
		 * We fall through and return; one of two things has
		 * occurred:
		 * 1) We are still not at target,
		 *    <or>
		 * 2) We reached the target and set rc_bbr_state_atflight,
		 *    which means we will no longer hit this block the
		 *    next time we are called.
 		 */
 		return;
 	}
 change_state:
 	if (TSTMP_LT(cts, bbr->r_ctl.rc_bbr_state_time))
 		return;
 	if ((cts - bbr->r_ctl.rc_bbr_state_time) < bbr_cur_cycle_time) {
 		/* Less than a full time-period has passed */
 		return;
 	}
 	if (bbr->r_ctl.rc_level_state_extra &&
 	    (bbr_state_val(bbr) > BBR_SUB_DRAIN) &&
 	    ((cts - bbr->r_ctl.rc_bbr_state_time) <
 	     (bbr_cur_cycle_time + bbr->r_ctl.rc_level_state_extra))) {
 		/* Less than a full time-period + extra has passed */
 		return;
 	}
 	if (bbr_gain_gets_extra_too &&
 	    bbr->r_ctl.rc_level_state_extra &&
 	    (bbr_state_val(bbr) == BBR_SUB_GAIN) &&
 	    ((cts - bbr->r_ctl.rc_bbr_state_time) <
 	     (bbr_cur_cycle_time + bbr->r_ctl.rc_level_state_extra))) {
 		/* Less than a full time-period + extra has passed */
 		return;
 	}
 	bbr_substate_change(bbr, cts, __LINE__, 1);
 }
 
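/*
 * Compute the in-flight/cwnd target for the given gain. In google mode
 * this is simply the gained cwnd target; otherwise the raw target is
 * rounded up to a whole send unit (the smaller of the MSS and the pacing
 * max segment size) and never allowed below the minimum cwnd.
 */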
 static uint32_t
 bbr_get_a_state_target(struct tcp_bbr *bbr, uint32_t gain)
 {
 	uint32_t mss, tar;
 
 	if (bbr->rc_use_google) {
 		/* Google just uses the cwnd target */
 		tar = bbr_get_target_cwnd(bbr, bbr_get_bw(bbr), gain);
 	} else {
 		mss = min((bbr->rc_tp->t_maxseg - bbr->rc_last_options),
 			  bbr->r_ctl.rc_pace_max_segs);
 		/* Get the base cwnd with gain rounded to a mss */
 		tar = roundup(bbr_get_raw_target_cwnd(bbr, bbr_get_bw(bbr),
 						      gain), mss);
 		/* Make sure it is within our min */
 		if (tar < get_min_cwnd(bbr))
 			return (get_min_cwnd(bbr));
 	}
 	return (tar);
 }
 
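/*
 * Set rc_target_at_state for the current BBR state. The meth value only
 * records which rule produced the target (1 = classic probe-rtt, 2 = gain
 * cycle, 3 = BBR_UNIT, 4 = drain-gain floor, 5 = pacing gain) so it can be
 * logged.
 */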
 static void
 bbr_set_state_target(struct tcp_bbr *bbr, int line)
 {
 	uint32_t tar, meth;
 
 	if ((bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) &&
 	    ((bbr->r_ctl.bbr_rttprobe_gain_val == 0) || bbr->rc_use_google)) {
 		/* Special case using old probe-rtt method */
 		tar = bbr_rtt_probe_cwndtarg * (bbr->rc_tp->t_maxseg - bbr->rc_last_options);
 		meth = 1;
 	} else {
 		/* Non-probe-rtt case and reduced probe-rtt  */
 		if ((bbr->rc_bbr_state == BBR_STATE_PROBE_BW) &&
 		    (bbr->r_ctl.rc_bbr_hptsi_gain > BBR_UNIT)) {
 			/* For gain cycle we use the hptsi gain */
 			tar = bbr_get_a_state_target(bbr, bbr->r_ctl.rc_bbr_hptsi_gain);
 			meth = 2;
 		} else if ((bbr_target_is_bbunit) || bbr->rc_use_google) {
 			/*
			 * If configured, or for google mode, all other states
			 * get BBR_UNIT.
 			 */
 			tar = bbr_get_a_state_target(bbr, BBR_UNIT);
 			meth = 3;
 		} else {
 			/*
 			 * Or we set a target based on the pacing gain
 			 * for non-google mode and default (non-configured).
 			 * Note we don't set a target goal below drain (192).
 			 */
 			if (bbr->r_ctl.rc_bbr_hptsi_gain < bbr_hptsi_gain[BBR_SUB_DRAIN])  {
 				tar = bbr_get_a_state_target(bbr, bbr_hptsi_gain[BBR_SUB_DRAIN]);
 				meth = 4;
 			} else {
 				tar = bbr_get_a_state_target(bbr, bbr->r_ctl.rc_bbr_hptsi_gain);
 				meth = 5;
 			}
 		}
 	}
 	bbr_log_set_of_state_target(bbr, tar, line, meth);
 	bbr->r_ctl.rc_target_at_state = tar;
 }
 
 static void
 bbr_enter_probe_rtt(struct tcp_bbr *bbr, uint32_t cts, int32_t line)
 {
 	/* Change to probe_rtt */
 	uint32_t time_in;
 
 	bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost;
 	bbr->r_ctl.flightsize_at_drain = ctf_flight_size(bbr->rc_tp,
 					     (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 	bbr->r_ctl.r_app_limited_until = (bbr->r_ctl.flightsize_at_drain
 					  + bbr->r_ctl.rc_delivered);
 	/* Set up so we force-feed the filter */
 	if (bbr->rc_use_google || bbr_probertt_sets_rtt)
 		bbr->rc_prtt_set_ts = 1;
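 	/* Credit the time spent in the state we are leaving to its time counter. */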
 	if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) {
 		time_in = cts - bbr->r_ctl.rc_bbr_state_time;
 		counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in);
 	}
 	bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_ENTERPROBE, 0);
 	bbr->r_ctl.rc_rtt_shrinks = cts;
 	bbr->r_ctl.last_in_probertt = cts;
 	bbr->r_ctl.rc_probertt_srttchktim = cts;
 	bbr->r_ctl.rc_bbr_state_time = cts;
 	bbr->rc_bbr_state = BBR_STATE_PROBE_RTT;
 	/* We need to force the filter to update */
 
 	if ((bbr_sub_drain_slam_cwnd) &&
 	    bbr->rc_hit_state_1 &&
 	    (bbr->rc_use_google == 0) &&
 	    (bbr_state_val(bbr) == BBR_SUB_DRAIN)) {
 		if (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_saved_cwnd)
 			bbr->r_ctl.rc_saved_cwnd = bbr->rc_tp->snd_cwnd;
 	} else
 		bbr->r_ctl.rc_saved_cwnd = bbr->rc_tp->snd_cwnd;
 	/* Update the lost */
 	bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost;
 	if ((bbr->r_ctl.bbr_rttprobe_gain_val == 0) || bbr->rc_use_google){
 		/* Set to the non-configurable default of 4 (PROBE_RTT_MIN)  */
 		bbr->rc_tp->snd_cwnd = bbr_rtt_probe_cwndtarg * (bbr->rc_tp->t_maxseg - bbr->rc_last_options);
 		bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 		bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT;
 		bbr->r_ctl.rc_bbr_cwnd_gain = BBR_UNIT;
 		bbr_log_set_of_state_target(bbr, bbr->rc_tp->snd_cwnd, __LINE__, 6);
 		bbr->r_ctl.rc_target_at_state = bbr->rc_tp->snd_cwnd;
 	} else {
 		/*
 		 * We bring it down slowly by using a hptsi gain that is
 		 * probably 75%. This will slowly float down our outstanding
 		 * without tampering with the cwnd.
 		 */
 		bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.bbr_rttprobe_gain_val;
 		bbr->r_ctl.rc_bbr_cwnd_gain = BBR_UNIT;
 		bbr_set_state_target(bbr, __LINE__);
 		if (bbr_prtt_slam_cwnd &&
 		    (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state)) {
 			bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state;
 			bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 		}
 	}
 	if (ctf_flight_size(bbr->rc_tp,
 		(bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) <=
 	    bbr->r_ctl.rc_target_at_state) {
 		/* We are at target */
 		bbr->r_ctl.rc_bbr_enters_probertt = cts;
 	} else {
 		/* We need to come down to reach target before our time begins */
 		bbr->r_ctl.rc_bbr_enters_probertt = 0;
 	}
 	bbr->r_ctl.rc_pe_of_prtt = bbr->r_ctl.rc_pkt_epoch;
 	BBR_STAT_INC(bbr_enter_probertt);
 	bbr_log_exit_gain(bbr, cts, 0);
 	bbr_log_type_statechange(bbr, cts, line);
 }
 
 static void
 bbr_check_probe_rtt_limits(struct tcp_bbr *bbr, uint32_t cts)
 {
 	/*
 	 * Sanity check on probe-rtt intervals.
 	 * In crazy situations where we are competing
 	 * against new-reno flows with huge buffers,
 	 * our rtt-prop interval could come to dominate
 	 * things. If we can't get through a full set
 	 * of cycles, we need to adjust it.
 	 */
 	if (bbr_can_adjust_probertt &&
 	    (bbr->rc_use_google == 0)) {
 		uint16_t val = 0;
 		uint32_t cur_rttp, fval, newval, baseval;
 
 		/* Are we too small and going into probe-rtt too often? */
 		baseval = (bbr_get_rtt(bbr, BBR_RTT_PROP) * (BBR_SUBSTATE_COUNT + 1));
 		cur_rttp = roundup(baseval, USECS_IN_SECOND);
 		fval = bbr_filter_len_sec * USECS_IN_SECOND;
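 		/*
 		 * cur_rttp is roughly the time for a full set of
 		 * substates (one rtt-prop per substate, plus one),
 		 * rounded up to whole seconds; fval is the rtt-prop
 		 * filter length in usecs. If a full cycle takes longer
 		 * than the probe interval, grow the interval (and the
 		 * filter window) below.
 		 */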
 		if (bbr_is_ratio == 0) {
 			if (fval > bbr_rtt_probe_limit)
 				newval = cur_rttp + (fval - bbr_rtt_probe_limit);
 			else
 				newval = cur_rttp;
 		} else {
 			int mul;
 
 			mul = fval / bbr_rtt_probe_limit;
 			newval = cur_rttp * mul;
 		}
 		if (cur_rttp > bbr->r_ctl.rc_probertt_int) {
 			bbr->r_ctl.rc_probertt_int = cur_rttp;
 			reset_time_small(&bbr->r_ctl.rc_rttprop, newval);
 			val = 1;
 		} else {
 			/*
 			 * No adjustments were made;
 			 * do we need to shrink it?
 			 */
 			if (bbr->r_ctl.rc_probertt_int > bbr_rtt_probe_limit) {
 				if (cur_rttp <= bbr_rtt_probe_limit) {
 					/*
 					 * Things have calmed down, let's
 					 * shrink all the way to the default.
 					 */
 					bbr->r_ctl.rc_probertt_int = bbr_rtt_probe_limit;
 					reset_time_small(&bbr->r_ctl.rc_rttprop,
 							 (bbr_filter_len_sec * USECS_IN_SECOND));
 					cur_rttp = bbr_rtt_probe_limit;
 					newval = (bbr_filter_len_sec * USECS_IN_SECOND);
 					val = 2;
 				} else {
 					/*
 					 * Well does some adjustment make sense?
 					 */
 					if (cur_rttp < bbr->r_ctl.rc_probertt_int) {
 						/* We can reduce interval time some */
 						bbr->r_ctl.rc_probertt_int = cur_rttp;
 						reset_time_small(&bbr->r_ctl.rc_rttprop, newval);
 						val = 3;
 					}
 				}
 			}
 		}
 		if (val)
 			bbr_log_rtt_shrinks(bbr, cts, cur_rttp, newval, __LINE__, BBR_RTTS_RESETS_VALUES, val);
 	}
 }
 
 static void
 bbr_exit_probe_rtt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
 {
 	/* Exit probe-rtt */
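 	/* Restore the cwnd saved at probe-rtt entry if the current cwnd is below it. */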
 
 	if (tp->snd_cwnd < bbr->r_ctl.rc_saved_cwnd) {
 		tp->snd_cwnd = bbr->r_ctl.rc_saved_cwnd;
 		bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 	}
 	bbr_log_exit_gain(bbr, cts, 1);
 	bbr->rc_hit_state_1 = 0;
 	bbr->r_ctl.rc_rtt_shrinks = cts;
 	bbr->r_ctl.last_in_probertt = cts;
 	bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_RTTPROBE, 0);
 	bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost;
 	bbr->r_ctl.r_app_limited_until = (ctf_flight_size(tp,
 					      (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) +
 					  bbr->r_ctl.rc_delivered);
 	if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) {
 		uint32_t time_in;
 
 		time_in = cts - bbr->r_ctl.rc_bbr_state_time;
 		counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in);
 	}
 	if (bbr->rc_filled_pipe) {
 		/* Switch to probe_bw */
 		bbr->rc_bbr_state = BBR_STATE_PROBE_BW;
 		bbr->rc_bbr_substate = bbr_pick_probebw_substate(bbr, cts);
 		bbr->r_ctl.rc_bbr_cwnd_gain = bbr_cwnd_gain;
 		bbr_substate_change(bbr, cts, __LINE__, 0);
 		bbr_log_type_statechange(bbr, cts, __LINE__);
 	} else {
 		/* Back to startup */
 		bbr->rc_bbr_state = BBR_STATE_STARTUP;
 		bbr->r_ctl.rc_bbr_state_time = cts;
 		/*
 		 * We don't want to give a completely free 3
 		 * measurements until we exit, so we add the
 		 * number of packet-epochs we were in probe-rtt
 		 * to the last startup epoch. That way
 		 * we will still retain the old state.
 		 */
 		bbr->r_ctl.rc_bbr_last_startup_epoch += (bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_pe_of_prtt);
 		bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost;
 		/* Make sure to use the lower pg when shifting back in */
 		if (bbr->r_ctl.rc_lost &&
 		    bbr_use_lower_gain_in_startup &&
 		    (bbr->rc_use_google == 0))
 			bbr->r_ctl.rc_bbr_hptsi_gain = bbr_startup_lower;
 		else
 			bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.rc_startup_pg;
 		bbr->r_ctl.rc_bbr_cwnd_gain = bbr->r_ctl.rc_startup_pg;
 		/* Probably not needed but set it anyway */
 		bbr_set_state_target(bbr, __LINE__);
 		bbr_log_type_statechange(bbr, cts, __LINE__);
 		bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 		    bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 0);
 	}
 	bbr_check_probe_rtt_limits(bbr, cts);
 }
 
 static int32_t inline
 bbr_should_enter_probe_rtt(struct tcp_bbr *bbr, uint32_t cts)
 {
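 	/*
 	 * Enter probe-rtt once we are past the initial window, not in
 	 * persists, and the probe clock (rc_rtt_shrinks) has not been
 	 * reset within a full probe interval; or, when forced entry is
 	 * enabled, once more than a probe interval has passed since we
 	 * were last in probe-rtt.
 	 */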
 	if ((bbr->rc_past_init_win == 1) &&
 	    (bbr->rc_in_persist == 0) &&
 	    (bbr_calc_time(cts, bbr->r_ctl.rc_rtt_shrinks) >= bbr->r_ctl.rc_probertt_int)) {
 		return (1);
 	}
 	if (bbr_can_force_probertt &&
 	    (bbr->rc_in_persist == 0) &&
 	    (TSTMP_GT(cts, bbr->r_ctl.last_in_probertt)) &&
 	    ((cts - bbr->r_ctl.last_in_probertt) > bbr->r_ctl.rc_probertt_int)) {
 		return (1);
 	}
 	return (0);
 }
 
 static int32_t
 bbr_google_startup(struct tcp_bbr *bbr, uint32_t cts, int32_t  pkt_epoch)
 {
 	uint64_t btlbw, gain;
 	if (pkt_epoch == 0) {
 		/*
 		 * Need to be on a pkt-epoch to continue.
 		 */
 		return (0);
 	}
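 	/*
 	 * Exit startup only after BBR_STARTUP_EPOCHS packet-epochs pass
 	 * without the measured bottleneck b/w growing by at least
 	 * bbr_start_exit percent over the last recorded value.
 	 */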
 	btlbw = bbr_get_full_bw(bbr);
 	gain = ((bbr->r_ctl.rc_bbr_lastbtlbw *
 		 (uint64_t)bbr_start_exit) / (uint64_t)100) + bbr->r_ctl.rc_bbr_lastbtlbw;
 	if (btlbw >= gain) {
 		bbr->r_ctl.rc_bbr_last_startup_epoch = bbr->r_ctl.rc_pkt_epoch;
 		bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 				      bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 3);
 		bbr->r_ctl.rc_bbr_lastbtlbw = btlbw;
 	}
 	if ((bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_bbr_last_startup_epoch) >= BBR_STARTUP_EPOCHS)
 		return (1);
 	bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 			      bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 8);
 	return (0);
 }
 
 static int32_t inline
 bbr_state_startup(struct tcp_bbr *bbr, uint32_t cts, int32_t epoch, int32_t pkt_epoch)
 {
 	/* Have we gained 25% in the last 3 packet-based epochs? */
 	uint64_t btlbw, gain;
 	int do_exit;
 	int delta, rtt_gain;
 
 	if ((bbr->rc_tp->snd_una == bbr->rc_tp->snd_max) &&
 	    (bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time) >= bbr_rtt_probe_time)) {
 		/*
 		 * This qualifies as an RTT_PROBE session since we dropped the
 		 * data outstanding to nothing and waited more than
 		 * bbr_rtt_probe_time.
 		 */
 		bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_WASIDLE, 0);
 		bbr_set_reduced_rtt(bbr, cts, __LINE__);
 	}
 	if (bbr_should_enter_probe_rtt(bbr, cts)) {
 		bbr_enter_probe_rtt(bbr, cts, __LINE__);
 		return (0);
 	}
 	if (bbr->rc_use_google)
 		return (bbr_google_startup(bbr, cts,  pkt_epoch));
 
 	if ((bbr->r_ctl.rc_lost > bbr->r_ctl.rc_lost_at_startup) &&
 	    (bbr_use_lower_gain_in_startup)) {
 		/* Drop to a lower gain, 1.5x, since we saw loss */
 		bbr->r_ctl.rc_bbr_hptsi_gain = bbr_startup_lower;
 	}
 	if (pkt_epoch == 0) {
 		/*
 		 * Need to be on a pkt-epoch to continue.
 		 */
 		return (0);
 	}
 	if (bbr_rtt_gain_thresh) {
 		/*
 		 * Do we allow a flow to stay
 		 * in startup with no loss and no
 		 * gain in rtt over a set threshold?
 		 */
 		if (bbr->r_ctl.rc_pkt_epoch_rtt &&
 		    bbr->r_ctl.startup_last_srtt &&
 		    (bbr->r_ctl.rc_pkt_epoch_rtt > bbr->r_ctl.startup_last_srtt)) {
 			delta = bbr->r_ctl.rc_pkt_epoch_rtt - bbr->r_ctl.startup_last_srtt;
 			rtt_gain = (delta * 100) / bbr->r_ctl.startup_last_srtt;
 		} else
 			rtt_gain = 0;
 		if ((bbr->r_ctl.startup_last_srtt == 0)  ||
 		    (bbr->r_ctl.rc_pkt_epoch_rtt < bbr->r_ctl.startup_last_srtt))
 			/* First time or new lower value */
 			bbr->r_ctl.startup_last_srtt = bbr->r_ctl.rc_pkt_epoch_rtt;
 
 		if ((bbr->r_ctl.rc_lost == 0) &&
 		    (rtt_gain < bbr_rtt_gain_thresh)) {
 			/*
 			 * No loss, and we are under
 			 * our gain threshold for
 			 * increasing RTT.
 			 */
 			if (bbr->r_ctl.rc_bbr_last_startup_epoch < bbr->r_ctl.rc_pkt_epoch)
 				bbr->r_ctl.rc_bbr_last_startup_epoch++;
 			bbr_log_startup_event(bbr, cts, rtt_gain,
 					      delta, bbr->r_ctl.startup_last_srtt, 10);
 			return (0);
 		}
 	}
 	if ((bbr->r_ctl.r_measurement_count == bbr->r_ctl.last_startup_measure) &&
 	    (bbr->r_ctl.rc_lost_at_startup == bbr->r_ctl.rc_lost) &&
 	    (!IN_RECOVERY(bbr->rc_tp->t_flags))) {
 		/*
 		 * When we have no loss and are not in recovery, we only
 		 * assess if we have a new measurement.
 		 * Drag our last_startup epoch up by one so we will hold
 		 * the number of non-gain epochs we have already accumulated.
 		 */
 		if (bbr->r_ctl.rc_bbr_last_startup_epoch < bbr->r_ctl.rc_pkt_epoch)
 			bbr->r_ctl.rc_bbr_last_startup_epoch++;
 		bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 				      bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 9);
 		return (0);
 	}
 	/* Case where we reduced the lost (bad retransmit) */
 	if (bbr->r_ctl.rc_lost_at_startup > bbr->r_ctl.rc_lost)
 		bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost;
 	bbr->r_ctl.last_startup_measure = bbr->r_ctl.r_measurement_count;
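 	/*
 	 * Growth threshold: the last bottleneck b/w plus the configured
 	 * exit percentage (the lower percentage if we have dropped to the
 	 * reduced startup gain).
 	 */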
 	btlbw = bbr_get_full_bw(bbr);
 	if (bbr->r_ctl.rc_bbr_hptsi_gain == bbr_startup_lower)
 		gain = ((bbr->r_ctl.rc_bbr_lastbtlbw *
 			 (uint64_t)bbr_low_start_exit) / (uint64_t)100) + bbr->r_ctl.rc_bbr_lastbtlbw;
 	else
 		gain = ((bbr->r_ctl.rc_bbr_lastbtlbw *
 			 (uint64_t)bbr_start_exit) / (uint64_t)100) + bbr->r_ctl.rc_bbr_lastbtlbw;
 	do_exit = 0;
 	if (btlbw > bbr->r_ctl.rc_bbr_lastbtlbw)
 		bbr->r_ctl.rc_bbr_lastbtlbw = btlbw;
 	if (btlbw >= gain) {
 		bbr->r_ctl.rc_bbr_last_startup_epoch = bbr->r_ctl.rc_pkt_epoch;
 		/* Update the lost so we won't exit in next set of tests */
 		bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost;
 		bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 				      bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 3);
 	}
 	if ((bbr->rc_loss_exit &&
 	     (bbr->r_ctl.rc_lost > bbr->r_ctl.rc_lost_at_startup) &&
 	     (bbr->r_ctl.rc_pkt_epoch_loss_rate > bbr_startup_loss_thresh)) &&
 	    ((bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_bbr_last_startup_epoch) >= BBR_STARTUP_EPOCHS)) {
 		/*
 		 * If we had no gain, we had loss, that loss was above
 		 * our threshold, the rwnd is not constrained, and we have
 		 * had at least 3 packet epochs, then exit. Note that this is
 		 * switched off by a sysctl. Google does not do this, by the
 		 * way.
 		 */
 		if ((ctf_flight_size(bbr->rc_tp,
 			 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) +
 		     (2 * max(bbr->r_ctl.rc_pace_max_segs, bbr->rc_tp->t_maxseg))) <= bbr->rc_tp->snd_wnd) {
 			do_exit = 1;
 			bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 					      bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 4);
 		} else {
 			/* Just record an updated loss value */
 			bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost;
 			bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 					      bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 5);
 		}
 	} else
 		bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost;
 	if (((bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_bbr_last_startup_epoch) >= BBR_STARTUP_EPOCHS) ||
 	    do_exit) {
 		/* Return 1 to exit the startup state. */
 		return (1);
 	}
 	/* Stay in startup */
 	bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 			      bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 8);
 	return (0);
 }
 
 static void
 bbr_state_change(struct tcp_bbr *bbr, uint32_t cts, int32_t epoch, int32_t pkt_epoch, uint32_t losses)
 {
 	/*
 	 * A tick occurred in the rtt epoch; do we need to do anything?
 	 */
 #ifdef BBR_INVARIANTS
 	if ((bbr->rc_bbr_state != BBR_STATE_STARTUP) &&
 	    (bbr->rc_bbr_state != BBR_STATE_DRAIN) &&
 	    (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT) &&
 	    (bbr->rc_bbr_state != BBR_STATE_IDLE_EXIT) &&
 	    (bbr->rc_bbr_state != BBR_STATE_PROBE_BW)) {
 		/* Debug code? */
 		panic("Unknown BBR state %d?\n", bbr->rc_bbr_state);
 	}
 #endif
 	if (bbr->rc_bbr_state == BBR_STATE_STARTUP) {
 		/* Do we exit the startup state? */
 		if (bbr_state_startup(bbr, cts, epoch, pkt_epoch)) {
 			uint32_t time_in;
 
 			bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch,
 					      bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 6);
 			bbr->rc_filled_pipe = 1;
 			bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost;
 			if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) {
 				time_in = cts - bbr->r_ctl.rc_bbr_state_time;
 				counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in);
 			} else
 				time_in = 0;
 			if (bbr->rc_no_pacing)
 				bbr->rc_no_pacing = 0;
 			bbr->r_ctl.rc_bbr_state_time = cts;
 			bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.rc_drain_pg;
 			bbr->rc_bbr_state = BBR_STATE_DRAIN;
 			bbr_set_state_target(bbr, __LINE__);
 			if ((bbr->rc_use_google == 0) &&
 			    bbr_slam_cwnd_in_main_drain) {
 				/* Here we don't have to worry about probe-rtt */
 				bbr->r_ctl.rc_saved_cwnd = bbr->rc_tp->snd_cwnd;
 				bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state;
 				bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 			}
 			bbr->r_ctl.rc_bbr_cwnd_gain = bbr_high_gain;
 			bbr_log_type_statechange(bbr, cts, __LINE__);
 			if (ctf_flight_size(bbr->rc_tp,
 			        (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) <=
 			    bbr->r_ctl.rc_target_at_state) {
 				/*
 				 * Switch to probe_bw if we are already
 				 * there
 				 */
 				bbr->rc_bbr_substate = bbr_pick_probebw_substate(bbr, cts);
 				bbr_substate_change(bbr, cts, __LINE__, 0);
 				bbr->rc_bbr_state = BBR_STATE_PROBE_BW;
 				bbr_log_type_statechange(bbr, cts, __LINE__);
 			}
 		}
 	} else if (bbr->rc_bbr_state == BBR_STATE_IDLE_EXIT) {
 		uint32_t inflight;
 		struct tcpcb *tp;
 
 		tp = bbr->rc_tp;
 		inflight = ctf_flight_size(tp,
 			      (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 		if (inflight >= bbr->r_ctl.rc_target_at_state) {
 			/* We have reached a flight of the cwnd target */
 			bbr->rc_bbr_state = BBR_STATE_PROBE_BW;
 			bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT;
 			bbr->r_ctl.rc_bbr_cwnd_gain = BBR_UNIT;
 			bbr_set_state_target(bbr, __LINE__);
 			/*
 			 * Rig it so we don't do anything crazy and
 			 * start fresh with a new randomization.
 			 */
 			bbr->r_ctl.bbr_smallest_srtt_this_state = 0xffffffff;
 			bbr->rc_bbr_substate = BBR_SUB_LEVEL6;
 			bbr_substate_change(bbr, cts, __LINE__, 1);
 		}
 	} else if (bbr->rc_bbr_state == BBR_STATE_DRAIN) {
 		/* Has in-flight reached the bdp (or less)? */
 		uint32_t inflight;
 		struct tcpcb *tp;
 
 		tp = bbr->rc_tp;
 		inflight = ctf_flight_size(tp,
 			      (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 		if ((bbr->rc_use_google == 0) &&
 		    bbr_slam_cwnd_in_main_drain &&
 		    (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state)) {
 			/*
 			 * Here we don't have to worry about probe-rtt;
 			 * re-slam it to keep it slammed down.
 			 */
 			bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state;
 			bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 		}
 		if (inflight <= bbr->r_ctl.rc_target_at_state) {
 			/* We have drained */
 			bbr->rc_bbr_state = BBR_STATE_PROBE_BW;
 			bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost;
 			if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) {
 				uint32_t time_in;
 
 				time_in = cts - bbr->r_ctl.rc_bbr_state_time;
 				counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in);
 			}
 			if ((bbr->rc_use_google == 0) &&
 			    bbr_slam_cwnd_in_main_drain &&
 			    (tp->snd_cwnd < bbr->r_ctl.rc_saved_cwnd)) {
 				/* Restore the cwnd */
 				tp->snd_cwnd = bbr->r_ctl.rc_saved_cwnd;
 				bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 			}
 			/* Setup for probe-rtt has now been done RRS-HERE */
 			bbr->r_ctl.rc_rtt_shrinks = cts;
 			bbr->r_ctl.last_in_probertt = cts;
 			bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_LEAVE_DRAIN, 0);
 			/* Randomly pick a sub-state */
 			bbr->rc_bbr_substate = bbr_pick_probebw_substate(bbr, cts);
 			bbr_substate_change(bbr, cts, __LINE__, 0);
 			bbr_log_type_statechange(bbr, cts, __LINE__);
 		}
 	} else if (bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) {
 		uint32_t flight;
 
 		flight = ctf_flight_size(bbr->rc_tp,
 			     (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 		bbr->r_ctl.r_app_limited_until = (flight + bbr->r_ctl.rc_delivered);
 		if (((bbr->r_ctl.bbr_rttprobe_gain_val == 0) || bbr->rc_use_google) &&
 		    (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state)) {
 			/*
 			 * We must keep cwnd at the desired number of MSS.
 			 */
 			bbr->rc_tp->snd_cwnd = bbr_rtt_probe_cwndtarg * (bbr->rc_tp->t_maxseg - bbr->rc_last_options);
 			bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 		} else if ((bbr_prtt_slam_cwnd) &&
 			   (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state)) {
 			/* Re-slam it */
 			bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state;
 			bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__);
 		}
 		if (bbr->r_ctl.rc_bbr_enters_probertt == 0) {
 			/* Has outstanding reached our target? */
 			if (flight <= bbr->r_ctl.rc_target_at_state) {
 				bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_REACHTAR, 0);
 				bbr->r_ctl.rc_bbr_enters_probertt = cts;
 				/* If time is exactly 0, be 1usec off */
 				if (bbr->r_ctl.rc_bbr_enters_probertt == 0)
 					bbr->r_ctl.rc_bbr_enters_probertt = 1;
 				if (bbr->rc_use_google == 0) {
 					/*
 					 * Restore any lowering that has occurred to
 					 * reach here.
 					 */
 					if (bbr->r_ctl.bbr_rttprobe_gain_val)
 						bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.bbr_rttprobe_gain_val;
 					else
 						bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT;
 				}
 			}
 			if ((bbr->r_ctl.rc_bbr_enters_probertt == 0) &&
 			    (bbr->rc_use_google == 0) &&
 			    bbr->r_ctl.bbr_rttprobe_gain_val &&
 			    (((cts - bbr->r_ctl.rc_probertt_srttchktim) > bbr_get_rtt(bbr, bbr_drain_rtt)) ||
 			     (flight >= bbr->r_ctl.flightsize_at_drain))) {
 				/*
 				 * We have dawdled with our current hptsi
 				 * gain for an srtt and have still not made it
 				 * to target, or we have increased our flight.
 				 * Let's reduce the gain by xx%,
 				 * flooring the reduction at DRAIN (based on
 				 * mul/div).
 				 */
 				int red;
 
 				bbr->r_ctl.flightsize_at_drain = flight;
 				bbr->r_ctl.rc_probertt_srttchktim = cts;
 				red = max((bbr->r_ctl.bbr_rttprobe_gain_val / 10), 1);
 				if ((bbr->r_ctl.rc_bbr_hptsi_gain - red) > max(bbr_drain_floor, 1)) {
 					/* Reduce our gain again */
 					bbr->r_ctl.rc_bbr_hptsi_gain -= red;
 					bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_SHRINK_PG, 0);
 				} else if (bbr->r_ctl.rc_bbr_hptsi_gain > max(bbr_drain_floor, 1)) {
 					/* one more chance before we give up */
 					bbr->r_ctl.rc_bbr_hptsi_gain = max(bbr_drain_floor, 1);
 					bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_SHRINK_PG_FINAL, 0);
 				} else {
 					/* At the very bottom */
 					bbr->r_ctl.rc_bbr_hptsi_gain = max((bbr_drain_floor-1), 1);
 				}
 			}
 		}
 		if (bbr->r_ctl.rc_bbr_enters_probertt &&
 		    (TSTMP_GT(cts, bbr->r_ctl.rc_bbr_enters_probertt)) &&
 		    ((cts - bbr->r_ctl.rc_bbr_enters_probertt) >= bbr_rtt_probe_time)) {
 			/* Time to exit probe RTT normally */
 			bbr_exit_probe_rtt(bbr->rc_tp, bbr, cts);
 		}
 	} else if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) {
 		if ((bbr->rc_tp->snd_una == bbr->rc_tp->snd_max) &&
 		    (bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time) >= bbr_rtt_probe_time)) {
 			/*
 			 * This qualifies as an RTT_PROBE session since we
 			 * dropped the data outstanding to nothing and waited
 			 * more than bbr_rtt_probe_time.
 			 */
 			bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_WASIDLE, 0);
 			bbr_set_reduced_rtt(bbr, cts, __LINE__);
 		}
 		if (bbr_should_enter_probe_rtt(bbr, cts)) {
 			bbr_enter_probe_rtt(bbr, cts, __LINE__);
 		} else {
 			bbr_set_probebw_gains(bbr, cts, losses);
 		}
 	}
 }
 
 static void
 bbr_check_bbr_for_state(struct tcp_bbr *bbr, uint32_t cts, int32_t line, uint32_t losses)
 {
 	int32_t epoch = 0;
 
 	if ((cts - bbr->r_ctl.rc_rcv_epoch_start) >= bbr_get_rtt(bbr, BBR_RTT_PROP)) {
 		bbr_set_epoch(bbr, cts, line);
 		/* At each epoch do lt bw sampling */
 		epoch = 1;
 	}
 	bbr_state_change(bbr, cts, epoch, bbr->rc_is_pkt_epoch_now, losses);
 }
 
 static int
 bbr_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
     int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt,
     struct timeval *tv)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct socket *so = tptosocket(tp);
 	int32_t thflags, retval;
 	uint32_t cts, lcts;
 	uint32_t tiwin;
 	struct tcpopt to;
 	struct tcp_bbr *bbr;
 	struct bbr_sendmap *rsm;
 	struct timeval ltv;
 	int32_t did_out = 0;
 	uint16_t nsegs;
 	int32_t prev_state;
 	uint32_t lost;
 
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	/* add in our stats */
 	kern_prefetch(bbr, &prev_state);
 	prev_state = 0;
 	thflags = tcp_get_flags(th);
 	/*
 	 * If this is either a state-changing packet or the current state isn't
 	 * established, we require a write lock on tcbinfo.  Otherwise, we
 	 * allow the tcbinfo to be in either a locked or unlocked state, as the
 	 * caller may have unnecessarily acquired a write lock due to a
 	 * race.
 	 */
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
 	    __func__));
 	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
 	    __func__));
 
 	tp->t_rcvtime = ticks;
 	/*
 	 * Unscale the window into a 32-bit value. For the SYN_SENT state
 	 * the scale is zero.
 	 */
 	tiwin = th->th_win << tp->snd_scale;
 #ifdef STATS
 	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
 #endif
 
 	if (m->m_flags & M_TSTMP) {
 		/* Prefer the hardware timestamp if present */
 		struct timespec ts;
 
 		mbuf_tstmp2timespec(m, &ts);
 		bbr->rc_tv.tv_sec = ts.tv_sec;
 		bbr->rc_tv.tv_usec = ts.tv_nsec / 1000;
 		bbr->r_ctl.rc_rcvtime = cts = tcp_tv_to_usectick(&bbr->rc_tv);
 	} else if (m->m_flags & M_TSTMP_LRO) {
 		/* Next the arrival timestamp */
 		struct timespec ts;
 
 		mbuf_tstmp2timespec(m, &ts);
 		bbr->rc_tv.tv_sec = ts.tv_sec;
 		bbr->rc_tv.tv_usec = ts.tv_nsec / 1000;
 		bbr->r_ctl.rc_rcvtime = cts = tcp_tv_to_usectick(&bbr->rc_tv);
 	} else {
 		/*
 		 * Ok just get the current time.
 		 */
 		bbr->r_ctl.rc_rcvtime = lcts = cts = tcp_get_usecs(&bbr->rc_tv);
 	}
 	/*
 	 * Parse options on any incoming segment.
 	 */
 	tcp_dooptions(&to, (u_char *)(th + 1),
 	    (th->th_off << 2) - sizeof(struct tcphdr),
 	    (thflags & TH_SYN) ? TO_SYN : 0);
 
 	/*
 	 * If timestamps were negotiated during SYN/ACK and a
 	 * segment without a timestamp is received, silently drop
 	 * the segment, unless it is a RST segment or missing timestamps are
 	 * tolerated.
 	 * See section 3.2 of RFC 7323.
 	 */
 	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
 	    ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
 		retval = 0;
 		m_freem(m);
 		goto done_with_input;
 	}
 	/*
 	 * If echoed timestamp is later than the current time, fall back to
 	 * non RFC1323 RTT calculation.  Normalize timestamp if syncookies
 	 * were used when this connection was established.
 	 */
 	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
 		to.to_tsecr -= tp->ts_offset;
 		if (TSTMP_GT(to.to_tsecr, tcp_tv_to_mssectick(&bbr->rc_tv)))
 			to.to_tsecr = 0;
 	}
 	/*
 	 * If it's the first time in, we need to take care of options and
 	 * verify we can do SACK for rack!
 	 */
 	if (bbr->r_state == 0) {
 		/*
 		 * Process options only when we get SYN/ACK back. The SYN
 		 * case for incoming connections is handled in tcp_syncache.
 		 * According to RFC1323 the window field in a SYN (i.e., a
 		 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
 		 * this is traditional behavior, may need to be cleaned up.
 		 */
 		if (bbr->rc_inp == NULL) {
 			bbr->rc_inp = inp;
 		}
 		/*
 		 * We need to init rc_inp here since it's not init'd when
 		 * bbr_init is called.
 		 */
 		if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
 			if ((to.to_flags & TOF_SCALE) &&
 			    (tp->t_flags & TF_REQ_SCALE)) {
 				tp->t_flags |= TF_RCVD_SCALE;
 				tp->snd_scale = to.to_wscale;
 			} else
 				tp->t_flags &= ~TF_REQ_SCALE;
 			/*
 			 * Initial send window.  It will be updated with the
 			 * next incoming segment to the scaled value.
 			 */
 			tp->snd_wnd = th->th_win;
 			if ((to.to_flags & TOF_TS) &&
 			    (tp->t_flags & TF_REQ_TSTMP)) {
 				tp->t_flags |= TF_RCVD_TSTMP;
 				tp->ts_recent = to.to_tsval;
 				tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv);
 			} else
 			    tp->t_flags &= ~TF_REQ_TSTMP;
 			if (to.to_flags & TOF_MSS)
 				tcp_mss(tp, to.to_mss);
 			if ((tp->t_flags & TF_SACK_PERMIT) &&
 			    (to.to_flags & TOF_SACKPERM) == 0)
 				tp->t_flags &= ~TF_SACK_PERMIT;
 			if (IS_FASTOPEN(tp->t_flags)) {
 				if (to.to_flags & TOF_FASTOPEN) {
 					uint16_t mss;
 
 					if (to.to_flags & TOF_MSS)
 						mss = to.to_mss;
 					else
 						if ((inp->inp_vflag & INP_IPV6) != 0)
 							mss = TCP6_MSS;
 						else
 							mss = TCP_MSS;
 					tcp_fastopen_update_cache(tp, mss,
 					    to.to_tfo_len, to.to_tfo_cookie);
 				} else
 					tcp_fastopen_disable_path(tp);
 			}
 		}
 		/*
 		 * At this point we are at the initial call. Here we decide
 		 * if we are doing RACK or not. We do this by seeing if
 		 * TF_SACK_PERMIT is set; if not, rack is *not* possible and
 		 * we switch to the default code.
 		 */
 		if ((tp->t_flags & TF_SACK_PERMIT) == 0) {
 			/* Bail */
 			tcp_switch_back_to_default(tp);
 			(*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen,
 			    tlen, iptos);
 			return (1);
 		}
 		/* Set the flag */
 		bbr->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0;
 		tcp_set_hpts(tp);
 		sack_filter_clear(&bbr->r_ctl.bbr_sf, th->th_ack);
 	}
 	if (thflags & TH_ACK) {
 		/* Track ack types */
 		if (to.to_flags & TOF_SACK)
 			BBR_STAT_INC(bbr_acks_with_sacks);
 		else
 			BBR_STAT_INC(bbr_plain_acks);
 	}
 	/*
 	 * This is the one exception case where we set the rack state
 	 * always. All other times (timers etc) we must have a rack-state
 	 * set (so we assure we have done the checks above for SACK).
 	 */
 	if (thflags & TH_FIN)
 		tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
 	if (bbr->r_state != tp->t_state)
 		bbr_set_state(tp, bbr, tiwin);
 
 	if (SEQ_GT(th->th_ack, tp->snd_una) && (rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map)) != NULL)
 		kern_prefetch(rsm, &prev_state);
 	prev_state = bbr->r_state;
 	bbr->rc_ack_was_delayed = 0;
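 	/* Snapshot rc_lost so the state machine below sees only this pass's losses. */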
 	lost = bbr->r_ctl.rc_lost;
 	bbr->rc_is_pkt_epoch_now = 0;
 	if (m->m_flags & (M_TSTMP|M_TSTMP_LRO)) {
 		/* Get the real time into lcts and figure the real delay */
 		lcts = tcp_get_usecs(&ltv);
 		if (TSTMP_GT(lcts, cts)) {
 			bbr->r_ctl.rc_ack_hdwr_delay = lcts - cts;
 			bbr->rc_ack_was_delayed = 1;
 			if (TSTMP_GT(bbr->r_ctl.rc_ack_hdwr_delay,
 				     bbr->r_ctl.highest_hdwr_delay))
 				bbr->r_ctl.highest_hdwr_delay = bbr->r_ctl.rc_ack_hdwr_delay;
 		} else {
 			bbr->r_ctl.rc_ack_hdwr_delay = 0;
 			bbr->rc_ack_was_delayed = 0;
 		}
 	} else {
 		bbr->r_ctl.rc_ack_hdwr_delay = 0;
 		bbr->rc_ack_was_delayed = 0;
 	}
 	bbr_log_ack_event(bbr, th, &to, tlen, nsegs, cts, nxt_pkt, m);
 	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
 		retval = 0;
 		m_freem(m);
 		goto done_with_input;
 	}
 	/*
 	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
 	 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
 	 */
 	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
 	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 		return (1);
 	}
 	if (tiwin > bbr->r_ctl.rc_high_rwnd)
 		bbr->r_ctl.rc_high_rwnd = tiwin;
 	bbr->r_ctl.rc_flight_at_input = ctf_flight_size(tp,
 					    (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 	bbr->rtt_valid = 0;
 	if (to.to_flags & TOF_TS) {
 		bbr->rc_ts_valid = 1;
 		bbr->r_ctl.last_inbound_ts = to.to_tsval;
 	} else {
 		bbr->rc_ts_valid = 0;
 		bbr->r_ctl.last_inbound_ts = 0;
 	}
 	retval = (*bbr->r_substate) (m, th, so,
 	    tp, &to, drop_hdrlen,
 	    tlen, tiwin, thflags, nxt_pkt, iptos);
 	if (nxt_pkt == 0)
 		BBR_STAT_INC(bbr_rlock_left_ret0);
 	else
 		BBR_STAT_INC(bbr_rlock_left_ret1);
 	if (retval == 0) {
 		/*
 		 * If retval is 1 the tcb is unlocked and most likely the tp
 		 * is gone.
 		 */
 		INP_WLOCK_ASSERT(inp);
 		tcp_bbr_xmit_timer_commit(bbr, tp, cts);
 		if (bbr->rc_is_pkt_epoch_now)
 			bbr_set_pktepoch(bbr, cts, __LINE__);
 		bbr_check_bbr_for_state(bbr, cts, __LINE__, (bbr->r_ctl.rc_lost - lost));
 		if (nxt_pkt == 0) {
 			if (bbr->r_wanted_output != 0) {
 				bbr->rc_output_starts_timer = 0;
 				did_out = 1;
 				if (tcp_output(tp) < 0)
 					return (1);
 			} else
 				bbr_start_hpts_timer(bbr, tp, cts, 6, 0, 0);
 		}
 		if ((nxt_pkt == 0) &&
 		    ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
 		    (SEQ_GT(tp->snd_max, tp->snd_una) ||
 		     (tp->t_flags & TF_DELACK) ||
 		     ((V_tcp_always_keepalive || bbr->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
 		      (tp->t_state <= TCPS_CLOSING)))) {
 			/*
 			 * We could not send (probably in the hpts but
 			 * stopped the timer)?
 			 */
 			if ((tp->snd_max == tp->snd_una) &&
 			    ((tp->t_flags & TF_DELACK) == 0) &&
 			    (tcp_in_hpts(tp)) &&
 			    (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
 				/*
 				 * keep-alive is not needed while we are
 				 * still awaiting hptsi output
 				 */
 				;
 			} else {
 				if (tcp_in_hpts(tp)) {
 					tcp_hpts_remove(tp);
 					if ((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
 					    (TSTMP_GT(lcts, bbr->rc_pacer_started))) {
 						uint32_t del;
 
 						del = lcts - bbr->rc_pacer_started;
 						if (bbr->r_ctl.rc_last_delay_val > del) {
 							BBR_STAT_INC(bbr_force_timer_start);
 							bbr->r_ctl.rc_last_delay_val -= del;
 							bbr->rc_pacer_started = lcts;
 						} else {
 							/* We are late */
 							bbr->r_ctl.rc_last_delay_val = 0;
 							BBR_STAT_INC(bbr_force_output);
 							if (tcp_output(tp) < 0)
 								return (1);
 						}
 					}
 				}
 				bbr_start_hpts_timer(bbr, tp, cts, 8, bbr->r_ctl.rc_last_delay_val,
 				    0);
 			}
 		} else if ((bbr->rc_output_starts_timer == 0) && (nxt_pkt == 0)) {
 			/* Do we have the correct timer running? */
 			bbr_timer_audit(tp, bbr, lcts, &so->so_snd);
 		}
 		/* Clear the flag; it may have been cleared by output, but it may not have been */
 		if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS))
 			tp->t_flags2 &= ~TF2_HPTS_CALLS;
 		/* Do we have a new state? */
 		if (bbr->r_state != tp->t_state)
 			bbr_set_state(tp, bbr, tiwin);
 done_with_input:
 		bbr_log_doseg_done(bbr, cts, nxt_pkt, did_out);
 		if (did_out)
 			bbr->r_wanted_output = 0;
 	}
 	return (retval);
 }
 
 static void
 bbr_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
     int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
 {
 	struct timeval tv;
 	int retval;
 
 	/* First let's see if we have old packets */
 	if (!STAILQ_EMPTY(&tp->t_inqueue)) {
 		if (ctf_do_queued_segments(tp, 1)) {
 			m_freem(m);
 			return;
 		}
 	}
 	if (m->m_flags & M_TSTMP_LRO) {
 		mbuf_tstmp2timeval(m, &tv);
 	} else {
 		/* Should not be; should we kassert instead? */
 		tcp_get_usecs(&tv);
 	}
 	retval = bbr_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos,
 	    0, &tv);
 	if (retval == 0) {
 		INP_WUNLOCK(tptoinpcb(tp));
 	}
 }
 
 /*
  * Return how much data can be sent without violating the
  * cwnd or rwnd.
  */
 
 static inline uint32_t
 bbr_what_can_we_send(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t sendwin,
     uint32_t avail, int32_t sb_offset, uint32_t cts)
 {
 	uint32_t len;
 
 	if (ctf_outstanding(tp) >= tp->snd_wnd) {
 		/* We never want to go over our peer's rcv-window */
 		len = 0;
 	} else {
 		uint32_t flight;
 
 		flight = ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
 		if (flight >= sendwin) {
 			/*
 			 * We have in flight what we are allowed by cwnd (if
 			 * it was rwnd blocking it would have hit our
 			 * >= tp->snd_wnd check above).
 			 */
 			return (0);
 		}
 		len = sendwin - flight;
 		if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
 			/* We would send too much (beyond the rwnd) */
 			len = tp->snd_wnd - ctf_outstanding(tp);
 		}
 		if ((len + sb_offset) > avail) {
 			/*
 			 * We don't have that much in the SB, how much is
 			 * there?
 			 */
 			len = avail - sb_offset;
 		}
 	}
 	return (len);
 }
 
 static inline void
 bbr_do_send_accounting(struct tcpcb *tp, struct tcp_bbr *bbr, struct bbr_sendmap *rsm, int32_t len, int32_t error)
 {
 	if (error) {
 		return;
 	}
 	if (rsm) {
 		if (rsm->r_flags & BBR_TLP) {
 			/*
 			 * TLP should not count in retran count, but in its
 			 * own bin
 			 */
 			KMOD_TCPSTAT_INC(tcps_tlpresends);
 			KMOD_TCPSTAT_ADD(tcps_tlpresend_bytes, len);
 		} else {
 			/* Retransmit */
 			tp->t_sndrexmitpack++;
 			KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
 			KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
 #ifdef STATS
 			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
 			    len);
 #endif
 		}
 		/*
 		 * Logs in 0 - 8; 8 is all non-probe_bw states, 0-7 is the
 		 * sub-state.
 		 */
 		counter_u64_add(bbr_state_lost[rsm->r_bbr_state], len);
 		if (bbr->rc_bbr_state != BBR_STATE_PROBE_BW) {
 			/* Non probe_bw log in 1, 2, or 4. */
 			counter_u64_add(bbr_state_resend[bbr->rc_bbr_state], len);
 		} else {
 			/*
 			 * Log our probe state 3, and log also 5-13 to show
 			 * us the recovery sub-state for the send. This
 			 * means that 3 == (5+6+7+8+9+10+11+12+13)
 			 */
 			counter_u64_add(bbr_state_resend[BBR_STATE_PROBE_BW], len);
 			counter_u64_add(bbr_state_resend[(bbr_state_val(bbr) + 5)], len);
 		}
 		/* Place in both 16's the totals of retransmitted */
 		counter_u64_add(bbr_state_lost[16], len);
 		counter_u64_add(bbr_state_resend[16], len);
 		/* Place in 17's the total sent */
 		counter_u64_add(bbr_state_resend[17], len);
 		counter_u64_add(bbr_state_lost[17], len);
 
 	} else {
 		/* New sends */
 		KMOD_TCPSTAT_INC(tcps_sndpack);
 		KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
 		/* Place in 17's the total sent */
 		counter_u64_add(bbr_state_resend[17], len);
 		counter_u64_add(bbr_state_lost[17], len);
 #ifdef STATS
 		stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
 		    len);
 #endif
 	}
 }
 
 static void
 bbr_cwnd_limiting(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t in_level)
 {
 	if (bbr->rc_filled_pipe && bbr_target_cwnd_mult_limit && (bbr->rc_use_google == 0)) {
 		/*
 		 * Limit the cwnd to not be above N x (the target plus what
 		 * is outstanding). The target is based on the current b/w
 		 * estimate.
 		 */
 		uint32_t target;
 
 		target = bbr_get_target_cwnd(bbr, bbr_get_bw(bbr), BBR_UNIT);
 		target += ctf_outstanding(tp);
 		target *= bbr_target_cwnd_mult_limit;
 		if (tp->snd_cwnd > target)
 			tp->snd_cwnd = target;
 		bbr_log_type_cwndupd(bbr, 0, 0, 0, 10, 0, 0, __LINE__);
 	}
 }
 
 static int
 bbr_window_update_needed(struct tcpcb *tp, struct socket *so, uint32_t recwin, int32_t maxseg)
 {
 	/*
 	 * "adv" is the amount we could increase the window, taking into
 	 * account that we are limited by TCP_MAXWIN << tp->rcv_scale.
 	 */
 	int32_t adv;
 	int32_t oldwin;
 
 	adv = recwin;
 	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
 		oldwin = (tp->rcv_adv - tp->rcv_nxt);
 		if (adv > oldwin)
 			adv -= oldwin;
 		else {
 			/* We can't increase the window */
 			adv = 0;
 		}
 	} else
 		oldwin = 0;
 
 	/*
 	 * If the new window size ends up being the same as or less
 	 * than the old size when it is scaled, then don't force
 	 * a window update.
 	 */
 	if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
 		return (0);
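 	/*
 	 * Worth a window update: the advance is at least two full segments
 	 * and either covers a quarter of the receive buffer, the window we
 	 * could advertise is no more than an eighth of the buffer, or the
 	 * buffer itself is small.
 	 */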
 
 	if (adv >= (2 * maxseg) &&
 	    (adv >= (so->so_rcv.sb_hiwat / 4) ||
 	    recwin <= (so->so_rcv.sb_hiwat / 8) ||
 	    so->so_rcv.sb_hiwat <= 8 * maxseg)) {
 		return (1);
 	}
 	if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat)
 		return (1);
 	return (0);
 }
 
 /*
  * Return 0 on success and an errno on failure to send.
  * Note that a 0 return may not mean we sent anything
  * if the TCB was on the hpts. A non-zero return
  * does indicate the error we got from ip[6]_output.
  */
 static int
 bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
 {
 	struct socket *so;
 	int32_t len;
 	uint32_t cts;
 	uint32_t recwin, sendwin;
 	int32_t sb_offset;
 	int32_t flags, abandon, error = 0;
 	struct tcp_log_buffer *lgb = NULL;
 	struct mbuf *m;
 	struct mbuf *mb;
 	uint32_t if_hw_tsomaxsegcount = 0;
 	uint32_t if_hw_tsomaxsegsize = 0;
 	uint32_t if_hw_tsomax = 0;
 	struct ip *ip = NULL;
 	struct tcp_bbr *bbr;
 	struct tcphdr *th;
 	struct udphdr *udp = NULL;
 	u_char opt[TCP_MAXOLEN];
 	unsigned ipoptlen, optlen, hdrlen;
 	unsigned ulen;
 	uint32_t bbr_seq;
 	uint32_t delay_calc=0;
 	uint8_t doing_tlp = 0;
 	uint8_t local_options;
 #ifdef BBR_INVARIANTS
 	uint8_t doing_retran_from = 0;
 	uint8_t picked_up_retran = 0;
 #endif
 	uint8_t wanted_cookie = 0;
 	uint8_t more_to_rxt=0;
 	int32_t prefetch_so_done = 0;
 	int32_t prefetch_rsm = 0;
 	uint32_t tot_len = 0;
 	uint32_t maxseg, pace_max_segs, p_maxseg;
 	int32_t csum_flags = 0;
 	int32_t hw_tls;
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 	unsigned ipsec_optlen = 0;
 
 #endif
 	volatile int32_t sack_rxmit;
 	struct bbr_sendmap *rsm = NULL;
 	int32_t tso, mtu;
 	struct tcpopt to;
 	int32_t slot = 0;
 	struct inpcb *inp;
 	struct sockbuf *sb;
 	bool hpts_calling;
 #ifdef INET6
 	struct ip6_hdr *ip6 = NULL;
 	int32_t isipv6;
 #endif
 	uint8_t app_limited = BBR_JR_SENT_DATA;
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	/* We take a cache hit here */
 	memcpy(&bbr->rc_tv, tv, sizeof(struct timeval));
 	cts = tcp_tv_to_usectick(&bbr->rc_tv);
 	inp = bbr->rc_inp;
 	hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS);
 	tp->t_flags2 &= ~TF2_HPTS_CALLS;
 	so = inp->inp_socket;
 	sb = &so->so_snd;
 	if (tp->t_nic_ktls_xmit)
 		hw_tls = 1;
 	else
 		hw_tls = 0;
 	kern_prefetch(sb, &maxseg);
 	maxseg = tp->t_maxseg - bbr->rc_last_options;
 	if (bbr_minseg(bbr) < maxseg) {
 		tcp_bbr_tso_size_check(bbr, cts);
 	}
 	/* Remove any flags that indicate we are pacing on the inp  */
 	pace_max_segs = bbr->r_ctl.rc_pace_max_segs;
 	p_maxseg = min(maxseg, pace_max_segs);
 	INP_WLOCK_ASSERT(inp);
 #ifdef TCP_OFFLOAD
 	if (tp->t_flags & TF_TOE)
 		return (tcp_offload_output(tp));
 #endif
 
 #ifdef INET6
 	if (bbr->r_state) {
 		/* Use the cache line loaded if possible */
 		isipv6 = bbr->r_is_v6;
 	} else {
 		isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
 	}
 #endif
 	if (((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
 	    tcp_in_hpts(tp)) {
 		/*
 		 * We are on the hpts for some timer but not hptsi output.
 		 * Possibly remove from the hpts so we can send/recv etc.
 		 */
 		if ((tp->t_flags & TF_ACKNOW) == 0) {
 			/*
 			 * No immediate demand right now to send an ack, but
 			 * the user may have read, making room for new data
 			 * (a window update). If so we may want to cancel
 			 * whatever timer is running (KEEP/DEL-ACK?) and
 			 * continue to send out a window update. Or we may
 			 * have gotten more data into the socket buffer to
 			 * send.
 			 */
 			recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
 				      (long)TCP_MAXWIN << tp->rcv_scale);
 			if ((bbr_window_update_needed(tp, so, recwin, maxseg) == 0) &&
 			    ((tcp_outflags[tp->t_state] & TH_RST) == 0) &&
 			    ((sbavail(sb) + ((tcp_outflags[tp->t_state] & TH_FIN) ? 1 : 0)) <=
 			    (tp->snd_max - tp->snd_una))) {
 				/*
 				 * Nothing new to send and no window update
 				 * needs to be sent. Let's just return and
 				 * let the timer run off.
 				 */
 				return (0);
 			}
 		}
 		tcp_hpts_remove(tp);
 		bbr_timer_cancel(bbr, __LINE__, cts);
 	}
 	if (bbr->r_ctl.rc_last_delay_val) {
 		/* Calculate a rough delay for early escape to sending  */
 		if (SEQ_GT(cts, bbr->rc_pacer_started))
 			delay_calc = cts - bbr->rc_pacer_started;
 		if (delay_calc >= bbr->r_ctl.rc_last_delay_val)
 			delay_calc -= bbr->r_ctl.rc_last_delay_val;
 		else
 			delay_calc = 0;
 	}
 	/* Mark that we have called bbr_output(). */
 	if ((bbr->r_timer_override) ||
 	    (tp->t_state < TCPS_ESTABLISHED)) {
 		/* Timeouts or early states are exempt */
 		if (tcp_in_hpts(tp))
 			tcp_hpts_remove(tp);
 	} else if (tcp_in_hpts(tp)) {
 		if ((bbr->r_ctl.rc_last_delay_val) &&
 		    (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
 		    delay_calc) {
 			/*
 			 * We were being paced for output and the delay has
 			 * already exceeded the time when we were supposed to
 			 * be called; let's go ahead and pull out of the hpts
 			 * and call output.
 			 */
 			counter_u64_add(bbr_out_size[TCP_MSS_ACCT_LATE], 1);
 			bbr->r_ctl.rc_last_delay_val = 0;
 			tcp_hpts_remove(tp);
 		} else if (tp->t_state == TCPS_CLOSED) {
 			bbr->r_ctl.rc_last_delay_val = 0;
 			tcp_hpts_remove(tp);
 		} else {
 			/*
 			 * On the hpts, you shall not pass! Even if ACKNOW
 			 * is on, we will send when the hpts fires, unless of
 			 * course we are overdue.
 			 */
 			counter_u64_add(bbr_out_size[TCP_MSS_ACCT_INPACE], 1);
 			return (0);
 		}
 	}
 	bbr->rc_cwnd_limited = 0;
 	if (bbr->r_ctl.rc_last_delay_val) {
 		/* recalculate the real delay and deal with over/under  */
 		if (SEQ_GT(cts, bbr->rc_pacer_started))
 			delay_calc = cts - bbr->rc_pacer_started;
 		else
 			delay_calc = 0;
 		if (delay_calc >= bbr->r_ctl.rc_last_delay_val)
 			/* Setup the delay which will be added in */
 			delay_calc -= bbr->r_ctl.rc_last_delay_val;
 		else {
 			/*
 			 * We are early; set up to adjust
 			 * our slot time.
 			 */
 			uint64_t merged_val;
 
 			bbr->r_ctl.rc_agg_early += (bbr->r_ctl.rc_last_delay_val - delay_calc);
 			bbr->r_agg_early_set = 1;
 			if (bbr->r_ctl.rc_hptsi_agg_delay) {
 				if (bbr->r_ctl.rc_hptsi_agg_delay >= bbr->r_ctl.rc_agg_early) {
 					/* Nope, our previous late cancels out the early */
 					bbr->r_ctl.rc_hptsi_agg_delay -= bbr->r_ctl.rc_agg_early;
 					bbr->r_agg_early_set = 0;
 					bbr->r_ctl.rc_agg_early = 0;
 				} else {
 					bbr->r_ctl.rc_agg_early -= bbr->r_ctl.rc_hptsi_agg_delay;
 					bbr->r_ctl.rc_hptsi_agg_delay = 0;
 				}
 			}
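 			/*
 			 * Pack the pacer start time (upper 32 bits) and the
 			 * last delay value (lower 32 bits) for logging.
 			 */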
 			merged_val = bbr->rc_pacer_started;
 			merged_val <<= 32;
 			merged_val |= bbr->r_ctl.rc_last_delay_val;
 			bbr_log_pacing_delay_calc(bbr, hpts_calling,
 						 bbr->r_ctl.rc_agg_early, cts, delay_calc, merged_val,
 						 bbr->r_agg_early_set, 3);
 			bbr->r_ctl.rc_last_delay_val = 0;
 			BBR_STAT_INC(bbr_early);
 			delay_calc = 0;
 		}
 	} else {
 		/* We were not delayed due to hptsi */
 		if (bbr->r_agg_early_set)
 			bbr->r_ctl.rc_agg_early = 0;
 		bbr->r_agg_early_set = 0;
 		delay_calc = 0;
 	}
 	if (delay_calc) {
 		/*
 		 * We had a hptsi delay which means we are falling behind on
 		 * sending at the expected rate. Calculate an extra amount
 		 * of data we can send, if any, to put us back on track.
 		 */
 		if ((bbr->r_ctl.rc_hptsi_agg_delay + delay_calc) < bbr->r_ctl.rc_hptsi_agg_delay)
 			bbr->r_ctl.rc_hptsi_agg_delay = 0xffffffff;
 		else
 			bbr->r_ctl.rc_hptsi_agg_delay += delay_calc;
 	}
 	sendwin = min(tp->snd_wnd, tp->snd_cwnd);
 	if ((tp->snd_una == tp->snd_max) &&
 	    (bbr->rc_bbr_state != BBR_STATE_IDLE_EXIT) &&
 	    (sbavail(sb))) {
 		/*
 		 * Ok, we have been idle with nothing outstanding;
 		 * we possibly need to start fresh with either a new
 		 * suite of states or a fast ramp-up.
 		 */
 		bbr_restart_after_idle(bbr,
 				       cts, bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time));
 	}
 	/*
 	 * Now was there a hptsi delay where we are behind? We only count
 	 * being behind if: a) We are not in recovery. b) There was a delay.
 	 * <and> c) We had room to send something.
 	 *
 	 */
 	if (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 		int retval;
 
 		retval = bbr_process_timers(tp, bbr, cts, hpts_calling);
 		if (retval != 0) {
 			counter_u64_add(bbr_out_size[TCP_MSS_ACCT_ATIMER], 1);
 			/*
 			 * If timers want tcp_drop(), then pass error out,
 			 * otherwise suppress it.
 			 */
 			return (retval < 0 ? retval : 0);
 		}
 	}
 	bbr->rc_tp->t_flags2 &= ~TF2_MBUF_QUEUE_READY;
 	if (hpts_calling &&
 	    (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
 		bbr->r_ctl.rc_last_delay_val = 0;
 	}
 	bbr->r_timer_override = 0;
 	bbr->r_wanted_output = 0;
 	/*
 	 * For TFO connections in SYN_RECEIVED, only allow the initial
 	 * SYN|ACK and those sent by the retransmit timer.
 	 */
 	if (IS_FASTOPEN(tp->t_flags) &&
 	    ((tp->t_state == TCPS_SYN_RECEIVED) ||
 	     (tp->t_state == TCPS_SYN_SENT)) &&
 	    SEQ_GT(tp->snd_max, tp->snd_una) &&	/* initial SYN or SYN|ACK sent */
 	    (tp->t_rxtshift == 0)) {	/* not a retransmit */
 		len = 0;
 		goto just_return_nolock;
 	}
 	/*
 	 * Before sending anything check for a state update. For hpts
 	 * calling without input this is important. If it's input calling
 	 * then this was already done.
 	 */
 	if (bbr->rc_use_google == 0)
 		bbr_check_bbr_for_state(bbr, cts, __LINE__, 0);
 again:
 	/*
 	 * If we've recently taken a timeout, snd_max will be greater than
 	 * snd_nxt. BBR in general does not pay much attention to snd_nxt;
 	 * for historic reasons the persist timer still uses it. This means
 	 * we have to look at it. All retransmissions that are not persists
 	 * use the rsm that needs to be sent so snd_nxt is ignored. At the
 	 * end of this routine we pull snd_nxt always up to snd_max.
 	 */
 	doing_tlp = 0;
 #ifdef BBR_INVARIANTS
 	doing_retran_from = picked_up_retran = 0;
 #endif
 	error = 0;
 	tso = 0;
 	slot = 0;
 	mtu = 0;
 	sendwin = min(tp->snd_wnd, tp->snd_cwnd);
 	sb_offset = tp->snd_max - tp->snd_una;
 	flags = tcp_outflags[tp->t_state];
 	sack_rxmit = 0;
 	len = 0;
 	rsm = NULL;
 	if (flags & TH_RST) {
 		SOCKBUF_LOCK(sb);
 		goto send;
 	}
 recheck_resend:
 	while (bbr->r_ctl.rc_free_cnt < bbr_min_req_free) {
 		/* We need to always have one in reserve */
 		rsm = bbr_alloc(bbr);
 		if (rsm == NULL) {
 			error = ENOMEM;
 			/* Lie to get on the hpts */
 			tot_len = tp->t_maxseg;
 			if (hpts_calling)
 				/* Retry in a ms */
 				slot = 1001;
 			goto just_return_nolock;
 		}
 		TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_free, rsm, r_next);
 		bbr->r_ctl.rc_free_cnt++;
 		rsm = NULL;
 	}
 	/* What do we send, a resend? */
 	if (bbr->r_ctl.rc_resend == NULL) {
 		/* Check for rack timeout */
 		bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts);
 		if (bbr->r_ctl.rc_resend) {
 #ifdef BBR_INVARIANTS
 			picked_up_retran = 1;
 #endif
 			bbr_cong_signal(tp, NULL, CC_NDUPACK, bbr->r_ctl.rc_resend);
 		}
 	}
 	if (bbr->r_ctl.rc_resend) {
 		rsm = bbr->r_ctl.rc_resend;
 #ifdef BBR_INVARIANTS
 		doing_retran_from = 1;
 #endif
 		/* Remove any TLP flags; it's a RACK or T-O retransmit */
 		rsm->r_flags &= ~BBR_TLP;
 		bbr->r_ctl.rc_resend = NULL;
 		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
 #ifdef BBR_INVARIANTS
 			panic("Huh, tp:%p bbr:%p rsm:%p start:%u < snd_una:%u\n",
 			    tp, bbr, rsm, rsm->r_start, tp->snd_una);
 			goto recheck_resend;
 #else
 			/* TSNH */
 			rsm = NULL;
 			goto recheck_resend;
 #endif
 		}
 		if (rsm->r_flags & BBR_HAS_SYN) {
 			/* Only retransmit a SYN by itself */
 			len = 0;
 			if ((flags & TH_SYN) == 0) {
 				/* Huh something is wrong */
 				rsm->r_start++;
 				if (rsm->r_start == rsm->r_end) {
 					/* Clean it up, somehow we missed the ack? */
 					bbr_log_syn(tp, NULL);
 				} else {
 					/* TFO with data? */
 					rsm->r_flags &= ~BBR_HAS_SYN;
 					len = rsm->r_end - rsm->r_start;
 				}
 			} else {
 				/* Retransmitting SYN */
 				rsm = NULL;
 				SOCKBUF_LOCK(sb);
 				goto send;
 			}
 		} else
 			len = rsm->r_end - rsm->r_start;
 		if ((bbr->rc_resends_use_tso == 0) &&
 		    (len > maxseg)) {
 			len = maxseg;
 			more_to_rxt = 1;
 		}
 		sb_offset = rsm->r_start - tp->snd_una;
 		if (len > 0) {
 			sack_rxmit = 1;
 			KMOD_TCPSTAT_INC(tcps_sack_rexmits);
 			KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
 			    min(len, maxseg));
 		} else {
 			/* I don't think this can happen */
 			rsm = NULL;
 			goto recheck_resend;
 		}
 		BBR_STAT_INC(bbr_resends_set);
 	} else if (bbr->r_ctl.rc_tlp_send) {
 		/*
 		 * Tail loss probe
 		 */
 		doing_tlp = 1;
 		rsm = bbr->r_ctl.rc_tlp_send;
 		bbr->r_ctl.rc_tlp_send = NULL;
 		sack_rxmit = 1;
 		len = rsm->r_end - rsm->r_start;
 		if ((bbr->rc_resends_use_tso == 0) && (len > maxseg))
 			len = maxseg;
 
 		if (SEQ_GT(tp->snd_una, rsm->r_start)) {
 #ifdef BBR_INVARIANTS
 			panic("tp:%p bbc:%p snd_una:%u rsm:%p r_start:%u",
 			    tp, bbr, tp->snd_una, rsm, rsm->r_start);
 #else
 			/* TSNH */
 			rsm = NULL;
 			goto recheck_resend;
 #endif
 		}
 		sb_offset = rsm->r_start - tp->snd_una;
 		BBR_STAT_INC(bbr_tlp_set);
 	}
 	/*
 	 * Enforce a connection sendmap count limit if set
 	 * as long as we are not retransmitting.
 	 */
 	if ((rsm == NULL) &&
 	    (V_tcp_map_entries_limit > 0) &&
 	    (bbr->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
 		BBR_STAT_INC(bbr_alloc_limited);
 		if (!bbr->alloc_limit_reported) {
 			bbr->alloc_limit_reported = 1;
 			BBR_STAT_INC(bbr_alloc_limited_conns);
 		}
 		goto just_return_nolock;
 	}
 #ifdef BBR_INVARIANTS
 	if (rsm && SEQ_LT(rsm->r_start, tp->snd_una)) {
 		panic("tp:%p bbr:%p rsm:%p sb_offset:%u len:%u",
 		    tp, bbr, rsm, sb_offset, len);
 	}
 #endif
 	/*
 	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
 	 * state flags.
 	 */
 	if (tp->t_flags & TF_NEEDFIN && (rsm == NULL))
 		flags |= TH_FIN;
 	if (tp->t_flags & TF_NEEDSYN)
 		flags |= TH_SYN;
 
 	if (rsm && (rsm->r_flags & BBR_HAS_FIN)) {
 		/* we are retransmitting the fin */
 		len--;
 		if (len) {
 			/*
 			 * When retransmitting data do *not* include the
 			 * FIN. This could happen from a TLP probe if we
 			 * allowed data with a FIN.
 			 */
 			flags &= ~TH_FIN;
 		}
 	} else if (rsm) {
 		if (flags & TH_FIN)
 			flags &= ~TH_FIN;
 	}
 	if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
 		void *end_rsm;
 
 		end_rsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_tmap, bbr_sendmap, r_tnext);
 		if (end_rsm)
 			kern_prefetch(end_rsm, &prefetch_rsm);
 		prefetch_rsm = 1;
 	}
 	SOCKBUF_LOCK(sb);
 	/*
 	 * If snd_nxt == snd_max and we have transmitted a FIN, the
 	 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
 	 * negative length.  This can also occur when TCP opens up its
 	 * congestion window while receiving additional duplicate acks after
 	 * fast-retransmit because TCP will reset snd_nxt to snd_max after
 	 * the fast-retransmit.
 	 *
 	 * In the normal retransmit-FIN-only case, however, snd_nxt will be
 	 * set to snd_una, the sb_offset will be 0, and the length may wind
 	 * up 0.
 	 *
 	 * If sack_rxmit is true we are retransmitting from the scoreboard
 	 * in which case len is already set.
 	 */
 	if (sack_rxmit == 0) {
 		uint32_t avail;
 
 		avail = sbavail(sb);
 		if (SEQ_GT(tp->snd_max, tp->snd_una))
 			sb_offset = tp->snd_max - tp->snd_una;
 		else
 			sb_offset = 0;
 		if (bbr->rc_tlp_new_data) {
 			/* TLP is forcing out new data */
 			uint32_t tlplen;
 
 			doing_tlp = 1;
 			tlplen = maxseg;
 
 			if (tlplen > (uint32_t)(avail - sb_offset)) {
 				tlplen = (uint32_t)(avail - sb_offset);
 			}
 			if (tlplen > tp->snd_wnd) {
 				len = tp->snd_wnd;
 			} else {
 				len = tlplen;
 			}
 			bbr->rc_tlp_new_data = 0;
 		} else {
 			len = bbr_what_can_we_send(tp, bbr, sendwin, avail, sb_offset, cts);
 			if ((len < p_maxseg) &&
 			    (bbr->rc_in_persist == 0) &&
 			    (ctf_outstanding(tp) >= (2 * p_maxseg)) &&
 			    ((avail - sb_offset) >= p_maxseg)) {
 				/*
 				 * We are not completing what's in the socket
 				 * buffer (i.e. there is at least a segment
 				 * waiting to send) and we have 2 or more
 				 * segments outstanding. There is no sense
 				 * in sending a little piece. Let's defer
 				 * and wait until we can send a whole
 				 * segment.
 				 */
 				len = 0;
 			}
 			if (bbr->rc_in_persist) {
 				/*
 				 * We are in persists, figure out if
 				 * a retransmit is available (maybe the previous
 				 * persists we sent) or if we have to send new
 				 * data.
 				 */
 				rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 				if (rsm) {
 					len = rsm->r_end - rsm->r_start;
 					if (rsm->r_flags & BBR_HAS_FIN)
 						len--;
 					if ((bbr->rc_resends_use_tso == 0) && (len > maxseg))
 						len = maxseg;
 					if (len > 1)
 						BBR_STAT_INC(bbr_persist_reneg);
 					/*
 					 * XXXrrs we could force the len to
 					 * 1 byte here to cause the chunk to
 					 * split apart.. but that would then
 					 * mean we always retransmit it as
 					 * one byte even after the window
 					 * opens.
 					 */
 					sack_rxmit = 1;
 					sb_offset = rsm->r_start - tp->snd_una;
 				} else {
 					/*
 					 * First time through in persists or peer
 					 * acked our one byte. Though we do have
 					 * to have something in the sb.
 					 */
 					len = 1;
 					sb_offset = 0;
 					if (avail == 0)
 					    len = 0;
 				}
 			}
 		}
 	}
 	if (prefetch_so_done == 0) {
 		kern_prefetch(so, &prefetch_so_done);
 		prefetch_so_done = 1;
 	}
 	/*
 	 * Lop off SYN bit if it has already been sent.  However, if this is
 	 * SYN-SENT state and if segment contains data and if we don't know
 	 * that foreign host supports TAO, suppress sending segment.
 	 */
 	if ((flags & TH_SYN) && (rsm == NULL) &&
 	    SEQ_GT(tp->snd_max, tp->snd_una)) {
 		if (tp->t_state != TCPS_SYN_RECEIVED)
 			flags &= ~TH_SYN;
 		/*
 		 * When sending additional segments following a TFO SYN|ACK,
 		 * do not include the SYN bit.
 		 */
 		if (IS_FASTOPEN(tp->t_flags) &&
 		    (tp->t_state == TCPS_SYN_RECEIVED))
 			flags &= ~TH_SYN;
 		sb_offset--, len++;
 		if (sbavail(sb) == 0)
 			len = 0;
 	} else if ((flags & TH_SYN) && rsm) {
 		/*
 		 * Subtract one from the len for the SYN being
 		 * retransmitted.
 		 */
 		len--;
 	}
 	/*
 	 * Be careful not to send data and/or FIN on SYN segments. This
 	 * measure is needed to prevent interoperability problems with not
 	 * fully conformant TCP implementations.
 	 */
 	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
 		len = 0;
 		flags &= ~TH_FIN;
 	}
 	/*
 	 * On TFO sockets, ensure no data is sent in the following cases:
 	 *
 	 *  - When retransmitting SYN|ACK on a passively-created socket
 	 *  - When retransmitting SYN on an actively created socket
 	 *  - When sending a zero-length cookie (cookie request) on an
 	 *    actively created socket
 	 *  - When the socket is in the CLOSED state (RST is being sent)
 	 */
 	if (IS_FASTOPEN(tp->t_flags) &&
 	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
 	     ((tp->t_state == TCPS_SYN_SENT) &&
 	      (tp->t_tfo_client_cookie_len == 0)) ||
 	     (flags & TH_RST))) {
 		len = 0;
 		sack_rxmit = 0;
 		rsm = NULL;
 	}
 	/* Without fast-open there should never be data sent on a SYN */
 	if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags)))
 		len = 0;
 	if (len <= 0) {
 		/*
 		 * If FIN has been sent but not acked, but we haven't been
 		 * called to retransmit, len will be < 0.  Otherwise, window
 		 * shrank after we sent into it.  If window shrank to 0,
 		 * cancel pending retransmit, pull snd_nxt back to (closed)
 		 * window, and set the persist timer if it isn't already
 		 * going.  If the window didn't close completely, just wait
 		 * for an ACK.
 		 *
 		 * We also do a general check here to ensure that we will
 		 * set the persist timer when we have data to send, but a
 		 * 0-byte window. This makes sure the persist timer is set
 		 * even if the packet hits one of the "goto send" lines
 		 * below.
 		 */
 		len = 0;
 		if ((tp->snd_wnd == 0) &&
 		    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
 		    (tp->snd_una == tp->snd_max) &&
 		    (sb_offset < (int)sbavail(sb))) {
 			/*
 			 * Not enough room in the rwnd to send
 			 * a paced segment out.
 			 */
 			bbr_enter_persist(tp, bbr, cts, __LINE__);
 		}
 	} else if ((rsm == NULL) &&
 		   (doing_tlp == 0) &&
 		   (len < bbr->r_ctl.rc_pace_max_segs)) {
 		/*
 		 * We are not sending a full segment for
 		 * some reason. Should we not send anything (think
 		 * SWS or persists)?
 		 */
 		if ((tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) &&
 		    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
 		    (len < (int)(sbavail(sb) - sb_offset))) {
 			/*
 			 * Here the rwnd is less than the pacing size,
 			 * this is not a retransmit, we are established,
 			 * and the send is not the last in the socket
 			 * buffer. Let's not send, and possibly enter
 			 * persists.
 			 */
 			len = 0;
 			if (tp->snd_max == tp->snd_una)
 				bbr_enter_persist(tp, bbr, cts, __LINE__);
 		} else if ((tp->snd_cwnd >= bbr->r_ctl.rc_pace_max_segs) &&
 			   (ctf_flight_size(tp, (bbr->r_ctl.rc_sacked +
 						 bbr->r_ctl.rc_lost_bytes)) > (2 * maxseg)) &&
 			   (len < (int)(sbavail(sb) - sb_offset)) &&
 			   (len < bbr_minseg(bbr))) {
 			/*
 			 * Here we are not retransmitting, and
 			 * the cwnd is not so small that we could
 			 * not send at least a min size (rxt timer
 			 * not having gone off). We have 2 segments or
 			 * more already in flight, it's not the tail end
 			 * of the socket buffer, and the cwnd is blocking
 			 * us from sending out the minimum pacing segment
 			 * size. Let's not send anything.
 			 */
 			bbr->rc_cwnd_limited = 1;
 			len = 0;
 		} else if (((tp->snd_wnd - ctf_outstanding(tp)) <
 			    min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) &&
 			   (ctf_flight_size(tp, (bbr->r_ctl.rc_sacked +
 						 bbr->r_ctl.rc_lost_bytes)) > (2 * maxseg)) &&
 			   (len < (int)(sbavail(sb) - sb_offset)) &&
 			   (TCPS_HAVEESTABLISHED(tp->t_state))) {
 			/*
 			 * Here we have a send window but we have
 			 * filled it up and we can't send another pacing
 			 * segment. We also have more than 2 segments in
 			 * flight and we are not completing the sb (i.e.
 			 * we do allow the last bytes of the sb to go out
 			 * even if it's not a full pacing segment).
 			 */
 			len = 0;
 		}
 	}
 	/* len will be >= 0 after this point. */
 	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
 	tcp_sndbuf_autoscale(tp, so, sendwin);
 	if (bbr->rc_in_persist &&
 	    len &&
 	    (rsm == NULL) &&
 	    (len < min((bbr->r_ctl.rc_high_rwnd/2), bbr->r_ctl.rc_pace_max_segs))) {
 		/*
 		 * We are in persist, not doing a retransmit, and don't have
 		 * enough space yet to send a full TSO. If this is the end of
 		 * the sb we need to send; otherwise nuke len to 0 and don't
 		 * send.
 		 */
 		int sbleft;
 		if (sbavail(sb) > sb_offset)
 			sbleft = sbavail(sb) - sb_offset;
 		else
 			sbleft = 0;
 		if (sbleft >= min((bbr->r_ctl.rc_high_rwnd/2), bbr->r_ctl.rc_pace_max_segs)) {
 			/* not at end of sb, let's not send */
 			len = 0;
 		}
 	}
 	/*
 	 * Decide if we can use TCP Segmentation Offloading (if supported by
 	 * hardware).
 	 *
 	 * TSO may only be used if we are in a pure bulk sending state.  The
 	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
 	 * options prevent using TSO.  With TSO the TCP header is the same
 	 * (except for the sequence number) for all generated packets.  This
 	 * makes it impossible to transmit any options which vary per
 	 * generated segment or packet.
 	 *
 	 * IPv4 handling has a clear separation of ip options and ip header
 	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen()
 	 * does the right thing below to provide length of just ip options
 	 * and thus checking for ipoptlen is enough to decide if ip options
 	 * are present.
 	 */
 #ifdef INET6
 	if (isipv6)
 		ipoptlen = ip6_optlen(inp);
 	else
 #endif
 	if (inp->inp_options)
 		ipoptlen = inp->inp_options->m_len -
 		    offsetof(struct ipoption, ipopt_list);
 	else
 		ipoptlen = 0;
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 	/*
 	 * Pre-calculate here as we save another lookup into the darknesses
 	 * of IPsec that way and can actually decide if TSO is ok.
 	 */
 #ifdef INET6
 	if (isipv6 && IPSEC_ENABLED(ipv6))
 		ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
 #ifdef INET
 	else
 #endif
 #endif				/* INET6 */
 #ifdef INET
 	if (IPSEC_ENABLED(ipv4))
 		ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
 #endif				/* INET */
 #endif				/* IPSEC */
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 	ipoptlen += ipsec_optlen;
 #endif
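 	/*
 	 * Below, TSO is only attempted when there is more than one
 	 * segment to send (len > maxseg), no UDP tunneling port is in
 	 * use, no TCP-MD5 signature is required, we have no SACK blocks
 	 * to advertise, and no IP options or IPsec headers are present.
 	 */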
 	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso &&
 	    (len > maxseg) &&
 	    (tp->t_port == 0) &&
 	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
 	    tp->rcv_numsacks == 0 &&
 	    ipoptlen == 0)
 		tso = 1;
 
 	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
 	    (long)TCP_MAXWIN << tp->rcv_scale);
 	/*
 	 * Sender silly window avoidance.  We transmit under the following
 	 * conditions when len is non-zero:
 	 *
 	 *	- We have a full segment (or more with TSO)
 	 *	- This is the last buffer in a write()/send() and we are
 	 *	  either idle or running NODELAY
 	 *	- we've timed out (e.g. persist timer)
 	 *	- we have more than 1/2 the maximum send window's worth of
 	 *	  data (receiver may be limiting the window size)
 	 *	- we need to retransmit
 	 */
 	if (rsm)
 		goto send;
 	if (len) {
 		if (sack_rxmit)
 			goto send;
 		if (len >= p_maxseg)
 			goto send;
 		/*
 		 * NOTE! on localhost connections an 'ack' from the remote
 		 * end may occur synchronously with the output and cause us
 		 * to flush a buffer queued with moretocome.  XXX
 		 *
 		 */
 		if (((tp->t_flags & TF_MORETOCOME) == 0) &&	/* normal case */
 		    ((tp->t_flags & TF_NODELAY) ||
 		    ((uint32_t)len + (uint32_t)sb_offset) >= sbavail(&so->so_snd)) &&
 		    (tp->t_flags & TF_NOPUSH) == 0) {
 			goto send;
 		}
 		if ((tp->snd_una == tp->snd_max) && len) {	/* Nothing outstanding */
 			goto send;
 		}
 		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
 			goto send;
 		}
 	}
 	/*
 	 * Sending of standalone window updates.
 	 *
 	 * Window updates are important when we close our window due to a
 	 * full socket buffer and are opening it again after the application
 	 * reads data from it.  Once the window has opened again and the
 	 * remote end starts to send again the ACK clock takes over and
 	 * provides the most current window information.
 	 *
 	 * We must avoid the silly window syndrome whereby every read from
 	 * the receive buffer, no matter how small, causes a window update
 	 * to be sent.  We also should avoid sending a flurry of window
 	 * updates when the socket buffer had queued a lot of data and the
 	 * application is doing small reads.
 	 *
 	 * Prevent a flurry of pointless window updates by only sending an
 	 * update when we can increase the advertised window by more than
 	 * 1/4th of the socket buffer capacity.  When the buffer is getting
 	 * full or is very small be more aggressive and send an update
 	 * whenever we can increase by two mss sized segments. In all other
 	 * situations the ACK's to new incoming data will carry further
 	 * window increases.
 	 *
 	 * Don't send an independent window update if a delayed ACK is
 	 * pending (it will get piggy-backed on it) or the remote side
 	 * already has done a half-close and won't send more data.  Skip
 	 * this if the connection is in T/TCP half-open state.
 	 */
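 	/*
 	 * For example, per the policy above, with a 64 KB receive buffer
 	 * an update is only sent once the advertised window can grow by
 	 * more than 16 KB; when the buffer is nearly full or very small,
 	 * growth of two maxseg-sized segments is enough.
 	 */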
 	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
 	    !(tp->t_flags & TF_DELACK) &&
 	    !TCPS_HAVERCVDFIN(tp->t_state)) {
 		/* Check to see if we should do a window update */
 		if (bbr_window_update_needed(tp, so, recwin, maxseg))
 			goto send;
 	}
 	/*
 	 * Send if we owe the peer an ACK, RST, SYN.  ACKNOW
 	 * is also a catch-all for the retransmit timer timeout case.
 	 */
 	if (tp->t_flags & TF_ACKNOW) {
 		goto send;
 	}
 	if (flags & TH_RST) {
 		/* Always send a RST if one is due */
 		goto send;
 	}
 	if ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0) {
 		goto send;
 	}
 	/*
 	 * If our state indicates that FIN should be sent and we have not
 	 * yet done so, then we need to send.
 	 */
 	if (flags & TH_FIN &&
 	    ((tp->t_flags & TF_SENTFIN) == 0)) {
 		goto send;
 	}
 	/*
 	 * No reason to send a segment, just return.
 	 */
 just_return:
 	SOCKBUF_UNLOCK(sb);
 just_return_nolock:
 	if (tot_len)
 		slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
 	if (bbr->rc_no_pacing)
 		slot = 0;
 	if (tot_len == 0) {
 		if ((ctf_outstanding(tp) + min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) >=
 		    tp->snd_wnd) {
 			BBR_STAT_INC(bbr_rwnd_limited);
 			app_limited = BBR_JR_RWND_LIMITED;
 			bbr_cwnd_limiting(tp, bbr, ctf_outstanding(tp));
 			if ((bbr->rc_in_persist == 0) &&
 			    TCPS_HAVEESTABLISHED(tp->t_state) &&
 			    (tp->snd_max == tp->snd_una) &&
 			    sbavail(&so->so_snd)) {
 				/* No send window, we must enter persist */
 				bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
 			}
 		} else if (ctf_outstanding(tp) >= sbavail(sb)) {
 			BBR_STAT_INC(bbr_app_limited);
 			app_limited = BBR_JR_APP_LIMITED;
 			bbr_cwnd_limiting(tp, bbr, ctf_outstanding(tp));
 		} else if ((ctf_flight_size(tp, (bbr->r_ctl.rc_sacked +
 						 bbr->r_ctl.rc_lost_bytes)) + p_maxseg) >= tp->snd_cwnd) {
 			BBR_STAT_INC(bbr_cwnd_limited);
 			app_limited = BBR_JR_CWND_LIMITED;
 			bbr_cwnd_limiting(tp, bbr, ctf_flight_size(tp, (bbr->r_ctl.rc_sacked +
 									bbr->r_ctl.rc_lost_bytes)));
 			bbr->rc_cwnd_limited = 1;
 		} else {
 			BBR_STAT_INC(bbr_app_limited);
 			app_limited = BBR_JR_APP_LIMITED;
 			bbr_cwnd_limiting(tp, bbr, ctf_outstanding(tp));
 		}
 		bbr->r_ctl.rc_hptsi_agg_delay = 0;
 		bbr->r_agg_early_set = 0;
 		bbr->r_ctl.rc_agg_early = 0;
 		bbr->r_ctl.rc_last_delay_val = 0;
 	} else if (bbr->rc_use_google == 0)
 		bbr_check_bbr_for_state(bbr, cts, __LINE__, 0);
 	/* Are we app limited? */
 	if ((app_limited == BBR_JR_APP_LIMITED) ||
 	    (app_limited == BBR_JR_RWND_LIMITED)) {
 		/**
 		 * We are application limited; we are considered app limited
 		 * until the delivered count catches up to the data currently
 		 * in flight plus what has already been delivered.
 		 */
 		bbr->r_ctl.r_app_limited_until = (ctf_flight_size(tp, (bbr->r_ctl.rc_sacked +
 								       bbr->r_ctl.rc_lost_bytes)) + bbr->r_ctl.rc_delivered);
 	}
 	if (tot_len == 0)
 		counter_u64_add(bbr_out_size[TCP_MSS_ACCT_JUSTRET], 1);
 	/* Don't update the time if we did not send */
 	bbr->r_ctl.rc_last_delay_val = 0;
 	bbr->rc_output_starts_timer = 1;
 	bbr_start_hpts_timer(bbr, tp, cts, 9, slot, tot_len);
 	bbr_log_type_just_return(bbr, cts, tot_len, hpts_calling, app_limited, p_maxseg, len);
 	if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
 		/* Make sure snd_nxt is drug up */
 		tp->snd_nxt = tp->snd_max;
 	}
 	return (error);
 
 send:
 	if (doing_tlp == 0) {
 		/*
 		 * Data not a TLP, and it's not the rxt firing. If it is the
 		 * rxt firing, we want to leave the tlp_in_progress flag on
 		 * so we don't send another TLP. It has to be a rack timer
 		 * or normal send (response to acked data) to clear the tlp
 		 * in progress flag.
 		 */
 		bbr->rc_tlp_in_progress = 0;
 		bbr->rc_tlp_rtx_out = 0;
 	} else {
 		/*
 		 * It's a TLP.
 		 */
 		bbr->rc_tlp_in_progress = 1;
 	}
 	bbr_timer_cancel(bbr, __LINE__, cts);
 	if (rsm == NULL) {
 		if (sbused(sb) > 0) {
 			/*
 			 * This is sub-optimal. We only send a standalone
 			 * FIN on its own segment.
 			 */
 			if (flags & TH_FIN) {
 				flags &= ~TH_FIN;
 				if ((len == 0) && ((tp->t_flags & TF_ACKNOW) == 0)) {
 					/* Lets not send this */
 					slot = 0;
 					goto just_return;
 				}
 			}
 		}
 	} else {
 		/*
 		 * We do *not* send a FIN on a retransmit if it has data.
 		 * The if clause here where len > 0 should never come true.
 		 */
 		if ((len > 0) &&
 		    (((rsm->r_flags & BBR_HAS_FIN) == 0) &&
 		    (flags & TH_FIN))) {
 			flags &= ~TH_FIN;
 			len--;
 		}
 	}
 	SOCKBUF_LOCK_ASSERT(sb);
 	if (len > 0) {
 		if ((tp->snd_una == tp->snd_max) &&
 		    (bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time) >= bbr_rtt_probe_time)) {
 			/*
 			 * This qualifies as an RTT_PROBE session since we
 			 * dropped the data outstanding to nothing and waited
 			 * more than bbr_rtt_probe_time.
 			 */
 			bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_WASIDLE, 0);
 			bbr_set_reduced_rtt(bbr, cts, __LINE__);
 		}
 		if (len >= maxseg)
 			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
 		else
 			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
 	}
 	/*
 	 * Before ESTABLISHED, force sending of initial options unless TCP
 	 * set not to do any options. NOTE: we assume that the IP/TCP header
 	 * plus TCP options always fit in a single mbuf, leaving room for a
 	 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
 	 * + optlen <= MCLBYTES
 	 */
 	optlen = 0;
 #ifdef INET6
 	if (isipv6)
 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
 	else
 #endif
 		hdrlen = sizeof(struct tcpiphdr);
 
 	/*
 	 * Compute options for segment. We only have to care about SYN and
 	 * established connection segments.  Options for SYN-ACK segments
 	 * are handled in TCP syncache.
 	 */
 	to.to_flags = 0;
 	local_options = 0;
 	if ((tp->t_flags & TF_NOOPT) == 0) {
 		/* Maximum segment size. */
 		if (flags & TH_SYN) {
 			to.to_mss = tcp_mssopt(&inp->inp_inc);
 			if (tp->t_port)
 				to.to_mss -= V_tcp_udp_tunneling_overhead;
 			to.to_flags |= TOF_MSS;
 			/*
 			 * On SYN or SYN|ACK transmits on TFO connections,
 			 * only include the TFO option if it is not a
 			 * retransmit, as the presence of the TFO option may
 			 * have caused the original SYN or SYN|ACK to have
 			 * been dropped by a middlebox.
 			 */
 			if (IS_FASTOPEN(tp->t_flags) &&
 			    (tp->t_rxtshift == 0)) {
 				if (tp->t_state == TCPS_SYN_RECEIVED) {
 					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
 					to.to_tfo_cookie =
 					    (u_int8_t *)&tp->t_tfo_cookie.server;
 					to.to_flags |= TOF_FASTOPEN;
 					wanted_cookie = 1;
 				} else if (tp->t_state == TCPS_SYN_SENT) {
 					to.to_tfo_len =
 					    tp->t_tfo_client_cookie_len;
 					to.to_tfo_cookie =
 					    tp->t_tfo_cookie.client;
 					to.to_flags |= TOF_FASTOPEN;
 					wanted_cookie = 1;
 				}
 			}
 		}
 		/* Window scaling. */
 		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
 			to.to_wscale = tp->request_r_scale;
 			to.to_flags |= TOF_SCALE;
 		}
 		/* Timestamps. */
 		if ((tp->t_flags & TF_RCVD_TSTMP) ||
 		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
 			to.to_tsval = tcp_tv_to_mssectick(&bbr->rc_tv) + tp->ts_offset;
 			to.to_tsecr = tp->ts_recent;
 			to.to_flags |= TOF_TS;
 			local_options += TCPOLEN_TIMESTAMP + 2;
 		}
 		/* Set receive buffer autosizing timestamp. */
 		if (tp->rfbuf_ts == 0 &&
 		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
 			tp->rfbuf_ts = tcp_tv_to_mssectick(&bbr->rc_tv);
 		/* Selective ACK's. */
 		if (flags & TH_SYN)
 			to.to_flags |= TOF_SACKPERM;
 		else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
 		    tp->rcv_numsacks > 0) {
 			to.to_flags |= TOF_SACK;
 			to.to_nsacks = tp->rcv_numsacks;
 			to.to_sacks = (u_char *)tp->sackblks;
 		}
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		/* TCP-MD5 (RFC2385). */
 		if (tp->t_flags & TF_SIGNATURE)
 			to.to_flags |= TOF_SIGNATURE;
 #endif				/* TCP_SIGNATURE */
 
 		/* Processing the options. */
 		hdrlen += (optlen = tcp_addoptions(&to, opt));
 		/*
 		 * If we wanted a TFO option to be added, but it was unable
 		 * to fit, ensure no data is sent.
 		 */
 		if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
 		    !(to.to_flags & TOF_FASTOPEN))
 			len = 0;
 	}
 	if (tp->t_port) {
 		if (V_tcp_udp_tunneling_port == 0) {
 			/* The port was removed?? */
 			SOCKBUF_UNLOCK(&so->so_snd);
 			return (EHOSTUNREACH);
 		}
 		hdrlen += sizeof(struct udphdr);
 	}
 #ifdef INET6
 	if (isipv6)
 		ipoptlen = ip6_optlen(inp);
 	else
 #endif
 	if (inp->inp_options)
 		ipoptlen = inp->inp_options->m_len -
 		    offsetof(struct ipoption, ipopt_list);
 	else
 		ipoptlen = 0;
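 	/*
 	 * XXX Note that the IP option length recomputed just above is
 	 * discarded; ipoptlen is reset to 0 here before the IPsec header
 	 * length is added back in below.
 	 */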
 	ipoptlen = 0;
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 	ipoptlen += ipsec_optlen;
 #endif
 	if (bbr->rc_last_options != local_options) {
 		/*
 		 * Cache the options length; this generally does not change
 		 * on a connection. We use this to calculate TSO.
 		 */
 		bbr->rc_last_options = local_options;
 	}
 	maxseg = tp->t_maxseg - (ipoptlen + optlen);
 	p_maxseg = min(maxseg, pace_max_segs);
 	/*
 	 * Adjust data length if insertion of options will bump the packet
 	 * length beyond the t_maxseg length. Clear the FIN bit because we
 	 * cut off the tail of the segment.
 	 */
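 	/*
 	 * For TSO, len is further clamped below to the hardware limit
 	 * (if_hw_tsomax less the header space), trimmed to a multiple of
 	 * maxseg unless this send drains the socket buffer, and TSO is
 	 * abandoned if what remains fits in a single segment.
 	 */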
 	if (len > maxseg) {
 		if (len != 0 && (flags & TH_FIN)) {
 			flags &= ~TH_FIN;
 		}
 		if (tso) {
 			uint32_t moff;
 			int32_t max_len;
 
 			/* extract TSO information */
 			if_hw_tsomax = tp->t_tsomax;
 			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
 			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
 			KASSERT(ipoptlen == 0,
 			    ("%s: TSO can't do IP options", __func__));
 
 			/*
 			 * Check if we should limit by maximum payload
 			 * length:
 			 */
 			if (if_hw_tsomax != 0) {
 				/* compute maximum TSO length */
 				max_len = (if_hw_tsomax - hdrlen -
 				    max_linkhdr);
 				if (max_len <= 0) {
 					len = 0;
 				} else if (len > max_len) {
 					len = max_len;
 				}
 			}
 			/*
 			 * Prevent the last segment from being fractional
 			 * unless the send sockbuf can be emptied:
 			 */
 			if ((sb_offset + len) < sbavail(sb)) {
 				moff = len % (uint32_t)maxseg;
 				if (moff != 0) {
 					len -= moff;
 				}
 			}
 			/*
 			 * In case there are too many small fragments don't
 			 * use TSO:
 			 */
 			if (len <= maxseg) {
 				len = maxseg;
 				tso = 0;
 			}
 		} else {
 			/* Not doing TSO */
 			if (optlen + ipoptlen >= tp->t_maxseg) {
 				/*
 				 * Since we don't have enough space to put
 				 * the IP header chain and the TCP header in
 				 * one packet as required by RFC 7112, don't
 				 * send it. Also ensure that at least one
 				 * byte of the payload can be put into the
 				 * TCP segment.
 				 */
 				SOCKBUF_UNLOCK(&so->so_snd);
 				error = EMSGSIZE;
 				sack_rxmit = 0;
 				goto out;
 			}
 			len = maxseg;
 		}
 	} else {
 		/* Not doing TSO */
 		if_hw_tsomaxsegcount = 0;
 		tso = 0;
 	}
 	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
 	    ("%s: len > IP_MAXPACKET", __func__));
 #ifdef DIAGNOSTIC
 #ifdef INET6
 	if (max_linkhdr + hdrlen > MCLBYTES)
 #else
 	if (max_linkhdr + hdrlen > MHLEN)
 #endif
 		panic("tcphdr too big");
 #endif
 	/*
 	 * This KASSERT is here to catch edge cases at a well defined place.
 	 * Before, those had triggered (random) panic conditions further
 	 * down.
 	 */
 #ifdef BBR_INVARIANTS
 	if (sack_rxmit) {
 		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
 			panic("RSM:%p TP:%p bbr:%p start:%u is < snd_una:%u",
 			    rsm, tp, bbr, rsm->r_start, tp->snd_una);
 		}
 	}
 #endif
 	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
 	if ((len == 0) &&
 	    (flags & TH_FIN) &&
 	    (sbused(sb))) {
 		/*
 		 * We have outstanding data; don't send a FIN by itself.
 		 */
 		slot = 0;
 		goto just_return;
 	}
 	/*
 	 * Grab a header mbuf, attaching a copy of data to be transmitted,
 	 * and initialize the header from the template for sends on this
 	 * connection.
 	 */
 	if (len) {
 		uint32_t moff;
 
 		/*
 		 * We place a limit on sending with hptsi.
 		 */
 		if ((rsm == NULL) && len > pace_max_segs)
 			len = pace_max_segs;
 		if (len <= maxseg)
 			tso = 0;
 #ifdef INET6
 		if (MHLEN < hdrlen + max_linkhdr)
 			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 		else
 #endif
 			m = m_gethdr(M_NOWAIT, MT_DATA);
 
 		if (m == NULL) {
 			BBR_STAT_INC(bbr_failed_mbuf_aloc);
 			bbr_log_enobuf_jmp(bbr, len, cts, __LINE__, len, 0, 0);
 			SOCKBUF_UNLOCK(sb);
 			error = ENOBUFS;
 			sack_rxmit = 0;
 			goto out;
 		}
 		m->m_data += max_linkhdr;
 		m->m_len = hdrlen;
 		/*
 		 * Start the m_copy functions from the closest mbuf to the
 		 * sb_offset in the socket buffer chain.
 		 */
 		if ((sb_offset > sbavail(sb)) || ((len + sb_offset) > sbavail(sb))) {
 #ifdef BBR_INVARIANTS
 			if ((len + sb_offset) > (sbavail(sb) + ((flags & (TH_FIN | TH_SYN)) ? 1 : 0)))
 				panic("tp:%p bbr:%p len:%u sb_offset:%u sbavail:%u rsm:%p %u:%u:%u",
 				    tp, bbr, len, sb_offset, sbavail(sb), rsm,
 				    doing_retran_from,
 				    picked_up_retran,
 				    doing_tlp);
 
 #endif
 			/*
 			 * In this messed up situation we have three choices:
 			 * a) pretend the send worked, and just start timers
 			 * and what not (not good since that may lead us
 			 * back here a lot), <or> b) send the lowest segment
 			 * in the map, <or> c) drop the connection. Let's do
 			 * <b>, which if it continues to happen will lead to
 			 * <c> via timeouts.
 			 */
 			BBR_STAT_INC(bbr_offset_recovery);
 			rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map);
 			sb_offset = 0;
 			if (rsm == NULL) {
 				sack_rxmit = 0;
 				len = sbavail(sb);
 			} else {
 				sack_rxmit = 1;
 				if (rsm->r_start != tp->snd_una) {
 					/*
 					 * Things are really messed up, <c>
 					 * is the only thing to do.
 					 */
 					BBR_STAT_INC(bbr_offset_drop);
 					SOCKBUF_UNLOCK(sb);
 					(void)m_free(m);
 					return (-EFAULT); /* tcp_drop() */
 				}
 				len = rsm->r_end - rsm->r_start;
 			}
 			if (len > sbavail(sb))
 				len = sbavail(sb);
 			if (len > maxseg)
 				len = maxseg;
 		}
 		mb = sbsndptr_noadv(sb, sb_offset, &moff);
 		if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
 			m_copydata(mb, moff, (int)len,
 			    mtod(m, caddr_t)+hdrlen);
 			if (rsm == NULL)
 				sbsndptr_adv(sb, mb, len);
 			m->m_len += len;
 		} else {
 			struct sockbuf *msb;
 
 			if (rsm)
 				msb = NULL;
 			else
 				msb = sb;
 #ifdef BBR_INVARIANTS
 			if ((len + moff) > (sbavail(sb) + ((flags & (TH_FIN | TH_SYN)) ? 1 : 0))) {
 				if (rsm) {
 					panic("tp:%p bbr:%p len:%u moff:%u sbavail:%u rsm:%p snd_una:%u rsm_start:%u flg:%x %u:%u:%u sr:%d ",
 					    tp, bbr, len, moff,
 					    sbavail(sb), rsm,
 					    tp->snd_una, rsm->r_flags, rsm->r_start,
 					    doing_retran_from,
 					    picked_up_retran,
 					    doing_tlp, sack_rxmit);
 				} else {
 					panic("tp:%p bbr:%p len:%u moff:%u sbavail:%u sb_offset:%u snd_una:%u",
 					    tp, bbr, len, moff, sbavail(sb), sb_offset, tp->snd_una);
 				}
 			}
 #endif
 			m->m_next = tcp_m_copym(
 				mb, moff, &len,
 				if_hw_tsomaxsegcount,
 				if_hw_tsomaxsegsize, msb,
 				((rsm == NULL) ? hw_tls : 0)
 #ifdef NETFLIX_COPY_ARGS
 				, NULL, NULL
 #endif
 				);
 			if (len <= maxseg) {
 				/*
 				 * Must have run out of mbufs for the copy;
 				 * shorten it to no longer need tso. Let's
 				 * not put on sendalot since we are low on
 				 * mbufs.
 				 */
 				tso = 0;
 			}
 			if (m->m_next == NULL) {
 				SOCKBUF_UNLOCK(sb);
 				(void)m_free(m);
 				error = ENOBUFS;
 				sack_rxmit = 0;
 				goto out;
 			}
 		}
 #ifdef BBR_INVARIANTS
 		if (tso && len < maxseg) {
 			panic("tp:%p tso on, but len:%d < maxseg:%d",
 			    tp, len, maxseg);
 		}
 		if (tso && if_hw_tsomaxsegcount) {
 			int32_t seg_cnt = 0;
 			struct mbuf *foo;
 
 			foo = m;
 			while (foo) {
 				seg_cnt++;
 				foo = foo->m_next;
 			}
 			if (seg_cnt > if_hw_tsomaxsegcount) {
 				panic("seg_cnt:%d > max:%d", seg_cnt, if_hw_tsomaxsegcount);
 			}
 		}
 #endif
 		/*
 		 * If we're sending everything we've got, set PUSH. (This
 		 * will keep happy those implementations which only give
 		 * data to the user when a buffer fills or a PUSH comes in.)
 		 */
 		if (sb_offset + len == sbused(sb) &&
 		    sbused(sb) &&
 		    !(flags & TH_SYN)) {
 			flags |= TH_PUSH;
 		}
 		SOCKBUF_UNLOCK(sb);
 	} else {
 		SOCKBUF_UNLOCK(sb);
 		if (tp->t_flags & TF_ACKNOW)
 			KMOD_TCPSTAT_INC(tcps_sndacks);
 		else if (flags & (TH_SYN | TH_FIN | TH_RST))
 			KMOD_TCPSTAT_INC(tcps_sndctrl);
 		else
 			KMOD_TCPSTAT_INC(tcps_sndwinup);
 
 		m = m_gethdr(M_NOWAIT, MT_DATA);
 		if (m == NULL) {
 			BBR_STAT_INC(bbr_failed_mbuf_aloc);
 			bbr_log_enobuf_jmp(bbr, len, cts, __LINE__, len, 0, 0);
 			error = ENOBUFS;
 			/* Fudge the send time since we could not send */
 			sack_rxmit = 0;
 			goto out;
 		}
 #ifdef INET6
 		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
 		    MHLEN >= hdrlen) {
 			M_ALIGN(m, hdrlen);
 		} else
 #endif
 			m->m_data += max_linkhdr;
 		m->m_len = hdrlen;
 	}
 	SOCKBUF_UNLOCK_ASSERT(sb);
 	m->m_pkthdr.rcvif = (struct ifnet *)0;
 #ifdef MAC
 	mac_inpcb_create_mbuf(inp, m);
 #endif
 #ifdef INET6
 	if (isipv6) {
 		ip6 = mtod(m, struct ip6_hdr *);
 		if (tp->t_port) {
 			udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
 			udp->uh_dport = tp->t_port;
 			ulen = hdrlen + len - sizeof(struct ip6_hdr);
 			udp->uh_ulen = htons(ulen);
 			th = (struct tcphdr *)(udp + 1);
 		} else {
 			th = (struct tcphdr *)(ip6 + 1);
 		}
 		tcpip_fillheaders(inp, tp->t_port, ip6, th);
 	} else
 #endif				/* INET6 */
 	{
 		ip = mtod(m, struct ip *);
 		if (tp->t_port) {
 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
 			udp->uh_dport = tp->t_port;
 			ulen = hdrlen + len - sizeof(struct ip);
 			udp->uh_ulen = htons(ulen);
 			th = (struct tcphdr *)(udp + 1);
 		} else {
 			th = (struct tcphdr *)(ip + 1);
 		}
 		tcpip_fillheaders(inp, tp->t_port, ip, th);
 	}
 	/*
 	 * If we are doing retransmissions, then snd_nxt will not reflect
 	 * the first unsent octet.  For ACK only packets, we do not want the
 	 * sequence number of the retransmitted packet, we want the sequence
 	 * number of the next unsent octet.  So, if there is no data (and no
 	 * SYN or FIN), use snd_max instead of snd_nxt when filling in
 	 * ti_seq.  But if we are in persist state, snd_max might reflect
 	 * one byte beyond the right edge of the window, so use snd_nxt in
 	 * that case, since we know we aren't doing a retransmission.
 	 * (retransmit and persist are mutually exclusive...)
 	 */
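 	/*
 	 * Concretely: new data and pure ACKs are sent from snd_max, a SYN
 	 * from iss, a FIN that was already sent from snd_max - 1, and any
 	 * retransmission from the start sequence of its rsm.
 	 */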
 	if (sack_rxmit == 0) {
 		if (len && ((flags & (TH_FIN | TH_SYN | TH_RST)) == 0)) {
 			/* New data (including new persists) */
 			th->th_seq = htonl(tp->snd_max);
 			bbr_seq = tp->snd_max;
 		} else if (flags & TH_SYN) {
 			/* Syn's always send from iss */
 			th->th_seq = htonl(tp->iss);
 			bbr_seq = tp->iss;
 		} else if (flags & TH_FIN) {
 			if (flags & TH_FIN && tp->t_flags & TF_SENTFIN) {
 				/*
 				 * If we sent the FIN already, it's snd_max
 				 * minus 1.
 				 */
 				th->th_seq = (htonl(tp->snd_max - 1));
 				bbr_seq = (tp->snd_max - 1);
 			} else {
 				/* First time FIN use snd_max */
 				th->th_seq = htonl(tp->snd_max);
 				bbr_seq = tp->snd_max;
 			}
 		} else {
 			/*
 			 * len == 0 and not persist: we use snd_max, sending
 			 * an ack, unless we have sent the FIN, in which case
 			 * it is snd_max minus 1.
 			 */
 			/*
 			 * XXXRRS Question: if we are in persists and we have
 			 * nothing outstanding to send and we have not sent
 			 * a FIN, we will send an ACK. In such a case it
 			 * might be better to send (tp->snd_una - 1), which
 			 * would force the peer to ack.
 			 */
 			if (tp->t_flags & TF_SENTFIN) {
 				th->th_seq = htonl(tp->snd_max - 1);
 				bbr_seq = (tp->snd_max - 1);
 			} else {
 				th->th_seq = htonl(tp->snd_max);
 				bbr_seq = tp->snd_max;
 			}
 		}
 	} else {
 		/* All retransmits use the rsm to guide the send */
 		th->th_seq = htonl(rsm->r_start);
 		bbr_seq = rsm->r_start;
 	}
 	th->th_ack = htonl(tp->rcv_nxt);
 	if (optlen) {
 		bcopy(opt, th + 1, optlen);
 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
 	}
 	tcp_set_flags(th, flags);
 	/*
 	 * Calculate receive window.  Don't shrink window, but avoid silly
 	 * window syndrome.
 	 */
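 	/*
 	 * Concretely: advertise zero on a RST, or if the window would be
 	 * smaller than both one maxseg and a quarter of the receive buffer;
 	 * never advertise less than what was already offered (rcv_adv -
 	 * rcv_nxt), nor more than TCP_MAXWIN shifted by the window scale.
 	 */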
 	if ((flags & TH_RST) || ((recwin < (so->so_rcv.sb_hiwat / 4) &&
 				  recwin < maxseg)))
 		recwin = 0;
 	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
 	    recwin < (tp->rcv_adv - tp->rcv_nxt))
 		recwin = (tp->rcv_adv - tp->rcv_nxt);
 	if (recwin > TCP_MAXWIN << tp->rcv_scale)
 		recwin = TCP_MAXWIN << tp->rcv_scale;
 
 	/*
 	 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
 	 * <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK> case is
 	 * handled in syncache.
 	 */
 	if (flags & TH_SYN)
 		th->th_win = htons((u_short)
 		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
 	else {
 		/* Avoid shrinking window with window scaling. */
 		recwin = roundup2(recwin, 1 << tp->rcv_scale);
 		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
 	}
 	/*
 	 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
 	 * window.  This may cause the remote transmitter to stall.  This
 	 * flag tells soreceive() to disable delayed acknowledgements when
 	 * draining the buffer.  This can occur if the receiver is
 	 * attempting to read more data than can be buffered prior to
 	 * transmitting on the connection.
 	 */
 	if (th->th_win == 0) {
 		tp->t_sndzerowin++;
 		tp->t_flags |= TF_RXWIN0SENT;
 	} else
 		tp->t_flags &= ~TF_RXWIN0SENT;
 	/*
 	 * We don't support urgent data, but drag along
 	 * the pointer in case of a stack switch.
 	 */
 	tp->snd_up = tp->snd_una;
 	/*
 	 * Put TCP length in extended header, and then checksum extended
 	 * header and data.
 	 */
 	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() need this */
 
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 	if (to.to_flags & TOF_SIGNATURE) {
 		/*
 		 * Calculate MD5 signature and put it into the place
 		 * determined before. NOTE: since TCP options buffer doesn't
 		 * point into mbuf's data, calculate offset and use it.
 		 */
 		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
 		    (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
 			/*
 			 * Do not send segment if the calculation of MD5
 			 * digest has failed.
 			 */
 			goto out;
 		}
 	}
 #endif
 
 #ifdef INET6
 	if (isipv6) {
 		/*
 		 * ip6_plen does not need to be filled now, and will be
 		 * filled in by ip6_output.
 		 */
 		if (tp->t_port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
 			th->th_sum = htons(0);
 			UDPSTAT_INC(udps_opackets);
 		} else {
 			csum_flags = m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			th->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr) +
 			    optlen + len, IPPROTO_TCP, 0);
 		}
 	}
 #endif
 #if defined(INET6) && defined(INET)
 	else
 #endif
 #ifdef INET
 	{
 		if (tp->t_port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
 			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
 			th->th_sum = htons(0);
 			UDPSTAT_INC(udps_opackets);
 		} else {
 			csum_flags = m->m_pkthdr.csum_flags = CSUM_TCP;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
 			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
 			    IPPROTO_TCP + len + optlen));
 		}
 		/* IP version must be set here for ipv4/ipv6 checking later */
 		KASSERT(ip->ip_v == IPVERSION,
 		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
 	}
 #endif
 
 	/*
 	 * Enable TSO and specify the size of the segments. The TCP pseudo
 	 * header checksum is always provided. XXX: Fixme: This is currently
 	 * not the case for IPv6.
 	 */
 	if (tso) {
 		KASSERT(len > maxseg,
 		    ("%s: len:%d <= tso_segsz:%d", __func__, len, maxseg));
 		m->m_pkthdr.csum_flags |= CSUM_TSO;
 		csum_flags |= CSUM_TSO;
 		m->m_pkthdr.tso_segsz = maxseg;
 	}
 	KASSERT(len + hdrlen == m_length(m, NULL),
 	    ("%s: mbuf chain different than expected: %d + %u != %u",
 	    __func__, len, hdrlen, m_length(m, NULL)));
 
 #ifdef TCP_HHOOK
 	/* Run HHOOK_TC_ESTABLISHED_OUT helper hooks. */
 	hhook_run_tcp_est_out(tp, th, &to, len, tso);
 #endif
 
 	/* Log to the black box */
 	if (tcp_bblogging_on(tp)) {
 		union tcp_log_stackspecific log;
 
 		bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
 		/* Record info on type of transmission */
 		log.u_bbr.flex1 = bbr->r_ctl.rc_hptsi_agg_delay;
 		log.u_bbr.flex2 = (bbr->r_recovery_bw << 3);
 		log.u_bbr.flex3 = maxseg;
 		log.u_bbr.flex4 = delay_calc;
 		log.u_bbr.flex5 = bbr->rc_past_init_win;
 		log.u_bbr.flex5 <<= 1;
 		log.u_bbr.flex5 |= bbr->rc_no_pacing;
 		log.u_bbr.flex5 <<= 29;
 		log.u_bbr.flex5 |= tp->t_maxseg;
 		log.u_bbr.flex6 = bbr->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex7 = (bbr->rc_bbr_state << 8) | bbr_state_val(bbr);
 		/* let's poke in the low and the high here for debugging */
 		log.u_bbr.pkts_out = bbr->rc_tp->t_maxseg;
 		if (rsm || sack_rxmit) {
 			if (doing_tlp)
 				log.u_bbr.flex8 = 2;
 			else
 				log.u_bbr.flex8 = 1;
 		} else {
 			log.u_bbr.flex8 = 0;
 		}
 		lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
 		    len, &log, false, NULL, NULL, 0, tv);
 	} else {
 		lgb = NULL;
 	}
 	/*
 	 * Fill in IP length and desired time to live and send to IP level.
 	 * There should be a better way to handle ttl and tos; we could keep
 	 * them in the template, but need a way to checksum without them.
 	 */
 	/*
 	 * m->m_pkthdr.len should have been set before checksum calculation,
 	 * because in6_cksum() needs it.
 	 */
 #ifdef INET6
 	if (isipv6) {
 		/*
 		 * we separately set hoplimit for every segment, since the
 		 * user might want to change the value via setsockopt. Also,
 		 * desired default hop limit might be changed via Neighbor
 		 * Discovery.
 		 */
 		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
 
 		/*
 		 * Set the packet size here for the benefit of DTrace
 		 * probes. ip6_output() will set it properly; it's supposed
 		 * to include the option header lengths as well.
 		 */
 		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
 
 		if (V_path_mtu_discovery && maxseg > V_tcp_minmss)
 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 		else
 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 
 		if (tp->t_state == TCPS_SYN_SENT)
 			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
 
 		TCP_PROBE5(send, NULL, tp, ip6, tp, th);
 		/* TODO: IPv6 IP6TOS_ECT bit on */
 		error = ip6_output(m, inp->in6p_outputopts,
 		    &inp->inp_route6,
 		    ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
 		    NULL, NULL, inp);
 
 		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
 			mtu = inp->inp_route6.ro_nh->nh_mtu;
 	}
 #endif				/* INET6 */
 #if defined(INET) && defined(INET6)
 	else
 #endif
 #ifdef INET
 	{
 		ip->ip_len = htons(m->m_pkthdr.len);
 #ifdef INET6
 		if (isipv6)
 			ip->ip_ttl = in6_selecthlim(inp, NULL);
 #endif				/* INET6 */
 		/*
 		 * If we do path MTU discovery, then we set DF on every
 		 * packet. This might not be the best thing to do according
 		 * to RFC3390 Section 2. However the tcp hostcache mitigates
 		 * the problem so it affects only the first tcp connection
 		 * with a host.
 		 *
 		 * NB: Don't set DF on small MTU/MSS to have a safe
 		 * fallback.
 		 */
 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 			if (tp->t_port == 0 || len < V_tcp_minmss) {
 				ip->ip_off |= htons(IP_DF);
 			}
 		} else {
 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 		}
 
 		if (tp->t_state == TCPS_SYN_SENT)
 			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
 
 		TCP_PROBE5(send, NULL, tp, ip, tp, th);
 
 		error = ip_output(m, inp->inp_options, &inp->inp_route,
 		    ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
 		    inp);
 		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
 			mtu = inp->inp_route.ro_nh->nh_mtu;
 	}
 #endif				/* INET */
 out:
 
 	if (lgb) {
 		lgb->tlb_errno = error;
 		lgb = NULL;
 	}
 	/*
 	 * In transmit state, time the transmission and arrange for the
 	 * retransmit.  In persist state, just set snd_max.
 	 */
 	if (error == 0) {
 		tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls);
 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
 		    (tp->t_flags & TF_SACK_PERMIT) &&
 		    tp->rcv_numsacks > 0)
 			tcp_clean_dsack_blocks(tp);
 		/* We sent an ack; clear the bbr_segs_rcvd count */
 		bbr->output_error_seen = 0;
 		bbr->oerror_cnt = 0;
 		bbr->bbr_segs_rcvd = 0;
 		if (len == 0)
 			counter_u64_add(bbr_out_size[TCP_MSS_ACCT_SNDACK], 1);
 		/* Do accounting for new sends */
 		if ((len > 0) && (rsm == NULL)) {
 			int idx;
 			if (tp->snd_una == tp->snd_max) {
 				/*
 				 * Special case to match Google: when
 				 * nothing is in flight, the delivered
 				 * time does get updated to the current
 				 * time (see tcp_rate_bsd.c).
 				 */
 				bbr->r_ctl.rc_del_time = cts;
 			}
 			if (len >= maxseg) {
 				idx = (len / maxseg) + 3;
 				if (idx >= TCP_MSS_ACCT_ATIMER)
 					counter_u64_add(bbr_out_size[(TCP_MSS_ACCT_ATIMER - 1)], 1);
 				else
 					counter_u64_add(bbr_out_size[idx], 1);
 			} else {
 				/* smaller than a MSS */
 				idx = len / (bbr_hptsi_bytes_min - bbr->rc_last_options);
 				if (idx >= TCP_MSS_SMALL_MAX_SIZE_DIV)
 					idx = (TCP_MSS_SMALL_MAX_SIZE_DIV - 1);
 				counter_u64_add(bbr_out_size[(idx + TCP_MSS_SMALL_SIZE_OFF)], 1);
 			}
 		}
 	}
 	abandon = 0;
 	/*
 	 * We must do the send accounting before we log the output,
 	 * otherwise the state of the rsm could change and we account to the
 	 * wrong bucket.
 	 */
 	if (len > 0) {
 		bbr_do_send_accounting(tp, bbr, rsm, len, error);
 		if (error == 0) {
 			if (tp->snd_una == tp->snd_max)
 				bbr->r_ctl.rc_tlp_rxt_last_time = cts;
 		}
 	}
 	bbr_log_output(bbr, tp, &to, len, bbr_seq, (uint8_t) flags, error,
 	    cts, mb, &abandon, rsm, 0, sb);
 	if (abandon) {
 		/*
 		 * If bbr_log_output destroys the TCB or sees a TH_RST being
 		 * sent we should hit this condition.
 		 */
 		return (0);
 	}
 	if (bbr->rc_in_persist == 0) {
 		/*
 		 * Advance snd_nxt over sequence space of this segment.
 		 */
 		if (error)
 			/* We don't log or do anything with errors */
 			goto skip_upd;
 
 		if (tp->snd_una == tp->snd_max &&
 		    (len || (flags & (TH_SYN | TH_FIN)))) {
 			/*
 			 * Update the time we just added data since none was
 			 * outstanding.
 			 */
 			bbr_log_progress_event(bbr, tp, ticks, PROGRESS_START, __LINE__);
 			bbr->rc_tp->t_acktime  = ticks;
 		}
 		if (flags & (TH_SYN | TH_FIN) && (rsm == NULL)) {
 			if (flags & TH_SYN) {
 				/*
 				 * Smack the snd_max to iss + 1;
 				 * if it's a FO we will add len below.
 				 */
 				tp->snd_max = tp->iss + 1;
 			}
 			if ((flags & TH_FIN) && ((tp->t_flags & TF_SENTFIN) == 0)) {
 				tp->snd_max++;
 				tp->t_flags |= TF_SENTFIN;
 			}
 		}
 		if (sack_rxmit == 0)
 			tp->snd_max += len;
 skip_upd:
 		if ((error == 0) && len)
 			tot_len += len;
 	} else {
 		/* Persists case */
 		int32_t xlen = len;
 
 		if (error)
 			goto nomore;
 
 		if (flags & TH_SYN)
 			++xlen;
 		if ((flags & TH_FIN) && ((tp->t_flags & TF_SENTFIN) == 0)) {
 			++xlen;
 			tp->t_flags |= TF_SENTFIN;
 		}
 		if (xlen && (tp->snd_una == tp->snd_max)) {
 			/*
 			 * Update the time we just added data since none was
 			 * outstanding.
 			 */
 			bbr_log_progress_event(bbr, tp, ticks, PROGRESS_START, __LINE__);
 			bbr->rc_tp->t_acktime = ticks;
 		}
 		if (sack_rxmit == 0)
 			tp->snd_max += xlen;
 		tot_len += (len + optlen + ipoptlen);
 	}
 nomore:
 	if (error) {
 		/*
 		 * Failures do not advance the seq counter above. For the
 		 * case of ENOBUFS we will fall out and become ack-clocked,
 		 * capping the cwnd at the current flight.
 		 * Everything else will just have to retransmit with the
 		 * timer (no pacer).
 		 */
 		SOCKBUF_UNLOCK_ASSERT(sb);
 		BBR_STAT_INC(bbr_saw_oerr);
 		/* Clear all delay/early tracks */
 		bbr->r_ctl.rc_hptsi_agg_delay = 0;
 		bbr->r_ctl.rc_agg_early = 0;
 		bbr->r_agg_early_set = 0;
 		bbr->output_error_seen = 1;
 		if (bbr->oerror_cnt < 0xf)
 			bbr->oerror_cnt++;
 		if (bbr_max_net_error_cnt && (bbr->oerror_cnt >= bbr_max_net_error_cnt)) {
 			/* drop the session */
 			return (-ENETDOWN);
 		}
 		switch (error) {
 		case ENOBUFS:
 			/*
 			 * Make this guy have to get acks to send
 			 * more, but let's make sure we don't
 			 * slam him below a T-O (1MSS).
 			 */
 			if (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT) {
 				tp->snd_cwnd = ctf_flight_size(tp, (bbr->r_ctl.rc_sacked +
 								    bbr->r_ctl.rc_lost_bytes)) - maxseg;
 				if (tp->snd_cwnd < maxseg)
 					tp->snd_cwnd = maxseg;
 			}
 			slot = (bbr_error_base_paceout + 1) << bbr->oerror_cnt;
 			BBR_STAT_INC(bbr_saw_enobuf);
 			if (bbr->bbr_hdrw_pacing)
 				counter_u64_add(bbr_hdwr_pacing_enobuf, 1);
 			else
 				counter_u64_add(bbr_nohdwr_pacing_enobuf, 1);
 			/*
 			 * Here even in the enobuf's case we want to do our
 			 * state update. The reason being we may have been
 			 * called by the input function. If so we have had
 			 * things change.
 			 */
 			error = 0;
 			goto enobufs;
 		case EMSGSIZE:
 			/*
 			 * For some reason the interface we used initially
 			 * to send segments changed to another or lowered
 			 * its MTU. If TSO was active we either got an
 			 * interface without TSO capabilities or TSO was
 			 * turned off. If we obtained mtu from ip_output()
 			 * then update it and try again.
 			 */
 			/* Turn on tracing (or try to) */
 			{
 				int old_maxseg;
 
 				old_maxseg = tp->t_maxseg;
 				BBR_STAT_INC(bbr_saw_emsgsiz);
 				bbr_log_msgsize_fail(bbr, tp, len, maxseg, mtu, csum_flags, tso, cts);
 				if (mtu != 0)
 					tcp_mss_update(tp, -1, mtu, NULL, NULL);
 				if (old_maxseg <= tp->t_maxseg) {
 					/* Huh it did not shrink? */
 					tp->t_maxseg = old_maxseg - 40;
 					bbr_log_msgsize_fail(bbr, tp, len, maxseg, mtu, 0, tso, cts);
 				}
 				/*
 				 * Nuke all other things that can interfere
 				 * with slot
 				 */
 				if ((tot_len + len) && (len >= tp->t_maxseg)) {
 					slot = bbr_get_pacing_delay(bbr,
 					    bbr->r_ctl.rc_bbr_hptsi_gain,
 					    (tot_len + len), cts, 0);
 					if (slot < bbr_error_base_paceout)
 						slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
 				} else
 					slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
 				bbr->rc_output_starts_timer = 1;
 				bbr_start_hpts_timer(bbr, tp, cts, 10, slot,
 				    tot_len);
 				return (error);
 			}
 		case EPERM:
 			tp->t_softerror = error;
 			/* FALLTHROUGH */
 		case EHOSTDOWN:
 		case EHOSTUNREACH:
 		case ENETDOWN:
 		case ENETUNREACH:
 			if (TCPS_HAVERCVDSYN(tp->t_state)) {
 				tp->t_softerror = error;
 			}
 			/* FALLTHROUGH */
 		default:
 			slot = (bbr_error_base_paceout + 3) << bbr->oerror_cnt;
 			bbr->rc_output_starts_timer = 1;
 			bbr_start_hpts_timer(bbr, tp, cts, 11, slot, 0);
 			return (error);
 		}
 #ifdef STATS
 	} else if (((tp->t_flags & TF_GPUTINPROG) == 0) &&
 		    len &&
 		    (rsm == NULL) &&
 	    (bbr->rc_in_persist == 0)) {
 		tp->gput_seq = bbr_seq;
 		tp->gput_ack = bbr_seq +
 		    min(sbavail(&so->so_snd) - sb_offset, sendwin);
 		tp->gput_ts = cts;
 		tp->t_flags |= TF_GPUTINPROG;
 #endif
 	}
 	KMOD_TCPSTAT_INC(tcps_sndtotal);
 	if ((bbr->bbr_hdw_pace_ena) &&
 	    (bbr->bbr_attempt_hdwr_pace == 0) &&
 	    (bbr->rc_past_init_win) &&
 	    (bbr->rc_bbr_state != BBR_STATE_STARTUP) &&
 	    (get_filter_value(&bbr->r_ctl.rc_delrate)) &&
 	    (inp->inp_route.ro_nh &&
 	     inp->inp_route.ro_nh->nh_ifp)) {
 		/*
 		 * We are past the initial window and
 		 * have at least one measurement, so we
 		 * could use hardware pacing if it's available.
 		 * We have an interface and we have not attempted
 		 * to set up hardware pacing; let's try to now.
 		 */
 		uint64_t rate_wanted;
 		int err = 0;
 
 		rate_wanted = bbr_get_hardware_rate(bbr);
 		bbr->bbr_attempt_hdwr_pace = 1;
 		bbr->r_ctl.crte = tcp_set_pacing_rate(bbr->rc_tp,
 						      inp->inp_route.ro_nh->nh_ifp,
 						      rate_wanted,
 						      (RS_PACING_GEQ|RS_PACING_SUB_OK),
 						      &err, NULL);
 		if (bbr->r_ctl.crte) {
 			bbr_type_log_hdwr_pacing(bbr,
 						 bbr->r_ctl.crte->ptbl->rs_ifp,
 						 rate_wanted,
 						 bbr->r_ctl.crte->rate,
 						 __LINE__, cts, err);
 			BBR_STAT_INC(bbr_hdwr_rl_add_ok);
 			counter_u64_add(bbr_flows_nohdwr_pacing, -1);
 			counter_u64_add(bbr_flows_whdwr_pacing, 1);
 			bbr->bbr_hdrw_pacing = 1;
 			/* Now what is our gain status? */
 			if (bbr->r_ctl.crte->rate < rate_wanted) {
 				/* We have a problem */
 				bbr_setup_less_of_rate(bbr, cts,
 						       bbr->r_ctl.crte->rate, rate_wanted);
 			} else {
 				/* We are good */
 				bbr->gain_is_limited = 0;
 				bbr->skip_gain = 0;
 			}
 			tcp_bbr_tso_size_check(bbr, cts);
 		} else {
 			bbr_type_log_hdwr_pacing(bbr,
 						 inp->inp_route.ro_nh->nh_ifp,
 						 rate_wanted,
 						 0,
 						 __LINE__, cts, err);
 			BBR_STAT_INC(bbr_hdwr_rl_add_fail);
 		}
 	}
 	if (bbr->bbr_hdrw_pacing) {
 		/*
 		 * Worry about cases where the route
 		 * changed, or something else happened that
 		 * caused us to lose our hardware pacing,
 		 * possibly during the last ip_output call.
 		 */
 		if (inp->inp_snd_tag == NULL) {
 			/* A change during ip output disabled hw pacing? */
 			bbr->bbr_hdrw_pacing = 0;
 		} else if ((inp->inp_route.ro_nh == NULL) ||
 		    (inp->inp_route.ro_nh->nh_ifp != inp->inp_snd_tag->ifp)) {
 			/*
 			 * We had an interface or route change,
 			 * detach from the current hdwr pacing
 			 * and setup to re-attempt next go
 			 * round.
 			 */
 			bbr->bbr_hdrw_pacing = 0;
 			bbr->bbr_attempt_hdwr_pace = 0;
 			tcp_rel_pacing_rate(bbr->r_ctl.crte, bbr->rc_tp);
 			tcp_bbr_tso_size_check(bbr, cts);
 		}
 	}
 	/*
 	 * Data sent (as far as we can tell). If this advertises a larger
 	 * window than any other segment, then remember the size of the
 	 * advertised window. Any pending ACK has now been sent.
 	 */
 	if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
 		tp->rcv_adv = tp->rcv_nxt + recwin;
 
 	tp->last_ack_sent = tp->rcv_nxt;
 	if ((error == 0) &&
 	    (bbr->r_ctl.rc_pace_max_segs > tp->t_maxseg) &&
 	    (doing_tlp == 0) &&
 	    (tso == 0) &&
 	    (len > 0) &&
 	    ((flags & TH_RST) == 0) &&
 	    ((flags & TH_SYN) == 0) &&
 	    (IN_RECOVERY(tp->t_flags) == 0) &&
 	    (bbr->rc_in_persist == 0) &&
 	    (tot_len < bbr->r_ctl.rc_pace_max_segs)) {
 		/*
 		 * For non-TSO we need to goto again until we have sent out
 		 * enough data to match what we pace out (hptsi) every hptsi
 		 * interval.
 		 */
 		if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
 			/* Make sure snd_nxt is drug up */
 			tp->snd_nxt = tp->snd_max;
 		}
 		if (rsm != NULL) {
 			rsm = NULL;
 			goto skip_again;
 		}
 		rsm = NULL;
 		sack_rxmit = 0;
 		tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
 		goto again;
 	}
 skip_again:
 	if ((error == 0) && (flags & TH_FIN))
 		tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
 	if ((error == 0) && (flags & TH_RST))
 		tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
 	if (((flags & (TH_RST | TH_SYN | TH_FIN)) == 0) && tot_len) {
 		/*
 		 * Calculate/Re-Calculate the hptsi slot in usecs based on
 		 * what we have sent so far
 		 */
 		slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
 		if (bbr->rc_no_pacing)
 			slot = 0;
 	}
 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
 enobufs:
 	if (bbr->rc_use_google == 0)
 		bbr_check_bbr_for_state(bbr, cts, __LINE__, 0);
 	bbr_cwnd_limiting(tp, bbr, ctf_flight_size(tp, (bbr->r_ctl.rc_sacked +
 							bbr->r_ctl.rc_lost_bytes)));
 	bbr->rc_output_starts_timer = 1;
 	if (bbr->bbr_use_rack_cheat &&
 	    (more_to_rxt ||
 	     ((bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts)) != NULL))) {
 		/* Rack cheats and shotguns out all rxt's 1ms apart */
 		if (slot > 1000)
 			slot = 1000;
 	}
 	if (bbr->bbr_hdrw_pacing && (bbr->hw_pacing_set == 0)) {
 		/*
 		 * We don't change the tso size until some number of sends
 		 * have occurred, to give the hardware commands time to get
 		 * down to the interface.
 		 */
 		bbr->r_ctl.bbr_hdwr_cnt_noset_snt++;
 		if (bbr->r_ctl.bbr_hdwr_cnt_noset_snt >= bbr_hdwr_pacing_delay_cnt) {
 			bbr->hw_pacing_set = 1;
 			tcp_bbr_tso_size_check(bbr, cts);
 		}
 	}
 	bbr_start_hpts_timer(bbr, tp, cts, 12, slot, tot_len);
 	if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
 		/* Make sure snd_nxt is drug up */
 		tp->snd_nxt = tp->snd_max;
 	}
 	return (error);
 
 }
 
 /*
  * See bbr_output_wtime() for return values.
  */
 static int
 bbr_output(struct tcpcb *tp)
 {
 	int32_t ret;
 	struct timeval tv;
 
 	NET_EPOCH_ASSERT();
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	(void)tcp_get_usecs(&tv);
 	ret = bbr_output_wtime(tp, &tv);
 	return (ret);
 }
 
 static void
 bbr_mtu_chg(struct tcpcb *tp)
 {
 	struct tcp_bbr *bbr;
 	struct bbr_sendmap *rsm, *frsm = NULL;
 	uint32_t maxseg;
 
 	/*
 	 * The MTU has changed. a) Clear the sack filter. b) Mark everything
 	 * over the current size as SACK_PASS so a retransmit will occur.
 	 */
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	maxseg = tp->t_maxseg - bbr->rc_last_options;
 	sack_filter_clear(&bbr->r_ctl.bbr_sf, tp->snd_una);
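 	/*
 	 * Any unacked send larger than the new maxseg is also accounted
 	 * as lost below (if bbr_is_lost() agrees) and the first such rsm
 	 * is queued up as the next resend.
 	 */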
 	TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) {
 		/* Don't mess with ones acked (by sack?) */
 		if (rsm->r_flags & BBR_ACKED)
 			continue;
 		if ((rsm->r_end - rsm->r_start) > maxseg) {
 			/*
 			 * We mark sack-passed on all the previous large
 			 * sends we did. This will force them to be
 			 * retransmitted.
 			 */
 			rsm->r_flags |= BBR_SACK_PASSED;
 			if (((rsm->r_flags & BBR_MARKED_LOST) == 0) &&
 			    bbr_is_lost(bbr, rsm, bbr->r_ctl.rc_rcvtime)) {
 				bbr->r_ctl.rc_lost_bytes += rsm->r_end - rsm->r_start;
 				bbr->r_ctl.rc_lost += rsm->r_end - rsm->r_start;
 				rsm->r_flags |= BBR_MARKED_LOST;
 			}
 			if (frsm == NULL)
 				frsm = rsm;
 		}
 	}
 	if (frsm) {
 		bbr->r_ctl.rc_resend = frsm;
 	}
 }
 
 static int
 bbr_pru_options(struct tcpcb *tp, int flags)
 {
 	if (flags & PRUS_OOB)
 		return (EOPNOTSUPP);
 	return (0);
 }
 
 static void
 bbr_switch_failed(struct tcpcb *tp)
 {
 	/*
 	 * If a switch fails we only need to
 	 * make sure mbuf_queuing is still in place.
 	 * We also need to make sure we are still in
 	 * ticks granularity (though we should probably
 	 * change bbr to go to USECs).
 	 *
 	 * For timers we need to see if we are still in the
 	 * pacer (if our flags are up); if so we are good, if
 	 * not we need to get back into the pacer.
 	 */
 	struct timeval tv;
 	uint32_t cts;
 	uint32_t toval;
 	struct tcp_bbr *bbr;
 	struct hpts_diag diag;
 
 	tp->t_flags2 |= TF2_CANNOT_DO_ECN;
 	tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
 	if (tp->t_in_hpts > IHPTS_NONE) {
 		return;
 	}
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	cts = tcp_get_usecs(&tv);
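 	/*
 	 * Pick a timeout: the time remaining until the pacer was due to
 	 * run, else the time remaining on the pending timer, else a single
 	 * HPTS slot, and re-insert ourselves into the pacer with it.
 	 */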
 	if (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
 		if (TSTMP_GT(bbr->rc_pacer_started, cts)) {
 			toval = bbr->rc_pacer_started - cts;
 		} else {
 			/* one slot please */
 			toval = HPTS_TICKS_PER_SLOT;
 		}
 	} else if (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 		if (TSTMP_GT(bbr->r_ctl.rc_timer_exp, cts)) {
 			toval = bbr->r_ctl.rc_timer_exp - cts;
 		} else {
 			/* one slot please */
 			toval = HPTS_TICKS_PER_SLOT;
 		}
 	} else
 		toval = HPTS_TICKS_PER_SLOT;
 	(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
 				   __LINE__, &diag);
 	bbr_log_hpts_diag(bbr, cts, &diag);
 }
 
 struct tcp_function_block __tcp_bbr = {
 	.tfb_tcp_block_name = __XSTRING(STACKNAME),
 	.tfb_tcp_output = bbr_output,
 	.tfb_do_queued_segments = ctf_do_queued_segments,
 	.tfb_do_segment_nounlock = bbr_do_segment_nounlock,
 	.tfb_tcp_do_segment = bbr_do_segment,
 	.tfb_tcp_ctloutput = bbr_ctloutput,
 	.tfb_tcp_fb_init = bbr_init,
 	.tfb_tcp_fb_fini = bbr_fini,
 	.tfb_tcp_timer_stop_all = bbr_stopall,
 	.tfb_tcp_rexmit_tmr = bbr_remxt_tmr,
 	.tfb_tcp_handoff_ok = bbr_handoff_ok,
 	.tfb_tcp_mtu_chg = bbr_mtu_chg,
 	.tfb_pru_options = bbr_pru_options,
 	.tfb_switch_failed = bbr_switch_failed,
 	.tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
 };
 
 /*
  * bbr_ctloutput() must drop the inpcb lock before performing copyin on
  * socket option arguments.  When it re-acquires the lock after the copy, it
  * has to revalidate that the connection is still valid for the socket
  * option.
  */
 static int
 bbr_set_sockopt(struct tcpcb *tp, struct sockopt *sopt)
 {
 	struct epoch_tracker et;
 	struct inpcb *inp = tptoinpcb(tp);
 	struct tcp_bbr *bbr;
 	int32_t error = 0, optval;
 
 	switch (sopt->sopt_level) {
 	case IPPROTO_IPV6:
 	case IPPROTO_IP:
 		return (tcp_default_ctloutput(tp, sopt));
 	}
 
 	switch (sopt->sopt_name) {
 	case TCP_RACK_PACE_MAX_SEG:
 	case TCP_RACK_MIN_TO:
 	case TCP_RACK_REORD_THRESH:
 	case TCP_RACK_REORD_FADE:
 	case TCP_RACK_TLP_THRESH:
 	case TCP_RACK_PKT_DELAY:
 	case TCP_BBR_ALGORITHM:
 	case TCP_BBR_TSLIMITS:
 	case TCP_BBR_IWINTSO:
 	case TCP_BBR_RECFORCE:
 	case TCP_BBR_STARTUP_PG:
 	case TCP_BBR_DRAIN_PG:
 	case TCP_BBR_RWND_IS_APP:
 	case TCP_BBR_PROBE_RTT_INT:
 	case TCP_BBR_PROBE_RTT_GAIN:
 	case TCP_BBR_PROBE_RTT_LEN:
 	case TCP_BBR_STARTUP_LOSS_EXIT:
 	case TCP_BBR_USEDEL_RATE:
 	case TCP_BBR_MIN_RTO:
 	case TCP_BBR_MAX_RTO:
 	case TCP_BBR_PACE_PER_SEC:
 	case TCP_DELACK:
 	case TCP_BBR_PACE_DEL_TAR:
 	case TCP_BBR_SEND_IWND_IN_TSO:
 	case TCP_BBR_EXTRA_STATE:
 	case TCP_BBR_UTTER_MAX_TSO:
 	case TCP_BBR_MIN_TOPACEOUT:
 	case TCP_BBR_FLOOR_MIN_TSO:
 	case TCP_BBR_TSTMP_RAISES:
 	case TCP_BBR_POLICER_DETECT:
 	case TCP_BBR_USE_RACK_CHEAT:
 	case TCP_DATA_AFTER_CLOSE:
 	case TCP_BBR_HDWR_PACE:
 	case TCP_BBR_PACE_SEG_MAX:
 	case TCP_BBR_PACE_SEG_MIN:
 	case TCP_BBR_PACE_CROSS:
 	case TCP_BBR_PACE_OH:
 	case TCP_BBR_TMR_PACE_OH:
 	case TCP_BBR_RACK_RTT_USE:
 	case TCP_BBR_RETRAN_WTSO:
 		break;
 	default:
 		return (tcp_default_ctloutput(tp, sopt));
 	}
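 	/*
 	 * Drop the inpcb lock across the copyin, then revalidate the
 	 * connection and the stack before applying the option below.
 	 */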
 	INP_WUNLOCK(inp);
 	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 	if (error)
 		return (error);
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNRESET);
 	}
 	if (tp->t_fb != &__tcp_bbr) {
 		INP_WUNLOCK(inp);
 		return (ENOPROTOOPT);
 	}
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	switch (sopt->sopt_name) {
 	case TCP_BBR_PACE_PER_SEC:
 		BBR_OPTS_INC(tcp_bbr_pace_per_sec);
 		bbr->r_ctl.bbr_hptsi_per_second = optval;
 		break;
 	case TCP_BBR_PACE_DEL_TAR:
 		BBR_OPTS_INC(tcp_bbr_pace_del_tar);
 		bbr->r_ctl.bbr_hptsi_segments_delay_tar = optval;
 		break;
 	case TCP_BBR_PACE_SEG_MAX:
 		BBR_OPTS_INC(tcp_bbr_pace_seg_max);
 		bbr->r_ctl.bbr_hptsi_segments_max = optval;
 		break;
 	case TCP_BBR_PACE_SEG_MIN:
 		BBR_OPTS_INC(tcp_bbr_pace_seg_min);
 		bbr->r_ctl.bbr_hptsi_bytes_min = optval;
 		break;
 	case TCP_BBR_PACE_CROSS:
 		BBR_OPTS_INC(tcp_bbr_pace_cross);
 		bbr->r_ctl.bbr_cross_over = optval;
 		break;
 	case TCP_BBR_ALGORITHM:
 		BBR_OPTS_INC(tcp_bbr_algorithm);
 		if (optval && (bbr->rc_use_google == 0)) {
 			/* Turn on the google mode */
 			bbr_google_mode_on(bbr);
 			if ((optval > 3) && (optval < 500)) {
 				/*
 				 * Must be greater than 0.3%
 				 * and less than 50.0%.
 				 */
 				bbr->r_ctl.bbr_google_discount = optval;
 			}
 		} else if ((optval == 0) && (bbr->rc_use_google == 1)) {
 			/* Turn off the google mode */
 			bbr_google_mode_off(bbr);
 		}
 		break;
 	case TCP_BBR_TSLIMITS:
 		BBR_OPTS_INC(tcp_bbr_tslimits);
 		if (optval == 1)
 			bbr->rc_use_ts_limit = 1;
 		else if (optval == 0)
 			bbr->rc_use_ts_limit = 0;
 		else
 			error = EINVAL;
 		break;
 
 	case TCP_BBR_IWINTSO:
 		BBR_OPTS_INC(tcp_bbr_iwintso);
 		if ((optval >= 0) && (optval < 128)) {
 			uint32_t twin;
 
 			bbr->rc_init_win = optval;
 			twin = bbr_initial_cwnd(bbr, tp);
 			if ((bbr->rc_past_init_win == 0) && (twin > tp->snd_cwnd))
 				tp->snd_cwnd = twin;
 			else
 				error = EBUSY;
 		} else
 			error = EINVAL;
 		break;
 	case TCP_BBR_STARTUP_PG:
 		BBR_OPTS_INC(tcp_bbr_startup_pg);
 		if ((optval > 0) && (optval < BBR_MAX_GAIN_VALUE)) {
 			bbr->r_ctl.rc_startup_pg = optval;
 			if (bbr->rc_bbr_state == BBR_STATE_STARTUP) {
 				bbr->r_ctl.rc_bbr_hptsi_gain = optval;
 			}
 		} else
 			error = EINVAL;
 		break;
 	case TCP_BBR_DRAIN_PG:
 		BBR_OPTS_INC(tcp_bbr_drain_pg);
 		if ((optval > 0) && (optval < BBR_MAX_GAIN_VALUE))
 			bbr->r_ctl.rc_drain_pg = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_BBR_PROBE_RTT_LEN:
 		BBR_OPTS_INC(tcp_bbr_probertt_len);
 		if (optval <= 1)
 			reset_time_small(&bbr->r_ctl.rc_rttprop, (optval * USECS_IN_SECOND));
 		else
 			error = EINVAL;
 		break;
 	case TCP_BBR_PROBE_RTT_GAIN:
 		BBR_OPTS_INC(tcp_bbr_probertt_gain);
 		if (optval <= BBR_UNIT)
 			bbr->r_ctl.bbr_rttprobe_gain_val = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_BBR_PROBE_RTT_INT:
 		BBR_OPTS_INC(tcp_bbr_probe_rtt_int);
 		if (optval > 1000)
 			bbr->r_ctl.rc_probertt_int = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_BBR_MIN_TOPACEOUT:
 		BBR_OPTS_INC(tcp_bbr_topaceout);
 		if (optval == 0) {
 			bbr->no_pacing_until = 0;
 			bbr->rc_no_pacing = 0;
 		} else if (optval <= 0x00ff) {
 			bbr->no_pacing_until = optval;
 			if ((bbr->r_ctl.rc_pkt_epoch < bbr->no_pacing_until) &&
 			    (bbr->rc_bbr_state == BBR_STATE_STARTUP)){
 				/* Turn on no pacing */
 				bbr->rc_no_pacing = 1;
 			}
 		} else
 			error = EINVAL;
 		break;
 	case TCP_BBR_STARTUP_LOSS_EXIT:
 		BBR_OPTS_INC(tcp_bbr_startup_loss_exit);
 		bbr->rc_loss_exit = optval;
 		break;
 	case TCP_BBR_USEDEL_RATE:
 		error = EINVAL;
 		break;
 	case TCP_BBR_MIN_RTO:
 		BBR_OPTS_INC(tcp_bbr_min_rto);
 		bbr->r_ctl.rc_min_rto_ms = optval;
 		break;
 	case TCP_BBR_MAX_RTO:
 		BBR_OPTS_INC(tcp_bbr_max_rto);
 		bbr->rc_max_rto_sec = optval;
 		break;
 	case TCP_RACK_MIN_TO:
 		/* Minimum time between rack t-o's in ms */
 		BBR_OPTS_INC(tcp_rack_min_to);
 		bbr->r_ctl.rc_min_to = optval;
 		break;
 	case TCP_RACK_REORD_THRESH:
 		/* RACK reorder threshold (shift amount) */
 		BBR_OPTS_INC(tcp_rack_reord_thresh);
 		if ((optval > 0) && (optval < 31))
 			bbr->r_ctl.rc_reorder_shift = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_RACK_REORD_FADE:
 		/* Does reordering fade after ms time */
 		BBR_OPTS_INC(tcp_rack_reord_fade);
 		bbr->r_ctl.rc_reorder_fade = optval;
 		break;
 	case TCP_RACK_TLP_THRESH:
 		/* RACK TLP threshold, i.e. srtt + (srtt/N) */
 		BBR_OPTS_INC(tcp_rack_tlp_thresh);
 		if (optval)
 			bbr->rc_tlp_threshold = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_BBR_USE_RACK_CHEAT:
 		BBR_OPTS_INC(tcp_use_rackcheat);
 		if (bbr->rc_use_google) {
 			error = EINVAL;
 			break;
 		}
 		BBR_OPTS_INC(tcp_rack_cheat);
 		if (optval)
 			bbr->bbr_use_rack_cheat = 1;
 		else
 			bbr->bbr_use_rack_cheat = 0;
 		break;
 	case TCP_BBR_FLOOR_MIN_TSO:
 		BBR_OPTS_INC(tcp_utter_max_tso);
 		if ((optval >= 0) && (optval < 40))
 			bbr->r_ctl.bbr_hptsi_segments_floor = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_BBR_UTTER_MAX_TSO:
 		BBR_OPTS_INC(tcp_utter_max_tso);
 		if ((optval >= 0) && (optval < 0xffff))
 			bbr->r_ctl.bbr_utter_max = optval;
 		else
 			error = EINVAL;
 		break;
 
 	case TCP_BBR_EXTRA_STATE:
 		BBR_OPTS_INC(tcp_extra_state);
 		if (optval)
 			bbr->rc_use_idle_restart = 1;
 		else
 			bbr->rc_use_idle_restart = 0;
 		break;
 	case TCP_BBR_SEND_IWND_IN_TSO:
 		BBR_OPTS_INC(tcp_iwnd_tso);
 		if (optval) {
 			bbr->bbr_init_win_cheat = 1;
 			if (bbr->rc_past_init_win == 0) {
 				uint32_t cts;
 				cts = tcp_get_usecs(&bbr->rc_tv);
 				tcp_bbr_tso_size_check(bbr, cts);
 			}
 		} else
 			bbr->bbr_init_win_cheat = 0;
 		break;
 	case TCP_BBR_HDWR_PACE:
 		BBR_OPTS_INC(tcp_hdwr_pacing);
 		if (optval){
 			bbr->bbr_hdw_pace_ena = 1;
 			bbr->bbr_attempt_hdwr_pace = 0;
 		} else {
 			bbr->bbr_hdw_pace_ena = 0;
 #ifdef RATELIMIT
 			if (bbr->r_ctl.crte != NULL) {
 				tcp_rel_pacing_rate(bbr->r_ctl.crte, tp);
 				bbr->r_ctl.crte = NULL;
 			}
 #endif
 		}
 		break;
 
 	case TCP_DELACK:
 		BBR_OPTS_INC(tcp_delack);
 		if (optval < 100) {
 			if (optval == 0) /* off */
 				tp->t_delayed_ack = 0;
 			else if (optval == 1) /* on, which maps to 2 */
 				tp->t_delayed_ack = 2;
 			else /* 2 through 99, used as-is */
 				tp->t_delayed_ack = optval;
 			if (tp->t_flags & TF_DELACK) {
 				tp->t_flags &= ~TF_DELACK;
 				tp->t_flags |= TF_ACKNOW;
 				NET_EPOCH_ENTER(et);
 				bbr_output(tp);
 				NET_EPOCH_EXIT(et);
 			}
 		} else
 			error = EINVAL;
 		break;
 	case TCP_RACK_PKT_DELAY:
 		/* RACK additional delay in ms, i.e. rack-rtt + reorder + N */
 		BBR_OPTS_INC(tcp_rack_pkt_delay);
 		bbr->r_ctl.rc_pkt_delay = optval;
 		break;
 
 	case TCP_BBR_RETRAN_WTSO:
 		BBR_OPTS_INC(tcp_retran_wtso);
 		if (optval)
 			bbr->rc_resends_use_tso = 1;
 		else
 			bbr->rc_resends_use_tso = 0;
 		break;
 	case TCP_DATA_AFTER_CLOSE:
 		BBR_OPTS_INC(tcp_data_ac);
 		if (optval)
 			bbr->rc_allow_data_af_clo = 1;
 		else
 			bbr->rc_allow_data_af_clo = 0;
 		break;
 	case TCP_BBR_POLICER_DETECT:
 		BBR_OPTS_INC(tcp_policer_det);
 		if (bbr->rc_use_google == 0)
 			error = EINVAL;
 		else if (optval)
 			bbr->r_use_policer = 1;
 		else
 			bbr->r_use_policer = 0;
 		break;
 
 	case TCP_BBR_TSTMP_RAISES:
 		BBR_OPTS_INC(tcp_ts_raises);
 		if (optval)
 			bbr->ts_can_raise = 1;
 		else
 			bbr->ts_can_raise = 0;
 		break;
 	case TCP_BBR_TMR_PACE_OH:
 		BBR_OPTS_INC(tcp_pacing_oh_tmr);
 		if (bbr->rc_use_google) {
 			error = EINVAL;
 		} else {
 			if (optval)
 				bbr->r_ctl.rc_incr_tmrs = 1;
 			else
 				bbr->r_ctl.rc_incr_tmrs = 0;
 		}
 		break;
 	case TCP_BBR_PACE_OH:
 		BBR_OPTS_INC(tcp_pacing_oh);
 		if (bbr->rc_use_google) {
 			error = EINVAL;
 		} else {
 			if (optval > (BBR_INCL_TCP_OH|
 				      BBR_INCL_IP_OH|
 				      BBR_INCL_ENET_OH)) {
 				error = EINVAL;
 				break;
 			}
 			if (optval & BBR_INCL_TCP_OH)
 				bbr->r_ctl.rc_inc_tcp_oh = 1;
 			else
 				bbr->r_ctl.rc_inc_tcp_oh = 0;
 			if (optval & BBR_INCL_IP_OH)
 				bbr->r_ctl.rc_inc_ip_oh = 1;
 			else
 				bbr->r_ctl.rc_inc_ip_oh = 0;
 			if (optval & BBR_INCL_ENET_OH)
 				bbr->r_ctl.rc_inc_enet_oh = 1;
 			else
 				bbr->r_ctl.rc_inc_enet_oh = 0;
 		}
 		break;
 	default:
 		return (tcp_default_ctloutput(tp, sopt));
 		break;
 	}
 	tcp_log_socket_option(tp, sopt->sopt_name, optval, error);
 	INP_WUNLOCK(inp);
 	return (error);
 }
 
 /*
  * return 0 on success, error-num on failure
  */
 static int
 bbr_get_sockopt(struct tcpcb *tp, struct sockopt *sopt)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct tcp_bbr *bbr;
 	int32_t error, optval;
 
 	bbr = (struct tcp_bbr *)tp->t_fb_ptr;
 	if (bbr == NULL) {
 		INP_WUNLOCK(inp);
 		return (EINVAL);
 	}
 	/*
 	 * Because all our options are either boolean or an int, we can just
 	 * pull everything into optval and then unlock and copy. If we ever
 	 * add an option that is not an int, then this will have quite an
 	 * impact on this routine.
 	 */
 	switch (sopt->sopt_name) {
 	case TCP_BBR_PACE_PER_SEC:
 		optval = bbr->r_ctl.bbr_hptsi_per_second;
 		break;
 	case TCP_BBR_PACE_DEL_TAR:
 		optval = bbr->r_ctl.bbr_hptsi_segments_delay_tar;
 		break;
 	case TCP_BBR_PACE_SEG_MAX:
 		optval = bbr->r_ctl.bbr_hptsi_segments_max;
 		break;
 	case TCP_BBR_MIN_TOPACEOUT:
 		optval = bbr->no_pacing_until;
 		break;
 	case TCP_BBR_PACE_SEG_MIN:
 		optval = bbr->r_ctl.bbr_hptsi_bytes_min;
 		break;
 	case TCP_BBR_PACE_CROSS:
 		optval = bbr->r_ctl.bbr_cross_over;
 		break;
 	case TCP_BBR_ALGORITHM:
 		optval = bbr->rc_use_google;
 		break;
 	case TCP_BBR_TSLIMITS:
 		optval = bbr->rc_use_ts_limit;
 		break;
 	case TCP_BBR_IWINTSO:
 		optval = bbr->rc_init_win;
 		break;
 	case TCP_BBR_STARTUP_PG:
 		optval = bbr->r_ctl.rc_startup_pg;
 		break;
 	case TCP_BBR_DRAIN_PG:
 		optval = bbr->r_ctl.rc_drain_pg;
 		break;
 	case TCP_BBR_PROBE_RTT_INT:
 		optval = bbr->r_ctl.rc_probertt_int;
 		break;
 	case TCP_BBR_PROBE_RTT_LEN:
 		optval = (bbr->r_ctl.rc_rttprop.cur_time_limit / USECS_IN_SECOND);
 		break;
 	case TCP_BBR_PROBE_RTT_GAIN:
 		optval = bbr->r_ctl.bbr_rttprobe_gain_val;
 		break;
 	case TCP_BBR_STARTUP_LOSS_EXIT:
 		optval = bbr->rc_loss_exit;
 		break;
 	case TCP_BBR_USEDEL_RATE:
 		error = EINVAL;
 		break;
 	case TCP_BBR_MIN_RTO:
 		optval = bbr->r_ctl.rc_min_rto_ms;
 		break;
 	case TCP_BBR_MAX_RTO:
 		optval = bbr->rc_max_rto_sec;
 		break;
 	case TCP_RACK_PACE_MAX_SEG:
 		/* Max segments in a pace */
 		optval = bbr->r_ctl.rc_pace_max_segs;
 		break;
 	case TCP_RACK_MIN_TO:
 		/* Minimum time between rack t-o's in ms */
 		optval = bbr->r_ctl.rc_min_to;
 		break;
 	case TCP_RACK_REORD_THRESH:
 		/* RACK reorder threshold (shift amount) */
 		optval = bbr->r_ctl.rc_reorder_shift;
 		break;
 	case TCP_RACK_REORD_FADE:
 		/* Does reordering fade after ms time */
 		optval = bbr->r_ctl.rc_reorder_fade;
 		break;
 	case TCP_BBR_USE_RACK_CHEAT:
 		/* Do we use the rack cheat for rxt */
 		optval = bbr->bbr_use_rack_cheat;
 		break;
 	case TCP_BBR_FLOOR_MIN_TSO:
 		optval = bbr->r_ctl.bbr_hptsi_segments_floor;
 		break;
 	case TCP_BBR_UTTER_MAX_TSO:
 		optval = bbr->r_ctl.bbr_utter_max;
 		break;
 	case TCP_BBR_SEND_IWND_IN_TSO:
 		/* Do we send TSO size segments initially */
 		optval = bbr->bbr_init_win_cheat;
 		break;
 	case TCP_BBR_EXTRA_STATE:
 		optval = bbr->rc_use_idle_restart;
 		break;
 	case TCP_RACK_TLP_THRESH:
 		/* RACK TLP threshold, i.e. srtt + (srtt/N) */
 		optval = bbr->rc_tlp_threshold;
 		break;
 	case TCP_RACK_PKT_DELAY:
 		/* RACK additional delay in ms, i.e. rack-rtt + reorder + N */
 		optval = bbr->r_ctl.rc_pkt_delay;
 		break;
 	case TCP_BBR_RETRAN_WTSO:
 		optval = bbr->rc_resends_use_tso;
 		break;
 	case TCP_DATA_AFTER_CLOSE:
 		optval = bbr->rc_allow_data_af_clo;
 		break;
 	case TCP_DELACK:
 		optval = tp->t_delayed_ack;
 		break;
 	case TCP_BBR_HDWR_PACE:
 		optval = bbr->bbr_hdw_pace_ena;
 		break;
 	case TCP_BBR_POLICER_DETECT:
 		optval = bbr->r_use_policer;
 		break;
 	case TCP_BBR_TSTMP_RAISES:
 		optval = bbr->ts_can_raise;
 		break;
 	case TCP_BBR_TMR_PACE_OH:
 		optval = bbr->r_ctl.rc_incr_tmrs;
 		break;
 	case TCP_BBR_PACE_OH:
 		optval = 0;
 		if (bbr->r_ctl.rc_inc_tcp_oh)
 			optval |= BBR_INCL_TCP_OH;
 		if (bbr->r_ctl.rc_inc_ip_oh)
 			optval |= BBR_INCL_IP_OH;
 		if (bbr->r_ctl.rc_inc_enet_oh)
 			optval |= BBR_INCL_ENET_OH;
 		break;
 	default:
 		return (tcp_default_ctloutput(tp, sopt));
 		break;
 	}
 	INP_WUNLOCK(inp);
 	error = sooptcopyout(sopt, &optval, sizeof optval);
 	return (error);
 }
 
 /*
  * return 0 on success, error-num on failure
  */
 static int
 bbr_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
 {
 	if (sopt->sopt_dir == SOPT_SET) {
 		return (bbr_set_sockopt(tp, sopt));
 	} else if (sopt->sopt_dir == SOPT_GET) {
 		return (bbr_get_sockopt(tp, sopt));
 	} else {
 		panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir);
 	}
 }
 
 static const char *bbr_stack_names[] = {
 	__XSTRING(STACKNAME),
 #ifdef STACKALIAS
 	__XSTRING(STACKALIAS),
 #endif
 };
 
 static bool bbr_mod_inited = false;
 
 static int
 tcp_addbbr(module_t mod, int32_t type, void *data)
 {
 	int32_t err = 0;
 	int num_stacks;
 
 	switch (type) {
 	case MOD_LOAD:
 		printf("Attempting to load " __XSTRING(MODNAME) "\n");
 		bbr_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
 		    sizeof(struct bbr_sendmap),
 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 		bbr_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
 		    sizeof(struct tcp_bbr),
 		    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
 		sysctl_ctx_init(&bbr_sysctl_ctx);
 		bbr_sysctl_root = SYSCTL_ADD_NODE(&bbr_sysctl_ctx,
 		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
 		    OID_AUTO,
 #ifdef STACKALIAS
 		    __XSTRING(STACKALIAS),
 #else
 		    __XSTRING(STACKNAME),
 #endif
 		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 		    "");
 		if (bbr_sysctl_root == NULL) {
 			printf("Failed to add sysctl node\n");
 			err = EFAULT;
 			goto free_uma;
 		}
 		bbr_init_sysctls();
 		num_stacks = nitems(bbr_stack_names);
 		err = register_tcp_functions_as_names(&__tcp_bbr, M_WAITOK,
 		    bbr_stack_names, &num_stacks);
 		if (err) {
 			printf("Failed to register %s stack name for "
 			    "%s module\n", bbr_stack_names[num_stacks],
 			    __XSTRING(MODNAME));
 			sysctl_ctx_free(&bbr_sysctl_ctx);
 	free_uma:
 			uma_zdestroy(bbr_zone);
 			uma_zdestroy(bbr_pcb_zone);
 			bbr_counter_destroy();
 			printf("Failed to register " __XSTRING(MODNAME)
 			    " module err:%d\n", err);
 			return (err);
 		}
 		tcp_lro_reg_mbufq();
 		bbr_mod_inited = true;
 		printf(__XSTRING(MODNAME) " is now available\n");
 		break;
 	case MOD_QUIESCE:
 		err = deregister_tcp_functions(&__tcp_bbr, true, false);
 		break;
 	case MOD_UNLOAD:
 		err = deregister_tcp_functions(&__tcp_bbr, false, true);
 		if (err == EBUSY)
 			break;
 		if (bbr_mod_inited) {
 			uma_zdestroy(bbr_zone);
 			uma_zdestroy(bbr_pcb_zone);
 			sysctl_ctx_free(&bbr_sysctl_ctx);
 			bbr_counter_destroy();
 			printf(__XSTRING(MODNAME)
 			    " is now no longer available\n");
 			bbr_mod_inited = false;
 		}
 		tcp_lro_dereg_mbufq();
 		err = 0;
 		break;
 	default:
 		return (EOPNOTSUPP);
 	}
 	return (err);
 }
 
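 /*
  * Kernel module glue: tcp_addbbr() above services the load/unload events,
  * the declaration below hooks it into the module system, and the tcphpts
  * dependency provides the pacing timer infrastructure BBR relies on.
  */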
 static moduledata_t tcp_bbr = {
 	.name = __XSTRING(MODNAME),
 	.evhand = tcp_addbbr,
 	.priv = 0
 };
 
 MODULE_VERSION(MODNAME, 1);
 DECLARE_MODULE(MODNAME, tcp_bbr, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
index e7027dd1b2dd..229f36008a6a 100644
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -1,24433 +1,24437 @@
 /*-
  * Copyright (c) 2016-2020 Netflix, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include <sys/cdefs.h>
 #include "opt_inet.h"
 #include "opt_inet6.h"
 #include "opt_ipsec.h"
 #include "opt_ratelimit.h"
 #include "opt_kern_tls.h"
 #if defined(INET) || defined(INET6)
 #include <sys/param.h>
 #include <sys/arb.h>
 #include <sys/module.h>
 #include <sys/kernel.h>
 #ifdef TCP_HHOOK
 #include <sys/hhook.h>
 #endif
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/mbuf.h>
 #include <sys/proc.h>		/* for proc0 declaration */
 #include <sys/socket.h>
 #include <sys/socketvar.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #ifdef STATS
 #include <sys/qmath.h>
 #include <sys/tree.h>
 #include <sys/stats.h> /* Must come after qmath.h and tree.h */
 #else
 #include <sys/tree.h>
 #endif
 #include <sys/refcount.h>
 #include <sys/queue.h>
 #include <sys/tim_filter.h>
 #include <sys/smp.h>
 #include <sys/kthread.h>
 #include <sys/kern_prefetch.h>
 #include <sys/protosw.h>
 #ifdef TCP_ACCOUNTING
 #include <sys/sched.h>
 #include <machine/cpu.h>
 #endif
 #include <vm/uma.h>
 
 #include <net/route.h>
 #include <net/route/nhop.h>
 #include <net/vnet.h>
 
 #define TCPSTATES		/* for logging */
 
 #include <netinet/in.h>
 #include <netinet/in_kdtrace.h>
 #include <netinet/in_pcb.h>
 #include <netinet/ip.h>
 #include <netinet/ip_icmp.h>	/* required for icmp_var.h */
 #include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
 #include <netinet/ip_var.h>
 #include <netinet/ip6.h>
 #include <netinet6/in6_pcb.h>
 #include <netinet6/ip6_var.h>
 #include <netinet/tcp.h>
 #define	TCPOUTFLAGS
 #include <netinet/tcp_fsm.h>
 #include <netinet/tcp_seq.h>
 #include <netinet/tcp_timer.h>
 #include <netinet/tcp_var.h>
 #include <netinet/tcp_log_buf.h>
 #include <netinet/tcp_syncache.h>
 #include <netinet/tcp_hpts.h>
 #include <netinet/tcp_ratelimit.h>
 #include <netinet/tcp_accounting.h>
 #include <netinet/tcpip.h>
 #include <netinet/cc/cc.h>
 #include <netinet/cc/cc_newreno.h>
 #include <netinet/tcp_fastopen.h>
 #include <netinet/tcp_lro.h>
 #ifdef NETFLIX_SHARED_CWND
 #include <netinet/tcp_shared_cwnd.h>
 #endif
 #ifdef TCP_OFFLOAD
 #include <netinet/tcp_offload.h>
 #endif
 #ifdef INET6
 #include <netinet6/tcp6_var.h>
 #endif
 #include <netinet/tcp_ecn.h>
 
 #include <netipsec/ipsec_support.h>
 
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 #include <netipsec/ipsec.h>
 #include <netipsec/ipsec6.h>
 #endif				/* IPSEC */
 
 #include <netinet/udp.h>
 #include <netinet/udp_var.h>
 #include <machine/in_cksum.h>
 
 #ifdef MAC
 #include <security/mac/mac_framework.h>
 #endif
 #include "sack_filter.h"
 #include "tcp_rack.h"
 #include "tailq_hash.h"
 #include "rack_bbr_common.h"
 
 uma_zone_t rack_zone;
 uma_zone_t rack_pcb_zone;
 
 #ifndef TICKS2SBT
 #define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
 #endif
 
 VNET_DECLARE(uint32_t, newreno_beta);
 VNET_DECLARE(uint32_t, newreno_beta_ecn);
 #define V_newreno_beta VNET(newreno_beta)
 #define V_newreno_beta_ecn VNET(newreno_beta_ecn)
 
 
 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");
 
 struct sysctl_ctx_list rack_sysctl_ctx;
 struct sysctl_oid *rack_sysctl_root;
 
 #define CUM_ACKED 1
 #define SACKED 2
 
 /*
  * The RACK module incorporates a number of
  * TCP ideas that have been put out into the IETF
  * over the last few years:
  * - Matt Mathis's Rate Halving which slowly drops
  *    the congestion window so that the ack clock can
  *    be maintained during a recovery.
  * - Yuchung Cheng's RACK TCP (for which it is named), which
  *    stops us using the number of dup acks and instead
  *    uses time as the gauge of when we retransmit.
  * - Reorder Detection of RFC 4737 and the Tail-Loss Probe draft
  *    of Dukkipati et al.
  * RACK depends on SACK, so if an endpoint arrives that
  * cannot do SACK the state machine below will shuttle the
  * connection back to using the "default" TCP stack that is
  * in FreeBSD.
  *
  * To implement RACK the original TCP stack was first decomposed
  * into a functional state machine with individual states
  * for each of the possible TCP connection states. The do_segment
  * function's role is to mandate that the connection supports SACK
  * initially and then ensure that the RACK state matches the connection
  * state before calling that state's do_segment function. Each
  * state is simplified because the original do_segment
  * has been decomposed and we *know* what state we are in (no
  * switches on the state) and all tests for SACK are gone. This
  * greatly simplifies what each state does.
  *
  * TCP output is also over-written with a new version since it
  * must maintain the new rack scoreboard.
  *
  */
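 /*
  * Illustrative sketch of the dispatch described above (not the actual
  * code): rack_do_segment() checks that RACK's view of the connection
  * matches tp->t_state and then invokes the matching per-state handler,
  * e.g. rack_do_established() or rack_do_fin_wait_1(), each of which can
  * assume SACK and never switches on the state itself.
  */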
 static int32_t rack_tlp_thresh = 1;
 static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
 static int32_t rack_tlp_use_greater = 1;
 static int32_t rack_reorder_thresh = 2;
 static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
 						 * - 60 seconds */
 static uint32_t rack_clamp_ss_upper = 110;
 static uint32_t rack_clamp_ca_upper = 105;
 static uint32_t rack_rxt_min_rnds = 10;	/* Min rounds if drastic rxt clamp is in place */
 static uint32_t rack_unclamp_round_thresh = 100;	/* number of perfect rounds before we unclamp */
 static uint32_t rack_unclamp_rxt_thresh = 5;	/* .5%  and under */
 static uint64_t rack_rxt_clamp_thresh = 0;	/* Do we do the rxt clamp thing */
 static int32_t rack_dnd_default = 0;		/* For rr_conf = 3, what is the default for dnd */
 static int32_t rack_rxt_controls = 0;
 static int32_t rack_fill_cw_state = 0;
 static uint8_t rack_req_measurements = 1;
 /* Attack threshold detections */
 static uint32_t rack_highest_sack_thresh_seen = 0;
 static uint32_t rack_highest_move_thresh_seen = 0;
 static uint32_t rack_merge_out_sacks_on_attack = 0;
 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
 static int32_t rack_hw_pace_extra_slots = 0;	/* 2 extra MSS time betweens */
 static int32_t rack_hw_rate_caps = 0; /* 1; */
 static int32_t rack_hw_rate_cap_per = 0;	/* 0 -- off  */
 static int32_t rack_hw_rate_min = 0; /* 1500000;*/
 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
 static int32_t rack_hw_up_only = 0;
 static int32_t rack_stats_gets_ms_rtt = 1;
 static int32_t rack_prr_addbackmax = 2;
 static int32_t rack_do_hystart = 0;
 static int32_t rack_apply_rtt_with_reduced_conf = 0;
 static int32_t rack_hibeta_setting = 0;
 static int32_t rack_default_pacing_divisor = 250;
 static int32_t rack_uses_full_dgp_in_rec = 1;
 static uint16_t rack_pacing_min_seg = 0;
 
 
 static uint32_t sad_seg_size_per = 800;	/* 80.0 % */
 static int32_t rack_pkt_delay = 1000;
 static int32_t rack_send_a_lot_in_prr = 1;
 static int32_t rack_min_to = 1000;	/* Number of microsecond  min timeout */
 static int32_t rack_verbose_logging = 0;
 static int32_t rack_ignore_data_after_close = 1;
 static int32_t rack_enable_shared_cwnd = 1;
 static int32_t rack_use_cmp_acks = 1;
 static int32_t rack_use_fsb = 1;
 static int32_t rack_use_rfo = 1;
 static int32_t rack_use_rsm_rfo = 1;
 static int32_t rack_max_abc_post_recovery = 2;
 static int32_t rack_client_low_buf = 0;
 static int32_t rack_dsack_std_based = 0x3;	/* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
 static int32_t rack_bw_multipler = 2;		/* Limit on fill cw's jump up to be this x gp_est */
 #ifdef TCP_ACCOUNTING
 static int32_t rack_tcp_accounting = 0;
 #endif
 static int32_t rack_limits_scwnd = 1;
 static int32_t rack_enable_mqueue_for_nonpaced = 0;
 static int32_t rack_hybrid_allow_set_maxseg = 0;
 static int32_t rack_disable_prr = 0;
 static int32_t use_rack_rr = 1;
 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
 static int32_t rack_persist_min = 250000;	/* 250usec */
 static int32_t rack_persist_max = 2000000;	/* 2 Second in usec's */
 static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
 static int32_t rack_default_init_window = 0;	/* Use system default */
 static int32_t rack_limit_time_with_srtt = 0;
 static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
 static int32_t rack_enobuf_hw_boost_mult = 0;	/* How many times the hw rate we boost slot using time_between */
 static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
 static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
 static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */
 static int32_t rack_hw_check_queue = 0;		/* Do we always pre-check queue depth of a hw queue */
 static int32_t rack_full_buffer_discount = 10;
 /*
  * Currently regular TCP has an rto_min of 30ms;
  * the backoff doubles 12 times, so that ends up
  * being a total of 30ms * (2^12 - 1) = 122.850 seconds
  * before a connection is killed.
  */
 static uint32_t rack_def_data_window = 20;
 static uint32_t rack_goal_bdp = 2;
 static uint32_t rack_min_srtts = 1;
 static uint32_t rack_min_measure_usec = 0;
 static int32_t rack_tlp_min = 10000;	/* 10ms */
 static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
 static int32_t rack_rto_max = 4000000;	/* 4 seconds in usec's */
 static const int32_t rack_free_cache = 2;
 static int32_t rack_hptsi_segments = 40;
 static int32_t rack_rate_sample_method = USE_RTT_LOW;
 static int32_t rack_pace_every_seg = 0;
 static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
 static int32_t rack_slot_reduction = 4;
 static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
 static int32_t rack_cwnd_block_ends_measure = 0;
 static int32_t rack_rwnd_block_ends_measure = 0;
 static int32_t rack_def_profile = 0;
 
 static int32_t rack_lower_cwnd_at_tlp = 0;
 static int32_t rack_limited_retran = 0;
 static int32_t rack_always_send_oldest = 0;
 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
 
 static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
 static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
 static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */
 
 /* Probertt */
 static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
 static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
 static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
 static uint16_t rack_atexit_prtt = 130;	/* Clamp to 100% on exit prtt if non highly buffered path */
 
 static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
 static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
 static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
 static uint32_t rack_probertt_use_min_rtt_exit = 0;
 static uint32_t rack_probe_rtt_sets_cwnd = 0;
 static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
 static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
 static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
 static uint32_t rack_min_probertt_hold = 40000;		/* Equal to delayed ack time */
 static uint32_t rack_probertt_filter_life = 10000000;
 static uint32_t rack_probertt_lower_within = 10;
 static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds)  to count as a lowering */
 static int32_t rack_pace_one_seg = 0;		/* Shall we pace for less than 1.4Meg 1MSS at a time */
 static int32_t rack_probertt_clear_is = 1;
 static int32_t rack_max_drain_hbp = 1;		/* Extra drain times gpsrtt for highly buffered paths */
 static int32_t rack_hbp_thresh = 3;		/* what is the divisor max_rtt/min_rtt to decided a hbp */
 
 /* Part of pacing */
 static int32_t rack_max_per_above = 30;		/* When we go to increment stop if above 100+this% */
 
 /* Timely information:
  *
  * Here we have various control parameters on how
  * timely may change the multiplier. rack_gain_p5_ub
  * is associated with timely but not directly influencing
  * the rate decision like the other variables. It controls
  * the way fill-cw interacts with timely and caps how much
  * timely can boost the fill-cw b/w.
  *
  * The other values are various boost/shrink numbers as well
  * as potential caps when adjustments are made to the timely
  * gain (returned by rack_get_output_gain()). Remember too that
  * the gain returned can be overridden by other factors such as
  * probeRTT as well as fixed-rate-pacing.
  */
 static int32_t rack_gain_p5_ub = 250;
 static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
 static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
 static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
 static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
 static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
 static int32_t rack_gp_decrease_per = 80;	/* Beta value of timely decrease (.8) = 80 */
 static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
 static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
 static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
 static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
 static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
 static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
 static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
 static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
 static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
 static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
 static int32_t rack_use_max_for_nobackoff = 0;
 static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
 static int32_t rack_timely_no_stopping = 0;
 static int32_t rack_down_raise_thresh = 100;
 static int32_t rack_req_segs = 1;
 static uint64_t rack_bw_rate_cap = 0;
 
 
 /* Rack specific counters */
 counter_u64_t rack_saw_enobuf;
 counter_u64_t rack_saw_enobuf_hw;
 counter_u64_t rack_saw_enetunreach;
 counter_u64_t rack_persists_sends;
 counter_u64_t rack_persists_acks;
 counter_u64_t rack_persists_loss;
 counter_u64_t rack_persists_lost_ends;
 counter_u64_t rack_total_bytes;
 #ifdef INVARIANTS
 counter_u64_t rack_adjust_map_bw;
 #endif
 /* Tail loss probe counters */
 counter_u64_t rack_tlp_tot;
 counter_u64_t rack_tlp_newdata;
 counter_u64_t rack_tlp_retran;
 counter_u64_t rack_tlp_retran_bytes;
 counter_u64_t rack_to_tot;
 counter_u64_t rack_hot_alloc;
 counter_u64_t rack_to_alloc;
 counter_u64_t rack_to_alloc_hard;
 counter_u64_t rack_to_alloc_emerg;
 counter_u64_t rack_to_alloc_limited;
 counter_u64_t rack_alloc_limited_conns;
 counter_u64_t rack_split_limited;
 counter_u64_t rack_rxt_clamps_cwnd;
 counter_u64_t rack_rxt_clamps_cwnd_uniq;
 
 counter_u64_t rack_multi_single_eq;
 counter_u64_t rack_proc_non_comp_ack;
 
 counter_u64_t rack_fto_send;
 counter_u64_t rack_fto_rsm_send;
 counter_u64_t rack_nfto_resend;
 counter_u64_t rack_non_fto_send;
 counter_u64_t rack_extended_rfo;
 
 counter_u64_t rack_sack_proc_all;
 counter_u64_t rack_sack_proc_short;
 counter_u64_t rack_sack_proc_restart;
 counter_u64_t rack_sack_attacks_detected;
 counter_u64_t rack_sack_attacks_reversed;
 counter_u64_t rack_sack_attacks_suspect;
 counter_u64_t rack_sack_used_next_merge;
 counter_u64_t rack_sack_splits;
 counter_u64_t rack_sack_used_prev_merge;
 counter_u64_t rack_sack_skipped_acked;
 counter_u64_t rack_ack_total;
 counter_u64_t rack_express_sack;
 counter_u64_t rack_sack_total;
 counter_u64_t rack_move_none;
 counter_u64_t rack_move_some;
 
 counter_u64_t rack_input_idle_reduces;
 counter_u64_t rack_collapsed_win;
 counter_u64_t rack_collapsed_win_seen;
 counter_u64_t rack_collapsed_win_rxt;
 counter_u64_t rack_collapsed_win_rxt_bytes;
 counter_u64_t rack_try_scwnd;
 counter_u64_t rack_hw_pace_init_fail;
 counter_u64_t rack_hw_pace_lost;
 
 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
 
 
 #define	RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
 
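 /* Clamp (value + slop) into the inclusive range [tvmin, tvmax]. */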
 #define	RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
 	(tv) = (value) + slop;	 \
 	if ((u_long)(tv) < (u_long)(tvmin)) \
 		(tv) = (tvmin); \
 	if ((u_long)(tv) > (u_long)(tvmax)) \
 		(tv) = (tvmax); \
 } while (0)
 
 static void
 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,  int event, int line);
 
 static int
 rack_process_ack(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to,
     uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
 static int
 rack_process_data(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
 static void
 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
     uint8_t limit_type);
 static struct rack_sendmap *
 rack_check_recovery_mode(struct tcpcb *tp,
     uint32_t tsused);
 static void
 rack_cong_signal(struct tcpcb *tp,
 		 uint32_t type, uint32_t ack, int );
 static void rack_counter_destroy(void);
 static int
 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt);
 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
 static void
 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
 static void
 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
     int32_t drop_hdrlen, int32_t tlen, uint8_t iptos);
 static void rack_dtor(void *mem, int32_t size, void *arg);
 static void
 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
     uint32_t flex1, uint32_t flex2,
     uint32_t flex3, uint32_t flex4,
     uint32_t flex5, uint32_t flex6,
     uint16_t flex7, uint8_t mod);
 
 static void
 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
    struct rack_sendmap *rsm, uint8_t quality);
 static struct rack_sendmap *
 rack_find_high_nonack(struct tcp_rack *rack,
     struct rack_sendmap *rsm);
 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt);
 static void
 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
 			    tcp_seq th_ack, int line, uint8_t quality);
 static void
 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm);
 
 static uint32_t
 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
 static int32_t rack_handoff_ok(struct tcpcb *tp);
 static int32_t rack_init(struct tcpcb *tp, void **ptr);
 static void rack_init_sysctls(void);
 
 static void
 rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
     struct tcphdr *th, int entered_rec, int dup_ack_struck,
     int *dsack_seen, int *sacks_seen);
 static void
 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
     uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
     struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz);
 
 static uint64_t rack_get_gp_est(struct tcp_rack *rack);
 
 static void
 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
     struct rack_sendmap *rsm);
 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
 static int32_t rack_output(struct tcpcb *tp);
 
 static uint32_t
 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
     struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
     uint32_t cts, int *no_extra, int *moved_two, uint32_t segsiz);
 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
 static void rack_remxt_tmr(struct tcpcb *tp);
 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt);
 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
 static int32_t rack_stopall(struct tcpcb *tp);
 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
 static uint32_t
 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
     struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag, int segsiz);
 static void
 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
     struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag, int segsiz);
 static int
 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
     struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
 static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
 static int
 rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
 static int
 rack_do_closing(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
 static int
 rack_do_established(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
 static int
 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
 static int
 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
 static int
 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
 static int
 rack_do_lastack(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
 static int
 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
 static int
 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
     struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
     int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts);
 struct rack_sendmap *
 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
     uint32_t tsused);
 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
     uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
 static void
      tcp_rack_partialack(struct tcpcb *tp);
 static int
 rack_set_profile(struct tcp_rack *rack, int prof);
 static void
 rack_apply_deferred_options(struct tcp_rack *rack);
 
 int32_t rack_clear_counter=0;
 
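 /*
  * Return the lifetime ("long term") bandwidth estimate in bytes per
  * second: accumulated bytes scaled by 1,000,000 over the accumulated time
  * in microseconds, folding in the bytes/time of the measurement currently
  * in progress when lt_bw_up is set.
  */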
 static uint64_t
 rack_get_lt_bw(struct tcp_rack *rack)
 {
 	struct timeval tv;
 	uint64_t tim, bytes;
 
 	tim = rack->r_ctl.lt_bw_time;
 	bytes = rack->r_ctl.lt_bw_bytes;
 	if (rack->lt_bw_up) {
 		/* Include all the current bytes too */
 		microuptime(&tv);
 		bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq);
 		tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
 	}
 	if ((bytes != 0) && (tim != 0))
 		return ((bytes * (uint64_t)1000000) / tim);
 	else
 		return (0);
 }
 
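 /*
  * Swap the connection's New Reno beta/beta_ecn settings with the copy held
  * in r_ctl.rc_saved_beta, using the CC module's ctl_output interface.  The
  * swap only happens when the connection is really running New Reno; flex8
  * (3 on set, 4 on restore) tags the BB log record emitted at the end.
  */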
 static void
 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
 {
 	struct sockopt sopt;
 	struct cc_newreno_opts opt;
 	struct newreno old;
 	struct tcpcb *tp;
 	int error, failed = 0;
 
 	tp = rack->rc_tp;
 	if (tp->t_cc == NULL) {
 		/* Tcb is leaving */
 		return;
 	}
 	rack->rc_pacing_cc_set = 1;
 	if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
 		/* Not new-reno; we can't play games with beta! */
 		failed = 1;
 		goto out;
 
 	}
 	if (CC_ALGO(tp)->ctl_output == NULL)  {
 		/* No ctl_output method, so no swaps are possible. */
 		failed = 2;
 		goto out;
 	}
 	/* Get the current values out */
 	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
 	sopt.sopt_dir = SOPT_GET;
 	opt.name = CC_NEWRENO_BETA;
 	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
 	if (error)  {
 		failed = 3;
 		goto out;
 	}
 	old.beta = opt.val;
 	opt.name = CC_NEWRENO_BETA_ECN;
 	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
 	if (error)  {
 		failed = 4;
 		goto out;
 	}
 	old.beta_ecn = opt.val;
 
 	/* Now let's set the values we have stored */
 	sopt.sopt_dir = SOPT_SET;
 	opt.name = CC_NEWRENO_BETA;
 	opt.val = rack->r_ctl.rc_saved_beta.beta;
 	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
 	if (error)  {
 		failed = 5;
 		goto out;
 	}
 	opt.name = CC_NEWRENO_BETA_ECN;
 	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
 	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
 	if (error) {
 		failed = 6;
 		goto out;
 	}
 	/* Save off the old values so they can be restored later */
 	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
 out:
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 		struct newreno *ptr;
 
 		ptr = ((struct newreno *)tp->t_ccv.cc_data);
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.flex1 = ptr->beta;
 		log.u_bbr.flex2 = ptr->beta_ecn;
 		log.u_bbr.flex3 = ptr->newreno_flags;
 		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
 		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
 		log.u_bbr.flex6 = failed;
 		log.u_bbr.flex7 = rack->gp_ready;
 		log.u_bbr.flex7 <<= 1;
 		log.u_bbr.flex7 |= rack->use_fixed_rate;
 		log.u_bbr.flex7 <<= 1;
 		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
 		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex8 = flex8;
 		tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
 			       0, &log, false, NULL, NULL, 0, &tv);
 	}
 }
 
 static void
 rack_set_cc_pacing(struct tcp_rack *rack)
 {
 	if (rack->rc_pacing_cc_set)
 		return;
 	/*
 	 * Use the swap utility, placing 3 in flex8 to identify
 	 * the setting of a new set of values.
 	 */
 	rack->rc_pacing_cc_set = 1;
 	rack_swap_beta_values(rack, 3);
 }
 
 static void
 rack_undo_cc_pacing(struct tcp_rack *rack)
 {
 	if (rack->rc_pacing_cc_set == 0)
 		return;
 	/*
 	 * Use the swap utility, placing 4 in flex8 to identify
 	 * the restoration of the old values.
 	 */
 	rack->rc_pacing_cc_set = 0;
 	rack_swap_beta_values(rack, 4);
 }
 
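 /*
  * BB log helper that records the sequence boundaries, timestamps and mode
  * of a goodput measurement window; it only emits a record when verbose BB
  * logging is enabled on the connection.
  */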
 static void
 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t,
 	       uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm)
 {
 	if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = seq_end;
 		log.u_bbr.flex2 = rack->rc_tp->gput_seq;
 		log.u_bbr.flex3 = ack_end_t;
 		log.u_bbr.flex4 = rack->rc_tp->gput_ts;
 		log.u_bbr.flex5 = send_end_t;
 		log.u_bbr.flex6 = rack->rc_tp->gput_ack;
 		log.u_bbr.flex7 = mode;
 		log.u_bbr.flex8 = 69;
 		log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts;
 		log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts;
 		log.u_bbr.pkts_out = line;
 		log.u_bbr.cwnd_gain = rack->app_limited_needs_set;
 		log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt;
 		if (rsm != NULL) {
 			log.u_bbr.applimited = rsm->r_start;
 			log.u_bbr.delivered = rsm->r_end;
 			log.u_bbr.epoch = rsm->r_flags;
 		}
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_HPTSI_CALC, 0,
 		    0, &log, false, &tv);
 	}
 }
 
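 /*
  * Sysctl handler for clearing RACK statistics: writing 1 zeroes all of the
  * individual counters, 2 zeroes the option-usage array, 4 zeroes the
  * output-size array, and 3 merely prints a reminder that 1 clears
  * everything.
  */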
 static int
 sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
 {
 	uint32_t stat;
 	int32_t error;
 
 	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
 	if (error || req->newptr == NULL)
 		return error;
 
 	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
 	if (error)
 		return (error);
 	if (stat == 1) {
 #ifdef INVARIANTS
 		printf("Clearing RACK counters\n");
 #endif
 		counter_u64_zero(rack_tlp_tot);
 		counter_u64_zero(rack_tlp_newdata);
 		counter_u64_zero(rack_tlp_retran);
 		counter_u64_zero(rack_tlp_retran_bytes);
 		counter_u64_zero(rack_to_tot);
 		counter_u64_zero(rack_saw_enobuf);
 		counter_u64_zero(rack_saw_enobuf_hw);
 		counter_u64_zero(rack_saw_enetunreach);
 		counter_u64_zero(rack_persists_sends);
 		counter_u64_zero(rack_total_bytes);
 		counter_u64_zero(rack_persists_acks);
 		counter_u64_zero(rack_persists_loss);
 		counter_u64_zero(rack_persists_lost_ends);
 #ifdef INVARIANTS
 		counter_u64_zero(rack_adjust_map_bw);
 #endif
 		counter_u64_zero(rack_to_alloc_hard);
 		counter_u64_zero(rack_to_alloc_emerg);
 		counter_u64_zero(rack_sack_proc_all);
 		counter_u64_zero(rack_fto_send);
 		counter_u64_zero(rack_fto_rsm_send);
 		counter_u64_zero(rack_extended_rfo);
 		counter_u64_zero(rack_hw_pace_init_fail);
 		counter_u64_zero(rack_hw_pace_lost);
 		counter_u64_zero(rack_non_fto_send);
 		counter_u64_zero(rack_nfto_resend);
 		counter_u64_zero(rack_sack_proc_short);
 		counter_u64_zero(rack_sack_proc_restart);
 		counter_u64_zero(rack_to_alloc);
 		counter_u64_zero(rack_to_alloc_limited);
 		counter_u64_zero(rack_alloc_limited_conns);
 		counter_u64_zero(rack_split_limited);
 		counter_u64_zero(rack_rxt_clamps_cwnd);
 		counter_u64_zero(rack_rxt_clamps_cwnd_uniq);
 		counter_u64_zero(rack_multi_single_eq);
 		counter_u64_zero(rack_proc_non_comp_ack);
 		counter_u64_zero(rack_sack_attacks_detected);
 		counter_u64_zero(rack_sack_attacks_reversed);
 		counter_u64_zero(rack_sack_attacks_suspect);
 		counter_u64_zero(rack_sack_used_next_merge);
 		counter_u64_zero(rack_sack_used_prev_merge);
 		counter_u64_zero(rack_sack_splits);
 		counter_u64_zero(rack_sack_skipped_acked);
 		counter_u64_zero(rack_ack_total);
 		counter_u64_zero(rack_express_sack);
 		counter_u64_zero(rack_sack_total);
 		counter_u64_zero(rack_move_none);
 		counter_u64_zero(rack_move_some);
 		counter_u64_zero(rack_try_scwnd);
 		counter_u64_zero(rack_collapsed_win);
 		counter_u64_zero(rack_collapsed_win_rxt);
 		counter_u64_zero(rack_collapsed_win_seen);
 		counter_u64_zero(rack_collapsed_win_rxt_bytes);
 	} else if (stat == 2) {
 #ifdef INVARIANTS
 		printf("Clearing RACK option array\n");
 #endif
 		COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE);
 	} else if (stat == 3) {
 		printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n");
 	} else if (stat == 4) {
 #ifdef INVARIANTS
 		printf("Clearing RACK out size array\n");
 #endif
 		COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE);
 	}
 	rack_clear_counter = 0;
 	return (0);
 }
 
 static void
 rack_init_sysctls(void)
 {
 	struct sysctl_oid *rack_counters;
 	struct sysctl_oid *rack_attack;
 	struct sysctl_oid *rack_pacing;
 	struct sysctl_oid *rack_timely;
 	struct sysctl_oid *rack_timers;
 	struct sysctl_oid *rack_tlp;
 	struct sysctl_oid *rack_misc;
 	struct sysctl_oid *rack_features;
 	struct sysctl_oid *rack_measure;
 	struct sysctl_oid *rack_probertt;
 	struct sysctl_oid *rack_hw_pacing;
 
 	rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "sack_attack",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Rack Sack Attack Counters and Controls");
 	rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "stats",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Rack Counters");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
 	    &rack_rate_sample_method , USE_RTT_LOW,
 	    "What method should we use for rate sampling 0=high, 1=low ");
 	/* Probe rtt related controls */
 	rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "probertt",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "ProbeRTT related Controls");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
 	    &rack_atexit_prtt_hbp, 130,
 	    "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
 	    &rack_atexit_prtt, 130,
 	    "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "gp_per_mul", CTLFLAG_RW,
 	    &rack_per_of_gp_probertt, 60,
 	    "What percentage of goodput do we pace at in probertt");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
 	    &rack_per_of_gp_probertt_reduce, 10,
 	    "What percentage of goodput do we reduce every gp_srtt");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "gp_per_low", CTLFLAG_RW,
 	    &rack_per_of_gp_lowthresh, 40,
 	    "What percentage of goodput do we allow the multiplier to fall to");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "time_between", CTLFLAG_RW,
 	    & rack_time_between_probertt, 96000000,
 	    "How many useconds between the lowest rtt falling must past before we enter probertt");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "safety", CTLFLAG_RW,
 	    &rack_probe_rtt_safety_val, 2000000,
 	    "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "sets_cwnd", CTLFLAG_RW,
 	    &rack_probe_rtt_sets_cwnd, 0,
 	    "Do we set the cwnd too (if always_lower is on)");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
 	    &rack_max_drain_wait, 2,
 	    "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
 	    &rack_must_drain, 1,
 	    "We must drain this many gp_srtt's waiting for flight to reach goal");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
 	    &rack_probertt_use_min_rtt_entry, 1,
 	    "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
 	    &rack_probertt_use_min_rtt_exit, 0,
 	    "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "length_div", CTLFLAG_RW,
 	    &rack_probertt_gpsrtt_cnt_div, 0,
 	    "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "length_mul", CTLFLAG_RW,
 	    &rack_probertt_gpsrtt_cnt_mul, 0,
 	    "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
 	    &rack_min_probertt_hold, 200000,
 	    "What is the minimum time we hold probertt at target");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "filter_life", CTLFLAG_RW,
 	    &rack_probertt_filter_life, 10000000,
 	    "What is the time for the filters life in useconds");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "lower_within", CTLFLAG_RW,
 	    &rack_probertt_lower_within, 10,
 	    "If the rtt goes lower within this percentage of the time, go into probe-rtt");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "must_move", CTLFLAG_RW,
 	    &rack_min_rtt_movement, 250,
 	    "How much is the minimum movement in rtt to count as a drop for probertt purposes");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
 	    &rack_probertt_clear_is, 1,
 	    "Do we clear I/S counts on exiting probe-rtt");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
 	    &rack_max_drain_hbp, 1,
 	    "How many extra drain gpsrtt's do we get in highly buffered paths");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_probertt),
 	    OID_AUTO, "hbp_threshold", CTLFLAG_RW,
 	    &rack_hbp_thresh, 3,
 	    "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
 	/* Pacing related sysctls */
 	rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "pacing",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Pacing related Controls");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "fulldgpinrec", CTLFLAG_RW,
 	    &rack_uses_full_dgp_in_rec, 1,
 	    "Do we use all DGP features in recovery (fillcw, timely et.al.)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "fullbufdisc", CTLFLAG_RW,
 	    &rack_full_buffer_discount, 10,
 	    "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "fillcw", CTLFLAG_RW,
 	    &rack_fill_cw_state, 0,
 	    "Enable fillcw on new connections (default=0 off)?");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "min_burst", CTLFLAG_RW,
 	    &rack_pacing_min_seg, 0,
 	    "What is the min burst size for pacing (0 disables)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "divisor", CTLFLAG_RW,
 	    &rack_default_pacing_divisor, 4,
 	    "What is the default divisor given to the rl code?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "fillcw_max_mult", CTLFLAG_RW,
 	    &rack_bw_multipler, 2,
 	    "What is the multiplier of the current gp_est that fillcw can increase the b/w too?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "max_pace_over", CTLFLAG_RW,
 	    &rack_max_per_above, 30,
 	    "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "allow1mss", CTLFLAG_RW,
 	    &rack_pace_one_seg, 0,
 	    "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
 	    &rack_limit_time_with_srtt, 0,
 	    "Do we limit pacing time based on srtt");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "init_win", CTLFLAG_RW,
 	    &rack_default_init_window, 0,
 	    "Do we have a rack initial window 0 = system default");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "gp_per_ss", CTLFLAG_RW,
 	    &rack_per_of_gp_ss, 250,
 	    "If non zero, what percentage of goodput to pace at in slow start");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "gp_per_ca", CTLFLAG_RW,
 	    &rack_per_of_gp_ca, 150,
 	    "If non zero, what percentage of goodput to pace at in congestion avoidance");
 	SYSCTL_ADD_U16(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "gp_per_rec", CTLFLAG_RW,
 	    &rack_per_of_gp_rec, 200,
 	    "If non zero, what percentage of goodput to pace at in recovery");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "pace_max_seg", CTLFLAG_RW,
 	    &rack_hptsi_segments, 40,
 	    "What size is the max for TSO segments in pacing and burst mitigation");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "burst_reduces", CTLFLAG_RW,
 	    &rack_slot_reduction, 4,
 	    "When doing only burst mitigation what is the reduce divisor");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO, "use_pacing", CTLFLAG_RW,
 	    &rack_pace_every_seg, 0,
 	    "If set we use pacing, if clear we use only the original burst mitigation");
 	SYSCTL_ADD_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_pacing),
 	    OID_AUTO, "rate_cap", CTLFLAG_RW,
 	    &rack_bw_rate_cap, 0,
 	    "If set we apply this value to the absolute rate cap used by pacing");
 	SYSCTL_ADD_U8(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
 	    &rack_req_measurements, 1,
 	    "If doing dynamic pacing, how many measurements must be in before we start pacing?");
 	/* Hardware pacing */
 	rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "hdwr_pacing",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Pacing related Controls");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "rwnd_factor", CTLFLAG_RW,
 	    &rack_hw_rwnd_factor, 2,
 	    "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "precheck", CTLFLAG_RW,
 	    &rack_hw_check_queue, 0,
 	    "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
 	    &rack_enobuf_hw_boost_mult, 0,
 	    "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
 	    &rack_enobuf_hw_max, 2,
 	    "What is the max boost the pacing time if we see a ENOBUFS?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
 	    &rack_enobuf_hw_min, 2,
 	    "What is the min boost the pacing time if we see a ENOBUFS?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "enable", CTLFLAG_RW,
 	    &rack_enable_hw_pacing, 0,
 	    "Should RACK attempt to use hw pacing?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "rate_cap", CTLFLAG_RW,
 	    &rack_hw_rate_caps, 0,
 	    "Does the highest hardware pacing rate cap the rate we will send at??");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "uncap_per", CTLFLAG_RW,
 	    &rack_hw_rate_cap_per, 0,
 	    "If you go over b/w by this amount you will be uncapped (0 = never)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "rate_min", CTLFLAG_RW,
 	    &rack_hw_rate_min, 0,
 	    "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "rate_to_low", CTLFLAG_RW,
 	    &rack_hw_rate_to_low, 0,
 	    "If we fall below this rate, dis-engage hw pacing?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "up_only", CTLFLAG_RW,
 	    &rack_hw_up_only, 0,
 	    "Do we allow hw pacing to lower the rate selected?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_hw_pacing),
 	    OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
 	    &rack_hw_pace_extra_slots, 0,
 	    "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
 	rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "timely",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Rack Timely RTT Controls");
 	/* Timely based GP dynamics */
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "upper", CTLFLAG_RW,
 	    &rack_gp_per_bw_mul_up, 2,
 	    "Rack timely upper range for equal b/w (in percentage)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "lower", CTLFLAG_RW,
 	    &rack_gp_per_bw_mul_down, 4,
 	    "Rack timely lower range for equal b/w (in percentage)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
 	    &rack_gp_rtt_maxmul, 3,
 	    "Rack timely multiplier of lowest rtt for rtt_max");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "rtt_min_div", CTLFLAG_RW,
 	    &rack_gp_rtt_mindiv, 4,
 	    "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
 	    &rack_gp_rtt_minmul, 1,
 	    "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "decrease", CTLFLAG_RW,
 	    &rack_gp_decrease_per, 80,
 	    "Rack timely Beta value 80 = .8 (scaled by 100)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "increase", CTLFLAG_RW,
 	    &rack_gp_increase_per, 2,
 	    "Rack timely increase perentage of our GP multiplication factor");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "lowerbound", CTLFLAG_RW,
 	    &rack_per_lower_bound, 50,
 	    "Rack timely lowest percentage we allow GP multiplier to fall to");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "p5_upper", CTLFLAG_RW,
 	    &rack_gain_p5_ub, 250,
 	    "Profile 5 upper bound to timely gain");
 
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "upperboundss", CTLFLAG_RW,
 	    &rack_per_upper_bound_ss, 0,
 	    "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "upperboundca", CTLFLAG_RW,
 	    &rack_per_upper_bound_ca, 0,
 	    "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "dynamicgp", CTLFLAG_RW,
 	    &rack_do_dyn_mul, 0,
 	    "Rack timely do we enable dynmaic timely goodput by default");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "no_rec_red", CTLFLAG_RW,
 	    &rack_gp_no_rec_chg, 1,
 	    "Rack timely do we prohibit the recovery multiplier from being lowered");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
 	    &rack_timely_dec_clear, 6,
 	    "Rack timely what threshold do we count to before another boost during b/w decent");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "max_push_rise", CTLFLAG_RW,
 	    &rack_timely_max_push_rise, 3,
 	    "Rack timely how many times do we push up with b/w increase");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "max_push_drop", CTLFLAG_RW,
 	    &rack_timely_max_push_drop, 3,
 	    "Rack timely how many times do we push back on b/w decent");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "min_segs", CTLFLAG_RW,
 	    &rack_timely_min_segs, 4,
 	    "Rack timely when setting the cwnd what is the min num segments");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "noback_max", CTLFLAG_RW,
 	    &rack_use_max_for_nobackoff, 0,
 	    "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "interim_timely_only", CTLFLAG_RW,
 	    &rack_timely_int_timely_only, 0,
 	    "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "nonstop", CTLFLAG_RW,
 	    &rack_timely_no_stopping, 0,
 	    "Rack timely don't stop increase");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
 	    &rack_down_raise_thresh, 100,
 	    "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timely),
 	    OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
 	    &rack_req_segs, 1,
 	    "Bottom dragging if not these many segments outstanding and room");
 
 	/* TLP and Rack related parameters */
 	rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "tlp",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "TLP and Rack related Controls");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "use_rrr", CTLFLAG_RW,
 	    &use_rack_rr, 1,
 	    "Do we use Rack Rapid Recovery");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "post_rec_labc", CTLFLAG_RW,
 	    &rack_max_abc_post_recovery, 2,
 	    "Since we do early recovery, do we override the l_abc to a value, if so what?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
 	    &rack_non_rxt_use_cr, 0,
 	    "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "tlpmethod", CTLFLAG_RW,
 	    &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
 	    "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "limit", CTLFLAG_RW,
 	    &rack_tlp_limit, 2,
 	    "How many TLP's can be sent without sending new data");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "use_greater", CTLFLAG_RW,
 	    &rack_tlp_use_greater, 1,
 	    "Should we use the rack_rtt time if its greater than srtt");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "tlpminto", CTLFLAG_RW,
 	    &rack_tlp_min, 10000,
 	    "TLP minimum timeout per the specification (in microseconds)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "send_oldest", CTLFLAG_RW,
 	    &rack_always_send_oldest, 0,
 	    "Should we always send the oldest TLP and RACK-TLP");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "rack_tlimit", CTLFLAG_RW,
 	    &rack_limited_retran, 0,
 	    "How many times can a rack timeout drive out sends");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
 	    &rack_lower_cwnd_at_tlp, 0,
 	    "When a TLP completes a retran should we enter recovery");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "reorder_thresh", CTLFLAG_RW,
 	    &rack_reorder_thresh, 2,
 	    "What factor for rack will be added when seeing reordering (shift right)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
 	    &rack_tlp_thresh, 1,
 	    "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "reorder_fade", CTLFLAG_RW,
 	    &rack_reorder_fade, 60000000,
 	    "Does reorder detection fade, if so how many microseconds (0 means never)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_tlp),
 	    OID_AUTO, "pktdelay", CTLFLAG_RW,
 	    &rack_pkt_delay, 1000,
 	    "Extra RACK time (in microseconds) besides reordering thresh");
 
 	/* Timer related controls */
 	rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "timers",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Timer related controls");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timers),
 	    OID_AUTO, "persmin", CTLFLAG_RW,
 	    &rack_persist_min, 250000,
 	    "What is the minimum time in microseconds between persists");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timers),
 	    OID_AUTO, "persmax", CTLFLAG_RW,
 	    &rack_persist_max, 2000000,
 	    "What is the largest delay in microseconds between persists");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timers),
 	    OID_AUTO, "delayed_ack", CTLFLAG_RW,
 	    &rack_delayed_ack_time, 40000,
 	    "Delayed ack time (40ms in microseconds)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timers),
 	    OID_AUTO, "minrto", CTLFLAG_RW,
 	    &rack_rto_min, 30000,
 	    "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timers),
 	    OID_AUTO, "maxrto", CTLFLAG_RW,
 	    &rack_rto_max, 4000000,
 	    "Maximum RTO in microseconds -- should be at least as large as min_rto");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_timers),
 	    OID_AUTO, "minto", CTLFLAG_RW,
 	    &rack_min_to, 1000,
 	    "Minimum rack timeout in microseconds");
 	/* Measure controls */
 	rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "measure",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Measure related controls");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_measure),
 	    OID_AUTO, "wma_divisor", CTLFLAG_RW,
 	    &rack_wma_divisor, 8,
 	    "When doing b/w calculation what is the  divisor for the WMA");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_measure),
 	    OID_AUTO, "end_cwnd", CTLFLAG_RW,
 	    &rack_cwnd_block_ends_measure, 0,
 	    "Does a cwnd just-return end the measurement window (app limited)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_measure),
 	    OID_AUTO, "end_rwnd", CTLFLAG_RW,
 	    &rack_rwnd_block_ends_measure, 0,
 	    "Does an rwnd just-return end the measurement window (app limited -- not persists)");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_measure),
 	    OID_AUTO, "min_target", CTLFLAG_RW,
 	    &rack_def_data_window, 20,
 	    "What is the minimum target window (in mss) for a GP measurements");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_measure),
 	    OID_AUTO, "goal_bdp", CTLFLAG_RW,
 	    &rack_goal_bdp, 2,
 	    "What is the goal BDP to measure");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_measure),
 	    OID_AUTO, "min_srtts", CTLFLAG_RW,
 	    &rack_min_srtts, 1,
 	    "What is the goal BDP to measure");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_measure),
 	    OID_AUTO, "min_measure_tim", CTLFLAG_RW,
 	    &rack_min_measure_usec, 0,
 	    "What is the Minimum time time for a measurement if 0, this is off");
 	/* Features */
 	rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "features",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Feature controls");
 	SYSCTL_ADD_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_features),
 	    OID_AUTO, "rxt_clamp_thresh", CTLFLAG_RW,
 	    &rack_rxt_clamp_thresh, 0,
 	    "Bit encoded clamping setup bits CCCC CCCCC UUUU UULF PPPP PPPP PPPP PPPP");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_features),
 	    OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW,
 	    &rack_hybrid_allow_set_maxseg, 0,
 	    "Should hybrid pacing allow the setmss command");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_features),
 	    OID_AUTO, "cmpack", CTLFLAG_RW,
 	    &rack_use_cmp_acks, 1,
 	    "Should RACK have LRO send compressed acks");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_features),
 	    OID_AUTO, "fsb", CTLFLAG_RW,
 	    &rack_use_fsb, 1,
 	    "Should RACK use the fast send block?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_features),
 	    OID_AUTO, "rfo", CTLFLAG_RW,
 	    &rack_use_rfo, 1,
 	    "Should RACK use rack_fast_output()?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_features),
 	    OID_AUTO, "rsmrfo", CTLFLAG_RW,
 	    &rack_use_rsm_rfo, 1,
 	    "Should RACK use rack_fast_rsm_output()?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_features),
 	    OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
 	    &rack_enable_mqueue_for_nonpaced, 0,
 	    "Should RACK use mbuf queuing for non-paced connections");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_features),
 	    OID_AUTO, "hystartplusplus", CTLFLAG_RW,
 	    &rack_do_hystart, 0,
 	    "Should RACK enable HyStart++ on connections?");
 	/* Misc rack controls */
 	rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO,
 	    "misc",
 	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 	    "Misc related controls");
 #ifdef TCP_ACCOUNTING
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "tcp_acct", CTLFLAG_RW,
 	    &rack_tcp_accounting, 0,
 	    "Should we turn on TCP accounting for all rack sessions?");
 #endif
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "dnd", CTLFLAG_RW,
 	    &rack_dnd_default, 0,
 	    "Do not disturb default for rack_rrr = 3");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "sad_seg_per", CTLFLAG_RW,
 	    &sad_seg_size_per, 800,
 	    "Percentage of segment size needed in a sack 800 = 80.0?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "rxt_controls", CTLFLAG_RW,
 	    &rack_rxt_controls, 0,
 	    "Retransmit sending size controls (valid  values 0, 1, 2 default=1)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "rack_hibeta", CTLFLAG_RW,
 	    &rack_hibeta_setting, 0,
 	    "Do we ue a high beta (80 instead of 50)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW,
 	    &rack_apply_rtt_with_reduced_conf, 0,
 	    "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW,
 	    &rack_dsack_std_based, 3,
 	    "How do we process dsack with respect to rack timers, bit field, 3 is standards based?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "prr_addback_max", CTLFLAG_RW,
 	    &rack_prr_addbackmax, 2,
 	    "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
 	    &rack_stats_gets_ms_rtt, 1,
 	    "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "clientlowbuf", CTLFLAG_RW,
 	    &rack_client_low_buf, 0,
 	    "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "defprofile", CTLFLAG_RW,
 	    &rack_def_profile, 0,
 	    "Should RACK use a default profile (0=no, num == profile num)?");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "shared_cwnd", CTLFLAG_RW,
 	    &rack_enable_shared_cwnd, 1,
 	    "Should RACK try to use the shared cwnd on connections where allowed");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
 	    &rack_limits_scwnd, 1,
 	    "Should RACK place low end time limits on the shared cwnd feature");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "no_prr", CTLFLAG_RW,
 	    &rack_disable_prr, 0,
 	    "Should RACK not use prr and only pace (must have pacing on)");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "bb_verbose", CTLFLAG_RW,
 	    &rack_verbose_logging, 0,
 	    "Should RACK black box logging be verbose");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "data_after_close", CTLFLAG_RW,
 	    &rack_ignore_data_after_close, 1,
 	    "Do we hold off sending a RST until all pending data is ack'd");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "no_sack_needed", CTLFLAG_RW,
 	    &rack_sack_not_required, 1,
 	    "Do we allow rack to run on connections not supporting SACK");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "prr_sendalot", CTLFLAG_RW,
 	    &rack_send_a_lot_in_prr, 1,
 	    "Send a lot in prr");
 	SYSCTL_ADD_S32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "autoscale", CTLFLAG_RW,
 	    &rack_autosndbuf_inc, 20,
 	    "What percentage should rack scale up its snd buffer by?");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "rnds_for_rxt_clamp", CTLFLAG_RW,
 	    &rack_rxt_min_rnds, 10,
 	    "Number of rounds needed between RTT clamps due to high loss rates");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "rnds_for_unclamp", CTLFLAG_RW,
 	    &rack_unclamp_round_thresh, 100,
 	    "Number of rounds needed with no loss to unclamp");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "rxt_threshs_for_unclamp", CTLFLAG_RW,
 	    &rack_unclamp_rxt_thresh, 5,
 	   "Percentage of retransmits we need to be under to unclamp (5 = .5 percent)\n");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "clamp_ss_upper", CTLFLAG_RW,
 	    &rack_clamp_ss_upper, 110,
 	    "Clamp percentage ceiling in SS?");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_misc),
 	    OID_AUTO, "clamp_ca_upper", CTLFLAG_RW,
 	    &rack_clamp_ca_upper, 110,
 	    "Clamp percentage ceiling in CA?");
 	/* Sack Attacker detection stuff */
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "merge_out", CTLFLAG_RW,
 	    &rack_merge_out_sacks_on_attack, 0,
 	    "Do we merge the sendmap when we decide we are being attacked?");
 
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
 	    &rack_highest_sack_thresh_seen, 0,
 	    "Highest sack to ack ratio seen");
 	SYSCTL_ADD_U32(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
 	    &rack_highest_move_thresh_seen, 0,
 	    "Highest move to non-move ratio seen");
 	rack_ack_total = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "acktotal", CTLFLAG_RD,
 	    &rack_ack_total,
 	    "Total number of Ack's");
 	rack_express_sack = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "exp_sacktotal", CTLFLAG_RD,
 	    &rack_express_sack,
 	    "Total expresss number of Sack's");
 	rack_sack_total = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "sacktotal", CTLFLAG_RD,
 	    &rack_sack_total,
 	    "Total number of SACKs");
 	rack_move_none = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "move_none", CTLFLAG_RD,
 	    &rack_move_none,
 	    "Total number of SACK index reuse of positions under threshold");
 	rack_move_some = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "move_some", CTLFLAG_RD,
 	    &rack_move_some,
 	    "Total number of SACK index reuse of positions over threshold");
 	rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "attacks", CTLFLAG_RD,
 	    &rack_sack_attacks_detected,
 	    "Total number of SACK attackers that had sack disabled");
 	rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "reversed", CTLFLAG_RD,
 	    &rack_sack_attacks_reversed,
 	    "Total number of SACK attackers that were later determined false positive");
 	rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "suspect", CTLFLAG_RD,
 	    &rack_sack_attacks_suspect,
 	    "Total number of SACKs that triggered early detection");
 
 	rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "nextmerge", CTLFLAG_RD,
 	    &rack_sack_used_next_merge,
 	    "Total number of times we used the next merge");
 	rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "prevmerge", CTLFLAG_RD,
 	    &rack_sack_used_prev_merge,
 	    "Total number of times we used the prev merge");
 	/* Counters */
 	rack_total_bytes = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "totalbytes", CTLFLAG_RD,
 	    &rack_total_bytes,
 	    "Total number of bytes sent");
 	rack_fto_send = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "fto_send", CTLFLAG_RD,
 	    &rack_fto_send, "Total number of rack_fast_output sends");
 	rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
 	    &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
 	rack_nfto_resend = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "nfto_resend", CTLFLAG_RD,
 	    &rack_nfto_resend, "Total number of rack_output retransmissions");
 	rack_non_fto_send = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "nfto_send", CTLFLAG_RD,
 	    &rack_non_fto_send, "Total number of rack_output first sends");
 	rack_extended_rfo = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "rfo_extended", CTLFLAG_RD,
 	    &rack_extended_rfo, "Total number of times we extended rfo");
 
 	rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
 	    &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
 	rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);
 
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "hwpace_lost", CTLFLAG_RD,
 	    &rack_hw_pace_lost, "Total number of times we lost the use of hw pacing");
 	rack_tlp_tot = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "tlp_to_total", CTLFLAG_RD,
 	    &rack_tlp_tot,
 	    "Total number of tail loss probe expirations");
 	rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "tlp_new", CTLFLAG_RD,
 	    &rack_tlp_newdata,
 	    "Total number of tail loss probe sending new data");
 	rack_tlp_retran = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "tlp_retran", CTLFLAG_RD,
 	    &rack_tlp_retran,
 	    "Total number of tail loss probe sending retransmitted data");
 	rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
 	    &rack_tlp_retran_bytes,
 	    "Total bytes of tail loss probe sending retransmitted data");
 	rack_to_tot = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "rack_to_tot", CTLFLAG_RD,
 	    &rack_to_tot,
 	    "Total number of times the rack to expired");
 	rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "saw_enobufs", CTLFLAG_RD,
 	    &rack_saw_enobuf,
 	    "Total number of times a sends returned enobuf for non-hdwr paced connections");
 	rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
 	    &rack_saw_enobuf_hw,
 	    "Total number of times a send returned enobuf for hdwr paced connections");
 	rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
 	    &rack_saw_enetunreach,
 	    "Total number of times a send received a enetunreachable");
 	rack_hot_alloc = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "alloc_hot", CTLFLAG_RD,
 	    &rack_hot_alloc,
 	    "Total allocations from the top of our list");
 	rack_to_alloc = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "allocs", CTLFLAG_RD,
 	    &rack_to_alloc,
 	    "Total allocations of tracking structures");
 	rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "allochard", CTLFLAG_RD,
 	    &rack_to_alloc_hard,
 	    "Total allocations done with sleeping the hard way");
 	rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "allocemerg", CTLFLAG_RD,
 	    &rack_to_alloc_emerg,
 	    "Total allocations done from emergency cache");
 	rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "alloc_limited", CTLFLAG_RD,
 	    &rack_to_alloc_limited,
 	    "Total allocations dropped due to limit");
 	rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
 	    &rack_alloc_limited_conns,
 	    "Connections with allocations dropped due to limit");
 	rack_split_limited = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "split_limited", CTLFLAG_RD,
 	    &rack_split_limited,
 	    "Split allocations dropped due to limit");
 	rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD,
 	    &rack_rxt_clamps_cwnd,
 	    "Number of times that excessive rxt clamped the cwnd down");
 	rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD,
 	    &rack_rxt_clamps_cwnd_uniq,
 	    "Number of connections that have had excessive rxt clamped the cwnd down");
 	rack_persists_sends = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "persist_sends", CTLFLAG_RD,
 	    &rack_persists_sends,
 	    "Number of times we sent a persist probe");
 	rack_persists_acks = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "persist_acks", CTLFLAG_RD,
 	    &rack_persists_acks,
 	    "Number of times a persist probe was acked");
 	rack_persists_loss = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "persist_loss", CTLFLAG_RD,
 	    &rack_persists_loss,
 	    "Number of times we detected a lost persist probe (no ack)");
 	rack_persists_lost_ends = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "persist_loss_ends", CTLFLAG_RD,
 	    &rack_persists_lost_ends,
 	    "Number of lost persist probe (no ack) that the run ended with a PERSIST abort");
 #ifdef INVARIANTS
 	rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "map_adjust_req", CTLFLAG_RD,
 	    &rack_adjust_map_bw,
 	    "Number of times we hit the case where the sb went up and down on a sendmap entry");
 #endif
 	rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
 	    &rack_multi_single_eq,
 	    "Number of compressed acks total represented");
 	rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
 	    &rack_proc_non_comp_ack,
 	    "Number of non compresseds acks that we processed");
 
 
 	rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "sack_long", CTLFLAG_RD,
 	    &rack_sack_proc_all,
 	    "Total times we had to walk whole list for sack processing");
 	rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "sack_restart", CTLFLAG_RD,
 	    &rack_sack_proc_restart,
 	    "Total times we had to walk whole list due to a restart");
 	rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "sack_short", CTLFLAG_RD,
 	    &rack_sack_proc_short,
 	    "Total times we took shortcut for sack processing");
 	rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "skipacked", CTLFLAG_RD,
 	    &rack_sack_skipped_acked,
 	    "Total number of times we skipped previously sacked");
 	rack_sack_splits = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_attack),
 	    OID_AUTO, "ofsplit", CTLFLAG_RD,
 	    &rack_sack_splits,
 	    "Total number of times we did the old fashion tree split");
 	rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
 	    &rack_input_idle_reduces,
 	    "Total number of idle reductions on input");
 	rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "collapsed_win_seen", CTLFLAG_RD,
 	    &rack_collapsed_win_seen,
 	    "Total number of collapsed window events seen (where our window shrinks)");
 
 	rack_collapsed_win = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "collapsed_win", CTLFLAG_RD,
 	    &rack_collapsed_win,
 	    "Total number of collapsed window events where we mark packets");
 	rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD,
 	    &rack_collapsed_win_rxt,
 	    "Total number of packets that were retransmitted");
 	rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD,
 	    &rack_collapsed_win_rxt_bytes,
 	    "Total number of bytes that were retransmitted");
 	rack_try_scwnd = counter_u64_alloc(M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_counters),
 	    OID_AUTO, "tried_scwnd", CTLFLAG_RD,
 	    &rack_try_scwnd,
 	    "Total number of scwnd attempts");
 	COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO, "outsize", CTLFLAG_RD,
 	    rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
 	COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
 	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO, "opts", CTLFLAG_RD,
 	    rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
 	SYSCTL_ADD_PROC(&rack_sysctl_ctx,
 	    SYSCTL_CHILDREN(rack_sysctl_root),
 	    OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 	    &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
 }
 
 static uint32_t
 rc_init_window(struct tcp_rack *rack)
 {
 	uint32_t win;
 
 	if (rack->rc_init_win == 0) {
 		/*
 		 * Nothing set by the user, use the system stack
 		 * default.
 		 */
 		return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)));
 	}
 	win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win;
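 	/*
 	 * Illustrative sketch (hypothetical numbers): with rc_init_win = 20
 	 * and a fixed maxseg of 1448 bytes, win = 20 * 1448 = 28960 bytes.
 	 */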
 	return (win);
 }
 
 static uint64_t
 rack_get_fixed_pacing_bw(struct tcp_rack *rack)
 {
 	if (IN_FASTRECOVERY(rack->rc_tp->t_flags))
 		return (rack->r_ctl.rc_fixed_pacing_rate_rec);
 	else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
 		return (rack->r_ctl.rc_fixed_pacing_rate_ss);
 	else
 		return (rack->r_ctl.rc_fixed_pacing_rate_ca);
 }
 
 static void
 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim,
 	uint64_t data, uint8_t mod, uint16_t aux,
 	struct tcp_sendfile_track *cur, int line)
 {
 #ifdef TCP_REQUEST_TRK
 	int do_log = 0;
 
 	/*
 	 * The rate cap ones are noisy and should only come out when normal BB logging
 	 * is enabled; the other logs (not RATE_CAP and not CAP_CALC) only come out
 	 * once per chunk and make up the BBpoint that can be turned on by the client.
 	 */
 	if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) {
 		/*
 		 * The very noisy two need to only come out when
 		 * we have verbose logging on.
 		 */
 		if (rack_verbose_logging != 0)
 			do_log = tcp_bblogging_on(rack->rc_tp);
 		else
 			do_log = 0;
 	} else if (mod != HYBRID_LOG_BW_MEASURE) {
 		/*
 		 * All other, less noisy, logs come out here except the measure,
 		 * which also needs to come out on the point and in the log.
 		 */
 		do_log = tcp_bblogging_on(rack->rc_tp);
 	} else {
 		do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING);
 	}
 
 	if (do_log) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 		uint64_t lt_bw;
 
 		/* Convert our ms to a microsecond */
 		memset(&log, 0, sizeof(log));
 
 		log.u_bbr.cwnd_gain = line;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.rttProp = tim;
 		log.u_bbr.bw_inuse = cbw;
 		log.u_bbr.delRate = rack_get_gp_est(rack);
 		lt_bw = rack_get_lt_bw(rack);
 		log.u_bbr.flex1 = seq;
 		log.u_bbr.pacing_gain = aux;
 		/* lt_bw = < flex3 | flex2 > */
 		log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff);
 		log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff);
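 		/*
 		 * A log reader can reassemble the 64-bit value as
 		 * lt_bw = ((uint64_t)flex3 << 32) | flex2.
 		 */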
 		/* Record the last obtained us rtt in inflight */
 		if (cur == NULL) {
 			/* Make sure we are looking at the right log if an override comes in */
 			cur = rack->r_ctl.rc_last_sft;
 		}
 		if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY)
 			log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt;
 		else {
 			/* Use the last known rtt i.e. the rack-rtt */
 			log.u_bbr.inflight = rack->rc_rack_rtt;
 		}
 		if (cur != NULL) {
 			uint64_t off;
 
 			log.u_bbr.cur_del_rate = cur->deadline;
 			if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) {
 				/* start = < lost | pkt_epoch > */
 				log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff);
 				log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff);
 				log.u_bbr.flex6 = cur->start_seq;
 				log.u_bbr.pkts_out = cur->end_seq;
 			} else {
 				/* start = < lost | pkt_epoch > */
 				log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff);
 				log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff);
 				/* end = < pkts_out | flex6 > */
 				log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff);
 				log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff);
 			}
 			/* first_send = <lt_epoch | epoch> */
 			log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff);
 			log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff);
 			/* localtime = <delivered | applimited>*/
 			log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff);
 			log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
 #ifdef TCP_REQUEST_TRK
 			off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
 			log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track));
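 			/*
 			 * bbr_substate thus records which t_tcpreq_info[] slot
 			 * (i.e. which tracked request) this log entry refers to.
 			 */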
 #endif
 			log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs);
 			log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs);
 			log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags;
 		} else {
 			log.u_bbr.flex7 = 0xffff;
 			log.u_bbr.cur_del_rate = 0xffffffffffffffff;
 		}
 		/*
 		 * Compose bbr_state to be a bitwise 0000ADHF
 		 * where A is the always_pace flag
 		 * where D is the dgp_on flag
 		 * where H is the hybrid_mode on flag
 		 * where F is the use_fixed_rate flag.
 		 */
 		log.u_bbr.bbr_state = rack->rc_always_pace;
 		log.u_bbr.bbr_state <<= 1;
 		log.u_bbr.bbr_state |= rack->dgp_on;
 		log.u_bbr.bbr_state <<= 1;
 		log.u_bbr.bbr_state |= rack->rc_hybrid_mode;
 		log.u_bbr.bbr_state <<= 1;
 		log.u_bbr.bbr_state |= rack->use_fixed_rate;
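 		/*
 		 * For example (hypothetical state), bbr_state = 0xa (1010b)
 		 * would decode to always_pace and hybrid_mode set, with dgp_on
 		 * and use_fixed_rate clear.
 		 */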
 		log.u_bbr.flex8 = mod;
 		tcp_log_event(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_HYBRID_PACING_LOG, 0,
 		    0, &log, false, NULL, __func__, __LINE__, &tv);
 
 	}
 #endif
 }
 
 #ifdef TCP_REQUEST_TRK
 static void
 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line)
 {
 	if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 		uint64_t off;
 
 		/* Convert our ms to a microsecond */
 		memset(&log, 0, sizeof(log));
 
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes;
 		log.u_bbr.delRate = cur->sent_at_fs;
 		log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes;
 		log.u_bbr.bw_inuse = cur->rxt_at_fs;
 		log.u_bbr.cwnd_gain = line;
 		off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
 		log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track));
 		/* start = < flex1 | flex2 > */
 		log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff);
 		log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff);
 		/* end = < flex3 | flex4 > */
 		log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff);
 		log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff);
 
 		/* localtime = <delivered | applimited>*/
 		log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff);
 		log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
 		/* client timestamp = <lt_epoch | epoch>*/
 		log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff);
 		log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff);
 		/* now set all the flags in */
 		log.u_bbr.pkts_out = cur->hybrid_flags;
 		log.u_bbr.flex6 = cur->flags;
 		/*
 		 * Last send time = <flex5 | pkt_epoch>. Note we do not distinguish cases
 		 * where a false retransmit occurred, so first_send <-> lastsend may
 		 * include a longer time than it actually took if we have a false rxt.
 		 */
 		log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff);
 		log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff);
 
 		log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST;
 		tcp_log_event(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_HYBRID_PACING_LOG, 0,
 		    0, &log, false, NULL, __func__, __LINE__, &tv);
 	}
 }
 #endif
 
 static inline uint64_t
 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw)
 {
 	uint64_t ret_bw, ether;
 	uint64_t u_segsiz;
 
 	ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr);
 	if (rack->r_is_v6) {
 #ifdef INET6
 		ether += sizeof(struct ip6_hdr);
 #endif
 		ether += 14;	/* eheader size 6+6+2 */
 	} else {
 #ifdef INET
 		ether += sizeof(struct ip);
 #endif
 		ether += 14;	/* eheader size 6+6+2 */
 	}
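 	/*
 	 * Illustrative sketch (hypothetical numbers): for IPv6 with a
 	 * 1440-byte t_maxseg, ether = 1440 + 20 (tcphdr) + 40 (ip6_hdr) +
 	 * 14 (ethernet) = 1514, so the passed-in b/w is scaled up by
 	 * roughly ether / u_segsiz to approximate the on-the-wire rate.
 	 */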
 	u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs);
 	ret_bw = bw;
 	ret_bw *= ether;
 	ret_bw /= u_segsiz;
 	return (ret_bw);
 }
 
 static void
 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped)
 {
 #ifdef TCP_REQUEST_TRK
 	struct timeval tv;
 	uint64_t timenow, timeleft, lenleft, lengone, calcbw;
 #endif
 
 	if (rack->r_ctl.bw_rate_cap == 0)
 		return;
 #ifdef TCP_REQUEST_TRK
 	if (rack->rc_catch_up && rack->rc_hybrid_mode &&
 	    (rack->r_ctl.rc_last_sft != NULL)) {
 		/*
 		 * We have a dynamic cap. The original target
 		 * is in bw_rate_cap, but we need to look at
 		 * how long it is until we hit the deadline.
 		 */
 		struct tcp_sendfile_track *ent;
 
 		ent = rack->r_ctl.rc_last_sft;
 		microuptime(&tv);
 		timenow = tcp_tv_to_lusectick(&tv);
 		if (timenow >= ent->deadline) {
 			/* No time left we do DGP only */
 			rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 					   0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__);
 			rack->r_ctl.bw_rate_cap = 0;
 			return;
 		}
 		/* We have the time */
 		timeleft = rack->r_ctl.rc_last_sft->deadline - timenow;
 		if (timeleft < HPTS_MSEC_IN_SEC) {
 			/* If there is less than a ms left just use DGPs rate */
 			rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 					   0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__);
 			rack->r_ctl.bw_rate_cap = 0;
 			return;
 		}
 		/*
 		 * Now lets find the amount of data left to send.
 		 *
 		 * Ideally we want to use the end_seq to figure out how much more,
 		 * but that is only possible if we have TCP_TRK_TRACK_FLG_COMP set on the entry.
 		 */
 		if (ent->flags & TCP_TRK_TRACK_FLG_COMP) {
 			if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una))
 				lenleft = ent->end_seq - rack->rc_tp->snd_una;
 			else {
 				/* TSNH, we should catch it at the send */
 				rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 						   0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__);
 				rack->r_ctl.bw_rate_cap = 0;
 				return;
 			}
 		} else {
 			/*
 			 * The hard way, figure out how much is gone and then
 			 * take that away from the total the client asked for
 			 * (that's off by the TLS overhead if this is TLS).
 			 */
 			if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq))
 				lengone = rack->rc_tp->snd_una - ent->start_seq;
 			else
 				lengone = 0;
 			if (lengone < (ent->end - ent->start))
 				lenleft = (ent->end - ent->start) - lengone;
 			else {
 				/* TSNH, we should catch it at the send */
 				rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 						   0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__);
 				rack->r_ctl.bw_rate_cap = 0;
 				return;
 			}
 		}
 		if (lenleft == 0) {
 			/* We have it all sent */
 			rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 					   0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__);
 			if (rack->r_ctl.bw_rate_cap)
 				goto normal_ratecap;
 			else
 				return;
 		}
 		calcbw = lenleft * HPTS_USEC_IN_SEC;
 		calcbw /= timeleft;
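 		/*
 		 * Sketch with hypothetical numbers: 2,500,000 bytes left and
 		 * 2 seconds (2,000,000 usec) to the deadline gives
 		 * calcbw = 2,500,000 * 1,000,000 / 2,000,000 = 1,250,000
 		 * bytes/sec (10 Mbps) before the line-rate compensation below.
 		 */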
 		/* Now we must compensate for IP/TCP overhead */
 		calcbw = rack_compensate_for_linerate(rack, calcbw);
 		/* Update the bit rate cap */
 		rack->r_ctl.bw_rate_cap = calcbw;
 		if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) &&
 		    (rack_hybrid_allow_set_maxseg == 1) &&
 		    ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) {
 			/* Lets set in a smaller mss possibly here to match our rate-cap */
 			uint32_t orig_max;
 
 			orig_max = rack->r_ctl.rc_pace_max_segs;
 			rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS;
 			rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp));
 			rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5);
 		}
 		rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 				   calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__);
 		if ((calcbw > 0) && (*bw > calcbw)) {
 			rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 					   *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__);
 			*capped = 1;
 			*bw = calcbw;
 		}
 		return;
 	}
 normal_ratecap:
 #endif
 	if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) {
 #ifdef TCP_REQUEST_TRK
 		if (rack->rc_hybrid_mode &&
 		    rack->rc_catch_up &&
 		    (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) &&
 		    (rack_hybrid_allow_set_maxseg == 1) &&
 		    ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) {
 			/* Lets set in a smaller mss possibly here to match our rate-cap */
 			uint32_t orig_max;
 
 			orig_max = rack->r_ctl.rc_pace_max_segs;
 			rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS;
 			rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp));
 			rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5);
 		}
 #endif
 		*capped = 1;
 		*bw = rack->r_ctl.bw_rate_cap;
 		rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 				   *bw, 0, 0,
 				   HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__);
 	}
 }
 
 static uint64_t
 rack_get_gp_est(struct tcp_rack *rack)
 {
 	uint64_t bw, lt_bw, ret_bw;
 
 	if (rack->rc_gp_filled == 0) {
 		/*
 		 * We have no b/w measurement yet,
 		 * if we have a user set initial bw
 		 * return it. If we don't have that and
 		 * we have an srtt, use the tcp IW (10) to
 		 * calculate a fictional b/w over the SRTT
 		 * which is more or less a guess. Note
 		 * we don't use our IW from rack on purpose
 		 * so if we have like IW=30, we are not
 		 * calculating a "huge" b/w.
 		 */
 		uint64_t srtt;
 
 		lt_bw = rack_get_lt_bw(rack);
 		if (lt_bw) {
 			/*
 			 * No goodput bw but a long-term b/w does exist
 			 * lets use that.
 			 */
 			ret_bw = lt_bw;
 			goto compensate;
 		}
 		if (rack->r_ctl.init_rate)
 			return (rack->r_ctl.init_rate);
 
 		/* Ok lets come up with the IW guess, if we have a srtt */
 		if (rack->rc_tp->t_srtt == 0) {
 			/*
 			 * Go with old pacing method
 			 * i.e. burst mitigation only.
 			 */
 			return (0);
 		}
 		/* Ok lets get the initial TCP win (not racks) */
 		bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp));
 		srtt = (uint64_t)rack->rc_tp->t_srtt;
 		bw *= (uint64_t)USECS_IN_SECOND;
 		bw /= srtt;
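 		/*
 		 * Hypothetical example: an initial window of 10 * 1460 =
 		 * 14,600 bytes over a 100 ms (100,000 usec) srtt yields
 		 * bw = 14,600 * 1,000,000 / 100,000 = 146,000 bytes/sec
 		 * (about 1.2 Mbps) as the fictional starting estimate.
 		 */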
 		ret_bw = bw;
 		goto compensate;
 
 	}
 	if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
 		/* Averaging is done, we can return the value */
 		bw = rack->r_ctl.gp_bw;
 	} else {
 		/* Still doing the initial average, must calculate */
 		bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1);
 	}
 	lt_bw = rack_get_lt_bw(rack);
 	if (lt_bw == 0) {
 		/* If we don't have one then equate it to the gp_bw */
 		lt_bw = rack->r_ctl.gp_bw;
 	}
 	if ((rack->r_cwnd_was_clamped == 1) && (rack->r_clamped_gets_lower > 0)){
 		/*  if clamped take the lowest */
 		if (lt_bw < bw)
 			ret_bw = lt_bw;
 		else
 			ret_bw = bw;
 	} else {
 		/* If not set for clamped to get lowest, take the highest */
 		if (lt_bw > bw)
 			ret_bw = lt_bw;
 		else
 			ret_bw = bw;
 	}
 	/*
 	 * Now lets compensate based on the TCP/IP overhead. Our
 	 * Goodput estimate does not include this so we must pace out
 	 * a bit faster since our pacing calculations do. The pacing
 	 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz
 	 * we are using to do this, so we do that here in the opposite
 	 * direction as well. This means that if we are tunneled and the
 	 * segsiz is say 1200 bytes we will get quite a boost, but its
 	 * compensated for in the pacing time the opposite way.
 	 */
 compensate:
 	ret_bw = rack_compensate_for_linerate(rack, ret_bw);
 	return(ret_bw);
 }
 
 
 static uint64_t
 rack_get_bw(struct tcp_rack *rack)
 {
 	uint64_t bw;
 
 	if (rack->use_fixed_rate) {
 		/* Return the fixed pacing rate */
 		return (rack_get_fixed_pacing_bw(rack));
 	}
 	bw = rack_get_gp_est(rack);
 	return (bw);
 }
 
 static uint16_t
 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
 {
 	if (rack->use_fixed_rate) {
 		return (100);
 	} else if (rack->in_probe_rtt && (rsm == NULL))
 		return (rack->r_ctl.rack_per_of_gp_probertt);
 	else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
 		  rack->r_ctl.rack_per_of_gp_rec)) {
 		if (rsm) {
 			/* a retransmission always uses the recovery rate */
 			return (rack->r_ctl.rack_per_of_gp_rec);
 		} else if (rack->rack_rec_nonrxt_use_cr) {
 			/* Directed to use the configured rate */
 			goto configured_rate;
 		} else if (rack->rack_no_prr &&
 			   (rack->r_ctl.rack_per_of_gp_rec > 100)) {
 			/* No PRR, lets just use the b/w estimate only */
 			return (100);
 		} else {
 			/*
 			 * Here we may have a non-retransmit but we
 			 * have no overrides, so just use the recovery
 			 * rate (prr is in effect).
 			 */
 			return (rack->r_ctl.rack_per_of_gp_rec);
 		}
 	}
 configured_rate:
 	/* For the configured rate we look at our cwnd vs the ssthresh */
 	if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh)
 		return (rack->r_ctl.rack_per_of_gp_ss);
 	else
 		return (rack->r_ctl.rack_per_of_gp_ca);
 }
 
 static void
 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6)
 {
 	/*
 	 * Types of logs (mod value)
 	 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit.
 	 * 2 = a dsack round begins, persist is reset to 16.
 	 * 3 = a dsack round ends
 	 * 4 = Dsack option increases rack rtt; flex5 is the srtt input, flex6 is thresh
 	 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack
 	 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh.
 	 */
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = rack->rc_rack_tmr_std_based;
 		log.u_bbr.flex1 <<= 1;
 		log.u_bbr.flex1 |= rack->rc_rack_use_dsack;
 		log.u_bbr.flex1 <<= 1;
 		log.u_bbr.flex1 |= rack->rc_dsack_round_seen;
 		log.u_bbr.flex2 = rack->r_ctl.dsack_round_end;
 		log.u_bbr.flex3 = rack->r_ctl.num_dsack;
 		log.u_bbr.flex4 = flex4;
 		log.u_bbr.flex5 = flex5;
 		log.u_bbr.flex6 = flex6;
 		log.u_bbr.flex7 = rack->r_ctl.dsack_persist;
 		log.u_bbr.flex8 = mod;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    RACK_DSACK_HANDLING, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_hdwr_pacing(struct tcp_rack *rack,
 		     uint64_t rate, uint64_t hw_rate, int line,
 		     int error, uint16_t mod)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 		const struct ifnet *ifp;
 
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff);
 		log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff);
 		if (rack->r_ctl.crte) {
 			ifp = rack->r_ctl.crte->ptbl->rs_ifp;
 		} else if (rack->rc_inp->inp_route.ro_nh &&
 			   rack->rc_inp->inp_route.ro_nh->nh_ifp) {
 			ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp;
 		} else
 			ifp = NULL;
 		if (ifp) {
 			log.u_bbr.flex3 = (((uint64_t)ifp  >> 32) & 0x00000000ffffffff);
 			log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff);
 		}
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.bw_inuse = rate;
 		log.u_bbr.flex5 = line;
 		log.u_bbr.flex6 = error;
 		log.u_bbr.flex7 = mod;
 		log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex8 = rack->use_fixed_rate;
 		log.u_bbr.flex8 <<= 1;
 		log.u_bbr.flex8 |= rack->rack_hdrw_pacing;
 		log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
 		log.u_bbr.delRate = rack->r_ctl.crte_prev_rate;
 		if (rack->r_ctl.crte)
 			log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate;
 		else
 			log.u_bbr.cur_del_rate = 0;
 		log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_HDWR_PACE, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static uint64_t
 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
 {
 	/*
 	 * We allow rack_per_of_gp_xx to dictate our bw rate we want.
 	 */
 	uint64_t bw_est, high_rate;
 	uint64_t gain;
 
 	if ((rack->r_pacing_discount == 0) ||
 	    (rack_full_buffer_discount == 0)) {
 		/*
 		 * Either no discount based on the client buffer level
 		 * is in place, or the feature is disabled.
 		 */
 		gain = (uint64_t)rack_get_output_gain(rack, rsm);
 		bw_est = bw * gain;
 		bw_est /= (uint64_t)100;
 	} else {
 		/*
 		 * We have a discount in place; apply it with
 		 * just a 100% gain (we get no boost if the buffer
 		 * is full).
 		 */
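 		/*
 		 * Illustrative example (hypothetical values): with
 		 * rack_full_buffer_discount = 10 and a pacing_discount_amm
 		 * of 2, a 100 Mbps estimate is discounted by 20% down to
 		 * 80 Mbps.
 		 */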
 		uint64_t discount;
 
 		discount = bw * (uint64_t)(rack_full_buffer_discount * rack->r_ctl.pacing_discount_amm);
 		discount /= 100;
 		/* What %% of the b/w do we discount */
 		bw_est = bw - discount;
 	}
 	/* Never fall below the minimum (def 64kbps) */
 	if (bw_est < RACK_MIN_BW)
 		bw_est = RACK_MIN_BW;
 	if (rack->r_rack_hw_rate_caps) {
 		/* Rate caps are in place */
 		if (rack->r_ctl.crte != NULL) {
 			/* We have a hdwr rate already */
 			high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
 			if (bw_est >= high_rate) {
 				/* We are capping bw at the highest rate table entry */
 				if (rack_hw_rate_cap_per &&
 				    (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) {
 					rack->r_rack_hw_rate_caps = 0;
 					goto done;
 				}
 				rack_log_hdwr_pacing(rack,
 						     bw_est, high_rate, __LINE__,
 						     0, 3);
 				bw_est = high_rate;
 				if (capped)
 					*capped = 1;
 			}
 		} else if ((rack->rack_hdrw_pacing == 0) &&
 			   (rack->rack_hdw_pace_ena) &&
 			   (rack->rack_attempt_hdwr_pace == 0) &&
 			   (rack->rc_inp->inp_route.ro_nh != NULL) &&
 			   (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
 			/*
 			 * Special case: we have not yet attempted hardware
 			 * pacing, but when we do we may find out that we are
 			 * above the highest rate. We need to know the maxbw
 			 * for the interface in question (if it supports
 			 * ratelimiting). We get back 0 if the interface is
 			 * not found in the RL lists.
 			 */
 			high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
 			if (high_rate) {
 				/* Yep, we have a rate; is our estimate above it? */
 				if (bw_est > high_rate) {
 					bw_est = high_rate;
 					if (capped)
 						*capped = 1;
 				}
 			}
 		}
 	}
 done:
 	return (bw_est);
 }
 
 static void
 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		if (rack->sack_attack_disable > 0)
 			goto log_anyway;
 		if ((mod != 1) && (rack_verbose_logging == 0))  {
 			/*
 			 * We get 3 values currently for mod
 			 * 1 - We are retransmitting and this tells the reason.
 			 * 2 - We are clearing a dup-ack count.
 			 * 3 - We are incrementing a dup-ack count.
 			 *
 			 * The clear/increment are only logged
 			 * if you have BBverbose on.
 			 */
 			return;
 		}
 log_anyway:
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex1 = tsused;
 		log.u_bbr.flex2 = thresh;
 		log.u_bbr.flex3 = rsm->r_flags;
 		log.u_bbr.flex4 = rsm->r_dupack;
 		log.u_bbr.flex5 = rsm->r_start;
 		log.u_bbr.flex6 = rsm->r_end;
 		log.u_bbr.flex8 = mod;
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_SETTINGS_CHG, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex1 = rack->rc_tp->t_srtt;
 		log.u_bbr.flex2 = to;
 		log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex4 = slot;
 		log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot;
 		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
 		log.u_bbr.flex7 = rack->rc_in_persist;
 		log.u_bbr.flex8 = which;
 		if (rack->rack_no_prr)
 			log.u_bbr.pkts_out = 0;
 		else
 			log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		log.u_bbr.cwnd_gain = rack->rack_deferred_inited;
 		log.u_bbr.pkt_epoch = rack->rc_has_collapsed;
 		log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift;
 		log.u_bbr.lost = rack_rto_min;
 		log.u_bbr.epoch = rack->r_ctl.roundends;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TIMERSTAR, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.flex8 = to_num;
 		log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt;
 		log.u_bbr.flex2 = rack->rc_rack_rtt;
 		if (rsm == NULL)
 			log.u_bbr.flex3 = 0;
 		else
 			log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
 		if (rack->rack_no_prr)
 			log.u_bbr.flex5 = 0;
 		else
 			log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_RTO, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack,
 		 struct rack_sendmap *prev,
 		 struct rack_sendmap *rsm,
 		 struct rack_sendmap *next,
 		 int flag, uint32_t th_ack, int line)
 {
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex8 = flag;
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.cur_del_rate = (uint64_t)prev;
 		log.u_bbr.delRate = (uint64_t)rsm;
 		log.u_bbr.rttProp = (uint64_t)next;
 		log.u_bbr.flex7 = 0;
 		if (prev) {
 			log.u_bbr.flex1 = prev->r_start;
 			log.u_bbr.flex2 = prev->r_end;
 			log.u_bbr.flex7 |= 0x4;
 		}
 		if (rsm) {
 			log.u_bbr.flex3 = rsm->r_start;
 			log.u_bbr.flex4 = rsm->r_end;
 			log.u_bbr.flex7 |= 0x2;
 		}
 		if (next) {
 			log.u_bbr.flex5 = next->r_start;
 			log.u_bbr.flex6 = next->r_end;
 			log.u_bbr.flex7 |= 0x1;
 		}
 		log.u_bbr.applimited = line;
 		log.u_bbr.pkts_out = th_ack;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		if (rack->rack_no_prr)
 			log.u_bbr.lost = 0;
 		else
 			log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_LOG_MAPCHG, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len,
 		 struct rack_sendmap *rsm, int conf)
 {
 	if (tcp_bblogging_on(tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.flex1 = t;
 		log.u_bbr.flex2 = len;
 		log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt;
 		log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
 		log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
 		log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt;
 		log.u_bbr.flex7 = conf;
 		log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot;
 		log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt;
 		log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		if (rsm) {
 			log.u_bbr.pkt_epoch = rsm->r_start;
 			log.u_bbr.lost = rsm->r_end;
 			log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
 			/* We lose any upper bits of the 24-bit flags */
 			log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags;
 		} else {
 			/* It's a SYN */
 			log.u_bbr.pkt_epoch = rack->rc_tp->iss;
 			log.u_bbr.lost = 0;
 			log.u_bbr.cwnd_gain = 0;
 			log.u_bbr.pacing_gain = 0;
 		}
 		/* Write out general bits of interest rrs here */
 		log.u_bbr.use_lt_bw = rack->rc_highly_buffered;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->forced_ack;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom;
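 		/*
 		 * use_lt_bw now holds, from its high bit to its low bit:
 		 * rc_highly_buffered, forced_ack, rc_gp_dyn_mul, in_probe_rtt,
 		 * measure_saw_probe_rtt, app_limited_needs_set, rc_gp_filled
 		 * and rc_dragged_bottom.
 		 */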
 		log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight;
 		log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts;
 		log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
 		log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
 		log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
 		log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
 		log.u_bbr.bw_inuse <<= 32;
 		if (rsm)
 			log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
 		TCP_LOG_EVENTP(tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BBRRTT, 0,
 		    0, &log, false, &tv);
 
 
 	}
 }
 
 static void
 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
 {
 	/*
 	 * Log the rtt sample we are
 	 * applying to the srtt algorithm in
 	 * useconds.
 	 */
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		/* Convert our ms to a microsecond */
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = rtt;
 		log.u_bbr.flex2 = rack->r_ctl.ack_count;
 		log.u_bbr.flex3 = rack->r_ctl.sack_count;
 		log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
 		log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
 		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
 		log.u_bbr.flex7 = 1;
 		log.u_bbr.flex8 = rack->sack_attack_disable;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		/*
 		 * We capture in delRate the upper 32 bits as
 		 * the confidence level we had declared, and the
 		 * lower 32 bits as the actual RTT using the arrival
 		 * timestamp.
 		 */
 		log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence;
 		log.u_bbr.delRate <<= 32;
 		log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt;
 		/* Lets capture all the things that make up t_rtxcur */
 		log.u_bbr.applimited = rack_rto_min;
 		log.u_bbr.epoch = rack_rto_max;
 		log.u_bbr.lt_epoch = rack->r_ctl.timer_slop;
 		log.u_bbr.lost = rack_rto_min;
 		log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop);
 		log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp);
 		log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec;
 		log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC;
 		log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_LOG_RTT, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where)
 {
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		/* Convert our ms to a microsecond */
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = rtt;
 		log.u_bbr.flex2 = send_time;
 		log.u_bbr.flex3 = ack_time;
 		log.u_bbr.flex4 = where;
 		log.u_bbr.flex7 = 2;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_LOG_RTT, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 
 static void
 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		/* Convert our ms to a microsecond */
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = idx;
 		log.u_bbr.flex2 = rack_ts_to_msec(tsv);
 		log.u_bbr.flex3 = tsecho;
 		log.u_bbr.flex7 = 3;
 		log.u_bbr.rttProp = tsv;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_LOG_RTT, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 
 static inline void
 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,  int event, int line)
 {
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = tick;
 		log.u_bbr.flex3 = tp->t_maxunacktime;
 		log.u_bbr.flex4 = tp->t_acktime;
 		log.u_bbr.flex8 = event;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		TCP_LOG_EVENTP(tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_PROGRESS, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line)
 {
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.flex1 = slot;
 		if (rack->rack_no_prr)
 			log.u_bbr.flex2 = 0;
 		else
 			log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex5 = rack->r_ctl.ack_during_sd;
 		log.u_bbr.flex6 = line;
 		log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags);
 		log.u_bbr.flex8 = rack->rc_in_persist;
 		log.u_bbr.timeStamp = cts;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BBRSND, 0,
 		    0, &log, false, tv);
 	}
 }
 
 static void
 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = did_out;
 		log.u_bbr.flex2 = nxt_pkt;
 		log.u_bbr.flex3 = way_out;
 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
 		if (rack->rack_no_prr)
 			log.u_bbr.flex5 = 0;
 		else
 			log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex6 = nsegs;
 		log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs;
 		log.u_bbr.flex7 = rack->rc_ack_can_sendout_data;	/* Do we have ack-can-send set */
 		log.u_bbr.flex7 <<= 1;
 		log.u_bbr.flex7 |= rack->r_fast_output;	/* is fast output primed */
 		log.u_bbr.flex7 <<= 1;
 		log.u_bbr.flex7 |= rack->r_wanted_output;	/* Do we want output */
 		log.u_bbr.flex8 = rack->rc_in_persist;
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->r_might_revert;
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_DOSEG_DONE, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs;
 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex4 = arg1;
 		log.u_bbr.flex5 = arg2;
 		log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs;
 		log.u_bbr.flex6 = arg3;
 		log.u_bbr.flex8 = frm;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.applimited = rack->r_ctl.rc_sacked;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
 		    &tptosocket(tp)->so_snd,
 		    TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
 			  uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.flex1 = slot;
 		log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex4 = reason;
 		if (rack->rack_no_prr)
 			log.u_bbr.flex5 = 0;
 		else
 			log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex7 = hpts_calling;
 		log.u_bbr.flex8 = rack->rc_in_persist;
 		log.u_bbr.lt_epoch = cwnd_to_use;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		log.u_bbr.cwnd_gain = rack->rc_has_collapsed;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_JUSTRET, 0,
 		    tlen, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts,
 		   struct timeval *tv, uint32_t flags_on_entry)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to;
 		log.u_bbr.flex3 = flags_on_entry;
 		log.u_bbr.flex4 = us_cts;
 		if (rack->rack_no_prr)
 			log.u_bbr.flex5 = 0;
 		else
 			log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
 		log.u_bbr.flex7 = hpts_removed;
 		log.u_bbr.flex8 = 1;
 		log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags;
 		log.u_bbr.timeStamp = us_cts;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TIMERCANC, 0,
 		    0, &log, false, tv);
 	}
 }
 
 static void
 rack_log_alt_to_to_cancel(struct tcp_rack *rack,
 			  uint32_t flex1, uint32_t flex2,
 			  uint32_t flex3, uint32_t flex4,
 			  uint32_t flex5, uint32_t flex6,
 			  uint16_t flex7, uint8_t mod)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		if (mod == 1) {
 			/* No, you can't use 1; it's for the real timeout cancel */
 			return;
 		}
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.flex1 = flex1;
 		log.u_bbr.flex2 = flex2;
 		log.u_bbr.flex3 = flex3;
 		log.u_bbr.flex4 = flex4;
 		log.u_bbr.flex5 = flex5;
 		log.u_bbr.flex6 = flex6;
 		log.u_bbr.flex7 = flex7;
 		log.u_bbr.flex8 = mod;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TIMERCANC, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers)
 {
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex1 = timers;
 		log.u_bbr.flex2 = ret;
 		log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp;
 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex5 = cts;
 		if (rack->rack_no_prr)
 			log.u_bbr.flex6 = 0;
 		else
 			log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
 		log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
 		log.u_bbr.pacing_gain = rack->r_must_retran;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_TO_PROCESS, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex1 = rack->r_ctl.rc_prr_out;
 		log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs;
 		if (rack->rack_no_prr)
 			log.u_bbr.flex3 = 0;
 		else
 			log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered;
 		log.u_bbr.flex5 = rack->r_ctl.rc_sacked;
 		log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt;
 		log.u_bbr.flex7 = line;
 		log.u_bbr.flex8 = frm;
 		log.u_bbr.pkts_out = orig_cwnd;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->r_might_revert;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_BBRUPD, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 #ifdef TCP_SAD_DETECTION
 static void
 rack_log_sad(struct tcp_rack *rack, int event)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex1 = rack->r_ctl.sack_count;
 		log.u_bbr.flex2 = rack->r_ctl.ack_count;
 		log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra;
 		log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
 		log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced;
 		log.u_bbr.flex6 = tcp_sack_to_ack_thresh;
 		log.u_bbr.pkts_out = tcp_sack_to_move_thresh;
 		log.u_bbr.lt_epoch = (tcp_force_detection << 8);
 		log.u_bbr.lt_epoch |= rack->do_detection;
 		log.u_bbr.applimited = tcp_map_minimum;
 		log.u_bbr.flex7 = rack->sack_attack_disable;
 		log.u_bbr.flex8 = event;
 		log.u_bbr.bbr_state = rack->rc_suspicious;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.delivered = tcp_sad_decay_val;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_SAD_DETECT, 0,
 		    0, &log, false, &tv);
 	}
 }
 #endif
 
 static void
 rack_counter_destroy(void)
 {
 	counter_u64_free(rack_total_bytes);
 	counter_u64_free(rack_fto_send);
 	counter_u64_free(rack_fto_rsm_send);
 	counter_u64_free(rack_nfto_resend);
 	counter_u64_free(rack_hw_pace_init_fail);
 	counter_u64_free(rack_hw_pace_lost);
 	counter_u64_free(rack_non_fto_send);
 	counter_u64_free(rack_extended_rfo);
 	counter_u64_free(rack_ack_total);
 	counter_u64_free(rack_express_sack);
 	counter_u64_free(rack_sack_total);
 	counter_u64_free(rack_move_none);
 	counter_u64_free(rack_move_some);
 	counter_u64_free(rack_sack_attacks_detected);
 	counter_u64_free(rack_sack_attacks_reversed);
 	counter_u64_free(rack_sack_attacks_suspect);
 	counter_u64_free(rack_sack_used_next_merge);
 	counter_u64_free(rack_sack_used_prev_merge);
 	counter_u64_free(rack_tlp_tot);
 	counter_u64_free(rack_tlp_newdata);
 	counter_u64_free(rack_tlp_retran);
 	counter_u64_free(rack_tlp_retran_bytes);
 	counter_u64_free(rack_to_tot);
 	counter_u64_free(rack_saw_enobuf);
 	counter_u64_free(rack_saw_enobuf_hw);
 	counter_u64_free(rack_saw_enetunreach);
 	counter_u64_free(rack_hot_alloc);
 	counter_u64_free(rack_to_alloc);
 	counter_u64_free(rack_to_alloc_hard);
 	counter_u64_free(rack_to_alloc_emerg);
 	counter_u64_free(rack_to_alloc_limited);
 	counter_u64_free(rack_alloc_limited_conns);
 	counter_u64_free(rack_split_limited);
 	counter_u64_free(rack_multi_single_eq);
 	counter_u64_free(rack_rxt_clamps_cwnd);
 	counter_u64_free(rack_rxt_clamps_cwnd_uniq);
 	counter_u64_free(rack_proc_non_comp_ack);
 	counter_u64_free(rack_sack_proc_all);
 	counter_u64_free(rack_sack_proc_restart);
 	counter_u64_free(rack_sack_proc_short);
 	counter_u64_free(rack_sack_skipped_acked);
 	counter_u64_free(rack_sack_splits);
 	counter_u64_free(rack_input_idle_reduces);
 	counter_u64_free(rack_collapsed_win);
 	counter_u64_free(rack_collapsed_win_rxt);
 	counter_u64_free(rack_collapsed_win_rxt_bytes);
 	counter_u64_free(rack_collapsed_win_seen);
 	counter_u64_free(rack_try_scwnd);
 	counter_u64_free(rack_persists_sends);
 	counter_u64_free(rack_persists_acks);
 	counter_u64_free(rack_persists_loss);
 	counter_u64_free(rack_persists_lost_ends);
 #ifdef INVARIANTS
 	counter_u64_free(rack_adjust_map_bw);
 #endif
 	COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
 	COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
 }
 
 static struct rack_sendmap *
 rack_alloc(struct tcp_rack *rack)
 {
 	struct rack_sendmap *rsm;
 
 	/*
 	 * First get the top of the list; in theory
 	 * it is the "hottest" rsm we have, possibly
 	 * just freed by ack processing.
 	 */
 	if (rack->rc_free_cnt > rack_free_cache) {
 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
 		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
 		counter_u64_add(rack_hot_alloc, 1);
 		rack->rc_free_cnt--;
 		return (rsm);
 	}
 	/*
 	 * Once we get under our free cache we probably
 	 * no longer have a "hot" one available. Let's
 	 * get one from UMA.
 	 */
 	rsm = uma_zalloc(rack_zone, M_NOWAIT);
 	if (rsm) {
 		rack->r_ctl.rc_num_maps_alloced++;
 		counter_u64_add(rack_to_alloc, 1);
 		return (rsm);
 	}
 	/*
 	 * Dig into our aux rsm's (the last two) since
 	 * UMA failed to get us one.
 	 */
 	if (rack->rc_free_cnt) {
 		counter_u64_add(rack_to_alloc_emerg, 1);
 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
 		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
 		rack->rc_free_cnt--;
 		return (rsm);
 	}
 	return (NULL);
 }
 
 static struct rack_sendmap *
 rack_alloc_full_limit(struct tcp_rack *rack)
 {
 	if ((V_tcp_map_entries_limit > 0) &&
 	    (rack->do_detection == 0) &&
 	    (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
 		counter_u64_add(rack_to_alloc_limited, 1);
 		if (!rack->alloc_limit_reported) {
 			rack->alloc_limit_reported = 1;
 			counter_u64_add(rack_alloc_limited_conns, 1);
 		}
 		return (NULL);
 	}
 	return (rack_alloc(rack));
 }
 
 /* wrapper to allocate a sendmap entry, subject to a specific limit */
 static struct rack_sendmap *
 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type)
 {
 	struct rack_sendmap *rsm;
 
 	if (limit_type) {
 		/* currently there is only one limit type */
 		if (rack->r_ctl.rc_split_limit > 0 &&
 		    (rack->do_detection == 0) &&
 		    rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) {
 			counter_u64_add(rack_split_limited, 1);
 			if (!rack->alloc_limit_reported) {
 				rack->alloc_limit_reported = 1;
 				counter_u64_add(rack_alloc_limited_conns, 1);
 			}
 			return (NULL);
 #ifdef TCP_SAD_DETECTION
 		} else if ((tcp_sad_limit != 0) &&
 			   (rack->do_detection == 1) &&
 			   (rack->r_ctl.rc_num_split_allocs >= tcp_sad_limit)) {
 			counter_u64_add(rack_split_limited, 1);
 			if (!rack->alloc_limit_reported) {
 				rack->alloc_limit_reported = 1;
 				counter_u64_add(rack_alloc_limited_conns, 1);
 			}
 			return (NULL);
 #endif
 		}
 	}
 
 	/* allocate and mark in the limit type, if set */
 	rsm = rack_alloc(rack);
 	if (rsm != NULL && limit_type) {
 		rsm->r_limit_type = limit_type;
 		rack->r_ctl.rc_num_split_allocs++;
 	}
 	return (rsm);
 }
 
 static void
 rack_free_trim(struct tcp_rack *rack)
 {
 	struct rack_sendmap *rsm;
 
 	/*
 	 * Free up all the tail entries until
 	 * we get our list down to the limit.
 	 */
 	while (rack->rc_free_cnt > rack_free_cache) {
 		rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
 		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
 		rack->rc_free_cnt--;
 		rack->r_ctl.rc_num_maps_alloced--;
 		uma_zfree(rack_zone, rsm);
 	}
 }
 
 static void
 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
 {
 	if (rsm->r_flags & RACK_APP_LIMITED) {
 		if (rack->r_ctl.rc_app_limited_cnt > 0) {
 			rack->r_ctl.rc_app_limited_cnt--;
 		}
 	}
 	if (rsm->r_limit_type) {
 		/* currently there is only one limit type */
 		rack->r_ctl.rc_num_split_allocs--;
 	}
 	if (rsm == rack->r_ctl.rc_first_appl) {
 		if (rack->r_ctl.rc_app_limited_cnt == 0)
 			rack->r_ctl.rc_first_appl = NULL;
 		else
 			rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl);
 	}
 	if (rsm == rack->r_ctl.rc_resend)
 		rack->r_ctl.rc_resend = NULL;
 	if (rsm == rack->r_ctl.rc_end_appl)
 		rack->r_ctl.rc_end_appl = NULL;
 	if (rack->r_ctl.rc_tlpsend == rsm)
 		rack->r_ctl.rc_tlpsend = NULL;
 	if (rack->r_ctl.rc_sacklast == rsm)
 		rack->r_ctl.rc_sacklast = NULL;
 	memset(rsm, 0, sizeof(struct rack_sendmap));
 	/* Make sure we are not going to overrun our count limit of 0xff */
 	if ((rack->rc_free_cnt + 1) > 0xff) {
 		rack_free_trim(rack);
 	}
 	TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
 	rack->rc_free_cnt++;
 }
 
 static uint32_t
 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	uint64_t srtt, bw, len, tim;
 	uint32_t segsiz, def_len, minl;
 
 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
 	def_len = rack_def_data_window * segsiz;
 	if (rack->rc_gp_filled == 0) {
 		/*
 		 * We have no measurement (IW is in flight?) so
 		 * we can only guess using our data_window sysctl
 		 * value (usually 20MSS).
 		 */
 		return (def_len);
 	}
 	/*
 	 * Now we have a number of factors to consider.
 	 *
 	 * 1) We have a desired BDP multiple (rack_goal_bdp) which is
 	 *    usually at least 2.
 	 * 2) We have a minimum number of rtt's, usually 1 SRTT,
 	 *    but we allow it to be more.
 	 * 3) We want to make sure a measurement lasts N useconds (if
 	 *    we have set rack_min_measure_usec).
 	 *
 	 * We handle the first concern here by trying to create a data
 	 * window of max(rack_def_data_window, DesiredBDP). The
 	 * second concern we handle by not letting the measurement
 	 * window end normally until at least the required SRTT's
 	 * have gone by, which is done further below in
 	 * rack_enough_for_measurement(). Finally, the third concern
 	 * we also handle here by calculating how long that time
 	 * would take at the current BW and then returning the
 	 * max of our first calculation and that length. Note
 	 * that if rack_min_measure_usec is 0, we don't deal
 	 * with concern 3. Also, for both concerns 1 and 3, an
 	 * application limited period could end the measurement
 	 * earlier.
 	 *
 	 * So let's calculate the BDP with the "known" b/w, using
 	 * the SRTT as our rtt, and then multiply it by the
 	 * goal.
 	 */
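 	/*
 	 * Worked example (hypothetical numbers): at bw = 12,500,000
 	 * bytes/sec (100 Mbps) and srtt = 40,000 usec the BDP is
 	 * 500,000 bytes; with rack_goal_bdp = 2 the target window
 	 * becomes 1,000,000 bytes, rounded up to a segsiz boundary.
 	 */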
 	bw = rack_get_bw(rack);
 	srtt = (uint64_t)tp->t_srtt;
 	len = bw * srtt;
 	len /= (uint64_t)HPTS_USEC_IN_SEC;
 	len *= max(1, rack_goal_bdp);
 	/* Now we need to round up to the nearest MSS */
 	len = roundup(len, segsiz);
 	if (rack_min_measure_usec) {
 		/* Now calculate our min length for this b/w */
 		tim = rack_min_measure_usec;
 		minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
 		if (minl == 0)
 			minl = 1;
 		minl = roundup(minl, segsiz);
 		if (len < minl)
 			len = minl;
 	}
 	/*
 	 * Now if we have a very small window we want
 	 * to attempt to get the window that is
 	 * as small as possible. This happens on
 	 * low b/w connections and we don't want to
 	 * span huge numbers of rtt's between measurements.
 	 *
 	 * We basically include 2 over our "MIN window" so
 	 * that the measurement can be shortened (possibly) by
 	 * an ack'ed packet.
 	 */
 	if (len < def_len)
 		return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
 	else
 		return (max((uint32_t)len, def_len));
 
 }
 
 static int
 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
 {
 	uint32_t tim, srtts, segsiz;
 
 	/*
 	 * Has enough time passed for the GP measurement to be valid?
 	 */
 	if (SEQ_LT(th_ack, tp->gput_seq)) {
 		/* Not enough bytes yet */
 		return (0);
 	}
 	if ((tp->snd_max == tp->snd_una) ||
 	    (th_ack == tp->snd_max)){
 		/*
 		 * All is acked. The quality of an all-acked measurement
 		 * is usually low or medium, but in theory we could split
 		 * all-acked into two cases: where you got a significant
 		 * amount of your window and where you did not. For now we
 		 * leave it, but it is something to contemplate in the
 		 * future. The danger here is that a delayed ack is
 		 * affecting the last byte (which is a 50:50 chance).
 		 */
 		*quality = RACK_QUALITY_ALLACKED;
 		return (1);
 	}
 	if (SEQ_GEQ(th_ack,  tp->gput_ack)) {
 		/*
 		 * We obtained the entire window of data we wanted,
 		 * so no matter if we are in recovery or not it is
 		 * ok, since expanding the window does not make
 		 * things fuzzy (or at least not as much).
 		 */
 		*quality = RACK_QUALITY_HIGH;
 		return (1);
 	}
 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
 	if (SEQ_LT(th_ack, tp->gput_ack) &&
 	    ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
 		/* Not enough bytes yet */
 		return (0);
 	}
 	if (rack->r_ctl.rc_first_appl &&
 	    (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) {
 		/*
 		 * We are up to the app limited send point, so
 		 * we have to measure irrespective of the time.
 		 */
 		*quality = RACK_QUALITY_APPLIMITED;
 		return (1);
 	}
 	/* Now what about time? */
 	srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
 	tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
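 	/*
 	 * Illustrative example (hypothetical values): with
 	 * rc_gp_srtt = 30,000 usec and rack_min_srtts = 1, at least
 	 * 30 ms must have elapsed since gput_ts before the
 	 * measurement can complete on time alone.
 	 */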
 	if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
 		/*
 		 * We do not allow a measurement if we are in recovery,
 		 * since that would shrink the goodput window we wanted.
 		 * This is to prevent cloudiness about when the last send
 		 * was actually made.
 		 */
 		*quality = RACK_QUALITY_HIGH;
 		return (1);
 	}
 	/* Nope not even a full SRTT has passed */
 	return (0);
 }
 
 static void
 rack_log_timely(struct tcp_rack *rack,
 		uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
 		uint64_t up_bnd, int line, uint8_t method)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = logged;
 		log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
 		log.u_bbr.flex2 <<= 4;
 		log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
 		log.u_bbr.flex2 <<= 4;
 		log.u_bbr.flex2 |= rack->rc_gp_incr;
 		log.u_bbr.flex2 <<= 4;
 		log.u_bbr.flex2 |= rack->rc_gp_bwred;
 		log.u_bbr.flex3 = rack->rc_gp_incr;
 		log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
 		log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
 		log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
 		log.u_bbr.flex7 = rack->rc_gp_bwred;
 		log.u_bbr.flex8 = method;
 		log.u_bbr.cur_del_rate = cur_bw;
 		log.u_bbr.delRate = low_bnd;
 		log.u_bbr.bw_inuse = up_bnd;
 		log.u_bbr.rttProp = rack_get_bw(rack);
 		log.u_bbr.pkt_epoch = line;
 		log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
 		log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
 		log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
 		log.u_bbr.cwnd_gain <<= 1;
 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
 		log.u_bbr.cwnd_gain <<= 1;
 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
 		log.u_bbr.cwnd_gain <<= 1;
 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
 		log.u_bbr.lost = rack->r_ctl.rc_loss_count;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_TIMELY_WORK, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static int
 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
 {
 	/*
 	 * Before we increase we need to know if
 	 * the estimate just made was less than
 	 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est).
 	 *
 	 * If we are already pacing at a fast enough
 	 * rate to push us faster, there is no sense in
 	 * increasing.
 	 *
 	 * We first calculate our actual pacing rate (ss or ca multiplier
 	 * times our cur_bw).
 	 *
 	 * Then we take the last measured rate and multiply by our
 	 * maximum pacing overage to give us a max allowable rate.
 	 *
 	 * If our act_rate is smaller than our max_allowable rate
 	 * then we should increase. Else we should hold steady.
 	 *
 	 */
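 	/*
 	 * Worked example (hypothetical numbers): cur_bw = 1,000,000
 	 * bytes/sec and mult = 150 give act_rate = 1,500,000; with
 	 * last_bw_est = 1,450,000 and rack_max_per_above = 10 the
 	 * max_allow_rate is 1,595,000, so act_rate is below it and
 	 * the raise is allowed.
 	 */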
 	uint64_t act_rate, max_allow_rate;
 
 	if (rack_timely_no_stopping)
 		return (1);
 
 	if ((cur_bw == 0) || (last_bw_est == 0)) {
 		/*
 		 * Initial startup case or
 		 * everything is acked case.
 		 */
 		rack_log_timely(rack,  mult, cur_bw, 0, 0,
 				__LINE__, 9);
 		return (1);
 	}
 	if (mult <= 100) {
 		/*
 		 * We can always pace at or slightly above our rate.
 		 */
 		rack_log_timely(rack,  mult, cur_bw, 0, 0,
 				__LINE__, 9);
 		return (1);
 	}
 	act_rate = cur_bw * (uint64_t)mult;
 	act_rate /= 100;
 	max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
 	max_allow_rate /= 100;
 	if (act_rate < max_allow_rate) {
 		/*
 		 * Here the rate we are actually pacing at
 		 * is smaller than 10% above our last measurement.
 		 * This means we are pacing below what we would
 		 * like to try to achieve (plus some wiggle room).
 		 */
 		rack_log_timely(rack,  mult, cur_bw, act_rate, max_allow_rate,
 				__LINE__, 9);
 		return (1);
 	} else {
 		/*
 		 * Here we are already pacing at least rack_max_per_above (10%)
 		 * above what we are getting back. This most likely indicates
 		 * that we are being limited (cwnd/rwnd/app) and can't
 		 * get any more b/w. There is no sense in trying to
 		 * raise the pacing rate; it is not speeding us up
 		 * and we are already pacing faster than we are getting.
 		 */
 		rack_log_timely(rack,  mult, cur_bw, act_rate, max_allow_rate,
 				__LINE__, 8);
 		return (0);
 	}
 }
 
 static void
 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
 {
 	/*
 	 * When we drag bottom, we want to ensure
 	 * that no multiplier is below 1.0; if one is,
 	 * we restore it to at least that.
 	 */
 	if (rack->r_ctl.rack_per_of_gp_rec  < 100) {
 		/* This is unlikely; we usually do not touch recovery */
 		rack->r_ctl.rack_per_of_gp_rec = 100;
 	}
 	if (rack->r_ctl.rack_per_of_gp_ca < 100) {
 		rack->r_ctl.rack_per_of_gp_ca = 100;
 	}
 	if (rack->r_ctl.rack_per_of_gp_ss < 100) {
 		rack->r_ctl.rack_per_of_gp_ss = 100;
 	}
 }
 
 static void
 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
 {
 	if (rack->r_ctl.rack_per_of_gp_ca > 100) {
 		rack->r_ctl.rack_per_of_gp_ca = 100;
 	}
 	if (rack->r_ctl.rack_per_of_gp_ss > 100) {
 		rack->r_ctl.rack_per_of_gp_ss = 100;
 	}
 }
 
 static void
 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
 {
 	int32_t  calc, logged, plus;
 
 	logged = 0;
 
 	if (override) {
 		/*
 		 * override is passed when we are
 		 * losing b/w and making one last
 		 * gasp at trying to not lose out
 		 * to a new-reno flow.
 		 */
 	}
 	/* In classic timely we boost by 5x if we have 5 increases in a row, let's not */
 	if (rack->rc_gp_incr &&
 	    ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
 		/*
 		 * Reset and get 5 strokes more before the boost. Note
 		 * that the count is 0 based so we have to add one.
 		 */
 extra_boost:
 		plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST;
 		rack->rc_gp_timely_inc_cnt = 0;
 	} else
 		plus = (uint32_t)rack_gp_increase_per;
 	/* Must be at least 1% increase for true timely increases */
 	if ((plus < 1) &&
 	    ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0)))
 		plus = 1;
 	if (rack->rc_gp_saw_rec &&
 	    (rack->rc_gp_no_rec_chg == 0) &&
 	    rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
 				  rack->r_ctl.rack_per_of_gp_rec)) {
 		/* We have been in recovery ding it too */
 		calc = rack->r_ctl.rack_per_of_gp_rec + plus;
 		if (calc > 0xffff)
 			calc = 0xffff;
 		logged |= 1;
 		rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc;
 		if (rack->r_ctl.rack_per_upper_bound_ca &&
 		    (rack->rc_dragged_bottom == 0) &&
 		    (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca))
 			rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca;
 	}
 	if (rack->rc_gp_saw_ca &&
 	    (rack->rc_gp_saw_ss == 0) &&
 	    rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
 				  rack->r_ctl.rack_per_of_gp_ca)) {
 		/* In CA */
 		calc = rack->r_ctl.rack_per_of_gp_ca + plus;
 		if (calc > 0xffff)
 			calc = 0xffff;
 		logged |= 2;
 		rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc;
 		if (rack->r_ctl.rack_per_upper_bound_ca &&
 		    (rack->rc_dragged_bottom == 0) &&
 		    (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca))
 			rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca;
 	}
 	if (rack->rc_gp_saw_ss &&
 	    rack_bw_can_be_raised(rack, cur_bw, last_bw_est,
 				  rack->r_ctl.rack_per_of_gp_ss)) {
 		/* In SS */
 		calc = rack->r_ctl.rack_per_of_gp_ss + plus;
 		if (calc > 0xffff)
 			calc = 0xffff;
 		rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc;
 		if (rack->r_ctl.rack_per_upper_bound_ss &&
 		    (rack->rc_dragged_bottom == 0) &&
 		    (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss))
 			rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss;
 		logged |= 4;
 	}
 	if (logged &&
 	    (rack->rc_gp_incr == 0)){
 		/* Go into increment mode */
 		rack->rc_gp_incr = 1;
 		rack->rc_gp_timely_inc_cnt = 0;
 	}
 	if (rack->rc_gp_incr &&
 	    logged &&
 	    (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) {
 		rack->rc_gp_timely_inc_cnt++;
 	}
 	rack_log_timely(rack,  logged, plus, 0, 0,
 			__LINE__, 1);
 }
 
 static uint32_t
 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff)
 {
 	/*-
 	 * norm_grad = rtt_diff / minrtt;
 	 * new_per = curper * (1 - B * norm_grad)
 	 *
 	 * B = rack_gp_decrease_per (default 80%)
 	 * rtt_diff = input var current rtt-diff
 	 * curper = input var current percentage
 	 * minrtt = from rack filter
 	 *
 	 * In order to do the floating point calculations above we
 	 * do an integer conversion. The code looks confusing so let me
 	 * translate it into something that use more variables and
 	 * is clearer for us humans :)
 	 *
 	 * uint64_t norm_grad, inverse, reduce_by, final_result;
 	 * uint32_t perf;
 	 *
 	 * norm_grad = (((uint64_t)rtt_diff * 1000000) /
 	 *             (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt));
 	 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad;
 	 * inverse /= 1000000;
 	 * reduce_by = (1000000 - inverse);
 	 * final_result = (curper * reduce_by) / 1000000;
 	 * perf = (uint32_t)final_result;
 	 */
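 	/*
 	 * Worked example (hypothetical values): curper = 200,
 	 * rtt_diff = 5,000 usec, minrtt = 20,000 usec and B = 80 give
 	 * norm_grad = 0.25, a reduction factor of 1 - (0.8 * 0.25) = 0.8
 	 * and a new percentage of 160.
 	 */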
 	uint64_t perf;
 
 	perf = (((uint64_t)curper * ((uint64_t)1000000 -
 		    ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 *
 		     (((uint64_t)rtt_diff * (uint64_t)1000000)/
 		      (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/
 		     (uint64_t)1000000)) /
 		(uint64_t)1000000);
 	if (perf > curper) {
 		/* TSNH */
 		perf = curper - 1;
 	}
 	return ((uint32_t)perf);
 }
 
 static uint32_t
 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt)
 {
 	/*
 	 *                                    highrttthresh
 	 * result = curper * (1 - (B * (1 -   -------------  )))
 	 *                                        gp_srtt
 	 *
 	 * B = rack_gp_decrease_per (default .8 i.e. 80)
 	 * highrttthresh = filter_min * rack_gp_rtt_maxmul
 	 */
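 	/*
 	 * Worked example (hypothetical values): a filtered min rtt of
 	 * 10,000 usec with rack_gp_rtt_maxmul = 3 gives highrttthresh =
 	 * 30,000 usec; with rtt = 60,000 usec, B = 80 and curper = 150
 	 * the result is 150 * (1 - 0.8 * (1 - 0.5)) = 90.
 	 */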
 	uint64_t perf;
 	uint32_t highrttthresh;
 
 	highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
 
 	perf = (((uint64_t)curper * ((uint64_t)1000000 -
 				     ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 -
 					((uint64_t)highrttthresh * (uint64_t)1000000) /
 						    (uint64_t)rtt)) / 100)) /(uint64_t)1000000);
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		uint64_t log1;
 
 		log1 = rtt;
 		log1 <<= 32;
 		log1 |= highrttthresh;
 		rack_log_timely(rack,
 				rack_gp_decrease_per,
 				(uint64_t)curper,
 				log1,
 				perf,
 				__LINE__,
 				15);
 	}
 	return (perf);
 }
 
 static void
 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff)
 {
 	uint64_t logvar, logvar2, logvar3;
 	uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val;
 
 	if (rack->rc_gp_incr) {
 		/* Turn off increment counting */
 		rack->rc_gp_incr = 0;
 		rack->rc_gp_timely_inc_cnt = 0;
 	}
 	ss_red = ca_red = rec_red = 0;
 	logged = 0;
 	/* Calculate the reduction value */
 	if (rtt_diff < 0) {
 		rtt_diff *= -1;
 	}
 	/* Must be at least 1% reduction */
 	if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) {
 		/* We have been in recovery ding it too */
 		if (timely_says == 2) {
 			new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt);
 			alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
 			if (alt < new_per)
 				val = alt;
 			else
 				val = new_per;
 		} else
 			 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff);
 		if (rack->r_ctl.rack_per_of_gp_rec > val) {
 			rec_red = (rack->r_ctl.rack_per_of_gp_rec - val);
 			rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val;
 		} else {
 			rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
 			rec_red = 0;
 		}
 		if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec)
 			rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound;
 		logged |= 1;
 	}
 	if (rack->rc_gp_saw_ss) {
 		/* Sent in SS */
 		if (timely_says == 2) {
 			new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt);
 			alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
 			if (alt < new_per)
 				val = alt;
 			else
 				val = new_per;
 		} else
 			val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff);
 		if (rack->r_ctl.rack_per_of_gp_ss > new_per) {
 			ss_red = rack->r_ctl.rack_per_of_gp_ss - val;
 			rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val;
 		} else {
 			ss_red = new_per;
 			rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
 			logvar = new_per;
 			logvar <<= 32;
 			logvar |= alt;
 			logvar2 = (uint32_t)rtt;
 			logvar2 <<= 32;
 			logvar2 |= (uint32_t)rtt_diff;
 			logvar3 = rack_gp_rtt_maxmul;
 			logvar3 <<= 32;
 			logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
 			rack_log_timely(rack, timely_says,
 					logvar2, logvar3,
 					logvar, __LINE__, 10);
 		}
 		if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss)
 			rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound;
 		logged |= 4;
 	} else if (rack->rc_gp_saw_ca) {
 		/* Sent in CA */
 		if (timely_says == 2) {
 			new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt);
 			alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
 			if (alt < new_per)
 				val = alt;
 			else
 				val = new_per;
 		} else
 			val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff);
 		if (rack->r_ctl.rack_per_of_gp_ca > val) {
 			ca_red = rack->r_ctl.rack_per_of_gp_ca - val;
 			rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val;
 		} else {
 			rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
 			ca_red = 0;
 			logvar = new_per;
 			logvar <<= 32;
 			logvar |= alt;
 			logvar2 = (uint32_t)rtt;
 			logvar2 <<= 32;
 			logvar2 |= (uint32_t)rtt_diff;
 			logvar3 = rack_gp_rtt_maxmul;
 			logvar3 <<= 32;
 			logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
 			rack_log_timely(rack, timely_says,
 					logvar2, logvar3,
 					logvar, __LINE__, 10);
 		}
 		if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca)
 			rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound;
 		logged |= 2;
 	}
 	if (rack->rc_gp_timely_dec_cnt < 0x7) {
 		rack->rc_gp_timely_dec_cnt++;
 		if (rack_timely_dec_clear &&
 		    (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear))
 			rack->rc_gp_timely_dec_cnt = 0;
 	}
 	logvar = ss_red;
 	logvar <<= 32;
 	logvar |= ca_red;
 	rack_log_timely(rack,  logged, rec_red, rack_per_lower_bound, logvar,
 			__LINE__, 2);
 }
 
 static void
 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts,
 		     uint32_t rtt, uint32_t line, uint8_t reas)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex1 = line;
 		log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts;
 		log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts;
 		log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
 		log.u_bbr.flex5 = rtt;
 		log.u_bbr.flex6 = rack->rc_highly_buffered;
 		log.u_bbr.flex6 <<= 1;
 		log.u_bbr.flex6 |= rack->forced_ack;
 		log.u_bbr.flex6 <<= 1;
 		log.u_bbr.flex6 |= rack->rc_gp_dyn_mul;
 		log.u_bbr.flex6 <<= 1;
 		log.u_bbr.flex6 |= rack->in_probe_rtt;
 		log.u_bbr.flex6 <<= 1;
 		log.u_bbr.flex6 |= rack->measure_saw_probe_rtt;
 		log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt;
 		log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca;
 		log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec;
 		log.u_bbr.flex8 = reas;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.delRate = rack_get_bw(rack);
 		log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt;
 		log.u_bbr.cur_del_rate <<= 32;
 		log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt;
 		log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered;
 		log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
 		log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
 		log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts;
 		log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight;
 		log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
 		log.u_bbr.rttProp = us_cts;
 		log.u_bbr.rttProp <<= 32;
 		log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_RTT_SHRINKS, 0,
 		    0, &log, false, &rack->r_ctl.act_rcv_time);
 	}
 }
 
 static void
 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt)
 {
 	uint64_t bwdp;
 
 	bwdp = rack_get_bw(rack);
 	bwdp *= (uint64_t)rtt;
 	bwdp /= (uint64_t)HPTS_USEC_IN_SEC;
 	rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz);
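 	/*
 	 * For example (hypothetical values): bw = 2,500,000 bytes/sec
 	 * and rtt = 40,000 usec give a bwdp of 100,000 bytes before
 	 * the roundup to a segsiz boundary.
 	 */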
 	if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) {
 		/*
 		 * A window protocol must be able to have 4 packets
 		 * outstanding as the floor in order to function
 		 * (especially considering delayed ack :D).
 		 */
 		rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs);
 	}
 }
 
 static void
 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts)
 {
 	/**
 	 * ProbeRTT is a bit different in rack_pacing than in
 	 * BBR. It is like BBR in that it uses the lowering of
 	 * the RTT as a signal that we saw something new and
 	 * counts from there for how long between. But it is
 	 * different in that it's quite simple. It does not
 	 * play with the cwnd, waiting until we get down
 	 * to N segments outstanding and holding that for
 	 * 200ms. Instead it just sets the pacing reduction
 	 * rate to a set percentage (70 by default) and holds
 	 * that for a number of recent GP Srtt's.
 	 */
 	uint32_t segsiz;
 
 	if (rack->rc_gp_dyn_mul == 0)
 		return;
 
 	if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) {
 		/* We are idle */
 		return;
 	}
 	if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
 	    SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
 		/*
 		 * Stop the goodput measurement now; the idea here is
 		 * that future measurements with in_probe_rtt set
 		 * won't register if they are not greater, so
 		 * we want to get what info (if any) is available
 		 * now.
 		 */
 		rack_do_goodput_measurement(rack->rc_tp, rack,
 					    rack->rc_tp->snd_una, __LINE__,
 					    RACK_QUALITY_PROBERTT);
 	}
 	rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
 	rack->r_ctl.rc_time_probertt_entered = us_cts;
 	segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
 		     rack->r_ctl.rc_pace_min_segs);
 	rack->in_probe_rtt = 1;
 	rack->measure_saw_probe_rtt = 1;
 	rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
 	rack->r_ctl.rc_time_probertt_starts = 0;
 	rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt;
 	if (rack_probertt_use_min_rtt_entry)
 		rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
 	else
 		rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt);
 	rack_log_rtt_shrinks(rack,  us_cts,  get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
 			     __LINE__, RACK_RTTS_ENTERPROBE);
 }
 
 static void
 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts)
 {
 	struct rack_sendmap *rsm;
 	uint32_t segsiz;
 
 	segsiz = min(ctf_fixed_maxseg(rack->rc_tp),
 		     rack->r_ctl.rc_pace_min_segs);
 	rack->in_probe_rtt = 0;
 	if ((rack->rc_tp->t_flags & TF_GPUTINPROG) &&
 	    SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) {
 		/*
		 * Stop the goodput measurement now; the idea here is
		 * that future measurements taken while in_probe_rtt
		 * is set won't register unless they are greater, so
		 * we want to capture what info (if any) is available
		 * now.
 		 */
 		rack_do_goodput_measurement(rack->rc_tp, rack,
 					    rack->rc_tp->snd_una, __LINE__,
 					    RACK_QUALITY_PROBERTT);
 	} else if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
 		/*
 		 * We don't have enough data to make a measurement.
		 * So let's just stop it here and restart after exiting
 		 * probe-rtt. We probably are not interested in
 		 * the results anyway.
 		 */
 		rack->rc_tp->t_flags &= ~TF_GPUTINPROG;
 	}
 	/*
 	 * Measurements through the current snd_max are going
 	 * to be limited by the slower pacing rate.
 	 *
 	 * We need to mark these as app-limited so we
 	 * don't collapse the b/w.
 	 */
 	rsm = tqhash_max(rack->r_ctl.tqh);
 	if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
 		if (rack->r_ctl.rc_app_limited_cnt == 0)
 			rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
 		else {
 			/*
			 * Walk out to the end of the app-limited chain,
			 * mark this new one as next and move end_appl up
			 * to this entry.
 			 */
 			if (rack->r_ctl.rc_end_appl)
 				rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
 			rack->r_ctl.rc_end_appl = rsm;
 		}
 		rsm->r_flags |= RACK_APP_LIMITED;
 		rack->r_ctl.rc_app_limited_cnt++;
 	}
 	/*
 	 * Now, we need to examine our pacing rate multipliers.
	 * If it's under 100%, we need to kick it back up to
 	 * 100%. We also don't let it be over our "max" above
 	 * the actual rate i.e. 100% + rack_clamp_atexit_prtt.
 	 * Note setting clamp_atexit_prtt to 0 has the effect
 	 * of setting CA/SS to 100% always at exit (which is
 	 * the default behavior).
 	 */
 	if (rack_probertt_clear_is) {
 		rack->rc_gp_incr = 0;
 		rack->rc_gp_bwred = 0;
 		rack->rc_gp_timely_inc_cnt = 0;
 		rack->rc_gp_timely_dec_cnt = 0;
 	}
 	/* Do we do any clamping at exit? */
 	if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) {
 		rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp;
 		rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp;
 	}
 	if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) {
 		rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt;
 		rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt;
 	}
 	/*
	 * Let's set rtt_diff to 0, so that we will get a "boost"
 	 * after exiting.
 	 */
 	rack->r_ctl.rc_rtt_diff = 0;
 
 	/* Clear all flags so we start fresh */
 	rack->rc_tp->t_bytes_acked = 0;
 	rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
 	/*
 	 * If configured to, set the cwnd and ssthresh to
 	 * our targets.
 	 */
 	if (rack_probe_rtt_sets_cwnd) {
 		uint64_t ebdp;
 		uint32_t setto;
 
 		/* Set ssthresh so we get into CA once we hit our target */
 		if (rack_probertt_use_min_rtt_exit == 1) {
 			/* Set to min rtt */
 			rack_set_prtt_target(rack, segsiz,
 					     get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt));
 		} else if (rack_probertt_use_min_rtt_exit == 2) {
 			/* Set to current gp rtt */
 			rack_set_prtt_target(rack, segsiz,
 					     rack->r_ctl.rc_gp_srtt);
 		} else if (rack_probertt_use_min_rtt_exit == 3) {
 			/* Set to entry gp rtt */
 			rack_set_prtt_target(rack, segsiz,
 					     rack->r_ctl.rc_entry_gp_rtt);
 		} else {
 			uint64_t sum;
 			uint32_t setval;
 
 			sum = rack->r_ctl.rc_entry_gp_rtt;
 			sum *= 10;
 			sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt));
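			/*
			 * sum is the ratio of the gp rtt at probe-rtt entry
			 * to the current gp srtt, scaled by 10. Illustrative
			 * example (not measured values): an entry_gp_rtt of
			 * 60,000 usec with a current gp_srtt of 20,000 usec
			 * gives sum = 600000 / 20000 = 30, i.e. a 3x ratio,
			 * which lands in the "highly buffered" branch below.
			 */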
 			if (sum >= 20) {
 				/*
 				 * A highly buffered path needs
 				 * cwnd space for timely to work.
				 * Let's set things up as if
 				 * we are heading back here again.
 				 */
 				setval = rack->r_ctl.rc_entry_gp_rtt;
 			} else if (sum >= 15) {
 				/*
				 * Let's take the smaller of the
 				 * two since we are just somewhat
 				 * buffered.
 				 */
 				setval = rack->r_ctl.rc_gp_srtt;
 				if (setval > rack->r_ctl.rc_entry_gp_rtt)
 					setval = rack->r_ctl.rc_entry_gp_rtt;
 			} else {
 				/*
 				 * Here we are not highly buffered
 				 * and should pick the min we can to
 				 * keep from causing loss.
 				 */
 				setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
 			}
 			rack_set_prtt_target(rack, segsiz,
 					     setval);
 		}
 		if (rack_probe_rtt_sets_cwnd > 1) {
 			/* There is a percentage here to boost */
 			ebdp = rack->r_ctl.rc_target_probertt_flight;
 			ebdp *= rack_probe_rtt_sets_cwnd;
 			ebdp /= 100;
 			setto = rack->r_ctl.rc_target_probertt_flight + ebdp;
 		} else
 			setto = rack->r_ctl.rc_target_probertt_flight;
 		rack->rc_tp->snd_cwnd = roundup(setto, segsiz);
 		if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) {
 			/* Enforce a min */
 			rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs;
 		}
		/* If we set the cwnd, also set the ssthresh point so we are in CA */
 		rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1);
 	}
 	rack_log_rtt_shrinks(rack,  us_cts,
 			     get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
 			     __LINE__, RACK_RTTS_EXITPROBE);
 	/* Clear times last so log has all the info */
 	rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max;
 	rack->r_ctl.rc_time_probertt_entered = us_cts;
 	rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
 	rack->r_ctl.rc_time_of_last_probertt = us_cts;
 }
 
 static void
 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts)
 {
 	/* Check in on probe-rtt */
 	if (rack->rc_gp_filled == 0) {
 		/* We do not do p-rtt unless we have gp measurements */
 		return;
 	}
 	if (rack->in_probe_rtt) {
 		uint64_t no_overflow;
 		uint32_t endtime, must_stay;
 
 		if (rack->r_ctl.rc_went_idle_time &&
 		    ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) {
 			/*
 			 * We went idle during prtt, just exit now.
 			 */
 			rack_exit_probertt(rack, us_cts);
 		} else if (rack_probe_rtt_safety_val &&
 		    TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) &&
 		    ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) {
 			/*
 			 * Probe RTT safety value triggered!
 			 */
 			rack_log_rtt_shrinks(rack,  us_cts,
 					     get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
 					     __LINE__, RACK_RTTS_SAFETY);
 			rack_exit_probertt(rack, us_cts);
 		}
 		/* Calculate the max we will wait */
 		endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait);
 		if (rack->rc_highly_buffered)
 			endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp);
 		/* Calculate the min we must wait */
 		must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain);
 		if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) &&
 		    TSTMP_LT(us_cts, endtime)) {
 			uint32_t calc;
 			/* Do we lower more? */
 no_exit:
 			if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered))
 				calc = us_cts - rack->r_ctl.rc_time_probertt_entered;
 			else
 				calc = 0;
 			calc /= max(rack->r_ctl.rc_gp_srtt, 1);
 			if (calc) {
 				/* Maybe */
 				calc *= rack_per_of_gp_probertt_reduce;
 				rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc;
 				/* Limit it too */
 				if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh)
 					rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh;
 			}
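			/*
			 * Illustrative example (sysctl values shown only for
			 * illustration): if 3 gp_srtt's have elapsed since
			 * probe-rtt entry, calc = 3, and with
			 * rack_per_of_gp_probertt_reduce = 10 the pacing
			 * percentage becomes rack_per_of_gp_probertt - 30,
			 * but never below rack_per_of_gp_lowthresh.
			 */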
 			/* We must reach target or the time set */
 			return;
 		}
 		if (rack->r_ctl.rc_time_probertt_starts == 0) {
 			if ((TSTMP_LT(us_cts, must_stay) &&
 			     rack->rc_highly_buffered) ||
 			     (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) >
 			      rack->r_ctl.rc_target_probertt_flight)) {
 				/* We are not past the must_stay time */
 				goto no_exit;
 			}
 			rack_log_rtt_shrinks(rack,  us_cts,
 					     get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
 					     __LINE__, RACK_RTTS_REACHTARGET);
 			rack->r_ctl.rc_time_probertt_starts = us_cts;
 			if (rack->r_ctl.rc_time_probertt_starts == 0)
 				rack->r_ctl.rc_time_probertt_starts = 1;
 			/* Restore back to our rate we want to pace at in prtt */
 			rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
 		}
 		/*
 		 * Setup our end time, some number of gp_srtts plus 200ms.
 		 */
 		no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt *
 			       (uint64_t)rack_probertt_gpsrtt_cnt_mul);
 		if (rack_probertt_gpsrtt_cnt_div)
 			endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div);
 		else
 			endtime = 0;
 		endtime += rack_min_probertt_hold;
 		endtime += rack->r_ctl.rc_time_probertt_starts;
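		/*
		 * Illustrative example (values only for illustration): with
		 * rc_gp_srtt = 50,000 usec, a gpsrtt_cnt_mul of 3 and a
		 * gpsrtt_cnt_div of 2, the hold is 75,000 usec plus
		 * rack_min_probertt_hold, measured from
		 * rc_time_probertt_starts.
		 */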
 		if (TSTMP_GEQ(us_cts,  endtime)) {
 			/* yes, exit probertt */
 			rack_exit_probertt(rack, us_cts);
 		}
 
 	} else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) {
		/* Go into probertt, it's been too long since we went lower */
 		rack_enter_probertt(rack, us_cts);
 	}
 }
 
 static void
 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est,
 		       uint32_t rtt, int32_t rtt_diff)
 {
 	uint64_t cur_bw, up_bnd, low_bnd, subfr;
 	uint32_t losses;
 
 	if ((rack->rc_gp_dyn_mul == 0) ||
 	    (rack->use_fixed_rate) ||
 	    (rack->in_probe_rtt) ||
 	    (rack->rc_always_pace == 0)) {
 		/* No dynamic GP multiplier in play */
 		return;
 	}
 	losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start;
 	cur_bw = rack_get_bw(rack);
 	/* Calculate our up and down range */
 	up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up;
 	up_bnd /= 100;
 	up_bnd += rack->r_ctl.last_gp_comp_bw;
 
 	subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down;
 	subfr /= 100;
 	low_bnd = rack->r_ctl.last_gp_comp_bw - subfr;
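	/*
	 * Illustrative example (multipliers chosen for clarity, not
	 * defaults): with last_gp_comp_bw = 10,000,000 bytes/sec,
	 * rack_gp_per_bw_mul_up = 2 and rack_gp_per_bw_mul_down = 4,
	 * up_bnd = 10,200,000 and low_bnd = 9,600,000 bytes/sec.
	 */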
 	if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) {
 		/*
 		 * This is the case where our RTT is above
 		 * the max target and we have been configured
 		 * to just do timely no bonus up stuff in that case.
 		 *
 		 * There are two configurations, set to 1, and we
 		 * just do timely if we are over our max. If its
 		 * set above 1 then we slam the multipliers down
 		 * to 100 and then decrement per timely.
 		 */
 		rack_log_timely(rack,  timely_says, cur_bw, low_bnd, up_bnd,
 				__LINE__, 3);
 		if (rack->r_ctl.rc_no_push_at_mrtt > 1)
 			rack_validate_multipliers_at_or_below_100(rack);
 		rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
 	} else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) {
 		/*
		 * We are decreasing; this is a bit complicated. It
		 * means we are losing ground. This could be
		 * because another flow entered and we are competing
		 * for b/w with it. This will push the RTT up, which
		 * makes timely unusable unless we want to get shoved
		 * into a corner and just be backed off (the age-old
		 * problem with delay-based CC).
 		 *
 		 * On the other hand if it was a route change we
 		 * would like to stay somewhat contained and not
 		 * blow out the buffers.
 		 */
 		rack_log_timely(rack,  timely_says, cur_bw, low_bnd, up_bnd,
 				__LINE__, 3);
 		rack->r_ctl.last_gp_comp_bw = cur_bw;
 		if (rack->rc_gp_bwred == 0) {
 			/* Go into reduction counting */
 			rack->rc_gp_bwred = 1;
 			rack->rc_gp_timely_dec_cnt = 0;
 		}
 		if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) {
 			/*
 			 * Push another time with a faster pacing
 			 * to try to gain back (we include override to
 			 * get a full raise factor).
 			 */
 			if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
 			    (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
 			    (timely_says == 0) ||
 			    (rack_down_raise_thresh == 0)) {
 				/*
 				 * Do an override up in b/w if we were
 				 * below the threshold or if the threshold
 				 * is zero we always do the raise.
 				 */
 				rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
 			} else {
 				/* Log it stays the same */
 				rack_log_timely(rack,  0, last_bw_est, low_bnd, 0,
 						__LINE__, 11);
 			}
 			rack->rc_gp_timely_dec_cnt++;
 			/* We are not incrementing really no-count */
 			rack->rc_gp_incr = 0;
 			rack->rc_gp_timely_inc_cnt = 0;
 		} else {
 			/*
			 * Let's just use the RTT
 			 * information and give up
 			 * pushing.
 			 */
 			goto use_timely;
 		}
 	} else if ((timely_says != 2) &&
 		    !losses &&
 		    (last_bw_est > up_bnd)) {
 		/*
		 * We are increasing b/w; let's keep going, updating
 		 * our b/w and ignoring any timely input, unless
 		 * of course we are at our max raise (if there is one).
 		 */
 
 		rack_log_timely(rack,  timely_says, cur_bw, low_bnd, up_bnd,
 				__LINE__, 3);
 		rack->r_ctl.last_gp_comp_bw = cur_bw;
 		if (rack->rc_gp_saw_ss &&
 		    rack->r_ctl.rack_per_upper_bound_ss &&
 		     (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) {
 			    /*
 			     * In cases where we can't go higher
 			     * we should just use timely.
 			     */
 			    goto use_timely;
 		}
 		if (rack->rc_gp_saw_ca &&
 		    rack->r_ctl.rack_per_upper_bound_ca &&
 		    (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) {
 			    /*
 			     * In cases where we can't go higher
 			     * we should just use timely.
 			     */
 			    goto use_timely;
 		}
 		rack->rc_gp_bwred = 0;
 		rack->rc_gp_timely_dec_cnt = 0;
 		/* You get a set number of pushes if timely is trying to reduce */
 		if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
 			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
 		} else {
 			/* Log it stays the same */
 			rack_log_timely(rack,  0, last_bw_est, up_bnd, 0,
 			    __LINE__, 12);
 		}
 		return;
 	} else {
 		/*
 		 * We are staying between the lower and upper range bounds
 		 * so use timely to decide.
 		 */
 		rack_log_timely(rack,  timely_says, cur_bw, low_bnd, up_bnd,
 				__LINE__, 3);
 use_timely:
 		if (timely_says) {
 			rack->rc_gp_incr = 0;
 			rack->rc_gp_timely_inc_cnt = 0;
 			if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
 			    !losses &&
 			    (last_bw_est < low_bnd)) {
				/* We are losing ground */
 				rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
 				rack->rc_gp_timely_dec_cnt++;
 				/* We are not incrementing really no-count */
 				rack->rc_gp_incr = 0;
 				rack->rc_gp_timely_inc_cnt = 0;
 			} else
 				rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
 		} else {
 			rack->rc_gp_bwred = 0;
 			rack->rc_gp_timely_dec_cnt = 0;
 			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
 		}
 	}
 }
 
 static int32_t
 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
 {
 	int32_t timely_says;
 	uint64_t log_mult, log_rtt_a_diff;
 
 	log_rtt_a_diff = rtt;
 	log_rtt_a_diff <<= 32;
 	log_rtt_a_diff |= (uint32_t)rtt_diff;
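	/*
	 * Illustrative example (multiplier values only for illustration):
	 * with a filtered min rtt of 10,000 usec and rack_gp_rtt_maxmul = 3,
	 * any rtt >= 30,000 usec forces a decrease (timely_says = 2). With
	 * rack_gp_rtt_minmul = 1 and rack_gp_rtt_mindiv = 4, any rtt <=
	 * 12,500 usec is an increase (timely_says = 0). In between, the
	 * sign of rtt_diff decides.
	 */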
 	if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
 		    rack_gp_rtt_maxmul)) {
 		/* Reduce the b/w multiplier */
 		timely_says = 2;
 		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
 		log_mult <<= 32;
 		log_mult |= prev_rtt;
 		rack_log_timely(rack,  timely_says, log_mult,
 				get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
 				log_rtt_a_diff, __LINE__, 4);
 	} else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
 			   ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
 			    max(rack_gp_rtt_mindiv , 1)))) {
 		/* Increase the b/w multiplier */
 		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
 			((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
 			 max(rack_gp_rtt_mindiv , 1));
 		log_mult <<= 32;
 		log_mult |= prev_rtt;
 		timely_says = 0;
 		rack_log_timely(rack,  timely_says, log_mult ,
 				get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
 				log_rtt_a_diff, __LINE__, 5);
 	} else {
 		/*
		 * Use a gradient to decide; the timely gradient
		 * is:
		 * grad = rc_rtt_diff / min_rtt;
		 *
		 * Anything below or equal to 0 is an
		 * increase indication. Anything above
		 * zero is a decrease. Note we take care
		 * of the actual gradient calculation
		 * in the reduction (it's not needed for
		 * an increase).
 		 */
 		log_mult = prev_rtt;
 		if (rtt_diff <= 0) {
 			/*
 			 * Rttdiff is less than zero, increase the
			 * b/w multiplier (it's 0 or negative)
 			 */
 			timely_says = 0;
 			rack_log_timely(rack,  timely_says, log_mult,
 					get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6);
 		} else {
 			/* Reduce the b/w multiplier */
 			timely_says = 1;
 			rack_log_timely(rack,  timely_says, log_mult,
 					get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7);
 		}
 	}
 	return (timely_says);
 }
 
 static __inline int
 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm)
 {
 	if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
 	    SEQ_LEQ(rsm->r_end, tp->gput_ack)) {
 		/**
 		 * This covers the case that the
 		 * resent is completely inside
 		 * the gp range or up to it.
 		 *      |----------------|
 		 *      |-----| <or>
 		 *            |----|
 		 *            <or>   |---|
 		 */
 		return (1);
 	} else if (SEQ_LT(rsm->r_start, tp->gput_seq) &&
 		   SEQ_GT(rsm->r_end, tp->gput_seq)){
 		/**
 		 * This covers the case of
 		 *      |--------------|
 		 *  |-------->|
 		 */
 		return (1);
 	} else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
 		   SEQ_LT(rsm->r_start, tp->gput_ack) &&
 		   SEQ_GEQ(rsm->r_end, tp->gput_ack)) {
 
 		/**
 		 * This covers the case of
 		 *      |--------------|
 		 *              |-------->|
 		 */
 		return (1);
 	}
 	return (0);
 }
 
 static __inline void
 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm)
 {
 
 	if ((tp->t_flags & TF_GPUTINPROG) == 0)
 		return;
 	/*
 	 * We have a Goodput measurement in progress. Mark
	 * the send if it's within the window. If it's not
	 * in the window, make sure it does not have the mark.
 	 */
 	if (rack_in_gp_window(tp, rsm))
 		rsm->r_flags |= RACK_IN_GP_WIN;
 	else
 		rsm->r_flags &= ~RACK_IN_GP_WIN;
 }
 
 static __inline void
 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
 {
	/* A GP measurement is ending, clear all marks on the send map */
 	struct rack_sendmap *rsm = NULL;
 
 	rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
 	if (rsm == NULL) {
 		rsm = tqhash_min(rack->r_ctl.tqh);
 	}
 	/* Nothing left? */
 	while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){
 		rsm->r_flags &= ~RACK_IN_GP_WIN;
 		rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 	}
 }
 
 
 static __inline void
 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	struct rack_sendmap *rsm = NULL;
 
 	if (tp->snd_una == tp->snd_max) {
 		/* Nothing outstanding yet, nothing to do here */
 		return;
 	}
 	if (SEQ_GT(tp->gput_seq, tp->snd_una)) {
 		/*
 		 * We are measuring ahead of some outstanding
		 * data. We need to walk through, marking entries,
		 * until we get to gput_seq so that no rsm is
		 * incorrectly flagged with RACK_IN_GP_WIN.
 		 */
 		rsm = tqhash_min(rack->r_ctl.tqh);
 		while (rsm != NULL) {
 			rack_mark_in_gp_win(tp, rsm);
 			if (SEQ_GEQ(rsm->r_end, tp->gput_seq))
 				break;
 			rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 		}
 	}
 	if (rsm == NULL) {
 		/*
 		 * Need to find the GP seq, if rsm is
 		 * set we stopped as we hit it.
 		 */
 		rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
 		if (rsm == NULL)
 			return;
 		rack_mark_in_gp_win(tp, rsm);
 	}
 	/*
 	 * Now we may need to mark already sent rsm, ahead of
 	 * gput_seq in the window since they may have been sent
	 * *before* we started our measurement. The rsm, if non-NULL,
	 * has been marked (note that if rsm had been NULL we would have
	 * returned in the previous block). So we go to the next, and continue
 	 * until we run out of entries or we exceed the gp_ack value.
 	 */
 	rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 	while (rsm) {
 		rack_mark_in_gp_win(tp, rsm);
 		if (SEQ_GT(rsm->r_end, tp->gput_ack))
 			break;
 		rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 	}
 }
 
 static void
 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
 			    tcp_seq th_ack, int line, uint8_t quality)
 {
 	uint64_t tim, bytes_ps, stim, utim;
 	uint32_t segsiz, bytes, reqbytes, us_cts;
 	int32_t gput, new_rtt_diff, timely_says;
 	uint64_t  resid_bw, subpart = 0, addpart = 0, srtt;
 	int did_add = 0;
 
 	us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
 	if (TSTMP_GEQ(us_cts, tp->gput_ts))
 		tim = us_cts - tp->gput_ts;
 	else
 		tim = 0;
 	if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts)
 		stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts;
 	else
 		stim = 0;
 	/*
 	 * Use the larger of the send time or ack time. This prevents us
	 * from being influenced by ack artifacts and coming up with too
	 * high of a measurement. Note that since we are spanning over many more
 	 * bytes in most of our measurements hopefully that is less likely to
 	 * occur.
 	 */
 	if (tim > stim)
 		utim = max(tim, 1);
 	else
 		utim = max(stim, 1);
 	reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz));
 	rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL);
 	if ((tim == 0) && (stim == 0)) {
 		/*
 		 * Invalid measurement time, maybe
 		 * all on one ack/one send?
 		 */
 		bytes = 0;
 		bytes_ps = 0;
 		rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
 					   0, 0, 0, 10, __LINE__, NULL, quality);
 		goto skip_measurement;
 	}
 	if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) {
 		/* We never made a us_rtt measurement? */
 		bytes = 0;
 		bytes_ps = 0;
 		rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
 					   0, 0, 0, 10, __LINE__, NULL, quality);
 		goto skip_measurement;
 	}
 	/*
 	 * Calculate the maximum possible b/w this connection
 	 * could have. We base our calculation on the lowest
 	 * rtt we have seen during the measurement and the
 	 * largest rwnd the client has given us in that time. This
 	 * forms a BDP that is the maximum that we could ever
 	 * get to the client. Anything larger is not valid.
 	 *
 	 * I originally had code here that rejected measurements
 	 * where the time was less than 1/2 the latest us_rtt.
	 * But after thinking on that I realized it's wrong since,
	 * say, you had a 150Mbps or even 1Gbps link, and you
	 * were a long way away.. example I am in Europe (100ms rtt)
	 * talking to my 1Gbps link in S.C. Now measuring say 150,000
	 * bytes my time would be 1.2ms, and yet my rtt would say
	 * the measurement was invalid since the time was < 50ms. The
	 * same thing is true for 150Mb (8ms of time).
	 *
	 * A better way I realized is to look at what the maximum
	 * the connection could possibly do. This is gated on
	 * the lowest RTT we have seen and the highest rwnd.
	 * We should in theory never exceed that; if we do,
	 * then something on the path is storing up packets
	 * and then feeding them all at once to our endpoint,
	 * messing up our measurement.
 	 */
 	rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd;
 	rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC;
 	rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt;
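	/*
	 * Illustrative example (not measured values): with a largest rwnd
	 * of 1,000,000 bytes and a lowest rtt of 20,000 usec in the window,
	 * last_max_bw = 1000000 * 1000000 / 20000 = 50,000,000 bytes/sec
	 * (~400 Mbps); any computed goodput above that is capped.
	 */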
 	if (SEQ_LT(th_ack, tp->gput_seq)) {
 		/* No measurement can be made */
 		bytes = 0;
 		bytes_ps = 0;
 		rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
 					   0, 0, 0, 10, __LINE__, NULL, quality);
 		goto skip_measurement;
 	} else
 		bytes = (th_ack - tp->gput_seq);
 	bytes_ps = (uint64_t)bytes;
 	/*
 	 * Don't measure a b/w for pacing unless we have gotten at least
	 * an initial window's worth of data in this measurement interval.
 	 *
 	 * Small numbers of bytes get badly influenced by delayed ack and
 	 * other artifacts. Note we take the initial window or our
 	 * defined minimum GP (defaulting to 10 which hopefully is the
 	 * IW).
 	 */
 	if (rack->rc_gp_filled == 0) {
 		/*
 		 * The initial estimate is special. We
 		 * have blasted out an IW worth of packets
		 * without real valid ack ts results. We
 		 * then setup the app_limited_needs_set flag,
 		 * this should get the first ack in (probably 2
 		 * MSS worth) to be recorded as the timestamp.
 		 * We thus allow a smaller number of bytes i.e.
 		 * IW - 2MSS.
 		 */
 		reqbytes -= (2 * segsiz);
		/* Also let's fill previous for our first measurement to be neutral */
 		rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
 	}
 	if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) {
 		rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
 					   rack->r_ctl.rc_app_limited_cnt,
 					   0, 0, 10, __LINE__, NULL, quality);
 		goto skip_measurement;
 	}
 	/*
 	 * We now need to calculate the Timely like status so
 	 * we can update (possibly) the b/w multipliers.
 	 */
 	new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt;
 	if (rack->rc_gp_filled == 0) {
 		/* No previous reading */
 		rack->r_ctl.rc_rtt_diff = new_rtt_diff;
 	} else {
 		if (rack->measure_saw_probe_rtt == 0) {
 			/*
 			 * We don't want a probertt to be counted
			 * since it will incorrectly be negative. We
			 * expect the RTT to be reduced when we
 			 * pace at a slower rate.
 			 */
 			rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8);
 			rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8);
 		}
 	}
 	timely_says = rack_make_timely_judgement(rack,
 	    rack->r_ctl.rc_gp_srtt,
 	    rack->r_ctl.rc_rtt_diff,
 	    rack->r_ctl.rc_prev_gp_srtt
 	);
 	bytes_ps *= HPTS_USEC_IN_SEC;
 	bytes_ps /= utim;
 	if (bytes_ps > rack->r_ctl.last_max_bw) {
 		/*
		 * Something on the path is playing games,
		 * since this b/w is not possible based
 		 * on our BDP (highest rwnd and lowest rtt
 		 * we saw in the measurement window).
 		 *
 		 * Another option here would be to
 		 * instead skip the measurement.
 		 */
 		rack_log_pacing_delay_calc(rack, bytes, reqbytes,
 					   bytes_ps, rack->r_ctl.last_max_bw, 0,
 					   11, __LINE__, NULL, quality);
 		bytes_ps = rack->r_ctl.last_max_bw;
 	}
 	/* We store gp for b/w in bytes per second */
 	if (rack->rc_gp_filled == 0) {
 		/* Initial measurement */
 		if (bytes_ps) {
 			rack->r_ctl.gp_bw = bytes_ps;
 			rack->rc_gp_filled = 1;
 			rack->r_ctl.num_measurements = 1;
 			rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
 		} else {
 			rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
 						   rack->r_ctl.rc_app_limited_cnt,
 						   0, 0, 10, __LINE__, NULL, quality);
 		}
 		if (tcp_in_hpts(rack->rc_tp) &&
 		    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
 			/*
 			 * Ok we can't trust the pacer in this case
 			 * where we transition from un-paced to paced.
 			 * Or for that matter when the burst mitigation
 			 * was making a wild guess and got it wrong.
 			 * Stop the pacer and clear up all the aggregate
 			 * delays etc.
 			 */
 			tcp_hpts_remove(rack->rc_tp);
 			rack->r_ctl.rc_hpts_flags = 0;
 			rack->r_ctl.rc_last_output_to = 0;
 		}
 		did_add = 2;
 	} else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
 		/* Still a small number run an average */
 		rack->r_ctl.gp_bw += bytes_ps;
 		addpart = rack->r_ctl.num_measurements;
 		rack->r_ctl.num_measurements++;
 		if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
 			/* We have collected enough to move forward */
 			rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
 		}
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 		did_add = 3;
 	} else {
 		/*
 		 * We want to take 1/wma of the goodput and add in to 7/8th
 		 * of the old value weighted by the srtt. So if your measurement
 		 * period is say 2 SRTT's long you would get 1/4 as the
 		 * value, if it was like 1/2 SRTT then you would get 1/16th.
 		 *
 		 * But we must be careful not to take too much i.e. if the
 		 * srtt is say 20ms and the measurement is taken over
 		 * 400ms our weight would be 400/20 i.e. 20. On the
 		 * other hand if we get a measurement over 1ms with a
 		 * 10ms rtt we only want to take a much smaller portion.
 		 */
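		/*
		 * Illustrative example of the weighting (values only for
		 * illustration): with srtt = 20,000 usec and a measurement
		 * spanning utim = 400,000 usec, the non-dynamic path below
		 * computes gp_bw * utim / (srtt * 8) = 2.5 * gp_bw, so the
		 * 1/2-and-1/2 clamp applies. With utim = 1,000 usec and
		 * srtt = 10,000 usec the update only replaces 1/80th of the
		 * running gp_bw.
		 */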
 		if (rack->r_ctl.num_measurements < 0xff) {
 			rack->r_ctl.num_measurements++;
 		}
 		srtt = (uint64_t)tp->t_srtt;
 		if (srtt == 0) {
 			/*
			 * Strange, why did t_srtt go back to zero?
 			 */
 			if (rack->r_ctl.rc_rack_min_rtt)
 				srtt = rack->r_ctl.rc_rack_min_rtt;
 			else
 				srtt = HPTS_USEC_IN_MSEC;
 		}
 		/*
 		 * XXXrrs: Note for reviewers, in playing with
 		 * dynamic pacing I discovered this GP calculation
 		 * as done originally leads to some undesired results.
 		 * Basically you can get longer measurements contributing
		 * too much to the WMA. Thus I changed it so that if you are
		 * doing dynamic adjustments we only do the apportioned
		 * adjustment if we have a very small (time wise) measurement.
		 * Longer measurements just get their weight (defaulting to 1/8)
		 * added to the WMA. We may want to think about changing
		 * this to always do that for both sides i.e. dynamic
		 * and non-dynamic... but considering lots of folks
		 * were playing with this I did not want to change the
		 * calculation per se without your thoughts.. Lawrence?
 		 * Peter??
 		 */
 		if (rack->rc_gp_dyn_mul == 0) {
 			subpart = rack->r_ctl.gp_bw * utim;
 			subpart /= (srtt * 8);
 			if (subpart < (rack->r_ctl.gp_bw / 2)) {
 				/*
				 * The b/w update takes away no more
				 * than 1/2 of our running total,
				 * so factor it in.
 				 */
 				addpart = bytes_ps * utim;
 				addpart /= (srtt * 8);
 			} else {
 				/*
 				 * Don't allow a single measurement
 				 * to account for more than 1/2 of the
 				 * WMA. This could happen on a retransmission
 				 * where utim becomes huge compared to
 				 * srtt (multiple retransmissions when using
 				 * the sending rate which factors in all the
 				 * transmissions from the first one).
 				 */
 				subpart = rack->r_ctl.gp_bw / 2;
 				addpart = bytes_ps / 2;
 			}
 			resid_bw = rack->r_ctl.gp_bw - subpart;
 			rack->r_ctl.gp_bw = resid_bw + addpart;
 			did_add = 1;
 		} else {
 			if ((utim / srtt) <= 1) {
 				/*
 				 * The b/w update was over a small period
 				 * of time. The idea here is to prevent a small
 				 * measurement time period from counting
 				 * too much. So we scale it based on the
 				 * time so it attributes less than 1/rack_wma_divisor
 				 * of its measurement.
 				 */
 				subpart = rack->r_ctl.gp_bw * utim;
 				subpart /= (srtt * rack_wma_divisor);
 				addpart = bytes_ps * utim;
 				addpart /= (srtt * rack_wma_divisor);
 			} else {
 				/*
 				 * The scaled measurement was long
				 * enough, so let's just add in the
 				 * portion of the measurement i.e. 1/rack_wma_divisor
 				 */
 				subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
 				addpart = bytes_ps / rack_wma_divisor;
 			}
 			if ((rack->measure_saw_probe_rtt == 0) ||
 		            (bytes_ps > rack->r_ctl.gp_bw)) {
 				/*
 				 * For probe-rtt we only add it in
				 * if it's larger; all others we just
 				 * add in.
 				 */
 				did_add = 1;
 				resid_bw = rack->r_ctl.gp_bw - subpart;
 				rack->r_ctl.gp_bw = resid_bw + addpart;
 			}
 		}
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 	}
 	if ((rack->gp_ready == 0) &&
 	    (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
 		/* We have enough measurements now */
 		rack->gp_ready = 1;
 		if (rack->dgp_on ||
 		    rack->rack_hibeta)
 			rack_set_cc_pacing(rack);
 		if (rack->defer_options)
 			rack_apply_deferred_options(rack);
 	}
 	rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim,
 				   rack_get_bw(rack), 22, did_add, NULL, quality);
 	/* We do not update any multipliers if we are in or have seen a probe-rtt */
 	if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set)
 		rack_update_multiplier(rack, timely_says, bytes_ps,
 				       rack->r_ctl.rc_gp_srtt,
 				       rack->r_ctl.rc_rtt_diff);
 	rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim,
 				   rack_get_bw(rack), 3, line, NULL, quality);
 	rack_log_pacing_delay_calc(rack,
 				   bytes, /* flex2 */
 				   tim, /* flex1 */
 				   bytes_ps, /* bw_inuse */
 				   rack->r_ctl.gp_bw, /* delRate */
 				   rack_get_lt_bw(rack), /* rttProp */
 				   20, line, NULL, 0);
 	/* reset the gp srtt and setup the new prev */
 	rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt;
 	/* Record the lost count for the next measurement */
 	rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count;
 skip_measurement:
 	/*
 	 * We restart our diffs based on the gpsrtt in the
 	 * measurement window.
 	 */
 	rack->rc_gp_rtt_set = 0;
 	rack->rc_gp_saw_rec = 0;
 	rack->rc_gp_saw_ca = 0;
 	rack->rc_gp_saw_ss = 0;
 	rack->rc_dragged_bottom = 0;
 
 	if (quality == RACK_QUALITY_HIGH) {
 		/*
 		 * Gput in the stats world is in kbps where bytes_ps is
 		 * bytes per second so we do ((x * 8)/ 1000).
 		 */
 		gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000);
 #ifdef STATS
 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
 					 gput);
 		/*
 		 * XXXLAS: This is a temporary hack, and should be
 		 * chained off VOI_TCP_GPUT when stats(9) grows an
 		 * API to deal with chained VOIs.
 		 */
 		if (tp->t_stats_gput_prev > 0)
 			stats_voi_update_abs_s32(tp->t_stats,
 						 VOI_TCP_GPUT_ND,
 						 ((gput - tp->t_stats_gput_prev) * 100) /
 						 tp->t_stats_gput_prev);
 #endif
 		tp->t_stats_gput_prev = gput;
 	}
 	tp->t_flags &= ~TF_GPUTINPROG;
 	/*
	 * Now, are we app limited and is there space from where we
	 * were to where we want to go?
	 *
	 * We don't do the other case, i.e. non-app-limited, here since
 	 * the next send will trigger us picking up the missing data.
 	 */
 	if (rack->r_ctl.rc_first_appl &&
 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    rack->r_ctl.rc_app_limited_cnt &&
 	    (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) &&
 	    ((rack->r_ctl.rc_first_appl->r_end - th_ack) >
 	     max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
 		/*
 		 * Yep there is enough outstanding to make a measurement here.
 		 */
 		struct rack_sendmap *rsm;
 
 		rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
 		rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
 		tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
 		rack->app_limited_needs_set = 0;
 		tp->gput_seq = th_ack;
 		if (rack->in_probe_rtt)
 			rack->measure_saw_probe_rtt = 1;
 		else if ((rack->measure_saw_probe_rtt) &&
 			 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
 			rack->measure_saw_probe_rtt = 0;
 		if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) {
 			/* There is a full window to gain info from */
 			tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
 		} else {
 			/* We can only measure up to the applimited point */
 			tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack);
 			if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
 				/*
 				 * We don't have enough to make a measurement.
 				 */
 				tp->t_flags &= ~TF_GPUTINPROG;
 				rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
 							   0, 0, 0, 6, __LINE__, NULL, quality);
 				return;
 			}
 		}
 		if (tp->t_state >= TCPS_FIN_WAIT_1) {
 			/*
			 * We will get no more data into the SB;
 			 * this means we need to have the data available
 			 * before we start a measurement.
 			 */
 			if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) {
 				/* Nope not enough data. */
 				return;
 			}
 		}
 		tp->t_flags |= TF_GPUTINPROG;
 		/*
 		 * Now we need to find the timestamp of the send at tp->gput_seq
 		 * for the send based measurement.
 		 */
 		rack->r_ctl.rc_gp_cumack_ts = 0;
 		rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
 		if (rsm) {
 			/* Ok send-based limit is set */
 			if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
 				/*
 				 * Move back to include the earlier part
 				 * so our ack time lines up right (this may
 				 * make an overlapping measurement but thats
 				 * ok).
 				 */
 				tp->gput_seq = rsm->r_start;
 			}
 			if (rsm->r_flags & RACK_ACKED) {
 				struct rack_sendmap *nrsm;
 
 				tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
 				tp->gput_seq = rsm->r_end;
 				nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
 				if (nrsm)
 					rsm = nrsm;
 				else {
 					rack->app_limited_needs_set = 1;
 				}
 			} else
 				rack->app_limited_needs_set = 1;
 			/* We always go from the first send */
 			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
 		} else {
 			/*
 			 * If we don't find the rsm due to some
			 * send-limit, set the current time, which
 			 * basically disables the send-limit.
 			 */
 			struct timeval tv;
 
 			microuptime(&tv);
 			rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
 		}
 		rack_tend_gp_marks(tp, rack);
 		rack_log_pacing_delay_calc(rack,
 					   tp->gput_seq,
 					   tp->gput_ack,
 					   (uint64_t)rsm,
 					   tp->gput_ts,
 					   (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
 					   9,
 					   __LINE__, rsm, quality);
 		rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
 	} else {
 		/*
 		 * To make sure proper timestamp merging occurs, we need to clear
 		 * all GP marks if we don't start a measurement.
 		 */
 		rack_clear_gp_marks(tp, rack);
 	}
 }
 
 /*
  * CC wrapper hook functions
  */
 static void
 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs,
     uint16_t type, int32_t recovery)
 {
 	uint32_t prior_cwnd, acked;
 	struct tcp_log_buffer *lgb = NULL;
 	uint8_t labc_to_use, quality;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	tp->t_ccv.nsegs = nsegs;
 	acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una);
 	if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
 		uint32_t max;
 
 		max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp);
 		if (tp->t_ccv.bytes_this_ack > max) {
 			tp->t_ccv.bytes_this_ack = max;
 		}
 	}
 #ifdef STATS
 	stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
 	    ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd);
 #endif
 	if ((th_ack == tp->snd_max) && rack->lt_bw_up) {
		/*
		 * This acks everything, so it is time to end any
		 * lt_bw_up accounting we have running until
		 * something new is sent.
		 */
 		struct timeval tv;
 
 		rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq);
 		rack->r_ctl.lt_seq = tp->snd_max;
 		(void)tcp_get_usecs(&tv);
 		rack->r_ctl.lt_bw_time += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
 		rack->lt_bw_up = 0;
 	}
 	quality = RACK_QUALITY_NONE;
 	if ((tp->t_flags & TF_GPUTINPROG) &&
 	    rack_enough_for_measurement(tp, rack, th_ack, &quality)) {
 		/* Measure the Goodput */
 		rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality);
 	}
	/* Which way are we limited? If not cwnd limited, no advance in CA */
 	if (tp->snd_cwnd <= tp->snd_wnd)
 		tp->t_ccv.flags |= CCF_CWND_LIMITED;
 	else
 		tp->t_ccv.flags &= ~CCF_CWND_LIMITED;
 	if (tp->snd_cwnd > tp->snd_ssthresh) {
 		tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack,
 			 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp));
 		/* For the setting of a window past use the actual scwnd we are using */
 		if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) {
 			tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use;
 			tp->t_ccv.flags |= CCF_ABC_SENTAWND;
 		}
 	} else {
 		tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
 		tp->t_bytes_acked = 0;
 	}
 	prior_cwnd = tp->snd_cwnd;
 	if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec ||
 	    (rack_client_low_buf && rack->client_bufferlvl &&
 	    (rack->client_bufferlvl < rack_client_low_buf)))
 		labc_to_use = rack->rc_labc;
 	else
 		labc_to_use = rack_max_abc_post_recovery;
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.flex1 = th_ack;
 		log.u_bbr.flex2 = tp->t_ccv.flags;
 		log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
 		log.u_bbr.flex4 = tp->t_ccv.nsegs;
 		log.u_bbr.flex5 = labc_to_use;
 		log.u_bbr.flex6 = prior_cwnd;
 		log.u_bbr.flex7 = V_tcp_do_newsack;
 		log.u_bbr.flex8 = 1;
 		lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
 				     0, &log, false, NULL, __func__, __LINE__,&tv);
 	}
 	if (CC_ALGO(tp)->ack_received != NULL) {
 		/* XXXLAS: Find a way to live without this */
 		tp->t_ccv.curack = th_ack;
 		tp->t_ccv.labc = labc_to_use;
 		tp->t_ccv.flags |= CCF_USE_LOCAL_ABC;
 		CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
 	}
 	if (lgb) {
 		lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
 	}
 	if (rack->r_must_retran) {
 		if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
 			/*
			 * We are now beyond the rxt point so let's disable
 			 * the flag.
 			 */
 			rack->r_ctl.rc_out_at_rto = 0;
 			rack->r_must_retran = 0;
 		} else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
 			/*
 			 * Only decrement the rc_out_at_rto if the cwnd advances
 			 * at least a whole segment. Otherwise next time the peer
			 * acks, we won't be able to send; this generally happens
 			 * when we are in Congestion Avoidance.
 			 */
 			if (acked <= rack->r_ctl.rc_out_at_rto){
 				rack->r_ctl.rc_out_at_rto -= acked;
 			} else {
 				rack->r_ctl.rc_out_at_rto = 0;
 			}
 		}
 	}
 #ifdef STATS
 	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
 #endif
 	if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
 		rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
 	}
 }
 
 static void
 tcp_rack_partialack(struct tcpcb *tp)
 {
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	/*
 	 * If we are doing PRR and have enough
 	 * room to send <or> we are pacing and prr
 	 * is disabled we will want to see if we
 	 * can send data (by setting r_wanted_output to
 	 * true).
 	 */
 	if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
 	    rack->rack_no_prr)
 		rack->r_wanted_output = 1;
 }
 
 static inline void
 rack_set_most_aggr(struct tcp_rack *rack)
 {
 	rack->r_fill_less_agg = 0;
	/* Once the cwnd has been clamped we don't do fill_cw */
 	if (rack->r_cwnd_was_clamped == 0)
 		rack->rc_pace_to_cwnd = 1;
 	rack->r_pacing_discount = 0;
 }
 
 static inline void
 rack_limit_fillcw(struct tcp_rack *rack)
 {
 	rack->r_fill_less_agg = 1;
	/* Once the cwnd has been clamped we don't do fill_cw */
 	if (rack->r_cwnd_was_clamped == 0)
 		rack->rc_pace_to_cwnd = 1;
 	rack->r_pacing_discount = 0;
 }
 
 static inline void
 rack_disable_fillcw(struct tcp_rack *rack)
 {
 	rack->r_fill_less_agg = 1;
 	rack->rc_pace_to_cwnd = 0;
 	rack->r_pacing_discount = 0;
 }
 
 static void
 rack_client_buffer_level_set(struct tcp_rack *rack)
 {
 	/*
 	 * Only if DGP is on do we do anything that
 	 * changes stack behavior. If DGP is off all
 	 * we will do is issue a BB log (if BB logging is
 	 * on) and return.
 	 */
 	if (rack->dgp_on == 0) {
 		rack_log_pacing_delay_calc(rack, 0, rack->client_bufferlvl,
 					   0, 0, 0, 30, __LINE__, NULL, 0);
 		return;
 	}
 	if (IN_RECOVERY(rack->rc_tp->t_flags) && rack->r_ctl.full_dgp_in_rec) {
 		goto set_most_agg;
 	}
 	/*
 	 * We are in DGP so what setting should we
 	 * apply based on where the client is?
 	 */
 	switch(rack->r_ctl.rc_dgp_bl_agg) {
 	default:
 	case DGP_LEVEL0:
 set_most_agg:
 		rack_set_most_aggr(rack);
 		break;
 	case DGP_LEVEL1:
 		if (rack->client_bufferlvl == 4)
 			rack_limit_fillcw(rack);
 		else if (rack->client_bufferlvl == 5)
 			rack_disable_fillcw(rack);
 		else
 			rack_set_most_aggr(rack);
 		break;
 	case DGP_LEVEL2:
 		if (rack->client_bufferlvl == 3)
 			rack_limit_fillcw(rack);
 		else if (rack->client_bufferlvl == 4)
 			rack_disable_fillcw(rack);
 		else if (rack->client_bufferlvl == 5) {
 			rack_disable_fillcw(rack);
 			rack->r_pacing_discount = 1;
 			rack->r_ctl.pacing_discount_amm = 1;
 		} else
 			rack_set_most_aggr(rack);
 		break;
 	case DGP_LEVEL3:
 		if (rack->client_bufferlvl == 2)
 			rack_limit_fillcw(rack);
 		else if (rack->client_bufferlvl == 3)
 			rack_disable_fillcw(rack);
 		else if (rack->client_bufferlvl == 4) {
 			rack_disable_fillcw(rack);
 			rack->r_pacing_discount = 1;
 			rack->r_ctl.pacing_discount_amm = 1;
 		} else if (rack->client_bufferlvl == 5) {
 			rack_disable_fillcw(rack);
 			rack->r_pacing_discount = 1;
 			rack->r_ctl.pacing_discount_amm = 2;
 		} else
 			rack_set_most_aggr(rack);
 		break;
 	}
 	rack_log_pacing_delay_calc(rack, rack->r_ctl.rc_dgp_bl_agg, rack->client_bufferlvl, 0,
 				   0, 0, 30, __LINE__, NULL, 0);
 }
 
 static void
 do_rack_check_for_unclamp(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	/*
	 * Can we unclamp? We unclamp if at least N rounds have
	 * transpired and the retransmit rate over those rounds
	 * is low enough.
 	 */
 	uint64_t snds, rxts, rxt_per;
 	uint32_t rnds;
 
 	rnds = rack->r_ctl.current_round - rack->r_ctl.last_rnd_rxt_clamped;
 	if ((rack_unclamp_round_thresh > 0) &&
 	    (rnds >= rack_unclamp_round_thresh)) {
 		snds = tp->t_sndbytes - rack->r_ctl.last_sndbytes;
 		KASSERT ((snds > 0), ("rack:%p tp:%p snds:%ju is 0", rack, tp,
 		    (uintmax_t)snds));
 		rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_snd_rxt_bytes;
 		rxt_per = rxts * 1000;
 		rxt_per /= snds;
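		/*
		 * rxt_per is retransmitted bytes per 1000 bytes sent.
		 * Illustrative example: 5,000 rxt bytes against 1,000,000
		 * sent bytes gives rxt_per = 5 (0.5%), which is then
		 * compared with rack_unclamp_rxt_thresh.
		 */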
 		if ((uint32_t)rxt_per <= rack_unclamp_rxt_thresh) {
 			/* Unclamp */
 			if (tcp_bblogging_on(rack->rc_tp)) {
 				union tcp_log_stackspecific log;
 				struct timeval tv;
 
 				memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 				log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 				log.u_bbr.flex3 = rnds;
 				log.u_bbr.flex4 = rack_unclamp_round_thresh;
 				log.u_bbr.flex5 = (uint32_t)rxt_per;
 				log.u_bbr.flex8 = 6;
 				log.u_bbr.pkt_epoch = rack->r_ctl.rc_pace_max_segs;
 				log.u_bbr.bbr_state = rack->rc_pace_to_cwnd;
 				log.u_bbr.delivered = rack->r_ctl.num_of_clamps_applied;
 				log.u_bbr.applimited = rack->r_ctl.max_clamps;
 				log.u_bbr.epoch = rack->r_ctl.clamp_options;
 				log.u_bbr.cur_del_rate = rxts;
 				log.u_bbr.bw_inuse = rack_get_lt_bw(rack);
 				log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 				log.u_bbr.lt_epoch = (uint32_t)((rack->r_ctl.gp_bw >> 32) & 0x00000000ffffffff);
 				log.u_bbr.pkts_out = (uint32_t)(rack->r_ctl.gp_bw & 0x00000000ffffffff);
 				tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
 					      0, &log, false, NULL, NULL, 0, &tv);
 			}
 			rack->r_ctl.num_of_clamps_applied = 0;
 			rack->r_cwnd_was_clamped = 0;
 			rack->excess_rxt_on = 1;
 			if (rack->r_ctl.clamp_options) {
 				/*
 				 * We only allow fillcw to be toggled
 				 * if you are setting a max seg too.
 				 */
 				if (rack->r_ctl.clamp_options & 0x1) {
 					if ((rack->rc_pace_to_cwnd == 0) && (rack->dgp_on == 0)) {
 						/* turn on fill cw  for non-dgp*/
 						rack->rc_pace_to_cwnd = 0;
 					} else if ((rack->dgp_on == 1) && (rack->rc_pace_to_cwnd == 1)) {
 						/* For DGP we want it off */
 						rack->rc_pace_to_cwnd = 1;
 					}
 				}
 			}
 			if (rack->dgp_on) {
 				/* Reset all multipliers to 100.0 so just the measured bw */
 				/* Crash any per boosts down to 100% */
 				rack->r_ctl.rack_per_of_gp_rec = 100;
 				rack->r_ctl.rack_per_of_gp_ss = 100;
 				rack->r_ctl.rack_per_of_gp_ca = 100;
 				/* Set in an upper bound for ss/ca % increase */
 				rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss;
 				rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca;
 			}
 		}
 	}
 }
 
 static void
 do_rack_excess_rxt(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	/*
 	 * Rack excess rxt accounting is turned on. If we
 	 * are above a threshold of rxt's in at least N
 	 * rounds, then back off the cwnd and ssthresh
 	 * to fit into the long-term b/w.
 	 */
 	uint64_t snds, rxts, rxt_per, lt_bw, bdp;
 	uint32_t rnds, new_cwnd, new_ssthresh, rtt, shared_cwnd_was_enabled = 0;
 
 	/* Is it shut off by 0 rounds? */
 	if (rack_rxt_min_rnds == 0)
 		return;
 	if ((rack->r_ctl.max_clamps > 0) &&
 	    (rack->r_ctl.num_of_clamps_applied >= rack->r_ctl.max_clamps)) {
 		/*
 		 * The idea, if max_clamps is set, is that if clamping it
		 * N times did not work, then there is no sense in
		 * clamping it again. The link is just a lossy link and
 		 * our clamps are doing no good. Turn it off so we don't come
 		 * back here again.
 		 */
 		rack->excess_rxt_on = 0;
 		rack->r_cwnd_was_clamped = 0;
 		rack->r_ctl.num_of_clamps_applied = 0;
 		return;
 	}
 	snds = tp->t_sndbytes - rack->r_ctl.last_sndbytes;
 	rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_snd_rxt_bytes;
 	rnds = rack->r_ctl.current_round - rack->r_ctl.last_rnd_rxt_clamped;
 	/* Has enough rounds progressed for us to re-measure? */
 	if ((rnds >= rack_rxt_min_rnds) &&
 	    (rack->r_ctl.rxt_threshold > 0)){
 		rxt_per = rxts * 1000;
 		rxt_per /= snds;
 		if (rxt_per >= rack->r_ctl.rxt_threshold) {
 			/*
 			 * Action required:
			 *  We are above our excess retransmit level, let's
 			 *  cut down the cwnd and ssthresh to match the long-term
 			 *  b/w we are getting.
 			 */
 			/* First disable scwnd if enabled */
 #ifdef NETFLIX_SHARED_CWND
 			rack->rack_enable_scwnd = 0;
 			if (rack->r_ctl.rc_scw) {
 				uint32_t limit;
 
 				shared_cwnd_was_enabled = 1;
 				if (rack->r_limit_scw)
 					limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
 				else
 					limit = 0;
 				tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
 							  rack->r_ctl.rc_scw_index,
 							  limit);
 				rack->r_ctl.rc_scw = NULL;
 			}
 
 #endif
 			/* Calculate what the cwnd and ssthresh should be */
 			tcp_trace_point(rack->rc_tp, TCP_TP_EXCESS_RXT);
 			lt_bw = rack_get_lt_bw(rack);
 			if (lt_bw == 0) {
 				/*
				 * No lt_bw, so let's chop the cwnd to one MSS
				 * and the ssthresh to the IW.
 				 */
 reset_to_iw:
 				new_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
 				new_ssthresh = tcp_compute_initwnd(tcp_maxseg(tp));
 			} else {
 				rtt = rack->rc_rack_rtt;
 				if (rtt == 0) {
 					/* If we have no rack_rtt drop to the IW situation */
 					goto reset_to_iw;
 				}
 				bdp = lt_bw * (uint64_t)rtt;
 				bdp /= HPTS_USEC_IN_SEC;
 				new_cwnd = (uint32_t)bdp;
 				new_ssthresh = new_cwnd - 1;
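				/*
				 * Illustrative example (not measured values):
				 * lt_bw = 2,500,000 bytes/sec (~20 Mbps) and
				 * rtt = 40,000 usec give bdp = 100,000 bytes,
				 * so new_cwnd = 100,000 and new_ssthresh =
				 * 99,999, pacing us into the long-term b/w.
				 */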
 				if (new_cwnd < ctf_fixed_maxseg(tp)) {
					/* Rock bottom, go to the IW settings */
 					goto reset_to_iw;
 				}
 			}
 			rack->r_cwnd_was_clamped = 1;
 			rack->r_ctl.num_of_clamps_applied++;
			/* Reset the counter from now */
 			tp->t_bytes_acked = 0;
 			/*
 			 * Now what about options?
			 * We look at the bottom 8 bits:
 			 * F = fill cw bit (toggle it if set)
 			 * S = Segment bits
 			 * M = set max segment bit
 			 *
 			 * SSSS SSMF
 			 */
 			if (rack->r_ctl.clamp_options) {
 				if (rack->r_ctl.clamp_options & 0x1) {
 					if ((rack->rc_pace_to_cwnd == 0) && (rack->dgp_on == 0)) {
 						/* turn on fill cw  for non-dgp*/
 						rack->rc_pace_to_cwnd = 1;
 					} else if ((rack->dgp_on == 1) && (rack->rc_pace_to_cwnd == 1)) {
 						/* For DGP we want it off */
 						rack->rc_pace_to_cwnd = 0;
 					}
 				}
 			}
 			if (rack->dgp_on) {
 				/* Reset all multipliers to 100.0 so just the measured bw */
 				/* Crash any per boosts down to 100% */
 				rack->r_ctl.rack_per_of_gp_rec = 100;
 				rack->r_ctl.rack_per_of_gp_ss = 100;
 				rack->r_ctl.rack_per_of_gp_ca = 100;
 				/* Set in an upper bound for ss/ca % increase */
 				rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_clamp_ss_upper;
 				rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_clamp_ca_upper;
 				/* Now move to the lt_bw */
 				rack->r_ctl.gp_bw = lt_bw;
 				rack->rc_gp_filled = 1;
 				rack->r_ctl.num_measurements = RACK_REQ_AVG;
 			}
 			if (tcp_bblogging_on(rack->rc_tp)) {
 				union tcp_log_stackspecific log;
 				struct timeval tv;
 
 				memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 				log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 				log.u_bbr.flex1 = new_cwnd;
 				log.u_bbr.flex2 = new_ssthresh;
 				log.u_bbr.flex3 = rnds;
 				log.u_bbr.flex4 = rack_rxt_min_rnds;
 				log.u_bbr.flex5 = rtt;
 				log.u_bbr.flex6 = shared_cwnd_was_enabled;
 				log.u_bbr.flex8 = 5;
 				log.u_bbr.pkt_epoch = rack->r_ctl.rc_pace_max_segs;
 				log.u_bbr.bbr_state = rack->rc_pace_to_cwnd;
 				log.u_bbr.delivered = rack->r_ctl.num_of_clamps_applied;
 				log.u_bbr.applimited = rack->r_ctl.max_clamps;
 				log.u_bbr.epoch = rack->r_ctl.clamp_options;
 				log.u_bbr.cur_del_rate = rxts;
 				log.u_bbr.delRate = snds;
 				log.u_bbr.rttProp = rack->r_ctl.rxt_threshold;
 				log.u_bbr.bw_inuse = lt_bw;
 				log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 				log.u_bbr.lt_epoch = (uint32_t)((rack->r_ctl.gp_bw >> 32) & 0x00000000ffffffff);
 				log.u_bbr.pkts_out = (uint32_t)(rack->r_ctl.gp_bw & 0x00000000ffffffff);
 				tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
 					       0, &log, false, NULL, NULL, 0, &tv);
 			}
 			/* Update our point where we did it */
 			if (rack->r_ctl.already_had_a_excess == 0) {
 				rack->r_ctl.already_had_a_excess = 1;
 				counter_u64_add(rack_rxt_clamps_cwnd_uniq, 1);
 			}
 			counter_u64_add(rack_rxt_clamps_cwnd, 1);
 			rack->r_ctl.last_sndbytes = tp->t_sndbytes;
 			rack->r_ctl.last_snd_rxt_bytes = tp->t_snd_rxt_bytes;
 			rack->r_ctl.last_rnd_rxt_clamped = rack->r_ctl.current_round;
 			if (new_cwnd < tp->snd_cwnd)
 				tp->snd_cwnd = new_cwnd;
 			if (new_ssthresh < tp->snd_ssthresh)
 				tp->snd_ssthresh = new_ssthresh;
 		}
 	}
 }
 
 static void
 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
 {
 	struct tcp_rack *rack;
 	uint32_t orig_cwnd;
 
 	orig_cwnd = tp->snd_cwnd;
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	/* only alert CC if we alerted when we entered */
 	if (CC_ALGO(tp)->post_recovery != NULL) {
 		tp->t_ccv.curack = th_ack;
 		CC_ALGO(tp)->post_recovery(&tp->t_ccv);
 		if (tp->snd_cwnd < tp->snd_ssthresh) {
 			/*
 			 * Rack has burst control and pacing
			 * so let's not set this any lower than
 			 * snd_ssthresh per RFC-6582 (option 2).
 			 */
 			tp->snd_cwnd = tp->snd_ssthresh;
 		}
 	}
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.flex1 = th_ack;
 		log.u_bbr.flex2 = tp->t_ccv.flags;
 		log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack;
 		log.u_bbr.flex4 = tp->t_ccv.nsegs;
 		log.u_bbr.flex5 = V_tcp_abc_l_var;
 		log.u_bbr.flex6 = orig_cwnd;
 		log.u_bbr.flex7 = V_tcp_do_newsack;
 		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex8 = 2;
 		tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
 			       0, &log, false, NULL, __func__, __LINE__, &tv);
 	}
 	if ((rack->rack_no_prr == 0) &&
 	    (rack->no_prr_addback == 0) &&
 	    (rack->r_ctl.rc_prr_sndcnt > 0)) {
 		/*
 		 * Suck the next prr cnt back into cwnd, but
 		 * only do that if we are not application limited.
 		 */
 		if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) {
 			/*
 			 * We are allowed to add back to the cwnd the amount we did
 			 * not get out if:
 			 * a) no_prr_addback is off.
 			 * b) we are not app limited
 			 * c) we are doing prr
 			 * <and>
 			 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none).
 			 */
 			tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax),
 					    rack->r_ctl.rc_prr_sndcnt);
 		}
 		rack->r_ctl.rc_prr_sndcnt = 0;
 		rack_log_to_prr(rack, 1, 0, __LINE__);
 	}
 	rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
 	tp->snd_recover = tp->snd_una;
 	if (rack->r_ctl.dsack_persist) {
 		rack->r_ctl.dsack_persist--;
 		if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
 			rack->r_ctl.num_dsack = 0;
 		}
 		rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
 	}
 	EXIT_RECOVERY(tp->t_flags);
 	if (rack->r_ctl.full_dgp_in_rec)
 		rack_client_buffer_level_set(rack);
 }
 
 static void
 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line)
 {
 	struct tcp_rack *rack;
 	uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 #ifdef STATS
 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
 #endif
 	if (IN_RECOVERY(tp->t_flags) == 0) {
 		in_rec_at_entry = 0;
 		ssthresh_enter = tp->snd_ssthresh;
 		cwnd_enter = tp->snd_cwnd;
 	} else
 		in_rec_at_entry = 1;
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	switch (type) {
 	case CC_NDUPACK:
 		tp->t_flags &= ~TF_WASFRECOVERY;
 		tp->t_flags &= ~TF_WASCRECOVERY;
 		if (!IN_FASTRECOVERY(tp->t_flags)) {
 			if (rack->dgp_on && rack->r_cwnd_was_clamped) {
 				/* Reset the gains so that on exit we will be softer longer */
 				rack->r_ctl.rack_per_of_gp_rec = 100;
 				rack->r_ctl.rack_per_of_gp_ss = 98;
 				rack->r_ctl.rack_per_of_gp_ca = 98;
 			}
 			rack->r_ctl.rc_prr_delivered = 0;
 			rack->r_ctl.rc_prr_out = 0;
 			rack->r_fast_output = 0;
 			if (rack->rack_no_prr == 0) {
 				rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
 				rack_log_to_prr(rack, 2, in_rec_at_entry, line);
 			}
 			rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una;
 			tp->snd_recover = tp->snd_max;
 			if (tp->t_flags2 & TF2_ECN_PERMIT)
 				tp->t_flags2 |= TF2_ECN_SND_CWR;
 		}
 		break;
 	case CC_ECN:
 		if (!IN_CONGRECOVERY(tp->t_flags) ||
 		    /*
 		     * Allow ECN reaction on ACK to CWR, if
 		     * that data segment was also CE marked.
 		     */
 		    SEQ_GEQ(ack, tp->snd_recover)) {
 			EXIT_CONGRECOVERY(tp->t_flags);
 			KMOD_TCPSTAT_INC(tcps_ecn_rcwnd);
 			rack->r_fast_output = 0;
 			tp->snd_recover = tp->snd_max + 1;
 			if (tp->t_flags2 & TF2_ECN_PERMIT)
 				tp->t_flags2 |= TF2_ECN_SND_CWR;
 		}
 		break;
 	case CC_RTO:
 		tp->t_dupacks = 0;
 		tp->t_bytes_acked = 0;
 		rack->r_fast_output = 0;
 		EXIT_RECOVERY(tp->t_flags);
 		tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 /
 		    ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp);
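		/* i.e. half of min(snd_wnd, cwnd_to_use), rounded down to whole segments, with a floor of two segments */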
 		orig_cwnd = tp->snd_cwnd;
 		tp->snd_cwnd = ctf_fixed_maxseg(tp);
 		rack_log_to_prr(rack, 16, orig_cwnd, line);
 		if (tp->t_flags2 & TF2_ECN_PERMIT)
 			tp->t_flags2 |= TF2_ECN_SND_CWR;
 		break;
 	case CC_RTO_ERR:
 		KMOD_TCPSTAT_INC(tcps_sndrexmitbad);
 		/* RTO was unnecessary, so reset everything. */
 		tp->snd_cwnd = tp->snd_cwnd_prev;
 		tp->snd_ssthresh = tp->snd_ssthresh_prev;
 		tp->snd_recover = tp->snd_recover_prev;
 		if (tp->t_flags & TF_WASFRECOVERY) {
 			ENTER_FASTRECOVERY(tp->t_flags);
 			tp->t_flags &= ~TF_WASFRECOVERY;
 		}
 		if (tp->t_flags & TF_WASCRECOVERY) {
 			ENTER_CONGRECOVERY(tp->t_flags);
 			tp->t_flags &= ~TF_WASCRECOVERY;
 		}
 		tp->snd_nxt = tp->snd_max;
 		tp->t_badrxtwin = 0;
 		break;
 	}
 	if ((CC_ALGO(tp)->cong_signal != NULL)  &&
 	    (type != CC_RTO)){
 		tp->t_ccv.curack = ack;
 		CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
 	}
 	if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) {
 		rack_log_to_prr(rack, 15, cwnd_enter, line);
 		if (rack->r_ctl.full_dgp_in_rec)
 			rack_client_buffer_level_set(rack);
 		rack->r_ctl.dsack_byte_cnt = 0;
 		rack->r_ctl.retran_during_recovery = 0;
 		rack->r_ctl.rc_cwnd_at_erec = cwnd_enter;
 		rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter;
 		rack->r_ent_rec_ns = 1;
 	}
 }
 
 static inline void
 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
 {
 	uint32_t i_cwnd;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	if (CC_ALGO(tp)->after_idle != NULL)
 		CC_ALGO(tp)->after_idle(&tp->t_ccv);
 
 	if (tp->snd_cwnd == 1)
 		i_cwnd = tp->t_maxseg;		/* SYN(-ACK) lost */
 	else
 		i_cwnd = rc_init_window(rack);
 
	/*
	 * Being idle is no different from starting with the initial window.
	 * If the cc clamps it down below the initial window, raise it back
	 * to the initial window.
	 */
 	if (tp->snd_cwnd < i_cwnd) {
 		tp->snd_cwnd = i_cwnd;
 	}
 }
 
 /*
  * Indicate whether this ack should be delayed.  We can delay the ack if
  * following conditions are met:
  *	- There is no delayed ack timer in progress.
  *	- Our last ack wasn't a 0-sized window. We never want to delay
  *	  the ack that opens up a 0-sized window.
  *	- LRO wasn't used for this segment. We make sure by checking that the
  *	  segment size is not larger than the MSS.
  *	- Delayed acks are enabled or this is a half-synchronized T/TCP
  *	  connection.
  */
 #define DELAY_ACK(tp, tlen)			 \
 	(((tp->t_flags & TF_RXWIN0SENT) == 0) && \
 	((tp->t_flags & TF_DELACK) == 0) &&	 \
 	(tlen <= tp->t_maxseg) &&		 \
 	(tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
 
 static struct rack_sendmap *
 rack_find_lowest_rsm(struct tcp_rack *rack)
 {
 	struct rack_sendmap *rsm;
 
	/*
	 * Walk the time-ordered transmitted list looking for an rsm that is
	 * not acked. This will be the one that was sent the longest time
	 * ago that is still outstanding.
	 */
 	TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
 		if (rsm->r_flags & RACK_ACKED) {
 			continue;
 		}
 		goto finish;
 	}
 finish:
 	return (rsm);
 }
 
 static struct rack_sendmap *
 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
 {
 	struct rack_sendmap *prsm;
 
	/*
	 * Walk the sequence-ordered list backward until we arrive at the
	 * highest seq not acked. In theory when this is called it
	 * should be the last segment (which it was not).
	 */
 	prsm = rsm;
 
 	TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) {
 		if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
 			continue;
 		}
 		return (prsm);
 	}
 	return (NULL);
 }
 
 static uint32_t
 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
 {
 	int32_t lro;
 	uint32_t thresh;
 
	/*
	 * lro is the flag we use to determine if we have seen reordering.
	 * If it gets set we have seen reordering. The reorder logic works
	 * in one of two ways:
	 *
	 * If reorder-fade is configured, then we track the last time we saw
	 * re-ordering occur. If we reach the point where enough time has
	 * passed we no longer consider reordering to be occurring.
	 *
	 * Or if reorder-fade is 0, then once we see reordering we consider
	 * the connection to always be subject to reordering and just set lro
	 * to 1.
	 *
	 * In the end if lro is non-zero we add the extra time for
	 * reordering in.
	 */
 	if (srtt == 0)
 		srtt = 1;
 	if (rack->r_ctl.rc_reorder_ts) {
 		if (rack->r_ctl.rc_reorder_fade) {
 			if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
 				lro = cts - rack->r_ctl.rc_reorder_ts;
 				if (lro == 0) {
 					/*
					 * No time has passed since the last
 					 * reorder, mark it as reordering.
 					 */
 					lro = 1;
 				}
 			} else {
 				/* Negative time? */
 				lro = 0;
 			}
 			if (lro > rack->r_ctl.rc_reorder_fade) {
 				/* Turn off reordering seen too */
 				rack->r_ctl.rc_reorder_ts = 0;
 				lro = 0;
 			}
 		} else {
			/* Reordering does not fade */
 			lro = 1;
 		}
 	} else {
 		lro = 0;
 	}
 	if (rack->rc_rack_tmr_std_based == 0) {
 		thresh = srtt + rack->r_ctl.rc_pkt_delay;
 	} else {
 		/* Standards based pkt-delay is 1/4 srtt */
 		thresh = srtt +  (srtt >> 2);
 	}
 	if (lro && (rack->rc_rack_tmr_std_based == 0)) {
 		/* It must be set, if not you get 1/4 rtt */
 		if (rack->r_ctl.rc_reorder_shift)
 			thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
 		else
 			thresh += (srtt >> 2);
 	}
 	if (rack->rc_rack_use_dsack &&
 	    lro &&
 	    (rack->r_ctl.num_dsack > 0)) {
 		/*
 		 * We only increase the reordering window if we
 		 * have seen reordering <and> we have a DSACK count.
 		 */
 		thresh += rack->r_ctl.num_dsack * (srtt >> 2);
 		rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh);
 	}
 	/* SRTT * 2 is the ceiling */
 	if (thresh > (srtt * 2)) {
 		thresh = srtt * 2;
 	}
 	/* And we don't want it above the RTO max either */
 	if (thresh > rack_rto_max) {
 		thresh = rack_rto_max;
 	}
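	/*
	 * Worked example (illustrative numbers only): with a usec srtt of
	 * 40000, no extra pkt_delay and a reorder shift of 2 while
	 * reordering has been seen, thresh becomes 40000 + 10000 = 50000,
	 * which is then subject to the 2 * srtt and rack_rto_max ceilings
	 * above.
	 */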
 	rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh);
 	return (thresh);
 }
 
 static uint32_t
 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
 		     struct rack_sendmap *rsm, uint32_t srtt)
 {
 	struct rack_sendmap *prsm;
 	uint32_t thresh, len;
 	int segsiz;
 
 	if (srtt == 0)
 		srtt = 1;
 	if (rack->r_ctl.rc_tlp_threshold)
 		thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
 	else
 		thresh = (srtt * 2);
 
 	/* Get the previous sent packet, if any */
 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
 	len = rsm->r_end - rsm->r_start;
 	if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
 		/* Exactly like the ID */
 		if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
 			uint32_t alt_thresh;
 			/*
 			 * Compensate for delayed-ack with the d-ack time.
 			 */
 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
 			if (alt_thresh > thresh)
 				thresh = alt_thresh;
 		}
 	} else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) {
 		/* 2.1 behavior */
 		prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
 		if (prsm && (len <= segsiz)) {
 			/*
 			 * Two packets outstanding, thresh should be (2*srtt) +
 			 * possible inter-packet delay (if any).
 			 */
 			uint32_t inter_gap = 0;
 			int idx, nidx;
 
 			idx = rsm->r_rtr_cnt - 1;
 			nidx = prsm->r_rtr_cnt - 1;
 			if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) {
 				/* Yes it was sent later (or at the same time) */
 				inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
 			}
 			thresh += inter_gap;
 		} else if (len <= segsiz) {
 			/*
 			 * Possibly compensate for delayed-ack.
 			 */
 			uint32_t alt_thresh;
 
 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
 			if (alt_thresh > thresh)
 				thresh = alt_thresh;
 		}
 	} else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) {
 		/* 2.2 behavior */
 		if (len <= segsiz) {
 			uint32_t alt_thresh;
 			/*
 			 * Compensate for delayed-ack with the d-ack time.
 			 */
 			alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
 			if (alt_thresh > thresh)
 				thresh = alt_thresh;
 		}
 	}
 	/* Not above an RTO */
 	if (thresh > tp->t_rxtcur) {
 		thresh = tp->t_rxtcur;
 	}
 	/* Not above a RTO max */
 	if (thresh > rack_rto_max) {
 		thresh = rack_rto_max;
 	}
 	/* Apply user supplied min TLP */
 	if (thresh < rack_tlp_min) {
 		thresh = rack_tlp_min;
 	}
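	/*
	 * Note the clamp order: the user supplied minimum is applied last,
	 * after the t_rxtcur and rack_rto_max ceilings, so a larger
	 * rack_tlp_min takes precedence over both ceilings.
	 */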
 	return (thresh);
 }
 
 static uint32_t
 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	/*
 	 * We want the rack_rtt which is the
 	 * last rtt we measured. However if that
	 * does not exist we fall back to the srtt (which
 	 * we probably will never do) and then as a last
 	 * resort we use RACK_INITIAL_RTO if no srtt is
 	 * yet set.
 	 */
 	if (rack->rc_rack_rtt)
 		return (rack->rc_rack_rtt);
 	else if (tp->t_srtt == 0)
 		return (RACK_INITIAL_RTO);
 	return (tp->t_srtt);
 }
 
 static struct rack_sendmap *
 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused)
 {
 	/*
 	 * Check to see that we don't need to fall into recovery. We will
 	 * need to do so if our oldest transmit is past the time we should
 	 * have had an ack.
 	 */
 	struct tcp_rack *rack;
 	struct rack_sendmap *rsm;
 	int32_t idx;
 	uint32_t srtt, thresh;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (tqhash_empty(rack->r_ctl.tqh)) {
 		return (NULL);
 	}
 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 	if (rsm == NULL)
 		return (NULL);
 
 
 	if (rsm->r_flags & RACK_ACKED) {
 		rsm = rack_find_lowest_rsm(rack);
 		if (rsm == NULL)
 			return (NULL);
 	}
 	idx = rsm->r_rtr_cnt - 1;
 	srtt = rack_grab_rtt(tp, rack);
 	thresh = rack_calc_thresh_rack(rack, srtt, tsused);
 	if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
 		return (NULL);
 	}
 	if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
 		return (NULL);
 	}
	/* Ok, if we reach here we are overdue and this guy can be sent */
 	rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
 	return (rsm);
 }
 
 static uint32_t
 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	int32_t t;
 	int32_t tt;
 	uint32_t ret_val;
 
 	t = (tp->t_srtt + (tp->t_rttvar << 2));
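	/*
	 * The base value (srtt + 4 * rttvar) is backed off exponentially via
	 * tcp_backoff[] and clamped by RACK_TCPT_RANGESET() to the range
	 * [rack_persist_min, rack_persist_max], allowing for the timer slop.
	 */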
 	RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
  	    rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop);
 	rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT;
 	ret_val = (uint32_t)tt;
 	return (ret_val);
 }
 
 static uint32_t
 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack)
 {
	/*
	 * Start the FR timer, we do this based on getting the first one in
	 * the rc_tmap. Note that if it's NULL we must stop the timer. In all
	 * events we need to stop the running timer (if it's running) before
	 * starting the new one.
	 */
 	uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse;
 	uint32_t srtt_cur;
 	int32_t idx;
 	int32_t is_tlp_timer = 0;
 	struct rack_sendmap *rsm;
 
 	if (rack->t_timers_stopped) {
		/* All timers have been stopped, none are to run */
 		return (0);
 	}
 	if (rack->rc_in_persist) {
 		/* We can't start any timer in persists */
 		return (rack_get_persists_timer_val(tp, rack));
 	}
 	rack->rc_on_min_to = 0;
 	if ((tp->t_state < TCPS_ESTABLISHED) ||
 	    (rack->sack_attack_disable > 0) ||
 	    ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
 		goto activate_rxt;
 	}
 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 	if ((rsm == NULL) || sup_rack) {
 		/* Nothing on the send map or no rack */
 activate_rxt:
 		time_since_sent = 0;
 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 		if (rsm) {
			/*
			 * Should we discount the RTX timer any?
			 *
			 * We want to discount it the smallest amount.
			 * If a timer (Rack/TLP or RXT) has gone off more
			 * recently, that's the discount we want to use (now - timer time).
			 * If the retransmit of the oldest packet was more recent, then
			 * we want to use that (now - oldest-packet-last_transmit_time).
			 *
			 */
 			idx = rsm->r_rtr_cnt - 1;
 			if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
 				tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
 			else
 				tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
 			if (TSTMP_GT(cts, tstmp_touse))
 			    time_since_sent = cts - tstmp_touse;
 		}
 		if (SEQ_LT(tp->snd_una, tp->snd_max) ||
 		    sbavail(&tptosocket(tp)->so_snd)) {
 			rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
 			to = tp->t_rxtcur;
 			if (to > time_since_sent)
 				to -= time_since_sent;
 			else
 				to = rack->r_ctl.rc_min_to;
 			if (to == 0)
 				to = 1;
 			/* Special case for KEEPINIT */
 			if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
 			    (TP_KEEPINIT(tp) != 0) &&
 			    rsm) {
 				/*
 				 * We have to put a ceiling on the rxt timer
 				 * of the keep-init timeout.
 				 */
 				uint32_t max_time, red;
 
 				max_time = TICKS_2_USEC(TP_KEEPINIT(tp));
 				if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
 					red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
 					if (red < max_time)
 						max_time -= red;
 					else
 						max_time = 1;
 				}
 				/* Reduce timeout to the keep value if needed */
 				if (max_time < to)
 					to = max_time;
 			}
 			return (to);
 		}
 		return (0);
 	}
 	if (rsm->r_flags & RACK_ACKED) {
 		rsm = rack_find_lowest_rsm(rack);
 		if (rsm == NULL) {
 			/* No lowest? */
 			goto activate_rxt;
 		}
 	}
 	if (rack->sack_attack_disable) {
 		/*
 		 * We don't want to do
 		 * any TLP's if you are an attacker.
 		 * Though if you are doing what
 		 * is expected you may still have
 		 * SACK-PASSED marks.
 		 */
 		goto activate_rxt;
 	}
 	/* Convert from ms to usecs */
 	if ((rsm->r_flags & RACK_SACK_PASSED) ||
 	    (rsm->r_flags & RACK_RWND_COLLAPSED) ||
 	    (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
 		if ((tp->t_flags & TF_SENTFIN) &&
 		    ((tp->snd_max - tp->snd_una) == 1) &&
 		    (rsm->r_flags & RACK_HAS_FIN)) {
 			/*
 			 * We don't start a rack timer if all we have is a
 			 * FIN outstanding.
 			 */
 			goto activate_rxt;
 		}
 		if ((rack->use_rack_rr == 0) &&
 		    (IN_FASTRECOVERY(tp->t_flags)) &&
 		    (rack->rack_no_prr == 0) &&
 		     (rack->r_ctl.rc_prr_sndcnt  < ctf_fixed_maxseg(tp))) {
			/*
			 * We are not cheating, we are in recovery and
			 * do not yet have enough acks to get our next
			 * retransmission out.
			 *
			 * Note that classified attackers do not
			 * get to use the rack-cheat.
			 */
 			goto activate_tlp;
 		}
 		srtt = rack_grab_rtt(tp, rack);
 		thresh = rack_calc_thresh_rack(rack, srtt, cts);
 		idx = rsm->r_rtr_cnt - 1;
 		exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
 		if (SEQ_GEQ(exp, cts)) {
 			to = exp - cts;
 			if (to < rack->r_ctl.rc_min_to) {
 				to = rack->r_ctl.rc_min_to;
 				if (rack->r_rr_config == 3)
 					rack->rc_on_min_to = 1;
 			}
 		} else {
 			to = rack->r_ctl.rc_min_to;
 			if (rack->r_rr_config == 3)
 				rack->rc_on_min_to = 1;
 		}
 	} else {
 		/* Ok we need to do a TLP not RACK */
 activate_tlp:
 		if ((rack->rc_tlp_in_progress != 0) &&
 		    (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) {
 			/*
 			 * The previous send was a TLP and we have sent
 			 * N TLP's without sending new data.
 			 */
 			goto activate_rxt;
 		}
 		rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
 		if (rsm == NULL) {
 			/* We found no rsm to TLP with. */
 			goto activate_rxt;
 		}
 		if (rsm->r_flags & RACK_HAS_FIN) {
		/* If it's a FIN we don't do TLP */
 			rsm = NULL;
 			goto activate_rxt;
 		}
 		idx = rsm->r_rtr_cnt - 1;
 		time_since_sent = 0;
 		if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
 			tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
 		else
 			tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time;
 		if (TSTMP_GT(cts, tstmp_touse))
 		    time_since_sent = cts - tstmp_touse;
 		is_tlp_timer = 1;
 		if (tp->t_srtt) {
 			if ((rack->rc_srtt_measure_made == 0) &&
 			    (tp->t_srtt == 1)) {
				/*
				 * If another stack has run and set srtt to 1,
				 * then the srtt was 0, so let's use the initial.
				 */
 				srtt = RACK_INITIAL_RTO;
 			} else {
 				srtt_cur = tp->t_srtt;
 				srtt = srtt_cur;
 			}
 		} else
 			srtt = RACK_INITIAL_RTO;
 		/*
 		 * If the SRTT is not keeping up and the
 		 * rack RTT has spiked we want to use
 		 * the last RTT not the smoothed one.
 		 */
 		if (rack_tlp_use_greater &&
 		    tp->t_srtt &&
 		    (srtt < rack_grab_rtt(tp, rack))) {
 			srtt = rack_grab_rtt(tp, rack);
 		}
 		thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
 		if (thresh > time_since_sent) {
 			to = thresh - time_since_sent;
 		} else {
 			to = rack->r_ctl.rc_min_to;
 			rack_log_alt_to_to_cancel(rack,
 						  thresh,		/* flex1 */
 						  time_since_sent,	/* flex2 */
 						  tstmp_touse,		/* flex3 */
 						  rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */
 						  (uint32_t)rsm->r_tim_lastsent[idx],
 						  srtt,
 						  idx, 99);
 		}
 		if (to < rack_tlp_min) {
 			to = rack_tlp_min;
 		}
 		if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) {
			/*
			 * If the TLP time works out to larger than the max
			 * RTO, let's not do TLP... just RTO.
			 */
 			goto activate_rxt;
 		}
 	}
 	if (is_tlp_timer == 0) {
 		rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK;
 	} else {
 		rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP;
 	}
 	if (to == 0)
 		to = 1;
 	return (to);
 }
 
 static void
 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una)
 {
 	struct timeval tv;
 
 	if (rack->rc_in_persist == 0) {
 		if (tp->t_flags & TF_GPUTINPROG) {
 			/*
 			 * Stop the goodput now, the calling of the
 			 * measurement function clears the flag.
 			 */
 			rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__,
 						    RACK_QUALITY_PERSIST);
 		}
 #ifdef NETFLIX_SHARED_CWND
 		if (rack->r_ctl.rc_scw) {
 			tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
 			rack->rack_scwnd_is_idle = 1;
 		}
 #endif
 		rack->r_ctl.rc_went_idle_time = tcp_get_usecs(&tv);
 		if (rack->lt_bw_up) {
 			/* Suspend our LT BW measurement */
 			uint64_t tmark;
 
 			rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq);
 			rack->r_ctl.lt_seq = snd_una;
 			tmark = tcp_tv_to_lusectick(&tv);
 			rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
 			rack->r_ctl.lt_timemark = tmark;
 			rack->lt_bw_up = 0;
 			rack->r_persist_lt_bw_off = 1;
 		}
 		if (rack->r_ctl.rc_went_idle_time == 0)
 			rack->r_ctl.rc_went_idle_time = 1;
 		rack_timer_cancel(tp, rack, cts, __LINE__);
 		rack->r_ctl.persist_lost_ends = 0;
 		rack->probe_not_answered = 0;
 		rack->forced_ack = 0;
 		tp->t_rxtshift = 0;
 		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 			      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
 		rack->rc_in_persist = 1;
 	}
 }
 
 static void
 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
 {
 	struct timeval tv;
 	uint32_t t_time;
 
 	if (tcp_in_hpts(rack->rc_tp)) {
 		tcp_hpts_remove(rack->rc_tp);
 		rack->r_ctl.rc_hpts_flags = 0;
 	}
 #ifdef NETFLIX_SHARED_CWND
 	if (rack->r_ctl.rc_scw) {
 		tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
 		rack->rack_scwnd_is_idle = 0;
 	}
 #endif
 	t_time = tcp_get_usecs(&tv);
 	if (rack->rc_gp_dyn_mul &&
 	    (rack->use_fixed_rate == 0) &&
 	    (rack->rc_always_pace)) {
 		/*
 		 * Do we count this as if a probe-rtt just
 		 * finished?
 		 */
 		uint32_t time_idle, idle_min;
 
 		time_idle = t_time - rack->r_ctl.rc_went_idle_time;
 		idle_min = rack_min_probertt_hold;
 		if (rack_probertt_gpsrtt_cnt_div) {
 			uint64_t extra;
 			extra = (uint64_t)rack->r_ctl.rc_gp_srtt *
 				(uint64_t)rack_probertt_gpsrtt_cnt_mul;
 			extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div;
 			idle_min += (uint32_t)extra;
 		}
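		/*
		 * i.e. idle_min = rack_min_probertt_hold +
		 * (rc_gp_srtt * rack_probertt_gpsrtt_cnt_mul) / rack_probertt_gpsrtt_cnt_div
		 */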
 		if (time_idle >= idle_min) {
 			/* Yes, we count it as a probe-rtt. */
 			uint32_t us_cts;
 
 			us_cts = tcp_get_usecs(NULL);
 			if (rack->in_probe_rtt == 0) {
 				rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
 				rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
 				rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
 				rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
 			} else {
 				rack_exit_probertt(rack, us_cts);
 			}
 		}
 	}
 	if (rack->r_persist_lt_bw_off) {
 		/* Continue where we left off */
 		rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv);
 		rack->lt_bw_up = 1;
 		rack->r_persist_lt_bw_off = 0;
 	}
 	rack->rc_in_persist = 0;
 	rack->r_ctl.rc_went_idle_time = 0;
 	tp->t_rxtshift = 0;
 	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 	   rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
 	rack->r_ctl.rc_agg_delayed = 0;
 	rack->r_early = 0;
 	rack->r_late = 0;
 	rack->r_ctl.rc_agg_early = 0;
 }
 
 static void
 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
 		   struct hpts_diag *diag, struct timeval *tv)
 {
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex1 = diag->p_nxt_slot;
 		log.u_bbr.flex2 = diag->p_cur_slot;
 		log.u_bbr.flex3 = diag->slot_req;
 		log.u_bbr.flex4 = diag->inp_hptsslot;
 		log.u_bbr.flex5 = diag->slot_remaining;
 		log.u_bbr.flex6 = diag->need_new_to;
 		log.u_bbr.flex7 = diag->p_hpts_active;
 		log.u_bbr.flex8 = diag->p_on_min_sleep;
 		/* Hijack other fields as needed */
 		log.u_bbr.epoch = diag->have_slept;
 		log.u_bbr.lt_epoch = diag->yet_to_sleep;
 		log.u_bbr.pkts_out = diag->co_ret;
 		log.u_bbr.applimited = diag->hpts_sleep_time;
 		log.u_bbr.delivered = diag->p_prev_slot;
 		log.u_bbr.inflight = diag->p_runningslot;
 		log.u_bbr.bw_inuse = diag->wheel_slot;
 		log.u_bbr.rttProp = diag->wheel_cts;
 		log.u_bbr.timeStamp = cts;
 		log.u_bbr.delRate = diag->maxslots;
 		log.u_bbr.cur_del_rate = diag->p_curtick;
 		log.u_bbr.cur_del_rate <<= 32;
 		log.u_bbr.cur_del_rate |= diag->p_lasttick;
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_HPTSDIAG, 0,
 		    0, &log, false, tv);
 	}
 
 }
 
 static void
 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type)
 {
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.flex1 = sb->sb_flags;
 		log.u_bbr.flex2 = len;
 		log.u_bbr.flex3 = sb->sb_state;
 		log.u_bbr.flex8 = type;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_LOG_SB_WAKE, 0,
 		    len, &log, false, &tv);
 	}
 }
 
 static void
 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
       int32_t slot, uint32_t tot_len_this_send, int sup_rack)
 {
 	struct hpts_diag diag;
 	struct inpcb *inp = tptoinpcb(tp);
 	struct timeval tv;
 	uint32_t delayed_ack = 0;
 	uint32_t hpts_timeout;
 	uint32_t entry_slot = slot;
 	uint8_t stopped;
 	uint32_t left = 0;
 	uint32_t us_cts;
 
 	if ((tp->t_state == TCPS_CLOSED) ||
 	    (tp->t_state == TCPS_LISTEN)) {
 		return;
 	}
 	if (tcp_in_hpts(tp)) {
 		/* Already on the pacer */
 		return;
 	}
 	stopped = rack->rc_tmr_stopped;
 	if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
 		left = rack->r_ctl.rc_timer_exp - cts;
 	}
 	rack->r_ctl.rc_timer_exp = 0;
 	rack->r_ctl.rc_hpts_flags = 0;
 	us_cts = tcp_get_usecs(&tv);
 	/* Now early/late accounting */
 	rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
 	if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
		/*
		 * We have an early carry-over set,
		 * we can always add more time so we
		 * can always make this compensation.
		 *
		 * Note if acks are allowed to wake us, do not
		 * penalize the next timer for being woken
		 * by an ack aka the rc_agg_early (non-paced mode).
		 */
 		slot += rack->r_ctl.rc_agg_early;
 		rack->r_early = 0;
 		rack->r_ctl.rc_agg_early = 0;
 	}
 	if (rack->r_late) {
 		/*
 		 * This is harder, we can
 		 * compensate some but it
 		 * really depends on what
 		 * the current pacing time is.
 		 */
 		if (rack->r_ctl.rc_agg_delayed >= slot) {
 			/*
 			 * We can't compensate for it all.
 			 * And we have to have some time
 			 * on the clock. We always have a min
 			 * 10 slots (10 x 10 i.e. 100 usecs).
 			 */
 			if (slot <= HPTS_TICKS_PER_SLOT) {
 				/* We gain delay */
 				rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
 				slot = HPTS_TICKS_PER_SLOT;
 			} else {
 				/* We take off some */
 				rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
 				slot = HPTS_TICKS_PER_SLOT;
 			}
 		} else {
 			slot -= rack->r_ctl.rc_agg_delayed;
 			rack->r_ctl.rc_agg_delayed = 0;
 			/* Make sure we have 100 useconds at minimum */
 			if (slot < HPTS_TICKS_PER_SLOT) {
 				rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
 				slot = HPTS_TICKS_PER_SLOT;
 			}
 			if (rack->r_ctl.rc_agg_delayed == 0)
 				rack->r_late = 0;
 		}
 	}
 	hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
 #ifdef TCP_SAD_DETECTION
 	if (rack->sack_attack_disable &&
 	    (rack->r_ctl.ack_during_sd > 0) &&
 	    (slot < tcp_sad_pacing_interval)) {
		/*
		 * We have a potential attacker on
		 * the line. We have possibly some
		 * (or no) pacing time set. We want to
		 * slow down the processing of sacks by some
		 * amount (if it is an attacker). Set the default
		 * slot for attackers in place (unless the original
		 * interval is longer). It is stored in
		 * micro-seconds.
		 */
 		slot = tcp_sad_pacing_interval;
 		rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__);
 		rack->r_ctl.ack_during_sd = 0;
 	}
 #endif
 	if (tp->t_flags & TF_DELACK) {
 		delayed_ack = TICKS_2_USEC(tcp_delacktime);
 		rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
 	}
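	/*
	 * If a delayed ack is pending and would fire before any other timer,
	 * use it as the timeout; otherwise drop the DELACK pacing flag.
	 */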
 	if (delayed_ack && ((hpts_timeout == 0) ||
 			    (delayed_ack < hpts_timeout)))
 		hpts_timeout = delayed_ack;
 	else
 		rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
 	/*
 	 * If no timers are going to run and we will fall off the hptsi
	 * wheel, we resort to a keep-alive timer if it's configured.
 	 */
 	if ((hpts_timeout == 0) &&
 	    (slot == 0)) {
 		if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
 		    (tp->t_state <= TCPS_CLOSING)) {
 			/*
 			 * Ok we have no timer (persists, rack, tlp, rxt  or
 			 * del-ack), we don't have segments being paced. So
 			 * all that is left is the keepalive timer.
 			 */
 			if (TCPS_HAVEESTABLISHED(tp->t_state)) {
 				/* Get the established keep-alive time */
 				hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp));
 			} else {
 				/*
 				 * Get the initial setup keep-alive time,
 				 * note that this is probably not going to
 				 * happen, since rack will be running a rxt timer
 				 * if a SYN of some sort is outstanding. It is
 				 * actually handled in rack_timeout_rxt().
 				 */
 				hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp));
 			}
 			rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP;
 			if (rack->in_probe_rtt) {
 				/*
 				 * We want to instead not wake up a long time from
 				 * now but to wake up about the time we would
 				 * exit probe-rtt and initiate a keep-alive ack.
 				 * This will get us out of probe-rtt and update
 				 * our min-rtt.
 				 */
 				hpts_timeout = rack_min_probertt_hold;
 			}
 		}
 	}
 	if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) ==
 	    (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) {
		/*
		 * RACK, TLP, persists and RXT timers all are restartable
		 * based on actions input... i.e. we received a packet (ack
		 * or sack) and that changes things (rw, or snd_una etc).
		 * Thus we can restart them with a new value. For
		 * keep-alive and delayed_ack we keep track of what was left
		 * and restart the timer with a smaller value.
		 */
 		if (left < hpts_timeout)
 			hpts_timeout = left;
 	}
 	if (hpts_timeout) {
		/*
		 * Hack alert: for now we can't time out over 2,147,483
		 * seconds (a bit more than 596 hours), which is probably
		 * ok :).
		 */
 		if (hpts_timeout > 0x7ffffffe)
 			hpts_timeout = 0x7ffffffe;
 		rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
 	}
 	rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0);
 	if ((rack->gp_ready == 0) &&
 	    (rack->use_fixed_rate == 0) &&
 	    (hpts_timeout < slot) &&
 	    (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
 		/*
 		 * We have no good estimate yet for the
 		 * old clunky burst mitigation or the
 		 * real pacing. And the tlp or rxt is smaller
 		 * than the pacing calculation. Lets not
 		 * pace that long since we know the calculation
 		 * so far is not accurate.
 		 */
 		slot = hpts_timeout;
 	}
 	/**
 	 * Turn off all the flags for queuing by default. The
 	 * flags have important meanings to what happens when
 	 * LRO interacts with the transport. Most likely (by default now)
 	 * mbuf_queueing and ack compression are on. So the transport
 	 * has a couple of flags that control what happens (if those
 	 * are not on then these flags won't have any effect since it
 	 * won't go through the queuing LRO path).
 	 *
	 * TF2_MBUF_QUEUE_READY - This flag says that I am busy
 	 *                        pacing output, so don't disturb. But
 	 *                        it also means LRO can wake me if there
 	 *                        is a SACK arrival.
 	 *
 	 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction
 	 *                       with the above flag (QUEUE_READY) and
 	 *                       when present it says don't even wake me
 	 *                       if a SACK arrives.
 	 *
 	 * The idea behind these flags is that if we are pacing we
 	 * set the MBUF_QUEUE_READY and only get woken up if
 	 * a SACK arrives (which could change things) or if
 	 * our pacing timer expires. If, however, we have a rack
 	 * timer running, then we don't even want a sack to wake
 	 * us since the rack timer has to expire before we can send.
 	 *
 	 * Other cases should usually have none of the flags set
 	 * so LRO can call into us.
 	 */
 	tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY);
 	if (slot) {
 		rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
 		rack->r_ctl.rc_last_output_to = us_cts + slot;
 		/*
 		 * A pacing timer (slot) is being set, in
 		 * such a case we cannot send (we are blocked by
 		 * the timer). So lets tell LRO that it should not
 		 * wake us unless there is a SACK. Note this only
 		 * will be effective if mbuf queueing is on or
 		 * compressed acks are being processed.
 		 */
 		tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
 		/*
 		 * But wait if we have a Rack timer running
 		 * even a SACK should not disturb us (with
 		 * the exception of r_rr_config 3).
 		 */
 		if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) ||
 		    (IN_RECOVERY(tp->t_flags))) {
 			if (rack->r_rr_config != 3)
 				tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
 			else if (rack->rc_pace_dnd) {
 				/*
 				 * When DND is on, we only let a sack
 				 * interrupt us if we are not in recovery.
 				 *
 				 * If DND is off, then we never hit here
 				 * and let all sacks wake us up.
 				 *
 				 */
 				tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
 			}
 		}
 		/* For sack attackers we want to ignore sack */
 		if (rack->sack_attack_disable == 1) {
 			tp->t_flags2 |= (TF2_DONT_SACK_QUEUE |
 			    TF2_MBUF_QUEUE_READY);
 		} else if (rack->rc_ack_can_sendout_data) {
			/*
			 * Ahh but wait, this is that special case
			 * where the pacing timer can be disturbed;
			 * back out the changes (used for non-paced
			 * burst limiting).
			 */
 			tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE |
 			    TF2_MBUF_QUEUE_READY);
 		}
 		if ((rack->use_rack_rr) &&
 		    (rack->r_rr_config < 2) &&
 		    ((hpts_timeout) && (hpts_timeout < slot))) {
 			/*
 			 * Arrange for the hpts to kick back in after the
 			 * t-o if the t-o does not cause a send.
 			 */
 			(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout),
 						   __LINE__, &diag);
 			rack_log_hpts_diag(rack, us_cts, &diag, &tv);
 			rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
 		} else {
 			(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot),
 						   __LINE__, &diag);
 			rack_log_hpts_diag(rack, us_cts, &diag, &tv);
 			rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
 		}
 	} else if (hpts_timeout) {
		/*
		 * With respect to t_flags2(?) here, let's let any new acks wake
		 * us up. Since we are not pacing (no pacing timer), output
		 * can happen so we should let it. If it's a Rack timer, then any inbound
		 * packet probably won't change the sending (we will be blocked)
		 * but it may change the prr stats, so letting it in (the set defaults
		 * at the start of this block) is good enough.
		 */
 		 */
 		rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
 		(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout),
 					   __LINE__, &diag);
 		rack_log_hpts_diag(rack, us_cts, &diag, &tv);
 		rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
 	} else {
 		/* No timer starting */
 #ifdef INVARIANTS
 		if (SEQ_GT(tp->snd_max, tp->snd_una)) {
 			panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
 			    tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
 		}
 #endif
 	}
 	rack->rc_tmr_stopped = 0;
 	if (slot)
 		rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__);
 }
 
/*
 * RACK Timer, here we simply do logging and housekeeping.
 * The normal rack_output() function will call the
 * appropriate thing to check if we need to do a RACK retransmit.
 * We return 1, saying don't proceed with rack_output, only
 * when all timers have been stopped (destroyed PCB?).
 */
 static int
 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
 {
 	/*
 	 * This timer simply provides an internal trigger to send out data.
 	 * The check_recovery_mode call will see if there are needed
 	 * retransmissions, if so we will enter fast-recovery. The output
 	 * call may or may not do the same thing depending on sysctl
 	 * settings.
 	 */
 	struct rack_sendmap *rsm;
 
 	counter_u64_add(rack_to_tot, 1);
 	if (rack->r_state && (rack->r_state != tp->t_state))
 		rack_set_state(tp, rack);
 	rack->rc_on_min_to = 0;
 	rsm = rack_check_recovery_mode(tp, cts);
 	rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
 	if (rsm) {
 		rack->r_ctl.rc_resend = rsm;
 		rack->r_timer_override = 1;
 		if (rack->use_rack_rr) {
			/*
			 * Don't accumulate extra pacing delay;
			 * we are allowing the rack timer to
			 * over-ride pacing, i.e. rrr takes precedence
			 * if the pacing interval is longer than the rrr
			 * time (in other words we get the min of the pacing
			 * time versus the rrr pacing time).
			 */
 			rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
 		}
 	}
 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
 	if (rsm == NULL) {
 		/* restart a timer and return 1 */
 		rack_start_hpts_timer(rack, tp, cts,
 				      0, 0, 0);
 		return (1);
 	}
 	return (0);
 }
 
 
 
 static void
 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
 {
 
 	if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) {
 		/*
 		 * The trailing space changed, mbufs can grow
 		 * at the tail but they can't shrink from
 		 * it, KASSERT that. Adjust the orig_m_len to
 		 * compensate for this change.
 		 */
 		KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)),
 			("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n",
 			 rsm->m,
 			 rsm,
 			 (intmax_t)M_TRAILINGROOM(rsm->m),
 			 rsm->orig_t_space,
 			 rsm->orig_m_len,
 			 rsm->m->m_len));
 		rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m));
 		rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 	}
 	if (rsm->m->m_len < rsm->orig_m_len) {
 		/*
 		 * Mbuf shrank, trimmed off the top by an ack, our
 		 * offset changes.
 		 */
 		KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)),
 			("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n",
 			 rsm->m, rsm->m->m_len,
 			 rsm, rsm->orig_m_len,
 			 rsm->soff));
 		if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len))
 			rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
 		else
 			rsm->soff = 0;
 		rsm->orig_m_len = rsm->m->m_len;
 #ifdef INVARIANTS
 	} else if (rsm->m->m_len > rsm->orig_m_len) {
 		panic("rsm:%p m:%p m_len grew outside of t_space compensation",
 		      rsm, rsm->m);
 #endif
 	}
 }
 
 static void
 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
 {
 	struct mbuf *m;
 	uint32_t soff;
 
 	if (src_rsm->m &&
 	    ((src_rsm->orig_m_len != src_rsm->m->m_len) ||
 	     (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) {
 		/* Fix up the orig_m_len and possibly the mbuf offset */
 		rack_adjust_orig_mlen(src_rsm);
 	}
 	m = src_rsm->m;
 	soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
 	while (soff >= m->m_len) {
 		/* Move out past this mbuf */
 		soff -= m->m_len;
 		m = m->m_next;
 		KASSERT((m != NULL),
 			("rsm:%p nrsm:%p hit at soff:%u null m",
 			 src_rsm, rsm, soff));
 		if (m == NULL) {
			/* This should *not* happen, which is why there is a kassert */
 			src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
 					       (src_rsm->r_start - rack->rc_tp->snd_una),
 					       &src_rsm->soff);
 			src_rsm->orig_m_len = src_rsm->m->m_len;
 			src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m);
 			rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
 					   (rsm->r_start - rack->rc_tp->snd_una),
 					   &rsm->soff);
 			rsm->orig_m_len = rsm->m->m_len;
 			rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 			return;
 		}
 	}
 	rsm->m = m;
 	rsm->soff = soff;
 	rsm->orig_m_len = m->m_len;
 	rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 }
 
 static __inline void
 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
 	       struct rack_sendmap *rsm, uint32_t start)
 {
 	int idx;
 
 	nrsm->r_start = start;
 	nrsm->r_end = rsm->r_end;
 	nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
 	nrsm->r_flags = rsm->r_flags;
 	nrsm->r_dupack = rsm->r_dupack;
 	nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
 	nrsm->r_rtr_bytes = 0;
 	nrsm->r_fas = rsm->r_fas;
 	nrsm->r_bas = rsm->r_bas;
 	rsm->r_end = nrsm->r_start;
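	/* rsm now covers [r_start, start) and nrsm covers [start, old r_end) */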
 	nrsm->r_just_ret = rsm->r_just_ret;
 	for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) {
 		nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
 	}
 	/* Now if we have SYN flag we keep it on the left edge */
 	if (nrsm->r_flags & RACK_HAS_SYN)
 		nrsm->r_flags &= ~RACK_HAS_SYN;
 	/* Now if we have a FIN flag we keep it on the right edge */
 	if (rsm->r_flags & RACK_HAS_FIN)
 		rsm->r_flags &= ~RACK_HAS_FIN;
 	/* Push bit must go to the right edge as well */
 	if (rsm->r_flags & RACK_HAD_PUSH)
 		rsm->r_flags &= ~RACK_HAD_PUSH;
 	/* Clone over the state of the hw_tls flag */
 	nrsm->r_hw_tls = rsm->r_hw_tls;
	/*
	 * Now we need to find nrsm's new location in the mbuf chain.
	 * We basically calculate a new offset, which is soff +
	 * how much is left in the original rsm. Then we walk out the mbuf
	 * chain to find the right position; it may be the same mbuf
	 * or maybe not.
	 */
 	KASSERT(((rsm->m != NULL) ||
 		 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
 		("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
 	if (rsm->m)
 		rack_setup_offset_for_rsm(rack, rsm, nrsm);
 }
 
 static struct rack_sendmap *
 rack_merge_rsm(struct tcp_rack *rack,
 	       struct rack_sendmap *l_rsm,
 	       struct rack_sendmap *r_rsm)
 {
 	/*
 	 * We are merging two ack'd RSM's,
 	 * the l_rsm is on the left (lower seq
 	 * values) and the r_rsm is on the right
 	 * (higher seq value). The simplest way
 	 * to merge these is to move the right
 	 * one into the left. I don't think there
 	 * is any reason we need to try to find
 	 * the oldest (or last oldest retransmitted).
 	 */
 	rack_log_map_chg(rack->rc_tp, rack, NULL,
 			 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__);
 	l_rsm->r_end = r_rsm->r_end;
 	if (l_rsm->r_dupack < r_rsm->r_dupack)
 		l_rsm->r_dupack = r_rsm->r_dupack;
 	if (r_rsm->r_rtr_bytes)
 		l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes;
 	if (r_rsm->r_in_tmap) {
 		/* This really should not happen */
 		TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext);
 		r_rsm->r_in_tmap = 0;
 	}
 
 	/* Now the flags */
 	if (r_rsm->r_flags & RACK_HAS_FIN)
 		l_rsm->r_flags |= RACK_HAS_FIN;
 	if (r_rsm->r_flags & RACK_TLP)
 		l_rsm->r_flags |= RACK_TLP;
 	if (r_rsm->r_flags & RACK_RWND_COLLAPSED)
 		l_rsm->r_flags |= RACK_RWND_COLLAPSED;
 	if ((r_rsm->r_flags & RACK_APP_LIMITED)  &&
 	    ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) {
 		/*
 		 * If both are app-limited then let the
 		 * free lower the count. If right is app
 		 * limited and left is not, transfer.
 		 */
 		l_rsm->r_flags |= RACK_APP_LIMITED;
 		r_rsm->r_flags &= ~RACK_APP_LIMITED;
 		if (r_rsm == rack->r_ctl.rc_first_appl)
 			rack->r_ctl.rc_first_appl = l_rsm;
 	}
 	tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE);
 	/*
 	 * We keep the largest value, which is the newest
 	 * send. We do this in case a segment that is
 	 * joined together and not part of a GP estimate
 	 * later gets expanded into the GP estimate.
 	 *
 	 * We prohibit the merging of unlike kinds i.e.
 	 * all pieces that are in the GP estimate can be
 	 * merged and all pieces that are not in a GP estimate
	 * can be merged, but not dissimilar pieces. Combine
 	 * this with taking the highest here and we should
 	 * be ok unless of course the client reneges. Then
 	 * all bets are off.
 	 */
 	if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] <
 	   r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) {
 		l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)];
 	}
 	/*
 	 * When merging two RSM's we also need to consider the ack time and keep
 	 * newest. If the ack gets merged into a measurement then that is the
 	 * one we will want to be using.
 	 */
 	if(l_rsm->r_ack_arrival	 < r_rsm->r_ack_arrival)
 		l_rsm->r_ack_arrival = r_rsm->r_ack_arrival;
 
 	if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
 		/* Transfer the split limit to the map we free */
 		r_rsm->r_limit_type = l_rsm->r_limit_type;
 		l_rsm->r_limit_type = 0;
 	}
 	rack_free(rack, r_rsm);
 	l_rsm->r_flags |= RACK_MERGED;
 	return (l_rsm);
 }
 
 /*
  * TLP Timer, here we simply setup what segment we want to
  * have the TLP expire on, the normal rack_output() will then
  * send it out.
  *
  * We return 1, saying don't proceed with rack_output only
  * when all timers have been stopped (destroyed PCB?).
  */
 static int
 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp)
 {
 	/*
 	 * Tail Loss Probe.
 	 */
 	struct rack_sendmap *rsm = NULL;
 	int insret __diagused;
 	struct socket *so = tptosocket(tp);
 	uint32_t amm;
 	uint32_t out, avail;
 	int collapsed_win = 0;
 
 	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
 		/* Its not time yet */
 		return (0);
 	}
 	if (ctf_progress_timeout_check(tp, true)) {
 		rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
 		return (-ETIMEDOUT);	/* tcp_drop() */
 	}
 	/*
 	 * A TLP timer has expired. We have been idle for 2 rtts. So we now
 	 * need to figure out how to force a full MSS segment out.
 	 */
 	rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
 	rack->r_ctl.retran_during_recovery = 0;
 	rack->r_ctl.dsack_byte_cnt = 0;
 	counter_u64_add(rack_tlp_tot, 1);
 	if (rack->r_state && (rack->r_state != tp->t_state))
 		rack_set_state(tp, rack);
 	avail = sbavail(&so->so_snd);
 	out = tp->snd_max - tp->snd_una;
 	if ((out > tp->snd_wnd) || rack->rc_has_collapsed) {
 		/* special case, we need a retransmission */
 		collapsed_win = 1;
 		goto need_retran;
 	}
 	if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) {
 		rack->r_ctl.dsack_persist--;
 		if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
 			rack->r_ctl.num_dsack = 0;
 		}
 		rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
 	}
 	if ((tp->t_flags & TF_GPUTINPROG) &&
 	    (rack->r_ctl.rc_tlp_cnt_out == 1)) {
		/*
		 * If this is the second in a row
		 * TLP and we are doing a measurement,
		 * it's time to abandon the measurement.
		 * Something is likely broken on
		 * the client's network and measuring a
		 * broken network does us no good.
		 */
 		tp->t_flags &= ~TF_GPUTINPROG;
 		rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
 					   rack->r_ctl.rc_gp_srtt /*flex1*/,
 					   tp->gput_seq,
 					   0, 0, 18, __LINE__, NULL, 0);
 	}
 	/*
 	 * Check our send oldest always settings, and if
	 * there is an oldest to send, jump to need_retran.
 	 */
 	if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0))
 		goto need_retran;
 
 	if (avail > out) {
 		/* New data is available */
 		amm = avail - out;
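		/*
		 * At most one segment of new data is used for the probe; if
		 * less than a full segment is available we fall back to a
		 * retransmission instead.
		 */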
 		if (amm > ctf_fixed_maxseg(tp)) {
 			amm = ctf_fixed_maxseg(tp);
 			if ((amm + out) > tp->snd_wnd) {
 				/* We are rwnd limited */
 				goto need_retran;
 			}
 		} else if (amm < ctf_fixed_maxseg(tp)) {
 			/* not enough to fill a MTU */
 			goto need_retran;
 		}
 		if (IN_FASTRECOVERY(tp->t_flags)) {
 			/* Unlikely */
 			if (rack->rack_no_prr == 0) {
 				if (out + amm <= tp->snd_wnd) {
 					rack->r_ctl.rc_prr_sndcnt = amm;
 					rack->r_ctl.rc_tlp_new_data = amm;
 					rack_log_to_prr(rack, 4, 0, __LINE__);
 				}
 			} else
 				goto need_retran;
 		} else {
 			/* Set the send-new override */
 			if (out + amm <= tp->snd_wnd)
 				rack->r_ctl.rc_tlp_new_data = amm;
 			else
 				goto need_retran;
 		}
 		rack->r_ctl.rc_tlpsend = NULL;
 		counter_u64_add(rack_tlp_newdata, 1);
 		goto send;
 	}
 need_retran:
 	/*
 	 * Ok we need to arrange the last un-acked segment to be re-sent, or
 	 * optionally the first un-acked segment.
 	 */
 	if (collapsed_win == 0) {
 		if (rack_always_send_oldest)
 			rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 		else {
 			rsm = tqhash_max(rack->r_ctl.tqh);
 			if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
 				rsm = rack_find_high_nonack(rack, rsm);
 			}
 		}
 		if (rsm == NULL) {
 #ifdef TCP_BLACKBOX
 			tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true);
 #endif
 			goto out;
 		}
 	} else {
		/*
		 * We had a collapsed window, let's find
		 * the point before the collapse.
		 */
 		if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una))
 			rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1));
 		else {
 			rsm = tqhash_min(rack->r_ctl.tqh);
 		}
 		if (rsm == NULL) {
 			/* Huh */
 			goto out;
 		}
 	}
 	if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
		/*
		 * We need to split this, the last segment, in two.
		 */
 		struct rack_sendmap *nrsm;
 
 		nrsm = rack_alloc_full_limit(rack);
 		if (nrsm == NULL) {
 			/*
 			 * No memory to split, we will just exit and punt
 			 * off to the RXT timer.
 			 */
 			goto out;
 		}
 		rack_clone_rsm(rack, nrsm, rsm,
 			       (rsm->r_end - ctf_fixed_maxseg(tp)));
 		rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
 #ifndef INVARIANTS
 		(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
 #else
 		if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
 			panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
 			      nrsm, insret, rack, rsm);
 		}
 #endif
 		if (rsm->r_in_tmap) {
 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 			nrsm->r_in_tmap = 1;
 		}
 		rsm = nrsm;
 	}
 	rack->r_ctl.rc_tlpsend = rsm;
 send:
 	/* Make sure output path knows we are doing a TLP */
 	*doing_tlp = 1;
 	rack->r_timer_override = 1;
 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
 	return (0);
 out:
 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP;
 	return (0);
 }
 
 /*
 * Delayed ack timer. Here we simply need to set the
  * ACK_NOW flag and remove the DELACK flag. From there
  * the output routine will send the ack out.
  *
  * We only return 1, saying don't proceed, if all timers
  * are stopped (destroyed PCB?).
  */
 static int
 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
 {
 
 	rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL);
 	tp->t_flags &= ~TF_DELACK;
 	tp->t_flags |= TF_ACKNOW;
 	KMOD_TCPSTAT_INC(tcps_delack);
 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
 	return (0);
 }
 
 /*
 * Persists timer. Here we simply send the
 * same thing as a keepalive will:
 * the one byte send.
  *
  * We only return 1, saying don't proceed, if all timers
  * are stopped (destroyed PCB?).
  */
 static int
 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
 {
 	struct tcptemp *t_template;
 	int32_t retval = 1;
 
 	if (rack->rc_in_persist == 0)
 		return (0);
 	if (ctf_progress_timeout_check(tp, false)) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
 		rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
 		counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
 		return (-ETIMEDOUT);	/* tcp_drop() */
 	}
 	/*
 	 * Persistence timer into zero window. Force a byte to be output, if
 	 * possible.
 	 */
 	KMOD_TCPSTAT_INC(tcps_persisttimeo);
 	/*
 	 * Hack: if the peer is dead/unreachable, we do not time out if the
 	 * window is closed.  After a full backoff, drop the connection if
 	 * the idle time (no responses to probes) reaches the maximum
 	 * backoff that we would use if retransmitting.
 	 */
 	if (tp->t_rxtshift >= V_tcp_retries &&
 	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
 	     TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) {
 		KMOD_TCPSTAT_INC(tcps_persistdrop);
 		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
 		counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
 		retval = -ETIMEDOUT;	/* tcp_drop() */
 		goto out;
 	}
 	if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) &&
 	    tp->snd_una == tp->snd_max)
 		rack_exit_persist(tp, rack, cts);
 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT;
 	/*
 	 * If the user has closed the socket then drop a persisting
 	 * connection after a much reduced timeout.
 	 */
 	if (tp->t_state > TCPS_CLOSE_WAIT &&
 	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
 		KMOD_TCPSTAT_INC(tcps_persistdrop);
 		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
 		counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
 		retval = -ETIMEDOUT;	/* tcp_drop() */
 		goto out;
 	}
 	t_template = tcpip_maketemplate(rack->rc_inp);
 	if (t_template) {
 		/* only set it if we were answered */
 		if (rack->forced_ack == 0) {
 			rack->forced_ack = 1;
 			rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
 		} else {
 			rack->probe_not_answered = 1;
 			counter_u64_add(rack_persists_loss, 1);
 			rack->r_ctl.persist_lost_ends++;
 		}
 		counter_u64_add(rack_persists_sends, 1);
 		counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1);
 		tcp_respond(tp, t_template->tt_ipgen,
 			    &t_template->tt_t, (struct mbuf *)NULL,
 			    tp->rcv_nxt, tp->snd_una - 1, 0);
 		/* This sends an ack */
 		if (tp->t_flags & TF_DELACK)
 			tp->t_flags &= ~TF_DELACK;
 		free(t_template, M_TEMP);
 	}
 	if (tp->t_rxtshift < V_tcp_retries)
 		tp->t_rxtshift++;
 out:
 	rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL);
 	rack_start_hpts_timer(rack, tp, cts,
 			      0, 0, 0);
 	return (retval);
 }
 
 /*
  * If a keepalive goes off, we had no other timers
  * happening. We always return 1 here since this
  * routine either drops the connection or sends
 * out a segment via tcp_respond().
  */
 static int
 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
 {
 	struct tcptemp *t_template;
 	struct inpcb *inp = tptoinpcb(tp);
 
 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
 	rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
 	/*
 	 * Keep-alive timer went off; send something or drop connection if
 	 * idle for too long.
 	 */
 	KMOD_TCPSTAT_INC(tcps_keeptimeo);
 	if (tp->t_state < TCPS_ESTABLISHED)
 		goto dropit;
 	if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
 	    tp->t_state <= TCPS_CLOSING) {
 		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
 			goto dropit;
 		/*
 		 * Send a packet designed to force a response if the peer is
 		 * up and reachable: either an ACK if the connection is
 		 * still alive, or an RST if the peer has closed the
 		 * connection due to timeout or reboot. Using sequence
 		 * number tp->snd_una-1 causes the transmitted zero-length
 		 * segment to lie outside the receive window; by the
 		 * protocol spec, this requires the correspondent TCP to
 		 * respond.
 		 */
 		KMOD_TCPSTAT_INC(tcps_keepprobe);
 		t_template = tcpip_maketemplate(inp);
 		if (t_template) {
 			if (rack->forced_ack == 0) {
 				rack->forced_ack = 1;
 				rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL);
 			} else {
 				rack->probe_not_answered = 1;
 			}
 			tcp_respond(tp, t_template->tt_ipgen,
 			    &t_template->tt_t, (struct mbuf *)NULL,
 			    tp->rcv_nxt, tp->snd_una - 1, 0);
 			free(t_template, M_TEMP);
 		}
 	}
 	rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
 	return (1);
 dropit:
 	KMOD_TCPSTAT_INC(tcps_keepdrops);
 	tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
 	return (-ETIMEDOUT);	/* tcp_drop() */
 }
 
 /*
  * Retransmit helper function, clear up all the ack
 * flags and take care of important bookkeeping.
  */
 static void
 rack_remxt_tmr(struct tcpcb *tp)
 {
 	/*
 	 * The retransmit timer went off, all sack'd blocks must be
 	 * un-acked.
 	 */
 	struct rack_sendmap *rsm, *trsm = NULL;
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__);
 	rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL);
 	if (rack->r_state && (rack->r_state != tp->t_state))
 		rack_set_state(tp, rack);
 	/*
 	 * Ideally we would like to be able to
 	 * mark SACK-PASS on anything not acked here.
 	 *
 	 * However, if we do that we would burst out
 	 * all that data 1ms apart. This would be unwise,
 	 * so for now we will just let the normal rxt timer
 	 * and tlp timer take care of it.
 	 *
 	 * Also we really need to stick them back in sequence
 	 * order. This way we send in the proper order and any
 	 * sacks that come floating in will "re-ack" the data.
 	 * To do this we zap the tmap with an INIT and then
 	 * walk through and place every rsm in the tail queue
 	 * hash table back in its seq ordered place.
 	 */
 	TAILQ_INIT(&rack->r_ctl.rc_tmap);
 
 	TQHASH_FOREACH(rsm, rack->r_ctl.tqh)  {
 		rsm->r_dupack = 0;
 		if (rack_verbose_logging)
 			rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
 		/* We must re-add it back to the tlist */
 		if (trsm == NULL) {
 			TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 		} else {
 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
 		}
 		rsm->r_in_tmap = 1;
 		trsm = rsm;
 		if (rsm->r_flags & RACK_ACKED)
 			rsm->r_flags |= RACK_WAS_ACKED;
 		rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED);
 		rsm->r_flags |= RACK_MUST_RXT;
 	}
 	/* Clear the count (we just un-acked them) */
 	rack->r_ctl.rc_last_timeout_snduna = tp->snd_una;
 	rack->r_ctl.rc_sacked = 0;
 	rack->r_ctl.rc_sacklast = NULL;
 	rack->r_ctl.rc_agg_delayed = 0;
 	rack->r_early = 0;
 	rack->r_ctl.rc_agg_early = 0;
 	rack->r_late = 0;
 	/* Clear the tlp rtx mark */
 	rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh);
 	if (rack->r_ctl.rc_resend != NULL)
 		rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT;
 	rack->r_ctl.rc_prr_sndcnt = 0;
 	rack_log_to_prr(rack, 6, 0, __LINE__);
 	rack->r_timer_override = 1;
 	if ((((tp->t_flags & TF_SACK_PERMIT) == 0)
 #ifdef TCP_SAD_DETECTION
 	    || (rack->sack_attack_disable != 0)
 #endif
 		    ) && ((tp->t_flags & TF_SENTFIN) == 0)) {
 		/*
 		 * For non-sack customers new data
 		 * needs to go out as retransmits until
 		 * we retransmit up to snd_max.
 		 */
 		rack->r_must_retran = 1;
 		rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp,
 						rack->r_ctl.rc_sacked);
 	}
 	rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
 }
 
 static void
 rack_convert_rtts(struct tcpcb *tp)
 {
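 	/*
 	 * Switch the tcb to microsecond timer granularity and recompute
 	 * the RTO (t_rxtcur) in the new units, clamping it to rack_rto_max.
 	 */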
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC);
 	tp->t_rxtcur = RACK_REXMTVAL(tp);
 	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
 		tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop);
 	}
 	if (tp->t_rxtcur > rack_rto_max) {
 		tp->t_rxtcur = rack_rto_max;
 	}
 }
 
 static void
 rack_cc_conn_init(struct tcpcb *tp)
 {
 	struct tcp_rack *rack;
 	uint32_t srtt;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	srtt = tp->t_srtt;
 	cc_conn_init(tp);
 	/*
 	 * Now convert to rack's internal format,
 	 * if required.
 	 */
 	if ((srtt == 0) && (tp->t_srtt != 0))
 		rack_convert_rtts(tp);
 	/*
 	 * We want a chance to stay in slowstart as
 	 * we create a connection. TCP spec says that
 	 * initially ssthresh is infinite. For our
 	 * purposes that is the snd_wnd.
 	 */
 	if (tp->snd_ssthresh < tp->snd_wnd) {
 		tp->snd_ssthresh = tp->snd_wnd;
 	}
 	/*
 	 * We also want to assure an IW worth of
 	 * data can get in flight.
 	 */
 	if (rc_init_window(rack) < tp->snd_cwnd)
 		tp->snd_cwnd = rc_init_window(rack);
 }
 
 /*
  * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
  * we will setup to retransmit the lowest seq number outstanding.
  */
 static int
 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	int32_t rexmt;
 	int32_t retval = 0;
 	bool isipv6;
 
 	if ((tp->t_flags & TF_GPUTINPROG) &&
 	    (tp->t_rxtshift)) {
 		/*
 		 * We have had a second timeout;
 		 * measurements on successive rxt's are not profitable.
 		 * It is unlikely to be of any use (the network is
 		 * broken or the client went away).
 		 */
 		tp->t_flags &= ~TF_GPUTINPROG;
 		rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
 					   rack->r_ctl.rc_gp_srtt /*flex1*/,
 					   tp->gput_seq,
 					   0, 0, 18, __LINE__, NULL, 0);
 	}
 	if (ctf_progress_timeout_check(tp, false)) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
 		rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
 		return (-ETIMEDOUT);	/* tcp_drop() */
 	}
 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT;
 	rack->r_ctl.retran_during_recovery = 0;
 	rack->rc_ack_required = 1;
 	rack->r_ctl.dsack_byte_cnt = 0;
 	if (IN_FASTRECOVERY(tp->t_flags))
 		tp->t_flags |= TF_WASFRECOVERY;
 	else
 		tp->t_flags &= ~TF_WASFRECOVERY;
 	if (IN_CONGRECOVERY(tp->t_flags))
 		tp->t_flags |= TF_WASCRECOVERY;
 	else
 		tp->t_flags &= ~TF_WASCRECOVERY;
 	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    (tp->snd_una == tp->snd_max)) {
 		/* Nothing outstanding .. nothing to do */
 		return (0);
 	}
 	if (rack->r_ctl.dsack_persist) {
 		rack->r_ctl.dsack_persist--;
 		if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
 			rack->r_ctl.num_dsack = 0;
 		}
 		rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
 	}
 	/*
 	 * Rack can only run one timer at a time, so we cannot
 	 * run a KEEPINIT (gating SYN sending) and a retransmit
 	 * timer for the SYN. So if we are in a pre-established state
 	 * and have a KEEPINIT timer we need to check the first transmit
 	 * against now to see if we have exceeded the KEEPINIT time
 	 * (if one is set).
 	 */
 	if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) &&
 	    (TP_KEEPINIT(tp) != 0)) {
 		struct rack_sendmap *rsm;
 
 		rsm = tqhash_min(rack->r_ctl.tqh);
 		if (rsm) {
 			/* Ok we have something outstanding to test keepinit with */
 			if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
 			    ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
 				/* We have exceeded the KEEPINIT time */
 				tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
 				goto drop_it;
 			}
 		}
 	}
 	/*
 	 * Retransmission timer went off.  Message has not been acked within
 	 * retransmit interval.  Back off to a longer retransmit interval
 	 * and retransmit one segment.
 	 */
 	rack_remxt_tmr(tp);
 	if ((rack->r_ctl.rc_resend == NULL) ||
 	    ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) {
 		/*
 		 * If the rwnd collapsed on
 		 * the one we are retransmitting
 		 * it does not count against the
 		 * rxt count.
 		 */
 		tp->t_rxtshift++;
 	}
 	if (tp->t_rxtshift > V_tcp_retries) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
 drop_it:
 		tp->t_rxtshift = V_tcp_retries;
 		KMOD_TCPSTAT_INC(tcps_timeoutdrop);
 		/* XXXGL: previously t_softerror was casted to uint16_t */
 		MPASS(tp->t_softerror >= 0);
 		retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT;
 		goto out;	/* tcp_drop() */
 	}
 	if (tp->t_state == TCPS_SYN_SENT) {
 		/*
 		 * If the SYN was retransmitted, indicate CWND to be limited
 		 * to 1 segment in cc_conn_init().
 		 */
 		tp->snd_cwnd = 1;
 	} else if (tp->t_rxtshift == 1) {
 		/*
 		 * first retransmit; record ssthresh and cwnd so they can be
 		 * recovered if this turns out to be a "bad" retransmit. A
 		 * retransmit is considered "bad" if an ACK for this segment
 		 * is received within RTT/2 interval; the assumption here is
 		 * that the ACK was already in flight.  See "On Estimating
 		 * End-to-End Network Path Properties" by Allman and Paxson
 		 * for more details.
 		 */
 		tp->snd_cwnd_prev = tp->snd_cwnd;
 		tp->snd_ssthresh_prev = tp->snd_ssthresh;
 		tp->snd_recover_prev = tp->snd_recover;
 		tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2);
 		tp->t_flags |= TF_PREVVALID;
 	} else if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
 		tp->t_flags &= ~TF_PREVVALID;
 	KMOD_TCPSTAT_INC(tcps_rexmttimeo);
 	if ((tp->t_state == TCPS_SYN_SENT) ||
 	    (tp->t_state == TCPS_SYN_RECEIVED))
 		rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift];
 	else
 		rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift];
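 	/*
 	 * Note: tcp_backoff[] grows roughly exponentially per shift
 	 * (1, 2, 4, 8, ...), so, purely as an illustration, a base value
 	 * of 200ms would back off to about 1.6s by the third timeout,
 	 * subject to the clamping done by RACK_TCPT_RANGESET below.
 	 */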
 
 	RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt,
 	   max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop);
 	/*
 	 * We enter the path for PLMTUD if the connection is established
 	 * or is in FIN_WAIT_1 state; the reason for the latter is that if
 	 * the amount of data we send is very small, we could send it in a
 	 * couple of packets and proceed straight to FIN. In that case we
 	 * won't catch the ESTABLISHED state.
 	 */
 #ifdef INET6
 	isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
 #else
 	isipv6 = false;
 #endif
 	if (((V_tcp_pmtud_blackhole_detect == 1) ||
 	    (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
 	    (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
 	    ((tp->t_state == TCPS_ESTABLISHED) ||
 	    (tp->t_state == TCPS_FIN_WAIT_1))) {
 		/*
 		 * The idea here is that each stage of the mtu probe (usually
 		 * 1448 -> 1188 -> 524) should be given 2 chances to recover
 		 * before we clamp down further. 'tp->t_rxtshift % 2 == 0'
 		 * should take care of that.
 		 */
 		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) ==
 		    (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) &&
 		    (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
 		    tp->t_rxtshift % 2 == 0)) {
 			/*
 			 * Enter Path MTU Black-hole Detection mechanism: -
 			 * Disable Path MTU Discovery (IP "DF" bit). -
 			 * Reduce MTU to lower value than what we negotiated
 			 * with peer.
 			 */
 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
 				/* Record that we may have found a black hole. */
 				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
 				/* Keep track of previous MSS. */
 				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
 			}
 
 			/*
 			 * Reduce the MSS to blackhole value or to the
 			 * default in an attempt to retransmit.
 			 */
 #ifdef INET6
 			if (isipv6 &&
 			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
 				/* Use the sysctl tuneable blackhole MSS. */
 				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
 			} else if (isipv6) {
 				/* Use the default MSS. */
 				tp->t_maxseg = V_tcp_v6mssdflt;
 				/*
 				 * Disable Path MTU Discovery when we switch
 				 * to minmss.
 				 */
 				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
 			}
 #endif
 #if defined(INET6) && defined(INET)
 			else
 #endif
 #ifdef INET
 			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
 				/* Use the sysctl tuneable blackhole MSS. */
 				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated);
 			} else {
 				/* Use the default MSS. */
 				tp->t_maxseg = V_tcp_mssdflt;
 				/*
 				 * Disable Path MTU Discovery when we switch
 				 * to minmss.
 				 */
 				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
 			}
 #endif
 		} else {
 			/*
 			 * If further retransmissions are still unsuccessful
 			 * with a lowered MTU, maybe this isn't a blackhole
 			 * and we restore the previous MSS and blackhole
 			 * detection flags. The limit '6' is determined by
 			 * giving each probe stage (1448, 1188, 524) 2
 			 * chances to recover.
 			 */
 			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
 			    (tp->t_rxtshift >= 6)) {
 				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
 				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
 				KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed);
 			}
 		}
 	}
 	/*
 	 * Disable RFC1323 and SACK if we haven't got any response to
 	 * our third SYN to work-around some broken terminal servers
 	 * (most of which have hopefully been retired) that have bad VJ
 	 * header compression code which trashes TCP segments containing
 	 * unknown-to-them TCP options.
 	 */
 	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
 	    (tp->t_rxtshift == 3))
 		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
 	/*
 	 * If we backed off this far, our srtt estimate is probably bogus.
 	 * Clobber it so we'll take the next rtt measurement as our srtt;
 	 * move the current srtt into rttvar to keep the current retransmit
 	 * times until then.
 	 */
 	if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
 #ifdef INET6
 		if ((inp->inp_vflag & INP_IPV6) != 0)
 			in6_losing(inp);
 		else
 #endif
 			in_losing(inp);
 		tp->t_rttvar += tp->t_srtt;
 		tp->t_srtt = 0;
 	}
 	sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
 	tp->snd_recover = tp->snd_max;
 	tp->t_flags |= TF_ACKNOW;
 	tp->t_rtttime = 0;
 	rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__);
 out:
 	return (retval);
 }
 
 static int
 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp)
 {
 	int32_t ret = 0;
 	int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK);
 
 	if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
 	    (tp->t_flags & TF_GPUTINPROG)) {
 		/*
 		 * We have a goodput in progress
 		 * and we have entered a late state.
 		 * Do we have enough data in the sb
 		 * to handle the GPUT request?
 		 */
 		uint32_t bytes;
 
 		bytes = tp->gput_ack - tp->gput_seq;
 		if (SEQ_GT(tp->gput_seq, tp->snd_una))
 			bytes += tp->gput_seq - tp->snd_una;
 		if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
 			/*
 			 * There are not enough bytes in the socket
 			 * buffer that have been sent to cover this
 			 * measurement. Cancel it.
 			 */
 			rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
 						   rack->r_ctl.rc_gp_srtt /*flex1*/,
 						   tp->gput_seq,
 						   0, 0, 18, __LINE__, NULL, 0);
 			tp->t_flags &= ~TF_GPUTINPROG;
 		}
 	}
 	if (timers == 0) {
 		return (0);
 	}
 	if (tp->t_state == TCPS_LISTEN) {
 		/* no timers on listen sockets */
 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)
 			return (0);
 		return (1);
 	}
 	if ((timers & PACE_TMR_RACK) &&
 	    rack->rc_on_min_to) {
 		/*
 		 * For the rack timer, when we
 		 * are on a min-timeout (which means rrr_conf = 3)
 		 * we don't want to check the timer. It may
 		 * be going off for a pace and that's ok; we
 		 * want to send the retransmit (if it's ready).
 		 *
 		 * If it's on a normal rack timer (non-min) then
 		 * we will check if it has expired.
 		 */
 		goto skip_time_check;
 	}
 	if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
 		uint32_t left;
 
 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
 			ret = -1;
 			rack_log_to_processing(rack, cts, ret, 0);
 			return (0);
 		}
 		if (hpts_calling == 0) {
 			/*
 			 * A user send or queued mbuf (sack) has called us? We
 			 * return 0 and let the pacing guards
 			 * decide whether or not they should
 			 * cause a send.
 			 */
 			ret = -2;
 			rack_log_to_processing(rack, cts, ret, 0);
 			return (0);
 		}
 		/*
 		 * Ok, our timer went off early and we are not paced: a false
 		 * alarm, so go back to sleep. We make sure the no-sack wakeup
 		 * flag is cleared since we no longer have a PKT_OUTPUT flag
 		 * in place.
 		 */
 		rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
 		ret = -3;
 		left = rack->r_ctl.rc_timer_exp - cts;
 		tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left));
 		rack_log_to_processing(rack, cts, ret, left);
 		return (1);
 	}
 skip_time_check:
 	rack->rc_tmr_stopped = 0;
 	rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK;
 	if (timers & PACE_TMR_DELACK) {
 		ret = rack_timeout_delack(tp, rack, cts);
 	} else if (timers & PACE_TMR_RACK) {
 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
 		rack->r_fast_output = 0;
 		ret = rack_timeout_rack(tp, rack, cts);
 	} else if (timers & PACE_TMR_TLP) {
 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
 		ret = rack_timeout_tlp(tp, rack, cts, doing_tlp);
 	} else if (timers & PACE_TMR_RXT) {
 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
 		rack->r_fast_output = 0;
 		ret = rack_timeout_rxt(tp, rack, cts);
 	} else if (timers & PACE_TMR_PERSIT) {
 		ret = rack_timeout_persist(tp, rack, cts);
 	} else if (timers & PACE_TMR_KEEP) {
 		ret = rack_timeout_keepalive(tp, rack, cts);
 	}
 	rack_log_to_processing(rack, cts, ret, timers);
 	return (ret);
 }
 
 static void
 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line)
 {
 	struct timeval tv;
 	uint32_t us_cts, flags_on_entry;
 	uint8_t hpts_removed = 0;
 
 	flags_on_entry = rack->r_ctl.rc_hpts_flags;
 	us_cts = tcp_get_usecs(&tv);
 	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
 	    ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) ||
 	     ((tp->snd_max - tp->snd_una) == 0))) {
 		tcp_hpts_remove(rack->rc_tp);
 		hpts_removed = 1;
 		/* If we were not delayed cancel out the flag. */
 		if ((tp->snd_max - tp->snd_una) == 0)
 			rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
 		rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
 	}
 	if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 		rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
 		if (tcp_in_hpts(rack->rc_tp) &&
 		    ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) {
 			/*
 			 * Canceling timers when we have no output being
 			 * paced. We must also remove ourselves from the
 			 * hpts.
 			 */
 			tcp_hpts_remove(rack->rc_tp);
 			hpts_removed = 1;
 		}
 		rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK);
 	}
 	if (hpts_removed == 0)
 		rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry);
 }
 
 static int
 rack_stopall(struct tcpcb *tp)
 {
 	struct tcp_rack *rack;
+
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	rack->t_timers_stopped = 1;
+
+	tcp_hpts_remove(tp);
+
 	return (0);
 }
 
 static void
 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	/*
 	 * Assure no timers are running.
 	 */
 	if (tcp_timer_active(tp, TT_PERSIST)) {
 		/* We enter in persists, set the flag appropriately */
 		rack->rc_in_persist = 1;
 	}
 	if (tcp_in_hpts(rack->rc_tp)) {
 		tcp_hpts_remove(rack->rc_tp);
 	}
 }
 
 static void
 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
     struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag, int segsiz)
 {
 	int32_t idx;
 
 	rsm->r_rtr_cnt++;
 	rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
 	rsm->r_dupack = 0;
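 	/*
 	 * Cap the retransmit count so the r_tim_lastsent[] indexing below
 	 * stays in bounds; once capped, the rsm is marked RACK_OVERMAX since
 	 * its per-retransmit timestamps can no longer be fully trusted.
 	 */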
 	if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
 		rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
 		rsm->r_flags |= RACK_OVERMAX;
 	}
 	if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
 		rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
 		rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
 	}
 	idx = rsm->r_rtr_cnt - 1;
 	rsm->r_tim_lastsent[idx] = ts;
 	/*
 	 * Here we don't add in the len of the send, since it is already
 	 * in snd_una <-> snd_max.
 	 */
 	rsm->r_fas = ctf_flight_size(rack->rc_tp,
 				     rack->r_ctl.rc_sacked);
 	if (rsm->r_flags & RACK_ACKED) {
 		/* Probably MTU discovery messing with us */
 		rsm->r_flags &= ~RACK_ACKED;
 		rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
 	}
 	if (rsm->r_in_tmap) {
 		TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 		rsm->r_in_tmap = 0;
 	}
 	/* Let's make sure whether it really is in the GP window or not */
 	rack_mark_in_gp_win(tp, rsm);
 	TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 	rsm->r_in_tmap = 1;
 	rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz);
 	/* Take off the must-retransmit flag, if it's on */
 	if (rsm->r_flags & RACK_MUST_RXT) {
 		if (rack->r_must_retran)
 			rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
 		if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
 			/*
 			 * We have retransmitted all we need. Clear
 			 * any must retransmit flags.
 			 */
 			rack->r_must_retran = 0;
 			rack->r_ctl.rc_out_at_rto = 0;
 		}
 		rsm->r_flags &= ~RACK_MUST_RXT;
 	}
 	/* Remove any collapsed flag */
 	rsm->r_flags &= ~RACK_RWND_COLLAPSED;
 	if (rsm->r_flags & RACK_SACK_PASSED) {
 		/* We have retransmitted due to the SACK pass */
 		rsm->r_flags &= ~RACK_SACK_PASSED;
 		rsm->r_flags |= RACK_WAS_SACKPASS;
 	}
 }
 
 static uint32_t
 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
     struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag, int segsiz)
 {
 	/*
 	 * We (re-)transmitted starting at rsm->r_start for some length
 	 * (possibly ending before r_end).
 	 */
 	struct rack_sendmap *nrsm;
 	int insret __diagused;
 	uint32_t c_end;
 	int32_t len;
 
 	len = *lenp;
 	c_end = rsm->r_start + len;
 	if (SEQ_GEQ(c_end, rsm->r_end)) {
 		/*
 		 * We retransmitted the whole piece or more than the whole
 		 * thing, slopping over into the next rsm.
 		 */
 		rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz);
 		if (c_end == rsm->r_end) {
 			*lenp = 0;
 			return (0);
 		} else {
 			int32_t act_len;
 
 			/* Hangs over the end; return what's left */
 			act_len = rsm->r_end - rsm->r_start;
 			*lenp = (len - act_len);
 			return (rsm->r_end);
 		}
 		/* We don't get out of this block. */
 	}
 	/*
 	 * Here we retransmitted less than the whole thing which means we
 	 * have to split this into what was transmitted and what was not.
 	 */
 	nrsm = rack_alloc_full_limit(rack);
 	if (nrsm == NULL) {
 		/*
 		 * We can't get memory, so lets not proceed.
 		 */
 		*lenp = 0;
 		return (0);
 	}
 	/*
 	 * So here we are going to take the original rsm and make it what we
 	 * retransmitted. nrsm will be the tail portion we did not
 	 * retransmit. For example say the chunk was 1, 11 (10 bytes). And
 	 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to
 	 * 1, 6 and the new piece will be 6, 11.
 	 */
 	rack_clone_rsm(rack, nrsm, rsm, c_end);
 	nrsm->r_dupack = 0;
 	rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
 #ifndef INVARIANTS
 	(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
 #else
 	if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
 		panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
 		      nrsm, insret, rack, rsm);
 	}
 #endif
 	if (rsm->r_in_tmap) {
 		TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 		nrsm->r_in_tmap = 1;
 	}
 	rsm->r_flags &= (~RACK_HAS_FIN);
 	rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz);
 	/* Log a split of rsm into rsm and nrsm */
 	rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
 	*lenp = 0;
 	return (0);
 }
 
 static void
 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
 		uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts,
 		struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb,
 		uint32_t s_moff, int hw_tls, int segsiz)
 {
 	struct tcp_rack *rack;
 	struct rack_sendmap *rsm, *nrsm;
 	int insret __diagused;
 
 	register uint32_t snd_max, snd_una;
 
 	/*
 	 * Add to the RACK log of packets in flight or retransmitted. If
 	 * there is a TS option we will use the TS echoed, if not we will
 	 * grab a TS.
 	 *
 	 * Retransmissions will increment the count and move the ts to its
 	 * proper place. Note that if options do not include TS's then we
 	 * won't be able to effectively use the ACK for an RTT on a retran.
 	 *
 	 * Notes about r_start and r_end. Lets consider a send starting at
 	 * sequence 1 for 10 bytes. In such an example the r_start would be
 	 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
 	 * This means that r_end is actually the first sequence for the next
 	 * slot (11).
 	 *
 	 */
 	/*
 	 * If err is set what do we do XXXrrs? should we not add the thing?
 	 * -- i.e. return if err != 0 or should we pretend we sent it? --
 	 * i.e. proceed with add ** do this for now.
 	 */
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	if (err)
 		/*
 		 * We don't log errors -- we could but snd_max does not
 		 * advance in this case either.
 		 */
 		return;
 
 	if (th_flags & TH_RST) {
 		/*
 		 * We don't log resets and we return immediately from
 		 * sending
 		 */
 		return;
 	}
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	snd_una = tp->snd_una;
 	snd_max = tp->snd_max;
 	if (th_flags & (TH_SYN | TH_FIN)) {
 		/*
 		 * The call to rack_log_output is made before bumping
 		 * snd_max. This means we can record one extra byte on a SYN
 		 * or FIN if seq_out is adding more on and a FIN is present
 		 * (and we are not resending).
 		 */
 		if ((th_flags & TH_SYN) && (seq_out == tp->iss))
 			len++;
 		if (th_flags & TH_FIN)
 			len++;
 		if (SEQ_LT(snd_max, tp->snd_nxt)) {
 			/*
 			 * The add/update has not been done for the FIN/SYN
 			 * yet.
 			 */
 			snd_max = tp->snd_nxt;
 		}
 	}
 	if (SEQ_LEQ((seq_out + len), snd_una)) {
 		/* Are we sending an old segment to induce an ack (keep-alive)? */
 		return;
 	}
 	if (SEQ_LT(seq_out, snd_una)) {
 		/* huh? should we panic? */
 		uint32_t end;
 
 		end = seq_out + len;
 		seq_out = snd_una;
 		if (SEQ_GEQ(end, seq_out))
 			len = end - seq_out;
 		else
 			len = 0;
 	}
 	if (len == 0) {
 		/* We don't log zero window probes */
 		return;
 	}
 	if (IN_FASTRECOVERY(tp->t_flags)) {
 		rack->r_ctl.rc_prr_out += len;
 	}
 	/* First question is it a retransmission or new? */
 	if (seq_out == snd_max) {
 		/* Its new */
 		rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts);
 again:
 		rsm = rack_alloc(rack);
 		if (rsm == NULL) {
 			/*
 			 * Hmm out of memory and the tcb got destroyed while
 			 * we tried to wait.
 			 */
 			return;
 		}
 		if (th_flags & TH_FIN) {
 			rsm->r_flags = RACK_HAS_FIN|add_flag;
 		} else {
 			rsm->r_flags = add_flag;
 		}
 		if (hw_tls)
 			rsm->r_hw_tls = 1;
 		rsm->r_tim_lastsent[0] = cts;
 		rsm->r_rtr_cnt = 1;
 		rsm->r_rtr_bytes = 0;
 		if (th_flags & TH_SYN) {
 			/* The data space is one beyond snd_una */
 			rsm->r_flags |= RACK_HAS_SYN;
 		}
 		rsm->r_start = seq_out;
 		rsm->r_end = rsm->r_start + len;
 		rack_mark_in_gp_win(tp, rsm);
 		rsm->r_dupack = 0;
 		/*
 		 * Save off the mbuf location that
 		 * sndmbuf_noadv returned (which is
 		 * where we started copying from).
 		 */
 		rsm->m = s_mb;
 		rsm->soff = s_moff;
 		/*
 		 * Here we do add in the len of the send, since it is not yet
 		 * reflected in snd_una <-> snd_max.
 		 */
 		rsm->r_fas = (ctf_flight_size(rack->rc_tp,
 					      rack->r_ctl.rc_sacked) +
 			      (rsm->r_end - rsm->r_start));
 		/* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
 		if (rsm->m) {
 			if (rsm->m->m_len <= rsm->soff) {
 				/*
 				 * XXXrrs Question, will this happen?
 				 *
 				 * If sbsndptr is set at the correct place
 				 * then s_moff should always be somewhere
 				 * within rsm->m. But if the sbsndptr was
 				 * off then that won't be true. If it occurs
 				 * we need to walk out to the correct location.
 				 */
 				struct mbuf *lm;
 
 				lm = rsm->m;
 				while (lm->m_len <= rsm->soff) {
 					rsm->soff -= lm->m_len;
 					lm = lm->m_next;
 					KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
 							     __func__, rack, s_moff, s_mb, rsm->soff));
 				}
 				rsm->m = lm;
 			}
 			rsm->orig_m_len = rsm->m->m_len;
 			rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 		} else {
 			rsm->orig_m_len = 0;
 			rsm->orig_t_space = 0;
 		}
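 		/*
 		 * Record how many segsiz-sized segments this send covered
 		 * (a ceiling division of len by segsiz).
 		 */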
 		rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz);
 		rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
 		/* Log a new rsm */
 		rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
 #ifndef INVARIANTS
 		(void)tqhash_insert(rack->r_ctl.tqh, rsm);
 #else
 		if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
 			panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
 			      nrsm, insret, rack, rsm);
 		}
 #endif
 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 		rsm->r_in_tmap = 1;
 		/*
 		 * Special case detection: is there just a single
 		 * packet outstanding when we are not in recovery?
 		 *
 		 * If so, mark it.
 		 */
 		if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
 		    (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
 			struct rack_sendmap *prsm;
 
 			prsm = tqhash_prev(rack->r_ctl.tqh, rsm);
 			if (prsm)
 				prsm->r_one_out_nr = 1;
 		}
 		return;
 	}
 	/*
 	 * If we reach here it's a retransmission and we need to find it.
 	 */
 more:
 	if (hintrsm && (hintrsm->r_start == seq_out)) {
 		rsm = hintrsm;
 		hintrsm = NULL;
 	} else {
 		/* No hints sorry */
 		rsm = NULL;
 	}
 	if ((rsm) && (rsm->r_start == seq_out)) {
 		seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz);
 		if (len == 0) {
 			return;
 		} else {
 			goto more;
 		}
 	}
 	/* Ok, it was not the last pointer; go through it the hard way. */
 refind:
 	rsm = tqhash_find(rack->r_ctl.tqh, seq_out);
 	if (rsm) {
 		if (rsm->r_start == seq_out) {
 			seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz);
 			if (len == 0) {
 				return;
 			} else {
 				goto refind;
 			}
 		}
 		if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
 			/* Transmitted within this piece */
 			/*
 			 * Ok we must split off the front and then let the
 			 * update do the rest
 			 */
 			nrsm = rack_alloc_full_limit(rack);
 			if (nrsm == NULL) {
 				rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz);
 				return;
 			}
 			/*
 			 * copy rsm to nrsm and then trim the front of rsm
 			 * to not include this part.
 			 */
 			rack_clone_rsm(rack, nrsm, rsm, seq_out);
 			rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
 #ifndef INVARIANTS
 			(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
 #else
 			if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
 				panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
 				      nrsm, insret, rack, rsm);
 			}
 #endif
 			if (rsm->r_in_tmap) {
 				TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 				nrsm->r_in_tmap = 1;
 			}
 			rsm->r_flags &= (~RACK_HAS_FIN);
 			seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz);
 			if (len == 0) {
 				return;
 			} else if (len > 0)
 				goto refind;
 		}
 	}
 	/*
 	 * Hmm, not found in the map; did they retransmit both old data and
 	 * on into the new?
 	 */
 	if (seq_out == tp->snd_max) {
 		goto again;
 	} else if (SEQ_LT(seq_out, tp->snd_max)) {
 #ifdef INVARIANTS
 		printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
 		       seq_out, len, tp->snd_una, tp->snd_max);
 		printf("Starting Dump of all rack entries\n");
 		TQHASH_FOREACH(rsm, rack->r_ctl.tqh)  {
 			printf("rsm:%p start:%u end:%u\n",
 			       rsm, rsm->r_start, rsm->r_end);
 		}
 		printf("Dump complete\n");
 		panic("seq_out not found rack:%p tp:%p",
 		      rack, tp);
 #endif
 	} else {
 #ifdef INVARIANTS
 		/*
 		 * Hmm beyond sndmax? (only if we are using the new rtt-pack
 		 * flag)
 		 */
 		panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p",
 		      seq_out, len, tp->snd_max, tp);
 #endif
 	}
 }
 
 /*
  * Record one of the RTT updates from an ack into
  * our sample structure.
  */
 
 static void
 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
 		    int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
 {
 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
 	    (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
 		rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
 	}
 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
 	    (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
 		rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
 	}
 	if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
 	    if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
 		rack->r_ctl.rc_gp_lowrtt = us_rtt;
 	    if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
 		    rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
 	}
 	if ((confidence == 1) &&
 	    ((rsm == NULL) ||
 	     (rsm->r_just_ret) ||
 	     (rsm->r_one_out_nr &&
 	      len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
 		/*
 		 * If the rsm had a just-return
 		 * hit on it then we can't trust the
 		 * rtt measurement for buffer determination.
 		 * Note that a confidence of 2 indicates
 		 * SACK'd data, which overrides the r_just_ret or
 		 * the r_one_out_nr checks. If it was a CUM-ACK and
 		 * we had only two outstanding, but got an
 		 * ack for only 1, then that also lowers our
 		 * confidence.
 		 */
 		confidence = 0;
 	}
 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
 	    (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
 		if (rack->r_ctl.rack_rs.confidence == 0) {
 			/*
 			 * We take anything with no current confidence
 			 * saved.
 			 */
 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
 			rack->r_ctl.rack_rs.confidence = confidence;
 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
 		} else if (confidence != 0) {
 			/*
 			 * Once we have a confident number,
 			 * we can update it with a smaller
 			 * value since this confident number
 			 * may include the DSACK time until
 			 * the next segment (the second one) arrived.
 			 */
 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
 			rack->r_ctl.rack_rs.confidence = confidence;
 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
 		}
 	}
 	rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
 	rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
 	rack->r_ctl.rack_rs.rs_rtt_cnt++;
 }
 
 /*
  * Collect new round-trip time estimate
  * and update averages and current timeout.
  */
 static void
 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
 {
 	int32_t delta;
 	int32_t rtt;
 
 	if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY)
 		/* No valid sample */
 		return;
 	if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) {
 		/* We are to use the lowest RTT seen in a single ack */
 		rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
 	} else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) {
 		/* We are to use the highest RTT seen in a single ack */
 		rtt = rack->r_ctl.rack_rs.rs_rtt_highest;
 	} else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) {
 		/* We are to use the average RTT seen in a single ack */
 		rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot /
 				(uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt);
 	} else {
 #ifdef INVARIANTS
 		panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
 #endif
 		return;
 	}
 	if (rtt == 0)
 		rtt = 1;
 	if (rack->rc_gp_rtt_set == 0) {
 		/*
 		 * With no RTT we have to accept
 		 * even one we are not confident of.
 		 */
 		rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt;
 		rack->rc_gp_rtt_set = 1;
 	} else if (rack->r_ctl.rack_rs.confidence) {
 		/* update the running gp srtt */
 		rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8);
 		rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8;
 	}
 	if (rack->r_ctl.rack_rs.confidence) {
 		/*
 		 * Record the low and high for the highly-buffered-path
 		 * computation; we only do this if we are confident (not a
 		 * retransmission).
 		 */
 		if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) {
 			rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
 		}
 		if (rack->rc_highly_buffered == 0) {
 			/*
 			 * Currently, once we declare a path
 			 * highly buffered there is no going
 			 * back, which may be a problem...
 			 */
 			if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) {
 				rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt,
 						     rack->r_ctl.rc_highest_us_rtt,
 						     rack->r_ctl.rc_lowest_us_rtt,
 						     RACK_RTTS_SEEHBP);
 				rack->rc_highly_buffered = 1;
 			}
 		}
 	}
 	if ((rack->r_ctl.rack_rs.confidence) ||
 	    (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) {
 		/*
 		 * If we are highly confident of it <or> it was
 		 * never retransmitted we accept it as the last us_rtt.
 		 */
 		rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
 		/* The lowest rtt can be set if it was not retransmitted */
 		if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) {
 			rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt;
 			if (rack->r_ctl.rc_lowest_us_rtt == 0)
 				rack->r_ctl.rc_lowest_us_rtt = 1;
 		}
 	}
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (tp->t_srtt != 0) {
 		/*
 		 * We keep a simple srtt in microseconds, like our rtt
 		 * measurement. We don't need to do any tricks with shifting
 		 * etc. Instead we just add in 1/8th of the new measurement
 		 * and subtract out 1/8 of the old srtt. We do the same with
 		 * the variance after finding the absolute value of the
 		 * difference between this sample and the current srtt.
 		 */
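 		/*
 		 * Illustrative example (made-up numbers): with t_srtt at
 		 * 8000us and a new sample of 4000us, srtt becomes
 		 * 8000 - 1000 + 500 = 7500us, and rttvar gains 1/8th of
 		 * |delta| = 4000, i.e. +500us, after shedding 1/8th of
 		 * its old value.
 		 */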
 		delta = tp->t_srtt - rtt;
 		/* Take off 1/8th of the current sRTT */
 		tp->t_srtt -= (tp->t_srtt >> 3);
 		/* Add in 1/8th of the new RTT just measured */
 		tp->t_srtt += (rtt >> 3);
 		if (tp->t_srtt <= 0)
 			tp->t_srtt = 1;
 		/* Now lets make the absolute value of the variance */
 		if (delta < 0)
 			delta = -delta;
 		/* Subtract out 1/8th */
 		tp->t_rttvar -= (tp->t_rttvar >> 3);
 		/* Add in 1/8th of the new variance we just saw */
 		tp->t_rttvar += (delta >> 3);
 		if (tp->t_rttvar <= 0)
 			tp->t_rttvar = 1;
 	} else {
 		/*
 		 * No rtt measurement yet - use the unsmoothed rtt. Set the
 		 * variance to half the rtt (so our first retransmit happens
 		 * at 3*rtt).
 		 */
 		tp->t_srtt = rtt;
 		tp->t_rttvar = rtt >> 1;
 	}
 	rack->rc_srtt_measure_made = 1;
 	KMOD_TCPSTAT_INC(tcps_rttupdated);
 	if (tp->t_rttupdated < UCHAR_MAX)
 		tp->t_rttupdated++;
 #ifdef STATS
 	if (rack_stats_gets_ms_rtt == 0) {
 		/* Send in the microsecond rtt used for rxt timeout purposes */
 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt));
 	} else if (rack_stats_gets_ms_rtt == 1) {
 		/* Send in the millisecond rtt used for rxt timeout purposes */
 		int32_t ms_rtt;
 
 		/* Round up */
 		ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
 	} else if (rack_stats_gets_ms_rtt == 2) {
 		/* Send in the millisecond rtt as close to the path RTT as we can get */
 		int32_t ms_rtt;
 
 		/* Round up */
 		ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC;
 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt));
 	}  else {
 		/* Send in the microsecond rtt as close to the path RTT as we can get */
 		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
 	}
 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt));
 #endif
 	/*
 	 * the retransmit should happen at rtt + 4 * rttvar. Because of the
 	 * way we do the smoothing, srtt and rttvar will each average +1/2
 	 * tick of bias.  When we compute the retransmit timer, we want 1/2
 	 * tick of rounding and 1 extra tick because of +-1/2 tick
 	 * uncertainty in the firing of the timer.  The bias will give us
 	 * exactly the 1.5 tick we need.  But, because the bias is
 	 * statistical, we have to test that we don't drop below the minimum
 	 * feasible timer (which is 2 ticks).
 	 */
 	tp->t_rxtshift = 0;
 	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 		      max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop);
 	rack_log_rtt_sample(rack, rtt);
 	tp->t_softerror = 0;
 }
 
 
 static void
 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts)
 {
 	/*
 	 * Apply the inbound us-rtt at us_cts to the filter.
 	 */
 	uint32_t old_rtt;
 
 	old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt);
 	apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt,
 			       us_rtt, us_cts);
 	if (old_rtt > us_rtt) {
 		/* We just hit a new lower rtt time */
 		rack_log_rtt_shrinks(rack,  us_cts,  old_rtt,
 				     __LINE__, RACK_RTTS_NEWRTT);
 		/*
 		 * Only count it if it's lower than what we saw within our
 		 * calculated range.
 		 */
 		if ((old_rtt - us_rtt) > rack_min_rtt_movement) {
 			if (rack_probertt_lower_within &&
 			    rack->rc_gp_dyn_mul &&
 			    (rack->use_fixed_rate == 0) &&
 			    (rack->rc_always_pace)) {
 				/*
 				 * We are seeing a new lower rtt very close
 				 * to the time that we would have entered probe-rtt.
 				 * This is probably due to the fact that a peer flow
 				 * has entered probe-rtt. Let's go in now too.
 				 */
 				uint32_t val;
 
 				val = rack_probertt_lower_within * rack_time_between_probertt;
 				val /= 100;
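 				/*
 				 * val is rack_probertt_lower_within percent of
 				 * the inter-probe-rtt interval; e.g. a setting
 				 * of 10 (an illustrative value) means we jump
 				 * into probe-rtt early if we are within the
 				 * last 10% of the interval.
 				 */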
 				if ((rack->in_probe_rtt == 0)  &&
 				    ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val)))	{
 					rack_enter_probertt(rack, us_cts);
 				}
 			}
 			rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
 		}
 	}
 }
 
 static int
 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
     struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
 {
 	uint32_t us_rtt;
 	int32_t i, all;
 	uint32_t t, len_acked;
 
 	if ((rsm->r_flags & RACK_ACKED) ||
 	    (rsm->r_flags & RACK_WAS_ACKED))
 		/* Already done */
 		return (0);
 	if (rsm->r_no_rtt_allowed) {
 		/* Not allowed */
 		return (0);
 	}
 	if (ack_type == CUM_ACKED) {
 		if (SEQ_GT(th_ack, rsm->r_end)) {
 			len_acked = rsm->r_end - rsm->r_start;
 			all = 1;
 		} else {
 			len_acked = th_ack - rsm->r_start;
 			all = 0;
 		}
 	} else {
 		len_acked = rsm->r_end - rsm->r_start;
 		all = 0;
 	}
 	if (rsm->r_rtr_cnt == 1) {
 
 		t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
 		if ((int)t <= 0)
 			t = 1;
 		if (!tp->t_rttlow || tp->t_rttlow > t)
 			tp->t_rttlow = t;
 		if (!rack->r_ctl.rc_rack_min_rtt ||
 		    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
 			rack->r_ctl.rc_rack_min_rtt = t;
 			if (rack->r_ctl.rc_rack_min_rtt == 0) {
 				rack->r_ctl.rc_rack_min_rtt = 1;
 			}
 		}
 		if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
 			us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
 		else
 			us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
 		if (us_rtt == 0)
 			us_rtt = 1;
 		if (CC_ALGO(tp)->rttsample != NULL) {
 			/* Kick the RTT to the CC */
 			CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
 		}
 		rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
 		if (ack_type == SACKED) {
 			rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
 			tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
 		} else {
 			/*
 			 * We need to setup what our confidence
 			 * is in this ack.
 			 *
 			 * If the rsm was app limited and it is
 			 * less than a mss in length (the end
 			 * of the send) then we have a gap. If we
 			 * were app limited but say we were sending
 			 * multiple MSS's then we are more confident
 			 * in it.
 			 *
 			 * When we are not app-limited then we see if
 			 * the rsm is being included in the current
 			 * measurement; we tell this by the app_limited_needs_set
 			 * flag.
 			 *
 			 * Note that being cwnd blocked is not app-limited,
 			 * and the pacing delay between packets, when we
 			 * are sending only 1 or 2 MSS's, also will show up
 			 * in the RTT. We probably need to examine this algorithm
 			 * a bit more and enhance it to account for the delay
 			 * between rsm's. We could do that by saving off the
 			 * pacing delay of each rsm (in an rsm) and then
 			 * factoring that in somehow, though for now I am
 			 * not sure how :)
 			 */
 			int calc_conf = 0;
 
 			if (rsm->r_flags & RACK_APP_LIMITED) {
 				if (all && (len_acked <= ctf_fixed_maxseg(tp)))
 					calc_conf = 0;
 				else
 					calc_conf = 1;
 			} else if (rack->app_limited_needs_set == 0) {
 				calc_conf = 1;
 			} else {
 				calc_conf = 0;
 			}
 			rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
 			tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt,
 					    calc_conf, rsm, rsm->r_rtr_cnt);
 		}
 		if ((rsm->r_flags & RACK_TLP) &&
 		    (!IN_FASTRECOVERY(tp->t_flags))) {
 			/* Segment was a TLP and our retrans matched */
 			if (rack->r_ctl.rc_tlp_cwnd_reduce) {
 				rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
 			}
 		}
 		if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
 		    (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
 			    (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
 			/* New more recent rack_tmit_time */
 			rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
 			if (rack->r_ctl.rc_rack_tmit_time == 0)
 				rack->r_ctl.rc_rack_tmit_time = 1;
 			rack->rc_rack_rtt = t;
 		}
 		return (1);
 	}
 	/*
 	 * We clear the soft/rxtshift since we got an ack.
 	 * There is no assurance we will call the commit() function
 	 * so we need to clear these to avoid incorrect handling.
 	 */
 	tp->t_rxtshift = 0;
 	RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 		      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
 	tp->t_softerror = 0;
 	if (to && (to->to_flags & TOF_TS) &&
 	    (ack_type == CUM_ACKED) &&
 	    (to->to_tsecr) &&
 	    ((rsm->r_flags & RACK_OVERMAX) == 0)) {
 		/*
 		 * Now which timestamp does it match? In this block the ACK
 		 * must be coming from a previous transmission.
 		 */
 		for (i = 0; i < rsm->r_rtr_cnt; i++) {
 			if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
 				t = cts - (uint32_t)rsm->r_tim_lastsent[i];
 				if ((int)t <= 0)
 					t = 1;
 				if (CC_ALGO(tp)->rttsample != NULL) {
 					/*
 					 * Kick the RTT to the CC. Here
 					 * we lie a bit in that we claim the
 					 * measurement is correct even though
 					 * we retransmitted; this is because
 					 * we matched the timestamps.
 					 */
 					if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
 						us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
 					else
 						us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
 					CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
 				}
 				if ((i + 1) < rsm->r_rtr_cnt) {
 					/*
 					 * The peer ack'd from our previous
 					 * transmission. We have a spurious
 					 * retransmission and thus we don't
 					 * want to update our rack_rtt.
 					 *
 					 * Hmm, should there be a CC revert here?
 					 *
 					 */
 					return (0);
 				}
 				if (!tp->t_rttlow || tp->t_rttlow > t)
 					tp->t_rttlow = t;
 				if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
 					rack->r_ctl.rc_rack_min_rtt = t;
 					if (rack->r_ctl.rc_rack_min_rtt == 0) {
 						rack->r_ctl.rc_rack_min_rtt = 1;
 					}
 				}
 				if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
 				    (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
 					    (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
 					/* New more recent rack_tmit_time */
 					rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
 					if (rack->r_ctl.rc_rack_tmit_time == 0)
 						rack->r_ctl.rc_rack_tmit_time = 1;
 					rack->rc_rack_rtt = t;
 				}
 				rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
 				tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
 						    rsm->r_rtr_cnt);
 				return (1);
 			}
 		}
 		/* If we are logging log out the sendmap */
 		if (tcp_bblogging_on(rack->rc_tp)) {
 			for (i = 0; i < rsm->r_rtr_cnt; i++) {
 				rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr);
 			}
 		}
 		goto ts_not_found;
 	} else {
 		/*
 		 * Ok it's a SACK block that we retransmitted, or a Windows
 		 * machine without timestamps. We can tell nothing from the
 		 * timestamp since either it's not there, or it reflects the
 		 * time the peer last received a segment that moved its
 		 * cum-ack point forward.
 		 */
 ts_not_found:
 		i = rsm->r_rtr_cnt - 1;
 		t = cts - (uint32_t)rsm->r_tim_lastsent[i];
 		if ((int)t <= 0)
 			t = 1;
 		if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
 			/*
 			 * We retransmitted and the ack came back in less
 			 * than the smallest rtt we have observed. We most
 			 * likely did an improper retransmit as outlined in
 			 * 6.2 Step 2 point 2 in the rack-draft so we
 			 * don't want to update our rack_rtt. We in
 			 * theory (in future) might want to think about reverting our
 			 * cwnd state but we won't for now.
 			 */
 			return (0);
 		} else if (rack->r_ctl.rc_rack_min_rtt) {
 			/*
 			 * We retransmitted it and the retransmit did the
 			 * job.
 			 */
 			if (!rack->r_ctl.rc_rack_min_rtt ||
 			    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
 				rack->r_ctl.rc_rack_min_rtt = t;
 				if (rack->r_ctl.rc_rack_min_rtt == 0) {
 					rack->r_ctl.rc_rack_min_rtt = 1;
 				}
 			}
 			if ((rack->r_ctl.rc_rack_tmit_time == 0) ||
 			    (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
 				    (uint32_t)rsm->r_tim_lastsent[i]))) {
 				/* New more recent rack_tmit_time */
 				rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
 				if (rack->r_ctl.rc_rack_tmit_time == 0)
 					rack->r_ctl.rc_rack_tmit_time = 1;
 				rack->rc_rack_rtt = t;
 			}
 			return (1);
 		}
 	}
 	return (0);
 }
 
 /*
  * Mark the SACK_PASSED flag on all entries prior to rsm send wise.
  */
 static void
 rack_log_sack_passed(struct tcpcb *tp,
     struct tcp_rack *rack, struct rack_sendmap *rsm)
 {
 	struct rack_sendmap *nrsm;
 
 	nrsm = rsm;
 	TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
 	    rack_head, r_tnext) {
 		if (nrsm == rsm) {
 			/* Skip the original segment; it is acked */
 			continue;
 		}
 		if (nrsm->r_flags & RACK_ACKED) {
 			/*
 			 * Skip ack'd segments, though we
 			 * should not see these, since tmap
 			 * should not have ack'd segments.
 			 */
 			continue;
 		}
 		if (nrsm->r_flags & RACK_RWND_COLLAPSED) {
 			/*
 			 * If the peer dropped the rwnd on
 			 * these then we don't worry about them.
 			 */
 			continue;
 		}
 		if (nrsm->r_flags & RACK_SACK_PASSED) {
 			/*
 			 * We found one that is already marked
 			 * passed; we have been here before and
 			 * so all others below this are marked.
 			 */
 			break;
 		}
 		nrsm->r_flags |= RACK_SACK_PASSED;
 		nrsm->r_flags &= ~RACK_WAS_SACKPASS;
 	}
 }
 
 static void
 rack_need_set_test(struct tcpcb *tp,
 		   struct tcp_rack *rack,
 		   struct rack_sendmap *rsm,
 		   tcp_seq th_ack,
 		   int line,
 		   int use_which)
 {
 	struct rack_sendmap *s_rsm;
 
 	if ((tp->t_flags & TF_GPUTINPROG) &&
 	    SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
 		/*
 		 * We were app limited, and this ack
 		 * butts up or goes beyond the point where we want
 		 * to start our next measurement. We need
 		 * to record the new gput_ts here and
 		 * possibly update the start sequence.
 		 */
 		uint32_t seq, ts;
 
 		if (rsm->r_rtr_cnt > 1) {
 			/*
 			 * This is a retransmit, can we
 			 * really make any assessment at this
 			 * point?  We are not really sure of
 			 * the timestamp, is it this or the
 			 * previous transmission?
 			 *
 			 * Lets wait for something better that
 			 * is not retransmitted.
 			 */
 			return;
 		}
 		seq = tp->gput_seq;
 		ts = tp->gput_ts;
 		rack->app_limited_needs_set = 0;
 		tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
 		/* Do we start at a new end? */
 		if ((use_which == RACK_USE_BEG) &&
 		    SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
 			/*
 			 * When we get an ACK that just eats
 			 * up some of the rsm, we set RACK_USE_BEG
 			 * since what's at r_start (i.e. th_ack)
 			 * is left unacked and that's where the
 			 * measurement now starts.
 			 */
 			tp->gput_seq = rsm->r_start;
 		}
 		if ((use_which == RACK_USE_END) &&
 		    SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
 			/*
 			 * We use the end when the cumack
 			 * is moving forward and completely
 			 * deleting the rsm passed so basically
 			 * r_end holds th_ack.
 			 *
 			 * For SACK's we also want to use the end
 			 * since this piece just got sacked and
 			 * we want to target anything after that
 			 * in our measurement.
 			 */
 			tp->gput_seq = rsm->r_end;
 		}
 		if (use_which == RACK_USE_END_OR_THACK) {
 			/*
 			 * Special case for the ack moving forward
 			 * (not a sack); we need to move all the
 			 * way up to where this ack's cum-ack moves
 			 * to.
 			 */
 			if (SEQ_GT(th_ack, rsm->r_end))
 				tp->gput_seq = th_ack;
 			else
 				tp->gput_seq = rsm->r_end;
 		}
 		if (SEQ_LT(tp->gput_seq, tp->snd_max))
 			s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
 		else
 			s_rsm = NULL;
 		/*
 		 * Pick up the correct send time if we can; the rsm passed in
 		 * may be equal to s_rsm if RACK_USE_BEG was set. For the other
 		 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will
 		 * find a different seq, i.e. the next send up.
 		 *
 		 * If that has not been sent, s_rsm will be NULL and we must
 		 * arrange it so this function will get called again by setting
 		 * app_limited_needs_set.
 		 */
 		if (s_rsm)
 			rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0];
 		else {
 			/* If we hit here we have to have *not* sent tp->gput_seq */
 			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
 			/* Set it up so we will go through here again */
 			rack->app_limited_needs_set = 1;
 		}
 		if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
 			/*
 			 * We moved beyond this guy's range, re-calculate
 			 * the new end point.
 			 */
 			if (rack->rc_gp_filled == 0) {
 				tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
 			} else {
 				tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
 			}
 		}
 		/*
 		 * We are moving the goal post, we may be able to clear the
 		 * measure_saw_probe_rtt flag.
 		 */
 		if ((rack->in_probe_rtt == 0) &&
 		    (rack->measure_saw_probe_rtt) &&
 		    (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
 			rack->measure_saw_probe_rtt = 0;
 		rack_log_pacing_delay_calc(rack, ts, tp->gput_ts,
 					   seq, tp->gput_seq,
 					   (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) |
 					    (uint64_t)rack->r_ctl.rc_gp_output_ts),
 					   5, line, NULL, 0);
 		if (rack->rc_gp_filled &&
 		    ((tp->gput_ack - tp->gput_seq) <
 		     max(rc_init_window(rack), (MIN_GP_WIN *
 						ctf_fixed_maxseg(tp))))) {
 			uint32_t ideal_amount;
 
 			ideal_amount = rack_get_measure_window(tp, rack);
 			if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) {
 				/*
 				 * There is no sense in continuing this measurement
 				 * because it's too small to gain us anything we
 				 * trust. Skip it and that way we can start a new
 				 * measurement quicker.
 				 */
 				tp->t_flags &= ~TF_GPUTINPROG;
 				rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq,
 							   0, 0,
 							   (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) |
 							    (uint64_t)rack->r_ctl.rc_gp_output_ts),
 							   6, __LINE__, NULL, 0);
 			} else {
 				/*
 				 * Reset the window further out.
 				 */
 				tp->gput_ack = tp->gput_seq + ideal_amount;
 			}
 		}
 		rack_tend_gp_marks(tp, rack);
 		rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm);
 	}
 }
 
 static inline int
 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm)
 {
 	if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) {
 		/* Behind our TLP definition or right at */
 		return (0);
 	}
 	if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) {
 		/* The start is beyond or right at our end of TLP definition */
 		return (0);
 	}
 	/* It has to be a sub-part of the original TLP recorded */
 	return (1);
 }
 
 
 
 static uint32_t
 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack,
 		   struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts,
 		   int *no_extra,
 		   int *moved_two, uint32_t segsiz)
 {
 	uint32_t start, end, changed = 0;
 	struct rack_sendmap stack_map;
 	struct rack_sendmap *rsm, *nrsm, *prev, *next;
 	int insret __diagused;
 	int32_t used_ref = 1;
 	int moved = 0;
 #ifdef TCP_SAD_DETECTION
 	int allow_segsiz;
 	int first_time_through = 1;
 #endif
 	int noextra = 0;
 	int can_use_hookery = 0;
 
 	start = sack->start;
 	end = sack->end;
 	rsm = *prsm;
 
 #ifdef TCP_SAD_DETECTION
 	/*
 	 * There are a surprising number of proxies and middleboxes in the
 	 * world that seem to cut up segments on different boundaries. This
 	 * gets us smaller sacks that are still ok in terms of it being an
 	 * attacker. We use the base segsiz to calculate an allowable
 	 * smallness but also enforce a min on the segsiz in case it is an
 	 * attacker playing games with MSS. So basically if the sack arrives
 	 * and it is larger than a worst-case 960 bytes, we don't classify
 	 * the guy as suspicious.
 	 */
 	allow_segsiz = max(segsiz, 1200) * sad_seg_size_per;
 	allow_segsiz /= 1000;
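 	/*
 	 * sad_seg_size_per is in permille; assuming a default of 800 this
 	 * works out to max(segsiz, 1200) * 0.8, i.e. the 960-byte worst
 	 * case mentioned above.
 	 */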
 #endif
 do_rest_ofb:
 	if ((rsm == NULL) ||
 	    (SEQ_LT(end, rsm->r_start)) ||
 	    (SEQ_GEQ(start, rsm->r_end)) ||
 	    (SEQ_LT(start, rsm->r_start))) {
 		/*
 		 * We are not in the right spot,
 		 * find the correct spot in the tree.
 		 */
 		used_ref = 0;
 		rsm = tqhash_find(rack->r_ctl.tqh, start);
 		moved++;
 	}
 	if (rsm == NULL) {
 		/* TSNH */
 		goto out;
 	}
 #ifdef TCP_SAD_DETECTION
 	/* Now we must check for suspicious activity */
 	if ((first_time_through == 1) &&
 	    ((end - start) < min((rsm->r_end - rsm->r_start), allow_segsiz)) &&
 	    ((rsm->r_flags & RACK_PMTU_CHG) == 0) &&
 	    ((rsm->r_flags & RACK_TLP) == 0)) {
 		/*
 		 * It's smaller than both a full MSS' worth and the segment
 		 * being acked; this should only happen if the rsm in question
 		 * had the r_just_ret flag set <and> the end matches the end
 		 * of the rsm block.
 		 *
 		 * Note we do not look at segments that have had TLP's on
 		 * them since we can get un-reported rwnd collapses where
 		 * basically we TLP on and then we get back a sack block
 		 * that goes from the start to only a small way in.
 		 *
 		 */
 		int loss, ok;
 
 		ok = 0;
 		if (SEQ_GEQ(end, rsm->r_end)) {
 			if (rsm->r_just_ret == 1) {
 				/* This was at the end of a send which is ok */
 				ok = 1;
 			} else {
				/* A bit harder: was it the end of our segment? */
 				int segs, len;
 
 				len = (rsm->r_end - rsm->r_start);
 				segs = len / segsiz;
 				segs *= segsiz;
 				if ((segs + (rsm->r_end - start)) == len) {
 					/*
 					 * So this last bit was the
 					 * end of our send if we cut it
 					 * up into segsiz pieces so its ok.
 					 */
 					ok = 1;
 				}
 			}
 		}
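		/*
		 * Example of the check above (hypothetical numbers): with
		 * segsiz = 1448 and a 3000 byte rsm, segs rounds down to
		 * 2896, leaving a 104 byte trailer; a sack block whose
		 * start leaves (rsm->r_end - start) == 104 covers exactly
		 * that trailer and is therefore considered ok.
		 */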
 		if (ok == 0) {
 			/*
 			 * This guy is doing something suspicious
 			 * lets start detection.
 			 */
 			if (rack->rc_suspicious == 0) {
 				tcp_trace_point(rack->rc_tp, TCP_TP_SAD_SUSPECT);
 				counter_u64_add(rack_sack_attacks_suspect, 1);
 				rack->rc_suspicious = 1;
 				rack_log_sad(rack, 4);
 				if (tcp_bblogging_on(rack->rc_tp)) {
 					union tcp_log_stackspecific log;
 					struct timeval tv;
 
 					memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 					log.u_bbr.flex1 = end;
 					log.u_bbr.flex2 = start;
 					log.u_bbr.flex3 = rsm->r_end;
 					log.u_bbr.flex4 = rsm->r_start;
 					log.u_bbr.flex5 = segsiz;
 					log.u_bbr.flex6 = rsm->r_fas;
 					log.u_bbr.flex7 = rsm->r_bas;
 					log.u_bbr.flex8 = 5;
 					log.u_bbr.pkts_out = rsm->r_flags;
 					log.u_bbr.bbr_state = rack->rc_suspicious;
 					log.u_bbr.bbr_substate = rsm->r_just_ret;
 					log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 					log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 					TCP_LOG_EVENTP(rack->rc_tp, NULL,
 						       &rack->rc_inp->inp_socket->so_rcv,
 						       &rack->rc_inp->inp_socket->so_snd,
 						       TCP_SAD_DETECTION, 0,
 						       0, &log, false, &tv);
 				}
 			}
			/* You lose some ack count every time you sack
			 * a small bit that does not butt up against the end of
 			 * what we have sent. This is because we never
 			 * send small bits unless its the end of the sb.
 			 * Anyone sending a sack that is not at the end
 			 * is thus very very suspicious.
 			 */
 			loss = (segsiz/2) / (end - start);
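			/*
			 * For example (hypothetical numbers): with
			 * segsiz = 1448, a 100 byte sack block gives
			 * loss = 724 / 100 = 7, so seven units of ack
			 * credit are deducted below.
			 */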
 			if (loss < rack->r_ctl.ack_count)
 				rack->r_ctl.ack_count -= loss;
 			else
 				rack->r_ctl.ack_count = 0;
 		}
 	}
 	first_time_through = 0;
 #endif
 	/* Ok we have an ACK for some piece of this rsm */
 	if (rsm->r_start != start) {
 		if ((rsm->r_flags & RACK_ACKED) == 0) {
 			/*
 			 * Before any splitting or hookery is
 			 * done is it a TLP of interest i.e. rxt?
 			 */
 			if ((rsm->r_flags & RACK_TLP) &&
 			    (rsm->r_rtr_cnt > 1)) {
 				/*
 				 * We are splitting a rxt TLP, check
 				 * if we need to save off the start/end
 				 */
 				if (rack->rc_last_tlp_acked_set &&
 				    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
 					/*
					 * We already turned this on since we are inside
					 * the declared TLP block; the previous one was a
					 * partial sack, now we are getting another one
					 * (maybe all of it).
 					 *
 					 */
 					rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
 					/*
 					 * Lets make sure we have all of it though.
 					 */
 					if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
 						rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 								     rack->r_ctl.last_tlp_acked_end);
 					}
 					if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
 						rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 								     rack->r_ctl.last_tlp_acked_end);
 					}
 				} else {
 					rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 					rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 					rack->rc_last_tlp_past_cumack = 0;
 					rack->rc_last_tlp_acked_set = 1;
 					rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
 				}
 			}
 			/**
 			 * Need to split this in two pieces the before and after,
 			 * the before remains in the map, the after must be
 			 * added. In other words we have:
 			 * rsm        |--------------|
 			 * sackblk        |------->
 			 * rsm will become
 			 *     rsm    |---|
 			 * and nrsm will be  the sacked piece
 			 *     nrsm       |----------|
 			 *
 			 * But before we start down that path lets
 			 * see if the sack spans over on top of
 			 * the next guy and it is already sacked.
 			 *
 			 */
 			/*
 			 * Hookery can only be used if the two entries
 			 * are in the same bucket and neither one of
			 * them straddles the bucket line.
 			 */
 			next = tqhash_next(rack->r_ctl.tqh, rsm);
 			if (next &&
 			    (rsm->bindex == next->bindex) &&
 			    ((rsm->r_flags & RACK_STRADDLE) == 0) &&
 			    ((next->r_flags & RACK_STRADDLE) == 0) &&
 			    (rsm->r_flags & RACK_IN_GP_WIN) &&
 			    (next->r_flags & RACK_IN_GP_WIN))
 				can_use_hookery = 1;
 			else if (next &&
 				 (rsm->bindex == next->bindex) &&
 				 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
 				 ((next->r_flags & RACK_STRADDLE) == 0) &&
 				 ((rsm->r_flags & RACK_IN_GP_WIN) == 0) &&
 				 ((next->r_flags & RACK_IN_GP_WIN) == 0))
 				can_use_hookery = 1;
 			else
 				can_use_hookery = 0;
 			if (next && can_use_hookery &&
 			    (next->r_flags & RACK_ACKED) &&
 			    SEQ_GEQ(end, next->r_start)) {
 				/**
 				 * So the next one is already acked, and
 				 * we can thus by hookery use our stack_map
 				 * to reflect the piece being sacked and
 				 * then adjust the two tree entries moving
 				 * the start and ends around. So we start like:
 				 *  rsm     |------------|             (not-acked)
 				 *  next                 |-----------| (acked)
 				 *  sackblk        |-------->
 				 *  We want to end like so:
 				 *  rsm     |------|                   (not-acked)
 				 *  next           |-----------------| (acked)
 				 *  nrsm           |-----|
 				 * Where nrsm is a temporary stack piece we
 				 * use to update all the gizmos.
 				 */
 				/* Copy up our fudge block */
 				noextra++;
 				nrsm = &stack_map;
 				memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
 				/* Now adjust our tree blocks */
 				rsm->r_end = start;
 				next->r_start = start;
  				rsm->r_flags |= RACK_SHUFFLED;
 				next->r_flags |= RACK_SHUFFLED;
 				/* Now we must adjust back where next->m is */
 				rack_setup_offset_for_rsm(rack, rsm, next);
 				/*
 				 * Which timestamp do we keep? It is rather
 				 * important in GP measurements to have the
 				 * accurate end of the send window.
 				 *
 				 * We keep the largest value, which is the newest
 				 * send. We do this in case a segment that is
 				 * joined together and not part of a GP estimate
 				 * later gets expanded into the GP estimate.
 				 *
 				 * We prohibit the merging of unlike kinds i.e.
 				 * all pieces that are in the GP estimate can be
 				 * merged and all pieces that are not in a GP estimate
				 * can be merged, but not dissimilar pieces. Combine
 				 * this with taking the highest here and we should
 				 * be ok unless of course the client reneges. Then
 				 * all bets are off.
 				 */
 				if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] <
 				    nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)])
 					next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)];
 				/*
 				 * And we must keep the newest ack arrival time.
 				 */
 				if (next->r_ack_arrival <
 				    rack_to_usec_ts(&rack->r_ctl.act_rcv_time))
 					next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
 
 
 				/* We don't need to adjust rsm, it did not change */
 				/* Clear out the dup ack count of the remainder */
 				rsm->r_dupack = 0;
 				rsm->r_just_ret = 0;
 				rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
 				/* Now lets make sure our fudge block is right */
 				nrsm->r_start = start;
 				/* Now lets update all the stats and such */
 				rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
 				if (rack->app_limited_needs_set)
 					rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
 				changed += (nrsm->r_end - nrsm->r_start);
 				/* You get a count for acking a whole segment or more */
 				if ((nrsm->r_end - nrsm->r_start) >= segsiz)
 					rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz);
 				rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
 				if (nrsm->r_flags & RACK_SACK_PASSED) {
 					rack->r_ctl.rc_reorder_ts = cts;
 					if (rack->r_ctl.rc_reorder_ts == 0)
 						rack->r_ctl.rc_reorder_ts = 1;
 				}
 				/*
 				 * Now we want to go up from rsm (the
 				 * one left un-acked) to the next one
 				 * in the tmap. We do this so when
 				 * we walk backwards we include marking
 				 * sack-passed on rsm (The one passed in
 				 * is skipped since it is generally called
 				 * on something sacked before removing it
 				 * from the tmap).
 				 */
 				if (rsm->r_in_tmap) {
 					nrsm = TAILQ_NEXT(rsm, r_tnext);
 					/*
 					 * Now that we have the next
 					 * one walk backwards from there.
 					 */
 					if (nrsm && nrsm->r_in_tmap)
 						rack_log_sack_passed(tp, rack, nrsm);
 				}
 				/* Now are we done? */
 				if (SEQ_LT(end, next->r_end) ||
 				    (end == next->r_end)) {
 					/* Done with block */
 					goto out;
 				}
 				rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
 				counter_u64_add(rack_sack_used_next_merge, 1);
				/* Position for the next block */
 				start = next->r_end;
 				rsm = tqhash_next(rack->r_ctl.tqh, next);
 				if (rsm == NULL)
 					goto out;
 			} else {
 				/**
 				 * We can't use any hookery here, so we
 				 * need to split the map. We enter like
 				 * so:
 				 *  rsm      |--------|
 				 *  sackblk       |----->
 				 * We will add the new block nrsm and
 				 * that will be the new portion, and then
				 * fall through after resetting rsm. So we
 				 * split and look like this:
 				 *  rsm      |----|
 				 *  sackblk       |----->
 				 *  nrsm          |---|
				 * We then fall through resetting
 				 * rsm to nrsm, so the next block
 				 * picks it up.
 				 */
 				nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
 				if (nrsm == NULL) {
 					/*
					 * failed XXXrrs what can we do but lose the sack
 					 * info?
 					 */
 					goto out;
 				}
 				counter_u64_add(rack_sack_splits, 1);
 				rack_clone_rsm(rack, nrsm, rsm, start);
 				moved++;
 				rsm->r_just_ret = 0;
 #ifndef INVARIANTS
 				(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
 #else
 				if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
 					panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
 					      nrsm, insret, rack, rsm);
 				}
 #endif
 				if (rsm->r_in_tmap) {
 					TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 					nrsm->r_in_tmap = 1;
 				}
 				rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
 				rsm->r_flags &= (~RACK_HAS_FIN);
 				/* Position us to point to the new nrsm that starts the sack blk */
 				rsm = nrsm;
 			}
 		} else {
 			/* Already sacked this piece */
 			counter_u64_add(rack_sack_skipped_acked, 1);
 			moved++;
 			if (end == rsm->r_end) {
 				/* Done with block */
 				rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 				goto out;
 			} else if (SEQ_LT(end, rsm->r_end)) {
				/* A partial sack to an already sacked block */
 				moved++;
 				rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 				goto out;
 			} else {
 				/*
 				 * The end goes beyond this guy
 				 * reposition the start to the
 				 * next block.
 				 */
 				start = rsm->r_end;
 				rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 				if (rsm == NULL)
 					goto out;
 			}
 		}
 	}
 	if (SEQ_GEQ(end, rsm->r_end)) {
 		/**
 		 * The end of this block is either beyond this guy or right
 		 * at this guy. I.e.:
 		 *  rsm ---                 |-----|
 		 *  end                     |-----|
 		 *  <or>
 		 *  end                     |---------|
 		 */
 		if ((rsm->r_flags & RACK_ACKED) == 0) {
 			/*
 			 * Is it a TLP of interest?
 			 */
 			if ((rsm->r_flags & RACK_TLP) &&
 			    (rsm->r_rtr_cnt > 1)) {
 				/*
 				 * We are splitting a rxt TLP, check
 				 * if we need to save off the start/end
 				 */
 				if (rack->rc_last_tlp_acked_set &&
 				    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
 					/*
					 * We already turned this on since we are inside
					 * the declared TLP block; the previous one was a
					 * partial sack, now we are getting another one
					 * (maybe all of it).
 					 */
 					rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
 					/*
 					 * Lets make sure we have all of it though.
 					 */
 					if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
 						rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 								     rack->r_ctl.last_tlp_acked_end);
 					}
 					if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
 						rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 								     rack->r_ctl.last_tlp_acked_end);
 					}
 				} else {
 					rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 					rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 					rack->rc_last_tlp_past_cumack = 0;
 					rack->rc_last_tlp_acked_set = 1;
 					rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
 				}
 			}
 			rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
 			changed += (rsm->r_end - rsm->r_start);
 			/* You get a count for acking a whole segment or more */
 			if ((rsm->r_end - rsm->r_start) >= segsiz)
 				rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz);
 			rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
 			if (rsm->r_in_tmap) /* should be true */
 				rack_log_sack_passed(tp, rack, rsm);
			/* Is Reordering occurring? */
 			if (rsm->r_flags & RACK_SACK_PASSED) {
 				rsm->r_flags &= ~RACK_SACK_PASSED;
 				rack->r_ctl.rc_reorder_ts = cts;
 				if (rack->r_ctl.rc_reorder_ts == 0)
 					rack->r_ctl.rc_reorder_ts = 1;
 			}
 			if (rack->app_limited_needs_set)
 				rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
 			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
 			rsm->r_flags |= RACK_ACKED;
 			if (rsm->r_in_tmap) {
 				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 				rsm->r_in_tmap = 0;
 			}
 			rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
 		} else {
 			counter_u64_add(rack_sack_skipped_acked, 1);
 			moved++;
 		}
 		if (end == rsm->r_end) {
 			/* This block only - done, setup for next */
 			goto out;
 		}
 		/*
		 * There is more not covered by this rsm; move on
		 * to the next block in the tail queue hash table.
 		 */
 		nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
 		start = rsm->r_end;
 		rsm = nrsm;
 		if (rsm == NULL)
 			goto out;
 		goto do_rest_ofb;
 	}
 	/**
 	 * The end of this sack block is smaller than
 	 * our rsm i.e.:
 	 *  rsm ---                 |-----|
 	 *  end                     |--|
 	 */
 	if ((rsm->r_flags & RACK_ACKED) == 0) {
 		/*
 		 * Is it a TLP of interest?
 		 */
 		if ((rsm->r_flags & RACK_TLP) &&
 		    (rsm->r_rtr_cnt > 1)) {
 			/*
 			 * We are splitting a rxt TLP, check
 			 * if we need to save off the start/end
 			 */
 			if (rack->rc_last_tlp_acked_set &&
 			    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
 				/*
				 * We already turned this on since we are inside
				 * the declared TLP block; the previous one was a
				 * partial sack, now we are getting another one
				 * (maybe all of it).
 				 */
 				rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
 				/*
 				 * Lets make sure we have all of it though.
 				 */
 				if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
 					rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 					rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 							     rack->r_ctl.last_tlp_acked_end);
 				}
 				if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
 					rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 					rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 							     rack->r_ctl.last_tlp_acked_end);
 				}
 			} else {
 				rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 				rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 				rack->rc_last_tlp_past_cumack = 0;
 				rack->rc_last_tlp_acked_set = 1;
 				rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
 			}
 		}
 		/*
 		 * Hookery can only be used if the two entries
 		 * are in the same bucket and neither one of
		 * them straddles the bucket line.
 		 */
 		prev = tqhash_prev(rack->r_ctl.tqh, rsm);
 		if (prev &&
 		    (rsm->bindex == prev->bindex) &&
 		    ((rsm->r_flags & RACK_STRADDLE) == 0) &&
 		    ((prev->r_flags & RACK_STRADDLE) == 0) &&
 		    (rsm->r_flags & RACK_IN_GP_WIN) &&
 		    (prev->r_flags & RACK_IN_GP_WIN))
 			can_use_hookery = 1;
 		else if (prev &&
 			 (rsm->bindex == prev->bindex) &&
 			 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
 			 ((prev->r_flags & RACK_STRADDLE) == 0) &&
 			 ((rsm->r_flags & RACK_IN_GP_WIN) == 0) &&
 			 ((prev->r_flags & RACK_IN_GP_WIN) == 0))
 			can_use_hookery = 1;
 		else
 			can_use_hookery = 0;
 
 		if (prev && can_use_hookery &&
 		    (prev->r_flags & RACK_ACKED)) {
 			/**
 			 * Goal, we want the right remainder of rsm to shrink
 			 * in place and span from (rsm->r_start = end) to rsm->r_end.
 			 * We want to expand prev to go all the way
 			 * to prev->r_end <- end.
 			 * so in the tree we have before:
 			 *   prev     |--------|         (acked)
 			 *   rsm               |-------| (non-acked)
 			 *   sackblk           |-|
 			 * We churn it so we end up with
 			 *   prev     |----------|       (acked)
 			 *   rsm                 |-----| (non-acked)
 			 *   nrsm              |-| (temporary)
 			 *
 			 * Note if either prev/rsm is a TLP we don't
 			 * do this.
 			 */
 			noextra++;
 			nrsm = &stack_map;
 			memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
 			prev->r_end = end;
 			rsm->r_start = end;
 			rsm->r_flags |= RACK_SHUFFLED;
 			prev->r_flags |= RACK_SHUFFLED;
 			/* Now adjust nrsm (stack copy) to be
 			 * the one that is the small
 			 * piece that was "sacked".
 			 */
 			nrsm->r_end = end;
 			rsm->r_dupack = 0;
 			/*
 			 * Which timestamp do we keep? It is rather
 			 * important in GP measurements to have the
 			 * accurate end of the send window.
 			 *
 			 * We keep the largest value, which is the newest
 			 * send. We do this in case a segment that is
 			 * joined together and not part of a GP estimate
 			 * later gets expanded into the GP estimate.
 			 *
 			 * We prohibit the merging of unlike kinds i.e.
 			 * all pieces that are in the GP estimate can be
 			 * merged and all pieces that are not in a GP estimate
			 * can be merged, but not dissimilar pieces. Combine
 			 * this with taking the highest here and we should
 			 * be ok unless of course the client reneges. Then
 			 * all bets are off.
 			 */
 			if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] <
 			   nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) {
				prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)];
 			}
 			/*
 			 * And we must keep the newest ack arrival time.
 			 */
 
 			if(prev->r_ack_arrival <
 			   rack_to_usec_ts(&rack->r_ctl.act_rcv_time))
 				prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
 
 			rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
 			/*
 			 * Now that the rsm has had its start moved forward
 			 * lets go ahead and get its new place in the world.
 			 */
 			rack_setup_offset_for_rsm(rack, prev, rsm);
 			/*
 			 * Now nrsm is our new little piece
 			 * that is acked (which was merged
 			 * to prev). Update the rtt and changed
 			 * based on that. Also check for reordering.
 			 */
 			rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
 			if (rack->app_limited_needs_set)
 				rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
 			changed += (nrsm->r_end - nrsm->r_start);
 			/* You get a count for acking a whole segment or more */
 			if ((nrsm->r_end - nrsm->r_start) >= segsiz)
 				rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz);
 
 			rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
 			if (nrsm->r_flags & RACK_SACK_PASSED) {
 				rack->r_ctl.rc_reorder_ts = cts;
 				if (rack->r_ctl.rc_reorder_ts == 0)
 					rack->r_ctl.rc_reorder_ts = 1;
 			}
 			rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
 			rsm = prev;
 			counter_u64_add(rack_sack_used_prev_merge, 1);
 		} else {
 			/**
 			 * This is the case where our previous
 			 * block is not acked either, so we must
 			 * split the block in two.
 			 */
 			nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
 			if (nrsm == NULL) {
				/* failed rrs what can we do but lose the sack info? */
 				goto out;
 			}
 			if ((rsm->r_flags & RACK_TLP) &&
 			    (rsm->r_rtr_cnt > 1)) {
 				/*
 				 * We are splitting a rxt TLP, check
 				 * if we need to save off the start/end
 				 */
 				if (rack->rc_last_tlp_acked_set &&
 				    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
 					/*
					 * We already turned this on since this block is inside
					 * the declared TLP block; the previous one was a
					 * partial sack, now we are getting another one
					 * (maybe all of it).
 					 */
 					rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
 					/*
 					 * Lets make sure we have all of it though.
 					 */
 					if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
 						rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 								     rack->r_ctl.last_tlp_acked_end);
 					}
 					if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
 						rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 						rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 								     rack->r_ctl.last_tlp_acked_end);
 					}
 				} else {
 					rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 					rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 					rack->rc_last_tlp_acked_set = 1;
 					rack->rc_last_tlp_past_cumack = 0;
 					rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
 				}
 			}
 			/**
 			 * In this case nrsm becomes
 			 * nrsm->r_start = end;
 			 * nrsm->r_end = rsm->r_end;
 			 * which is un-acked.
 			 * <and>
 			 * rsm->r_end = nrsm->r_start;
 			 * i.e. the remaining un-acked
 			 * piece is left on the left
 			 * hand side.
 			 *
 			 * So we start like this
 			 * rsm      |----------| (not acked)
 			 * sackblk  |---|
 			 * build it so we have
 			 * rsm      |---|         (acked)
 			 * nrsm         |------|  (not acked)
 			 */
 			counter_u64_add(rack_sack_splits, 1);
 			rack_clone_rsm(rack, nrsm, rsm, end);
 			moved++;
 			rsm->r_flags &= (~RACK_HAS_FIN);
 			rsm->r_just_ret = 0;
 #ifndef INVARIANTS
 			(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
 #else
 			if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
				panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
 				      nrsm, insret, rack, rsm);
 			}
 #endif
 			if (rsm->r_in_tmap) {
 				TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 				nrsm->r_in_tmap = 1;
 			}
 			nrsm->r_dupack = 0;
 			rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
 			rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
 			changed += (rsm->r_end - rsm->r_start);
 			/* You get a count for acking a whole segment or more */
 			if ((rsm->r_end - rsm->r_start) >= segsiz)
 				rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz);
 
 			rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
 			if (rsm->r_in_tmap) /* should be true */
 				rack_log_sack_passed(tp, rack, rsm);
			/* Is Reordering occurring? */
 			if (rsm->r_flags & RACK_SACK_PASSED) {
 				rsm->r_flags &= ~RACK_SACK_PASSED;
 				rack->r_ctl.rc_reorder_ts = cts;
 				if (rack->r_ctl.rc_reorder_ts == 0)
 					rack->r_ctl.rc_reorder_ts = 1;
 			}
 			if (rack->app_limited_needs_set)
 				rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
 			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
 			rsm->r_flags |= RACK_ACKED;
 			rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
 			if (rsm->r_in_tmap) {
 				TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 				rsm->r_in_tmap = 0;
 			}
 		}
 	} else if (start != end){
 		/*
 		 * The block was already acked.
 		 */
 		counter_u64_add(rack_sack_skipped_acked, 1);
 		moved++;
 	}
 out:
 	if (rsm &&
 	    ((rsm->r_flags & RACK_TLP) == 0) &&
 	    (rsm->r_flags & RACK_ACKED)) {
 		/*
 		 * Now can we merge where we worked
 		 * with either the previous or
 		 * next block?
 		 */
 		next = tqhash_next(rack->r_ctl.tqh, rsm);
 		while (next) {
 			if (next->r_flags & RACK_TLP)
 				break;
 			/* Only allow merges between ones in or out of GP window */
 			if ((next->r_flags & RACK_IN_GP_WIN) &&
 			    ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
 				break;
 			}
 			if ((rsm->r_flags & RACK_IN_GP_WIN) &&
 			    ((next->r_flags & RACK_IN_GP_WIN) == 0)) {
 				break;
 			}
 			if (rsm->bindex != next->bindex)
 				break;
 			if (rsm->r_flags & RACK_STRADDLE)
 				break;
 			if (next->r_flags & RACK_STRADDLE)
 				break;
 			if (next->r_flags & RACK_ACKED) {
 				/* yep this and next can be merged */
 				rsm = rack_merge_rsm(rack, rsm, next);
 				noextra++;
 				next = tqhash_next(rack->r_ctl.tqh, rsm);
 			} else
 				break;
 		}
 		/* Now what about the previous? */
 		prev = tqhash_prev(rack->r_ctl.tqh, rsm);
 		while (prev) {
 			if (prev->r_flags & RACK_TLP)
 				break;
 			/* Only allow merges between ones in or out of GP window */
 			if ((prev->r_flags & RACK_IN_GP_WIN) &&
 			    ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
 				break;
 			}
 			if ((rsm->r_flags & RACK_IN_GP_WIN) &&
 			    ((prev->r_flags & RACK_IN_GP_WIN) == 0)) {
 				break;
 			}
 			if (rsm->bindex != prev->bindex)
 				break;
 			if (rsm->r_flags & RACK_STRADDLE)
 				break;
 			if (prev->r_flags & RACK_STRADDLE)
 				break;
 			if (prev->r_flags & RACK_ACKED) {
 				/* yep the previous and this can be merged */
 				rsm = rack_merge_rsm(rack, prev, rsm);
 				noextra++;
 				prev = tqhash_prev(rack->r_ctl.tqh, rsm);
 			} else
 				break;
 		}
 	}
 	if (used_ref == 0) {
 		counter_u64_add(rack_sack_proc_all, 1);
 	} else {
 		counter_u64_add(rack_sack_proc_short, 1);
 	}
 	/* Save off the next one for quick reference. */
 	nrsm = tqhash_find(rack->r_ctl.tqh, end);
 	*prsm = rack->r_ctl.rc_sacklast = nrsm;
 	/* Pass back the moved. */
 	*moved_two = moved;
 	*no_extra = noextra;
 	return (changed);
 }
 
 static void inline
 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
 {
 	struct rack_sendmap *tmap;
 
 	tmap = NULL;
 	while (rsm && (rsm->r_flags & RACK_ACKED)) {
 		/* Its no longer sacked, mark it so */
 		rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
 #ifdef INVARIANTS
 		if (rsm->r_in_tmap) {
 			panic("rack:%p rsm:%p flags:0x%x in tmap?",
 			      rack, rsm, rsm->r_flags);
 		}
 #endif
 		rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
 		/* Rebuild it into our tmap */
 		if (tmap == NULL) {
 			TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 			tmap = rsm;
 		} else {
 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
 			tmap = rsm;
 		}
 		tmap->r_in_tmap = 1;
 		rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 	}
 	/*
 	 * Now lets possibly clear the sack filter so we start
 	 * recognizing sacks that cover this area.
 	 */
 	sack_filter_clear(&rack->r_ctl.rack_sf, th_ack);
 
 }
 
 static void
 rack_do_decay(struct tcp_rack *rack)
 {
 	struct timeval res;
 
 #define	timersub(tvp, uvp, vvp)						\
 	do {								\
 		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
 		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
 		if ((vvp)->tv_usec < 0) {				\
 			(vvp)->tv_sec--;				\
 			(vvp)->tv_usec += 1000000;			\
 		}							\
 	} while (0)
 
 	timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res);
 #undef timersub
 
 	rack->r_ctl.input_pkt++;
 	if ((rack->rc_in_persist) ||
 	    (res.tv_sec >= 1) ||
 	    (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
 		/*
 		 * Check for decay of non-SAD,
		 * we want all SAD detection metrics to
		 * decay by roughly 1/4 for each second (or more)
		 * that passes. The current default of 800 means
		 * each metric decays to 80% of its value every second.
 		 */
 #ifdef TCP_SAD_DETECTION
 		uint32_t pkt_delta;
 
 		pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt;
 #endif
 		/* Update our saved tracking values */
 		rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt;
 		rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
 		/* Now do we escape without decay? */
 #ifdef TCP_SAD_DETECTION
 		if (rack->rc_in_persist ||
 		    (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
 		    (pkt_delta < tcp_sad_low_pps)){
 			/*
 			 * We don't decay idle connections
 			 * or ones that have a low input pps.
 			 */
 			return;
 		}
 		/* Decay the counters */
 		rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count,
 							tcp_sad_decay_val);
 		rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count,
 							 tcp_sad_decay_val);
 		rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra,
 							       tcp_sad_decay_val);
 		rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
 								tcp_sad_decay_val);
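		/*
		 * For example, assuming ctf_decay_count() scales by
		 * tcp_sad_decay_val per mille (the 800 default noted
		 * above), an ack_count of 1000 becomes 800 after one
		 * pass through here.
		 */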
 #endif
 	}
 }
 
 static void inline
 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from)
 {
 	/*
 	 * We look at advancing the end send time for our GP
 	 * measurement tracking only as the cumulative acknowledgment
 	 * moves forward. You might wonder about this, why not
 	 * at every transmission or retransmission within the
 	 * GP window update the rc_gp_cumack_ts? Well its rather
 	 * nuanced but basically the GP window *may* expand (as
 	 * it does below) or worse and harder to track it may shrink.
 	 *
 	 * This last makes it impossible to track at the time of
 	 * the send, since you may set forward your rc_gp_cumack_ts
 	 * when you send, because that send *is* in your currently
 	 * "guessed" window, but then it shrinks. Now which was
 	 * the send time of the last bytes in the window, by the
 	 * time you ask that question that part of the sendmap
 	 * is freed. So you don't know and you will have too
	 * long of a send window. Instead, by updating the time
 	 * marker only when the cumack advances this assures us
 	 * that we will have only the sends in the window of our
 	 * GP measurement.
 	 *
 	 * Another complication from this is the
 	 * merging of sendmap entries. During SACK processing this
 	 * can happen to conserve the sendmap size. That breaks
 	 * everything down in tracking the send window of the GP
 	 * estimate. So to prevent that and keep it working with
 	 * a tiny bit more limited merging, we only allow like
 	 * types to be merged. I.e. if two sends are in the GP window
 	 * then its ok to merge them together. If two sends are not
 	 * in the GP window its ok to merge them together too. Though
 	 * one send in and one send out cannot be merged. We combine
 	 * this with never allowing the shrinking of the GP window when
 	 * we are in recovery so that we can properly calculate the
 	 * sending times.
 	 *
 	 * This all of course seems complicated, because it is.. :)
 	 *
 	 * The cum-ack is being advanced upon the sendmap.
 	 * If we are not doing a GP estimate don't
 	 * proceed.
 	 */
 	uint64_t ts;
 
 	if ((tp->t_flags & TF_GPUTINPROG) == 0)
 		return;
 	/*
 	 * If this sendmap entry is going
 	 * beyond the measurement window we had picked,
 	 * expand the measurement window by that much.
 	 */
 	if (SEQ_GT(rsm->r_end, tp->gput_ack)) {
 		tp->gput_ack = rsm->r_end;
 	}
 	/*
	 * If we have not set up an ack, then we
 	 * have no idea if the newly acked pieces
 	 * will be "in our seq measurement range". If
 	 * it is when we clear the app_limited_needs_set
 	 * flag the timestamp will be updated.
 	 */
 	if (rack->app_limited_needs_set)
 		return;
 	/*
 	 * Finally, we grab out the latest timestamp
 	 * that this packet was sent and then see
 	 * if:
	 *  a) The packet touches our newly defined GP range.
 	 *  b) The time is greater than (newer) than the
 	 *     one we currently have. If so we update
 	 *     our sending end time window.
 	 *
 	 * Note we *do not* do this at send time. The reason
 	 * is that if you do you *may* pick up a newer timestamp
 	 * for a range you are not going to measure. We project
 	 * out how far and then sometimes modify that to be
 	 * smaller. If that occurs then you will have a send
 	 * that does not belong to the range included.
 	 */
 	if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
 	    rack->r_ctl.rc_gp_cumack_ts)
 		return;
 	if (rack_in_gp_window(tp, rsm)) {
 		rack->r_ctl.rc_gp_cumack_ts = ts;
 		rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
 			       __LINE__, from, rsm);
 	}
 }
 
 static void
 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime)
 {
 	struct rack_sendmap *rsm;
 	/*
 	 * The ACK point is advancing to th_ack, we must drop off
	 * the packets in the rack log and calculate any eligible
 	 * RTT's.
 	 */
 
 	rack->r_wanted_output = 1;
 	if (SEQ_GT(th_ack, tp->snd_una))
 		rack->r_ctl.last_cumack_advance = acktime;
 
 	/* Tend any TLP that has been marked for 1/2 the seq space (its old)  */
 	if ((rack->rc_last_tlp_acked_set == 1)&&
 	    (rack->rc_last_tlp_past_cumack == 1) &&
 	    (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) {
 		/*
 		 * We have reached the point where our last rack
 		 * tlp retransmit sequence is ahead of the cum-ack.
 		 * This can only happen when the cum-ack moves all
 		 * the way around (its been a full 2^^31+1 bytes
 		 * or more since we sent a retransmitted TLP). Lets
 		 * turn off the valid flag since its not really valid.
 		 *
 		 * Note since sack's also turn on this event we have
 		 * a complication, we have to wait to age it out until
 		 * the cum-ack is by the TLP before checking which is
 		 * what the next else clause does.
 		 */
 		rack_log_dsack_event(rack, 9, __LINE__,
 				     rack->r_ctl.last_tlp_acked_start,
 				     rack->r_ctl.last_tlp_acked_end);
 		rack->rc_last_tlp_acked_set = 0;
 		rack->rc_last_tlp_past_cumack = 0;
 	} else if ((rack->rc_last_tlp_acked_set == 1) &&
 		   (rack->rc_last_tlp_past_cumack == 0) &&
 		   (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) {
 		/*
 		 * It is safe to start aging TLP's out.
 		 */
 		rack->rc_last_tlp_past_cumack = 1;
 	}
 	/* We do the same for the tlp send seq as well */
 	if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
 	    (rack->rc_last_sent_tlp_past_cumack == 1) &&
 	    (SEQ_GT(rack->r_ctl.last_sent_tlp_seq,  th_ack))) {
 		rack_log_dsack_event(rack, 9, __LINE__,
 				     rack->r_ctl.last_sent_tlp_seq,
 				     (rack->r_ctl.last_sent_tlp_seq +
 				      rack->r_ctl.last_sent_tlp_len));
 		rack->rc_last_sent_tlp_seq_valid = 0;
 		rack->rc_last_sent_tlp_past_cumack = 0;
 	} else if ((rack->rc_last_sent_tlp_seq_valid == 1) &&
 		   (rack->rc_last_sent_tlp_past_cumack == 0) &&
 		   (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) {
 		/*
 		 * It is safe to start aging TLP's send.
 		 */
 		rack->rc_last_sent_tlp_past_cumack = 1;
 	}
 more:
 	rsm = tqhash_min(rack->r_ctl.tqh);
 	if (rsm == NULL) {
 		if ((th_ack - 1) == tp->iss) {
 			/*
 			 * For the SYN incoming case we will not
 			 * have called tcp_output for the sending of
 			 * the SYN, so there will be no map. All
 			 * other cases should probably be a panic.
 			 */
 			return;
 		}
 		if (tp->t_flags & TF_SENTFIN) {
 			/* if we sent a FIN we often will not have map */
 			return;
 		}
 #ifdef INVARIANTS
 		panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n",
 		      tp,
 		      tp->t_state, th_ack, rack,
 		      tp->snd_una, tp->snd_max, tp->snd_nxt);
 #endif
 		return;
 	}
 	if (SEQ_LT(th_ack, rsm->r_start)) {
 		/* Huh map is missing this */
 #ifdef INVARIANTS
 		printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
 		       rsm->r_start,
 		       th_ack, tp->t_state, rack->r_state);
 #endif
 		return;
 	}
 	rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
 
 	/* Now was it a retransmitted TLP? */
 	if ((rsm->r_flags & RACK_TLP) &&
 	    (rsm->r_rtr_cnt > 1)) {
 		/*
 		 * Yes, this rsm was a TLP and retransmitted, remember that
 		 * since if a DSACK comes back on this we don't want
 		 * to think of it as a reordered segment. This may
 		 * get updated again with possibly even other TLPs
 		 * in flight, but thats ok. Only when we don't send
 		 * a retransmitted TLP for 1/2 the sequences space
 		 * will it get turned off (above).
 		 */
 		if (rack->rc_last_tlp_acked_set &&
 		    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
 			/*
			 * We already turned this on since the end matches;
			 * the previous one was a partial ack, now we
			 * are getting another one (maybe all of it).
 			 */
 			rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
 			/*
 			 * Lets make sure we have all of it though.
 			 */
 			if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
 				rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 				rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 						     rack->r_ctl.last_tlp_acked_end);
 			}
 			if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
 				rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 				rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
 						     rack->r_ctl.last_tlp_acked_end);
 			}
 		} else {
 			rack->rc_last_tlp_past_cumack = 1;
 			rack->r_ctl.last_tlp_acked_start = rsm->r_start;
 			rack->r_ctl.last_tlp_acked_end = rsm->r_end;
 			rack->rc_last_tlp_acked_set = 1;
 			rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
 		}
 	}
 	/* Now do we consume the whole thing? */
 	rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
 	if (SEQ_GEQ(th_ack, rsm->r_end)) {
 		/* Its all consumed. */
 		uint32_t left;
 		uint8_t newly_acked;
 
 		rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
 		rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
 		rsm->r_rtr_bytes = 0;
 		/*
 		 * Record the time of highest cumack sent if its in our measurement
 		 * window and possibly bump out the end.
 		 */
 		rack_rsm_sender_update(rack, tp, rsm, 4);
 		tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
 		if (rsm->r_in_tmap) {
 			TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 			rsm->r_in_tmap = 0;
 		}
 		newly_acked = 1;
 		if (rsm->r_flags & RACK_ACKED) {
 			/*
 			 * It was acked on the scoreboard -- remove
 			 * it from total
 			 */
 			rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
 			newly_acked = 0;
 		} else if (rsm->r_flags & RACK_SACK_PASSED) {
 			/*
 			 * There are segments ACKED on the
 			 * scoreboard further up. We are seeing
 			 * reordering.
 			 */
 			rsm->r_flags &= ~RACK_SACK_PASSED;
 			rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
 			rsm->r_flags |= RACK_ACKED;
 			rack->r_ctl.rc_reorder_ts = cts;
 			if (rack->r_ctl.rc_reorder_ts == 0)
 				rack->r_ctl.rc_reorder_ts = 1;
 			if (rack->r_ent_rec_ns) {
 				/*
				 * We have sent no more, and we saw a sack
				 * and then an ack arrive.
 				 */
 				rack->r_might_revert = 1;
 			}
 		}
 		if ((rsm->r_flags & RACK_TO_REXT) &&
 		    (tp->t_flags & TF_RCVD_TSTMP) &&
 		    (to->to_flags & TOF_TS) &&
 		    (to->to_tsecr != 0) &&
 		    (tp->t_flags & TF_PREVVALID)) {
 			/*
 			 * We can use the timestamp to see
 			 * if this retransmission was from the
 			 * first transmit. If so we made a mistake.
 			 */
 			tp->t_flags &= ~TF_PREVVALID;
 			if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
 				/* The first transmit is what this ack is for */
 				rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__);
 			}
 		}
 		left = th_ack - rsm->r_end;
 		if (rack->app_limited_needs_set && newly_acked)
 			rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
 		/* Free back to zone */
 		rack_free(rack, rsm);
 		if (left) {
 			goto more;
 		}
 		/* Check for reneging */
 		rsm = tqhash_min(rack->r_ctl.tqh);
 		if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
 			/*
 			 * The peer has moved snd_una up to
 			 * the edge of this send, i.e. one
 			 * that it had previously acked. The only
			 * way that can be true is if the peer threw
			 * away data (space issues) that it had
			 * previously sacked (else it would have
			 * given us snd_una up to rsm->r_end).
 			 * We need to undo the acked markings here.
 			 *
 			 * Note we have to look to make sure th_ack is
 			 * our rsm->r_start in case we get an old ack
 			 * where th_ack is behind snd_una.
 			 */
 			rack_peer_reneges(rack, rsm, th_ack);
 		}
 		return;
 	}
 	if (rsm->r_flags & RACK_ACKED) {
 		/*
 		 * It was acked on the scoreboard -- remove it from
 		 * total for the part being cum-acked.
 		 */
 		rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
 	}
 	/*
 	 * Clear the dup ack count for
 	 * the piece that remains.
 	 */
 	rsm->r_dupack = 0;
 	rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
 	if (rsm->r_rtr_bytes) {
 		/*
 		 * It was retransmitted adjust the
 		 * sack holes for what was acked.
 		 */
 		int ack_am;
 
 		ack_am = (th_ack - rsm->r_start);
 		if (ack_am >= rsm->r_rtr_bytes) {
 			rack->r_ctl.rc_holes_rxt -= ack_am;
 			rsm->r_rtr_bytes -= ack_am;
 		}
 	}
 	/*
 	 * Update where the piece starts and record
 	 * the time of send of highest cumack sent if
 	 * its in our GP range.
 	 */
 	rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
 	/* Now we need to move our offset forward too */
 	if (rsm->m &&
 	    ((rsm->orig_m_len != rsm->m->m_len) ||
 	     (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
 		/* Fix up the orig_m_len and possibly the mbuf offset */
 		rack_adjust_orig_mlen(rsm);
 	}
 	rsm->soff += (th_ack - rsm->r_start);
 	rack_rsm_sender_update(rack, tp, rsm, 5);
 	/* The trim will move th_ack into r_start for us */
 	tqhash_trim(rack->r_ctl.tqh, th_ack);
 	/* Now do we need to move the mbuf fwd too? */
 	{
 		struct mbuf *m;
 		uint32_t soff;
 
 		m = rsm->m;
 		soff = rsm->soff;
 		if (m) {
 			while (soff >= m->m_len) {
 				soff -= m->m_len;
 				KASSERT((m->m_next != NULL),
 					(" rsm:%p  off:%u soff:%u m:%p",
 					 rsm, rsm->soff, soff, m));
 				m = m->m_next;
 				if (m == NULL) {
 					/*
 					 * This is a fall-back that prevents a panic. In reality
 					 * we should be able to walk the mbuf's and find our place.
 					 * At this point snd_una has not been updated with the sbcut() yet
					 * but tqhash_trim did update rsm->r_start so the offset calculation
 					 * should work fine. This is undesirable since we will take cache
 					 * hits to access the socket buffer. And even more puzzling is that
 					 * it happens occasionally. It should not :(
 					 */
 					m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
 						      (rsm->r_start - tp->snd_una),
 						      &soff);
 					break;
 				}
 			}
 			/*
 			 * Now save in our updated values.
 			 */
 			rsm->m = m;
 			rsm->soff = soff;
 			rsm->orig_m_len = rsm->m->m_len;
 			rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 		}
 	}
 	if (rack->app_limited_needs_set &&
 	    SEQ_GEQ(th_ack, tp->gput_seq))
 		rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
 }
 
 static void
 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	struct rack_sendmap *rsm;
 	int sack_pass_fnd = 0;
 
 	if (rack->r_might_revert) {
 		/*
 		 * Ok we have reordering, have not sent anything, we
 		 * might want to revert the congestion state if nothing
 		 * further has SACK_PASSED on it. Lets check.
 		 *
 		 * We also get here when we have DSACKs come in for
 		 * all the data that we FR'd. Note that a rxt or tlp
 		 * timer clears this from happening.
 		 */
 
 		TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
 			if (rsm->r_flags & RACK_SACK_PASSED) {
 				sack_pass_fnd = 1;
 				break;
 			}
 		}
 		if (sack_pass_fnd == 0) {
 			/*
 			 * We went into recovery
 			 * incorrectly due to reordering!
 			 */
 			int orig_cwnd;
 
 			rack->r_ent_rec_ns = 0;
 			orig_cwnd = tp->snd_cwnd;
 			tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec;
 			tp->snd_recover = tp->snd_una;
 			rack_log_to_prr(rack, 14, orig_cwnd, __LINE__);
 			EXIT_RECOVERY(tp->t_flags);
 		}
 		rack->r_might_revert = 0;
 	}
 }
 
 #ifdef TCP_SAD_DETECTION
 
 static void
 rack_merge_out_sacks(struct tcp_rack *rack)
 {
 	struct rack_sendmap *cur, *next, *rsm, *trsm = NULL;
 
 	cur = tqhash_min(rack->r_ctl.tqh);
 	while(cur) {
 		next = tqhash_next(rack->r_ctl.tqh, cur);
 		/*
 		 * The idea is to go through all and merge back
 		 * together the pieces sent together,
 		 */
 		if ((next != NULL) &&
 		    (cur->r_tim_lastsent[0] == next->r_tim_lastsent[0])) {
 			rack_merge_rsm(rack, cur, next);
 		} else {
 			cur = next;
 		}
 	}
 	/*
	 * now treat it like a rxt event: everything is outstanding
	 * and sent, nothing is acked, and dupacks are all zero. If this
 	 * is not an attacker it will have to dupack its way through
 	 * it all.
 	 */
 	TAILQ_INIT(&rack->r_ctl.rc_tmap);
 	TQHASH_FOREACH(rsm, rack->r_ctl.tqh)  {
 		rsm->r_dupack = 0;
 		/* We must re-add it back to the tlist */
 		if (trsm == NULL) {
 			TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 		} else {
 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
 		}
 		rsm->r_in_tmap = 1;
 		trsm = rsm;
 		rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED);
 	}
 	sack_filter_clear(&rack->r_ctl.rack_sf, rack->rc_tp->snd_una);
 }
 
 static void
 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack,  uint32_t bytes_this_ack, uint32_t segsiz)
 {
 	int do_detection = 0;
 
 	if (rack->sack_attack_disable || rack->rc_suspicious) {
 		/*
 		 * If we have been disabled we must detect
 		 * to possibly reverse it. Or if the guy has
 		 * sent in suspicious sacks we want to do detection too.
 		 */
 		do_detection = 1;
 
 	} else if  ((rack->do_detection || tcp_force_detection) &&
 		    (tcp_sack_to_ack_thresh > 0) &&
 		    (tcp_sack_to_move_thresh > 0) &&
 		    (rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum)) {
 		/*
 		 * We only detect here if:
 		 * 1) System wide forcing is on <or> do_detection is on
 		 *   <and>
 		 * 2) We have thresholds for move and ack (set one to 0 and we are off)
 		 *   <and>
 		 * 3) We have maps allocated larger than our min (500).
 		 */
 		do_detection = 1;
 	}
 	if (do_detection > 0) {
 		/*
 		 * We have thresholds set to find
 		 * possible attackers and disable sack.
 		 * Check them.
 		 */
 		uint64_t ackratio, moveratio, movetotal;
 
 		/* Log detecting */
 		rack_log_sad(rack, 1);
 		/* Do we establish a ack ratio */
 		if ((rack->r_ctl.sack_count > tcp_map_minimum)  ||
 		    (rack->rc_suspicious == 1) ||
 		    (rack->sack_attack_disable > 0)) {
 			ackratio = (uint64_t)(rack->r_ctl.sack_count);
 			ackratio *= (uint64_t)(1000);
 			if (rack->r_ctl.ack_count)
 				ackratio /= (uint64_t)(rack->r_ctl.ack_count);
 			else {
				/* We can hit this due to ack totals degradation (via small sacks) */
 				ackratio = 1000;
 			}
 		} else {
 			/*
 			 * No ack ratio needed if we have not
			 * seen more sacks than the number of map entries.
 			 * The exception to that is if we have disabled sack then
 			 * we need to find a ratio.
 			 */
 			ackratio = 0;
 		}
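		/*
		 * The ratios are kept in units of 1/1000. For example
		 * (hypothetical counts): sack_count = 600 against
		 * ack_count = 1000 yields ackratio = 600, which is later
		 * compared against tcp_sack_to_ack_thresh below.
		 */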
 
 		if ((rack->sack_attack_disable == 0) &&
 		    (ackratio > rack_highest_sack_thresh_seen))
 			rack_highest_sack_thresh_seen = (uint32_t)ackratio;
 		/* Do we establish a move ratio? */
 		if ((rack->r_ctl.sack_moved_extra > tcp_map_minimum) ||
 		    (rack->rc_suspicious == 1) ||
 		    (rack->sack_attack_disable > 0)) {
 			/*
 			 * We need to have more sack moves than maps
 			 * allocated to have a move ratio considered.
 			 */
 			movetotal = rack->r_ctl.sack_moved_extra;
 			movetotal += rack->r_ctl.sack_noextra_move;
 			moveratio = rack->r_ctl.sack_moved_extra;
 			moveratio *= (uint64_t)1000;
 			if (movetotal)
 				moveratio /= movetotal;
 			else {
 				/* No moves, thats pretty good */
 				moveratio = 0;
 			}
 		} else {
 			/*
			 * Not enough moves have occurred to consider
 			 * if we are out of whack in that ratio.
 			 * The exception to that is if we have disabled sack then
 			 * we need to find a ratio.
 			 */
 			moveratio = 0;
 		}
 		if ((rack->sack_attack_disable == 0) &&
 		    (moveratio > rack_highest_move_thresh_seen))
 			rack_highest_move_thresh_seen = (uint32_t)moveratio;
 		/* Now the tests */
 		if (rack->sack_attack_disable == 0) {
 			/* Not disabled, do we need to disable? */
 			if ((ackratio > tcp_sack_to_ack_thresh) &&
 			    (moveratio > tcp_sack_to_move_thresh)) {
 				/* Disable sack processing */
 				tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED);
 				rack->sack_attack_disable = 1;
 				/* set it so we have the built in delay */
 				rack->r_ctl.ack_during_sd = 1;
 				if (rack_merge_out_sacks_on_attack)
 					rack_merge_out_sacks(rack);
 				counter_u64_add(rack_sack_attacks_detected, 1);
 				tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED);
 				/* Clamp the cwnd at flight size */
 				rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd;
 				rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 				rack_log_sad(rack, 2);
 			}
 		} else {
 			/* We are sack-disabled check for false positives */
 			if ((ackratio <= tcp_restoral_thresh) ||
 			    ((rack_merge_out_sacks_on_attack == 0) &&
 			     (rack->rc_suspicious == 0) &&
 			     (rack->r_ctl.rc_num_maps_alloced <= (tcp_map_minimum/2)))) {
 				rack->sack_attack_disable = 0;
 				rack_log_sad(rack, 3);
 				/* Restart counting */
 				rack->r_ctl.sack_count = 0;
 				rack->r_ctl.sack_moved_extra = 0;
 				rack->r_ctl.sack_noextra_move = 1;
 				rack->rc_suspicious = 0;
 				rack->r_ctl.ack_count = max(1,
 							    (bytes_this_ack / segsiz));
 
 				counter_u64_add(rack_sack_attacks_reversed, 1);
 				/* Restore the cwnd */
 				if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd)
 					rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd;
 			}
 		}
 	}
 }
 #endif
 
 static int
 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end)
 {
 
 	uint32_t am, l_end;
 	int was_tlp = 0;
 
 	if (SEQ_GT(end, start))
 		am = end - start;
 	else
 		am = 0;
 	if ((rack->rc_last_tlp_acked_set ) &&
 	    (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) &&
 	    (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) {
 		/*
		 * The DSACK is because of a TLP, so we don't
		 * do anything with the reordering window here, since
		 * it was not reordering that caused the DSACK but
		 * our previous retransmitted TLP.
 		 */
 		rack_log_dsack_event(rack, 7, __LINE__, start, end);
 		was_tlp = 1;
 		goto skip_dsack_round;
 	}
 	if (rack->rc_last_sent_tlp_seq_valid) {
 		l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len;
 		if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) &&
 		    (SEQ_LEQ(end, l_end))) {
 			/*
 			 * This dsack is from the last sent TLP, ignore it
 			 * for reordering purposes.
 			 */
 			rack_log_dsack_event(rack, 7, __LINE__, start, end);
 			was_tlp = 1;
 			goto skip_dsack_round;
 		}
 	}
 	if (rack->rc_dsack_round_seen == 0) {
 		rack->rc_dsack_round_seen = 1;
 		rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max;
 		rack->r_ctl.num_dsack++;
 		rack->r_ctl.dsack_persist = 16;	/* 16 is from the standard */
 		rack_log_dsack_event(rack, 2, __LINE__, 0, 0);
 	}
 skip_dsack_round:
 	/*
 	 * We keep track of how many DSACK blocks we get
 	 * after a recovery incident.
 	 */
 	rack->r_ctl.dsack_byte_cnt += am;
 	if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) &&
 	    rack->r_ctl.retran_during_recovery &&
 	    (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) {
 		/*
		 * False recovery; the most likely culprit is reordering. If
 		 * nothing else is missing we need to revert.
 		 */
 		rack->r_might_revert = 1;
 		rack_handle_might_revert(rack->rc_tp, rack);
 		rack->r_might_revert = 0;
 		rack->r_ctl.retran_during_recovery = 0;
 		rack->r_ctl.dsack_byte_cnt = 0;
 	}
 	return (was_tlp);
 }
 
 static uint32_t
 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una)
 {
 	return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt);
 }
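
/*
 * Example of the pipe estimate above (hypothetical numbers): with
 * 100000 bytes outstanding (snd_max - snd_una), 20000 bytes reported
 * sacked and 5000 retransmitted bytes still counted in rc_holes_rxt,
 * the pipe is (100000 - 20000) + 5000 = 85000 bytes.
 */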
 
 static int32_t
 rack_compute_pipe(struct tcpcb *tp)
 {
 	return ((int32_t)do_rack_compute_pipe(tp,
 					      (struct tcp_rack *)tp->t_fb_ptr,
 					      tp->snd_una));
 }
 
 static void
 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack)
 {
 	/* Deal with changed and PRR here (in recovery only) */
 	uint32_t pipe, snd_una;
 
 	rack->r_ctl.rc_prr_delivered += changed;
 
 	if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) {
 		/*
 		 * It is all outstanding, we are application limited
 		 * and thus we don't need more room to send anything.
 		 * Note we use tp->snd_una here and not th_ack because
		 * the data has not yet been cut from the sb.
 		 */
 		rack->r_ctl.rc_prr_sndcnt = 0;
 		return;
 	}
 	/* Compute prr_sndcnt */
 	if (SEQ_GT(tp->snd_una, th_ack)) {
 		snd_una = tp->snd_una;
 	} else {
 		snd_una = th_ack;
 	}
 	pipe = do_rack_compute_pipe(tp, rack, snd_una);
 	if (pipe > tp->snd_ssthresh) {
 		long sndcnt;
 
 		sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh;
 		if (rack->r_ctl.rc_prr_recovery_fs > 0)
 			sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs;
 		else {
 			rack->r_ctl.rc_prr_sndcnt = 0;
 			rack_log_to_prr(rack, 9, 0, __LINE__);
 			sndcnt = 0;
 		}
 		sndcnt++;
 		if (sndcnt > (long)rack->r_ctl.rc_prr_out)
 			sndcnt -= rack->r_ctl.rc_prr_out;
 		else
 			sndcnt = 0;
 		rack->r_ctl.rc_prr_sndcnt = sndcnt;
 		rack_log_to_prr(rack, 10, 0, __LINE__);
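		/*
		 * Worked example of this proportional branch (hypothetical
		 * numbers, treating rc_prr_recovery_fs as the flight size
		 * recorded at recovery entry): ssthresh = 50000,
		 * rc_prr_recovery_fs = 100000, rc_prr_delivered = 10000 and
		 * rc_prr_out = 4000 give sndcnt = 10000 * 50000 / 100000
		 * + 1 - 4000 = 1001 bytes that may be sent.
		 */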
 	} else {
 		uint32_t limit;
 
 		if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out)
 			limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out);
 		else
 			limit = 0;
 		if (changed > limit)
 			limit = changed;
 		limit += ctf_fixed_maxseg(tp);
 		if (tp->snd_ssthresh > pipe) {
 			rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit);
 			rack_log_to_prr(rack, 11, 0, __LINE__);
 		} else {
 			rack->r_ctl.rc_prr_sndcnt = min(0, limit);
 			rack_log_to_prr(rack, 12, 0, __LINE__);
 		}
 	}
 }
 
 static void
 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck,
 	     int *dsack_seen, int *sacks_seen)
 {
 	uint32_t changed;
 	struct tcp_rack *rack;
 	struct rack_sendmap *rsm;
 	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1];
 	register uint32_t th_ack;
 	int32_t i, j, k, num_sack_blks = 0;
 	uint32_t cts, acked, ack_point;
 	int loop_start = 0, moved_two = 0, no_extra = 0;
 	uint32_t tsused;
 	uint32_t segsiz, o_cnt;
 
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	if (tcp_get_flags(th) & TH_RST) {
 		/* We don't log resets */
 		return;
 	}
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	cts = tcp_get_usecs(NULL);
 	rsm = tqhash_min(rack->r_ctl.tqh);
 	changed = 0;
 	th_ack = th->th_ack;
 	if (rack->sack_attack_disable == 0)
 		rack_do_decay(rack);
 	segsiz = ctf_fixed_maxseg(rack->rc_tp);
 	if (BYTES_THIS_ACK(tp, th) >=  segsiz) {
 		/*
 		 * You only get credit for
 		 * MSS and greater (and you get extra
 		 * credit for larger cum-ack moves).
 		 */
 		int ac;
 
 		ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp);
 		rack->r_ctl.ack_count += ac;
 		counter_u64_add(rack_ack_total, ac);
 	}
 	if (rack->r_ctl.ack_count > 0xfff00000) {
 		/*
 		 * reduce the number to keep us under
 		 * a uint32_t.
 		 */
 		rack->r_ctl.ack_count /= 2;
 		rack->r_ctl.sack_count /= 2;
 	}
 	if (SEQ_GT(th_ack, tp->snd_una)) {
 		rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__);
 		tp->t_acktime = ticks;
 	}
 	if (rsm && SEQ_GT(th_ack, rsm->r_start))
 		changed = th_ack - rsm->r_start;
 	if (changed) {
 		rack_process_to_cumack(tp, rack, th_ack, cts, to,
 				       tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
 	}
 	if ((to->to_flags & TOF_SACK) == 0) {
 		/* We are done nothing left and no sack. */
 		rack_handle_might_revert(tp, rack);
 		/*
 		 * For cases where we struck a dup-ack
 		 * with no SACK, add to the changes so
 		 * PRR will work right.
 		 */
 		if (dup_ack_struck && (changed == 0)) {
 			changed += ctf_fixed_maxseg(rack->rc_tp);
 		}
 		goto out;
 	}
 	/* Sack block processing */
 	if (SEQ_GT(th_ack, tp->snd_una))
 		ack_point = th_ack;
 	else
 		ack_point = tp->snd_una;
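	/*
	 * Walk the reported blocks: a block is usable only if it is
	 * well formed (end > start) and lies entirely above the ack
	 * point and at or below snd_max. A block at or below the
	 * cumulative ack is treated as a D-SACK report instead.
	 */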
 	for (i = 0; i < to->to_nsacks; i++) {
 		bcopy((to->to_sacks + i * TCPOLEN_SACK),
 		      &sack, sizeof(sack));
 		sack.start = ntohl(sack.start);
 		sack.end = ntohl(sack.end);
 		if (SEQ_GT(sack.end, sack.start) &&
 		    SEQ_GT(sack.start, ack_point) &&
 		    SEQ_LT(sack.start, tp->snd_max) &&
 		    SEQ_GT(sack.end, ack_point) &&
 		    SEQ_LEQ(sack.end, tp->snd_max)) {
 			sack_blocks[num_sack_blks] = sack;
 			num_sack_blks++;
 		} else if (SEQ_LEQ(sack.start, th_ack) &&
 			   SEQ_LEQ(sack.end, th_ack)) {
 			int was_tlp;
 
 			if (dsack_seen != NULL)
 				*dsack_seen = 1;
 			was_tlp = rack_note_dsack(rack, sack.start, sack.end);
 			/*
 			 * Its a D-SACK block.
 			 */
 			tcp_record_dsack(tp, sack.start, sack.end, was_tlp);
 		}
 	}
 	if (rack->rc_dsack_round_seen) {
		/* Is the dsack round over? */
 		if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) {
 			/* Yes it is */
 			rack->rc_dsack_round_seen = 0;
 			rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
 		}
 	}
 	/*
 	 * Sort the SACK blocks so we can update the rack scoreboard with
 	 * just one pass.
 	 */
 	o_cnt = num_sack_blks;
 	num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks,
 					 num_sack_blks, th->th_ack);
 	ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks);
 	if (sacks_seen != NULL)
 		*sacks_seen = num_sack_blks;
 	if (num_sack_blks == 0) {
 		/* Nothing to sack, but we need to update counts */
		if ((o_cnt == 1) &&
		    ((dsack_seen == NULL) || (*dsack_seen != 1)))
 			rack->r_ctl.sack_count++;
 		else if (o_cnt > 1)
 			rack->r_ctl.sack_count++;
 		goto out_with_totals;
 	}
 	if (rack->sack_attack_disable) {
 		/*
		 * An attacker disablement is in place; for
		 * every sack block that is not at least a full MSS,
		 * count up sack_count.
 		 */
 		for (i = 0; i < num_sack_blks; i++) {
 			if ((sack_blocks[i].end - sack_blocks[i].start) < segsiz) {
 				rack->r_ctl.sack_count++;
 			}
 			if (rack->r_ctl.sack_count > 0xfff00000) {
 				/*
 				 * reduce the number to keep us under
 				 * a uint32_t.
 				 */
 				rack->r_ctl.ack_count /= 2;
 				rack->r_ctl.sack_count /= 2;
 			}
 		}
 		goto out;
 	}
 	/* Its a sack of some sort */
 	rack->r_ctl.sack_count += num_sack_blks;
 	if (rack->r_ctl.sack_count > 0xfff00000) {
 		/*
 		 * reduce the number to keep us under
 		 * a uint32_t.
 		 */
 		rack->r_ctl.ack_count /= 2;
 		rack->r_ctl.sack_count /= 2;
 	}
 	if (num_sack_blks < 2) {
 		/* Only one, we don't need to sort */
 		goto do_sack_work;
 	}
 	/* Sort the sacks */
 	for (i = 0; i < num_sack_blks; i++) {
 		for (j = i + 1; j < num_sack_blks; j++) {
 			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
 				sack = sack_blocks[i];
 				sack_blocks[i] = sack_blocks[j];
 				sack_blocks[j] = sack;
 			}
 		}
 	}
 	/*
 	 * Now are any of the sack block ends the same (yes some
 	 * implementations send these)?
 	 */
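	/*
	 * For example (hypothetical blocks): [100-200] and [150-200]
	 * share the same end; we keep the one with the smaller start
	 * (100) so the widest coverage survives, drop the other and
	 * rescan from the top.
	 */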
 again:
 	if (num_sack_blks == 0)
 		goto out_with_totals;
 	if (num_sack_blks > 1) {
 		for (i = 0; i < num_sack_blks; i++) {
 			for (j = i + 1; j < num_sack_blks; j++) {
 				if (sack_blocks[i].end == sack_blocks[j].end) {
 					/*
					 * Ok these two have the same end; we
 					 * want the smallest end and then
 					 * throw away the larger and start
 					 * again.
 					 */
 					if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) {
 						/*
 						 * The second block covers
						 * more area, use that
 						 */
 						sack_blocks[i].start = sack_blocks[j].start;
 					}
 					/*
 					 * Now collapse out the dup-sack and
 					 * lower the count
 					 */
 					for (k = (j + 1); k < num_sack_blks; k++) {
 						sack_blocks[j].start = sack_blocks[k].start;
 						sack_blocks[j].end = sack_blocks[k].end;
 						j++;
 					}
 					num_sack_blks--;
 					goto again;
 				}
 			}
 		}
 	}
 do_sack_work:
 	/*
	 * First let's look to see if
	 * we have retransmitted and
	 * can use the next-to-transmit entry.
 	 */
 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 	if (rsm &&
 	    SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
 	    SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
 		/*
 		 * We probably did the FR and the next
		 * SACK coming in continues as we would expect.
 		 */
 		acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &no_extra, &moved_two, segsiz);
 		if (acked) {
 			rack->r_wanted_output = 1;
 			changed += acked;
 		}
 		if (num_sack_blks == 1) {
 			/*
 			 * This is what we would expect from
 			 * a normal implementation to happen
 			 * after we have retransmitted the FR,
			 * i.e. the sack-filter pushes down
			 * to 1 block and the next to be retransmitted
			 * is the sequence in the sack block (as more
			 * are acked). Count this as ACK'd data to boost
 			 * up the chances of recovering any false positives.
 			 */
 			rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp));
 			counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
 			counter_u64_add(rack_express_sack, 1);
 			if (rack->r_ctl.ack_count > 0xfff00000) {
 				/*
 				 * reduce the number to keep us under
 				 * a uint32_t.
 				 */
 				rack->r_ctl.ack_count /= 2;
 				rack->r_ctl.sack_count /= 2;
 			}
 			if (moved_two) {
 				/*
 				 * If we did not get a SACK for at least a MSS and
 				 * had to move at all, or if we moved more than our
 				 * threshold, it counts against the "extra" move.
 				 */
 				rack->r_ctl.sack_moved_extra += moved_two;
 				rack->r_ctl.sack_noextra_move += no_extra;
 				counter_u64_add(rack_move_some, 1);
 			} else {
 				/*
 				 * else we did not have to move
 				 * any more than we would expect.
 				 */
 				rack->r_ctl.sack_noextra_move += no_extra;
 				rack->r_ctl.sack_noextra_move++;
 				counter_u64_add(rack_move_none, 1);
 			}
 			if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
 			    (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
 				rack->r_ctl.sack_moved_extra /= 2;
 				rack->r_ctl.sack_noextra_move /= 2;
 			}
 			goto out_with_totals;
 		} else {
 			/*
 			 * Start the loop through the
 			 * rest of blocks, past the first block.
 			 */
 			loop_start = 1;
 		}
 	}
 	counter_u64_add(rack_sack_total, 1);
 	rsm = rack->r_ctl.rc_sacklast;
 	for (i = loop_start; i < num_sack_blks; i++) {
 		acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &no_extra, &moved_two, segsiz);
 		if (acked) {
 			rack->r_wanted_output = 1;
 			changed += acked;
 		}
 		if (moved_two) {
 			/*
 			 * If we did not get a SACK for at least a MSS and
 			 * had to move at all, or if we moved more than our
 			 * threshold, it counts against the "extra" move.
 			 */
 			rack->r_ctl.sack_moved_extra += moved_two;
 			rack->r_ctl.sack_noextra_move += no_extra;
 			counter_u64_add(rack_move_some, 1);
 		} else {
 			/*
 			 * else we did not have to move
 			 * any more than we would expect.
 			 */
 			rack->r_ctl.sack_noextra_move += no_extra;
 			rack->r_ctl.sack_noextra_move++;
 			counter_u64_add(rack_move_none, 1);
 		}
 		if ((rack->r_ctl.sack_moved_extra > 0xfff00000) ||
 		    (rack->r_ctl.sack_noextra_move > 0xfff00000)) {
 			rack->r_ctl.sack_moved_extra /= 2;
 			rack->r_ctl.sack_noextra_move /= 2;
 		}
 		if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) {
 			/*
 			 * If the SACK was not a full MSS then
 			 * we add to sack_count the number of
 			 * MSS's (or possibly more than
			 * an MSS if it's a TSO send) we had to skip by.
 			 */
 			rack->r_ctl.sack_count += moved_two;
 			if (rack->r_ctl.sack_count > 0xfff00000) {
 				rack->r_ctl.ack_count /= 2;
 				rack->r_ctl.sack_count /= 2;
 			}
 			counter_u64_add(rack_sack_total, moved_two);
 		}
 		/*
		 * Now we need to set up for the next
 		 * round. First we make sure we won't
 		 * exceed the size of our uint32_t on
 		 * the various counts, and then clear out
 		 * moved_two.
 		 */
 		moved_two = 0;
 		no_extra = 0;
 	}
 out_with_totals:
 	if (num_sack_blks > 1) {
 		/*
 		 * You get an extra stroke if
 		 * you have more than one sack-blk, this
 		 * could be where we are skipping forward
 		 * and the sack-filter is still working, or
 		 * it could be an attacker constantly
 		 * moving us.
 		 */
 		rack->r_ctl.sack_moved_extra++;
 		counter_u64_add(rack_move_some, 1);
 	}
 out:
 #ifdef TCP_SAD_DETECTION
 	rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp));
 #endif
 	if (changed) {
 		/* Something changed cancel the rack timer */
 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 	}
 	tsused = tcp_get_usecs(NULL);
 	rsm = tcp_rack_output(tp, rack, tsused);
 	if ((!IN_FASTRECOVERY(tp->t_flags)) &&
 	    rsm &&
 	    ((rsm->r_flags & RACK_MUST_RXT) == 0)) {
 		/* Enter recovery */
 		entered_recovery = 1;
 		rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
 		/*
 		 * When we enter recovery we need to assure we send
 		 * one packet.
 		 */
 		if (rack->rack_no_prr == 0) {
 			rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
 			rack_log_to_prr(rack, 8, 0, __LINE__);
 		}
 		rack->r_timer_override = 1;
 		rack->r_early = 0;
 		rack->r_ctl.rc_agg_early = 0;
 	} else if (IN_FASTRECOVERY(tp->t_flags) &&
 		   rsm &&
 		   (rack->r_rr_config == 3)) {
 		/*
 		 * Assure we can output and we get no
 		 * remembered pace time except the retransmit.
 		 */
 		rack->r_timer_override = 1;
 		rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
 		rack->r_ctl.rc_resend = rsm;
 	}
 	if (IN_FASTRECOVERY(tp->t_flags) &&
 	    (rack->rack_no_prr == 0) &&
 	    (entered_recovery == 0)) {
 		rack_update_prr(tp, rack, changed, th_ack);
 		if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
 		     ((tcp_in_hpts(rack->rc_tp) == 0) &&
 		      ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) {
 			/*
 			 * If you are pacing output you don't want
 			 * to override.
 			 */
 			rack->r_early = 0;
 			rack->r_ctl.rc_agg_early = 0;
 			rack->r_timer_override = 1;
 		}
 	}
 }
 
 static void
 rack_strike_dupack(struct tcp_rack *rack)
 {
 	struct rack_sendmap *rsm;
 
 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 	while (rsm) {
 		/*
 		 * We need to skip anything already set
 		 * to be retransmitted.
 		 */
 		if ((rsm->r_dupack >= DUP_ACK_THRESHOLD)  ||
 		    (rsm->r_flags & RACK_MUST_RXT)) {
 			rsm = TAILQ_NEXT(rsm, r_tnext);
 			continue;
 		}
 		break;
 	}
 	if (rsm && (rsm->r_dupack < 0xff)) {
 		rsm->r_dupack++;
 		if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
 			struct timeval tv;
 			uint32_t cts;
 			/*
 			 * Here we see if we need to retransmit. For
 			 * a SACK type connection if enough time has passed
 			 * we will get a return of the rsm. For a non-sack
 			 * connection we will get the rsm returned if the
 			 * dupack value is 3 or more.
 			 */
 			cts = tcp_get_usecs(&tv);
 			rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
 			if (rack->r_ctl.rc_resend != NULL) {
 				if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
 					rack_cong_signal(rack->rc_tp, CC_NDUPACK,
 							 rack->rc_tp->snd_una, __LINE__);
 				}
 				rack->r_wanted_output = 1;
 				rack->r_timer_override = 1;
 				rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
 			}
 		} else {
 			rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
 		}
 	}
 }
 
 static void
 rack_check_bottom_drag(struct tcpcb *tp,
 		       struct tcp_rack *rack,
 		       struct socket *so)
 {
 	uint32_t segsiz, minseg;
 
 	segsiz = ctf_fixed_maxseg(tp);
 	minseg = segsiz;
 	if (tp->snd_max == tp->snd_una) {
 		/*
 		 * We are doing dynamic pacing and we are way
 		 * under. Basically everything got acked while
 		 * we were still waiting on the pacer to expire.
 		 *
 		 * This means we need to boost the b/w in
 		 * addition to any earlier boosting of
 		 * the multiplier.
 		 */
 		uint64_t lt_bw;
 
 		lt_bw = rack_get_lt_bw(rack);
 		rack->rc_dragged_bottom = 1;
 		rack_validate_multipliers_at_or_above100(rack);
 		if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
 		    (lt_bw > 0)) {
 			/*
 			 * Lets use the long-term b/w we have
 			 * been getting as a base.
 			 */
 			if (rack->rc_gp_filled == 0) {
 				if (lt_bw > ONE_POINT_TWO_MEG) {
 					/*
 					 * If we have no measurement
 					 * don't let us set in more than
 					 * 1.2Mbps. If we are still too
 					 * low after pacing with this we
 					 * will hopefully have a max b/w
 					 * available to sanity check things.
 					 */
 					lt_bw = ONE_POINT_TWO_MEG;
 				}
 				rack->r_ctl.rc_rtt_diff = 0;
 				rack->r_ctl.gp_bw = lt_bw;
 				rack->rc_gp_filled = 1;
 				if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
 					rack->r_ctl.num_measurements = RACK_REQ_AVG;
 				rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
 			} else if (lt_bw > rack->r_ctl.gp_bw) {
 				rack->r_ctl.rc_rtt_diff = 0;
 				if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
 					rack->r_ctl.num_measurements = RACK_REQ_AVG;
 				rack->r_ctl.gp_bw = lt_bw;
 				rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
 			} else
 				rack_increase_bw_mul(rack, -1, 0, 0, 1);
 			if ((rack->gp_ready == 0) &&
 			    (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
 				/* We have enough measurements now */
 				rack->gp_ready = 1;
 				if (rack->dgp_on ||
 				    rack->rack_hibeta)
 					rack_set_cc_pacing(rack);
 				if (rack->defer_options)
 					rack_apply_deferred_options(rack);
 			}
 		} else {
 			/*
			 * Zero RTT possibly? Settle for just an old increase.
 			 */
 			rack_increase_bw_mul(rack, -1, 0, 0, 1);
 		}
 	} else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
 		   (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
 					       minseg)) &&
 		   (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
 		   (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
 		   (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
 		    (segsiz * rack_req_segs))) {
 		/*
 		 * We are doing dynamic GP pacing and
		 * we have everything except an MSS or less of
		 * bytes left outstanding. We are still pacing away.
		 * And there is data that could be sent; this
		 * means we are inserting delayed ack time in
		 * our measurements because we are pacing too slowly.
 		 */
 		rack_validate_multipliers_at_or_above100(rack);
 		rack->rc_dragged_bottom = 1;
 		rack_increase_bw_mul(rack, -1, 0, 0, 1);
 	}
 }
 
 #ifdef TCP_REQUEST_TRK
 static void
 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq,
 		struct tcp_sendfile_track *cur, uint8_t mod, int line, int err)
 {
 	int do_log;
 
 	do_log = tcp_bblogging_on(rack->rc_tp);
 	if (do_log == 0) {
 		if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0)
 			return;
 		/* We only allow the three below with point logging on */
 		if ((mod != HYBRID_LOG_RULES_APP) &&
 		    (mod != HYBRID_LOG_RULES_SET) &&
 		    (mod != HYBRID_LOG_REQ_COMP))
 			return;
 
 	}
 	if (do_log) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		/* Convert our ms to a microsecond */
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.flex1 = seq;
 		log.u_bbr.cwnd_gain = line;
 		if (cur != NULL) {
 			uint64_t off;
 
 			log.u_bbr.flex2 = cur->start_seq;
 			log.u_bbr.flex3 = cur->end_seq;
 			log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff);
 			log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff);
 			log.u_bbr.flex6 = cur->flags;
 			log.u_bbr.pkts_out = cur->hybrid_flags;
 			log.u_bbr.rttProp = cur->timestamp;
 			log.u_bbr.cur_del_rate = cur->cspr;
 			log.u_bbr.bw_inuse = cur->start;
 			log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff);
 			log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ;
 			log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff);
 			log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ;
 			log.u_bbr.bbr_state = 1;
 #ifdef TCP_REQUEST_TRK
 			off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]);
 			log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track));
 #endif
 		} else {
 			log.u_bbr.flex2 = err;
 		}
 		/*
 		 * Fill in flex7 to be CHD (catchup|hybrid|DGP)
 		 */
 		log.u_bbr.flex7 = rack->rc_catch_up;
 		log.u_bbr.flex7 <<= 1;
 		log.u_bbr.flex7 |= rack->rc_hybrid_mode;
 		log.u_bbr.flex7 <<= 1;
 		log.u_bbr.flex7 |= rack->dgp_on;
 		log.u_bbr.flex8 = mod;
 		log.u_bbr.delRate = rack->r_ctl.bw_rate_cap;
 		log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start;
 		log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error;
 		log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop;
 		tcp_log_event(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_HYBRID_PACING_LOG, 0,
 	            0, &log, false, NULL, __func__, __LINE__, &tv);
 	}
 }
 #endif
 
 #ifdef TCP_REQUEST_TRK
 static void
 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len)
 {
 	struct tcp_sendfile_track *rc_cur;
 	struct tcpcb *tp;
 	int err = 0;
 
 	rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq);
 	if (rc_cur == NULL) {
		/* Not found for the beginning; what about the end piece? */
 		if (rack->rc_hybrid_mode)
 			rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
 		rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1));
 	} else {
 		err = 12345;
 	}
 	/* If we find no parameters we are in straight DGP mode */
	if (rc_cur == NULL) {
 		/* None found for this seq, just DGP for now */
 		rack->r_ctl.client_suggested_maxseg = 0;
 		rack->rc_catch_up = 0;
 		rack->r_ctl.bw_rate_cap = 0;
 		if (rack->rc_hybrid_mode)
 			rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err);
 		if (rack->r_ctl.rc_last_sft) {
 			rack->r_ctl.rc_last_sft = NULL;
 		}
 		return;
 	}
 	if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) {
 		/* This entry was never setup for hybrid pacing on/off etc */
 		return;
 	}
 	/*
 	 * Ok if we have a new entry *or* have never
 	 * set up an entry we need to proceed. If
	 * we have already set up this entry we
	 * just continue along with what we already
	 * set up.
 	 */
 	tp = rack->rc_tp;
 	if ((rack->r_ctl.rc_last_sft != NULL) &&
 	    (rack->r_ctl.rc_last_sft == rc_cur)) {
 		/* Its already in place */
 		if (rack->rc_hybrid_mode)
 			rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0);
 		return;
 	}
 	if (rack->rc_hybrid_mode == 0) {
 		rack->r_ctl.rc_last_sft = rc_cur;
 		rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0);
 		return;
 	}
 	if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){
		/* Compensate for all the header overheads */
 		rack->r_ctl.bw_rate_cap	= rack_compensate_for_linerate(rack, rc_cur->cspr);
 	} else
 		rack->r_ctl.bw_rate_cap = 0;
 	if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS)
 		rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg;
 	else
 		rack->r_ctl.client_suggested_maxseg = 0;
 	if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) &&
 	    (rc_cur->cspr > 0)) {
 		uint64_t len;
 
 		rack->rc_catch_up = 1;
 		/*
 		 * Calculate the deadline time, first set the
 		 * time to when the request arrived.
 		 */
 		rc_cur->deadline = rc_cur->localtime;
 		/*
 		 * Next calculate the length and compensate for
 		 * TLS if need be.
 		 */
 		len = rc_cur->end - rc_cur->start;
 		if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) {
 			/*
 			 * This session is doing TLS. Take a swag guess
 			 * at the overhead.
 			 */
 			len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len);
 		}
 		/*
 		 * Now considering the size, and the cspr, what is the time that
 		 * would be required at the cspr rate. Here we use the raw
 		 * cspr value since the client only looks at the raw data. We
 		 * do use len which includes TLS overhead, but not the TCP/IP etc.
 		 * That will get made up for in the CU pacing rate set.
 		 */
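		/*
		 * Sketch of the math, assuming cspr is in bytes per
		 * second (hypothetical numbers): a 1,000,000 byte
		 * request at a cspr of 125,000 bytes/sec (1 Mbps)
		 * yields 1000000 * HPTS_USEC_IN_SEC / 125000 =
		 * 8,000,000 usecs (8 seconds) added to the arrival
		 * time to form the catch-up deadline.
		 */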
 		len *= HPTS_USEC_IN_SEC;
 		len /= rc_cur->cspr;
 		rc_cur->deadline += len;
 	} else {
 		rack->rc_catch_up = 0;
 		rc_cur->deadline = 0;
 	}
 	if (rack->r_ctl.client_suggested_maxseg != 0) {
 		/*
 		 * We need to reset the max pace segs if we have a
 		 * client_suggested_maxseg.
 		 */
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 	}
 	rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0);
 	/* Remember it for next time and for CU mode */
 	rack->r_ctl.rc_last_sft = rc_cur;
 }
 #endif
 
 static void
 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts)
 {
 #ifdef TCP_REQUEST_TRK
 	struct tcp_sendfile_track *ent;
 
 	ent = rack->r_ctl.rc_last_sft;
 	if ((ent == NULL) ||
 	    (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) ||
 	    (SEQ_GEQ(seq, ent->end_seq))) {
 		/* Time to update the track. */
 		rack_set_dgp_hybrid_mode(rack, seq, len);
 		ent = rack->r_ctl.rc_last_sft;
 	}
 	/* Out of all */
 	if (ent == NULL) {
 		return;
 	}
 	if (SEQ_LT(ent->end_seq, (seq + len))) {
 		/*
 		 * This is the case where our end_seq guess
		 * was wrong. This is usually due to TLS having
		 * more bytes than our guess. It could also be the
		 * case that the client sent in two requests closely
		 * and the SB is full of both so we are sending part
		 * of each (end|beg). In such a case let's move this
		 * guy's end to match the end of this send. That
 		 * way it will complete when all of it is acked.
 		 */
 		ent->end_seq = (seq + len);
 		if (rack->rc_hybrid_mode)
 			rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__);
 	}
 	/* Now validate we have set the send time of this one */
 	if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) {
 		ent->flags |= TCP_TRK_TRACK_FLG_FSND;
 		ent->first_send = cts;
 		ent->sent_at_fs = rack->rc_tp->t_sndbytes;
 		ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes;
 	}
 #endif
 }
 
 static void
 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
 {
 	/*
 	 * The fast output path is enabled and we
 	 * have moved the cumack forward. Lets see if
 	 * we can expand forward the fast path length by
 	 * that amount. What we would ideally like to
 	 * do is increase the number of bytes in the
 	 * fast path block (left_to_send) by the
 	 * acked amount. However we have to gate that
 	 * by two factors:
 	 * 1) The amount outstanding and the rwnd of the peer
 	 *    (i.e. we don't want to exceed the rwnd of the peer).
 	 *    <and>
 	 * 2) The amount of data left in the socket buffer (i.e.
 	 *    we can't send beyond what is in the buffer).
 	 *
 	 * Note that this does not take into account any increase
 	 * in the cwnd. We will only extend the fast path by
 	 * what was acked.
 	 */
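	/*
	 * For example (hypothetical numbers): with 20000 bytes in the
	 * sb, 5000 outstanding, and an rwnd of 12000, the gate is
	 * min(20000 - 5000, 12000 - 5000) = 7000, so acked_amount plus
	 * left_to_send must stay at or below 7000 for the extension
	 * to happen.
	 */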
 	uint32_t new_total, gating_val;
 
 	new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
 	gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
 			 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
 	if (new_total <= gating_val) {
 		/* We can increase left_to_send by the acked amount */
 		counter_u64_add(rack_extended_rfo, 1);
 		rack->r_ctl.fsb.left_to_send = new_total;
 		KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
 			("rack:%p left_to_send:%u sbavail:%u out:%u",
 			 rack, rack->r_ctl.fsb.left_to_send,
 			 sbavail(&rack->rc_inp->inp_socket->so_snd),
 			 (tp->snd_max - tp->snd_una)));
 
 	}
 }
 
 static void
 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb)
 {
 	/*
 	 * Here any sendmap entry that points to the
 	 * beginning mbuf must be adjusted to the correct
 	 * offset. This must be called with:
 	 * 1) The socket buffer locked
 	 * 2) snd_una adjusted to its new position.
 	 *
 	 * Note that (2) implies rack_ack_received has also
 	 * been called and all the sbcut's have been done.
 	 *
 	 * We grab the first mbuf in the socket buffer and
 	 * then go through the front of the sendmap, recalculating
 	 * the stored offset for any sendmap entry that has
 	 * that mbuf. We must use the sb functions to do this
	 * since it's possible an add was done as well as
 	 * the subtraction we may have just completed. This should
 	 * not be a penalty though, since we just referenced the sb
 	 * to go in and trim off the mbufs that we freed (of course
 	 * there will be a penalty for the sendmap references though).
 	 *
	 * Note also with INVARIANTS on, we validate with a KASSERT
 	 * that the first sendmap entry has a soff of 0.
 	 *
 	 */
 	struct mbuf *m;
 	struct rack_sendmap *rsm;
 	tcp_seq snd_una;
 #ifdef INVARIANTS
 	int first_processed = 0;
 #endif
 
 	snd_una = rack->rc_tp->snd_una;
 	SOCKBUF_LOCK_ASSERT(sb);
 	m = sb->sb_mb;
 	rsm = tqhash_min(rack->r_ctl.tqh);
 	if ((rsm == NULL) || (m == NULL)) {
 		/* Nothing outstanding */
 		return;
 	}
 	/* The very first RSM's mbuf must point to the head mbuf in the sb */
 	KASSERT((rsm->m == m),
 		("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb",
 		 rack, sb, rsm));
 	while (rsm->m && (rsm->m == m)) {
 		/* one to adjust */
 #ifdef INVARIANTS
 		struct mbuf *tm;
 		uint32_t soff;
 
 		tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
 		if ((rsm->orig_m_len != m->m_len) ||
 		    (rsm->orig_t_space != M_TRAILINGROOM(m))){
 			rack_adjust_orig_mlen(rsm);
 		}
 		if (first_processed == 0) {
 			KASSERT((rsm->soff == 0),
 				("Rack:%p rsm:%p -- rsm at head but soff not zero",
 				 rack, rsm));
 			first_processed = 1;
 		}
 		if ((rsm->soff != soff) || (rsm->m != tm)) {
 			/*
 			 * This is not a fatal error, we anticipate it
 			 * might happen (the else code), so we count it here
			 * so that under INVARIANTS we can see that it really
 			 * does happen.
 			 */
 			counter_u64_add(rack_adjust_map_bw, 1);
 		}
 		rsm->m = tm;
 		rsm->soff = soff;
 		if (tm) {
 			rsm->orig_m_len = rsm->m->m_len;
 			rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 		} else {
 			rsm->orig_m_len = 0;
 			rsm->orig_t_space = 0;
 		}
 #else
 		rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
 		if (rsm->m) {
 			rsm->orig_m_len = rsm->m->m_len;
 			rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 		} else {
 			rsm->orig_m_len = 0;
 			rsm->orig_t_space = 0;
 		}
 #endif
 		rsm = tqhash_next(rack->r_ctl.tqh, rsm);
 		if (rsm == NULL)
 			break;
 	}
 }
 
 #ifdef TCP_REQUEST_TRK
 static inline void
 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack)
 {
 	struct tcp_sendfile_track *ent;
 	int i;
 
 	if ((rack->rc_hybrid_mode == 0) &&
 	    (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) {
 		/*
		 * Just do normal completions; hybrid pacing is not on
 		 * and CLDL is off as well.
 		 */
 		tcp_req_check_for_comp(rack->rc_tp, th_ack);
 		return;
 	}
 	/*
 	 * Originally I was just going to find the th_ack associated
	 * with an entry. But then I realized a large stretch ack could
 	 * in theory ack two or more requests at once. So instead we
 	 * need to find all entries that are completed by th_ack not
 	 * just a single entry and do our logging.
 	 */
 	ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
 	while (ent != NULL) {
 		/*
		 * We may be doing hybrid pacing or CLDL and possibly need
		 * more details, so we do it manually instead of calling
		 * tcp_req_check_for_comp().
 		 */
 		uint64_t laa, tim, data, cbw, ftim;
 
 		/* Ok this ack frees it */
 		rack_log_hybrid(rack, th_ack,
 				ent, HYBRID_LOG_REQ_COMP, __LINE__, 0);
 		rack_log_hybrid_sends(rack, ent, __LINE__);
 		/* calculate the time based on the ack arrival */
 		data = ent->end - ent->start;
 		laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
 		if (ent->flags & TCP_TRK_TRACK_FLG_FSND) {
 			if (ent->first_send > ent->localtime)
 				ftim = ent->first_send;
 			else
 				ftim = ent->localtime;
 		} else {
 			/* TSNH */
 			ftim = ent->localtime;
 		}
		if (laa > ftim)
 			tim = laa - ftim;
 		else
 			tim = 0;
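		/*
		 * cbw ends up in bytes per second, e.g. (hypothetical
		 * numbers) 500,000 bytes delivered over tim = 250,000
		 * microseconds gives 500000 * HPTS_USEC_IN_SEC / 250000
		 * = 2,000,000 bytes/sec.
		 */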
 		cbw = data * HPTS_USEC_IN_SEC;
 		if (tim > 0)
 			cbw /= tim;
 		else
 			cbw = 0;
 		rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__);
 		/*
 		 * Check to see if we are freeing what we are pointing to send wise
 		 * if so be sure to NULL the pointer so we know we are no longer
 		 * set to anything.
 		 */
 		if (ent == rack->r_ctl.rc_last_sft)
 			rack->r_ctl.rc_last_sft = NULL;
 		/* Generate the log that the tcp_netflix call would have */
 		tcp_req_log_req_info(rack->rc_tp, ent,
 				      i, TCP_TRK_REQ_LOG_FREED, 0, 0);
 		/* Free it and see if there is another one */
 		tcp_req_free_a_slot(rack->rc_tp, ent);
 		ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
 	}
 }
 #endif
 
 
 /*
  * Return value of 1, we do not need to call rack_process_data().
  * return value of 0, rack_process_data can be called.
  * For ret_val if its 0 the TCP is locked, if its non-zero
  * its unlocked and probably unsafe to touch the TCB.
  */
 static int
 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to,
     uint32_t tiwin, int32_t tlen,
     int32_t * ofia, int32_t thflags, int32_t *ret_val)
 {
 	int32_t ourfinisacked = 0;
 	int32_t nsegs, acked_amount;
 	int32_t acked;
 	struct mbuf *mfree;
 	struct tcp_rack *rack;
 	int32_t under_pacing = 0;
 	int32_t recovery = 0;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (SEQ_GT(th->th_ack, tp->snd_max)) {
 		__ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
 				      &rack->r_ctl.challenge_ack_ts,
 				      &rack->r_ctl.challenge_ack_cnt);
 		rack->r_wanted_output = 1;
 		return (1);
 	}
 	if (rack->gp_ready &&
 	    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
 		under_pacing = 1;
 	}
 	if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
 		int in_rec, dup_ack_struck = 0;
 		int dsack_seen = 0, sacks_seen = 0;
 
 		in_rec = IN_FASTRECOVERY(tp->t_flags);
 		if (rack->rc_in_persist) {
 			tp->t_rxtshift = 0;
 			RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 				      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
 		}
 
 		if ((th->th_ack == tp->snd_una) &&
 		    (tiwin == tp->snd_wnd) &&
 		    ((to->to_flags & TOF_SACK) == 0)) {
 			rack_strike_dupack(rack);
 			dup_ack_struck = 1;
 		}
 		rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)),
 			     dup_ack_struck, &dsack_seen, &sacks_seen);
 		if ((rack->sack_attack_disable > 0) &&
 		    (th->th_ack == tp->snd_una) &&
 		    (tiwin == tp->snd_wnd) &&
 		    (dsack_seen == 0) &&
 		    (sacks_seen > 0)) {
 			/*
 			 * If sacks have been disabled we may
 			 * want to strike a dup-ack "ignoring" the
 			 * sack as long as the sack was not a "dsack". Note
 			 * that if no sack is sent (TOF_SACK is off) then the
			 * normal dup-ack code above rack_log_ack() would have
			 * already struck. So this is just to catch the case
			 * where we are ignoring sacks from this guy due to
 			 * it being a suspected attacker.
 			 */
 			rack_strike_dupack(rack);
 		}
 
 	}
 	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
 		/*
		 * Old ack, behind (or duplicate to) the last one rcv'd.
		 * Note: We mark reordering as occurring if it is
		 * less than and we have not closed our window.
 		 */
 		if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
 			rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
 			if (rack->r_ctl.rc_reorder_ts == 0)
 				rack->r_ctl.rc_reorder_ts = 1;
 		}
 		return (0);
 	}
 	/*
 	 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
 	 * something we sent.
 	 */
 	if (tp->t_flags & TF_NEEDSYN) {
 		/*
 		 * T/TCP: Connection was half-synchronized, and our SYN has
 		 * been ACK'd (so connection is now fully synchronized).  Go
 		 * to non-starred state, increment snd_una for ACK of SYN,
 		 * and check if we can do window scaling.
 		 */
 		tp->t_flags &= ~TF_NEEDSYN;
 		tp->snd_una++;
 		/* Do window scaling? */
 		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
 		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
 			tp->rcv_scale = tp->request_r_scale;
 			/* Send window already scaled. */
 		}
 	}
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
 
 	acked = BYTES_THIS_ACK(tp, th);
 	if (acked) {
 		/*
 		 * Any time we move the cum-ack forward clear
 		 * keep-alive tied probe-not-answered. The
 		 * persists clears its own on entry.
 		 */
 		rack->probe_not_answered = 0;
 	}
 	KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
 	KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
 	/*
 	 * If we just performed our first retransmit, and the ACK arrives
 	 * within our recovery window, then it was a mistake to do the
 	 * retransmit in the first place.  Recover our original cwnd and
 	 * ssthresh, and proceed to transmit where we left off.
 	 */
 	if ((tp->t_flags & TF_PREVVALID) &&
 	    ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
 		tp->t_flags &= ~TF_PREVVALID;
 		if (tp->t_rxtshift == 1 &&
 		    (int)(ticks - tp->t_badrxtwin) < 0)
 			rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
 	}
 	if (acked) {
 		/* assure we are not backed off */
 		tp->t_rxtshift = 0;
 		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 			      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
 		rack->rc_tlp_in_progress = 0;
 		rack->r_ctl.rc_tlp_cnt_out = 0;
 		/*
 		 * If it is the RXT timer we want to
 		 * stop it, so we can restart a TLP.
 		 */
 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
 			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 #ifdef TCP_REQUEST_TRK
 		rack_req_check_for_comp(rack, th->th_ack);
 #endif
 	}
 	/*
 	 * If we have a timestamp reply, update smoothed round trip time. If
 	 * no timestamp is present but transmit timer is running and timed
 	 * sequence number was acked, update smoothed round trip time. Since
 	 * we now have an rtt measurement, cancel the timer backoff (cf.,
 	 * Phil Karn's retransmit alg.). Recompute the initial retransmit
 	 * timer.
 	 *
 	 * Some boxes send broken timestamp replies during the SYN+ACK
 	 * phase, ignore timestamps of 0 or we could calculate a huge RTT
 	 * and blow up the retransmit timer.
 	 */
 	/*
 	 * If all outstanding data is acked, stop retransmit timer and
 	 * remember to restart (more output or persist). If there is more
 	 * data to be acked, restart retransmit timer, using current
 	 * (possibly backed-off) value.
 	 */
 	if (acked == 0) {
 		if (ofia)
 			*ofia = ourfinisacked;
 		return (0);
 	}
 	if (IN_RECOVERY(tp->t_flags)) {
 		if (SEQ_LT(th->th_ack, tp->snd_recover) &&
 		    (SEQ_LT(th->th_ack, tp->snd_max))) {
 			tcp_rack_partialack(tp);
 		} else {
 			rack_post_recovery(tp, th->th_ack);
 			recovery = 1;
 		}
 	}
 	/*
 	 * Let the congestion control algorithm update congestion control
 	 * related information. This typically means increasing the
 	 * congestion window.
 	 */
 	rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery);
 	SOCKBUF_LOCK(&so->so_snd);
 	acked_amount = min(acked, (int)sbavail(&so->so_snd));
 	tp->snd_wnd -= acked_amount;
 	mfree = sbcut_locked(&so->so_snd, acked_amount);
 	if ((sbused(&so->so_snd) == 0) &&
 	    (acked > acked_amount) &&
 	    (tp->t_state >= TCPS_FIN_WAIT_1) &&
 	    (tp->t_flags & TF_SENTFIN)) {
 		/*
 		 * We must be sure our fin
 		 * was sent and acked (we can be
 		 * in FIN_WAIT_1 without having
 		 * sent the fin).
 		 */
 		ourfinisacked = 1;
 	}
 	tp->snd_una = th->th_ack;
 	/* wakeups? */
 	if (acked_amount && sbavail(&so->so_snd))
 		rack_adjust_sendmap_head(rack, &so->so_snd);
 	rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
 	/* NB: sowwakeup_locked() does an implicit unlock. */
 	sowwakeup_locked(so);
 	/* now check the rxt clamps */
 	if ((recovery == 1) &&
 	    (rack->excess_rxt_on) &&
 	    (rack->r_cwnd_was_clamped == 0))  {
 		do_rack_excess_rxt(tp, rack);
 	} else if (rack->r_cwnd_was_clamped)
 		do_rack_check_for_unclamp(tp, rack);
 	m_freem(mfree);
 	if (SEQ_GT(tp->snd_una, tp->snd_recover))
 		tp->snd_recover = tp->snd_una;
 
 	if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
 		tp->snd_nxt = tp->snd_max;
 	}
 	if (under_pacing &&
 	    (rack->use_fixed_rate == 0) &&
 	    (rack->in_probe_rtt == 0) &&
 	    rack->rc_gp_dyn_mul &&
 	    rack->rc_always_pace) {
 		/* Check if we are dragging bottom */
 		rack_check_bottom_drag(tp, rack, so);
 	}
 	if (tp->snd_una == tp->snd_max) {
 		/* Nothing left outstanding */
 		tp->t_flags &= ~TF_PREVVALID;
 		rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
 		rack->r_ctl.retran_during_recovery = 0;
 		rack->r_ctl.dsack_byte_cnt = 0;
 		if (rack->r_ctl.rc_went_idle_time == 0)
 			rack->r_ctl.rc_went_idle_time = 1;
 		rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
 		if (sbavail(&tptosocket(tp)->so_snd) == 0)
 			tp->t_acktime = 0;
 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 		rack->rc_suspicious = 0;
 		/* Set need output so persist might get set */
 		rack->r_wanted_output = 1;
 		sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
 		if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
 		    (sbavail(&so->so_snd) == 0) &&
 		    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
 			/*
 			 * The socket was gone and the
 			 * peer sent data (now or in the past), time to
 			 * reset him.
 			 */
 			*ret_val = 1;
 			/* tcp_close will kill the inp pre-log the Reset */
 			tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
 			tp = tcp_close(tp);
 			ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
 			return (1);
 		}
 	}
 	if (ofia)
 		*ofia = ourfinisacked;
 	return (0);
 }
 
 
 static void
 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line,
 		  int dir, uint32_t flags, struct rack_sendmap *rsm)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = cnt;
 		log.u_bbr.flex2 = split;
 		log.u_bbr.flex3 = out;
 		log.u_bbr.flex4 = line;
 		log.u_bbr.flex5 = rack->r_must_retran;
 		log.u_bbr.flex6 = flags;
 		log.u_bbr.flex7 = rack->rc_has_collapsed;
 		log.u_bbr.flex8 = dir;	/*
 					 * 1 is collapsed, 0 is uncollapsed,
 					 * 2 is log of a rsm being marked, 3 is a split.
 					 */
 		if (rsm == NULL)
 			log.u_bbr.rttProp = 0;
 		else
 			log.u_bbr.rttProp = (uint64_t)rsm;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    TCP_RACK_LOG_COLLAPSE, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static void
 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line)
 {
 	/*
 	 * Here all we do is mark the collapsed point and set the flag.
 	 * This may happen again and again, but there is no
 	 * sense splitting our map until we know where the
 	 * peer finally lands in the collapse.
 	 */
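	/*
	 * For example (hypothetical numbers): if th_ack is 1000 and the
	 * advertised window shrank to 5000, last_collapse_point becomes
	 * 6000; anything already sent beyond 6000 (up to snd_max) is
	 * what the peer has pulled back out from under us.
	 */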
 	tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
 	if ((rack->rc_has_collapsed == 0) ||
 	    (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd)))
 		counter_u64_add(rack_collapsed_win_seen, 1);
 	rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd;
 	rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max;
 	rack->rc_has_collapsed = 1;
 	rack->r_collapse_point_valid = 1;
 	rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL);
 }
 
 static void
 rack_un_collapse_window(struct tcp_rack *rack, int line)
 {
 	struct rack_sendmap *nrsm, *rsm;
 	int cnt = 0, split = 0;
 	int insret __diagused;
 
 
 	tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
 	rack->rc_has_collapsed = 0;
 	rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
 	if (rsm == NULL) {
 		/* Nothing to do maybe the peer ack'ed it all */
 		rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
 		return;
 	}
 	/* Now do we need to split this one? */
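	/*
	 * e.g. (hypothetical) an rsm covering 5000-8000 with a
	 * last_collapse_point of 6000 is split so that only the
	 * 6000-8000 portion gets marked RACK_RWND_COLLAPSED below.
	 */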
 	if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) {
 		rack_log_collapse(rack, rsm->r_start, rsm->r_end,
 				  rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm);
 		nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
 		if (nrsm == NULL) {
 			/* We can't get a rsm, mark all? */
 			nrsm = rsm;
 			goto no_split;
 		}
 		/* Clone it */
 		split = 1;
 		rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point);
 #ifndef INVARIANTS
 		(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
 #else
 		if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
 			panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
 			      nrsm, insret, rack, rsm);
 		}
 #endif
 		rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT,
 				 rack->r_ctl.last_collapse_point, __LINE__);
 		if (rsm->r_in_tmap) {
 			TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
 			nrsm->r_in_tmap = 1;
 		}
 		/*
 		 * Set in the new RSM as the
 		 * collapsed starting point
 		 */
 		rsm = nrsm;
 	}
 
 no_split:
 	TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm)  {
 		cnt++;
 		nrsm->r_flags |= RACK_RWND_COLLAPSED;
 		rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm);
 	}
 	if (cnt) {
 		counter_u64_add(rack_collapsed_win, 1);
 	}
 	rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
 }
 
 static void
 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack,
 			int32_t tlen, int32_t tfo_syn)
 {
 	if (DELAY_ACK(tp, tlen) || tfo_syn) {
 		rack_timer_cancel(tp, rack,
 				  rack->r_ctl.rc_rcvtime, __LINE__);
 		tp->t_flags |= TF_DELACK;
 	} else {
 		rack->r_wanted_output = 1;
 		tp->t_flags |= TF_ACKNOW;
 	}
 }
 
 static void
 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	/*
 	 * If fast output is in progress, lets validate that
 	 * the new window did not shrink on us and make it
 	 * so fast output should end.
 	 */
 	if (rack->r_fast_output) {
 		uint32_t out;
 
 		/*
 		 * Calculate what we will send if left as is
 		 * and compare that to our send window.
 		 */
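		/*
		 * Illustrative example (hypothetical numbers): with
		 * out = 8000 outstanding, left_to_send = 6000 and a
		 * shrunken snd_wnd of 10000, 8000 + 6000 > 10000, so
		 * left_to_send is trimmed to 10000 - 8000 = 2000; if
		 * that is less than one full segment, fast output is
		 * turned off entirely.
		 */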
 		out = ctf_outstanding(tp);
 		if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
 			/* ok we have an issue */
 			if (out >= tp->snd_wnd) {
 				/* Turn off fast output the window is met or collapsed */
 				rack->r_fast_output = 0;
 			} else {
 				/* we have some room left */
 				rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
 				if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
 					/* If not at least 1 full segment never mind */
 					rack->r_fast_output = 0;
 				}
 			}
 		}
 	}
 }
 
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt)
 {
 	/*
 	 * Update window information. Don't look at window if no ACK: TAC's
 	 * send garbage on first SYN.
 	 */
 	int32_t nsegs;
 	int32_t tfo_syn;
 	struct tcp_rack *rack;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
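	/*
	 * The window is taken only from segments that pass the usual
	 * snd_wl1/snd_wl2 test: a newer sequence, or the same sequence
	 * with a newer ack, or the same sequence and ack with a larger
	 * advertised window (a pure window update), and only on
	 * segments carrying an ACK.
	 */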
 	if ((thflags & TH_ACK) &&
 	    (SEQ_LT(tp->snd_wl1, th->th_seq) ||
 	    (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
 	    (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
 		/* keep track of pure window updates */
 		if (tlen == 0 &&
 		    tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
 			KMOD_TCPSTAT_INC(tcps_rcvwinupd);
 		tp->snd_wnd = tiwin;
 		rack_validate_fo_sendwin_up(tp, rack);
 		tp->snd_wl1 = th->th_seq;
 		tp->snd_wl2 = th->th_ack;
 		if (tp->snd_wnd > tp->max_sndwnd)
 			tp->max_sndwnd = tp->snd_wnd;
 		rack->r_wanted_output = 1;
 	} else if (thflags & TH_ACK) {
 		if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
 			tp->snd_wnd = tiwin;
 			rack_validate_fo_sendwin_up(tp, rack);
 			tp->snd_wl1 = th->th_seq;
 			tp->snd_wl2 = th->th_ack;
 		}
 	}
 	if (tp->snd_wnd < ctf_outstanding(tp))
 		/* The peer collapsed the window */
 		rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__);
 	else if (rack->rc_has_collapsed)
 		rack_un_collapse_window(rack, __LINE__);
 	if ((rack->r_collapse_point_valid) &&
 	    (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point)))
 		rack->r_collapse_point_valid = 0;
 	/* Was persist timer active and now we have window space? */
 	if ((rack->rc_in_persist != 0) &&
 	    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
 				rack->r_ctl.rc_pace_min_segs))) {
 		rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
 		tp->snd_nxt = tp->snd_max;
 		/* Make sure we output to start the timer */
 		rack->r_wanted_output = 1;
 	}
 	/* Do we enter persists? */
 	if ((rack->rc_in_persist == 0) &&
 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
 	    sbavail(&tptosocket(tp)->so_snd) &&
 	    (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
 		/*
 		 * Here the rwnd is less than
 		 * the pacing size, we are established,
 		 * nothing is outstanding, and there is
 		 * data to send. Enter persists.
 		 */
 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
 	}
 	if (tp->t_flags2 & TF2_DROP_AF_DATA) {
 		m_freem(m);
 		return (0);
 	}
 	/*
	 * Don't process the URG bit; ignore it and drag
	 * along the urgent pointer (rcv_up).
 	 */
 	tp->rcv_up = tp->rcv_nxt;
 
 	/*
 	 * Process the segment text, merging it into the TCP sequencing
 	 * queue, and arranging for acknowledgment of receipt if necessary.
 	 * This process logically involves adjusting tp->rcv_wnd as data is
 	 * presented to the user (this happens in tcp_usrreq.c, case
 	 * PRU_RCVD).  If a FIN has already been received on this connection
 	 * then we just ignore the text.
 	 */
 	tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
 		   IS_FASTOPEN(tp->t_flags));
 	if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
 	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
 		tcp_seq save_start = th->th_seq;
 		tcp_seq save_rnxt  = tp->rcv_nxt;
 		int     save_tlen  = tlen;
 
 		m_adj(m, drop_hdrlen);	/* delayed header drop */
 		/*
 		 * Insert segment which includes th into TCP reassembly
 		 * queue with control block tp.  Set thflags to whether
 		 * reassembly now includes a segment with FIN.  This handles
 		 * the common case inline (segment is the next to be
 		 * received on an established connection, and the queue is
 		 * empty), avoiding linkage into and removal from the queue
 		 * and repetition of various conversions. Set DELACK for
 		 * segments received in order, but ack immediately when
 		 * segments are out of order (so fast retransmit can work).
 		 */
 		if (th->th_seq == tp->rcv_nxt &&
 		    SEGQ_EMPTY(tp) &&
 		    (TCPS_HAVEESTABLISHED(tp->t_state) ||
 		    tfo_syn)) {
 #ifdef NETFLIX_SB_LIMITS
 			u_int mcnt, appended;
 
 			if (so->so_rcv.sb_shlim) {
 				mcnt = m_memcnt(m);
 				appended = 0;
 				if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
 				    CFO_NOSLEEP, NULL) == false) {
 					counter_u64_add(tcp_sb_shlim_fails, 1);
 					m_freem(m);
 					return (0);
 				}
 			}
 #endif
 			rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
 			tp->rcv_nxt += tlen;
 			if (tlen &&
 			    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
 			    (tp->t_fbyte_in == 0)) {
 				tp->t_fbyte_in = ticks;
 				if (tp->t_fbyte_in == 0)
 					tp->t_fbyte_in = 1;
 				if (tp->t_fbyte_out && tp->t_fbyte_in)
 					tp->t_flags2 |= TF2_FBYTES_COMPLETE;
 			}
 			thflags = tcp_get_flags(th) & TH_FIN;
 			KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
 			KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
 			SOCKBUF_LOCK(&so->so_rcv);
 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 				m_freem(m);
 			} else
 #ifdef NETFLIX_SB_LIMITS
 				appended =
 #endif
 					sbappendstream_locked(&so->so_rcv, m, 0);
 
 			rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
 			/* NB: sorwakeup_locked() does an implicit unlock. */
 			sorwakeup_locked(so);
 #ifdef NETFLIX_SB_LIMITS
 			if (so->so_rcv.sb_shlim && appended != mcnt)
 				counter_fo_release(so->so_rcv.sb_shlim,
 				    mcnt - appended);
 #endif
 		} else {
 			/*
 			 * XXX: Due to the header drop above "th" is
 			 * theoretically invalid by now.  Fortunately
			 * m_adj() doesn't actually free any mbufs when
 			 * trimming from the head.
 			 */
 			tcp_seq temp = save_start;
 
 			thflags = tcp_reass(tp, th, &temp, &tlen, m);
 			tp->t_flags |= TF_ACKNOW;
 			if (tp->t_flags & TF_WAKESOR) {
 				tp->t_flags &= ~TF_WAKESOR;
 				/* NB: sorwakeup_locked() does an implicit unlock. */
 				sorwakeup_locked(so);
 			}
 		}
 		if ((tp->t_flags & TF_SACK_PERMIT) &&
 		    (save_tlen > 0) &&
 		    TCPS_HAVEESTABLISHED(tp->t_state)) {
 			if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) {
 				/*
 				 * DSACK actually handled in the fastpath
 				 * above.
 				 */
 				tcp_update_sack_list(tp, save_start,
 				    save_start + save_tlen);
 			} else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
 				if ((tp->rcv_numsacks >= 1) &&
 				    (tp->sackblks[0].end == save_start)) {
 					/*
 					 * Partial overlap, recorded at todrop
 					 * above.
 					 */
 					tcp_update_sack_list(tp,
 					    tp->sackblks[0].start,
 					    tp->sackblks[0].end);
 				} else {
 					tcp_update_dsack_list(tp, save_start,
 					    save_start + save_tlen);
 				}
 			} else if (tlen >= save_tlen) {
 				/* Update of sackblks. */
 				tcp_update_dsack_list(tp, save_start,
 				    save_start + save_tlen);
 			} else if (tlen > 0) {
 				tcp_update_dsack_list(tp, save_start,
 				    save_start + tlen);
 			}
 		}
 	} else {
 		m_freem(m);
 		thflags &= ~TH_FIN;
 	}
 
 	/*
 	 * If FIN is received ACK the FIN and let the user know that the
 	 * connection is closing.
 	 */
 	if (thflags & TH_FIN) {
 		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
 			/* The socket upcall is handled by socantrcvmore. */
 			socantrcvmore(so);
 			/*
 			 * If connection is half-synchronized (ie NEEDSYN
 			 * flag on) then delay ACK, so it may be piggybacked
 			 * when SYN is sent. Otherwise, since we received a
 			 * FIN then no more input can be expected, send ACK
 			 * now.
 			 */
 			if (tp->t_flags & TF_NEEDSYN) {
 				rack_timer_cancel(tp, rack,
 				    rack->r_ctl.rc_rcvtime, __LINE__);
 				tp->t_flags |= TF_DELACK;
 			} else {
 				tp->t_flags |= TF_ACKNOW;
 			}
 			tp->rcv_nxt++;
 		}
 		switch (tp->t_state) {
 			/*
 			 * In SYN_RECEIVED and ESTABLISHED STATES enter the
 			 * CLOSE_WAIT state.
 			 */
 		case TCPS_SYN_RECEIVED:
 			tp->t_starttime = ticks;
 			/* FALLTHROUGH */
 		case TCPS_ESTABLISHED:
 			rack_timer_cancel(tp, rack,
 			    rack->r_ctl.rc_rcvtime, __LINE__);
 			tcp_state_change(tp, TCPS_CLOSE_WAIT);
 			break;
 
 			/*
 			 * If still in FIN_WAIT_1 STATE FIN has not been
 			 * acked so enter the CLOSING state.
 			 */
 		case TCPS_FIN_WAIT_1:
 			rack_timer_cancel(tp, rack,
 			    rack->r_ctl.rc_rcvtime, __LINE__);
 			tcp_state_change(tp, TCPS_CLOSING);
 			break;
 
 			/*
 			 * In FIN_WAIT_2 state enter the TIME_WAIT state,
 			 * starting the time-wait timer, turning off the
 			 * other standard timers.
 			 */
 		case TCPS_FIN_WAIT_2:
 			rack_timer_cancel(tp, rack,
 			    rack->r_ctl.rc_rcvtime, __LINE__);
 			tcp_twstart(tp);
 			return (1);
 		}
 	}
 	/*
 	 * Return any desired output.
 	 */
 	if ((tp->t_flags & TF_ACKNOW) ||
 	    (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
 		rack->r_wanted_output = 1;
 	}
 	return (0);
 }
 
 /*
  * Here nothing is really faster, its just that we
  * have broken out the fast-data path also just like
  * the fast-ack.
  */
 static int
 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t nsegs;
 	int32_t newsize = 0;	/* automatic sockbuf scaling */
 	struct tcp_rack *rack;
 #ifdef NETFLIX_SB_LIMITS
 	u_int mcnt, appended;
 #endif
 
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * the timestamp. NOTE that the test is modified according to the
 	 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
 	 */
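	/*
	 * Fast path only for the common case: next expected sequence,
	 * nothing being retransmitted, an unchanged window, no pending
	 * SYN/FIN work, a timestamp that is not going backwards, an ACK
	 * equal to snd_una (nothing newly acked), and room in the
	 * receive buffer. Anything else falls back to the slow path by
	 * returning 0.
	 */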
 	if (__predict_false(th->th_seq != tp->rcv_nxt)) {
 		return (0);
 	}
 	if (__predict_false(tp->snd_nxt != tp->snd_max)) {
 		return (0);
 	}
 	if (tiwin && tiwin != tp->snd_wnd) {
 		return (0);
 	}
 	if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
 		return (0);
 	}
 	if (__predict_false((to->to_flags & TOF_TS) &&
 	    (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
 		return (0);
 	}
 	if (__predict_false((th->th_ack != tp->snd_una))) {
 		return (0);
 	}
 	if (__predict_false(tlen > sbspace(&so->so_rcv))) {
 		return (0);
 	}
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	/*
 	 * This is a pure, in-sequence data packet with nothing on the
 	 * reassembly queue and we have enough buffer space to take it.
 	 */
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
 
 #ifdef NETFLIX_SB_LIMITS
 	if (so->so_rcv.sb_shlim) {
 		mcnt = m_memcnt(m);
 		appended = 0;
 		if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
 		    CFO_NOSLEEP, NULL) == false) {
 			counter_u64_add(tcp_sb_shlim_fails, 1);
 			m_freem(m);
 			return (1);
 		}
 	}
 #endif
 	/* Clean receiver SACK report if present */
 	if (tp->rcv_numsacks)
 		tcp_clean_sackreport(tp);
 	KMOD_TCPSTAT_INC(tcps_preddat);
 	tp->rcv_nxt += tlen;
 	if (tlen &&
 	    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
 	    (tp->t_fbyte_in == 0)) {
 		tp->t_fbyte_in = ticks;
 		if (tp->t_fbyte_in == 0)
 			tp->t_fbyte_in = 1;
 		if (tp->t_fbyte_out && tp->t_fbyte_in)
 			tp->t_flags2 |= TF2_FBYTES_COMPLETE;
 	}
 	/*
 	 * Pull snd_wl1 up to prevent seq wrap relative to th_seq.
 	 */
 	tp->snd_wl1 = th->th_seq;
 	/*
 	 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt.
 	 */
 	tp->rcv_up = tp->rcv_nxt;
 	KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
 	KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
 	newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
 
 	/* Add data to socket buffer. */
 	SOCKBUF_LOCK(&so->so_rcv);
 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 		m_freem(m);
 	} else {
 		/*
 		 * Set new socket buffer size. Give up when limit is
 		 * reached.
 		 */
 		if (newsize)
 			if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
 				so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
 		m_adj(m, drop_hdrlen);	/* delayed header drop */
 #ifdef NETFLIX_SB_LIMITS
 		appended =
 #endif
 			sbappendstream_locked(&so->so_rcv, m, 0);
 		ctf_calc_rwin(so, tp);
 	}
 	rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1);
 	/* NB: sorwakeup_locked() does an implicit unlock. */
 	sorwakeup_locked(so);
 #ifdef NETFLIX_SB_LIMITS
 	if (so->so_rcv.sb_shlim && mcnt != appended)
 		counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
 #endif
 	rack_handle_delayed_ack(tp, rack, tlen, 0);
 	if (tp->snd_una == tp->snd_max)
 		sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
 	return (1);
 }
 
 /*
  * This subfunction is used to try to highly optimize the
  * fast path. We again allow window updates that are
  * in sequence to remain in the fast-path. We also add
  * in the __predict's to attempt to help the compiler.
  * Note that if we return a 0, then we can *not* process
  * it and the caller should push the packet into the
  * slow-path.
  */
 static int
 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t nxt_pkt, uint32_t cts)
 {
 	int32_t acked;
 	int32_t nsegs;
 	int32_t under_pacing = 0;
 	struct tcp_rack *rack;
 
 	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
 		/* Old ack, behind (or duplicate to) the last one rcv'd */
 		return (0);
 	}
 	if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
 		/* Above what we have sent? */
 		return (0);
 	}
 	if (__predict_false(tp->snd_nxt != tp->snd_max)) {
 		/* We are retransmitting */
 		return (0);
 	}
 	if (__predict_false(tiwin == 0)) {
 		/* zero window */
 		return (0);
 	}
 	if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
 		/* We need a SYN or a FIN, unlikely. */
 		return (0);
 	}
 	if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
 		/* Timestamp is behind; old ack with seq wrap? */
 		return (0);
 	}
 	if (__predict_false(IN_RECOVERY(tp->t_flags))) {
 		/* Still recovering */
 		return (0);
 	}
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (rack->r_ctl.rc_sacked) {
 		/* We have sack holes on our scoreboard */
 		return (0);
 	}
 	/* Ok if we reach here, we can process a fast-ack */
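 	/*
 	 * Remember whether we were actively pacing output when this
 	 * ack arrived; it is used further down to decide whether to
 	 * check for dragging the bottom of the pacing rate.
 	 */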
 	if (rack->gp_ready &&
 	    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
 		under_pacing = 1;
 	}
 	nsegs = max(1, m->m_pkthdr.lro_nsegs);
 	rack_log_ack(tp, to, th, 0, 0, NULL, NULL);
 	/* Did the window get updated? */
 	if (tiwin != tp->snd_wnd) {
 		tp->snd_wnd = tiwin;
 		rack_validate_fo_sendwin_up(tp, rack);
 		tp->snd_wl1 = th->th_seq;
 		if (tp->snd_wnd > tp->max_sndwnd)
 			tp->max_sndwnd = tp->snd_wnd;
 	}
 	/* Do we exit persists? */
 	if ((rack->rc_in_persist != 0) &&
 	    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
 			       rack->r_ctl.rc_pace_min_segs))) {
 		rack_exit_persist(tp, rack, cts);
 	}
 	/* Do we enter persists? */
 	if ((rack->rc_in_persist == 0) &&
 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
 	    sbavail(&tptosocket(tp)->so_snd) &&
 	    (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
 		/*
 		 * Here the rwnd is less than
 		 * the pacing size, we are established,
 		 * nothing is outstanding, and there is
 		 * data to send. Enter persists.
 		 */
 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack);
 	}
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * the timestamp. NOTE that the test is modified according to the
 	 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26).
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * This is a pure ack for outstanding data.
 	 */
 	KMOD_TCPSTAT_INC(tcps_predack);
 
 	/*
 	 * "bad retransmit" recovery.
 	 */
 	if ((tp->t_flags & TF_PREVVALID) &&
 	    ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
 		tp->t_flags &= ~TF_PREVVALID;
 		if (tp->t_rxtshift == 1 &&
 		    (int)(ticks - tp->t_badrxtwin) < 0)
 			rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
 	}
 	/*
 	 * Recalculate the transmit timer / rtt.
 	 *
 	 * Some boxes send broken timestamp replies during the SYN+ACK
 	 * phase; ignore timestamps of 0 or we could calculate a huge RTT
 	 * and blow up the retransmit timer.
 	 */
 	acked = BYTES_THIS_ACK(tp, th);
 
 #ifdef TCP_HHOOK
 	/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
 	hhook_run_tcp_est_in(tp, th, to);
 #endif
 	KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
 	KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
 	if (acked) {
 		struct mbuf *mfree;
 
 		rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
 		SOCKBUF_LOCK(&so->so_snd);
 		mfree = sbcut_locked(&so->so_snd, acked);
 		tp->snd_una = th->th_ack;
 		/* Note we want to hold the sb lock through the sendmap adjust */
 		rack_adjust_sendmap_head(rack, &so->so_snd);
 		/* Wake up the socket if we have room to write more */
 		rack_log_wakeup(tp, rack, &so->so_snd, acked, 2);
 		sowwakeup_locked(so);
 		m_freem(mfree);
 		tp->t_rxtshift = 0;
 		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 			      rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
 		rack->rc_tlp_in_progress = 0;
 		rack->r_ctl.rc_tlp_cnt_out = 0;
 		/*
 		 * If it is the RXT timer we want to
 		 * stop it, so we can restart a TLP.
 		 */
 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
 			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 
 #ifdef TCP_REQUEST_TRK
 		rack_req_check_for_comp(rack, th->th_ack);
 #endif
 	}
 	/*
 	 * The peer may have shrunk (collapsed) its advertised window
 	 * below what we have outstanding; track when we enter and
 	 * leave that state.
 	 */
 	if (tp->snd_wnd < ctf_outstanding(tp)) {
 		/* The peer collapsed the window */
 		rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__);
 	} else if (rack->rc_has_collapsed)
 		rack_un_collapse_window(rack, __LINE__);
 	if ((rack->r_collapse_point_valid) &&
 	    (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point)))
 		rack->r_collapse_point_valid = 0;
 	/*
 	 * Pull snd_wl2 up to prevent seq wrap relative to th_ack.
 	 */
 	tp->snd_wl2 = th->th_ack;
 	tp->t_dupacks = 0;
 	m_freem(m);
 	/* ND6_HINT(tp); */	/* Some progress has been made. */
 
 	/*
 	 * If all outstanding data are acked, stop retransmit timer,
 	 * otherwise restart timer using current (possibly backed-off)
 	 * value. If process is waiting for space, wakeup/selwakeup/signal.
 	 * If data are ready to send, let tcp_output decide between more
 	 * output or persist.
 	 */
 	if (under_pacing &&
 	    (rack->use_fixed_rate == 0) &&
 	    (rack->in_probe_rtt == 0) &&
 	    rack->rc_gp_dyn_mul &&
 	    rack->rc_always_pace) {
 		/* Check if we are dragging bottom */
 		rack_check_bottom_drag(tp, rack, so);
 	}
 	if (tp->snd_una == tp->snd_max) {
 		tp->t_flags &= ~TF_PREVVALID;
 		rack->r_ctl.retran_during_recovery = 0;
 		rack->rc_suspicious = 0;
 		rack->r_ctl.dsack_byte_cnt = 0;
 		rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
 		if (rack->r_ctl.rc_went_idle_time == 0)
 			rack->r_ctl.rc_went_idle_time = 1;
 		rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
 		if (sbavail(&tptosocket(tp)->so_snd) == 0)
 			tp->t_acktime = 0;
 		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 	}
 	if (acked && rack->r_fast_output)
 		rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked);
 	if (sbavail(&so->so_snd)) {
 		rack->r_wanted_output = 1;
 	}
 	return (1);
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ret_val = 0;
 	int32_t todrop;
 	int32_t ourfinisacked = 0;
 	struct tcp_rack *rack;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	ctf_calc_rwin(so, tp);
 	/*
 	 * If the state is SYN_SENT:
 	 *	if seg contains an ACK, but not for our SYN, drop the input;
 	 *	if seg contains a RST, then drop the connection;
 	 *	if seg does not contain SYN, then drop it.
 	 * Otherwise this is an acceptable SYN segment:
 	 *	initialize tp->rcv_nxt and tp->irs;
 	 *	if seg contains an ack then advance tp->snd_una;
 	 *	if seg contains an ECE and ECN support is enabled, the stream
 	 *	    is ECN capable;
 	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state;
 	 *	arrange for segment to be acked (eventually);
 	 *	continue processing rest of data/controls.
 	 */
 	if ((thflags & TH_ACK) &&
 	    (SEQ_LEQ(th->th_ack, tp->iss) ||
 	    SEQ_GT(th->th_ack, tp->snd_max))) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 		return (1);
 	}
 	if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
 		TCP_PROBE5(connect__refused, NULL, tp,
 		    mtod(m, const char *), tp, th);
 		tp = tcp_drop(tp, ECONNREFUSED);
 		ctf_do_drop(m, tp);
 		return (1);
 	}
 	if (thflags & TH_RST) {
 		ctf_do_drop(m, tp);
 		return (1);
 	}
 	if (!(thflags & TH_SYN)) {
 		ctf_do_drop(m, tp);
 		return (1);
 	}
 	tp->irs = th->th_seq;
 	tcp_rcvseqinit(tp);
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (thflags & TH_ACK) {
 		int tfo_partial = 0;
 
 		KMOD_TCPSTAT_INC(tcps_connects);
 		soisconnected(so);
 #ifdef MAC
 		mac_socketpeer_set_from_mbuf(m, so);
 #endif
 		/* Do window scaling on this connection? */
 		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
 		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
 			tp->rcv_scale = tp->request_r_scale;
 		}
 		tp->rcv_adv += min(tp->rcv_wnd,
 		    TCP_MAXWIN << tp->rcv_scale);
 		/*
 		 * If not all the data that was sent in the TFO SYN
 		 * has been acked, resend the remainder right away.
 		 */
 		if (IS_FASTOPEN(tp->t_flags) &&
 		    (tp->snd_una != tp->snd_max)) {
 			tp->snd_nxt = th->th_ack;
 			tfo_partial = 1;
 		}
 		/*
 		 * If there's data, delay ACK; if there's also a FIN, ACKNOW
 		 * will be turned on later.
 		 */
 		if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) {
 			rack_timer_cancel(tp, rack,
 					  rack->r_ctl.rc_rcvtime, __LINE__);
 			tp->t_flags |= TF_DELACK;
 		} else {
 			rack->r_wanted_output = 1;
 			tp->t_flags |= TF_ACKNOW;
 		}
 
 		tcp_ecn_input_syn_sent(tp, thflags, iptos);
 
 		if (SEQ_GT(th->th_ack, tp->snd_una)) {
 			/*
 			 * We advance snd_una for the
 			 * fast open case. If th_ack is
 			 * acknowledging data beyond
 			 * snd_una we can't just call
 			 * ack-processing since the
 			 * data stream in our send-map
 			 * will start at snd_una + 1 (one
 			 * beyond the SYN). If it's just
 			 * equal we don't need to do that
 			 * and there is no send map.
 			 */
 			tp->snd_una++;
 		}
 		/*
 		 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions:
 		 * SYN_SENT --> ESTABLISHED, SYN_SENT* --> FIN_WAIT_1
 		 */
 		tp->t_starttime = ticks;
 		if (tp->t_flags & TF_NEEDFIN) {
 			tcp_state_change(tp, TCPS_FIN_WAIT_1);
 			tp->t_flags &= ~TF_NEEDFIN;
 			thflags &= ~TH_SYN;
 		} else {
 			tcp_state_change(tp, TCPS_ESTABLISHED);
 			TCP_PROBE5(connect__established, NULL, tp,
 			    mtod(m, const char *), tp, th);
 			rack_cc_conn_init(tp);
 		}
 	} else {
 		/*
 		 * Received initial SYN in SYN-SENT[*] state => simultaneous
 		 * open.  If segment contains CC option and there is a
 		 * cached CC, apply TAO test. If it succeeds, connection is
 		 * half-synchronized. Otherwise, do 3-way handshake:
 		 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If
 		 * there was no CC option, clear cached CC value.
 		 */
 		tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
 		tcp_state_change(tp, TCPS_SYN_RECEIVED);
 	}
 	/*
 	 * Advance th->th_seq to correspond to first data byte. If data,
 	 * trim to stay within window, dropping FIN if necessary.
 	 */
 	th->th_seq++;
 	if (tlen > tp->rcv_wnd) {
 		todrop = tlen - tp->rcv_wnd;
 		m_adj(m, -todrop);
 		tlen = tp->rcv_wnd;
 		thflags &= ~TH_FIN;
 		KMOD_TCPSTAT_INC(tcps_rcvpackafterwin);
 		KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
 	}
 	tp->snd_wl1 = th->th_seq - 1;
 	tp->rcv_up = th->th_seq;
 	/*
 	 * Client side of transaction: already sent SYN and data. If the
 	 * remote host used T/TCP to validate the SYN, our data will be
 	 * ACK'd; if so, enter normal data segment processing in the middle
 	 * of step 5, ack processing. Otherwise, goto step 6.
 	 */
 	if (thflags & TH_ACK) {
 		/* For syn-sent we need to possibly update the rtt */
 		if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
 			uint32_t t, mcts;
 
 			mcts = tcp_ts_getticks();
 			t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
 			if (!tp->t_rttlow || tp->t_rttlow > t)
 				tp->t_rttlow = t;
 			rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
 			tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
 			tcp_rack_xmit_timer_commit(rack, tp);
 		}
 		if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
 			return (ret_val);
 		/* We may have changed to FIN_WAIT_1 above */
 		if (tp->t_state == TCPS_FIN_WAIT_1) {
 			/*
 			 * In FIN_WAIT_1 STATE in addition to the processing
 			 * for the ESTABLISHED state if our FIN is now
 			 * acknowledged then enter FIN_WAIT_2.
 			 */
 			if (ourfinisacked) {
 				/*
 				 * If we can't receive any more data, then
 				 * closing user can proceed. Starting the
 				 * timer is contrary to the specification,
 				 * but if we don't get a FIN we'll hang
 				 * forever.
 				 *
 				 * XXXjl: we should release the tp also, and
 				 * use a compressed state.
 				 */
 				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 					soisdisconnected(so);
 					tcp_timer_activate(tp, TT_2MSL,
 					    (tcp_fast_finwait2_recycle ?
 					    tcp_finwait2_timeout :
 					    TP_MAXIDLE(tp)));
 				}
 				tcp_state_change(tp, TCPS_FIN_WAIT_2);
 			}
 		}
 	}
 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	   tiwin, thflags, nxt_pkt));
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	struct tcp_rack *rack;
 	int32_t ret_val = 0;
 	int32_t ourfinisacked = 0;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (__ctf_process_rst(m, th, so, tp,
 					  &rack->r_ctl.challenge_ack_ts,
 					  &rack->r_ctl.challenge_ack_cnt));
 	if ((thflags & TH_ACK) &&
 	    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
 	    SEQ_GT(th->th_ack, tp->snd_max))) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 		return (1);
 	}
 	if (IS_FASTOPEN(tp->t_flags)) {
 		/*
 		 * When a TFO connection is in SYN_RECEIVED, the
 		 * only valid packets are the initial SYN, a
 		 * retransmit/copy of the initial SYN (possibly with
 		 * a subset of the original data), a valid ACK, a
 		 * FIN, or a RST.
 		 */
 		if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
 			tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		} else if (thflags & TH_SYN) {
 			/* non-initial SYN is ignored */
 			if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
 			    (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
 			    (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
 				ctf_do_drop(m, NULL);
 				return (0);
 			}
 		} else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	/*
 	 * In the SYN-RECEIVED state, validate that the packet belongs to
 	 * this connection before trimming the data to fit the receive
 	 * window.  Check the sequence number versus IRS since we know the
 	 * sequence numbers haven't wrapped.  This is a partial fix for the
 	 * "LAND" DoS attack.
 	 */
 	if (SEQ_LT(th->th_seq, tp->irs)) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 		return (1);
 	}
 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
 			      &rack->r_ctl.challenge_ack_ts,
 			      &rack->r_ctl.challenge_ack_cnt)) {
 		return (ret_val);
 	}
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	tp->snd_wnd = tiwin;
 	rack_validate_fo_sendwin_up(tp, rack);
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (IS_FASTOPEN(tp->t_flags)) {
 			rack_cc_conn_init(tp);
 		}
 		return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 		    tiwin, thflags, nxt_pkt));
 	}
 	KMOD_TCPSTAT_INC(tcps_connects);
 	if (tp->t_flags & TF_SONOTCONN) {
 		tp->t_flags &= ~TF_SONOTCONN;
 		soisconnected(so);
 	}
 	/* Do window scaling? */
 	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
 	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
 		tp->rcv_scale = tp->request_r_scale;
 	}
 	/*
 	 * Make transitions: SYN-RECEIVED  -> ESTABLISHED SYN-RECEIVED* ->
 	 * FIN-WAIT-1
 	 */
 	tp->t_starttime = ticks;
 	if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) {
 		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
 		tp->t_tfo_pending = NULL;
 	}
 	if (tp->t_flags & TF_NEEDFIN) {
 		tcp_state_change(tp, TCPS_FIN_WAIT_1);
 		tp->t_flags &= ~TF_NEEDFIN;
 	} else {
 		tcp_state_change(tp, TCPS_ESTABLISHED);
 		TCP_PROBE5(accept__established, NULL, tp,
 		    mtod(m, const char *), tp, th);
 		/*
 		 * TFO connections call cc_conn_init() during SYN
 		 * processing.  Calling it again here for such connections
 		 * is not harmless as it would undo the snd_cwnd reduction
 		 * that occurs when a TFO SYN|ACK is retransmitted.
 		 */
 		if (!IS_FASTOPEN(tp->t_flags))
 			rack_cc_conn_init(tp);
 	}
 	/*
 	 * Account for the ACK of our SYN prior to
 	 * regular ACK processing below, except for
 	 * simultaneous SYN, which is handled later.
 	 */
 	if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
 		tp->snd_una++;
 	/*
 	 * If segment contains data or ACK, will call tcp_reass() later; if
 	 * not, do so now to pass queued data to user.
 	 */
 	if (tlen == 0 && (thflags & TH_FIN) == 0) {
 		(void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0,
 		    (struct mbuf *)0);
 		if (tp->t_flags & TF_WAKESOR) {
 			tp->t_flags &= ~TF_WAKESOR;
 			/* NB: sorwakeup_locked() does an implicit unlock. */
 			sorwakeup_locked(so);
 		}
 	}
 	tp->snd_wl1 = th->th_seq - 1;
 	/* For syn-recv we need to possibly update the rtt */
 	if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
 		uint32_t t, mcts;
 
 		mcts = tcp_ts_getticks();
 		t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
 		if (!tp->t_rttlow || tp->t_rttlow > t)
 			tp->t_rttlow = t;
 		rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
 		tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
 		tcp_rack_xmit_timer_commit(rack, tp);
 	}
 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (tp->t_state == TCPS_FIN_WAIT_1) {
 		/* We could have gone to FIN_WAIT_1 (or EST) above */
 		/*
 		 * In FIN_WAIT_1 STATE in addition to the processing for the
 		 * ESTABLISHED state if our FIN is now acknowledged then
 		 * enter FIN_WAIT_2.
 		 */
 		if (ourfinisacked) {
 			/*
 			 * If we can't receive any more data, then closing
 			 * user can proceed. Starting the timer is contrary
 			 * to the specification, but if we don't get a FIN
 			 * we'll hang forever.
 			 *
 			 * XXXjl: we should release the tp also, and use a
 			 * compressed state.
 			 */
 			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 				soisdisconnected(so);
 				tcp_timer_activate(tp, TT_2MSL,
 				    (tcp_fast_finwait2_recycle ?
 				    tcp_finwait2_timeout :
 				    TP_MAXIDLE(tp)));
 			}
 			tcp_state_change(tp, TCPS_FIN_WAIT_2);
 		}
 	}
 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ret_val = 0;
 	struct tcp_rack *rack;
 
 	/*
 	 * Header prediction: check for the two common cases of a
 	 * uni-directional data xfer.  If the packet has no control flags,
 	 * is in-sequence, the window didn't change and we're not
 	 * retransmitting, it's a candidate.  If the length is zero and the
 	 * ack moved forward, we're the sender side of the xfer.  Just free
 	 * the data acked & wake any higher level process that was blocked
 	 * waiting for space.  If the length is non-zero and the ack didn't
 	 * move, we're the receiver side.  If we're getting packets in-order
 	 * (the reassembly queue is empty), add the data to the socket
 	 * buffer and note that we need a delayed ack. Make sure that the
 	 * hidden state-flags are also off. Since we check for
 	 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
 	 */
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
 	    __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
 	    __predict_true(SEGQ_EMPTY(tp)) &&
 	    __predict_true(th->th_seq == tp->rcv_nxt)) {
 		if (tlen == 0) {
 			if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
 			    tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
 				return (0);
 			}
 		} else {
 			if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
 			    tiwin, nxt_pkt, iptos)) {
 				return (0);
 			}
 		}
 	}
 	ctf_calc_rwin(so, tp);
 
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (__ctf_process_rst(m, th, so, tp,
 					  &rack->r_ctl.challenge_ack_ts,
 					  &rack->r_ctl.challenge_ack_cnt));
 
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
 			      &rack->r_ctl.challenge_ack_ts,
 			      &rack->r_ctl.challenge_ack_cnt)) {
 		return (ret_val);
 	}
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	/* State changes only happen in rack_process_data() */
 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ret_val = 0;
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (__ctf_process_rst(m, th, so, tp,
 					  &rack->r_ctl.challenge_ack_ts,
 					  &rack->r_ctl.challenge_ack_cnt));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
 			      &rack->r_ctl.challenge_ack_ts,
 			      &rack->r_ctl.challenge_ack_cnt)) {
 		return (ret_val);
 	}
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
 						tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
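 /*
  * Handle data that arrives after the local user has closed. By default
  * we reset the peer immediately; if data-after-close is allowed and we
  * still have data of our own to send, the incoming data is ignored and
  * a follow-up reset will be sent.
  */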
 static int
 rack_check_data_after_close(struct mbuf *m,
     struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
 {
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (rack->rc_allow_data_af_clo == 0) {
 	close_now:
 		tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
 		/* tcp_close will kill the inp pre-log the Reset */
 		tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
 		tp = tcp_close(tp);
 		KMOD_TCPSTAT_INC(tcps_rcvafterclose);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
 		return (1);
 	}
 	if (sbavail(&so->so_snd) == 0)
 		goto close_now;
 	/* Ok we allow data that is ignored and a followup reset */
 	tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE);
 	tp->rcv_nxt = th->th_seq + *tlen;
 	tp->t_flags2 |= TF2_DROP_AF_DATA;
 	rack->r_wanted_output = 1;
 	*tlen = 0;
 	return (0);
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ret_val = 0;
 	int32_t ourfinisacked = 0;
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (__ctf_process_rst(m, th, so, tp,
 					  &rack->r_ctl.challenge_ack_ts,
 					  &rack->r_ctl.challenge_ack_cnt));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
 			      &rack->r_ctl.challenge_ack_ts,
 			      &rack->r_ctl.challenge_ack_cnt)) {
 		return (ret_val);
 	}
 	/*
 	 * If new data are received on a connection after the user processes
 	 * are gone, then RST the other end.
 	 */
 	if ((tp->t_flags & TF_CLOSED) && tlen &&
 	    rack_check_data_after_close(m, tp, &tlen, th, so))
 		return (1);
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (ourfinisacked) {
 		/*
 		 * If we can't receive any more data, then closing user can
 		 * proceed. Starting the timer is contrary to the
 		 * specification, but if we don't get a FIN we'll hang
 		 * forever.
 		 *
 		 * XXXjl: we should release the tp also, and use a
 		 * compressed state.
 		 */
 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 			soisdisconnected(so);
 			tcp_timer_activate(tp, TT_2MSL,
 			    (tcp_fast_finwait2_recycle ?
 			    tcp_finwait2_timeout :
 			    TP_MAXIDLE(tp)));
 		}
 		tcp_state_change(tp, TCPS_FIN_WAIT_2);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
 						tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ret_val = 0;
 	int32_t ourfinisacked = 0;
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (__ctf_process_rst(m, th, so, tp,
 					  &rack->r_ctl.challenge_ack_ts,
 					  &rack->r_ctl.challenge_ack_cnt));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
 			      &rack->r_ctl.challenge_ack_ts,
 			      &rack->r_ctl.challenge_ack_cnt)) {
 		return (ret_val);
 	}
 	/*
 	 * If new data are received on a connection after the user processes
 	 * are gone, then RST the other end.
 	 */
 	if ((tp->t_flags & TF_CLOSED) && tlen &&
 	    rack_check_data_after_close(m, tp, &tlen, th, so))
 		return (1);
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (ourfinisacked) {
 		tcp_twstart(tp);
 		m_freem(m);
 		return (1);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
 						tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ret_val = 0;
 	int32_t ourfinisacked = 0;
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (__ctf_process_rst(m, th, so, tp,
 					  &rack->r_ctl.challenge_ack_ts,
 					  &rack->r_ctl.challenge_ack_cnt));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
 			      &rack->r_ctl.challenge_ack_ts,
 			      &rack->r_ctl.challenge_ack_cnt)) {
 		return (ret_val);
 	}
 	/*
 	 * If new data are received on a connection after the user processes
 	 * are gone, then RST the other end.
 	 */
 	if ((tp->t_flags & TF_CLOSED) && tlen &&
 	    rack_check_data_after_close(m, tp, &tlen, th, so))
 		return (1);
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * case TCPS_LAST_ACK: Ack processing.
 	 */
 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (ourfinisacked) {
 		tp = tcp_close(tp);
 		ctf_do_drop(m, tp);
 		return (1);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
 						tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
 /*
  * Return value of 1, the TCB is unlocked and most
  * likely gone, return value of 0, the TCP is still
  * locked.
  */
 static int
 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
     struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
     uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
 {
 	int32_t ret_val = 0;
 	int32_t ourfinisacked = 0;
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	ctf_calc_rwin(so, tp);
 
 	/* Reset receive buffer auto scaling when not in bulk receive mode. */
 	if ((thflags & TH_RST) ||
 	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
 		return (__ctf_process_rst(m, th, so, tp,
 					  &rack->r_ctl.challenge_ack_ts,
 					  &rack->r_ctl.challenge_ack_cnt));
 	/*
 	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
 	 * synchronized state.
 	 */
 	if (thflags & TH_SYN) {
 		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
 		return (ret_val);
 	}
 	/*
 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
 	 * it's less than ts_recent, drop it.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
 	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
 		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
 			return (ret_val);
 	}
 	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
 			      &rack->r_ctl.challenge_ack_ts,
 			      &rack->r_ctl.challenge_ack_cnt)) {
 		return (ret_val);
 	}
 	/*
 	 * If new data are received on a connection after the user processes
 	 * are gone, then RST the other end.
 	 */
 	if ((tp->t_flags & TF_CLOSED) && tlen &&
 	    rack_check_data_after_close(m, tp, &tlen, th, so))
 		return (1);
 	/*
 	 * If last ACK falls within this segment's sequence numbers, record
 	 * its timestamp. NOTE: 1) That the test incorporates suggestions
 	 * from the latest proposal of the tcplw@cray.com list (Braden
 	 * 1993/04/26). 2) That updating only on newer timestamps interferes
 	 * with our earlier PAWS tests, so this check should be solely
 	 * predicated on the sequence space of this segment. 3) That we
 	 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ
 	 * + SEG.Len  instead of RFC1323's Last.ACK.Sent < SEG.SEQ +
 	 * SEG.Len. This modified check allows us to overcome RFC1323's
 	 * limitations as described in Stevens TCP/IP Illustrated Vol. 2
 	 * p.869. In such cases, we can still calculate the RTT correctly
 	 * when RCV.NXT == Last.ACK.Sent.
 	 */
 	if ((to->to_flags & TOF_TS) != 0 &&
 	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
 	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
 	    ((thflags & (TH_SYN | TH_FIN)) != 0))) {
 		tp->ts_recent_age = tcp_ts_getticks();
 		tp->ts_recent = to->to_tsval;
 	}
 	/*
 	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN flag
 	 * is on (half-synchronized state), then queue data for later
 	 * processing; else drop segment and return.
 	 */
 	if ((thflags & TH_ACK) == 0) {
 		if (tp->t_flags & TF_NEEDSYN) {
 			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 			    tiwin, thflags, nxt_pkt));
 		} else if (tp->t_flags & TF_ACKNOW) {
 			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
 			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
 			return (ret_val);
 		} else {
 			ctf_do_drop(m, NULL);
 			return (0);
 		}
 	}
 	/*
 	 * Ack processing.
 	 */
 	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
 		return (ret_val);
 	}
 	if (sbavail(&so->so_snd)) {
 		if (ctf_progress_timeout_check(tp, true)) {
 			rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
 						tp, tick, PROGRESS_DROP, __LINE__);
 			ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 			return (1);
 		}
 	}
 	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 	    tiwin, thflags, nxt_pkt));
 }
 
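 /*
  * Clear the current rack rate/RTT sample so the next measurement
  * starts from an empty sample.
  */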
 static void inline
 rack_clear_rate_sample(struct tcp_rack *rack)
 {
 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
 	rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
 	rack->r_ctl.rack_rs.rs_rtt_tot = 0;
 }
 
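 /*
  * Recompute the pacing segment sizes. The minimum is always the fixed
  * maxseg; the maximum depends on whether a forced max segment count, a
  * fixed (possibly hardware) rate, or dynamic goodput-based pacing is in
  * effect, and is capped at PACE_MAX_IP_BYTES. Any change is logged.
  */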
 static void
 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override)
 {
 	uint64_t bw_est, rate_wanted;
 	int chged = 0;
 	uint32_t user_max, orig_min, orig_max;
 
 #ifdef TCP_REQUEST_TRK
 	if (rack->rc_hybrid_mode &&
 	    (rack->r_ctl.rc_pace_max_segs != 0) &&
 	    (rack_hybrid_allow_set_maxseg == 1) &&
 	    (rack->r_ctl.rc_last_sft != NULL)) {
 		rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS;
 		return;
 	}
 #endif
 	orig_min = rack->r_ctl.rc_pace_min_segs;
 	orig_max = rack->r_ctl.rc_pace_max_segs;
 	user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
 	if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
 		chged = 1;
 	rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
 	if (rack->use_fixed_rate || rack->rc_force_max_seg) {
 		if (user_max != rack->r_ctl.rc_pace_max_segs)
 			chged = 1;
 	}
 	if (rack->rc_force_max_seg) {
 		rack->r_ctl.rc_pace_max_segs = user_max;
 	} else if (rack->use_fixed_rate) {
 		bw_est = rack_get_bw(rack);
 		if ((rack->r_ctl.crte == NULL) ||
 		    (bw_est != rack->r_ctl.crte->rate)) {
 			rack->r_ctl.rc_pace_max_segs = user_max;
 		} else {
 			/* We are pacing right at the hardware rate */
 			uint32_t segsiz, pace_one;
 
 			if (rack_pace_one_seg ||
 			    (rack->r_ctl.rc_user_set_min_segs == 1))
 				pace_one = 1;
 			else
 				pace_one = 0;
 			segsiz = min(ctf_fixed_maxseg(tp),
 				     rack->r_ctl.rc_pace_min_segs);
 			rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(
 				tp, bw_est, segsiz, pace_one,
 				rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor);
 		}
 	} else if (rack->rc_always_pace) {
 		if (rack->r_ctl.gp_bw ||
 		    rack->r_ctl.init_rate) {
 			/* We have a rate of some sort set */
 			uint32_t  orig;
 
 			bw_est = rack_get_bw(rack);
 			orig = rack->r_ctl.rc_pace_max_segs;
 			if (fill_override)
 				rate_wanted = *fill_override;
 			else
 				rate_wanted = rack_get_gp_est(rack);
 			if (rate_wanted) {
 				/* We have something */
 				rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
 										   rate_wanted,
 										   ctf_fixed_maxseg(rack->rc_tp));
 			} else
 				rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
 			if (orig != rack->r_ctl.rc_pace_max_segs)
 				chged = 1;
 		} else if ((rack->r_ctl.gp_bw == 0) &&
 			   (rack->r_ctl.rc_pace_max_segs == 0)) {
 			/*
 			 * If we have nothing limit us to bursting
 			 * out IW sized pieces.
 			 */
 			chged = 1;
 			rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
 		}
 	}
 	if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
 		chged = 1;
 		rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
 	}
 	if (chged)
 		rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2);
 }
 
 
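 /*
  * Fill in the pre-built TCP/IP (and optional UDP tunneling) header
  * template used by the fast send block, based on the connection's
  * address family and port settings, and mark the fsb initialized.
  */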
 static void
 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags)
 {
 #ifdef INET6
 	struct ip6_hdr *ip6 = NULL;
 #endif
 #ifdef INET
 	struct ip *ip = NULL;
 #endif
 	struct udphdr *udp = NULL;
 
 	/* Ok, let's fill in the fast block; it can only be used with no IP options! */
 #ifdef INET6
 	if (rack->r_is_v6) {
 		rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
 		ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
 		if (tp->t_port) {
 			rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
 			udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
 			udp->uh_dport = tp->t_port;
 			rack->r_ctl.fsb.udp = udp;
 			rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
 		} else {
 			rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
 			rack->r_ctl.fsb.udp = NULL;
 		}
 		tcpip_fillheaders(rack->rc_inp,
 				  tp->t_port,
 				  ip6, rack->r_ctl.fsb.th);
 		rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL);
 	} else
 #endif				/* INET6 */
 #ifdef INET
 	{
 		rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
 		ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
 		if (tp->t_port) {
 			rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
 			udp->uh_dport = tp->t_port;
 			rack->r_ctl.fsb.udp = udp;
 			rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
 		} else {
 			rack->r_ctl.fsb.udp = NULL;
 			rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
 		}
 		tcpip_fillheaders(rack->rc_inp,
 				  tp->t_port,
 				  ip, rack->r_ctl.fsb.th);
 		rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl;
 	}
 #endif
 	rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0),
 	    (long)TCP_MAXWIN << tp->rcv_scale);
 	rack->r_fsb_inited = 1;
 }
 
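 /*
  * Allocate storage for the fast send block header template; the actual
  * header contents are filled in later, so r_fsb_inited stays 0 here.
  */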
 static int
 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	/*
 	 * Allocate the larger of the two header spaces (v6 if available,
 	 * else just v4) and include a udphdr (overbook).
 	 */
 #ifdef INET6
 	rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
 #else
 	rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
 #endif
 	rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
 					    M_TCPFSB, M_NOWAIT|M_ZERO);
 	if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
 		return (ENOMEM);
 	}
 	rack->r_fsb_inited = 0;
 	return (0);
 }
 
 static void
 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod)
 {
 	/*
 	 * Types of logs (mod value)
 	 * 20 - Initial round setup
 	 * 21 - Rack declares a new round.
 	 */
 	struct tcpcb *tp;
 
 	tp = rack->rc_tp;
 	if (tcp_bblogging_on(tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = rack->r_ctl.current_round;
 		log.u_bbr.flex2 = rack->r_ctl.roundends;
 		log.u_bbr.flex3 = high_seq;
 		log.u_bbr.flex4 = tp->snd_max;
 		log.u_bbr.flex8 = mod;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes;
 		log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes;
 		TCP_LOG_EVENTP(tp, NULL,
 		    &tptosocket(tp)->so_rcv,
 		    &tptosocket(tp)->so_snd,
 		    TCP_HYSTART, 0,
 		    0, &log, false, &tv);
 	}
 }
 
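 /*
  * Deferred part of initialization: seed the round tracking and window
  * state from the connection's current snd_max, snd_wnd and snd_cwnd.
  */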
 static void
 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	rack->rack_deferred_inited = 1;
 	rack->r_ctl.roundends = tp->snd_max;
 	rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
 	rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
 }
 
 static void
 rack_init_retransmit_value(struct tcp_rack *rack, int ctl)
 {
 	/* Retransmit bit controls.
 	 *
 	 * The setting of these values controls one of
 	 * three settings you can have, and dictates
 	 * how rack does retransmissions. Note this applies
 	 * in *any* mode, i.e. pacing on or off, DGP,
 	 * fixed rate pacing, or just bursting rack.
 	 *
 	 * 1 - Use full sized retransmits i.e. limit
 	 *     the size to whatever the pace_max_segments
 	 *     size is.
 	 *
 	 * 2 - Use pacer min granularity as a guide to
 	 *     the size combined with the current calculated
 	 *     goodput b/w measurement. So for example if
 	 *     the goodput is measured at 20Mbps we would
 	 *     calculate 8125 (pacer minimum 250usec in
 	 *     that b/w) and then round it up to the next
 	 *     MSS i.e. for 1448 mss 6 MSS or 8688 bytes.
 	 *
 	 * 0 - The rack default of 1 MSS (anything not 0/1/2
 	 *     falls here too if we are setting via rack_init()).
 	 *
 	 */
 	if (ctl == 1) {
 		rack->full_size_rxt = 1;
 		rack->shape_rxt_to_pacing_min  = 0;
 	} else if (ctl == 2) {
 		rack->full_size_rxt = 0;
 		rack->shape_rxt_to_pacing_min  = 1;
 	} else {
 		rack->full_size_rxt = 0;
 		rack->shape_rxt_to_pacing_min  = 0;
 	}
 }
 
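 /*
  * Emit a TCP_CHG_QUERY black box log record carrying the given mod code
  * and flex values, when BB logging is enabled on this connection.
  */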
 static void
 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod,
 		  uint32_t flex1,
 		  uint32_t flex2,
 		  uint32_t flex3)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.flex8 = mod;
 		log.u_bbr.flex1 = flex1;
 		log.u_bbr.flex2 = flex2;
 		log.u_bbr.flex3 = flex3;
 		tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0,
 			       0, &log, false, NULL, __func__, __LINE__, &tv);
 	}
 }
 
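 /*
  * Answer a change query (tfb_chg_query), typically issued when another
  * stack is taking over the connection: export the send map entry
  * covering a given sequence, any timers currently pending, or the
  * rack timing/PRR/persist state.
  */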
 static int
 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr)
 {
 	struct tcp_rack *rack;
 	struct rack_sendmap *rsm;
 	int i;
 
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	switch (reqr->req) {
 	case TCP_QUERY_SENDMAP:
 		if ((reqr->req_param == tp->snd_max) ||
 		    (tp->snd_max == tp->snd_una)){
 			/* Unlikely */
 			return (0);
 		}
 		rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param);
 		if (rsm == NULL) {
 			/* Can't find that seq -- unlikely */
 			return (0);
 		}
 		reqr->sendmap_start = rsm->r_start;
 		reqr->sendmap_end = rsm->r_end;
 		reqr->sendmap_send_cnt = rsm->r_rtr_cnt;
 		reqr->sendmap_fas = rsm->r_fas;
 		if (reqr->sendmap_send_cnt > SNDMAP_NRTX)
 			reqr->sendmap_send_cnt = SNDMAP_NRTX;
 		for (i = 0; i < reqr->sendmap_send_cnt; i++)
 			reqr->sendmap_time[i] = rsm->r_tim_lastsent[i];
 		reqr->sendmap_ack_arrival = rsm->r_ack_arrival;
 		reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK;
 		reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes;
 		reqr->sendmap_dupacks = rsm->r_dupack;
 		rack_log_chg_info(tp, rack, 1,
 				  rsm->r_start,
 				  rsm->r_end,
 				  rsm->r_flags);
 		return (1);
 		break;
 	case TCP_QUERY_TIMERS_UP:
 		if (rack->r_ctl.rc_hpts_flags == 0) {
 			/* no timers up */
 			return (0);
 		}
 		reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags;
 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
 			reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to;
 		}
 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 			reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp;
 		}
 		rack_log_chg_info(tp, rack, 2,
 				  rack->r_ctl.rc_hpts_flags,
 				  rack->r_ctl.rc_last_output_to,
 				  rack->r_ctl.rc_timer_exp);
 		return (1);
 		break;
 	case TCP_QUERY_RACK_TIMES:
 		/* Reordering items */
 		reqr->rack_num_dsacks = rack->r_ctl.num_dsack;
 		reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts;
 		/* Timerstamps and timers */
 		reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time;
 		reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt;
 		reqr->rack_rtt = rack->rc_rack_rtt;
 		reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time;
 		reqr->rack_srtt_measured = rack->rc_srtt_measure_made;
 		/* PRR data */
 		reqr->rack_sacked = rack->r_ctl.rc_sacked;
 		reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt;
 		reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered;
 		reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs;
 		reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt;
 		reqr->rack_prr_out = rack->r_ctl.rc_prr_out;
 		/* TLP and persists info */
 		reqr->rack_tlp_out = rack->rc_tlp_in_progress;
 		reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out;
 		if (rack->rc_in_persist) {
 			reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time;
 			reqr->rack_in_persist = 1;
 		} else {
 			reqr->rack_time_went_idle = 0;
 			reqr->rack_in_persist = 0;
 		}
 		if (rack->r_wanted_output)
 			reqr->rack_wanted_output = 1;
 		else
 			reqr->rack_wanted_output = 0;
 		return (1);
 		break;
 	default:
 		return (-EINVAL);
 	}
 }
 
 static void
 rack_switch_failed(struct tcpcb *tp)
 {
 	/*
 	 * This method gets called if a stack switch was
	 * attempted and it failed. We are left in place,
 	 * but our hpts timers were stopped and we
 	 * need to validate time units and t_flags2.
 	 */
 	struct tcp_rack *rack;
 	struct timeval tv;
 	uint32_t cts;
 	uint32_t toval;
 	struct hpts_diag diag;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC);
 	if  (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
 		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	else
 		tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
 		tp->t_flags2 |= TF2_MBUF_ACKCMP;
 	if (tp->t_in_hpts > IHPTS_NONE) {
 		/* Strange */
 		return;
 	}
 	cts = tcp_get_usecs(&tv);
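	/*
	 * Re-arm hpts using whichever deadline (pacing or timer) is
	 * still in the future; if it has already passed, fall back to
	 * a single slot.
	 */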
 	if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
 		if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
 			toval = rack->r_ctl.rc_last_output_to - cts;
 		} else {
 			/* one slot please */
 			toval = HPTS_TICKS_PER_SLOT;
 		}
 	} else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 		if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
 			toval = rack->r_ctl.rc_timer_exp - cts;
 		} else {
 			/* one slot please */
 			toval = HPTS_TICKS_PER_SLOT;
 		}
 	} else
 		toval = HPTS_TICKS_PER_SLOT;
 	(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
 				   __LINE__, &diag);
 	rack_log_hpts_diag(rack, cts, &diag, &tv);
 }
 
 static int
 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr)
 {
 	struct rack_sendmap *rsm, *ersm;
 	int insret __diagused;
 	/*
 	 * When initing outstanding, we must be quite careful
 	 * to not refer to tp->t_fb_ptr. This has the old rack
 	 * pointer in it, not the "new" one (when we are doing
 	 * a stack switch).
 	 */
 
 
 	if (tp->t_fb->tfb_chg_query == NULL) {
 		/* Create a send map for the current outstanding data */
 
 		rsm = rack_alloc(rack);
 		if (rsm == NULL) {
 			uma_zfree(rack_pcb_zone, ptr);
 			return (ENOMEM);
 		}
 		rsm->r_no_rtt_allowed = 1;
 		rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
 		rsm->r_rtr_cnt = 1;
 		rsm->r_rtr_bytes = 0;
 		if (tp->t_flags & TF_SENTFIN)
 			rsm->r_flags |= RACK_HAS_FIN;
 		rsm->r_end = tp->snd_max;
 		if (tp->snd_una == tp->iss) {
 			/* The data space is one beyond snd_una */
 			rsm->r_flags |= RACK_HAS_SYN;
 			rsm->r_start = tp->iss;
 			rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una);
 		} else
 			rsm->r_start = tp->snd_una;
 		rsm->r_dupack = 0;
 		if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) {
 			rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
 			if (rsm->m) {
 				rsm->orig_m_len = rsm->m->m_len;
 				rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 			} else {
 				rsm->orig_m_len = 0;
 				rsm->orig_t_space = 0;
 			}
 		} else {
 			/*
 			 * This can happen if we have a stand-alone FIN or
 			 *  SYN.
 			 */
 			rsm->m = NULL;
 			rsm->orig_m_len = 0;
 			rsm->orig_t_space = 0;
 			rsm->soff = 0;
 		}
 #ifdef INVARIANTS
 		if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
 			panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p",
 			      insret, rack, rsm);
 		}
 #else
 		(void)tqhash_insert(rack->r_ctl.tqh, rsm);
 #endif
 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 		rsm->r_in_tmap = 1;
 	} else {
 		/* We have a query mechanism, lets use it */
 		struct tcp_query_resp qr;
 		int i;
 		tcp_seq at;
 
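		/*
		 * Walk the old stack's sendmap from snd_una to snd_max,
		 * asking it for one block at a time and building a
		 * matching rsm for each block returned.
		 */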
 		at = tp->snd_una;
 		while (at != tp->snd_max) {
 			memset(&qr, 0, sizeof(qr));
 			qr.req = TCP_QUERY_SENDMAP;
 			qr.req_param = at;
 			if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0)
 				break;
 			/* Move forward */
 			at = qr.sendmap_end;
 			/* Now lets build the entry for this one */
 			rsm = rack_alloc(rack);
 			if (rsm == NULL) {
 				uma_zfree(rack_pcb_zone, ptr);
 				return (ENOMEM);
 			}
 			memset(rsm, 0, sizeof(struct rack_sendmap));
 			/* Now configure the rsm and insert it */
 			rsm->r_dupack = qr.sendmap_dupacks;
 			rsm->r_start = qr.sendmap_start;
 			rsm->r_end = qr.sendmap_end;
 			if (qr.sendmap_fas)
				rsm->r_fas = qr.sendmap_fas;
 			else
 				rsm->r_fas = rsm->r_start - tp->snd_una;
 			/*
 			 * We have carefully aligned the bits
 			 * so that all we have to do is copy over
 			 * the bits with the mask.
 			 */
 			rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK;
 			rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes;
 			rsm->r_rtr_cnt = qr.sendmap_send_cnt;
 			rsm->r_ack_arrival = qr.sendmap_ack_arrival;
			for (i = 0; i < rsm->r_rtr_cnt; i++)
				rsm->r_tim_lastsent[i] = qr.sendmap_time[i];
 			rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
 					   (rsm->r_start - tp->snd_una), &rsm->soff);
 			if (rsm->m) {
 				rsm->orig_m_len = rsm->m->m_len;
 				rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
 			} else {
 				rsm->orig_m_len = 0;
 				rsm->orig_t_space = 0;
 			}
 #ifdef INVARIANTS
 			if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
 				panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p",
 				      insret, rack, rsm);
 			}
 #else
 			(void)tqhash_insert(rack->r_ctl.tqh, rsm);
 #endif
 			if ((rsm->r_flags & RACK_ACKED) == 0)  {
 				TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) {
 					if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] >
 					    rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) {
 						/*
 						 * If the existing ersm was sent at
 						 * a later time than the new one, then
 						 * the new one should appear ahead of this
 						 * ersm.
 						 */
 						rsm->r_in_tmap = 1;
 						TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext);
 						break;
 					}
 				}
 				if (rsm->r_in_tmap == 0) {
 					/*
 					 * Not found so shove it on the tail.
 					 */
 					TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
 					rsm->r_in_tmap = 1;
 				}
  			} else {
 				if ((rack->r_ctl.rc_sacklast == NULL) ||
 				    (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) {
 					rack->r_ctl.rc_sacklast = rsm;
 				}
 			}
 			rack_log_chg_info(tp, rack, 3,
 					  rsm->r_start,
 					  rsm->r_end,
 					  rsm->r_flags);
 		}
 	}
 	return (0);
 }
 
 static void
 rack_translate_clamp_value(struct tcp_rack *rack, uint32_t optval)
 {
 	/*
 	 * P = percent bits
 	 * F = fill cw bit -- Toggle fillcw if this bit is set.
 	 * S = Segment bits
 	 * M = set max segment bit
	 * U = Unused bits
	 * C = If set to non-zero override the max number of clamps.
	 * L = Bit to indicate if clamped gets lower.
	 *
	 * CCCC CCCC UUUU UULF PPPP PPPP PPPP PPPP
	 *
	 * The lowest 16 bits are the percentage in tenths,
	 * 0.1% - 6553.5% (e.g. 10.1% is encoded as 101).
	 * The upper 16 bits hold the options.
	 * The F bit will turn fill-cw on if you are
	 * not pacing; it will turn it off if dgp is on.
 	 * The L bit will change it so when clamped we get
 	 * the min(gp, lt-bw) for dgp.
 	 */
 	uint16_t per;
 
 	rack->r_ctl.saved_rxt_clamp_val = optval;
 	per = optval & 0x0000ffff;
 	rack->r_ctl.rxt_threshold = (uint64_t)(per & 0xffff);
 	if (optval > 0) {
 		uint16_t clamp_opt;
 
 		rack->excess_rxt_on = 1;
 		clamp_opt = ((optval & 0xffff0000) >> 16);
 		rack->r_ctl.clamp_options = clamp_opt & 0x00ff;
 		if (clamp_opt & 0xff00) {
 			/* A max clamps is also present */
 			rack->r_ctl.max_clamps = (clamp_opt >> 8);
 		} else {
 			/* No specified clamps means no limit */
 			rack->r_ctl.max_clamps = 0;
 		}
 		if (rack->r_ctl.clamp_options & 0x0002) {
 			rack->r_clamped_gets_lower  = 1;
 		} else {
 			rack->r_clamped_gets_lower  = 0;
 		}
 	} else {
 		/* Turn it off back to default */
 		rack->excess_rxt_on = 0;
 		rack->r_clamped_gets_lower  = 0;
 	}
 
 }
 
 
 static int32_t
 rack_init(struct tcpcb *tp, void **ptr)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct tcp_rack *rack = NULL;
 	uint32_t iwin, snt, us_cts;
 	int err, no_query;
 
 	tcp_hpts_init(tp);
 
 	/*
 	 * First are we the initial or are we a switched stack?
 	 * If we are initing via tcp_newtcppcb the ptr passed
	 * will be tp->t_fb_ptr. If it's a stack switch that
	 * has a previous stack we can query, it will be a local
	 * var that will in the end be set into t_fb_ptr.
 	 */
 	if (ptr == &tp->t_fb_ptr)
 		no_query = 1;
 	else
 		no_query = 0;
 	*ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
 	if (*ptr == NULL) {
 		/*
		 * We need to allocate memory but can't. The INP and INP_INFO
		 * locks are held, and they are recursive (this happens during
		 * setup), so a scheme to drop the locks fails :(
 		 *
 		 */
 		return(ENOMEM);
 	}
 	memset(*ptr, 0, sizeof(struct tcp_rack));
 	rack = (struct tcp_rack *)*ptr;
 	rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT);
 	if (rack->r_ctl.tqh == NULL) {
 		uma_zfree(rack_pcb_zone, rack);
 		return(ENOMEM);
 	}
 	tqhash_init(rack->r_ctl.tqh);
 	TAILQ_INIT(&rack->r_ctl.rc_free);
 	TAILQ_INIT(&rack->r_ctl.rc_tmap);
 	rack->rc_tp = tp;
 	rack->rc_inp = inp;
 	/* Set the flag */
 	rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0;
 	/* Probably not needed but lets be sure */
 	rack_clear_rate_sample(rack);
 	/*
 	 * Save off the default values, socket options will poke
 	 * at these if pacing is not on or we have not yet
 	 * reached where pacing is on (gp_ready/fixed enabled).
 	 * When they get set into the CC module (when gp_ready
 	 * is enabled or we enable fixed) then we will set these
 	 * values into the CC and place in here the old values
 	 * so we have a restoral. Then we will set the flag
 	 * rc_pacing_cc_set. That way whenever we turn off pacing
 	 * or switch off this stack, we will know to go restore
 	 * the saved values.
 	 *
 	 * We specifically put into the beta the ecn value for pacing.
 	 */
 	rack->rc_new_rnd_needed = 1;
 	rack->r_ctl.rc_split_limit = V_tcp_map_split_limit;
 	/* We want abe like behavior as well */
 	rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
 	rack->r_ctl.rc_reorder_fade = rack_reorder_fade;
 	rack->rc_allow_data_af_clo = rack_ignore_data_after_close;
 	rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh;
 	if (rack_rxt_clamp_thresh) {
 		rack_translate_clamp_value(rack, rack_rxt_clamp_thresh);
 		rack->excess_rxt_on = 1;
 	}
 	if (rack_uses_full_dgp_in_rec)
 		rack->r_ctl.full_dgp_in_rec = 1;
 	if (rack_fill_cw_state)
 		rack->rc_pace_to_cwnd = 1;
 	if (rack_pacing_min_seg)
 		rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg;
 	if (use_rack_rr)
 		rack->use_rack_rr = 1;
 	if (rack_dnd_default) {
 		rack->rc_pace_dnd = 1;
 	}
 	if (V_tcp_delack_enabled)
 		tp->t_delayed_ack = 1;
 	else
 		tp->t_delayed_ack = 0;
 #ifdef TCP_ACCOUNTING
 	if (rack_tcp_accounting) {
 		tp->t_flags2 |= TF2_TCP_ACCOUNTING;
 	}
 #endif
 	rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss;
 	rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca;
 	if (rack_enable_shared_cwnd)
 		rack->rack_enable_scwnd = 1;
 	rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
 	rack->rc_user_set_max_segs = rack_hptsi_segments;
 	rack->rc_force_max_seg = 0;
 	TAILQ_INIT(&rack->r_ctl.opt_list);
 	rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn;
 	rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn;
 	if (rack_hibeta_setting) {
 		rack->rack_hibeta = 1;
 		if ((rack_hibeta_setting >= 50) &&
 		    (rack_hibeta_setting <= 100)) {
 			rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting;
 			rack->r_ctl.saved_hibeta = rack_hibeta_setting;
 		}
 	} else {
 		rack->r_ctl.saved_hibeta = 50;
 	}
 	rack->r_ctl.rc_reorder_shift = rack_reorder_thresh;
 	rack->r_ctl.rc_pkt_delay = rack_pkt_delay;
 	rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp;
 	rack->r_ctl.rc_lowest_us_rtt = 0xffffffff;
 	rack->r_ctl.rc_highest_us_rtt = 0;
 	rack->r_ctl.bw_rate_cap = rack_bw_rate_cap;
 	rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop);
 	if (rack_use_cmp_acks)
 		rack->r_use_cmp_ack = 1;
 	if (rack_disable_prr)
 		rack->rack_no_prr = 1;
 	if (rack_gp_no_rec_chg)
 		rack->rc_gp_no_rec_chg = 1;
 	if (rack_pace_every_seg && tcp_can_enable_pacing()) {
 		rack->rc_always_pace = 1;
 		if (rack->rack_hibeta)
 			rack_set_cc_pacing(rack);
 	} else
 		rack->rc_always_pace = 0;
 	if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack)
 		rack->r_mbuf_queue = 1;
 	else
 		rack->r_mbuf_queue = 0;
 	rack_set_pace_segments(tp, rack, __LINE__, NULL);
 	if (rack_limits_scwnd)
 		rack->r_limit_scw = 1;
 	else
 		rack->r_limit_scw = 0;
 	rack_init_retransmit_value(rack, rack_rxt_controls);
 	rack->rc_labc = V_tcp_abc_l_var;
 	rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method;
 	rack->rack_tlp_threshold_use = rack_tlp_threshold_use;
 	rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr;
 	rack->r_ctl.rc_min_to = rack_min_to;
 	microuptime(&rack->r_ctl.act_rcv_time);
 	rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time;
 	rack->rc_init_win = rack_default_init_window;
 	rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss;
 	if (rack_hw_up_only)
 		rack->r_up_only = 1;
 	if (rack_do_dyn_mul) {
 		/* When dynamic adjustment is on CA needs to start at 100% */
 		rack->rc_gp_dyn_mul = 1;
 		if (rack_do_dyn_mul >= 100)
 			rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
 	} else
 		rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
 	rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec;
 	rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt;
 	rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time);
 	setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN,
 				rack_probertt_filter_life);
 	us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
 	rack->r_ctl.rc_lower_rtt_us_cts = us_cts;
 	rack->r_ctl.rc_time_of_last_probertt = us_cts;
 	rack->r_ctl.challenge_ack_ts = tcp_ts_getticks();
 	rack->r_ctl.rc_time_probertt_starts = 0;
 	if (rack_dsack_std_based & 0x1) {
 		/* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
 		rack->rc_rack_tmr_std_based = 1;
 	}
 	if (rack_dsack_std_based & 0x2) {
 		/* Basically this means  rack timers are extended based on dsack by up to (2 * srtt) */
 		rack->rc_rack_use_dsack = 1;
 	}
 	/* We require at least one measurement, even if the sysctl is 0 */
 	if (rack_req_measurements)
 		rack->r_ctl.req_measurements = rack_req_measurements;
 	else
 		rack->r_ctl.req_measurements = 1;
 	if (rack_enable_hw_pacing)
 		rack->rack_hdw_pace_ena = 1;
 	if (rack_hw_rate_caps)
 		rack->r_rack_hw_rate_caps = 1;
 #ifdef TCP_SAD_DETECTION
 	rack->do_detection = 1;
 #else
 	rack->do_detection = 0;
 #endif
 	if (rack_non_rxt_use_cr)
 		rack->rack_rec_nonrxt_use_cr = 1;
 	/* Lets setup the fsb block */
 	err = rack_init_fsb(tp, rack);
 	if (err) {
 		uma_zfree(rack_pcb_zone, *ptr);
 		*ptr = NULL;
 		return (err);
 	}
 	if (rack_do_hystart) {
 		tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
 		if (rack_do_hystart > 1)
 			tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
 		if (rack_do_hystart > 2)
 			tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
 	}
 	/* Log what we will do with queries */
 	rack_log_chg_info(tp, rack, 7,
 			  no_query, 0, 0);
 	if (rack_def_profile)
 		rack_set_profile(rack, rack_def_profile);
 	/* Cancel the GP measurement in progress */
 	tp->t_flags &= ~TF_GPUTINPROG;
 	if ((tp->t_state != TCPS_CLOSED) &&
 	    (tp->t_state != TCPS_TIME_WAIT)) {
 		/*
 		 * We are already open, we may
 		 * need to adjust a few things.
 		 */
 		if (SEQ_GT(tp->snd_max, tp->iss))
 			snt = tp->snd_max - tp->iss;
 		else
 			snt = 0;
 		iwin = rc_init_window(rack);
 		if ((snt < iwin) &&
 		    (no_query == 1)) {
 			/* We are not past the initial window
 			 * on the first init (i.e. a stack switch
			 * has not yet occurred) so we need to make
			 * sure cwnd and ssthresh are correct.
 			 */
 			if (tp->snd_cwnd < iwin)
 				tp->snd_cwnd = iwin;
 			/*
 			 * If we are within the initial window
 			 * we want ssthresh to be unlimited. Setting
			 * it to the rwnd (which the default stack and
			 * older rack versions do) is not really a good idea
 			 * since we want to be in SS and grow both the
 			 * cwnd and the rwnd (via dynamic rwnd growth). If
 			 * we set it to the rwnd then as the peer grows its
 			 * rwnd we will be stuck in CA and never hit SS.
 			 *
			 * It's far better to raise it up high (this takes the
			 * risk that there has been a loss already, probably
 			 * we should have an indicator in all stacks of loss
 			 * but we don't), but considering the normal use this
 			 * is a risk worth taking. The consequences of not
 			 * hitting SS are far worse than going one more time
			 * into it early on (before we have sent even an IW).
 			 * It is highly unlikely that we will have had a loss
 			 * before getting the IW out.
 			 */
 			tp->snd_ssthresh = 0xffffffff;
 		}
 		/*
 		 * Any init based on sequence numbers
 		 * should be done in the deferred init path
 		 * since we can be CLOSED and not have them
 		 * inited when rack_init() is called. We
 		 * are not closed so lets call it.
 		 */
 		rack_deferred_init(tp, rack);
 	}
 	if ((tp->t_state != TCPS_CLOSED) &&
 	    (tp->t_state != TCPS_TIME_WAIT) &&
 	    (no_query == 0) &&
 	    (tp->snd_una != tp->snd_max))  {
 		err = rack_init_outstanding(tp, rack, us_cts, *ptr);
 		if (err) {
 			*ptr = NULL;
 			return(err);
 		}
 	}
 	rack_stop_all_timers(tp, rack);
 	/* Setup all the t_flags2 */
 	if  (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
 		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	else
 		tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
 		tp->t_flags2 |= TF2_MBUF_ACKCMP;
 	/*
 	 * Timers in Rack are kept in microseconds so lets
 	 * convert any initial incoming variables
 	 * from ticks into usecs. Note that we
 	 * also change the values of t_srtt and t_rttvar, if
 	 * they are non-zero. They are kept with a 5
 	 * bit decimal so we have to carefully convert
 	 * these to get the full precision.
 	 */
 	rack_convert_rtts(tp);
 	rack_log_hystart_event(rack, rack->r_ctl.roundends, 20);
 	if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) {
 		/* We do not start any timers on DROPPED connections */
 		if (tp->t_fb->tfb_chg_query == NULL) {
 			rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
 		} else {
 			struct tcp_query_resp qr;
 			int ret;
 
 			memset(&qr, 0, sizeof(qr));
 
 			/* Get the misc time stamps and such for rack */
 			qr.req = TCP_QUERY_RACK_TIMES;
 			ret = (*tp->t_fb->tfb_chg_query)(tp, &qr);
 			if (ret == 1) {
 				rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts;
 				rack->r_ctl.num_dsack  = qr.rack_num_dsacks;
 				rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time;
 				rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt;
 				rack->rc_rack_rtt = qr.rack_rtt;
 				rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time;
 				rack->r_ctl.rc_sacked = qr.rack_sacked;
 				rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt;
 				rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered;
 				rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs;
 				rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt;
 				rack->r_ctl.rc_prr_out = qr.rack_prr_out;
 				if (qr.rack_tlp_out) {
 					rack->rc_tlp_in_progress = 1;
 					rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out;
 				} else {
 					rack->rc_tlp_in_progress = 0;
 					rack->r_ctl.rc_tlp_cnt_out = 0;
 				}
 				if (qr.rack_srtt_measured)
 					rack->rc_srtt_measure_made = 1;
 				if (qr.rack_in_persist == 1) {
 					rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle;
 #ifdef NETFLIX_SHARED_CWND
 					if (rack->r_ctl.rc_scw) {
 						tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
 						rack->rack_scwnd_is_idle = 1;
 					}
 #endif
 					rack->r_ctl.persist_lost_ends = 0;
 					rack->probe_not_answered = 0;
 					rack->forced_ack = 0;
 					tp->t_rxtshift = 0;
 					rack->rc_in_persist = 1;
 					RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 							   rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
 				}
 				if (qr.rack_wanted_output)
 					rack->r_wanted_output = 1;
 				rack_log_chg_info(tp, rack, 6,
 						  qr.rack_min_rtt,
 						  qr.rack_rtt,
 						  qr.rack_reorder_ts);
 			}
 			/* Get the old stack timers */
 			qr.req_param = 0;
 			qr.req = TCP_QUERY_TIMERS_UP;
 			ret = (*tp->t_fb->tfb_chg_query)(tp, &qr);
 			if (ret) {
 				/*
 				 * non-zero return means we have a timer('s)
 				 * to start. Zero means no timer (no keepalive
 				 * I suppose).
 				 */
 				uint32_t tov = 0;
 
 				rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags;
 				if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) {
 					rack->r_ctl.rc_last_output_to = qr.timer_pacing_to;
 					if (TSTMP_GT(qr.timer_pacing_to, us_cts))
 						tov = qr.timer_pacing_to - us_cts;
 					else
 						tov = HPTS_TICKS_PER_SLOT;
 				}
 				if (qr.timer_hpts_flags & PACE_TMR_MASK) {
 					rack->r_ctl.rc_timer_exp = qr.timer_timer_exp;
 					if (tov == 0) {
 						if (TSTMP_GT(qr.timer_timer_exp, us_cts))
 							tov = qr.timer_timer_exp - us_cts;
 						else
 							tov = HPTS_TICKS_PER_SLOT;
 					}
 				}
 				rack_log_chg_info(tp, rack, 4,
 						  rack->r_ctl.rc_hpts_flags,
 						  rack->r_ctl.rc_last_output_to,
 						  rack->r_ctl.rc_timer_exp);
 				if (tov) {
 					struct hpts_diag diag;
 
 					(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov),
 								   __LINE__, &diag);
 					rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time);
 				}
 			}
 		}
 		rack_log_rtt_shrinks(rack,  us_cts,  tp->t_rxtcur,
 				     __LINE__, RACK_RTTS_INIT);
 	}
 	return (0);
 }
 
 static int
 rack_handoff_ok(struct tcpcb *tp)
 {
 	if ((tp->t_state == TCPS_CLOSED) ||
 	    (tp->t_state == TCPS_LISTEN)) {
 		/* Sure no problem though it may not stick */
 		return (0);
 	}
 	if ((tp->t_state == TCPS_SYN_SENT) ||
 	    (tp->t_state == TCPS_SYN_RECEIVED)) {
 		/*
 		 * We really don't know if you support sack,
 		 * you have to get to ESTAB or beyond to tell.
 		 */
 		return (EAGAIN);
 	}
 	if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
 		/*
 		 * Rack will only send a FIN after all data is acknowledged.
 		 * So in this case we have more data outstanding. We can't
 		 * switch stacks until either all data and only the FIN
 		 * is left (in which case rack_init() now knows how
 		 * to deal with that) <or> all is acknowledged and we
 		 * are only left with incoming data, though why you
 		 * would want to switch to rack after all data is acknowledged
 		 * I have no idea (rrs)!
 		 */
 		return (EAGAIN);
 	}
 	if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){
 		return (0);
 	}
 	/*
 	 * If we reach here we don't do SACK on this connection so we can
 	 * never do rack.
 	 */
 	return (EINVAL);
 }
 
 static void
 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
 {
 
 	if (tp->t_fb_ptr) {
 		uint32_t cnt_free = 0;
 		struct tcp_rack *rack;
 		struct rack_sendmap *rsm;
 
 		tcp_handle_orphaned_packets(tp);
 		tp->t_flags &= ~TF_FORCEDATA;
 		rack = (struct tcp_rack *)tp->t_fb_ptr;
 		rack_log_pacing_delay_calc(rack,
 					   0,
 					   0,
 					   0,
 					   rack_get_gp_est(rack), /* delRate */
 					   rack_get_lt_bw(rack), /* rttProp */
 					   20, __LINE__, NULL, 0);
 #ifdef NETFLIX_SHARED_CWND
 		if (rack->r_ctl.rc_scw) {
 			uint32_t limit;
 
 			if (rack->r_limit_scw)
 				limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
 			else
 				limit = 0;
 			tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
 						  rack->r_ctl.rc_scw_index,
 						  limit);
 			rack->r_ctl.rc_scw = NULL;
 		}
 #endif
 		if (rack->r_ctl.fsb.tcp_ip_hdr) {
 			free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
 			rack->r_ctl.fsb.tcp_ip_hdr = NULL;
 			rack->r_ctl.fsb.th = NULL;
 		}
 		if (rack->rc_always_pace) {
 			tcp_decrement_paced_conn();
 			rack_undo_cc_pacing(rack);
 			rack->rc_always_pace = 0;
 		}
 		/* Clean up any options if they were not applied */
 		while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) {
 			struct deferred_opt_list *dol;
 
 			dol = TAILQ_FIRST(&rack->r_ctl.opt_list);
 			TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
 			free(dol, M_TCPDO);
 		}
 		/* rack does not use force data but other stacks may clear it */
 		if (rack->r_ctl.crte != NULL) {
 			tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
 			rack->rack_hdrw_pacing = 0;
 			rack->r_ctl.crte = NULL;
 		}
 #ifdef TCP_BLACKBOX
 		tcp_log_flowend(tp);
 #endif
 		/*
		 * Lets take a different approach to purging: just
		 * get each one and free it like a cum-ack would, and
		 * not use a foreach loop.
 		 */
 		rsm = tqhash_min(rack->r_ctl.tqh);
 		while (rsm) {
 			tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
 			rack->r_ctl.rc_num_maps_alloced--;
 			uma_zfree(rack_zone, rsm);
 			rsm = tqhash_min(rack->r_ctl.tqh);
 		}
 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
 		while (rsm) {
 			TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
 			rack->r_ctl.rc_num_maps_alloced--;
 			rack->rc_free_cnt--;
 			cnt_free++;
 			uma_zfree(rack_zone, rsm);
 			rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
 		}
 		if ((rack->r_ctl.rc_num_maps_alloced > 0) &&
 		    (tcp_bblogging_on(tp))) {
 			union tcp_log_stackspecific log;
 			struct timeval tv;
 
 			memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 			log.u_bbr.flex8 = 10;
 			log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced;
 			log.u_bbr.flex2 = rack->rc_free_cnt;
 			log.u_bbr.flex3 = cnt_free;
 			log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 			rsm = tqhash_min(rack->r_ctl.tqh);
 			log.u_bbr.delRate = (uint64_t)rsm;
 			rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
 			log.u_bbr.cur_del_rate = (uint64_t)rsm;
 			log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 			log.u_bbr.pkt_epoch = __LINE__;
 			(void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
 					     0, &log, false, NULL, NULL, 0, &tv);
 		}
 		KASSERT((rack->r_ctl.rc_num_maps_alloced == 0),
 			("rack:%p num_aloc:%u after freeing all?",
 			 rack,
 			 rack->r_ctl.rc_num_maps_alloced));
 		rack->rc_free_cnt = 0;
 		free(rack->r_ctl.tqh, M_TCPFSB);
 		rack->r_ctl.tqh = NULL;
 		uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
 		tp->t_fb_ptr = NULL;
 	}
 	/* Make sure snd_nxt is correctly set */
 	tp->snd_nxt = tp->snd_max;
 }
 
 static void
 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
 {
 	if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) {
 		rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0;
 	}
 	switch (tp->t_state) {
 	case TCPS_SYN_SENT:
 		rack->r_state = TCPS_SYN_SENT;
 		rack->r_substate = rack_do_syn_sent;
 		break;
 	case TCPS_SYN_RECEIVED:
 		rack->r_state = TCPS_SYN_RECEIVED;
 		rack->r_substate = rack_do_syn_recv;
 		break;
 	case TCPS_ESTABLISHED:
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 		rack->r_state = TCPS_ESTABLISHED;
 		rack->r_substate = rack_do_established;
 		break;
 	case TCPS_CLOSE_WAIT:
 		rack->r_state = TCPS_CLOSE_WAIT;
 		rack->r_substate = rack_do_close_wait;
 		break;
 	case TCPS_FIN_WAIT_1:
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 		rack->r_state = TCPS_FIN_WAIT_1;
 		rack->r_substate = rack_do_fin_wait_1;
 		break;
 	case TCPS_CLOSING:
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 		rack->r_state = TCPS_CLOSING;
 		rack->r_substate = rack_do_closing;
 		break;
 	case TCPS_LAST_ACK:
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 		rack->r_state = TCPS_LAST_ACK;
 		rack->r_substate = rack_do_lastack;
 		break;
 	case TCPS_FIN_WAIT_2:
 		rack->r_state = TCPS_FIN_WAIT_2;
 		rack->r_substate = rack_do_fin_wait_2;
 		break;
 	case TCPS_LISTEN:
 	case TCPS_CLOSED:
 	case TCPS_TIME_WAIT:
 	default:
 		break;
	}
 	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
 		rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
 
 }
 
 static void
 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
 {
 	/*
 	 * We received an ack, and then did not
	 * call send or were bounced out because the
	 * hpts was running. Now a timer is up as well, is
 	 * it the right timer?
 	 */
 	struct rack_sendmap *rsm;
 	int tmr_up;
 
 	tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
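	/* Isolate which (if any) of the pacer's timers is currently armed. */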
 	if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
 		return;
 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 	if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
 	    (tmr_up == PACE_TMR_RXT)) {
 		/* Should be an RXT */
 		return;
 	}
 	if (rsm == NULL) {
 		/* Nothing outstanding? */
 		if (tp->t_flags & TF_DELACK) {
 			if (tmr_up == PACE_TMR_DELACK)
 				/* We are supposed to have delayed ack up and we do */
 				return;
 		} else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) {
 			/*
 			 * if we hit enobufs then we would expect the possibility
 			 * of nothing outstanding and the RXT up (and the hptsi timer).
 			 */
 			return;
 		} else if (((V_tcp_always_keepalive ||
 			     rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
 			    (tp->t_state <= TCPS_CLOSING)) &&
 			   (tmr_up == PACE_TMR_KEEP) &&
 			   (tp->snd_max == tp->snd_una)) {
 			/* We should have keep alive up and we do */
 			return;
 		}
 	}
 	if (SEQ_GT(tp->snd_max, tp->snd_una) &&
 		   ((tmr_up == PACE_TMR_TLP) ||
 		    (tmr_up == PACE_TMR_RACK) ||
 		    (tmr_up == PACE_TMR_RXT))) {
 		/*
 		 * Either a Rack, TLP or RXT is fine if  we
 		 * have outstanding data.
 		 */
 		return;
 	} else if (tmr_up == PACE_TMR_DELACK) {
 		/*
 		 * If the delayed ack was going to go off
 		 * before the rtx/tlp/rack timer were going to
 		 * expire, then that would be the timer in control.
 		 * Note we don't check the time here trusting the
 		 * code is correct.
 		 */
 		return;
 	}
 	/*
 	 * Ok the timer originally started is not what we want now.
 	 * We will force the hpts to be stopped if any, and restart
 	 * with the slot set to what was in the saved slot.
 	 */
 	if (tcp_in_hpts(rack->rc_tp)) {
 		if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
 			uint32_t us_cts;
 
 			us_cts = tcp_get_usecs(NULL);
 			if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
 				rack->r_early = 1;
 				rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
 			}
 			rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
 		}
 		tcp_hpts_remove(rack->rc_tp);
 	}
 	rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 	rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
 }
 
 
 static void
 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts)
 {
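	/*
	 * Accept the window update only if it is new information: a
	 * later seq, the same seq with a later ack, or the same
	 * seq/ack but a larger advertised window (the usual
	 * snd_wl1/snd_wl2 test).
	 */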
 	if ((SEQ_LT(tp->snd_wl1, seq) ||
 	    (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) ||
 	    (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) {
 		/* keep track of pure window updates */
 		if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd))
 			KMOD_TCPSTAT_INC(tcps_rcvwinupd);
 		tp->snd_wnd = tiwin;
 		rack_validate_fo_sendwin_up(tp, rack);
 		tp->snd_wl1 = seq;
 		tp->snd_wl2 = ack;
 		if (tp->snd_wnd > tp->max_sndwnd)
 			tp->max_sndwnd = tp->snd_wnd;
		rack->r_wanted_output = 1;
 	} else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) {
 		tp->snd_wnd = tiwin;
 		rack_validate_fo_sendwin_up(tp, rack);
 		tp->snd_wl1 = seq;
 		tp->snd_wl2 = ack;
 	} else {
 		/* Not a valid win update */
 		return;
 	}
 	if (tp->snd_wnd > tp->max_sndwnd)
 		tp->max_sndwnd = tp->snd_wnd;
 	/* Do we exit persists? */
 	if ((rack->rc_in_persist != 0) &&
 	    (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
 				rack->r_ctl.rc_pace_min_segs))) {
 		rack_exit_persist(tp, rack, cts);
 	}
 	/* Do we enter persists? */
 	if ((rack->rc_in_persist == 0) &&
 	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
 	    TCPS_HAVEESTABLISHED(tp->t_state) &&
 	    ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
 	    sbavail(&tptosocket(tp)->so_snd) &&
 	    (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
 		/*
 		 * Here the rwnd is less than
 		 * the pacing size, we are established,
 		 * nothing is outstanding, and there is
 		 * data to send. Enter persists.
 		 */
 		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack);
 	}
 }
 
 static void
 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
 {
 
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		struct inpcb *inp = tptoinpcb(tp);
 		union tcp_log_stackspecific log;
 		struct timeval ltv;
 		char tcp_hdr_buf[60];
 		struct tcphdr *th;
 		struct timespec ts;
 		uint32_t orig_snd_una;
 		uint8_t xx = 0;
 
 #ifdef TCP_REQUEST_TRK
 		struct tcp_sendfile_track *tcp_req;
 
 		if (SEQ_GT(ae->ack, tp->snd_una)) {
 			tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1));
 		} else {
 			tcp_req = tcp_req_find_req_for_seq(tp, ae->ack);
 		}
 #endif
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		if (rack->rack_no_prr == 0)
 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
 		else
 			log.u_bbr.flex1 = 0;
 		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->r_might_revert;
 		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
 		log.u_bbr.bbr_state = rack->rc_free_cnt;
 		log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = tp->t_maxseg;
 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
 		log.u_bbr.flex7 = 1;
 		log.u_bbr.lost = ae->flags;
 		log.u_bbr.cwnd_gain = ackval;
 		log.u_bbr.pacing_gain = 0x2;
 		if (ae->flags & TSTMP_HDWR) {
 			/* Record the hardware timestamp if present */
 			log.u_bbr.flex3 = M_TSTMP;
 			ts.tv_sec = ae->timestamp / 1000000000;
 			ts.tv_nsec = ae->timestamp % 1000000000;
 			ltv.tv_sec = ts.tv_sec;
 			ltv.tv_usec = ts.tv_nsec / 1000;
 			log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
 		} else if (ae->flags & TSTMP_LRO) {
 			/* Record the LRO the arrival timestamp */
 			log.u_bbr.flex3 = M_TSTMP_LRO;
 			ts.tv_sec = ae->timestamp / 1000000000;
 			ts.tv_nsec = ae->timestamp % 1000000000;
 			ltv.tv_sec = ts.tv_sec;
 			ltv.tv_usec = ts.tv_nsec / 1000;
 			log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
 		}
 		log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
 		/* Log the rcv time */
 		log.u_bbr.delRate = ae->timestamp;
 #ifdef TCP_REQUEST_TRK
 		log.u_bbr.applimited = tp->t_tcpreq_closed;
 		log.u_bbr.applimited <<= 8;
 		log.u_bbr.applimited |= tp->t_tcpreq_open;
 		log.u_bbr.applimited <<= 8;
 		log.u_bbr.applimited |= tp->t_tcpreq_req;
 		if (tcp_req) {
 			/* Copy out any client req info */
 			/* seconds */
 			log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
 			/* useconds */
 			log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
 			log.u_bbr.rttProp = tcp_req->timestamp;
 			log.u_bbr.cur_del_rate = tcp_req->start;
 			if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
 				log.u_bbr.flex8 |= 1;
 			} else {
 				log.u_bbr.flex8 |= 2;
 				log.u_bbr.bw_inuse = tcp_req->end;
 			}
 			log.u_bbr.flex6 = tcp_req->start_seq;
 			if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
 				log.u_bbr.flex8 |= 4;
 				log.u_bbr.epoch = tcp_req->end_seq;
 			}
 		}
 #endif
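		/*
		 * Build a synthetic TCP header from the compressed ack
		 * entry so the normal TCP_LOG_IN logging path can be used.
		 */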
 		memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
 		th = (struct tcphdr *)tcp_hdr_buf;
 		th->th_seq = ae->seq;
 		th->th_ack = ae->ack;
 		th->th_win = ae->win;
 		/* Now fill in the ports */
 		th->th_sport = inp->inp_fport;
 		th->th_dport = inp->inp_lport;
 		tcp_set_flags(th, ae->flags);
 		/* Now do we have a timestamp option? */
 		if (ae->flags & HAS_TSTMP) {
 			u_char *cp;
 			uint32_t val;
 
 			th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
 			cp = (u_char *)(th + 1);
 			*cp = TCPOPT_NOP;
 			cp++;
 			*cp = TCPOPT_NOP;
 			cp++;
 			*cp = TCPOPT_TIMESTAMP;
 			cp++;
 			*cp = TCPOLEN_TIMESTAMP;
 			cp++;
 			val = htonl(ae->ts_value);
 			bcopy((char *)&val,
 			      (char *)cp, sizeof(uint32_t));
 			val = htonl(ae->ts_echo);
 			bcopy((char *)&val,
 			      (char *)(cp + 4), sizeof(uint32_t));
 		} else
 			th->th_off = (sizeof(struct tcphdr) >> 2);
 
 		/*
 		 * For sane logging we need to play a little trick.
 		 * If the ack were fully processed we would have moved
 		 * snd_una to high_seq, but since compressed acks are
 		 * processed in two phases, at this point (logging) snd_una
 		 * won't be advanced. So we would see multiple acks showing
 		 * the advancement. We can prevent that by "pretending" that
 		 * snd_una was advanced and then un-advancing it so that the
 		 * logging code has the right value for tlb_snd_una.
 		 */
 		if (tp->snd_una != high_seq) {
 			orig_snd_una = tp->snd_una;
 			tp->snd_una = high_seq;
 			xx = 1;
 		} else
 			xx = 0;
 		TCP_LOG_EVENTP(tp, th,
 			       &tptosocket(tp)->so_rcv,
 			       &tptosocket(tp)->so_snd, TCP_LOG_IN, 0,
 			       0, &log, true, &ltv);
 		if (xx) {
 			tp->snd_una = orig_snd_una;
 		}
 	}
 
 }
 
 static void
 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts)
 {
 	uint32_t us_rtt;
 	/*
 	 * A persist or keep-alive was forced out, update our
	 * min rtt time. Note we now worry about lost responses.
	 * When a subsequent keep-alive or persist times out
	 * and forced_ack is still on, then the last probe
	 * was not responded to. In such cases we have a
	 * sysctl that controls the behavior: either we apply
	 * the rtt but with reduced confidence (0), or we just
	 * plain don't apply the rtt estimate. Having data flow
 	 * will clear the probe_not_answered flag i.e. cum-ack
 	 * move forward <or> exiting and reentering persists.
 	 */
 
 	rack->forced_ack = 0;
 	rack->rc_tp->t_rxtshift = 0;
 	if ((rack->rc_in_persist &&
 	     (tiwin == rack->rc_tp->snd_wnd)) ||
 	    (rack->rc_in_persist == 0)) {
 		/*
 		 * In persists only apply the RTT update if this is
 		 * a response to our window probe. And that
 		 * means the rwnd sent must match the current
 		 * snd_wnd. If it does not, then we got a
 		 * window update ack instead. For keepalive
 		 * we allow the answer no matter what the window.
 		 *
 		 * Note that if the probe_not_answered is set then
 		 * the forced_ack_ts is the oldest one i.e. the first
 		 * probe sent that might have been lost. This assures
		 * us that if we do calculate an RTT it is a longer one,
		 * not some short value.
 		 */
 		if (rack->rc_in_persist)
 			counter_u64_add(rack_persists_acks, 1);
 		us_rtt = us_cts - rack->r_ctl.forced_ack_ts;
 		if (us_rtt == 0)
 			us_rtt = 1;
 		if (rack->probe_not_answered == 0) {
 			rack_apply_updated_usrtt(rack, us_rtt, us_cts);
 			tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1);
 		} else {
 			/* We have a retransmitted probe here too */
 			if (rack_apply_rtt_with_reduced_conf) {
 				rack_apply_updated_usrtt(rack, us_rtt, us_cts);
 				tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1);
 			}
 		}
 	}
 }
 
 static int
 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
 {
 	/*
 	 * Handle a "special" compressed ack mbuf. Each incoming
 	 * ack has only four possible dispositions:
 	 *
 	 * A) It moves the cum-ack forward
 	 * B) It is behind the cum-ack.
 	 * C) It is a window-update ack.
 	 * D) It is a dup-ack.
 	 *
 	 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES
 	 * in the incoming mbuf. We also need to still pay attention
 	 * to nxt_pkt since there may be another packet after this
 	 * one.
 	 */
 #ifdef TCP_ACCOUNTING
 	uint64_t ts_val;
 	uint64_t rdstc;
 #endif
 	int segsiz;
 	struct timespec ts;
 	struct tcp_rack *rack;
 	struct tcp_ackent *ae;
 	uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
 	int cnt, i, did_out, ourfinisacked = 0;
 	struct tcpopt to_holder, *to = NULL;
 #ifdef TCP_ACCOUNTING
 	int win_up_req = 0;
 #endif
 	int nsegs = 0;
 	int under_pacing = 0;
 	int recovery = 0;
 #ifdef TCP_ACCOUNTING
 	sched_pin();
 #endif
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (rack->gp_ready &&
 	    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
 		under_pacing = 1;
 
 	if (rack->r_state != tp->t_state)
 		rack_set_state(tp, rack);
 	if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
 	    (tp->t_flags & TF_GPUTINPROG)) {
 		/*
 		 * We have a goodput in progress
 		 * and we have entered a late state.
 		 * Do we have enough data in the sb
 		 * to handle the GPUT request?
 		 */
 		uint32_t bytes;
 
 		bytes = tp->gput_ack - tp->gput_seq;
 		if (SEQ_GT(tp->gput_seq, tp->snd_una))
 			bytes += tp->gput_seq - tp->snd_una;
 		if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
 			/*
 			 * There are not enough bytes in the socket
 			 * buffer that have been sent to cover this
 			 * measurement. Cancel it.
 			 */
 			rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
 						   rack->r_ctl.rc_gp_srtt /*flex1*/,
 						   tp->gput_seq,
 						   0, 0, 18, __LINE__, NULL, 0);
 			tp->t_flags &= ~TF_GPUTINPROG;
 		}
 	}
 	to = &to_holder;
 	to->to_flags = 0;
 	KASSERT((m->m_len >= sizeof(struct tcp_ackent)),
 		("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len));
 	cnt = m->m_len / sizeof(struct tcp_ackent);
 	counter_u64_add(rack_multi_single_eq, cnt);
 	high_seq = tp->snd_una;
 	the_win = tp->snd_wnd;
 	win_seq = tp->snd_wl1;
 	win_upd_ack = tp->snd_wl2;
 	cts = tcp_tv_to_usectick(tv);
 	ms_cts = tcp_tv_to_mssectick(tv);
 	rack->r_ctl.rc_rcvtime = cts;
 	segsiz = ctf_fixed_maxseg(tp);
 	if ((rack->rc_gp_dyn_mul) &&
 	    (rack->use_fixed_rate == 0) &&
 	    (rack->rc_always_pace)) {
 		/* Check in on probertt */
 		rack_check_probe_rtt(rack, cts);
 	}
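	/*
	 * Process each compressed ack entry in this mbuf in order,
	 * classifying it (cum-ack, behind, window update or dup-ack)
	 * and then acting on that classification.
	 */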
 	for (i = 0; i < cnt; i++) {
 #ifdef TCP_ACCOUNTING
 		ts_val = get_cyclecount();
 #endif
 		rack_clear_rate_sample(rack);
 		ae = ((mtod(m, struct tcp_ackent *)) + i);
 		if (ae->flags & TH_FIN)
 			rack_log_pacing_delay_calc(rack,
 						   0,
 						   0,
 						   0,
 						   rack_get_gp_est(rack), /* delRate */
 						   rack_get_lt_bw(rack), /* rttProp */
 						   20, __LINE__, NULL, 0);
 		/* Setup the window */
 		tiwin = ae->win << tp->snd_scale;
 		if (tiwin > rack->r_ctl.rc_high_rwnd)
 			rack->r_ctl.rc_high_rwnd = tiwin;
 		/* figure out the type of ack */
 		if (SEQ_LT(ae->ack, high_seq)) {
 			/* Case B*/
 			ae->ack_val_set = ACK_BEHIND;
 		} else if (SEQ_GT(ae->ack, high_seq)) {
 			/* Case A */
 			ae->ack_val_set = ACK_CUMACK;
 		} else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){
 			/* Case D */
 			ae->ack_val_set = ACK_DUPACK;
 		} else {
 			/* Case C */
 			ae->ack_val_set = ACK_RWND;
 		}
 		if (rack->sack_attack_disable > 0) {
 			rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__);
 			rack->r_ctl.ack_during_sd++;
 		}
 		rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq);
 		/* Validate timestamp */
 		if (ae->flags & HAS_TSTMP) {
 			/* Setup for a timestamp */
 			to->to_flags = TOF_TS;
 			ae->ts_echo -= tp->ts_offset;
 			to->to_tsecr = ae->ts_echo;
 			to->to_tsval = ae->ts_value;
 			/*
 			 * If echoed timestamp is later than the current time, fall back to
 			 * non RFC1323 RTT calculation.  Normalize timestamp if syncookies
 			 * were used when this connection was established.
 			 */
 			if (TSTMP_GT(ae->ts_echo, ms_cts))
 				to->to_tsecr = 0;
 			if (tp->ts_recent &&
 			    TSTMP_LT(ae->ts_value, tp->ts_recent)) {
 				if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) {
 #ifdef TCP_ACCOUNTING
 					rdstc = get_cyclecount();
 					if (rdstc > ts_val) {
 						if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 							tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
 						}
 					}
 #endif
 					continue;
 				}
 			}
 			if (SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
 			    SEQ_LEQ(tp->last_ack_sent, ae->seq)) {
 				tp->ts_recent_age = tcp_ts_getticks();
 				tp->ts_recent = ae->ts_value;
 			}
 		} else {
 			/* Setup for a no options */
 			to->to_flags = 0;
 		}
 		/* Update the rcv time and perform idle reduction possibly */
 		if  (tp->t_idle_reduce &&
 		     (tp->snd_max == tp->snd_una) &&
 		     (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
 			counter_u64_add(rack_input_idle_reduces, 1);
 			rack_cc_after_idle(rack, tp);
 		}
 		tp->t_rcvtime = ticks;
 		/* Now what about ECN of a chain of pure ACKs? */
 		if (tcp_ecn_input_segment(tp, ae->flags, 0,
 			tcp_packets_this_ack(tp, ae->ack),
 			ae->codepoint))
 			rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__);
 #ifdef TCP_ACCOUNTING
 		/* Count for the specific type of ack in */
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_cnt_counters[ae->ack_val_set]++;
 		}
 #endif
 		/*
		 * Note how we could move these up into the determination
		 * above, but we don't, so that the timestamp checks (and ECN)
		 * are done first before we do any processing on the ACK.
 		 * The non-compressed path through the code has this
 		 * weakness (noted by @jtl) that it actually does some
 		 * processing before verifying the timestamp information.
 		 * We don't take that path here which is why we set
 		 * the ack_val_set first, do the timestamp and ecn
 		 * processing, and then look at what we have setup.
 		 */
 		if (ae->ack_val_set == ACK_BEHIND) {
 			/*
			 * Case B: flag reordering if the window is not closed;
			 * otherwise it could be a keep-alive or persists probe.
 			 */
 			if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
 				rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
 				if (rack->r_ctl.rc_reorder_ts == 0)
 					rack->r_ctl.rc_reorder_ts = 1;
 			}
 		} else if (ae->ack_val_set == ACK_DUPACK) {
 			/* Case D */
 			rack_strike_dupack(rack);
 		} else if (ae->ack_val_set == ACK_RWND) {
 			/* Case C */
 			if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
 				ts.tv_sec = ae->timestamp / 1000000000;
 				ts.tv_nsec = ae->timestamp % 1000000000;
 				rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
 				rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
 			} else {
 				rack->r_ctl.act_rcv_time = *tv;
 			}
 			if (rack->forced_ack) {
 				rack_handle_probe_response(rack, tiwin,
 							   tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
 			}
 #ifdef TCP_ACCOUNTING
 			win_up_req = 1;
 #endif
 			win_upd_ack = ae->ack;
 			win_seq = ae->seq;
 			the_win = tiwin;
 			rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
 		} else {
 			/* Case A */
 			if (SEQ_GT(ae->ack, tp->snd_max)) {
 				/*
 				 * We just send an ack since the incoming
 				 * ack is beyond the largest seq we sent.
 				 */
 				if ((tp->t_flags & TF_ACKNOW) == 0) {
 					ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
					if (tp->t_flags & TF_ACKNOW)
 						rack->r_wanted_output = 1;
 				}
 			} else {
 				nsegs++;
 				/* If the window changed setup to update */
 				if (tiwin != tp->snd_wnd) {
 					win_upd_ack = ae->ack;
 					win_seq = ae->seq;
 					the_win = tiwin;
 					rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
 				}
 #ifdef TCP_ACCOUNTING
 				/* Account for the acks */
 				if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 					tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
 				}
 #endif
 				high_seq = ae->ack;
 				if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp))
 					rack_log_hystart_event(rack, high_seq, 8);
 				/* Setup our act_rcv_time */
 				if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
 					ts.tv_sec = ae->timestamp / 1000000000;
 					ts.tv_nsec = ae->timestamp % 1000000000;
 					rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
 					rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
 				} else {
 					rack->r_ctl.act_rcv_time = *tv;
 				}
 				rack_process_to_cumack(tp, rack, ae->ack, cts, to,
 						       tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
 #ifdef TCP_REQUEST_TRK
 				rack_req_check_for_comp(rack, high_seq);
 #endif
 				if (rack->rc_dsack_round_seen) {
 					/* Is the dsack round over? */
 					if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) {
 						/* Yes it is */
 						rack->rc_dsack_round_seen = 0;
 						rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
 					}
 				}
 			}
 		}
 		/* And lets be sure to commit the rtt measurements for this ack */
 		tcp_rack_xmit_timer_commit(rack, tp);
 #ifdef TCP_ACCOUNTING
 		rdstc = get_cyclecount();
 		if (rdstc > ts_val) {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
 				if (ae->ack_val_set == ACK_CUMACK)
 					tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
 			}
 		}
 #endif
 	}
 #ifdef TCP_ACCOUNTING
 	ts_val = get_cyclecount();
 #endif
 	/* Tend to any collapsed window */
 	if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) {
 		/* The peer collapsed the window */
 		rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__);
 	} else if (rack->rc_has_collapsed)
 		rack_un_collapse_window(rack, __LINE__);
 	if ((rack->r_collapse_point_valid) &&
 	    (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point)))
 		rack->r_collapse_point_valid = 0;
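	/* How far did this batch of acks advance the cum-ack point? */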
 	acked_amount = acked = (high_seq - tp->snd_una);
 	if (acked) {
 		/*
 		 * The draft (v3) calls for us to use SEQ_GEQ, but that
 		 * causes issues when we are just going app limited. Lets
		 * instead use SEQ_GT <or> where it's equal but more data
 		 * is outstanding.
 		 *
 		 * Also make sure we are on the last ack of a series. We
 		 * have to have all the ack's processed in queue to know
 		 * if there is something left outstanding.
 		 *
 		 */
 		if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) &&
 		    (rack->rc_new_rnd_needed == 0) &&
 		    (nxt_pkt == 0)) {
 			rack_log_hystart_event(rack, high_seq, 21);
 			rack->r_ctl.current_round++;
 			/* Force the next send to setup the next round */
 			rack->rc_new_rnd_needed = 1;
 			if (CC_ALGO(tp)->newround != NULL) {
 				CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
 			}
 		}
 		/*
 		 * Clear the probe not answered flag
 		 * since cum-ack moved forward.
 		 */
 		rack->probe_not_answered = 0;
 		if (rack->sack_attack_disable == 0)
 			rack_do_decay(rack);
 		if (acked >= segsiz) {
 			/*
 			 * You only get credit for
 			 * MSS and greater (and you get extra
 			 * credit for larger cum-ack moves).
 			 */
 			int ac;
 
 			ac = acked / segsiz;
 			rack->r_ctl.ack_count += ac;
 			counter_u64_add(rack_ack_total, ac);
 		}
 		if (rack->r_ctl.ack_count > 0xfff00000) {
 			/*
 			 * reduce the number to keep us under
 			 * a uint32_t.
 			 */
 			rack->r_ctl.ack_count /= 2;
 			rack->r_ctl.sack_count /= 2;
 		}
 		if (tp->t_flags & TF_NEEDSYN) {
 			/*
 			 * T/TCP: Connection was half-synchronized, and our SYN has
 			 * been ACK'd (so connection is now fully synchronized).  Go
 			 * to non-starred state, increment snd_una for ACK of SYN,
 			 * and check if we can do window scaling.
 			 */
 			tp->t_flags &= ~TF_NEEDSYN;
 			tp->snd_una++;
 			acked_amount = acked = (high_seq - tp->snd_una);
 		}
 		if (acked > sbavail(&so->so_snd))
 			acked_amount = sbavail(&so->so_snd);
 #ifdef TCP_SAD_DETECTION
 		/*
 		 * We only care on a cum-ack move if we are in a sack-disabled
 		 * state. We have already added in to the ack_count, and we never
 		 * would disable on a cum-ack move, so we only care to do the
 		 * detection if it may "undo" it, i.e. we were in disabled already.
 		 */
 		if (rack->sack_attack_disable)
 			rack_do_detection(tp, rack, acked_amount, segsiz);
 #endif
 		if (IN_FASTRECOVERY(tp->t_flags) &&
 		    (rack->rack_no_prr == 0))
 			rack_update_prr(tp, rack, acked_amount, high_seq);
 		if (IN_RECOVERY(tp->t_flags)) {
 			if (SEQ_LT(high_seq, tp->snd_recover) &&
 			    (SEQ_LT(high_seq, tp->snd_max))) {
 				tcp_rack_partialack(tp);
 			} else {
 				rack_post_recovery(tp, high_seq);
 				recovery = 1;
 			}
 		}
 		/* Handle the rack-log-ack part (sendmap) */
 		if ((sbused(&so->so_snd) == 0) &&
 		    (acked > acked_amount) &&
 		    (tp->t_state >= TCPS_FIN_WAIT_1) &&
 		    (tp->t_flags & TF_SENTFIN)) {
 			/*
 			 * We must be sure our fin
 			 * was sent and acked (we can be
 			 * in FIN_WAIT_1 without having
 			 * sent the fin).
 			 */
 			ourfinisacked = 1;
 			/*
 			 * Lets make sure snd_una is updated
 			 * since most likely acked_amount = 0 (it
 			 * should be).
 			 */
 			tp->snd_una = high_seq;
 		}
 		/* Did we make a RTO error? */
 		if ((tp->t_flags & TF_PREVVALID) &&
 		    ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
 			tp->t_flags &= ~TF_PREVVALID;
 			if (tp->t_rxtshift == 1 &&
 			    (int)(ticks - tp->t_badrxtwin) < 0)
 				rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__);
 		}
 		/* Handle the data in the socket buffer */
 		KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1);
 		KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
 		if (acked_amount > 0) {
 			struct mbuf *mfree;
 
 			rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery);
 			SOCKBUF_LOCK(&so->so_snd);
 			mfree = sbcut_locked(&so->so_snd, acked_amount);
 			tp->snd_una = high_seq;
 			/* Note we want to hold the sb lock through the sendmap adjust */
 			rack_adjust_sendmap_head(rack, &so->so_snd);
 			/* Wake up the socket if we have room to write more */
 			rack_log_wakeup(tp,rack, &so->so_snd, acked, 2);
 			sowwakeup_locked(so);
 			if ((recovery == 1) &&
 			    (rack->excess_rxt_on) &&
 			    (rack->r_cwnd_was_clamped == 0)) {
 				do_rack_excess_rxt(tp, rack);
 			} else if (rack->r_cwnd_was_clamped)
 				do_rack_check_for_unclamp(tp, rack);
 			m_freem(mfree);
 		}
 		/* update progress */
 		tp->t_acktime = ticks;
 		rack_log_progress_event(rack, tp, tp->t_acktime,
 					PROGRESS_UPDATE, __LINE__);
 		/* Clear out shifts and such */
 		tp->t_rxtshift = 0;
 		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 				   rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
 		rack->rc_tlp_in_progress = 0;
 		rack->r_ctl.rc_tlp_cnt_out = 0;
 		/* Send recover and snd_nxt must be dragged along */
 		if (SEQ_GT(tp->snd_una, tp->snd_recover))
 			tp->snd_recover = tp->snd_una;
 		if (SEQ_LT(tp->snd_nxt, tp->snd_max))
 			tp->snd_nxt = tp->snd_max;
 		/*
 		 * If the RXT timer is running we want to
 		 * stop it, so we can restart a TLP (or new RXT).
 		 */
 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
 			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 		tp->snd_wl2 = high_seq;
 		tp->t_dupacks = 0;
 		if (under_pacing &&
 		    (rack->use_fixed_rate == 0) &&
 		    (rack->in_probe_rtt == 0) &&
 		    rack->rc_gp_dyn_mul &&
 		    rack->rc_always_pace) {
 			/* Check if we are dragging bottom */
 			rack_check_bottom_drag(tp, rack, so);
 		}
 		if (tp->snd_una == tp->snd_max) {
 			tp->t_flags &= ~TF_PREVVALID;
 			rack->r_ctl.retran_during_recovery = 0;
 			rack->rc_suspicious = 0;
 			rack->r_ctl.dsack_byte_cnt = 0;
 			rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
 			if (rack->r_ctl.rc_went_idle_time == 0)
 				rack->r_ctl.rc_went_idle_time = 1;
 			rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
 			if (sbavail(&tptosocket(tp)->so_snd) == 0)
 				tp->t_acktime = 0;
 			/* Set so we might enter persists... */
 			rack->r_wanted_output = 1;
 			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 			sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
 			if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
 			    (sbavail(&so->so_snd) == 0) &&
 			    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
 				/*
 				 * The socket was gone and the
 				 * peer sent data (not now in the past), time to
 				 * reset him.
 				 */
 				rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
 				/* tcp_close will kill the inp pre-log the Reset */
 				tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
 #ifdef TCP_ACCOUNTING
 				rdstc = get_cyclecount();
 				if (rdstc > ts_val) {
 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
 					}
 				}
 #endif
 				m_freem(m);
 				tp = tcp_close(tp);
 				if (tp == NULL) {
 #ifdef TCP_ACCOUNTING
 					sched_unpin();
 #endif
 					return (1);
 				}
 				/*
 				 * We would normally do drop-with-reset which would
 				 * send back a reset. We can't since we don't have
 				 * all the needed bits. Instead lets arrange for
 				 * a call to tcp_output(). That way since we
 				 * are in the closed state we will generate a reset.
 				 *
 				 * Note if tcp_accounting is on we don't unpin since
 				 * we do that after the goto label.
 				 */
 				goto send_out_a_rst;
 			}
 			if ((sbused(&so->so_snd) == 0) &&
 			    (tp->t_state >= TCPS_FIN_WAIT_1) &&
 			    (tp->t_flags & TF_SENTFIN)) {
 				/*
 				 * If we can't receive any more data, then closing user can
 				 * proceed. Starting the timer is contrary to the
 				 * specification, but if we don't get a FIN we'll hang
 				 * forever.
 				 *
 				 */
 				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 					soisdisconnected(so);
 					tcp_timer_activate(tp, TT_2MSL,
 							   (tcp_fast_finwait2_recycle ?
 							    tcp_finwait2_timeout :
 							    TP_MAXIDLE(tp)));
 				}
 				if (ourfinisacked == 0) {
 					/*
 					 * We don't change to fin-wait-2 if we have our fin acked
 					 * which means we are probably in TCPS_CLOSING.
 					 */
 					tcp_state_change(tp, TCPS_FIN_WAIT_2);
 				}
 			}
 		}
 		/* Wake up the socket if we have room to write more */
 		if (sbavail(&so->so_snd)) {
 			rack->r_wanted_output = 1;
 			if (ctf_progress_timeout_check(tp, true)) {
 				rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
 							tp, tick, PROGRESS_DROP, __LINE__);
 				/*
 				 * We cheat here and don't send a RST, we should send one
 				 * when the pacer drops the connection.
 				 */
 #ifdef TCP_ACCOUNTING
 				rdstc = get_cyclecount();
 				if (rdstc > ts_val) {
 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
 					}
 				}
 				sched_unpin();
 #endif
 				(void)tcp_drop(tp, ETIMEDOUT);
 				m_freem(m);
 				return (1);
 			}
 		}
 		if (ourfinisacked) {
 			switch(tp->t_state) {
 			case TCPS_CLOSING:
 #ifdef TCP_ACCOUNTING
 				rdstc = get_cyclecount();
 				if (rdstc > ts_val) {
 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
 					}
 				}
 				sched_unpin();
 #endif
 				tcp_twstart(tp);
 				m_freem(m);
 				return (1);
 				break;
 			case TCPS_LAST_ACK:
 #ifdef TCP_ACCOUNTING
 				rdstc = get_cyclecount();
 				if (rdstc > ts_val) {
 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
 					}
 				}
 				sched_unpin();
 #endif
 				tp = tcp_close(tp);
 				ctf_do_drop(m, tp);
 				return (1);
 				break;
 			case TCPS_FIN_WAIT_1:
 #ifdef TCP_ACCOUNTING
 				rdstc = get_cyclecount();
 				if (rdstc > ts_val) {
 					if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 						tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
 						tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
 					}
 				}
 #endif
 				if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
 					soisdisconnected(so);
 					tcp_timer_activate(tp, TT_2MSL,
 							   (tcp_fast_finwait2_recycle ?
 							    tcp_finwait2_timeout :
 							    TP_MAXIDLE(tp)));
 				}
 				tcp_state_change(tp, TCPS_FIN_WAIT_2);
 				break;
 			default:
 				break;
 			}
 		}
 		if (rack->r_fast_output) {
 			/*
 		 * We are doing fast output... can we expand that?
 			 */
 			rack_gain_for_fastoutput(rack, tp, so, acked_amount);
 		}
 #ifdef TCP_ACCOUNTING
 		rdstc = get_cyclecount();
 		if (rdstc > ts_val) {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val);
 				tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val);
 			}
 		}
 
 	} else if (win_up_req) {
 		rdstc = get_cyclecount();
 		if (rdstc > ts_val) {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val);
 			}
 		}
 #endif
 	}
 	/* Now is there a next packet, if so we are done */
 	m_freem(m);
 	did_out = 0;
 	if (nxt_pkt) {
 #ifdef TCP_ACCOUNTING
 		sched_unpin();
 #endif
 		rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs);
 		return (0);
 	}
 	rack_handle_might_revert(tp, rack);
 	ctf_calc_rwin(so, tp);
 	if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
 	send_out_a_rst:
 		if (tcp_output(tp) < 0) {
 #ifdef TCP_ACCOUNTING
 			sched_unpin();
 #endif
 			return (1);
 		}
 		did_out = 1;
 	}
 	if (tp->t_flags2 & TF2_HPTS_CALLS)
 		tp->t_flags2 &= ~TF2_HPTS_CALLS;
 	rack_free_trim(rack);
 #ifdef TCP_ACCOUNTING
 	sched_unpin();
 #endif
 	rack_timer_audit(tp, rack, &so->so_snd);
 	rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs);
 	return (0);
 }
 
 #define	TCP_LRO_TS_OPTION \
     ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
 	  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
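 /*
  * TCP_LRO_TS_OPTION is the four option bytes NOP, NOP, TIMESTAMP,
  * TCPOLEN_TIMESTAMP as they appear on the wire, pre-swapped so that a
  * single 32-bit compare below can detect a segment whose only option
  * is a timestamp (the same test LRO applies).
  */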
 
 static int
 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
     int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt,
     struct timeval *tv)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct socket *so = tptosocket(tp);
 #ifdef TCP_ACCOUNTING
 	uint64_t ts_val;
 #endif
 	int32_t thflags, retval, did_out = 0;
 	int32_t way_out = 0;
 	/*
 	 * cts - is the current time from tv (caller gets ts) in microseconds.
 	 * ms_cts - is the current time from tv in milliseconds.
 	 * us_cts - is the time that LRO or hardware actually got the packet in microseconds.
 	 */
 	uint32_t cts, us_cts, ms_cts;
 	uint32_t tiwin, high_seq;
 	struct timespec ts;
 	struct tcpopt to;
 	struct tcp_rack *rack;
 	struct rack_sendmap *rsm;
 	int32_t prev_state = 0;
 	int no_output = 0;
 	int slot_remaining = 0;
 #ifdef TCP_ACCOUNTING
 	int ack_val_set = 0xf;
 #endif
 	int nsegs;
 
 	NET_EPOCH_ASSERT();
 	INP_WLOCK_ASSERT(inp);
 
 	/*
 	 * tv passed from common code is from either M_TSTMP_LRO or
 	 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
 	 */
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (rack->rack_deferred_inited == 0) {
 		/*
 		 * If we are the connecting socket we will
 		 * hit rack_init() when no sequence numbers
 		 * are setup. This makes it so we must defer
 		 * some initialization. Call that now.
 		 */
 		rack_deferred_init(tp, rack);
 	}
 	/*
 	 * Check to see if we need to skip any output plans. This
 	 * can happen in the non-LRO path where we are pacing and
 	 * must process the ack coming in but need to defer sending
 	 * anything because a pacing timer is running.
 	 */
 	us_cts = tcp_tv_to_usectick(tv);
 	if (m->m_flags & M_ACKCMP) {
 		/*
 		 * All compressed ack's are ack's by definition so
 		 * remove any ack required flag and then do the processing.
 		 */
 		rack->rc_ack_required = 0;
 		return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv));
 	}
 	thflags = tcp_get_flags(th);
 	if ((rack->rc_always_pace == 1) &&
 	    (rack->rc_ack_can_sendout_data == 0) &&
 	    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
 	    (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) {
 		/*
 		 * Ok conditions are right for queuing the packets
 		 * but we do have to check the flags in the inp; it
 		 * could be that, if a sack is present, we want to be
 		 * awoken and so should process the packets.
 		 */
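 		/* Microseconds left until the already scheduled pacing output time. */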
 		slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
 		if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) {
 			no_output = 1;
 		} else {
 			/*
 			 * If there are no options, or just a
 			 * timestamp option, we will want to queue
 			 * the packets. This is the same as LRO does
 			 * and will need to change with accurate ECN.
 			 */
 			uint32_t *ts_ptr;
 			int optlen;
 
 			optlen = (th->th_off << 2) - sizeof(struct tcphdr);
 			ts_ptr = (uint32_t *)(th + 1);
 			if ((optlen == 0) ||
 			    ((optlen == TCPOLEN_TSTAMP_APPA) &&
 			     (*ts_ptr == TCP_LRO_TS_OPTION)))
 				no_output = 1;
 		}
 		if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) {
 			/*
 			 * It is unrealistic to think we can pace in less than
 			 * the minimum granularity of the pacer (def:250usec). So
 			 * if we have less than that time remaining we should go
 			 * ahead and allow output to be "early". We will attempt to
 			 * make up for it in any pacing time we try to apply on
 			 * the outbound packet.
 			 */
 			no_output = 0;
 		}
 	}
 	/*
 	 * If there is a RST or FIN lets dump out the bw;
 	 * with a FIN the connection may go on but we
 	 * may not.
 	 */
 	if ((thflags & TH_FIN) || (thflags & TH_RST))
 		rack_log_pacing_delay_calc(rack,
 					   rack->r_ctl.gp_bw,
 					   0,
 					   0,
 					   rack_get_gp_est(rack), /* delRate */
 					   rack_get_lt_bw(rack), /* rttProp */
 					   20, __LINE__, NULL, 0);
 	if (m->m_flags & M_ACKCMP) {
 		panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp);
 	}
 	cts = tcp_tv_to_usectick(tv);
 	ms_cts =  tcp_tv_to_mssectick(tv);
 	nsegs = m->m_pkthdr.lro_nsegs;
 	counter_u64_add(rack_proc_non_comp_ack, 1);
 #ifdef TCP_ACCOUNTING
 	sched_pin();
 	if (thflags & TH_ACK)
 		ts_val = get_cyclecount();
 #endif
 	if ((m->m_flags & M_TSTMP) ||
 	    (m->m_flags & M_TSTMP_LRO)) {
 		mbuf_tstmp2timespec(m, &ts);
 		rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
 		rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
 	} else
 		rack->r_ctl.act_rcv_time = *tv;
 	kern_prefetch(rack, &prev_state);
 	prev_state = 0;
 	/*
 	 * Unscale the window into a 32-bit value. For the SYN_SENT state
 	 * the scale is zero.
 	 */
 	tiwin = th->th_win << tp->snd_scale;
 #ifdef TCP_ACCOUNTING
 	if (thflags & TH_ACK) {
 		/*
 		 * We have a tradeoff here. We can either do what we are
 		 * doing i.e. pinning to this CPU and then doing the accounting
 		 * <or> we could do a critical enter, setup the rdtsc and cpu
 		 * as in below, and then validate we are on the same CPU on
 		 * exit. I have chosen to not do the critical enter since
 		 * that often will gain you a context switch, and instead lock
 		 * us (line above this if) to the same CPU with sched_pin(). This
 		 * means we may be context switched out for a higher priority
 		 * interrupt but we won't be moved to another CPU.
 		 *
 		 * If this occurs (which it won't very often since we most likely
 		 * are running this code in interrupt context and only a higher
 		 * priority will bump us ... clock?) we will falsely add in
 		 * to the time the interrupt processing time plus the ack processing
 		 * time. This is ok since it's a rare event.
 		 */
 		ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
 						    ctf_fixed_maxseg(tp));
 	}
 #endif
 	/*
 	 * Parse options on any incoming segment.
 	 */
 	memset(&to, 0, sizeof(to));
 	tcp_dooptions(&to, (u_char *)(th + 1),
 	    (th->th_off << 2) - sizeof(struct tcphdr),
 	    (thflags & TH_SYN) ? TO_SYN : 0);
 	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
 	    __func__));
 	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
 	    __func__));
 
 	if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
 	    (tp->t_flags & TF_GPUTINPROG)) {
 		/*
 		 * We have a goodput in progress
 		 * and we have entered a late state.
 		 * Do we have enough data in the sb
 		 * to handle the GPUT request?
 		 */
 		uint32_t bytes;
 
 		bytes = tp->gput_ack - tp->gput_seq;
 		if (SEQ_GT(tp->gput_seq, tp->snd_una))
 			bytes += tp->gput_seq - tp->snd_una;
 		if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
 			/*
 			 * There are not enough bytes in the socket
 			 * buffer that have been sent to cover this
 			 * measurement. Cancel it.
 			 */
 			rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
 						   rack->r_ctl.rc_gp_srtt /*flex1*/,
 						   tp->gput_seq,
 						   0, 0, 18, __LINE__, NULL, 0);
 			tp->t_flags &= ~TF_GPUTINPROG;
 		}
 	}
 	high_seq = th->th_ack;
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval ltv;
 #ifdef TCP_REQUEST_TRK
 		struct tcp_sendfile_track *tcp_req;
 
 		if (SEQ_GT(th->th_ack, tp->snd_una)) {
 			tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1));
 		} else {
 			tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack);
 		}
 #endif
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		if (rack->rack_no_prr == 0)
 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
 		else
 			log.u_bbr.flex1 = 0;
 		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->r_might_revert;
 		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
 		log.u_bbr.bbr_state = rack->rc_free_cnt;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
 		log.u_bbr.flex3 = m->m_flags;
 		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
 		log.u_bbr.lost = thflags;
 		log.u_bbr.pacing_gain = 0x1;
 #ifdef TCP_ACCOUNTING
 		log.u_bbr.cwnd_gain = ack_val_set;
 #endif
 		log.u_bbr.flex7 = 2;
 		if (m->m_flags & M_TSTMP) {
 			/* Record the hardware timestamp if present */
 			mbuf_tstmp2timespec(m, &ts);
 			ltv.tv_sec = ts.tv_sec;
 			ltv.tv_usec = ts.tv_nsec / 1000;
 			log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
 		} else if (m->m_flags & M_TSTMP_LRO) {
 			/* Record the LRO arrival timestamp */
 			mbuf_tstmp2timespec(m, &ts);
 			ltv.tv_sec = ts.tv_sec;
 			ltv.tv_usec = ts.tv_nsec / 1000;
 			log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
 		}
 		log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
 		/* Log the rcv time */
 		log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
 #ifdef TCP_REQUEST_TRK
 		log.u_bbr.applimited = tp->t_tcpreq_closed;
 		log.u_bbr.applimited <<= 8;
 		log.u_bbr.applimited |= tp->t_tcpreq_open;
 		log.u_bbr.applimited <<= 8;
 		log.u_bbr.applimited |= tp->t_tcpreq_req;
 		if (tcp_req) {
 			/* Copy out any client req info */
 			/* seconds */
 			log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
 			/* useconds */
 			log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
 			log.u_bbr.rttProp = tcp_req->timestamp;
 			log.u_bbr.cur_del_rate = tcp_req->start;
 			if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
 				log.u_bbr.flex8 |= 1;
 			} else {
 				log.u_bbr.flex8 |= 2;
 				log.u_bbr.bw_inuse = tcp_req->end;
 			}
 			log.u_bbr.flex6 = tcp_req->start_seq;
 			if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
 				log.u_bbr.flex8 |= 4;
 				log.u_bbr.epoch = tcp_req->end_seq;
 			}
 		}
 #endif
 		TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
 		    tlen, &log, true, &ltv);
 	}
 	/* Remove ack required flag if set, we have one  */
 	if (thflags & TH_ACK)
 		rack->rc_ack_required = 0;
 	if (rack->sack_attack_disable > 0) {
 		rack->r_ctl.ack_during_sd++;
 		rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__);
 	}
 	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
 		way_out = 4;
 		retval = 0;
 		m_freem(m);
 		goto done_with_input;
 	}
 	/*
 	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
 	 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
 	 */
 	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
 	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
 		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
 		ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
 #ifdef TCP_ACCOUNTING
 		sched_unpin();
 #endif
 		return (1);
 	}
 	/*
 	 * If timestamps were negotiated during SYN/ACK and a
 	 * segment without a timestamp is received, silently drop
 	 * the segment, unless it is a RST segment or missing timestamps are
 	 * tolerated.
 	 * See section 3.2 of RFC 7323.
 	 */
 	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) &&
 	    ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) {
 		way_out = 5;
 		retval = 0;
 		m_freem(m);
 		goto done_with_input;
 	}
 
 	/*
 	 * Segment received on connection. Reset idle time and keep-alive
 	 * timer. XXX: This should be done after segment validation to
 	 * ignore broken/spoofed segs.
 	 */
 	if  (tp->t_idle_reduce &&
 	     (tp->snd_max == tp->snd_una) &&
 	     (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) {
 		counter_u64_add(rack_input_idle_reduces, 1);
 		rack_cc_after_idle(rack, tp);
 	}
 	tp->t_rcvtime = ticks;
 #ifdef STATS
 	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
 #endif
 	if (tiwin > rack->r_ctl.rc_high_rwnd)
 		rack->r_ctl.rc_high_rwnd = tiwin;
 	/*
 	 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move
 	 * this to occur after we've validated the segment.
 	 */
 	if (tcp_ecn_input_segment(tp, thflags, tlen,
 	    tcp_packets_this_ack(tp, th->th_ack),
 	    iptos))
 		rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__);
 
 	/*
 	 * If echoed timestamp is later than the current time, fall back to
 	 * non RFC1323 RTT calculation.  Normalize timestamp if syncookies
 	 * were used when this connection was established.
 	 */
 	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
 		to.to_tsecr -= tp->ts_offset;
 		if (TSTMP_GT(to.to_tsecr, ms_cts))
 			to.to_tsecr = 0;
 	}
 
 	/*
 	 * If its the first time in we need to take care of options and
 	 * verify we can do SACK for rack!
 	 */
 	if (rack->r_state == 0) {
 		/* Should be init'd by rack_init() */
 		KASSERT(rack->rc_inp != NULL,
 		    ("%s: rack->rc_inp unexpectedly NULL", __func__));
 		if (rack->rc_inp == NULL) {
 			rack->rc_inp = inp;
 		}
 
 		/*
 		 * Process options only when we get SYN/ACK back. The SYN
 		 * case for incoming connections is handled in tcp_syncache.
 		 * According to RFC1323 the window field in a SYN (i.e., a
 		 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX
 		 * this is traditional behavior, may need to be cleaned up.
 		 */
 		if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
 			/* Handle parallel SYN for ECN */
 			tcp_ecn_input_parallel_syn(tp, thflags, iptos);
 			if ((to.to_flags & TOF_SCALE) &&
 			    (tp->t_flags & TF_REQ_SCALE)) {
 				tp->t_flags |= TF_RCVD_SCALE;
 				tp->snd_scale = to.to_wscale;
 			} else
 				tp->t_flags &= ~TF_REQ_SCALE;
 			/*
 			 * Initial send window.  It will be updated with the
 			 * next incoming segment to the scaled value.
 			 */
 			tp->snd_wnd = th->th_win;
 			rack_validate_fo_sendwin_up(tp, rack);
 			if ((to.to_flags & TOF_TS) &&
 			    (tp->t_flags & TF_REQ_TSTMP)) {
 				tp->t_flags |= TF_RCVD_TSTMP;
 				tp->ts_recent = to.to_tsval;
 				tp->ts_recent_age = cts;
 			} else
 				tp->t_flags &= ~TF_REQ_TSTMP;
 			if (to.to_flags & TOF_MSS) {
 				tcp_mss(tp, to.to_mss);
 			}
 			if ((tp->t_flags & TF_SACK_PERMIT) &&
 			    (to.to_flags & TOF_SACKPERM) == 0)
 				tp->t_flags &= ~TF_SACK_PERMIT;
 			if (IS_FASTOPEN(tp->t_flags)) {
 				if (to.to_flags & TOF_FASTOPEN) {
 					uint16_t mss;
 
 					if (to.to_flags & TOF_MSS)
 						mss = to.to_mss;
 					else
 						if ((inp->inp_vflag & INP_IPV6) != 0)
 							mss = TCP6_MSS;
 						else
 							mss = TCP_MSS;
 					tcp_fastopen_update_cache(tp, mss,
 					    to.to_tfo_len, to.to_tfo_cookie);
 				} else
 					tcp_fastopen_disable_path(tp);
 			}
 		}
 		/*
 		 * At this point we are at the initial call. Here we decide
 		 * if we are doing RACK or not. We do this by seeing if
 		 * TF_SACK_PERMIT is set and the sack-not-required is clear.
 		 * The code now does do dup-ack counting so if you don't
 		 * switch back you won't get rack & TLP, but you will still
 		 * get this stack.
 		 */
 
 		if ((rack_sack_not_required == 0) &&
 		    ((tp->t_flags & TF_SACK_PERMIT) == 0)) {
 			tcp_switch_back_to_default(tp);
 			(*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen,
 			    tlen, iptos);
 #ifdef TCP_ACCOUNTING
 			sched_unpin();
 #endif
 			return (1);
 		}
 		tcp_set_hpts(tp);
 		sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
 	}
 	if (thflags & TH_FIN)
 		tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
 	us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
 	if ((rack->rc_gp_dyn_mul) &&
 	    (rack->use_fixed_rate == 0) &&
 	    (rack->rc_always_pace)) {
 		/* Check in on probertt */
 		rack_check_probe_rtt(rack, us_cts);
 	}
 	rack_clear_rate_sample(rack);
 	if ((rack->forced_ack) &&
 	    ((tcp_get_flags(th) & TH_RST) == 0)) {
 		rack_handle_probe_response(rack, tiwin, us_cts);
 	}
 	/*
 	 * This is the one exception case where we set the rack state
 	 * always. All other times (timers etc) we must have a rack-state
 	 * set (so we assure we have done the checks above for SACK).
 	 */
 	rack->r_ctl.rc_rcvtime = cts;
 	if (rack->r_state != tp->t_state)
 		rack_set_state(tp, rack);
 	if (SEQ_GT(th->th_ack, tp->snd_una) &&
 	    (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL)
 		kern_prefetch(rsm, &prev_state);
 	prev_state = rack->r_state;
 	if ((thflags & TH_RST) &&
 	    ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
 	      SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
 	     (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) {
 		/* The connection will be killed by a reset, check the tracepoint */
 		tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV);
 	}
 	retval = (*rack->r_substate) (m, th, so,
 	    tp, &to, drop_hdrlen,
 	    tlen, tiwin, thflags, nxt_pkt, iptos);
 	if (retval == 0) {
 		/*
 		 * If retval is 1 the tcb is unlocked and most likely the tp
 		 * is gone.
 		 */
 		INP_WLOCK_ASSERT(inp);
 		if ((rack->rc_gp_dyn_mul) &&
 		    (rack->rc_always_pace) &&
 		    (rack->use_fixed_rate == 0) &&
 		    rack->in_probe_rtt &&
 		    (rack->r_ctl.rc_time_probertt_starts == 0)) {
 			/*
 			 * If we are going for target, lets recheck before
 			 * we output.
 			 */
 			rack_check_probe_rtt(rack, us_cts);
 		}
 		if (rack->set_pacing_done_a_iw == 0) {
 			/* How much has been acked? */
 			if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) {
 				/* We have enough to set in the pacing segment size */
 				rack->set_pacing_done_a_iw = 1;
 				rack_set_pace_segments(tp, rack, __LINE__, NULL);
 			}
 		}
 		tcp_rack_xmit_timer_commit(rack, tp);
 #ifdef TCP_ACCOUNTING
 		/*
 		 * If we set ack_val_set to what ack processing we are doing
 		 * we also want to track how many cycles we burned. Note
 		 * the bits after tcp_output we let be "free". This is because
 		 * we are also tracking the tcp_output times as well. Note the
 		 * use of 0xf here since we only have 11 counters (0 - 0xa) and
 		 * 0xf cannot be returned and is what we initialize it to, to
 		 * indicate we are not doing the tabulations.
 		 */
 		if (ack_val_set != 0xf) {
 			uint64_t crtsc;
 
 			crtsc = get_cyclecount();
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val);
 			}
 		}
 #endif
 		if ((nxt_pkt == 0) && (no_output == 0)) {
 			if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) {
 do_output_now:
 				if (tcp_output(tp) < 0) {
 #ifdef TCP_ACCOUNTING
 					sched_unpin();
 #endif
 					return (1);
 				}
 				did_out = 1;
 			}
 			rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
 			rack_free_trim(rack);
 		} else if ((no_output == 1) &&
 			   (nxt_pkt == 0)  &&
 			   (tcp_in_hpts(rack->rc_tp) == 0)) {
 			/*
 			 * We are not in hpts and we had a pacing timer up. Use
 			 * the remaining time (slot_remaining) to restart the timer.
 			 */
 			KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp));
 			rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0);
 			rack_free_trim(rack);
 		}
 		/* Clear the flag, it may have been cleared by output but it may not have been */
 		if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS))
 			tp->t_flags2 &= ~TF2_HPTS_CALLS;
 		/* Update any rounds needed */
 		if (rack_verbose_logging &&  tcp_bblogging_on(rack->rc_tp))
 			rack_log_hystart_event(rack, high_seq, 8);
 		/*
 		 * The draft (v3) calls for us to use SEQ_GEQ, but that
 		 * causes issues when we are just going app limited. Lets
 		 * instead use SEQ_GT <or> where its equal but more data
 		 * is outstanding.
 		 *
 		 * Also make sure we are on the last ack of a series. We
 		 * have to have all the ack's processed in queue to know
 		 * if there is something left outstanding.
 		 */
 		if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) &&
 		    (rack->rc_new_rnd_needed == 0) &&
 		    (nxt_pkt == 0)) {
 			rack_log_hystart_event(rack, tp->snd_una, 21);
 			rack->r_ctl.current_round++;
 			/* Force the next send to setup the next round */
 			rack->rc_new_rnd_needed = 1;
 			if (CC_ALGO(tp)->newround != NULL) {
 				CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
 			}
 		}
 		if ((nxt_pkt == 0) &&
 		    ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
 		    (SEQ_GT(tp->snd_max, tp->snd_una) ||
 		     (tp->t_flags & TF_DELACK) ||
 		     ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
 		      (tp->t_state <= TCPS_CLOSING)))) {
 			/* We could not send (probably in the hpts but stopped the timer earlier)? */
 			if ((tp->snd_max == tp->snd_una) &&
 			    ((tp->t_flags & TF_DELACK) == 0) &&
 			    (tcp_in_hpts(rack->rc_tp)) &&
 			    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
 				/* keep alive not needed if we are still awaiting hptsi output */
 				;
 			} else {
 				int late = 0;
 				if (tcp_in_hpts(tp)) {
 					if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
 						us_cts = tcp_get_usecs(NULL);
 						if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
 							rack->r_early = 1;
 							rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
 						} else
 							late = 1;
 						rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
 					}
 					tcp_hpts_remove(tp);
 				}
 				if (late && (did_out == 0)) {
 					/*
 					 * We are late in the sending
 					 * and we did not call the output
 					 * (this probably should not happen).
 					 */
 					goto do_output_now;
 				}
 				rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
 			}
 			way_out = 1;
 		} else if (nxt_pkt == 0) {
 			/* Do we have the correct timer running? */
 			rack_timer_audit(tp, rack, &so->so_snd);
 			way_out = 2;
 		}
 	done_with_input:
 		rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs));
 		if (did_out)
 			rack->r_wanted_output = 0;
 	}
 #ifdef TCP_ACCOUNTING
 	sched_unpin();
 #endif
 	return (retval);
 }
 
 static void
 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
     int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
 {
 	struct timeval tv;
 
 	/* First lets see if we have old packets */
 	if (!STAILQ_EMPTY(&tp->t_inqueue)) {
 		if (ctf_do_queued_segments(tp, 1)) {
 			m_freem(m);
 			return;
 		}
 	}
 	if (m->m_flags & M_TSTMP_LRO) {
 		mbuf_tstmp2timeval(m, &tv);
 	} else {
 		/* Should not happen; should we kassert instead? */
 		tcp_get_usecs(&tv);
 	}
 	if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0,
 	    &tv) == 0) {
 		INP_WUNLOCK(tptoinpcb(tp));
 	}
 }
 
 struct rack_sendmap *
 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused)
 {
 	struct rack_sendmap *rsm = NULL;
 	int32_t idx;
 	uint32_t srtt = 0, thresh = 0, ts_low = 0;
 	int no_sack = 0;
 
 	/* Return the next guy to be re-transmitted */
 	if (tqhash_empty(rack->r_ctl.tqh)) {
 		return (NULL);
 	}
 	if (tp->t_flags & TF_SENTFIN) {
 		/* retran the end FIN? */
 		return (NULL);
 	}
 	/* ok lets look at this one */
 	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 	if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) {
 		return (rsm);
 	}
 	if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
 		goto check_it;
 	}
 	rsm = rack_find_lowest_rsm(rack);
 	if (rsm == NULL) {
 		return (NULL);
 	}
 check_it:
 	if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) ||
 	    (rack->sack_attack_disable > 0)) {
 		no_sack = 1;
 	}
 	if ((no_sack > 0) &&
 	    (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
 		/*
 		 * No sack so we automatically do the 3 strikes and
 		 * retransmit (no rack timer would be started).
 		 */
 		return (rsm);
 	}
 	if (rsm->r_flags & RACK_ACKED) {
 		return (NULL);
 	}
 	if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
 	    (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
 		/* Its not yet ready */
 		return (NULL);
 	}
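 	/*
 	 * Past the dup-ack/SACK eligibility checks; now require that the
 	 * time since this rsm was last (re)sent exceeds the RACK reorder
 	 * threshold derived from the RTT.
 	 */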
 	srtt = rack_grab_rtt(tp, rack);
 	idx = rsm->r_rtr_cnt - 1;
 	ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
 	thresh = rack_calc_thresh_rack(rack, srtt, tsused);
 	if ((tsused == ts_low) ||
 	    (TSTMP_LT(tsused, ts_low))) {
 		/* No time since sending */
 		return (NULL);
 	}
 	if ((tsused - ts_low) < thresh) {
 		/* It has not been long enough yet */
 		return (NULL);
 	}
 	if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
 	    ((rsm->r_flags & RACK_SACK_PASSED) &&
 	     (rack->sack_attack_disable == 0))) {
 		/*
 		 * We have passed the dup-ack threshold <or>
 		 * a SACK has indicated this is missing.
 		 * Note that if you are a declared attacker
 		 * it is only the dup-ack threshold that
 		 * will cause retransmits.
 		 */
 		/* log retransmit reason */
 		rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
 		rack->r_fast_output = 0;
 		return (rsm);
 	}
 	return (NULL);
 }
 
 static void
 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
 			   uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
 			   int line, struct rack_sendmap *rsm, uint8_t quality)
 {
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		if (rack_verbose_logging == 0) {
 			/*
 			 * We are not verbose; screen out all but
 			 * the ones we always want.
 			 */
 			if ((method != 2) &&
 			    (method != 3) &&
 			    (method != 7) &&
 			    (method != 14) &&
 			    (method != 20)) {
 				return;
 			}
 		}
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.flex1 = slot;
 		log.u_bbr.flex2 = len;
 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
 		log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss;
 		log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca;
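 		/*
 		 * Pack a set of boolean pacing-state flags, one bit each,
 		 * into the use_lt_bw log field.
 		 */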
 		log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->r_late;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->r_early;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->app_limited_needs_set;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->rc_gp_filled;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->in_probe_rtt;
 		log.u_bbr.use_lt_bw <<= 1;
 		log.u_bbr.use_lt_bw |= rack->gp_ready;
 		log.u_bbr.pkt_epoch = line;
 		log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed;
 		log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early;
 		log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec;
 		log.u_bbr.bw_inuse = bw_est;
 		log.u_bbr.delRate = bw;
 		if (rack->r_ctl.gp_bw == 0)
 			log.u_bbr.cur_del_rate = 0;
 		else
 			log.u_bbr.cur_del_rate = rack_get_bw(rack);
 		log.u_bbr.rttProp = len_time;
 		log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt;
 		log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit;
 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
 		if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) {
 			/* We are in slow start */
 			log.u_bbr.flex7 = 1;
 		} else {
 			/* we are on congestion avoidance */
 			log.u_bbr.flex7 = 0;
 		}
 		log.u_bbr.flex8 = method;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec;
 		log.u_bbr.cwnd_gain <<= 1;
 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
 		log.u_bbr.cwnd_gain <<= 1;
 		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
 		log.u_bbr.bbr_substate = quality;
 		log.u_bbr.bbr_state = rack->dgp_on;
 		log.u_bbr.bbr_state <<= 1;
 		log.u_bbr.bbr_state |= rack->r_fill_less_agg;
 		log.u_bbr.bbr_state <<= 1;
 		log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd;
 		log.u_bbr.bbr_state <<= 2;
 		log.u_bbr.bbr_state |= rack->r_pacing_discount;
 		log.u_bbr.flex7 = ((rack->r_ctl.pacing_discount_amm << 1) | log.u_bbr.flex7);
 		TCP_LOG_EVENTP(rack->rc_tp, NULL,
 		    &rack->rc_inp->inp_socket->so_rcv,
 		    &rack->rc_inp->inp_socket->so_snd,
 		    BBR_LOG_HPTSI_CALC, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 static uint32_t
 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss)
 {
 	uint32_t new_tso, user_max, pace_one;
 
 	user_max = rack->rc_user_set_max_segs * mss;
 	if (rack->rc_force_max_seg) {
 		return (user_max);
 	}
 	if (rack->use_fixed_rate &&
 	    ((rack->r_ctl.crte == NULL) ||
 	     (bw != rack->r_ctl.crte->rate))) {
 		/* Use the user mss since we are not exactly matched */
 		return (user_max);
 	}
 	if (rack_pace_one_seg ||
 	    (rack->r_ctl.rc_user_set_min_segs == 1))
 		pace_one = 1;
 	else
 		pace_one = 0;
 
 	new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss,
 		     pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor);
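 	/*
 	 * Clamp the burst size suggested above: never above the user max,
 	 * but raised to any client-suggested or user-set minimum number of
 	 * segments.
 	 */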
 	if (new_tso > user_max)
 		new_tso = user_max;
 	if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) {
 		if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso)
 			new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss;
 	}
 	if (rack->r_ctl.rc_user_set_min_segs &&
 	    ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso))
 	    new_tso = rack->r_ctl.rc_user_set_min_segs * mss;
 	return (new_tso);
 }
 
 static uint64_t
 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b)
 {
 	uint64_t reduced_win;
 	uint32_t gain;
 
 	if (window_input < rc_init_window(rack)) {
 		/*
 		 * The cwnd is collapsed to
 		 * nearly zero, maybe because of a time-out?
 		 * Lets drop back to the lt-bw.
 		 */
 		reduced_win = rack_get_lt_bw(rack);
 		/* Set the flag so the caller knows its a rate and not a reduced window */
 		*rate_set = 1;
 		gain = 100;
 	} else if  (IN_RECOVERY(rack->rc_tp->t_flags)) {
 		/*
 		 * If we are in recovery our cwnd needs to be less for
 		 * our pacing consideration.
 		 */
 		if (rack->rack_hibeta == 0) {
 			reduced_win = window_input / 2;
 			gain = 50;
 		} else {
 			reduced_win = window_input * rack->r_ctl.saved_hibeta;
 			reduced_win /= 100;
 			gain = rack->r_ctl.saved_hibeta;
 		}
 	} else {
 		/*
 		 * Apply Timely factor to increase/decrease the
 		 * amount we are pacing at.
 		 */
 		gain = rack_get_output_gain(rack, NULL);
 		if (gain > rack_gain_p5_ub) {
 			gain = rack_gain_p5_ub;
 		}
 		reduced_win = window_input * gain;
 		reduced_win /= 100;
 	}
 	if (gain_b != NULL)
 		*gain_b = gain;
 	/*
 	 * What is being returned here is a trimmed down
 	 * window value in all cases where rate_set is left
 	 * at 0. In one case we actually return the rate (lt_bw);
 	 * otherwise the "reduced_win" is returned as a slimmed down
 	 * cwnd that the caller then converts into a rate when
 	 * rate_set is 0.
 	 */
 	return (reduced_win);
 }
 
 static int32_t
 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
 {
 	uint64_t lentim, fill_bw;
 
 	/* Lets first see if we are full, if so continue with normal rate */
 	rack->r_via_fill_cw = 0;
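 	/*
 	 * Bail back to the normal rate if flight already fills the cwnd in
 	 * use, if we are about to run into the peer's rwnd, or if we have
 	 * no RTT sample yet to build a rate from.
 	 */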
 	if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
 		return (slot);
 	if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
 		return (slot);
 	if (rack->r_ctl.rc_last_us_rtt == 0)
 		return (slot);
 	if (rack->rc_pace_fill_if_rttin_range &&
 	    (rack->r_ctl.rc_last_us_rtt >=
 	     (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
 		/* The rtt is huge, N * smallest, lets not fill */
 		return (slot);
 	}
 	/*
 	 * First lets calculate the b/w based on the last us-rtt
 	 * and the smallest send window.
 	 */
 	fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use);
 	if (rack->rc_fillcw_apply_discount) {
 		uint32_t rate_set = 0;
 
 		fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL);
 		if (rate_set) {
 			goto at_lt_bw;
 		}
 	}
 	/* Take the rwnd if its smaller */
 	if (fill_bw > rack->rc_tp->snd_wnd)
 		fill_bw = rack->rc_tp->snd_wnd;
 	/* Now lets make it into a b/w */
 	fill_bw *= (uint64_t)HPTS_USEC_IN_SEC;
 	fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
 at_lt_bw:
 	if (rack->r_fill_less_agg) {
 		/*
 		 * We want the average of the rate_wanted
 		 * and our fill-cw calculated bw. We also want
 		 * to cap any increase to be no more than
 		 * X times the lt_bw (where X is the rack_bw_multipler).
 		 */
 		uint64_t lt_bw, rate;
 
 		lt_bw = rack_get_lt_bw(rack);
 		if (lt_bw > *rate_wanted)
 			rate = lt_bw;
 		else
 			rate = *rate_wanted;
 		fill_bw += rate;
 		fill_bw /= 2;
 		if (rack_bw_multipler && (fill_bw > (rate * rack_bw_multipler))) {
 			fill_bw = rate * rack_bw_multipler;
 		}
 	}
 	/* We are below the min b/w */
 	if (non_paced)
 		*rate_wanted = fill_bw;
 	if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
 		return (slot);
 	rack->r_via_fill_cw = 1;
 	if (rack->r_rack_hw_rate_caps &&
 	    (rack->r_ctl.crte != NULL)) {
 		uint64_t high_rate;
 
 		high_rate = tcp_hw_highest_rate(rack->r_ctl.crte);
 		if (fill_bw > high_rate) {
 			/* We are capping bw at the highest rate table entry */
 			if (*rate_wanted > high_rate) {
 				/* The original rate was also capped */
 				rack->r_via_fill_cw = 0;
 			}
 			rack_log_hdwr_pacing(rack,
 					     fill_bw, high_rate, __LINE__,
 					     0, 3);
 			fill_bw = high_rate;
 			if (capped)
 				*capped = 1;
 		}
 	} else if ((rack->r_ctl.crte == NULL) &&
 		   (rack->rack_hdrw_pacing == 0) &&
 		   (rack->rack_hdw_pace_ena) &&
 		   rack->r_rack_hw_rate_caps &&
 		   (rack->rack_attempt_hdwr_pace == 0) &&
 		   (rack->rc_inp->inp_route.ro_nh != NULL) &&
 		   (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
 		/*
 		 * Ok we may have a first attempt that is greater than our top rate
 		 * lets check.
 		 */
 		uint64_t high_rate;
 
 		high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp);
 		if (high_rate) {
 			if (fill_bw > high_rate) {
 				fill_bw = high_rate;
 				if (capped)
 					*capped = 1;
 			}
 		}
 	}
 	if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) {
 		if (rack->rc_hybrid_mode)
 			rack_log_hybrid_bw(rack, rack->rc_tp->snd_max,
 					   fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__);
 		fill_bw = rack->r_ctl.bw_rate_cap;
 	}
 	/*
 	 * Ok fill_bw holds our mythical b/w to fill the cwnd
 	 * in an rtt (unless it was capped); what does that
 	 * equate to time wise?
 	 */
 	lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
 	lentim /= fill_bw;
 	*rate_wanted = fill_bw;
 	if (non_paced || (lentim < slot)) {
 		rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
 					   0, lentim, 12, __LINE__, NULL, 0);
 		return ((int32_t)lentim);
 	} else
 		return (slot);
 }
 
 static int32_t
 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz)
 {
 	uint64_t srtt;
 	int32_t slot = 0;
 	int32_t minslot = 0;
 	int can_start_hw_pacing = 1;
 	int err;
 	int pace_one;
 
 	if (rack_pace_one_seg ||
 	    (rack->r_ctl.rc_user_set_min_segs == 1))
 		pace_one = 1;
 	else
 		pace_one = 0;
 	if (rack->rc_always_pace == 0) {
 		/*
 		 * We use the most optimistic possible cwnd/srtt for
 		 * sending calculations. This will make our
 		 * calculation anticipate getting more through
 		 * quicker than possible. But thats ok, we don't want
 		 * the peer to have a gap in data sending.
 		 */
 		uint64_t cwnd, tr_perms = 0;
 		int32_t reduce = 0;
 
 	old_method:
 		/*
 		 * We keep no precise pacing with the old method
 		 * instead we use the pacer to mitigate bursts.
 		 */
 		if (rack->r_ctl.rc_rack_min_rtt)
 			srtt = rack->r_ctl.rc_rack_min_rtt;
 		else
 			srtt = max(tp->t_srtt, 1);
 		if (rack->r_ctl.rc_rack_largest_cwnd)
 			cwnd = rack->r_ctl.rc_rack_largest_cwnd;
 		else
 			cwnd = rack->r_ctl.cwnd_to_use;
 		/* Inflate cwnd by 1000 so srtt of usecs is in ms */
 		tr_perms = (cwnd * 1000) / srtt;
 		if (tr_perms == 0) {
 			tr_perms = ctf_fixed_maxseg(tp);
 		}
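 		/*
 		 * tr_perms is roughly bytes per millisecond, so the slot
 		 * computed below is in milliseconds until it is scaled to
 		 * microseconds by HPTS_USEC_IN_MSEC further down.
 		 */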
 		/*
 		 * Calculate how long this will take to drain; if
 		 * the calculation comes out to zero, thats ok, we
 		 * will use send_a_lot to possibly spin around for
 		 * more, increasing tot_len_this_send to the point
 		 * that it is going to require a pace, or we hit the
 		 * cwnd. In that case we are just waiting for
 		 * an ACK.
 		 */
 		slot = len / tr_perms;
 		/* Now do we reduce the time so we don't run dry? */
 		if (slot && rack_slot_reduction) {
 			reduce = (slot / rack_slot_reduction);
 			if (reduce < slot) {
 				slot -= reduce;
 			} else
 				slot = 0;
 		}
 		slot *= HPTS_USEC_IN_MSEC;
 		if (rack->rc_pace_to_cwnd) {
 			uint64_t rate_wanted = 0;
 
 			slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
 			rack->rc_ack_can_sendout_data = 1;
 			rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0);
 		} else
 			rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0);
 		/*******************************************************/
 		/* RRS: We insert non-paced call to stats here for len */
 		/*******************************************************/
 	} else {
 		uint64_t bw_est, res, lentim, rate_wanted;
 		uint32_t segs, oh;
 		int capped = 0;
 		int prev_fill;
 
 		if ((rack->r_rr_config == 1) && rsm) {
 			return (rack->r_ctl.rc_min_to);
 		}
 		if (rack->use_fixed_rate) {
 			rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack);
 		} else if ((rack->r_ctl.init_rate == 0) &&
 			   (rack->r_ctl.gp_bw == 0)) {
 			/* no way to yet do an estimate */
 			bw_est = rate_wanted = 0;
 		} else if (rack->dgp_on) {
 			bw_est = rack_get_bw(rack);
 			rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
 		} else {
 			uint32_t gain, rate_set = 0;
 
 			rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use);
 			rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain);
 			if (rate_set == 0) {
 				if (rate_wanted > rack->rc_tp->snd_wnd)
 					rate_wanted = rack->rc_tp->snd_wnd;
 				/* Now lets make it into a b/w */
 				rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC;
 				rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt;
 			}
 			bw_est = rate_wanted;
 			rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd,
 						   rack->r_ctl.cwnd_to_use,
 						   rate_wanted, bw_est,
 						   rack->r_ctl.rc_last_us_rtt,
 						   88, __LINE__, NULL, gain);
 		}
 		if ((bw_est == 0) || (rate_wanted == 0) ||
 		    ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) {
 			/*
 			 * No way yet to make a b/w estimate or
 			 * our rate is set incorrectly.
 			 */
 			goto old_method;
 		}
 		rack_rate_cap_bw(rack, &rate_wanted, &capped);
 		/* We need to account for all the overheads */
 		segs = (len + segsiz - 1) / segsiz;
 		/*
 		 * We need the diff between 1514 bytes (e-mtu with e-hdr)
 		 * and how much data we put in each packet. Yes this
 		 * means we may be off if we are larger than 1500 bytes
 		 * or smaller. But this just makes us more conservative.
 		 */
 
 		oh =  (tp->t_maxseg - segsiz) + sizeof(struct tcphdr);
 		if (rack->r_is_v6) {
 #ifdef INET6
 			oh += sizeof(struct ip6_hdr);
 #endif
 		} else {
 #ifdef INET
 			oh += sizeof(struct ip);
 #endif
 		}
 		/* We add a fixed 14 for the ethernet header */
 		oh += 14;
 		segs *= oh;
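 		/*
 		 * segs now holds the total header overhead in bytes for all
 		 * the packets; adding it to len makes the pacing time below
 		 * reflect the on-wire size at rate_wanted.
 		 */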
 		lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
 		res = lentim / rate_wanted;
 		slot = (uint32_t)res;
 		if (rack_hw_rate_min &&
 		    (rate_wanted < rack_hw_rate_min)) {
 			can_start_hw_pacing = 0;
 			if (rack->r_ctl.crte) {
 				/*
 				 * Ok we need to release it, we
 				 * have fallen too low.
 				 */
 				tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
 				rack->r_ctl.crte = NULL;
 				rack->rack_attempt_hdwr_pace = 0;
 				rack->rack_hdrw_pacing = 0;
 			}
 		}
 		if (rack->r_ctl.crte &&
 		    (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
 			/*
 			 * We want more than the hardware can give us,
 			 * don't start any hw pacing.
 			 */
 			can_start_hw_pacing = 0;
 			if (rack->r_rack_hw_rate_caps == 0) {
 				/*
 				 * Ok we need to release it, we
 				 * want more than the card can give us and
 				 * no rate cap is in place. Set it up so
 				 * when we want less we can retry.
 				 */
 				tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
 				rack->r_ctl.crte = NULL;
 				rack->rack_attempt_hdwr_pace = 0;
 				rack->rack_hdrw_pacing = 0;
 			}
 		}
 		if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) {
 			/*
 			 * We lost our rate somehow, this can happen
 			 * if the interface changed underneath us.
 			 */
 			tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
 			rack->r_ctl.crte = NULL;
 			/* Lets re-allow attempting to setup pacing */
 			rack->rack_hdrw_pacing = 0;
 			rack->rack_attempt_hdwr_pace = 0;
 			rack_log_hdwr_pacing(rack,
 					     rate_wanted, bw_est, __LINE__,
 					     0, 6);
 		}
 		prev_fill = rack->r_via_fill_cw;
 		if ((rack->rc_pace_to_cwnd) &&
 		    (capped == 0) &&
 		    (rack->dgp_on == 1) &&
 		    (rack->use_fixed_rate == 0) &&
 		    (rack->in_probe_rtt == 0) &&
 		    (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) {
 			/*
 			 * We want to pace at our rate *or* faster to
 			 * fill the cwnd to the max if its not full.
 			 */
 			slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
 			/* Re-check to make sure we are not exceeding our max b/w */
 			if ((rack->r_ctl.crte != NULL) &&
 			    (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
 				/*
 				 * We want more than the hardware can give us,
 				 * don't start any hw pacing.
 				 */
 				can_start_hw_pacing = 0;
 				if (rack->r_rack_hw_rate_caps == 0) {
 					/*
 					 * Ok we need to release it, we
 					 * want more than the card can give us and
 					 * no rate cap is in place. Set it up so
 					 * when we want less we can retry.
 					 */
 					tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
 					rack->r_ctl.crte = NULL;
 					rack->rack_attempt_hdwr_pace = 0;
 					rack->rack_hdrw_pacing = 0;
 					rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
 				}
 			}
 		}
 		if ((rack->rc_inp->inp_route.ro_nh != NULL) &&
 		    (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) {
 			if ((rack->rack_hdw_pace_ena) &&
 			    (can_start_hw_pacing > 0) &&
 			    (rack->rack_hdrw_pacing == 0) &&
 			    (rack->rack_attempt_hdwr_pace == 0)) {
 				/*
 				 * Lets attempt to turn on hardware pacing
 				 * if we can.
 				 */
 				rack->rack_attempt_hdwr_pace = 1;
 				rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp,
 								       rack->rc_inp->inp_route.ro_nh->nh_ifp,
 								       rate_wanted,
 								       RS_PACING_GEQ,
 								       &err, &rack->r_ctl.crte_prev_rate);
 				if (rack->r_ctl.crte) {
 					rack->rack_hdrw_pacing = 1;
 					rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz,
 													   pace_one, rack->r_ctl.crte,
 													   NULL, rack->r_ctl.pace_len_divisor);
 					rack_log_hdwr_pacing(rack,
 							     rate_wanted, rack->r_ctl.crte->rate, __LINE__,
 							     err, 0);
 					rack->r_ctl.last_hw_bw_req = rate_wanted;
 				} else {
 					counter_u64_add(rack_hw_pace_init_fail, 1);
 				}
 			} else if (rack->rack_hdrw_pacing &&
 				   (rack->r_ctl.last_hw_bw_req != rate_wanted)) {
 				/* Do we need to adjust our rate? */
 				const struct tcp_hwrate_limit_table *nrte;
 
 				if (rack->r_up_only &&
 				    (rate_wanted < rack->r_ctl.crte->rate)) {
 					/**
 					 * We have four possible states here
 					 * having to do with the previous time
 					 * and this time.
 					 *   previous  |  this-time
 					 * A)     0      |     0   -- fill_cw not in the picture
 					 * B)     1      |     0   -- we were doing a fill-cw but now are not
 					 * C)     1      |     1   -- all rates from fill_cw
 					 * D)     0      |     1   -- we were doing non-fill and now we are filling
 					 *
 					 * For case A, C and D we don't allow a drop. But for
 					 * case B, where we are now on our steady rate, we do
 					 * allow a drop.
 					 *
 					 */
 					if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0)))
 						goto done_w_hdwr;
 				}
 				if ((rate_wanted > rack->r_ctl.crte->rate) ||
 				    (rate_wanted <= rack->r_ctl.crte_prev_rate)) {
 					if (rack_hw_rate_to_low &&
 					    (bw_est < rack_hw_rate_to_low)) {
 						/*
 						 * The pacing rate is too low for hardware, but
 						 * do allow hardware pacing to be restarted.
 						 */
 						rack_log_hdwr_pacing(rack,
 								     bw_est, rack->r_ctl.crte->rate, __LINE__,
 								     0, 5);
 						tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp);
 						rack->r_ctl.crte = NULL;
 						rack->rack_attempt_hdwr_pace = 0;
 						rack->rack_hdrw_pacing = 0;
 						rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
 						goto done_w_hdwr;
 					}
 					nrte = tcp_chg_pacing_rate(rack->r_ctl.crte,
 								   rack->rc_tp,
 								   rack->rc_inp->inp_route.ro_nh->nh_ifp,
 								   rate_wanted,
 								   RS_PACING_GEQ,
 								   &err, &rack->r_ctl.crte_prev_rate);
 					if (nrte == NULL) {
 						/*
 						 * Lost the rate, lets drop hardware pacing
 						 * period.
 						 */
 						rack->rack_hdrw_pacing = 0;
 						rack->r_ctl.crte = NULL;
 						rack_log_hdwr_pacing(rack,
 								     rate_wanted, 0, __LINE__,
 								     err, 1);
 						rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
 						counter_u64_add(rack_hw_pace_lost, 1);
 					} else if (nrte != rack->r_ctl.crte) {
 						rack->r_ctl.crte = nrte;
 						rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted,
 														   segsiz, pace_one, rack->r_ctl.crte,
 														   NULL, rack->r_ctl.pace_len_divisor);
 						rack_log_hdwr_pacing(rack,
 								     rate_wanted, rack->r_ctl.crte->rate, __LINE__,
 								     err, 2);
 						rack->r_ctl.last_hw_bw_req = rate_wanted;
 					}
 				} else {
 					/* We just need to adjust the segment size */
 					rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted);
 					rack_log_hdwr_pacing(rack,
 							     rate_wanted, rack->r_ctl.crte->rate, __LINE__,
 							     0, 4);
 					rack->r_ctl.last_hw_bw_req = rate_wanted;
 				}
 			}
 		}
 		if (minslot && (minslot > slot)) {
 			rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim,
 						   98, __LINE__, NULL, 0);
 			slot = minslot;
 		}
 	done_w_hdwr:
 		if (rack_limit_time_with_srtt &&
 		    (rack->use_fixed_rate == 0) &&
 		    (rack->rack_hdrw_pacing == 0)) {
 			/*
 			 * Sanity check, we do not allow the pacing delay
 			 * to be longer than the SRTT of the path. If it is
 			 * a slow path, then adding a packet should increase
 			 * the RTT and compensate for this i.e. the srtt will
 			 * be greater so the allowed pacing time will be greater.
 			 *
 			 * Note this restriction does not apply where a peak rate
 			 * is set, i.e. when we are doing fixed pacing or hardware pacing.
 			 */
 			if (rack->rc_tp->t_srtt)
 				srtt = rack->rc_tp->t_srtt;
 			else
 				srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC;	/* its in ms convert */
 			if (srtt < (uint64_t)slot) {
 				rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0);
 				slot = srtt;
 			}
 		}
 		/*******************************************************************/
 		/* RRS: We insert paced call to stats here for len and rate_wanted */
 		/*******************************************************************/
 		rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0);
 	}
 	if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
 		/*
 		 * If this rate is seeing enobufs when it
 		 * goes to send then either the nic is out
 		 * of gas or we are mis-estimating the time
 		 * somehow and not letting the queue empty
 		 * completely. Lets add to the pacing time.
 		 */
 		int hw_boost_delay;
 
 		hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult;
 		if (hw_boost_delay > rack_enobuf_hw_max)
 			hw_boost_delay = rack_enobuf_hw_max;
 		else if (hw_boost_delay < rack_enobuf_hw_min)
 			hw_boost_delay = rack_enobuf_hw_min;
 		slot += hw_boost_delay;
 	}
 	return (slot);
 }
 
 static void
 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
     tcp_seq startseq, uint32_t sb_offset)
 {
 	struct rack_sendmap *my_rsm = NULL;
 
 	if (tp->t_state < TCPS_ESTABLISHED) {
 		/*
 		 * We don't start any measurements if we are
 		 * not at least established.
 		 */
 		return;
 	}
 	if (tp->t_state >= TCPS_FIN_WAIT_1) {
 		/*
 		 * We will get no more data into the SB;
 		 * this means we need to have the data available
 		 * before we start a measurement.
 		 */
 
 		if (sbavail(&tptosocket(tp)->so_snd) <
 		    max(rc_init_window(rack),
 			(MIN_GP_WIN * ctf_fixed_maxseg(tp)))) {
 			/* Nope not enough data */
 			return;
 		}
 	}
 	tp->t_flags |= TF_GPUTINPROG;
 	rack->r_ctl.rc_gp_cumack_ts = 0;
 	rack->r_ctl.rc_gp_lowrtt = 0xffffffff;
 	rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
 	tp->gput_seq = startseq;
 	rack->app_limited_needs_set = 0;
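 	/*
 	 * Note if this measurement starts inside probe-rtt; a previously
 	 * noted overlap is cleared once the measurement start is past the
 	 * snd_max recorded at probe-rtt exit.
 	 */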
 	if (rack->in_probe_rtt)
 		rack->measure_saw_probe_rtt = 1;
 	else if ((rack->measure_saw_probe_rtt) &&
 		 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit)))
 		rack->measure_saw_probe_rtt = 0;
 	if (rack->rc_gp_filled)
 		tp->gput_ts = rack->r_ctl.last_cumack_advance;
 	else {
 		/* Special case initial measurement */
 		struct timeval tv;
 
 		tp->gput_ts = tcp_get_usecs(&tv);
 		rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
 	}
 	/*
 	 * We take a guess out into the future,
 	 * if we have no measurement and no
 	 * initial rate, we measure the first
 	 * initial-windows worth of data to
 	 * speed up getting some GP measurement and
 	 * thus start pacing.
 	 */
 	if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) {
 		rack->app_limited_needs_set = 1;
 		tp->gput_ack = startseq + max(rc_init_window(rack),
 					      (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
 		rack_log_pacing_delay_calc(rack,
 					   tp->gput_seq,
 					   tp->gput_ack,
 					   0,
 					   tp->gput_ts,
 					   (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
 					   9,
 					   __LINE__, NULL, 0);
 		rack_tend_gp_marks(tp, rack);
 		rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
 		return;
 	}
 	if (sb_offset) {
 		/*
 		 * We are out somewhere in the sb;
 		 * can we use the already outstanding data?
 		 */
 
 		if (rack->r_ctl.rc_app_limited_cnt == 0) {
 			/*
 			 * Yes first one is good and in this case
 			 * the tp->gput_ts is correctly set based on
 			 * the last ack that arrived (no need to
 			 * set things up when an ack comes in).
 			 */
 			my_rsm = tqhash_min(rack->r_ctl.tqh);
 			if ((my_rsm == NULL) ||
 			    (my_rsm->r_rtr_cnt != 1)) {
 				/* retransmission? */
 				goto use_latest;
 			}
 		} else {
 			if (rack->r_ctl.rc_first_appl == NULL) {
 				/*
 				 * If rc_first_appl is NULL
 				 * then the cnt should be 0.
 				 * This is probably an error, maybe
 			 * a KASSERT would be appropriate.
 				 */
 				goto use_latest;
 			}
 			/*
 			 * If we have a marker pointer to the last one that is
 			 * app limited we can use that, but we need to set
 			 * things up so that when it gets ack'ed we record
 			 * the ack time (if its not already acked).
 			 */
 			rack->app_limited_needs_set = 1;
 			/*
 			 * We want to get to the rsm that is either
 			 * next with space i.e. over 1 MSS or the one
 			 * after that (after the app-limited).
 			 */
 			my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl);
 			if (my_rsm) {
 				if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
 					/* Have to use the next one */
 					my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm);
 				else {
 					/* Use after the first MSS of it is acked */
 					tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp);
 					goto start_set;
 				}
 			}
 			if ((my_rsm == NULL) ||
 			    (my_rsm->r_rtr_cnt != 1)) {
 				/*
 				 * Either its a retransmit or
 				 * the last is the app-limited one.
 				 */
 				goto use_latest;
 			}
 		}
 		tp->gput_seq = my_rsm->r_start;
 start_set:
 		if (my_rsm->r_flags & RACK_ACKED) {
 			/*
 			 * This one has been acked use the arrival ack time
 			 */
 			struct rack_sendmap *nrsm;
 
 			tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
 			rack->app_limited_needs_set = 0;
 			/*
 			 * Ok in this path we need to use the r_end now
 			 * since this guy is the starting ack.
 			 */
 			tp->gput_seq = my_rsm->r_end;
 			/*
 			 * We also need to adjust up the sendtime
 			 * to the send of the next data after my_rsm.
 			 */
 			nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm);
 			if (nrsm != NULL)
 				my_rsm = nrsm;
 			else {
 				/*
 				 * The next has not been sent, thats the
 				 * case for using the latest.
 				 */
 				goto use_latest;
 			}
 		}
 		rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0];
 		tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
 		rack->r_ctl.rc_gp_cumack_ts = 0;
 		rack_log_pacing_delay_calc(rack,
 					   tp->gput_seq,
 					   tp->gput_ack,
 					   (uint64_t)my_rsm,
 					   tp->gput_ts,
 					   (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
 					   9,
 					   __LINE__, my_rsm, 0);
 		/* Now lets make sure all are marked as they should be */
 		rack_tend_gp_marks(tp, rack);
 		rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
 		return;
 	}
 
 use_latest:
 	/*
 	 * We don't know how long we may have been
 	 * idle or if this is the first-send. Let's
 	 * set up the flag so we will trim off
 	 * the first ack'd data and get a true
 	 * measurement.
 	 */
 	rack->app_limited_needs_set = 1;
 	tp->gput_ack = startseq + rack_get_measure_window(tp, rack);
 	rack->r_ctl.rc_gp_cumack_ts = 0;
 	/* Find this guy so we can pull the send time */
 	my_rsm = tqhash_find(rack->r_ctl.tqh, startseq);
 	if (my_rsm) {
 		rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0];
 		if (my_rsm->r_flags & RACK_ACKED) {
 		/*
 		 * Unlikely since it's probably what was
 		 * just transmitted (but I am paranoid).
 		 */
 			tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival;
 			rack->app_limited_needs_set = 0;
 		}
 		if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) {
 			/* This also is unlikely */
 			tp->gput_seq = my_rsm->r_start;
 		}
 	} else {
 		/*
 		 * TSNH unless we have some send-map limit,
 		 * and even at that it should not be hitting
 		 * that limit (we should have stopped sending).
 		 */
 		struct timeval tv;
 
 		microuptime(&tv);
 		rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv);
 	}
 	rack_tend_gp_marks(tp, rack);
 	rack_log_pacing_delay_calc(rack,
 				   tp->gput_seq,
 				   tp->gput_ack,
 				   (uint64_t)my_rsm,
 				   tp->gput_ts,
 				   (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts),
 				   9, __LINE__, NULL, 0);
 	rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL);
 }
 
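 /*
  * Compute how much new data we can send right now: the room the send
  * window (the lesser of cwnd and the peer's receive window) leaves
  * beyond the current flight, clamped so that we never exceed the
  * peer's receive window and never ask for more than the socket buffer
  * actually holds past sb_offset.
  */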
 static inline uint32_t
 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack,  uint32_t cwnd_to_use,
     uint32_t avail, int32_t sb_offset)
 {
 	uint32_t len;
 	uint32_t sendwin;
 
 	if (tp->snd_wnd > cwnd_to_use)
 		sendwin = cwnd_to_use;
 	else
 		sendwin = tp->snd_wnd;
 	if (ctf_outstanding(tp) >= tp->snd_wnd) {
 		/* We never want to go over our peer's rcv-window */
 		len = 0;
 	} else {
 		uint32_t flight;
 
 		flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
 		if (flight >= sendwin) {
 			/*
 			 * We already have in flight all that cwnd allows (if
 			 * it were rwnd that was blocking, it would have hit
 			 * the >= tp->snd_wnd check above).
 			 */
 			return (0);
 		}
 		len = sendwin - flight;
 		if ((len + ctf_outstanding(tp)) > tp->snd_wnd) {
 			/* We would send too much (beyond the rwnd) */
 			len = tp->snd_wnd - ctf_outstanding(tp);
 		}
 		if ((len + sb_offset) > avail) {
 			/*
 			 * We don't have that much in the SB, how much is
 			 * there?
 			 */
 			len = avail - sb_offset;
 		}
 	}
 	return (len);
 }
 
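 /*
  * Log the state of a fast-send-block attempt (lengths, flags, error and
  * mode) to the BB log; only active when both verbose logging and
  * black-box logging are enabled on this connection.
  */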
 static void
 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags,
 	     unsigned ipoptlen, int32_t orig_len, int32_t len, int error,
 	     int rsm_is_null, int optlen, int line, uint16_t mode)
 {
 	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.flex1 = error;
 		log.u_bbr.flex2 = flags;
 		log.u_bbr.flex3 = rsm_is_null;
 		log.u_bbr.flex4 = ipoptlen;
 		log.u_bbr.flex5 = tp->rcv_numsacks;
 		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
 		log.u_bbr.flex7 = optlen;
 		log.u_bbr.flex8 = rack->r_fsb_inited;
 		log.u_bbr.applimited = rack->r_fast_output;
 		log.u_bbr.bw_inuse = rack_get_bw(rack);
 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
 		log.u_bbr.cwnd_gain = mode;
 		log.u_bbr.pkts_out = orig_len;
 		log.u_bbr.lt_epoch = len;
 		log.u_bbr.delivered = line;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0,
 			       len, &log, false, NULL, __func__, __LINE__, &tv);
 	}
 }
 
 
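 /*
  * Copy up to *plen bytes from the mbuf chain starting at the_m/the_off
  * into a freshly allocated chain, honoring the TSO segment limit and
  * segment size and refusing to mix TLS sessions. On a short copy *plen
  * is trimmed to what was actually taken. If fsb is non-NULL, the mbuf
  * and offset where the copy stopped (plus the original lengths) are
  * saved so the fast-send block can resume from that point later.
  */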
 static struct mbuf *
 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen,
 		   struct rack_fast_send_blk *fsb,
 		   int32_t seglimit, int32_t segsize, int hw_tls)
 {
 #ifdef KERN_TLS
 	struct ktls_session *tls, *ntls;
 #ifdef INVARIANTS
 	struct mbuf *start;
 #endif
 #endif
 	struct mbuf *m, *n, **np, *smb;
 	struct mbuf *top;
 	int32_t off, soff;
 	int32_t len = *plen;
 	int32_t fragsize;
 	int32_t len_cp = 0;
 	uint32_t mlen, frags;
 
 	soff = off = the_off;
 	smb = m = the_m;
 	np = &top;
 	top = NULL;
 #ifdef KERN_TLS
 	if (hw_tls && (m->m_flags & M_EXTPG))
 		tls = m->m_epg_tls;
 	else
 		tls = NULL;
 #ifdef INVARIANTS
 	start = m;
 #endif
 #endif
 	while (len > 0) {
 		if (m == NULL) {
 			*plen = len_cp;
 			break;
 		}
 #ifdef KERN_TLS
 		if (hw_tls) {
 			if (m->m_flags & M_EXTPG)
 				ntls = m->m_epg_tls;
 			else
 				ntls = NULL;
 
 			/*
 			 * Avoid mixing TLS records with handshake
 			 * data or TLS records from different
 			 * sessions.
 			 */
 			if (tls != ntls) {
 				MPASS(m != start);
 				*plen = len_cp;
 				break;
 			}
 		}
 #endif
 		mlen = min(len, m->m_len - off);
 		if (seglimit) {
 			/*
 			 * For M_EXTPG mbufs, add 3 segments
 			 * + 1 in case we are crossing page boundaries
 			 * + 2 in case the TLS hdr/trailer are used
 			 * It is cheaper to just add the segments
 			 * than it is to take the cache miss to look
 			 * at the mbuf ext_pgs state in detail.
 			 */
 			if (m->m_flags & M_EXTPG) {
 				fragsize = min(segsize, PAGE_SIZE);
 				frags = 3;
 			} else {
 				fragsize = segsize;
 				frags = 0;
 			}
 
 			/* Break if we really can't fit anymore. */
 			if ((frags + 1) >= seglimit) {
 				*plen =	len_cp;
 				break;
 			}
 
 			/*
 			 * Reduce size if you can't copy the whole
 			 * mbuf. If we can't copy the whole mbuf, also
 			 * adjust len so the loop will end after this
 			 * mbuf.
 			 */
 			if ((frags + howmany(mlen, fragsize)) >= seglimit) {
 				mlen = (seglimit - frags - 1) * fragsize;
 				len = mlen;
 				*plen = len_cp + len;
 			}
 			frags += howmany(mlen, fragsize);
 			if (frags == 0)
 				frags++;
 			seglimit -= frags;
 			KASSERT(seglimit > 0,
 			    ("%s: seglimit went too low", __func__));
 		}
 		n = m_get(M_NOWAIT, m->m_type);
 		*np = n;
 		if (n == NULL)
 			goto nospace;
 		n->m_len = mlen;
 		soff += mlen;
 		len_cp += n->m_len;
 		if (m->m_flags & (M_EXT|M_EXTPG)) {
 			n->m_data = m->m_data + off;
 			mb_dupcl(n, m);
 		} else {
 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
 			    (u_int)n->m_len);
 		}
 		len -= n->m_len;
 		off = 0;
 		m = m->m_next;
 		np = &n->m_next;
 		if (len || (soff == smb->m_len)) {
 			/*
 			 * We have more, so we move forward, or
 			 * we have consumed the entire mbuf and
 			 * len has fallen to 0.
 			 */
 			soff = 0;
 			smb = m;
 		}
 
 	}
 	if (fsb != NULL) {
 		fsb->m = smb;
 		fsb->off = soff;
 		if (smb) {
 			/*
 			 * Save off the size of the mbuf. We do
 			 * this so that we can recognize when it
 			 * has been trimmed by sbcut() as acks
 			 * come in.
 			 */
 			fsb->o_m_len = smb->m_len;
 			fsb->o_t_len = M_TRAILINGROOM(smb);
 		} else {
 			/*
 			 * This is the case where the next mbuf went to NULL. This
 			 * means with this copy we have sent everything in the sb.
 			 * In theory we could clear the fast_output flag, but let's
 			 * not, since it's possible that we could get more data added
 			 * and acks that call the extend function, which would let
 			 * us send more.
 			 */
 			fsb->o_m_len = 0;
 			fsb->o_t_len = 0;
 		}
 	}
 	return (top);
 nospace:
 	if (top)
 		m_freem(top);
 	return (NULL);
 
 }
 
 /*
  * This is a copy of m_copym(), taking the TSO segment size/limit
  * constraints into account, and advancing the sndptr as it goes.
  */
 static struct mbuf *
 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
 		int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
 {
 	struct mbuf *m, *n;
 	int32_t soff;
 
 	m = rack->r_ctl.fsb.m;
 	if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) {
 		/*
 		 * The trailing space changed; mbufs can grow
 		 * at the tail but they can't shrink from
 		 * it (the KASSERT below checks that). Adjust
 		 * orig_m_len to compensate for this change.
 		 */
 		KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)),
 			("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n",
 			 m,
 			 rack,
 			 (intmax_t)M_TRAILINGROOM(m),
 			 rack->r_ctl.fsb.o_t_len,
 			 rack->r_ctl.fsb.o_m_len,
 			 m->m_len));
 		rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m));
 		rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m);
 	}
 	if (m->m_len < rack->r_ctl.fsb.o_m_len) {
 		/*
 		 * The mbuf shrank (trimmed off the top by an ack), so our
 		 * offset changes.
 		 */
 		KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)),
 			("mbuf:%p len:%u rack:%p oml:%u soff:%u\n",
 			 m, m->m_len,
 			 rack, rack->r_ctl.fsb.o_m_len,
 			 rack->r_ctl.fsb.off));
 
 		if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len))
 			rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len);
 		else
 			rack->r_ctl.fsb.off = 0;
 		rack->r_ctl.fsb.o_m_len = m->m_len;
 #ifdef INVARIANTS
 	} else if (m->m_len > rack->r_ctl.fsb.o_m_len) {
 		panic("rack:%p m:%p m_len grew outside of t_space compensation",
 		      rack, m);
 #endif
 	}
 	soff = rack->r_ctl.fsb.off;
 	KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
 	KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
 	KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
 				 __FUNCTION__,
 				 rack, *plen, m, m->m_len));
 	/* Save off the right location before we copy and advance */
 	*s_soff = soff;
 	*s_mb = rack->r_ctl.fsb.m;
 	n = rack_fo_base_copym(m, soff, plen,
 			       &rack->r_ctl.fsb,
 			       seglimit, segsize, rack->r_ctl.fsb.hw_tls);
 	return (n);
 }
 
 /* Log the buffer level */
 static void
 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack,
 		     int len, struct timeval *tv,
 		     uint32_t cts)
 {
 	uint32_t p_rate = 0, p_queue = 0, err = 0;
 	union tcp_log_stackspecific log;
 
 #ifdef RATELIMIT
 	err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
 	err = in_pcbquery_txrtlmt(rack->rc_inp,	&p_rate);
 #endif
 	memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 	log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 	log.u_bbr.flex1 = p_rate;
 	log.u_bbr.flex2 = p_queue;
 	log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
 	log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
 	log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
 	log.u_bbr.flex7 = 99;
 	log.u_bbr.flex8 = 0;
 	log.u_bbr.pkts_out = err;
 	log.u_bbr.delRate = rack->r_ctl.crte->rate;
 	log.u_bbr.timeStamp = cts;
 	log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 	tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
 		       len, &log, false, NULL, __func__, __LINE__, tv);
 
 }
 
 static uint32_t
 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp,
 		       struct timeval *tv, uint32_t cts, int len, uint32_t segsiz)
 {
 	uint64_t lentime = 0;
 #ifdef RATELIMIT
 	uint32_t p_rate = 0, p_queue = 0, err;
 	union tcp_log_stackspecific log;
 	uint64_t bw;
 
 	err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
 	/* Failed or queue is zero */
 	if (err || (p_queue == 0)) {
 		lentime = 0;
 		goto out;
 	}
 	err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
 	if (err) {
 		lentime = 0;
 		goto out;
 	}
 	/*
 	 * If we reach here we have some bytes in
 	 * the queue. The number returned is a value
 	 * between 0 and 0xffff, where 0xffff is full
 	 * and 0 is empty. So how best to turn this into
 	 * something usable?
 	 *
 	 * The "safer" way is to take the b/w obtained
 	 * from the query (which should be our b/w rate)
 	 * and pretend that a full send (our rc_pace_max_segs)
 	 * is outstanding, counting it as that many of our
 	 * MSS-sized segments expressed as full ethernet
 	 * segments.
 	 */
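 	/*
 	 * Purely illustrative numbers (not from any measurement): with a
 	 * p_rate of 80 Mbit/s, bw is 10,000,000 bytes/sec. If
 	 * rc_pace_max_segs covers 45 segments of segsiz 1448 and a full
 	 * ethernet frame is taken as 1514 bytes, that is 45 * 1514 =
 	 * 68,130 wire bytes, so lentime works out to roughly 6,800 usecs
 	 * of queue-drain time returned for use as the pacing slot.
 	 */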
 	bw = p_rate / 8;
 	if (bw) {
 		lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
 		lentime *= ETHERNET_SEGMENT_SIZE;
 		lentime *= (uint64_t)HPTS_USEC_IN_SEC;
 		lentime /= bw;
 	} else {
 		/* TSNH -- KASSERT? */
 		lentime = 0;
 	}
 out:
 	if (tcp_bblogging_on(tp)) {
 		memset(&log, 0, sizeof(log));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		log.u_bbr.flex1 = p_rate;
 		log.u_bbr.flex2 = p_queue;
 		log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
 		log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
 		log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
 		log.u_bbr.flex7 = 99;
 		log.u_bbr.flex8 = 0;
 		log.u_bbr.pkts_out = err;
 		log.u_bbr.delRate = rack->r_ctl.crte->rate;
 		log.u_bbr.cur_del_rate = lentime;
 		log.u_bbr.timeStamp = cts;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
 			       len, &log, false, NULL, __func__, __LINE__,tv);
 	}
 #endif
 	return ((uint32_t)lentime);
 }
 
 static int
 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
 		     uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
 {
 	/*
 	 * Enter the fast retransmit path. We are given that a sched_pin is
 	 * in place (if accounting is compiled in) and the cycle count taken
 	 * at entry is in ts_val. The concept here is that the rsm
 	 * now holds the mbuf offsets and such so we can directly transmit
 	 * without a lot of overhead; the len field is already set for
 	 * us to prohibit us from sending too much (usually it is one MSS).
 	 */
 	struct ip *ip = NULL;
 	struct udphdr *udp = NULL;
 	struct tcphdr *th = NULL;
 	struct mbuf *m = NULL;
 	struct inpcb *inp;
 	uint8_t *cpto;
 	struct tcp_log_buffer *lgb;
 #ifdef TCP_ACCOUNTING
 	uint64_t crtsc;
 	int cnt_thru = 1;
 #endif
 	struct tcpopt to;
 	u_char opt[TCP_MAXOLEN];
 	uint32_t hdrlen, optlen;
 	int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0;
 	uint16_t flags;
 	uint32_t if_hw_tsomaxsegcount = 0, startseq;
 	uint32_t if_hw_tsomaxsegsize;
 	int32_t ip_sendflag = IP_NO_SND_TAG_RL;
 
 #ifdef INET6
 	struct ip6_hdr *ip6 = NULL;
 
 	if (rack->r_is_v6) {
 		ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
 	} else
 #endif				/* INET6 */
 	{
 		ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
 		hdrlen = sizeof(struct tcpiphdr);
 	}
 	if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
 		goto failed;
 	}
 	if (doing_tlp) {
 		/* It's a TLP; add the flag (it may already be there, but be sure) */
 		rsm->r_flags |= RACK_TLP;
 	} else {
 		/* Even if it was a TLP before, it is not one on this retransmit */
 		rsm->r_flags &= ~RACK_TLP;
 	}
 	startseq = rsm->r_start;
 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
 	inp = rack->rc_inp;
 	to.to_flags = 0;
 	flags = tcp_outflags[tp->t_state];
 	if (flags & (TH_SYN|TH_RST)) {
 		goto failed;
 	}
 	if (rsm->r_flags & RACK_HAS_FIN) {
 		/* We can't send a FIN here */
 		goto failed;
 	}
 	if (flags & TH_FIN) {
 		/* We never send a FIN */
 		flags &= ~TH_FIN;
 	}
 	if (tp->t_flags & TF_RCVD_TSTMP) {
 		to.to_tsval = ms_cts + tp->ts_offset;
 		to.to_tsecr = tp->ts_recent;
 		to.to_flags = TOF_TS;
 	}
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 	/* TCP-MD5 (RFC2385). */
 	if (tp->t_flags & TF_SIGNATURE)
 		to.to_flags |= TOF_SIGNATURE;
 #endif
 	optlen = tcp_addoptions(&to, opt);
 	hdrlen += optlen;
 	udp = rack->r_ctl.fsb.udp;
 	if (udp)
 		hdrlen += sizeof(struct udphdr);
 	if (rack->r_ctl.rc_pace_max_segs)
 		max_val = rack->r_ctl.rc_pace_max_segs;
 	else if (rack->rc_user_set_max_segs)
 		max_val = rack->rc_user_set_max_segs * segsiz;
 	else
 		max_val = len;
 	if ((tp->t_flags & TF_TSO) &&
 	    V_tcp_do_tso &&
 	    (len > segsiz) &&
 	    (tp->t_port == 0))
 		tso = 1;
 #ifdef INET6
 	if (MHLEN < hdrlen + max_linkhdr)
 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 	else
 #endif
 		m = m_gethdr(M_NOWAIT, MT_DATA);
 	if (m == NULL)
 		goto failed;
 	m->m_data += max_linkhdr;
 	m->m_len = hdrlen;
 	th = rack->r_ctl.fsb.th;
 	/* Establish the len to send */
 	if (len > max_val)
 		len = max_val;
 	if ((tso) && (len + optlen > segsiz)) {
 		uint32_t if_hw_tsomax;
 		int32_t max_len;
 
 		/* extract TSO information */
 		if_hw_tsomax = tp->t_tsomax;
 		if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
 		if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
 		/*
 		 * Check if we should limit by maximum payload
 		 * length:
 		 */
 		if (if_hw_tsomax != 0) {
 			/* compute maximum TSO length */
 			max_len = (if_hw_tsomax - hdrlen -
 				   max_linkhdr);
 			if (max_len <= 0) {
 				goto failed;
 			} else if (len > max_len) {
 				len = max_len;
 			}
 		}
 		if (len <= segsiz) {
 			/*
 			 * In case there are too many small fragments don't
 			 * use TSO:
 			 */
 			tso = 0;
 		}
 	} else {
 		tso = 0;
 	}
 	if ((tso == 0) && (len > segsiz))
 		len = segsiz;
 	(void)tcp_get_usecs(tv);
 	if ((len == 0) ||
 	    (len <= MHLEN - hdrlen - max_linkhdr)) {
 		goto failed;
 	}
 	th->th_seq = htonl(rsm->r_start);
 	th->th_ack = htonl(tp->rcv_nxt);
 	/*
 	 * The PUSH bit should only be applied
 	 * if the full retransmission is made. If
 	 * we are sending less, then this is the
 	 * left-hand edge and it should not have
 	 * the PUSH bit.
 	 */
 	if ((rsm->r_flags & RACK_HAD_PUSH) &&
 	    (len == (rsm->r_end - rsm->r_start)))
 		flags |= TH_PUSH;
 	th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
 	if (th->th_win == 0) {
 		tp->t_sndzerowin++;
 		tp->t_flags |= TF_RXWIN0SENT;
 	} else
 		tp->t_flags &= ~TF_RXWIN0SENT;
 	if (rsm->r_flags & RACK_TLP) {
 		/*
 		 * TLP should not count in retran count, but
 		 * in its own bin
 		 */
 		counter_u64_add(rack_tlp_retran, 1);
 		counter_u64_add(rack_tlp_retran_bytes, len);
 	} else {
 		tp->t_sndrexmitpack++;
 		KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
 		KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
 	}
 #ifdef STATS
 	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
 				 len);
 #endif
 	if (rsm->m == NULL)
 		goto failed;
 	if (rsm->m &&
 	    ((rsm->orig_m_len != rsm->m->m_len) ||
 	     (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
 		/* Fix up the orig_m_len and possibly the mbuf offset */
 		rack_adjust_orig_mlen(rsm);
 	}
 	m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls);
 	if (len <= segsiz) {
 		/*
 		 * Must have run out of mbufs for the copy;
 		 * shorten it so we no longer need tso. Let's
 		 * not set sendalot since we are low on
 		 * mbufs.
 		 */
 		tso = 0;
 	}
 	if ((m->m_next == NULL) || (len <= 0)){
 		goto failed;
 	}
 	if (udp) {
 		if (rack->r_is_v6)
 			ulen = hdrlen + len - sizeof(struct ip6_hdr);
 		else
 			ulen = hdrlen + len - sizeof(struct ip);
 		udp->uh_ulen = htons(ulen);
 	}
 	m->m_pkthdr.rcvif = (struct ifnet *)0;
 	if (TCPS_HAVERCVDSYN(tp->t_state) &&
 	    (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
 		int ect = tcp_ecn_output_established(tp, &flags, len, true);
 		if ((tp->t_state == TCPS_SYN_RECEIVED) &&
 		    (tp->t_flags2 & TF2_ECN_SND_ECE))
 		    tp->t_flags2 &= ~TF2_ECN_SND_ECE;
 #ifdef INET6
 		if (rack->r_is_v6) {
 		    ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
 		    ip6->ip6_flow |= htonl(ect << 20);
 		}
 		else
 #endif
 		{
 		    ip->ip_tos &= ~IPTOS_ECN_MASK;
 		    ip->ip_tos |= ect;
 		}
 	}
 	if (rack->r_ctl.crte != NULL) {
 		/* See if we can send via the hw queue */
 		slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz);
 		/* If there is nothing in queue (no pacing time) we can send via the hw queue */
 		if (slot == 0)
 			ip_sendflag = 0;
 	}
 	tcp_set_flags(th, flags);
 	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() need this */
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 	if (to.to_flags & TOF_SIGNATURE) {
 		/*
 		 * Calculate MD5 signature and put it into the place
 		 * determined before.
 		 * NOTE: since TCP options buffer doesn't point into
 		 * mbuf's data, calculate offset and use it.
 		 */
 		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
 						       (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
 			/*
 			 * Do not send segment if the calculation of MD5
 			 * digest has failed.
 			 */
 			goto failed;
 		}
 	}
 #endif
 #ifdef INET6
 	if (rack->r_is_v6) {
 		if (tp->t_port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
 			th->th_sum = htons(0);
 			UDPSTAT_INC(udps_opackets);
 		} else {
 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			th->th_sum = in6_cksum_pseudo(ip6,
 						      sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
 						      0);
 		}
 	}
 #endif
 #if defined(INET6) && defined(INET)
 	else
 #endif
 #ifdef INET
 	{
 		if (tp->t_port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
 						ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
 			th->th_sum = htons(0);
 			UDPSTAT_INC(udps_opackets);
 		} else {
 			m->m_pkthdr.csum_flags = CSUM_TCP;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
 					       ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
 									IPPROTO_TCP + len + optlen));
 		}
 		/* IP version must be set here for ipv4/ipv6 checking later */
 		KASSERT(ip->ip_v == IPVERSION,
 			("%s: IP version incorrect: %d", __func__, ip->ip_v));
 	}
 #endif
 	if (tso) {
 		/*
 		 * Here we use segsiz since we have no added options besides
 		 * any standard timestamp options (no DSACKs or SACKS are sent
 		 * via either fast-path).
 		 */
 		KASSERT(len > segsiz,
 			("%s: len <= tso_segsz tp:%p", __func__, tp));
 		m->m_pkthdr.csum_flags |= CSUM_TSO;
 		m->m_pkthdr.tso_segsz = segsiz;
 	}
 #ifdef INET6
 	if (rack->r_is_v6) {
 		ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
 		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 		else
 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 	}
 #endif
 #if defined(INET) && defined(INET6)
 	else
 #endif
 #ifdef INET
 	{
 		ip->ip_len = htons(m->m_pkthdr.len);
 		ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 			if (tp->t_port == 0 || len < V_tcp_minmss) {
 				ip->ip_off |= htons(IP_DF);
 			}
 		} else {
 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 		}
 	}
 #endif
 	if (doing_tlp == 0) {
 		/* Set we retransmitted */
 		rack->rc_gp_saw_rec = 1;
 	} else {
 		/* Its a TLP set ca or ss */
 		if (tp->snd_cwnd > tp->snd_ssthresh) {
 			/* Set we sent in CA */
 			rack->rc_gp_saw_ca = 1;
 		} else {
 			/* Set we sent in SS */
 			rack->rc_gp_saw_ss = 1;
 		}
 	}
 	/* Time to copy in our header */
 	cpto = mtod(m, uint8_t *);
 	memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
 	th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
 	if (optlen) {
 		bcopy(opt, th + 1, optlen);
 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
 	} else {
 		th->th_off = sizeof(struct tcphdr) >> 2;
 	}
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		if (rsm->r_flags & RACK_RWND_COLLAPSED) {
 			rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
 			counter_u64_add(rack_collapsed_win_rxt, 1);
 			counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
 		}
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		if (rack->rack_no_prr)
 			log.u_bbr.flex1 = 0;
 		else
 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex4 = max_val;
 		/* Save off the early/late values */
 		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
 		log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
 		log.u_bbr.bw_inuse = rack_get_bw(rack);
 		log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
 		if (doing_tlp == 0)
 			log.u_bbr.flex8 = 1;
 		else
 			log.u_bbr.flex8 = 2;
 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
 		log.u_bbr.flex7 = 55;
 		log.u_bbr.pkts_out = tp->t_maxseg;
 		log.u_bbr.timeStamp = cts;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		if (rsm && (rsm->r_rtr_cnt > 0)) {
 			/*
 			 * When we have a retransmit we want to log the
 			 * burst at send and flight at send from before.
 			 */
 			log.u_bbr.flex5 = rsm->r_fas;
 			log.u_bbr.bbr_substate = rsm->r_bas;
 		} else {
 			/*
 			 * This is currently unlikely until we do the
 			 * packet pair probes but I will add it for completeness.
 			 */
 			log.u_bbr.flex5 = log.u_bbr.inflight;
 			log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
 		}
 		log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
 		log.u_bbr.delivered = 0;
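 		/*
 		 * Pack the rsm flags into the upper bits of delRate, with
 		 * r_must_retran and a trailing 1 bit below them; the set
 		 * low bit marks this as the rsm (retransmit) fast path,
 		 * whereas rack_fast_output() leaves that bit clear.
 		 */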
 		log.u_bbr.rttProp = (uint64_t)rsm;
 		log.u_bbr.delRate = rsm->r_flags;
 		log.u_bbr.delRate <<= 31;
 		log.u_bbr.delRate |= rack->r_must_retran;
 		log.u_bbr.delRate <<= 1;
 		log.u_bbr.delRate |= 1;
 		log.u_bbr.pkt_epoch = __LINE__;
 		lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
 				     len, &log, false, NULL, __func__, __LINE__, tv);
 	} else
 		lgb = NULL;
 	if ((rack->r_ctl.crte != NULL) &&
 	    tcp_bblogging_on(tp)) {
 		rack_log_queue_level(tp, rack, len, tv, cts);
 	}
 #ifdef INET6
 	if (rack->r_is_v6) {
 		error = ip6_output(m, inp->in6p_outputopts,
 				   &inp->inp_route6,
 				   ip_sendflag, NULL, NULL, inp);
 	}
 	else
 #endif
 #ifdef INET
 	{
 		error = ip_output(m, NULL,
 				  &inp->inp_route,
 				  ip_sendflag, 0, inp);
 	}
 #endif
 	m = NULL;
 	if (lgb) {
 		lgb->tlb_errno = error;
 		lgb = NULL;
 	}
 	/* Move snd_nxt to snd_max so we don't have false retransmissions */
 	tp->snd_nxt = tp->snd_max;
 	if (error) {
 		goto failed;
 	} else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) {
 		rack->rc_hw_nobuf = 0;
 		rack->r_ctl.rc_agg_delayed = 0;
 		rack->r_early = 0;
 		rack->r_late = 0;
 		rack->r_ctl.rc_agg_early = 0;
 	}
 
 	rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
 			rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz);
 	if (doing_tlp) {
 		rack->rc_tlp_in_progress = 1;
 		rack->r_ctl.rc_tlp_cnt_out++;
 	}
 	if (error == 0) {
 		counter_u64_add(rack_total_bytes, len);
 		tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls);
 		if (doing_tlp) {
 			rack->rc_last_sent_tlp_past_cumack = 0;
 			rack->rc_last_sent_tlp_seq_valid = 1;
 			rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
 			rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
 		}
 		if (rack->r_ctl.rc_prr_sndcnt >= len)
 			rack->r_ctl.rc_prr_sndcnt -= len;
 		else
 			rack->r_ctl.rc_prr_sndcnt = 0;
 	}
 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
 	rack->forced_ack = 0;	/* If we send something zap the FA flag */
 	if (IN_FASTRECOVERY(tp->t_flags) && rsm)
 		rack->r_ctl.retran_during_recovery += len;
 	{
 		int idx;
 
 		idx = (len / segsiz) + 3;
 		if (idx >= TCP_MSS_ACCT_ATIMER)
 			counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
 		else
 			counter_u64_add(rack_out_size[idx], 1);
 	}
 	if (tp->t_rtttime == 0) {
 		tp->t_rtttime = ticks;
 		tp->t_rtseq = startseq;
 		KMOD_TCPSTAT_INC(tcps_segstimed);
 	}
 	counter_u64_add(rack_fto_rsm_send, 1);
 	if (error && (error == ENOBUFS)) {
 		if (rack->r_ctl.crte != NULL) {
 			tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
 			if (tcp_bblogging_on(rack->rc_tp))
 				rack_log_queue_level(tp, rack, len, tv, cts);
 		} else
 			tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
 		slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
 		if (rack->rc_enobuf < 0x7f)
 			rack->rc_enobuf++;
 		if (slot < (10 * HPTS_USEC_IN_MSEC))
 			slot = 10 * HPTS_USEC_IN_MSEC;
 		if (rack->r_ctl.crte != NULL) {
 			counter_u64_add(rack_saw_enobuf_hw, 1);
 			tcp_rl_log_enobuf(rack->r_ctl.crte);
 		}
 		counter_u64_add(rack_saw_enobuf, 1);
 	} else
 		slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz);
 	if ((slot == 0) ||
 	    (rack->rc_always_pace == 0) ||
 	    (rack->r_rr_config == 1)) {
 		/*
 		 * We have no pacing set or we
 		 * are using old-style rack or
 		 * we are overridden to use the old 1ms pacing.
 		 */
 		slot = rack->r_ctl.rc_min_to;
 	}
 	rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
 #ifdef TCP_ACCOUNTING
 	crtsc = get_cyclecount();
 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 		tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
 	}
 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 		tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
 	}
 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 		tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
 	}
 	sched_unpin();
 #endif
 	return (0);
 failed:
 	if (m)
 		m_free(m);
 	return (-1);
 }
 
 static void
 rack_sndbuf_autoscale(struct tcp_rack *rack)
 {
 	/*
 	 * Automatic sizing of send socket buffer.  Often the send buffer
 	 * size is not optimally adjusted to the actual network conditions
 	 * at hand (delay bandwidth product).  Setting the buffer size too
 	 * small limits throughput on links with high bandwidth and high
 	 * delay (e.g. trans-continental/oceanic links).  Setting the
 	 * buffer size too big consumes too much real kernel memory,
 	 * especially with many connections on busy servers.
 	 *
 	 * The criteria to step up the send buffer one notch are:
 	 *  1. receive window of remote host is larger than send buffer
 	 *     (with a fudge factor of 5/4th);
 	 *  2. send buffer is filled to 7/8th with data (so we actually
 	 *     have data to make use of it);
 	 *  3. send buffer fill has not hit maximal automatic size;
 	 *  4. our send window (slow start and congestion controlled) is
 	 *     larger than sent but unacknowledged data in send buffer.
 	 *
 	 * Note that the rack version moves things much faster, since
 	 * we want to avoid hitting cache lines in the rack_fast_output()
 	 * path; this is called much less often and thus moves
 	 * the SB forward by a percentage.
 	 */
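 	/*
 	 * Hypothetical example of the scaling below: with
 	 * rack_autosndbuf_inc at 20 (percent) and a 64KB sb_hiwat, the
 	 * increment is about 13KB, so the new reservation requested via
 	 * sbreserve_locked() is roughly 77KB, subject to the
 	 * V_tcp_autosndbuf_max cap.
 	 */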
 	struct socket *so;
 	struct tcpcb *tp;
 	uint32_t sendwin, scaleup;
 
 	tp = rack->rc_tp;
 	so = rack->rc_inp->inp_socket;
 	sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
 	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
 		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
 		    sbused(&so->so_snd) >=
 		    (so->so_snd.sb_hiwat / 8 * 7) &&
 		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
 		    sendwin >= (sbused(&so->so_snd) -
 		    (tp->snd_nxt - tp->snd_una))) {
 			if (rack_autosndbuf_inc)
 				scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
 			else
 				scaleup = V_tcp_autosndbuf_inc;
 			if (scaleup < V_tcp_autosndbuf_inc)
 				scaleup = V_tcp_autosndbuf_inc;
 			scaleup += so->so_snd.sb_hiwat;
 			if (scaleup > V_tcp_autosndbuf_max)
 				scaleup = V_tcp_autosndbuf_max;
 			if (!sbreserve_locked(so, SO_SND, scaleup, curthread))
 				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
 		}
 	}
 }
 
 static int
 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
 		 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
 {
 	/*
 	 * Enter to do fast output. We are given that the sched_pin is
 	 * in place (if accounting is compiled in) and the cycle count taken
 	 * at entry is in ts_val. The idea here is that
 	 * we know how many more bytes need to be sent (presumably either
 	 * during pacing or to fill the cwnd, and that amount was greater than
 	 * the max-burst). We have how much to send and all the info we
 	 * need to just send.
 	 */
 #ifdef INET
 	struct ip *ip = NULL;
 #endif
 	struct udphdr *udp = NULL;
 	struct tcphdr *th = NULL;
 	struct mbuf *m, *s_mb;
 	struct inpcb *inp;
 	uint8_t *cpto;
 	struct tcp_log_buffer *lgb;
 #ifdef TCP_ACCOUNTING
 	uint64_t crtsc;
 #endif
 	struct tcpopt to;
 	u_char opt[TCP_MAXOLEN];
 	uint32_t hdrlen, optlen;
 #ifdef TCP_ACCOUNTING
 	int cnt_thru = 1;
 #endif
 	int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0;
 	uint16_t flags;
 	uint32_t s_soff;
 	uint32_t if_hw_tsomaxsegcount = 0, startseq;
 	uint32_t if_hw_tsomaxsegsize;
 	uint16_t add_flag = RACK_SENT_FP;
 #ifdef INET6
 	struct ip6_hdr *ip6 = NULL;
 
 	if (rack->r_is_v6) {
 		ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
 	} else
 #endif				/* INET6 */
 	{
 #ifdef INET
 		ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
 		hdrlen = sizeof(struct tcpiphdr);
 #endif
 	}
 	if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
 		m = NULL;
 		goto failed;
 	}
 	rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
 	startseq = tp->snd_max;
 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
 	inp = rack->rc_inp;
 	len = rack->r_ctl.fsb.left_to_send;
 	to.to_flags = 0;
 	flags = rack->r_ctl.fsb.tcp_flags;
 	if (tp->t_flags & TF_RCVD_TSTMP) {
 		to.to_tsval = ms_cts + tp->ts_offset;
 		to.to_tsecr = tp->ts_recent;
 		to.to_flags = TOF_TS;
 	}
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 	/* TCP-MD5 (RFC2385). */
 	if (tp->t_flags & TF_SIGNATURE)
 		to.to_flags |= TOF_SIGNATURE;
 #endif
 	optlen = tcp_addoptions(&to, opt);
 	hdrlen += optlen;
 	udp = rack->r_ctl.fsb.udp;
 	if (udp)
 		hdrlen += sizeof(struct udphdr);
 	if (rack->r_ctl.rc_pace_max_segs)
 		max_val = rack->r_ctl.rc_pace_max_segs;
 	else if (rack->rc_user_set_max_segs)
 		max_val = rack->rc_user_set_max_segs * segsiz;
 	else
 		max_val = len;
 	if ((tp->t_flags & TF_TSO) &&
 	    V_tcp_do_tso &&
 	    (len > segsiz) &&
 	    (tp->t_port == 0))
 		tso = 1;
 again:
 #ifdef INET6
 	if (MHLEN < hdrlen + max_linkhdr)
 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 	else
 #endif
 		m = m_gethdr(M_NOWAIT, MT_DATA);
 	if (m == NULL)
 		goto failed;
 	m->m_data += max_linkhdr;
 	m->m_len = hdrlen;
 	th = rack->r_ctl.fsb.th;
 	/* Establish the len to send */
 	if (len > max_val)
 		len = max_val;
 	if ((tso) && (len + optlen > segsiz)) {
 		uint32_t if_hw_tsomax;
 		int32_t max_len;
 
 		/* extract TSO information */
 		if_hw_tsomax = tp->t_tsomax;
 		if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
 		if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
 		/*
 		 * Check if we should limit by maximum payload
 		 * length:
 		 */
 		if (if_hw_tsomax != 0) {
 			/* compute maximum TSO length */
 			max_len = (if_hw_tsomax - hdrlen -
 				   max_linkhdr);
 			if (max_len <= 0) {
 				goto failed;
 			} else if (len > max_len) {
 				len = max_len;
 			}
 		}
 		if (len <= segsiz) {
 			/*
 			 * In case there are too many small fragments don't
 			 * use TSO:
 			 */
 			tso = 0;
 		}
 	} else {
 		tso = 0;
 	}
 	if ((tso == 0) && (len > segsiz))
 		len = segsiz;
 	(void)tcp_get_usecs(tv);
 	if ((len == 0) ||
 	    (len <= MHLEN - hdrlen - max_linkhdr)) {
 		goto failed;
 	}
 	sb_offset = tp->snd_max - tp->snd_una;
 	th->th_seq = htonl(tp->snd_max);
 	th->th_ack = htonl(tp->rcv_nxt);
 	th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale));
 	if (th->th_win == 0) {
 		tp->t_sndzerowin++;
 		tp->t_flags |= TF_RXWIN0SENT;
 	} else
 		tp->t_flags &= ~TF_RXWIN0SENT;
 	tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
 	KMOD_TCPSTAT_INC(tcps_sndpack);
 	KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
 #ifdef STATS
 	stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
 				 len);
 #endif
 	if (rack->r_ctl.fsb.m == NULL)
 		goto failed;
 
 	/* s_mb and s_soff are saved for rack_log_output */
 	m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize,
 				    &s_mb, &s_soff);
 	if (len <= segsiz) {
 		/*
 		 * Must have run out of mbufs for the copy;
 		 * shorten it so we no longer need tso. Let's
 		 * not set sendalot since we are low on
 		 * mbufs.
 		 */
 		tso = 0;
 	}
 	if (rack->r_ctl.fsb.rfo_apply_push &&
 	    (len == rack->r_ctl.fsb.left_to_send)) {
 		tcp_set_flags(th, flags | TH_PUSH);
 		add_flag |= RACK_HAD_PUSH;
 	}
 	if ((m->m_next == NULL) || (len <= 0)){
 		goto failed;
 	}
 	if (udp) {
 		if (rack->r_is_v6)
 			ulen = hdrlen + len - sizeof(struct ip6_hdr);
 		else
 			ulen = hdrlen + len - sizeof(struct ip);
 		udp->uh_ulen = htons(ulen);
 	}
 	m->m_pkthdr.rcvif = (struct ifnet *)0;
 	if (TCPS_HAVERCVDSYN(tp->t_state) &&
 	    (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
 		int ect = tcp_ecn_output_established(tp, &flags, len, false);
 		if ((tp->t_state == TCPS_SYN_RECEIVED) &&
 		    (tp->t_flags2 & TF2_ECN_SND_ECE))
 			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
 #ifdef INET6
 		if (rack->r_is_v6) {
 			ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
 			ip6->ip6_flow |= htonl(ect << 20);
 		}
 		else
 #endif
 		{
 #ifdef INET
 			ip->ip_tos &= ~IPTOS_ECN_MASK;
 			ip->ip_tos |= ect;
 #endif
 		}
 	}
 	tcp_set_flags(th, flags);
 	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() need this */
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 	if (to.to_flags & TOF_SIGNATURE) {
 		/*
 		 * Calculate MD5 signature and put it into the place
 		 * determined before.
 		 * NOTE: since TCP options buffer doesn't point into
 		 * mbuf's data, calculate offset and use it.
 		 */
 		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
 						       (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
 			/*
 			 * Do not send segment if the calculation of MD5
 			 * digest has failed.
 			 */
 			goto failed;
 		}
 	}
 #endif
 #ifdef INET6
 	if (rack->r_is_v6) {
 		if (tp->t_port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
 			th->th_sum = htons(0);
 			UDPSTAT_INC(udps_opackets);
 		} else {
 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			th->th_sum = in6_cksum_pseudo(ip6,
 						      sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
 						      0);
 		}
 	}
 #endif
 #if defined(INET6) && defined(INET)
 	else
 #endif
 #ifdef INET
 	{
 		if (tp->t_port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
 						ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
 			th->th_sum = htons(0);
 			UDPSTAT_INC(udps_opackets);
 		} else {
 			m->m_pkthdr.csum_flags = CSUM_TCP;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
 					       ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
 									IPPROTO_TCP + len + optlen));
 		}
 		/* IP version must be set here for ipv4/ipv6 checking later */
 		KASSERT(ip->ip_v == IPVERSION,
 			("%s: IP version incorrect: %d", __func__, ip->ip_v));
 	}
 #endif
 	if (tso) {
 		/*
 		 * Here we use segsiz since we have no added options besides
 		 * any standard timestamp options (no DSACKs or SACKS are sent
 		 * via either fast-path).
 		 */
 		KASSERT(len > segsiz,
 			("%s: len <= tso_segsz tp:%p", __func__, tp));
 		m->m_pkthdr.csum_flags |= CSUM_TSO;
 		m->m_pkthdr.tso_segsz = segsiz;
 	}
 #ifdef INET6
 	if (rack->r_is_v6) {
 		ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit;
 		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 		else
 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 	}
 #endif
 #if defined(INET) && defined(INET6)
 	else
 #endif
 #ifdef INET
 	{
 		ip->ip_len = htons(m->m_pkthdr.len);
 		ip->ip_ttl = rack->r_ctl.fsb.hoplimit;
 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 			if (tp->t_port == 0 || len < V_tcp_minmss) {
 				ip->ip_off |= htons(IP_DF);
 			}
 		} else {
 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 		}
 	}
 #endif
 	if (tp->snd_cwnd > tp->snd_ssthresh) {
 		/* Set we sent in CA */
 		rack->rc_gp_saw_ca = 1;
 	} else {
 		/* Set we sent in SS */
 		rack->rc_gp_saw_ss = 1;
 	}
 	/* Time to copy in our header */
 	cpto = mtod(m, uint8_t *);
 	memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
 	th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
 	if (optlen) {
 		bcopy(opt, th + 1, optlen);
 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
 	} else {
 		th->th_off = sizeof(struct tcphdr) >> 2;
 	}
 	if ((rack->r_ctl.crte != NULL) &&
 	    tcp_bblogging_on(tp)) {
 		rack_log_queue_level(tp, rack, len, tv, cts);
 	}
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		if (rack->rack_no_prr)
 			log.u_bbr.flex1 = 0;
 		else
 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex4 = max_val;
 		/* Save off the early/late values */
 		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
 		log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
 		log.u_bbr.bw_inuse = rack_get_bw(rack);
 		log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
 		log.u_bbr.flex8 = 0;
 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL);
 		log.u_bbr.flex7 = 44;
 		log.u_bbr.pkts_out = tp->t_maxseg;
 		log.u_bbr.timeStamp = cts;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		log.u_bbr.flex5 = log.u_bbr.inflight;
 		log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use;
 		log.u_bbr.delivered = 0;
 		log.u_bbr.rttProp = 0;
 		log.u_bbr.delRate = rack->r_must_retran;
 		log.u_bbr.delRate <<= 1;
 		log.u_bbr.pkt_epoch = __LINE__;
 		/* For fast output there are no retransmits, so just log inflight and how many MSS we send */
 		log.u_bbr.flex5 = log.u_bbr.inflight;
 		log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
 		lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK,
 				     len, &log, false, NULL, __func__, __LINE__, tv);
 	} else
 		lgb = NULL;
 #ifdef INET6
 	if (rack->r_is_v6) {
 		error = ip6_output(m, inp->in6p_outputopts,
 				   &inp->inp_route6,
 				   0, NULL, NULL, inp);
 	}
 #endif
 #if defined(INET) && defined(INET6)
 	else
 #endif
 #ifdef INET
 	{
 		error = ip_output(m, NULL,
 				  &inp->inp_route,
 				  0, 0, inp);
 	}
 #endif
 	if (lgb) {
 		lgb->tlb_errno = error;
 		lgb = NULL;
 	}
 	if (error) {
 		*send_err = error;
 		m = NULL;
 		goto failed;
 	} else if (rack->rc_hw_nobuf) {
 		rack->rc_hw_nobuf = 0;
 		rack->r_ctl.rc_agg_delayed = 0;
 		rack->r_early = 0;
 		rack->r_late = 0;
 		rack->r_ctl.rc_agg_early = 0;
 	}
 	if ((error == 0) && (rack->lt_bw_up == 0)) {
 		/* Unlikely */
 		rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv);
 		rack->r_ctl.lt_seq = tp->snd_una;
 		rack->lt_bw_up = 1;
 	}
 	rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv),
 			NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz);
 	m = NULL;
 	if (tp->snd_una == tp->snd_max) {
 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
 		rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
 		tp->t_acktime = ticks;
 	}
 	counter_u64_add(rack_total_bytes, len);
 	tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls);
 
 	rack->forced_ack = 0;	/* If we send something zap the FA flag */
 	tot_len += len;
 	if ((tp->t_flags & TF_GPUTINPROG) == 0)
 		rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset);
 	tp->snd_max += len;
 	tp->snd_nxt = tp->snd_max;
 	if (rack->rc_new_rnd_needed) {
 		/*
 		 * Update the rnd to start ticking; note
 		 * that from a time perspective all of
 		 * the preceding idle time is "in the round".
 		 */
 		rack->rc_new_rnd_needed = 0;
 		rack->r_ctl.roundends = tp->snd_max;
 	}
 	{
 		int idx;
 
 		idx = (len / segsiz) + 3;
 		if (idx >= TCP_MSS_ACCT_ATIMER)
 			counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
 		else
 			counter_u64_add(rack_out_size[idx], 1);
 	}
 	if (len <= rack->r_ctl.fsb.left_to_send)
 		rack->r_ctl.fsb.left_to_send -= len;
 	else
 		rack->r_ctl.fsb.left_to_send = 0;
 	if (rack->r_ctl.fsb.left_to_send < segsiz) {
 		rack->r_fast_output = 0;
 		rack->r_ctl.fsb.left_to_send = 0;
 		/* At the end of fast_output scale up the sb */
 		SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd);
 		rack_sndbuf_autoscale(rack);
 		SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd);
 	}
 	if (tp->t_rtttime == 0) {
 		tp->t_rtttime = ticks;
 		tp->t_rtseq = startseq;
 		KMOD_TCPSTAT_INC(tcps_segstimed);
 	}
 	if ((rack->r_ctl.fsb.left_to_send >= segsiz) &&
 	    (max_val > len) &&
 	    (tso == 0)) {
 		max_val -= len;
 		len = segsiz;
 		th = rack->r_ctl.fsb.th;
 #ifdef TCP_ACCOUNTING
 		cnt_thru++;
 #endif
 		goto again;
 	}
 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
 	counter_u64_add(rack_fto_send, 1);
 	slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz);
 	rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0);
 #ifdef TCP_ACCOUNTING
 	crtsc = get_cyclecount();
 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 		tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
 	}
 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 		tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
 	}
 	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 		tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz);
 	}
 	sched_unpin();
 #endif
 	return (0);
 failed:
 	if (m)
 		m_free(m);
 	rack->r_fast_output = 0;
 	return (-1);
 }
 
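 /*
  * Arm the fast-send block: record the mbuf and offset of the first
  * unsent byte, the original mbuf lengths, the TCP flags and how many
  * bytes beyond this send remain (rounded down to pace_max_seg). Fast
  * output is disarmed again if less than a pacing burst, or less than a
  * segment, would be left to send; rfo_apply_push is set when this block
  * will drain everything in the socket buffer.
  */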
 static inline void
 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack,
 		       struct sockbuf *sb,
 		       int len, int orig_len, int segsiz, uint32_t pace_max_seg,
 		       bool hw_tls,
 		       uint16_t flags)
 {
 	rack->r_fast_output = 1;
 	rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
 	rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
 	rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
 	rack->r_ctl.fsb.tcp_flags = flags;
 	rack->r_ctl.fsb.left_to_send = orig_len - len;
 	if (rack->r_ctl.fsb.left_to_send < pace_max_seg) {
 		/* Less than a full-sized pace; let's not */
 		rack->r_fast_output = 0;
 		return;
 	} else {
 		/* Round down to the nearest pace_max_seg */
 		rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg);
 	}
 	if (hw_tls)
 		rack->r_ctl.fsb.hw_tls = 1;
 	else
 		rack->r_ctl.fsb.hw_tls = 0;
 	KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
 		("rack:%p left_to_send:%u sbavail:%u out:%u",
 		 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
 		 (tp->snd_max - tp->snd_una)));
 	if (rack->r_ctl.fsb.left_to_send < segsiz)
 		rack->r_fast_output = 0;
 	else {
 		if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
 			rack->r_ctl.fsb.rfo_apply_push = 1;
 		else
 			rack->r_ctl.fsb.rfo_apply_push = 0;
 	}
 }
 
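 /*
  * Return how many bytes (rounded up to a whole segment) the current
  * goodput bandwidth estimate would move during the minimum hpts sleep
  * time; used as the shaping length when retransmits are being held to
  * the pacing minimum.
  */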
 static uint32_t
 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz)
 {
 	uint64_t min_time;
 	uint32_t maxlen;
 
 	min_time = (uint64_t)get_hpts_min_sleep_time();
 	maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC);
 	maxlen = roundup(maxlen, segsiz);
 	return (maxlen);
 }
 
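 /*
  * Starting at the last collapse point, look for a sendmap entry that is
  * still marked RACK_RWND_COLLAPSED, now fits inside the peer's receive
  * window, is not already SACKed, and has aged past the RACK retransmit
  * threshold. Return it if found; otherwise return NULL (dropping
  * collapse-point validity when nothing collapsed remains).
  */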
 static struct rack_sendmap *
 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts)
 {
 	struct rack_sendmap *rsm = NULL;
 	int thresh;
 
 restart:
 	rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
 	if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) {
 		/* Nothing there, strange; turn off validity */
 		rack->r_collapse_point_valid = 0;
 		return (NULL);
 	}
 	/* Can we send it yet? */
 	if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) {
 		/*
 		 * Receiver window has not grown enough for
 		 * the segment to be put on the wire.
 		 */
 		return (NULL);
 	}
 	if (rsm->r_flags & RACK_ACKED) {
 		/*
 		 * It has been sacked; let's move to the
 		 * next one if possible.
 		 */
 		rack->r_ctl.last_collapse_point = rsm->r_end;
 		/* Are we done? */
 		if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
 			    rack->r_ctl.high_collapse_point)) {
 			rack->r_collapse_point_valid = 0;
 			return (NULL);
 		}
 		goto restart;
 	}
 	/* Now has it been long enough ? */
 	thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts);
 	if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) {
 		rack_log_collapse(rack, rsm->r_start,
 				  (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
 				  thresh, __LINE__, 6, rsm->r_flags, rsm);
 		return (rsm);
 	}
 	/* Not enough time */
 	rack_log_collapse(rack, rsm->r_start,
 			  (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
 			  thresh, __LINE__, 7, rsm->r_flags, rsm);
 	return (NULL);
 }
 
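 /*
  * Clamp a retransmit length according to the configured policy: a
  * single segment by default, the pacing-minimum derived length when we
  * are shaping retransmits to the pacing minimum (and gp is ready), or
  * at most pace_max_seg when full-size retransmits are enabled.
  */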
 static inline void
 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg)
 {
 	if ((rack->full_size_rxt == 0) &&
 	    (rack->shape_rxt_to_pacing_min == 0) &&
 	    (*len >= segsiz)) {
 		*len = segsiz;
 	} else if (rack->shape_rxt_to_pacing_min &&
 		 rack->gp_ready) {
 		/* We use pacing min as shaping len req */
 		uint32_t maxlen;
 
 		maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
 		if (*len > maxlen)
 			*len = maxlen;
 	} else {
 		/*
 		 * The else case means full_size_rxt is on, so send it all.
 		 * Note we do need to check this for exceeding
 		 * our max segment size, due to the fact that
 		 * we do sometimes merge chunks together, i.e.
 		 * we cannot just assume that we will never have
 		 * a chunk greater than pace_max_seg.
 		 */
 		if (*len > pace_max_seg)
 			*len = pace_max_seg;
 	}
 }
 
 static int
 rack_output(struct tcpcb *tp)
 {
 	struct socket *so;
 	uint32_t recwin;
 	uint32_t sb_offset, s_moff = 0;
 	int32_t len, error = 0;
 	uint16_t flags;
 	struct mbuf *m, *s_mb = NULL;
 	struct mbuf *mb;
 	uint32_t if_hw_tsomaxsegcount = 0;
 	uint32_t if_hw_tsomaxsegsize;
 	int32_t segsiz, minseg;
 	long tot_len_this_send = 0;
 #ifdef INET
 	struct ip *ip = NULL;
 #endif
 	struct udphdr *udp = NULL;
 	struct tcp_rack *rack;
 	struct tcphdr *th;
 	uint8_t pass = 0;
 	uint8_t mark = 0;
 	uint8_t check_done = 0;
 	uint8_t wanted_cookie = 0;
 	u_char opt[TCP_MAXOLEN];
 	unsigned ipoptlen, optlen, hdrlen, ulen=0;
 	uint32_t rack_seq;
 
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 	unsigned ipsec_optlen = 0;
 
 #endif
 	int32_t idle, sendalot;
 	int32_t sub_from_prr = 0;
 	volatile int32_t sack_rxmit;
 	struct rack_sendmap *rsm = NULL;
 	int32_t tso, mtu;
 	struct tcpopt to;
 	int32_t slot = 0;
 	int32_t sup_rack = 0;
 	uint32_t cts, ms_cts, delayed, early;
 	uint16_t add_flag = RACK_SENT_SP;
 	/* The doing_tlp flag will be set by the actual rack_timeout_tlp() */
 	uint8_t doing_tlp = 0;
 	uint32_t cwnd_to_use, pace_max_seg;
 	int32_t do_a_prefetch = 0;
 	int32_t prefetch_rsm = 0;
 	int32_t orig_len = 0;
 	struct timeval tv;
 	int32_t prefetch_so_done = 0;
 	struct tcp_log_buffer *lgb;
 	struct inpcb *inp = tptoinpcb(tp);
 	struct sockbuf *sb;
 	uint64_t ts_val = 0;
 #ifdef TCP_ACCOUNTING
 	uint64_t crtsc;
 #endif
 #ifdef INET6
 	struct ip6_hdr *ip6 = NULL;
 	int32_t isipv6;
 #endif
 	bool hpts_calling, hw_tls = false;
 
 	NET_EPOCH_ASSERT();
 	INP_WLOCK_ASSERT(inp);
 
 	/* setup and take the cache hits here */
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 #ifdef TCP_ACCOUNTING
 	sched_pin();
 	ts_val = get_cyclecount();
 #endif
 	hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS);
 	tp->t_flags2 &= ~TF2_HPTS_CALLS;
 #ifdef TCP_OFFLOAD
 	if (tp->t_flags & TF_TOE) {
 #ifdef TCP_ACCOUNTING
 		sched_unpin();
 #endif
 		return (tcp_offload_output(tp));
 	}
 #endif
 	if (rack->rack_deferred_inited == 0) {
 		/*
 		 * If we are the connecting socket we will
 		 * hit rack_init() when no sequence numbers
 		 * are set up. This makes it so we must defer
 		 * some initialization. Call that now.
 		 */
 		rack_deferred_init(tp, rack);
 	}
 	/*
 	 * For TFO connections in SYN_RECEIVED, only allow the initial
 	 * SYN|ACK and those sent by the retransmit timer.
 	 */
 	if (IS_FASTOPEN(tp->t_flags) &&
 	    (tp->t_state == TCPS_SYN_RECEIVED) &&
 	    SEQ_GT(tp->snd_max, tp->snd_una) &&    /* initial SYN|ACK sent */
 	    (rack->r_ctl.rc_resend == NULL)) {         /* not a retransmit */
 #ifdef TCP_ACCOUNTING
 		sched_unpin();
 #endif
 		return (0);
 	}
 #ifdef INET6
 	if (rack->r_state) {
 		/* Use the cache line loaded if possible */
 		isipv6 = rack->r_is_v6;
 	} else {
 		isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
 	}
 #endif
 	early = 0;
 	cts = tcp_get_usecs(&tv);
 	ms_cts = tcp_tv_to_mssectick(&tv);
 	if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
 	    tcp_in_hpts(rack->rc_tp)) {
 		/*
 		 * We are on the hpts for some timer but not hptsi output.
 		 * Remove from the hpts unconditionally.
 		 */
 		rack_timer_cancel(tp, rack, cts, __LINE__);
 	}
 	/* Are we pacing and late? */
 	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
 	    TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
 		/* We are delayed */
 		delayed = cts - rack->r_ctl.rc_last_output_to;
 	} else {
 		delayed = 0;
 	}
 	/* Do the timers, which may override the pacer */
 	if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 		int retval;
 
 		retval = rack_process_timers(tp, rack, cts, hpts_calling,
 					     &doing_tlp);
 		if (retval != 0) {
 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1);
 #ifdef TCP_ACCOUNTING
 			sched_unpin();
 #endif
 			/*
 			 * If timers want tcp_drop(), then pass error out,
 			 * otherwise suppress it.
 			 */
 			return (retval < 0 ? retval : 0);
 		}
 	}
 	if (rack->rc_in_persist) {
 		if (tcp_in_hpts(rack->rc_tp) == 0) {
 			/* Timer is not running */
 			rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
 		}
 #ifdef TCP_ACCOUNTING
 		sched_unpin();
 #endif
 		return (0);
 	}
 	if ((rack->rc_ack_required == 1) &&
 	    (rack->r_timer_override == 0)){
 		/* A timeout occurred and no ack has arrived */
 		if (tcp_in_hpts(rack->rc_tp) == 0) {
 			/* Timer is not running */
 			rack_start_hpts_timer(rack, tp, cts, 0, 0, 0);
 		}
 #ifdef TCP_ACCOUNTING
 		sched_unpin();
 #endif
 		return (0);
 	}
 	if ((rack->r_timer_override) ||
 	    (rack->rc_ack_can_sendout_data) ||
 	    (delayed) ||
 	    (tp->t_state < TCPS_ESTABLISHED)) {
 		rack->rc_ack_can_sendout_data = 0;
 		if (tcp_in_hpts(rack->rc_tp))
 			tcp_hpts_remove(rack->rc_tp);
 	} else if (tcp_in_hpts(rack->rc_tp)) {
 		/*
 		 * While on the hpts you can't pass, even if ACKNOW is on;
 		 * we will send when the hpts fires.
 		 */
 #ifdef TCP_ACCOUNTING
 		crtsc = get_cyclecount();
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
 		}
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_cnt_counters[SND_BLOCKED]++;
 		}
 		sched_unpin();
 #endif
 		counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1);
 		return (0);
 	}
 	/* Finish out both pacing early and late accounting */
 	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
 	    TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
 		early = rack->r_ctl.rc_last_output_to - cts;
 	} else
 		early = 0;
 	if (delayed) {
 		rack->r_ctl.rc_agg_delayed += delayed;
 		rack->r_late = 1;
 	} else if (early) {
 		rack->r_ctl.rc_agg_early += early;
 		rack->r_early = 1;
 	}
 	/* Now that early/late accounting is done turn off the flag */
 	rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
 	rack->r_wanted_output = 0;
 	rack->r_timer_override = 0;
 	if ((tp->t_state != rack->r_state) &&
 	    TCPS_HAVEESTABLISHED(tp->t_state)) {
 		rack_set_state(tp, rack);
 	}
 	if ((rack->r_fast_output) &&
 	    (doing_tlp == 0) &&
 	    (tp->rcv_numsacks == 0)) {
 		int ret;
 
 		error = 0;
 		ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
 		if (ret >= 0)
 			return(ret);
 		else if (error) {
 			inp = rack->rc_inp;
 			so = inp->inp_socket;
 			sb = &so->so_snd;
 			goto nomore;
 		}
 	}
 	inp = rack->rc_inp;
 	/*
 	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
 	 * only allow the initial SYN or SYN|ACK and those sent
 	 * by the retransmit timer.
 	 */
 	if (IS_FASTOPEN(tp->t_flags) &&
 	    ((tp->t_state == TCPS_SYN_RECEIVED) ||
 	     (tp->t_state == TCPS_SYN_SENT)) &&
 	    SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */
 	    (tp->t_rxtshift == 0)) {              /* not a retransmit */
 		cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
 		so = inp->inp_socket;
 		sb = &so->so_snd;
 		goto just_return_nolock;
 	}
 	/*
 	 * Determine length of data that should be transmitted, and flags
 	 * that will be used. If there is some data or critical controls
 	 * (SYN, RST) to send, then transmit; otherwise, investigate
 	 * further.
 	 */
 	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
 	if (tp->t_idle_reduce) {
 		if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur))
 			rack_cc_after_idle(rack, tp);
 	}
 	tp->t_flags &= ~TF_LASTIDLE;
 	if (idle) {
 		if (tp->t_flags & TF_MORETOCOME) {
 			tp->t_flags |= TF_LASTIDLE;
 			idle = 0;
 		}
 	}
 	if ((tp->snd_una == tp->snd_max) &&
 	    rack->r_ctl.rc_went_idle_time &&
 	    TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) {
 		idle = cts - rack->r_ctl.rc_went_idle_time;
 		if (idle > rack_min_probertt_hold) {
 			/* Count as a probe rtt */
 			if (rack->in_probe_rtt == 0) {
 				rack->r_ctl.rc_lower_rtt_us_cts = cts;
 				rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
 				rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
 				rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
 			} else {
 				rack_exit_probertt(rack, cts);
 			}
 		}
 		idle = 0;
 	}
 	if (rack_use_fsb &&
 	    (rack->r_ctl.fsb.tcp_ip_hdr) &&
 	    (rack->r_fsb_inited == 0) &&
 	    (rack->r_state != TCPS_CLOSED))
 		rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]);
 again:
 	/*
 	 * If we've recently taken a timeout, snd_max will be greater than
 	 * snd_nxt.  There may be SACK information that allows us to avoid
 	 * resending already delivered data.  Adjust snd_nxt accordingly.
 	 */
 	sendalot = 0;
 	cts = tcp_get_usecs(&tv);
 	ms_cts = tcp_tv_to_mssectick(&tv);
 	tso = 0;
 	mtu = 0;
 	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
 	minseg = segsiz;
 	if (rack->r_ctl.rc_pace_max_segs == 0)
 		pace_max_seg = rack->rc_user_set_max_segs * segsiz;
 	else
 		pace_max_seg = rack->r_ctl.rc_pace_max_segs;
 	sb_offset = tp->snd_max - tp->snd_una;
 	cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
 	flags = tcp_outflags[tp->t_state];
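 	/*
 	 * Top up the cache of free sendmap entries so that allocations
 	 * later in the send path are less likely to fail.
 	 */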
 	while (rack->rc_free_cnt < rack_free_cache) {
 		rsm = rack_alloc(rack);
 		if (rsm == NULL) {
 			if (hpts_calling)
 				/* Retry in a ms */
 				slot = (1 * HPTS_USEC_IN_MSEC);
 			so = inp->inp_socket;
 			sb = &so->so_snd;
 			goto just_return_nolock;
 		}
 		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
 		rack->rc_free_cnt++;
 		rsm = NULL;
 	}
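 	/*
 	 * Default to sending new data; the checks below may instead
 	 * select an rsm to retransmit (RXT timer, collapsed window,
 	 * RACK/SACK retransmit or a tail loss probe).
 	 */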
 	sack_rxmit = 0;
 	len = 0;
 	rsm = NULL;
 	if (flags & TH_RST) {
 		SOCKBUF_LOCK(&inp->inp_socket->so_snd);
 		so = inp->inp_socket;
 		sb = &so->so_snd;
 		goto send;
 	}
 	if (rack->r_ctl.rc_resend) {
 		/* Retransmit timer */
 		rsm = rack->r_ctl.rc_resend;
 		rack->r_ctl.rc_resend = NULL;
 		len = rsm->r_end - rsm->r_start;
 		sack_rxmit = 1;
 		sendalot = 0;
 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
 			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
 			 __func__, __LINE__,
 			 rsm->r_start, tp->snd_una, tp, rack, rsm));
 		sb_offset = rsm->r_start - tp->snd_una;
 		rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
 	} else if (rack->r_collapse_point_valid &&
 		   ((rsm = rack_check_collapsed(rack, cts)) != NULL)) {
 		/*
 		 * If an RSM is returned then enough time has passed
 		 * for us to retransmit it. Move up the collapse point,
 		 * since this rsm has its chance to retransmit now.
 		 */
 		tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT);
 		rack->r_ctl.last_collapse_point = rsm->r_end;
 		/* Are we done? */
 		if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
 			    rack->r_ctl.high_collapse_point))
 			rack->r_collapse_point_valid = 0;
 		sack_rxmit = 1;
 		/* We are not doing a TLP */
 		doing_tlp = 0;
 		len = rsm->r_end - rsm->r_start;
 		sb_offset = rsm->r_start - tp->snd_una;
 		sendalot = 0;
 		rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
 	} else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
 		/* We have a retransmit that takes precedence */
 		if ((!IN_FASTRECOVERY(tp->t_flags)) &&
 		    ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
 		    ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
 			/* Enter recovery if not induced by a time-out */
 			rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
 		}
 #ifdef INVARIANTS
 		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
 			panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
 			      tp, rack, rsm, rsm->r_start, tp->snd_una);
 		}
 #endif
 		len = rsm->r_end - rsm->r_start;
 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
 			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
 			 __func__, __LINE__,
 			 rsm->r_start, tp->snd_una, tp, rack, rsm));
 		sb_offset = rsm->r_start - tp->snd_una;
 		sendalot = 0;
 		rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
 		if (len > 0) {
 			sack_rxmit = 1;
 			KMOD_TCPSTAT_INC(tcps_sack_rexmits);
 			KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
 					 min(len, segsiz));
 		}
 	} else if (rack->r_ctl.rc_tlpsend) {
 		/* Tail loss probe */
 		long cwin;
 		long tlen;
 
 		/*
 		 * Check if we can do a TLP with a RACK'd packet. This can
 		 * happen if we are not doing the rack cheat, we skipped to
 		 * a TLP and it went off.
 		 */
 		rsm = rack->r_ctl.rc_tlpsend;
 		/* We are doing a TLP, make sure the flag is present */
 		rsm->r_flags |= RACK_TLP;
 		rack->r_ctl.rc_tlpsend = NULL;
 		sack_rxmit = 1;
 		tlen = rsm->r_end - rsm->r_start;
 		if (tlen > segsiz)
 			tlen = segsiz;
 		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
 			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
 			 __func__, __LINE__,
 			 rsm->r_start, tp->snd_una, tp, rack, rsm));
 		sb_offset = rsm->r_start - tp->snd_una;
 		cwin = min(tp->snd_wnd, tlen);
 		len = cwin;
 	}
 	if (rack->r_must_retran &&
 	    (doing_tlp == 0) &&
 	    (SEQ_GT(tp->snd_max, tp->snd_una)) &&
 	    (rsm == NULL)) {
 		/*
 		 * There are two different ways that we
 		 * can get into this block:
 		 * a) This is a non-sack connection, we had a time-out
 		 *    and thus r_must_retran was set and everything
 		 *    left outstanding has been marked for retransmit.
 		 * b) The MTU of the path shrank, so that everything
 		 *    was marked to be retransmitted with the smaller
 		 *    mtu and r_must_retran was set.
 		 *
 		 * This means that we expect the sendmap (outstanding)
 		 * to all be marked must. We can use the tmap to
 		 * look at them.
 		 *
 		 */
 		int sendwin, flight;
 
 		sendwin = min(tp->snd_wnd, tp->snd_cwnd);
 		flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
 		if (flight >= sendwin) {
 			/*
 			 * We can't send yet.
 			 */
 			so = inp->inp_socket;
 			sb = &so->so_snd;
 			goto just_return_nolock;
 		}
 		/*
 		 * This is the case a/b mentioned above. All
 		 * outstanding/not-acked should be marked.
 		 * We can use the tmap to find them.
 		 */
 		rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
 		if (rsm == NULL) {
 			/* TSNH */
 			rack->r_must_retran = 0;
 			rack->r_ctl.rc_out_at_rto = 0;
 			so = inp->inp_socket;
 			sb = &so->so_snd;
 			goto just_return_nolock;
 		}
 		if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
 			/*
 			 * The first one does not have the flag, did we collapse
 			 * further up in our list?
 			 */
 			rack->r_must_retran = 0;
 			rack->r_ctl.rc_out_at_rto = 0;
 			rsm = NULL;
 			sack_rxmit = 0;
 		} else {
 			sack_rxmit = 1;
 			len = rsm->r_end - rsm->r_start;
 			sb_offset = rsm->r_start - tp->snd_una;
 			sendalot = 0;
 			if ((rack->full_size_rxt == 0) &&
 			    (rack->shape_rxt_to_pacing_min == 0) &&
 			    (len >= segsiz))
 				len = segsiz;
 			else if (rack->shape_rxt_to_pacing_min &&
 				 rack->gp_ready) {
 				/* We use pacing min as shaping len req */
 				uint32_t maxlen;
 
 				maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
 				if (len > maxlen)
 					len = maxlen;
 			}
 			/*
 			 * Delay removing the flag RACK_MUST_RXT so
 			 * that the fastpath for retransmit will
 			 * work with this rsm.
 			 */
 		}
 	}
 	/*
 	 * Enforce a connection sendmap count limit if set
 	 * as long as we are not retransmitting.
 	 */
 	if ((rsm == NULL) &&
 	    (rack->do_detection == 0) &&
 	    (V_tcp_map_entries_limit > 0) &&
 	    (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
 		counter_u64_add(rack_to_alloc_limited, 1);
 		if (!rack->alloc_limit_reported) {
 			rack->alloc_limit_reported = 1;
 			counter_u64_add(rack_alloc_limited_conns, 1);
 		}
 		so = inp->inp_socket;
 		sb = &so->so_snd;
 		goto just_return_nolock;
 	}
 	if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
 		/* we are retransmitting the fin */
 		len--;
 		if (len) {
 			/*
 			 * When retransmitting data do *not* include the
 			 * FIN. This could happen from a TLP probe.
 			 */
 			flags &= ~TH_FIN;
 		}
 	}
 	if (rsm && rack->r_fsb_inited &&
 	    rack_use_rsm_rfo &&
 	    ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
 		int ret;
 
 		ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp);
 		if (ret == 0)
 			return (0);
 	}
 	so = inp->inp_socket;
 	sb = &so->so_snd;
 	if (do_a_prefetch == 0) {
 		kern_prefetch(sb, &do_a_prefetch);
 		do_a_prefetch = 1;
 	}
 #ifdef NETFLIX_SHARED_CWND
 	if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
 	    rack->rack_enable_scwnd) {
 		/* We are doing cwnd sharing */
 		if (rack->gp_ready &&
 		    (rack->rack_attempted_scwnd == 0) &&
 		    (rack->r_ctl.rc_scw == NULL) &&
 		    tp->t_lib) {
 			/* The pcbid is in, let's make an attempt */
 			counter_u64_add(rack_try_scwnd, 1);
 			rack->rack_attempted_scwnd = 1;
 			rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
 								   &rack->r_ctl.rc_scw_index,
 								   segsiz);
 		}
 		if (rack->r_ctl.rc_scw &&
 		    (rack->rack_scwnd_is_idle == 1) &&
 		    sbavail(&so->so_snd)) {
 			/* we are no longer out of data */
 			tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
 			rack->rack_scwnd_is_idle = 0;
 		}
 		if (rack->r_ctl.rc_scw) {
 			/* First let's update and get the cwnd */
 			rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
 										       rack->r_ctl.rc_scw_index,
 										       tp->snd_cwnd, tp->snd_wnd, segsiz);
 		}
 	}
 #endif
 	/*
 	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
 	 * state flags.
 	 */
 	if (tp->t_flags & TF_NEEDFIN)
 		flags |= TH_FIN;
 	if (tp->t_flags & TF_NEEDSYN)
 		flags |= TH_SYN;
 	if ((sack_rxmit == 0) && (prefetch_rsm == 0)) {
 		void *end_rsm;
 		end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
 		if (end_rsm)
 			kern_prefetch(end_rsm, &prefetch_rsm);
 		prefetch_rsm = 1;
 	}
 	SOCKBUF_LOCK(sb);
 	/*
 	 * If snd_nxt == snd_max and we have transmitted a FIN, the
 	 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a
 	 * negative length.  This can also occur when TCP opens up its
 	 * congestion window while receiving additional duplicate acks after
 	 * fast-retransmit because TCP will reset snd_nxt to snd_max after
 	 * the fast-retransmit.
 	 *
 	 * In the normal retransmit-FIN-only case, however, snd_nxt will be
 	 * set to snd_una, the sb_offset will be 0, and the length may wind
 	 * up 0.
 	 *
 	 * If sack_rxmit is true we are retransmitting from the scoreboard
 	 * in which case len is already set.
 	 */
 	if ((sack_rxmit == 0) &&
 	    (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
 		uint32_t avail;
 
 		avail = sbavail(sb);
 		if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
 			sb_offset = tp->snd_nxt - tp->snd_una;
 		else
 			sb_offset = 0;
 		if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
 			if (rack->r_ctl.rc_tlp_new_data) {
 				/* TLP is forcing out new data */
 				if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
 					rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
 				}
 				if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
 					if (tp->snd_wnd > sb_offset)
 						len = tp->snd_wnd - sb_offset;
 					else
 						len = 0;
 				} else {
 					len = rack->r_ctl.rc_tlp_new_data;
 				}
 				rack->r_ctl.rc_tlp_new_data = 0;
 			}  else {
 				len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
 			}
 			if ((rack->r_ctl.crte == NULL) &&
 			    IN_FASTRECOVERY(tp->t_flags) &&
 			    (rack->full_size_rxt == 0) &&
 			    (rack->shape_rxt_to_pacing_min == 0) &&
 			    (len > segsiz)) {
 				/*
 				 * For prr=off, we need to send only 1 MSS
 				 * at a time. We do this because another sack could
 				 * be arriving that causes us to send retransmits and
 				 * we don't want to be on a long pace due to a larger send
 				 * that keeps us from sending out the retransmit.
 				 */
 				len = segsiz;
 			} else if (rack->shape_rxt_to_pacing_min &&
 				   rack->gp_ready) {
 				/* We use pacing min as shaping len req */
 				uint32_t maxlen;
 
 				maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz);
 				if (len > maxlen)
 					len = maxlen;
 			} /* Otherwise full_size_rxt is on, so send it all */
 		} else {
 			uint32_t outstanding;
 			/*
 			 * We are inside of a Fast recovery episode, this
 			 * is caused by a SACK or 3 dup acks. At this point
 			 * we have sent all the retransmissions and we rely
 			 * on PRR to dictate what we will send in the form of
 			 * new data.
 			 */
 
 			outstanding = tp->snd_max - tp->snd_una;
 			if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
 				if (tp->snd_wnd > outstanding) {
 					len = tp->snd_wnd - outstanding;
 					/* Check to see if we have the data */
 					if ((sb_offset + len) > avail) {
 						/* It does not all fit */
 						if (avail > sb_offset)
 							len = avail - sb_offset;
 						else
 							len = 0;
 					}
 				} else {
 					len = 0;
 				}
 			} else if (avail > sb_offset) {
 				len = avail - sb_offset;
 			} else {
 				len = 0;
 			}
 			if (len > 0) {
 				if (len > rack->r_ctl.rc_prr_sndcnt) {
 					len = rack->r_ctl.rc_prr_sndcnt;
 				}
 				if (len > 0) {
 					sub_from_prr = 1;
 				}
 			}
 			if (len > segsiz) {
 				/*
 				 * We should never send more than a MSS when
 				 * retransmitting or sending new data in prr
 				 * mode unless the override flag is on. Most
 				 * likely the PRR algorithm is not going to
 				 * let us send a lot as well :-)
 				 */
 				if (rack->r_ctl.rc_prr_sendalot == 0) {
 					len = segsiz;
 				}
 			} else if (len < segsiz) {
 				/*
 				 * Do we send any? The idea here is if the
 				 * send empties the socket buffer we want to
 				 * do it. However, if not, then let's just wait
 				 * for our prr_sndcnt to get bigger.
 				 */
 				long leftinsb;
 
 				leftinsb = sbavail(sb) - sb_offset;
 				if (leftinsb > len) {
 					/* This send does not empty the sb */
 					len = 0;
 				}
 			}
 		}
 	} else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
 		/*
 		 * If the connection is not established and we are not
 		 * doing TCP Fast Open, send no data.
 		 */
 		if ((sack_rxmit == 0) &&
 		    (!IS_FASTOPEN(tp->t_flags))){
 			len = 0;
 			sb_offset = 0;
 		}
 	}
 	if (prefetch_so_done == 0) {
 		kern_prefetch(so, &prefetch_so_done);
 		prefetch_so_done = 1;
 	}
 	/*
 	 * Lop off SYN bit if it has already been sent.  However, if this is
 	 * SYN-SENT state and if segment contains data and if we don't know
 	 * that foreign host supports TAO, suppress sending segment.
 	 */
 	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) &&
 	    ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) {
 		/*
 		 * When sending additional segments following a TFO SYN|ACK,
 		 * do not include the SYN bit.
 		 */
 		if (IS_FASTOPEN(tp->t_flags) &&
 		    (tp->t_state == TCPS_SYN_RECEIVED))
 			flags &= ~TH_SYN;
 	}
 	/*
 	 * Be careful not to send data and/or FIN on SYN segments. This
 	 * measure is needed to prevent interoperability problems with not
 	 * fully conformant TCP implementations.
 	 */
 	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
 		len = 0;
 		flags &= ~TH_FIN;
 	}
 	/*
 	 * On TFO sockets, ensure no data is sent in the following cases:
 	 *
 	 *  - When retransmitting SYN|ACK on a passively-created socket
 	 *
 	 *  - When retransmitting SYN on an actively created socket
 	 *
 	 *  - When sending a zero-length cookie (cookie request) on an
 	 *    actively created socket
 	 *
 	 *  - When the socket is in the CLOSED state (RST is being sent)
 	 */
 	if (IS_FASTOPEN(tp->t_flags) &&
 	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
 	     ((tp->t_state == TCPS_SYN_SENT) &&
 	      (tp->t_tfo_client_cookie_len == 0)) ||
 	     (flags & TH_RST))) {
 		sack_rxmit = 0;
 		len = 0;
 	}
 	/* Without fast-open there should never be data sent on a SYN */
 	if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) {
 		tp->snd_nxt = tp->iss;
 		len = 0;
 	}
 	if ((len > segsiz) && (tcp_dsack_block_exists(tp))) {
 		/* We only send 1 MSS if we have a DSACK block */
 		add_flag |= RACK_SENT_W_DSACK;
 		len = segsiz;
 	}
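 	/* Remember the untrimmed length; it is used later for the fast-output decision. */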
 	orig_len = len;
 	if (len <= 0) {
 		/*
 		 * If FIN has been sent but not acked, but we haven't been
 		 * called to retransmit, len will be < 0.  Otherwise, window
 		 * shrank after we sent into it.  If window shrank to 0,
 		 * cancel pending retransmit, pull snd_nxt back to (closed)
 		 * window, and set the persist timer if it isn't already
 		 * going.  If the window didn't close completely, just wait
 		 * for an ACK.
 		 *
 		 * We also do a general check here to ensure that we will
 		 * set the persist timer when we have data to send, but a
 		 * 0-byte window. This makes sure the persist timer is set
 		 * even if the packet hits one of the "goto send" lines
 		 * below.
 		 */
 		len = 0;
 		if ((tp->snd_wnd == 0) &&
 		    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
 		    (tp->snd_una == tp->snd_max) &&
 		    (sb_offset < (int)sbavail(sb))) {
 			rack_enter_persist(tp, rack, cts, tp->snd_una);
 		}
 	} else if ((rsm == NULL) &&
 		   (doing_tlp == 0) &&
 		   (len < pace_max_seg)) {
 		/*
 		 * We are not sending a maximum sized segment for
 		 * some reason. Should we not send anything (think
 		 * sws or persists)?
 		 */
 		if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
 		    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
 		    (len < minseg) &&
 		    (len < (int)(sbavail(sb) - sb_offset))) {
 			/*
 			 * Here the rwnd is less than the minimum pacing size,
 			 * this is not a retransmit, we are established, and
 			 * the send is not the last in the socket buffer, so
 			 * we send nothing, and we may enter persists if
 			 * nothing is outstanding.
 			 */
 			len = 0;
 			if (tp->snd_max == tp->snd_una) {
 				/*
 				 * Nothing out we can
 				 * go into persists.
 				 */
 				rack_enter_persist(tp, rack, cts, tp->snd_una);
 			}
 		} else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) &&
 			   (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
 			   (len < (int)(sbavail(sb) - sb_offset)) &&
 			   (len < minseg)) {
 			/*
 			 * Here we are not retransmitting, and
 			 * the cwnd is not so small that we could
 			 * not send at least a min size (rxt timer
 			 * not having gone off), we have 2 segments or
 			 * more already in flight, it's not the tail end
 			 * of the socket buffer and the cwnd is blocking
 			 * us from sending out a minimum pacing segment size.
 			 * Let's not send anything.
 			 */
 			len = 0;
 		} else if (((tp->snd_wnd - ctf_outstanding(tp)) <
 			    min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
 			   (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
 			   (len < (int)(sbavail(sb) - sb_offset)) &&
 			   (TCPS_HAVEESTABLISHED(tp->t_state))) {
 			/*
 			 * Here we have a send window but we have
 			 * filled it up and we can't send another pacing segment.
 			 * We also have more than 2 segments in flight
 			 * and we are not completing the sb, i.e. we would allow
 			 * the last bytes of the sb to go out even if
 			 * it's not a full pacing segment.
 			 */
 			len = 0;
 		} else if ((rack->r_ctl.crte != NULL) &&
 			   (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
 			   (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
 			   (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
 			   (len < (int)(sbavail(sb) - sb_offset))) {
 			/*
 			 * Here we are doing hardware pacing, this is not a TLP,
 			 * we are not sending a pace max segment size, there is rwnd
 			 * room to send at least N pace_max_seg, the cwnd is greater
 			 * than or equal to a full pacing segment plus 4 MSS and we have 2 or
 			 * more segments in flight and it's not the tail of the socket buffer.
 			 *
 			 * We don't want to send; instead we need to get more ACKs in to
 			 * allow us to send a full pacing segment. Normally, if we are pacing
 			 * at about the right speed, we should have finished our pacing
 			 * send as most of the ACKs have come back if we are at the
 			 * right rate. This is a bit fuzzy since return path delay
 			 * can delay the ACKs, which is why we want to make sure we
 			 * have cwnd space to have a bit more than a max pace segment in flight.
 			 *
 			 * If we have not gotten our ACKs back, we are pacing at too high a
 			 * rate; delaying will not hurt and will bring our GP estimate down by
 			 * injecting the delay. If we don't do this we will send
 			 * 2 MSS out in response to the ACKs being clocked in, which
 			 * defeats the point of hw-pacing (i.e. to help us get
 			 * larger TSOs out).
 			 */
 			len = 0;
 		}
 
 	}
 	/* len will be >= 0 after this point. */
 	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
 	rack_sndbuf_autoscale(rack);
 	/*
 	 * Decide if we can use TCP Segmentation Offloading (if supported by
 	 * hardware).
 	 *
 	 * TSO may only be used if we are in a pure bulk sending state.  The
 	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
 	 * options prevents using TSO.  With TSO the TCP header is the same
 	 * (except for the sequence number) for all generated packets.  This
 	 * makes it impossible to transmit any options which vary per
 	 * generated segment or packet.
 	 *
 	 * IPv4 handling has a clear separation of ip options and ip header
 	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
 	 * the right thing below to provide length of just ip options and thus
 	 * checking for ipoptlen is enough to decide if ip options are present.
 	 */
 	ipoptlen = 0;
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 	/*
 	 * Pre-calculate here as we save another lookup into the darknesses
 	 * of IPsec that way and can actually decide if TSO is ok.
 	 */
 #ifdef INET6
 	if (isipv6 && IPSEC_ENABLED(ipv6))
 		ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
 #ifdef INET
 	else
 #endif
 #endif				/* INET6 */
 #ifdef INET
 		if (IPSEC_ENABLED(ipv4))
 			ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
 #endif				/* INET */
 #endif
 
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 	ipoptlen += ipsec_optlen;
 #endif
 	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
 	    (tp->t_port == 0) &&
 	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
 	    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
 	    ipoptlen == 0)
 		tso = 1;
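 	/*
 	 * Only keep TH_FIN if this send (or the rsm being retransmitted)
 	 * actually covers the last of the data.
 	 */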
 	{
 		uint32_t outstanding __unused;
 
 		outstanding = tp->snd_max - tp->snd_una;
 		if (tp->t_flags & TF_SENTFIN) {
 			/*
 			 * If we sent a fin, snd_max is 1 higher than
 			 * snd_una
 			 */
 			outstanding--;
 		}
 		if (sack_rxmit) {
 			if ((rsm->r_flags & RACK_HAS_FIN) == 0)
 				flags &= ~TH_FIN;
 		} else {
 			if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
 				   sbused(sb)))
 				flags &= ~TH_FIN;
 		}
 	}
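 	/* Compute the receive window we can offer, capped at the maximum scaled window. */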
 	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
 		      (long)TCP_MAXWIN << tp->rcv_scale);
 
 	/*
 	 * Sender silly window avoidance.  We transmit under the following
 	 * conditions when len is non-zero:
 	 *
 	 * - We have a full segment (or more with TSO)
 	 * - This is the last buffer in a write()/send() and we are either
 	 *   idle or running NODELAY
 	 * - We've timed out (e.g. persist timer)
 	 * - We have more than 1/2 the maximum send window's worth of data
 	 *   (the receiver may be limiting the window size)
 	 * - We need to retransmit
 	 */
 	if (len) {
 		if (len >= segsiz) {
 			goto send;
 		}
 		/*
 		 * NOTE! on localhost connections an 'ack' from the remote
 		 * end may occur synchronously with the output and cause us
 		 * to flush a buffer queued with moretocome.  XXX
 		 *
 		 */
 		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
 		    (idle || (tp->t_flags & TF_NODELAY)) &&
 		    ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
 		    (tp->t_flags & TF_NOPUSH) == 0) {
 			pass = 2;
 			goto send;
 		}
 		if ((tp->snd_una == tp->snd_max) && len) {	/* Nothing outstanding */
 			pass = 22;
 			goto send;
 		}
 		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
 			pass = 4;
 			goto send;
 		}
 		if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {	/* retransmit case */
 			pass = 5;
 			goto send;
 		}
 		if (sack_rxmit) {
 			pass = 6;
 			goto send;
 		}
 		if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
 		    (ctf_outstanding(tp) < (segsiz * 2))) {
 			/*
 			 * We have less than two MSS outstanding (delayed ack)
 			 * and our rwnd will not let us send a full sized
 			 * MSS. Lets go ahead and let this small segment
 			 * out because we want to try to have at least two
 			 * packets inflight to not be caught by delayed ack.
 			 */
 			pass = 12;
 			goto send;
 		}
 	}
 	/*
 	 * Sending of standalone window updates.
 	 *
 	 * Window updates are important when we close our window due to a
 	 * full socket buffer and are opening it again after the application
 	 * reads data from it.  Once the window has opened again and the
 	 * remote end starts to send again the ACK clock takes over and
 	 * provides the most current window information.
 	 *
 	 * We must avoid the silly window syndrome whereby every read from
 	 * the receive buffer, no matter how small, causes a window update
 	 * to be sent.  We also should avoid sending a flurry of window
 	 * updates when the socket buffer had queued a lot of data and the
 	 * application is doing small reads.
 	 *
 	 * Prevent a flurry of pointless window updates by only sending an
 	 * update when we can increase the advertised window by more than
 	 * 1/4th of the socket buffer capacity.  When the buffer is getting
 	 * full or is very small, be more aggressive and send an update
 	 * whenever we can increase by two MSS-sized segments. In all other
 	 * situations the ACK's to new incoming data will carry further
 	 * window increases.
 	 *
 	 * Don't send an independent window update if a delayed ACK is
 	 * pending (it will get piggy-backed on it) or the remote side
 	 * already has done a half-close and won't send more data.  Skip
 	 * this if the connection is in T/TCP half-open state.
 	 */
 	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
 	    !(tp->t_flags & TF_DELACK) &&
 	    !TCPS_HAVERCVDFIN(tp->t_state)) {
 		/*
 		 * "adv" is the amount we could increase the window, taking
 		 * into account that we are limited by TCP_MAXWIN <<
 		 * tp->rcv_scale.
 		 */
 		int32_t adv;
 		int oldwin;
 
 		adv = recwin;
 		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
 			oldwin = (tp->rcv_adv - tp->rcv_nxt);
 			if (adv > oldwin)
 				adv -= oldwin;
 			else {
 				/* We can't increase the window */
 				adv = 0;
 			}
 		} else
 			oldwin = 0;
 
 		/*
 		 * If the new window size ends up being the same as or less
 		 * than the old size when it is scaled, then don't force
 		 * a window update.
 		 */
 		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
 			goto dontupdate;
 
 		if (adv >= (int32_t)(2 * segsiz) &&
 		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
 		     recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
 		     so->so_rcv.sb_hiwat <= 8 * segsiz)) {
 			pass = 7;
 			goto send;
 		}
 		if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
 			pass = 23;
 			goto send;
 		}
 	}
 dontupdate:
 
 	/*
 	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
 	 * is also a catch-all for the retransmit timer timeout case.
 	 */
 	if (tp->t_flags & TF_ACKNOW) {
 		pass = 8;
 		goto send;
 	}
 	if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
 		pass = 9;
 		goto send;
 	}
 	/*
 	 * If our state indicates that FIN should be sent and we have not
 	 * yet done so, then we need to send.
 	 */
 	if ((flags & TH_FIN) &&
 	    (tp->snd_nxt == tp->snd_una)) {
 		pass = 11;
 		goto send;
 	}
 	/*
 	 * No reason to send a segment, just return.
 	 */
 just_return:
 	SOCKBUF_UNLOCK(sb);
 just_return_nolock:
 	{
 		int app_limited = CTF_JR_SENT_DATA;
 
 		if (tot_len_this_send > 0) {
 			/* Make sure snd_nxt is up to max */
 			rack->r_ctl.fsb.recwin = recwin;
 			slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz);
 			if ((error == 0) &&
 			    rack_use_rfo &&
 			    ((flags & (TH_SYN|TH_FIN)) == 0) &&
 			    (ipoptlen == 0) &&
 			    (tp->snd_nxt == tp->snd_max) &&
 			    (tp->rcv_numsacks == 0) &&
 			    rack->r_fsb_inited &&
 			    TCPS_HAVEESTABLISHED(tp->t_state) &&
 			    ((IN_RECOVERY(tp->t_flags)) == 0) &&
 			    (rack->r_must_retran == 0) &&
 			    ((tp->t_flags & TF_NEEDFIN) == 0) &&
 			    (len > 0) && (orig_len > 0) &&
 			    (orig_len > len) &&
 			    ((orig_len - len) >= segsiz) &&
 			    ((optlen == 0) ||
 			     ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
 				/* We can send at least one more MSS using our fsb */
 				rack_setup_fast_output(tp, rack, sb, len, orig_len,
 						       segsiz, pace_max_seg, hw_tls, flags);
 			} else
 				rack->r_fast_output = 0;
 
 
 			rack_log_fsb(rack, tp, so, flags,
 				     ipoptlen, orig_len, len, 0,
 				     1, optlen, __LINE__, 1);
 			if (SEQ_GT(tp->snd_max, tp->snd_nxt))
 				tp->snd_nxt = tp->snd_max;
 		} else {
 			int end_window = 0;
 			uint32_t seq = tp->gput_ack;
 
 			rsm = tqhash_max(rack->r_ctl.tqh);
 			if (rsm) {
 				/*
 				 * Mark the last sent that we just-returned (hinting
 				 * that delayed ack may play a role in any rtt measurement).
 				 */
 				rsm->r_just_ret = 1;
 			}
 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
 			rack->r_ctl.rc_agg_delayed = 0;
 			rack->r_early = 0;
 			rack->r_late = 0;
 			rack->r_ctl.rc_agg_early = 0;
 			if ((ctf_outstanding(tp) +
 			     min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
 				 minseg)) >= tp->snd_wnd) {
 				/* We are limited by the rwnd */
 				app_limited = CTF_JR_RWND_LIMITED;
 				if (IN_FASTRECOVERY(tp->t_flags))
 					rack->r_ctl.rc_prr_sndcnt = 0;
 			} else if (ctf_outstanding(tp) >= sbavail(sb)) {
 			/* We are limited by what's available -- app limited */
 				app_limited = CTF_JR_APP_LIMITED;
 				if (IN_FASTRECOVERY(tp->t_flags))
 					rack->r_ctl.rc_prr_sndcnt = 0;
 			} else if ((idle == 0) &&
 				   ((tp->t_flags & TF_NODELAY) == 0) &&
 				   ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
 				   (len < segsiz)) {
 				/*
 				 * TF_NODELAY is not set and the
 				 * user is sending less than 1 MSS. This
 				 * brings out SWS avoidance so we
 				 * don't send. Another app-limited case.
 				 */
 				app_limited = CTF_JR_APP_LIMITED;
 			} else if (tp->t_flags & TF_NOPUSH) {
 				/*
 				 * The user has requested no push of
 				 * the last segment and we are
 				 * at the last segment. Another app
 				 * limited case.
 				 */
 				app_limited = CTF_JR_APP_LIMITED;
 			} else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
 				/* It's the cwnd */
 				app_limited = CTF_JR_CWND_LIMITED;
 			} else if (IN_FASTRECOVERY(tp->t_flags) &&
 				   (rack->rack_no_prr == 0) &&
 				   (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
 				app_limited = CTF_JR_PRR;
 			} else {
 				/* Now, why are we not sending here? */
 #ifdef NOW
 #ifdef INVARIANTS
 				panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
 #endif
 #endif
 				app_limited = CTF_JR_ASSESSING;
 			}
 			/*
 			 * App limited in some fashion, for our pacing GP
 			 * measurements we don't want any gap (even cwnd).
 			 * Close down the measurement window.
 			 */
 			if (rack_cwnd_block_ends_measure &&
 			    ((app_limited == CTF_JR_CWND_LIMITED) ||
 			     (app_limited == CTF_JR_PRR))) {
 				/*
 				 * The reason we are not sending is
 				 * the cwnd (or prr). We have been configured
 				 * to end the measurement window in
 				 * this case.
 				 */
 				end_window = 1;
 			} else if (rack_rwnd_block_ends_measure &&
 				   (app_limited == CTF_JR_RWND_LIMITED)) {
 				/*
 				 * We are rwnd limited and have been
 				 * configured to end the measurement
 				 * window in this case.
 				 */
 				end_window = 1;
 			} else if (app_limited == CTF_JR_APP_LIMITED) {
 				/*
 				 * A true application limited period, we have
 				 * run out of data.
 				 */
 				end_window = 1;
 			} else if (app_limited == CTF_JR_ASSESSING) {
 				/*
 				 * In the assessing case we hit the end of
 				 * the if/else and had no known reason.
 				 * This will panic us under invariants.
 				 *
 				 * If we see this in logs we need to
 				 * investigate which reason we missed.
 				 */
 				end_window = 1;
 			}
 			if (end_window) {
 				uint8_t log = 0;
 
 				/* Adjust the Gput measurement */
 				if ((tp->t_flags & TF_GPUTINPROG) &&
 				    SEQ_GT(tp->gput_ack, tp->snd_max)) {
 					tp->gput_ack = tp->snd_max;
 					if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
 						/*
 						 * There is not enough to measure.
 						 */
 						tp->t_flags &= ~TF_GPUTINPROG;
 						rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
 									   rack->r_ctl.rc_gp_srtt /*flex1*/,
 									   tp->gput_seq,
 									   0, 0, 18, __LINE__, NULL, 0);
 					} else
 						log = 1;
 				}
 				/* Mark the last packet as app limited */
 				rsm = tqhash_max(rack->r_ctl.tqh);
 				if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
 					if (rack->r_ctl.rc_app_limited_cnt == 0)
 						rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
 					else {
 						/*
 						 * Go out to the end app limited and mark
 						 * this new one as next and move the end_appl up
 						 * to this guy.
 						 */
 						if (rack->r_ctl.rc_end_appl)
 							rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
 						rack->r_ctl.rc_end_appl = rsm;
 					}
 					rsm->r_flags |= RACK_APP_LIMITED;
 					rack->r_ctl.rc_app_limited_cnt++;
 				}
 				if (log)
 					rack_log_pacing_delay_calc(rack,
 								   rack->r_ctl.rc_app_limited_cnt, seq,
 								   tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0);
 			}
 		}
 		/* Check if we need to go into persists or not */
 		if ((tp->snd_max == tp->snd_una) &&
 		    TCPS_HAVEESTABLISHED(tp->t_state) &&
 		    sbavail(sb) &&
 		    (sbavail(sb) > tp->snd_wnd) &&
 		    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
 			/* Yes, let's make sure to move to persist before timer-start */
 			rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
 		}
 		rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
 		rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
 	}
 #ifdef NETFLIX_SHARED_CWND
 	if ((sbavail(sb) == 0) &&
 	    rack->r_ctl.rc_scw) {
 		tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
 		rack->rack_scwnd_is_idle = 1;
 	}
 #endif
 #ifdef TCP_ACCOUNTING
 	if (tot_len_this_send > 0) {
 		crtsc = get_cyclecount();
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_cnt_counters[SND_OUT_DATA]++;
 		}
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
 		}
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
 		}
 	} else {
 		crtsc = get_cyclecount();
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_cnt_counters[SND_LIMITED]++;
 		}
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
 		}
 	}
 	sched_unpin();
 #endif
 	return (0);
 
 send:
 	if ((rack->r_ctl.crte != NULL) &&
 	    (rsm == NULL) &&
 	    ((rack->rc_hw_nobuf == 1) ||
 	     (rack_hw_check_queue && (check_done == 0)))) {
 		/*
 		 * We only want to do this once with hw_check_queue; for the
 		 * ENOBUFS case we would only do it once, since if we come
 		 * around again the flag will be clear.
 		 */
 		check_done = 1;
 		slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz);
 		if (slot) {
 			rack->r_ctl.rc_agg_delayed = 0;
 			rack->r_ctl.rc_agg_early = 0;
 			rack->r_early = 0;
 			rack->r_late = 0;
 			SOCKBUF_UNLOCK(&so->so_snd);
 			goto skip_all_send;
 		}
 	}
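 	/* Account for whether this send is a retransmission or a new send. */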
 	if (rsm || sack_rxmit)
 		counter_u64_add(rack_nfto_resend, 1);
 	else
 		counter_u64_add(rack_non_fto_send, 1);
 	if ((flags & TH_FIN) &&
 	    sbavail(sb)) {
 		/*
 		 * We do not transmit a FIN
 		 * with data outstanding. We
 		 * need to make it so all data
 		 * is acked first.
 		 */
 		flags &= ~TH_FIN;
 	}
 	/* Enforce stack imposed max seg size if we have one */
 	if (rack->r_ctl.rc_pace_max_segs &&
 	    (len > rack->r_ctl.rc_pace_max_segs)) {
 		mark = 1;
 		len = rack->r_ctl.rc_pace_max_segs;
 	}
 	SOCKBUF_LOCK_ASSERT(sb);
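 	/* Note for PLPMTU discovery whether this send is a full-sized segment. */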
 	if (len > 0) {
 		if (len >= segsiz)
 			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
 		else
 			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
 	}
 	/*
 	 * Before ESTABLISHED, force sending of initial options unless TCP
 	 * set not to do any options. NOTE: we assume that the IP/TCP header
 	 * plus TCP options always fit in a single mbuf, leaving room for a
 	 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr)
 	 * + optlen <= MCLBYTES
 	 */
 	optlen = 0;
 #ifdef INET6
 	if (isipv6)
 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
 	else
 #endif
 		hdrlen = sizeof(struct tcpiphdr);
 
 	/*
 	 * Compute options for segment. We only have to care about SYN and
 	 * established connection segments.  Options for SYN-ACK segments
 	 * are handled in TCP syncache.
 	 */
 	to.to_flags = 0;
 	if ((tp->t_flags & TF_NOOPT) == 0) {
 		/* Maximum segment size. */
 		if (flags & TH_SYN) {
 			tp->snd_nxt = tp->iss;
 			to.to_mss = tcp_mssopt(&inp->inp_inc);
 			if (tp->t_port)
 				to.to_mss -= V_tcp_udp_tunneling_overhead;
 			to.to_flags |= TOF_MSS;
 
 			/*
 			 * On SYN or SYN|ACK transmits on TFO connections,
 			 * only include the TFO option if it is not a
 			 * retransmit, as the presence of the TFO option may
 			 * have caused the original SYN or SYN|ACK to have
 			 * been dropped by a middlebox.
 			 */
 			if (IS_FASTOPEN(tp->t_flags) &&
 			    (tp->t_rxtshift == 0)) {
 				if (tp->t_state == TCPS_SYN_RECEIVED) {
 					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
 					to.to_tfo_cookie =
 						(u_int8_t *)&tp->t_tfo_cookie.server;
 					to.to_flags |= TOF_FASTOPEN;
 					wanted_cookie = 1;
 				} else if (tp->t_state == TCPS_SYN_SENT) {
 					to.to_tfo_len =
 						tp->t_tfo_client_cookie_len;
 					to.to_tfo_cookie =
 						tp->t_tfo_cookie.client;
 					to.to_flags |= TOF_FASTOPEN;
 					wanted_cookie = 1;
 					/*
 					 * If we wind up having more data to
 					 * send with the SYN than can fit in
 					 * one segment, don't send any more
 					 * until the SYN|ACK comes back from
 					 * the other end.
 					 */
 					sendalot = 0;
 				}
 			}
 		}
 		/* Window scaling. */
 		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
 			to.to_wscale = tp->request_r_scale;
 			to.to_flags |= TOF_SCALE;
 		}
 		/* Timestamps. */
 		if ((tp->t_flags & TF_RCVD_TSTMP) ||
 		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
 			to.to_tsval = ms_cts + tp->ts_offset;
 			to.to_tsecr = tp->ts_recent;
 			to.to_flags |= TOF_TS;
 		}
 		/* Set receive buffer autosizing timestamp. */
 		if (tp->rfbuf_ts == 0 &&
 		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
 			tp->rfbuf_ts = tcp_ts_getticks();
 		/* Selective ACK's. */
 		if (tp->t_flags & TF_SACK_PERMIT) {
 			if (flags & TH_SYN)
 				to.to_flags |= TOF_SACKPERM;
 			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
 				 tp->rcv_numsacks > 0) {
 				to.to_flags |= TOF_SACK;
 				to.to_nsacks = tp->rcv_numsacks;
 				to.to_sacks = (u_char *)tp->sackblks;
 			}
 		}
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		/* TCP-MD5 (RFC2385). */
 		if (tp->t_flags & TF_SIGNATURE)
 			to.to_flags |= TOF_SIGNATURE;
 #endif
 
 		/* Processing the options. */
 		hdrlen += optlen = tcp_addoptions(&to, opt);
 		/*
 		 * If we wanted a TFO option to be added, but it was unable
 		 * to fit, ensure no data is sent.
 		 */
 		if (IS_FASTOPEN(tp->t_flags) && wanted_cookie &&
 		    !(to.to_flags & TOF_FASTOPEN))
 			len = 0;
 	}
 	if (tp->t_port) {
 		if (V_tcp_udp_tunneling_port == 0) {
 			/* The port was removed?? */
 			SOCKBUF_UNLOCK(&so->so_snd);
 #ifdef TCP_ACCOUNTING
 			crtsc = get_cyclecount();
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
 			}
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
 			}
 			sched_unpin();
 #endif
 			return (EHOSTUNREACH);
 		}
 		hdrlen += sizeof(struct udphdr);
 	}
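 	/* Now compute the length of any IP options (plus IPsec overhead, if present). */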
 #ifdef INET6
 	if (isipv6)
 		ipoptlen = ip6_optlen(inp);
 	else
 #endif
 		if (inp->inp_options)
 			ipoptlen = inp->inp_options->m_len -
 				offsetof(struct ipoption, ipopt_list);
 		else
 			ipoptlen = 0;
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 	ipoptlen += ipsec_optlen;
 #endif
 
 	/*
 	 * Adjust data length if insertion of options will bump the packet
 	 * length beyond the t_maxseg length. Clear the FIN bit because we
 	 * cut off the tail of the segment.
 	 */
 	if (len + optlen + ipoptlen > tp->t_maxseg) {
 		if (tso) {
 			uint32_t if_hw_tsomax;
 			uint32_t moff;
 			int32_t max_len;
 
 			/* extract TSO information */
 			if_hw_tsomax = tp->t_tsomax;
 			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
 			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
 			KASSERT(ipoptlen == 0,
 				("%s: TSO can't do IP options", __func__));
 
 			/*
 			 * Check if we should limit by maximum payload
 			 * length:
 			 */
 			if (if_hw_tsomax != 0) {
 				/* compute maximum TSO length */
 				max_len = (if_hw_tsomax - hdrlen -
 					   max_linkhdr);
 				if (max_len <= 0) {
 					len = 0;
 				} else if (len > max_len) {
 					sendalot = 1;
 					len = max_len;
 					mark = 2;
 				}
 			}
 			/*
 			 * Prevent the last segment from being fractional
 			 * unless the send sockbuf can be emptied:
 			 */
 			max_len = (tp->t_maxseg - optlen);
 			if ((sb_offset + len) < sbavail(sb)) {
 				moff = len % (u_int)max_len;
 				if (moff != 0) {
 					mark = 3;
 					len -= moff;
 				}
 			}
 			/*
 			 * In case there are too many small fragments don't
 			 * use TSO:
 			 */
 			if (len <= max_len) {
 				mark = 4;
 				tso = 0;
 			}
 			/*
 			 * Send the FIN in a separate segment after the bulk
 			 * sending is done. We don't trust the TSO
 			 * implementations to clear the FIN flag on all but
 			 * the last segment.
 			 */
 			if (tp->t_flags & TF_NEEDFIN) {
 				sendalot = 4;
 			}
 		} else {
 			mark = 5;
 			if (optlen + ipoptlen >= tp->t_maxseg) {
 				/*
 				 * Since we don't have enough space to put
 				 * the IP header chain and the TCP header in
 				 * one packet as required by RFC 7112, don't
 				 * send it. Also ensure that at least one
 				 * byte of the payload can be put into the
 				 * TCP segment.
 				 */
 				SOCKBUF_UNLOCK(&so->so_snd);
 				error = EMSGSIZE;
 				sack_rxmit = 0;
 				goto out;
 			}
 			len = tp->t_maxseg - optlen - ipoptlen;
 			sendalot = 5;
 		}
 	} else {
 		tso = 0;
 		mark = 6;
 	}
 	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
 		("%s: len > IP_MAXPACKET", __func__));
 #ifdef DIAGNOSTIC
 #ifdef INET6
 	if (max_linkhdr + hdrlen > MCLBYTES)
 #else
 		if (max_linkhdr + hdrlen > MHLEN)
 #endif
 			panic("tcphdr too big");
 #endif
 
 	/*
 	 * This KASSERT is here to catch edge cases at a well defined place.
 	 * Before, those had triggered (random) panic conditions further
 	 * down.
 	 */
 	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
 	if ((len == 0) &&
 	    (flags & TH_FIN) &&
 	    (sbused(sb))) {
 		/*
 		 * We have outstanding data; don't send a FIN by itself.
 		 */
 		goto just_return;
 	}
 	/*
 	 * Grab a header mbuf, attaching a copy of data to be transmitted,
 	 * and initialize the header from the template for sends on this
 	 * connection.
 	 */
 	hw_tls = tp->t_nic_ktls_xmit != 0;
 	if (len) {
 		uint32_t max_val;
 		uint32_t moff;
 
 		if (rack->r_ctl.rc_pace_max_segs)
 			max_val = rack->r_ctl.rc_pace_max_segs;
 		else if (rack->rc_user_set_max_segs)
 			max_val = rack->rc_user_set_max_segs * segsiz;
 		else
 			max_val = len;
 		/*
 		 * We allow a limit on sending with hptsi.
 		 */
 		if (len > max_val) {
 			mark = 7;
 			len = max_val;
 		}
 #ifdef INET6
 		if (MHLEN < hdrlen + max_linkhdr)
 			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 		else
 #endif
 			m = m_gethdr(M_NOWAIT, MT_DATA);
 
 		if (m == NULL) {
 			SOCKBUF_UNLOCK(sb);
 			error = ENOBUFS;
 			sack_rxmit = 0;
 			goto out;
 		}
 		m->m_data += max_linkhdr;
 		m->m_len = hdrlen;
 
 		/*
 		 * Start the m_copy functions from the closest mbuf to the
 		 * sb_offset in the socket buffer chain.
 		 */
 		mb = sbsndptr_noadv(sb, sb_offset, &moff);
 		s_mb = mb;
 		s_moff = moff;
 		if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
 			m_copydata(mb, moff, (int)len,
 				   mtod(m, caddr_t)+hdrlen);
 			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
 				sbsndptr_adv(sb, mb, len);
 			m->m_len += len;
 		} else {
 			struct sockbuf *msb;
 
 			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
 				msb = NULL;
 			else
 				msb = sb;
 			m->m_next = tcp_m_copym(
 				mb, moff, &len,
 				if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
 				((rsm == NULL) ? hw_tls : 0)
 #ifdef NETFLIX_COPY_ARGS
 				, &s_mb, &s_moff
 #endif
 				);
 			if (len <= (tp->t_maxseg - optlen)) {
 				/*
 				 * Must have run out of mbufs for the copy;
 				 * shorten it so we no longer need TSO. Let's
 				 * not set sendalot since we are low on
 				 * mbufs.
 				 */
 				tso = 0;
 			}
 			if (m->m_next == NULL) {
 				SOCKBUF_UNLOCK(sb);
 				(void)m_free(m);
 				error = ENOBUFS;
 				sack_rxmit = 0;
 				goto out;
 			}
 		}
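 		/* Update retransmit vs. new-data send statistics. */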
 		if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
 			if (rsm && (rsm->r_flags & RACK_TLP)) {
 				/*
 				 * TLP should not count in retran count, but
 				 * in its own bin
 				 */
 				counter_u64_add(rack_tlp_retran, 1);
 				counter_u64_add(rack_tlp_retran_bytes, len);
 			} else {
 				tp->t_sndrexmitpack++;
 				KMOD_TCPSTAT_INC(tcps_sndrexmitpack);
 				KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len);
 			}
 #ifdef STATS
 			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
 						 len);
 #endif
 		} else {
 			KMOD_TCPSTAT_INC(tcps_sndpack);
 			KMOD_TCPSTAT_ADD(tcps_sndbyte, len);
 #ifdef STATS
 			stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
 						 len);
 #endif
 		}
 		/*
 		 * If we're sending everything we've got, set PUSH. (This
 		 * will keep happy those implementations which only give
 		 * data to the user when a buffer fills or a PUSH comes in.)
 		 */
 		if (sb_offset + len == sbused(sb) &&
 		    sbused(sb) &&
 		    !(flags & TH_SYN)) {
 			flags |= TH_PUSH;
 			add_flag |= RACK_HAD_PUSH;
 		}
 
 		SOCKBUF_UNLOCK(sb);
 	} else {
 		SOCKBUF_UNLOCK(sb);
 		if (tp->t_flags & TF_ACKNOW)
 			KMOD_TCPSTAT_INC(tcps_sndacks);
 		else if (flags & (TH_SYN | TH_FIN | TH_RST))
 			KMOD_TCPSTAT_INC(tcps_sndctrl);
 		else
 			KMOD_TCPSTAT_INC(tcps_sndwinup);
 
 		m = m_gethdr(M_NOWAIT, MT_DATA);
 		if (m == NULL) {
 			error = ENOBUFS;
 			sack_rxmit = 0;
 			goto out;
 		}
 #ifdef INET6
 		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
 		    MHLEN >= hdrlen) {
 			M_ALIGN(m, hdrlen);
 		} else
 #endif
 			m->m_data += max_linkhdr;
 		m->m_len = hdrlen;
 	}
 	SOCKBUF_UNLOCK_ASSERT(sb);
 	m->m_pkthdr.rcvif = (struct ifnet *)0;
 #ifdef MAC
 	mac_inpcb_create_mbuf(inp, m);
 #endif
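 	/*
 	 * If the prebuilt fsb header template can be used (no IP options
 	 * and the fsb is initialized), point ip/ip6, th and udp at the
 	 * template (it is copied into the mbuf later); otherwise build
 	 * the headers directly in the new mbuf.
 	 */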
 	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) &&  rack->r_fsb_inited) {
 #ifdef INET6
 		if (isipv6)
 			ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
 		else
 #endif				/* INET6 */
 #ifdef INET
 			ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
 #endif
 		th = rack->r_ctl.fsb.th;
 		udp = rack->r_ctl.fsb.udp;
 		if (udp) {
 #ifdef INET6
 			if (isipv6)
 				ulen = hdrlen + len - sizeof(struct ip6_hdr);
 			else
 #endif				/* INET6 */
 				ulen = hdrlen + len - sizeof(struct ip);
 			udp->uh_ulen = htons(ulen);
 		}
 	} else {
 #ifdef INET6
 		if (isipv6) {
 			ip6 = mtod(m, struct ip6_hdr *);
 			if (tp->t_port) {
 				udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
 				udp->uh_sport = htons(V_tcp_udp_tunneling_port);
 				udp->uh_dport = tp->t_port;
 				ulen = hdrlen + len - sizeof(struct ip6_hdr);
 				udp->uh_ulen = htons(ulen);
 				th = (struct tcphdr *)(udp + 1);
 			} else
 				th = (struct tcphdr *)(ip6 + 1);
 			tcpip_fillheaders(inp, tp->t_port, ip6, th);
 		} else
 #endif				/* INET6 */
 		{
 #ifdef INET
 			ip = mtod(m, struct ip *);
 			if (tp->t_port) {
 				udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
 				udp->uh_sport = htons(V_tcp_udp_tunneling_port);
 				udp->uh_dport = tp->t_port;
 				ulen = hdrlen + len - sizeof(struct ip);
 				udp->uh_ulen = htons(ulen);
 				th = (struct tcphdr *)(udp + 1);
 			} else
 				th = (struct tcphdr *)(ip + 1);
 			tcpip_fillheaders(inp, tp->t_port, ip, th);
 #endif
 		}
 	}
 	/*
 	 * Fill in fields, remembering maximum advertised window for use in
 	 * delaying messages about window sizes. If resending a FIN, be sure
 	 * not to use a new sequence number.
 	 */
 	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
 	    tp->snd_nxt == tp->snd_max)
 		tp->snd_nxt--;
 	/*
 	 * If we are starting a connection, send ECN setup SYN packet. If we
 	 * are on a retransmit, we may resend those bits a number of times
 	 * as per RFC 3168.
 	 */
 	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
 		flags |= tcp_ecn_output_syn_sent(tp);
 	}
 	/* Also handle parallel SYN for ECN */
 	if (TCPS_HAVERCVDSYN(tp->t_state) &&
 	    (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
 		int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit);
 		if ((tp->t_state == TCPS_SYN_RECEIVED) &&
 		    (tp->t_flags2 & TF2_ECN_SND_ECE))
 			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
 #ifdef INET6
 		if (isipv6) {
 			ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
 			ip6->ip6_flow |= htonl(ect << 20);
 		}
 		else
 #endif
 		{
 #ifdef INET
 			ip->ip_tos &= ~IPTOS_ECN_MASK;
 			ip->ip_tos |= ect;
 #endif
 		}
 	}
 	/*
 	 * If we are doing retransmissions, then snd_nxt will not reflect
 	 * the first unsent octet.  For ACK only packets, we do not want the
 	 * sequence number of the retransmitted packet, we want the sequence
 	 * number of the next unsent octet.  So, if there is no data (and no
 	 * SYN or FIN), use snd_max instead of snd_nxt when filling in
 	 * ti_seq.  But if we are in persist state, snd_max might reflect
 	 * one byte beyond the right edge of the window, so use snd_nxt in
 	 * that case, since we know we aren't doing a retransmission.
 	 * (retransmit and persist are mutually exclusive...)
 	 */
 	if (sack_rxmit == 0) {
 		if (len || (flags & (TH_SYN | TH_FIN))) {
 			th->th_seq = htonl(tp->snd_nxt);
 			rack_seq = tp->snd_nxt;
 		} else {
 			th->th_seq = htonl(tp->snd_max);
 			rack_seq = tp->snd_max;
 		}
 	} else {
 		th->th_seq = htonl(rsm->r_start);
 		rack_seq = rsm->r_start;
 	}
 	th->th_ack = htonl(tp->rcv_nxt);
 	tcp_set_flags(th, flags);
 	/*
 	 * Calculate receive window.  Don't shrink window, but avoid silly
 	 * window syndrome.
 	 * If a RST segment is sent, advertise a window of zero.
 	 */
 	if (flags & TH_RST) {
 		recwin = 0;
 	} else {
 		if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
 		    recwin < (long)segsiz) {
 			recwin = 0;
 		}
 		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
 		    recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
 			recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
 	}
 
 	/*
 	 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or
 	 * <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK> case is
 	 * handled in syncache.
 	 */
 	if (flags & TH_SYN)
 		th->th_win = htons((u_short)
 				   (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
 	else {
 		/* Avoid shrinking window with window scaling. */
 		recwin = roundup2(recwin, 1 << tp->rcv_scale);
 		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
 	}
 	/*
 	 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
 	 * window.  This may cause the remote transmitter to stall.  This
 	 * flag tells soreceive() to disable delayed acknowledgements when
 	 * draining the buffer.  This can occur if the receiver is
 	 * attempting to read more data than can be buffered prior to
 	 * transmitting on the connection.
 	 */
 	if (th->th_win == 0) {
 		tp->t_sndzerowin++;
 		tp->t_flags |= TF_RXWIN0SENT;
 	} else
 		tp->t_flags &= ~TF_RXWIN0SENT;
 	tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
 	/* Are we using the fsb? If so, copy the template data to the mbuf. */
 	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
 		uint8_t *cpto;
 
 		cpto = mtod(m, uint8_t *);
 		memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
 		/*
 		 * We have just copied in:
 		 * IP/IP6
 		 * <optional udphdr>
 		 * tcphdr (no options)
 		 *
 		 * We need to grab the correct pointers into the mbuf
 		 * for both the tcp header, and possibly the udp header (if tunneling).
 		 * We do this by using the offset in the copy buffer and adding it
 		 * to the mbuf base pointer (cpto).
 		 */
 #ifdef INET6
 		if (isipv6)
 			ip6 = mtod(m, struct ip6_hdr *);
 		else
 #endif				/* INET6 */
 #ifdef INET
 			ip = mtod(m, struct ip *);
 #endif
 		th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
 		/* If we have a udp header let's set it into the mbuf as well */
 		if (udp)
 			udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
 	}
 	if (optlen) {
 		bcopy(opt, th + 1, optlen);
 		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
 	}
 	/*
 	 * Put TCP length in extended header, and then checksum extended
 	 * header and data.
 	 */
 	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() need this */
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 	if (to.to_flags & TOF_SIGNATURE) {
 		/*
 		 * Calculate MD5 signature and put it into the place
 		 * determined before.
 		 * NOTE: since TCP options buffer doesn't point into
 		 * mbuf's data, calculate offset and use it.
 		 */
 		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
 						       (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
 			/*
 			 * Do not send segment if the calculation of MD5
 			 * digest has failed.
 			 */
 			goto out;
 		}
 	}
 #endif
 #ifdef INET6
 	if (isipv6) {
 		/*
 		 * ip6_plen need not be filled in now; it will be filled
 		 * in by ip6_output().
 		 */
 		if (tp->t_port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
 			th->th_sum = htons(0);
 			UDPSTAT_INC(udps_opackets);
 		} else {
 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			th->th_sum = in6_cksum_pseudo(ip6,
 						      sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
 						      0);
 		}
 	}
 #endif
 #if defined(INET6) && defined(INET)
 	else
 #endif
 #ifdef INET
 	{
 		if (tp->t_port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
 						ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
 			th->th_sum = htons(0);
 			UDPSTAT_INC(udps_opackets);
 		} else {
 			m->m_pkthdr.csum_flags = CSUM_TCP;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			th->th_sum = in_pseudo(ip->ip_src.s_addr,
 					       ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
 									IPPROTO_TCP + len + optlen));
 		}
 		/* IP version must be set here for ipv4/ipv6 checking later */
 		KASSERT(ip->ip_v == IPVERSION,
 			("%s: IP version incorrect: %d", __func__, ip->ip_v));
 	}
 #endif
 	/*
 	 * Enable TSO and specify the size of the segments. The TCP pseudo
 	 * header checksum is always provided. XXX: Fixme: This is currently
 	 * not the case for IPv6.
 	 */
 	if (tso) {
 		/*
 		 * Here we must use t_maxseg and the optlen since
 		 * the optlen may include SACKs (or a DSACK).
 		 */
 		KASSERT(len > tp->t_maxseg - optlen,
 			("%s: len <= tso_segsz", __func__));
 		m->m_pkthdr.csum_flags |= CSUM_TSO;
 		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
 	}
 	KASSERT(len + hdrlen == m_length(m, NULL),
 		("%s: mbuf chain different than expected: %d + %u != %u",
 		 __func__, len, hdrlen, m_length(m, NULL)));
 
 #ifdef TCP_HHOOK
 	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
 	hhook_run_tcp_est_out(tp, th, &to, len, tso);
 #endif
 	if ((rack->r_ctl.crte != NULL) &&
 	    (rack->rc_hw_nobuf == 0) &&
 	    tcp_bblogging_on(tp)) {
 		rack_log_queue_level(tp, rack, len, &tv, cts);
 	}
 	/* We're getting ready to send; log now. */
 	if (tcp_bblogging_on(rack->rc_tp)) {
 		union tcp_log_stackspecific log;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
 		if (rack->rack_no_prr)
 			log.u_bbr.flex1 = 0;
 		else
 			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
 		log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
 		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
 		log.u_bbr.flex4 = orig_len;
 		/* Save off the early/late values */
 		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
 		log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
 		log.u_bbr.bw_inuse = rack_get_bw(rack);
 		log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
 		log.u_bbr.flex8 = 0;
 		if (rsm) {
 			if (rsm->r_flags & RACK_RWND_COLLAPSED) {
 				rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
 				counter_u64_add(rack_collapsed_win_rxt, 1);
 				counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
 			}
 			if (doing_tlp)
 				log.u_bbr.flex8 = 2;
 			else
 				log.u_bbr.flex8 = 1;
 		} else {
 			if (doing_tlp)
 				log.u_bbr.flex8 = 3;
 		}
 		log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
 		log.u_bbr.flex7 = mark;
 		log.u_bbr.flex7 <<= 8;
 		log.u_bbr.flex7 |= pass;
 		log.u_bbr.pkts_out = tp->t_maxseg;
 		log.u_bbr.timeStamp = cts;
 		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
 		if (rsm && (rsm->r_rtr_cnt > 0)) {
 			/*
 			 * When we have a retransmit we want to log the
 			 * burst at send and flight at send from before.
 			 */
 			log.u_bbr.flex5 = rsm->r_fas;
 			log.u_bbr.bbr_substate = rsm->r_bas;
 		} else {
			/*
			 * For new transmits we log the inflight again in
			 * flex5 as well as the number of segments in this
			 * send in the substate field.
			 */
 			log.u_bbr.flex5 = log.u_bbr.inflight;
 			log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz);
 		}
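		/*
		 * The (len + segsiz - 1) / segsiz above is a ceiling
		 * division, e.g. len = 3000 with segsiz = 1448 logs 3
		 * segments.
		 */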
 		log.u_bbr.lt_epoch = cwnd_to_use;
 		log.u_bbr.delivered = sendalot;
 		log.u_bbr.rttProp = (uint64_t)rsm;
 		log.u_bbr.pkt_epoch = __LINE__;
 		if (rsm) {
 			log.u_bbr.delRate = rsm->r_flags;
 			log.u_bbr.delRate <<= 31;
 			log.u_bbr.delRate |= rack->r_must_retran;
 			log.u_bbr.delRate <<= 1;
 			log.u_bbr.delRate |= (sack_rxmit & 0x00000001);
 		} else {
 			log.u_bbr.delRate = rack->r_must_retran;
 			log.u_bbr.delRate <<= 1;
 			log.u_bbr.delRate |= (sack_rxmit & 0x00000001);
 		}
 		lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
 				    len, &log, false, NULL, __func__, __LINE__, &tv);
 	} else
 		lgb = NULL;
 
 	/*
 	 * Fill in IP length and desired time to live and send to IP level.
 	 * There should be a better way to handle ttl and tos; we could keep
 	 * them in the template, but need a way to checksum without them.
 	 */
	/*
	 * m->m_pkthdr.len should have been set before the checksum
	 * calculation, because in6_cksum() needs it.
	 */
 #ifdef INET6
 	if (isipv6) {
		/*
		 * We separately set the hop limit for every segment, since
		 * the user might want to change the value via setsockopt.
		 * Also, the desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
 		rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
 
 		/*
 		 * Set the packet size here for the benefit of DTrace
 		 * probes. ip6_output() will set it properly; it's supposed
 		 * to include the option header lengths as well.
 		 */
 		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
 
 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 		else
 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 
 		if (tp->t_state == TCPS_SYN_SENT)
 			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
 
 		TCP_PROBE5(send, NULL, tp, ip6, tp, th);
 		/* TODO: IPv6 IP6TOS_ECT bit on */
 		error = ip6_output(m,
 				   inp->in6p_outputopts,
 				   &inp->inp_route6,
 				   ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
 				   NULL, NULL, inp);
 
 		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
 			mtu = inp->inp_route6.ro_nh->nh_mtu;
 	}
 #endif				/* INET6 */
 #if defined(INET) && defined(INET6)
 	else
 #endif
 #ifdef INET
 	{
 		ip->ip_len = htons(m->m_pkthdr.len);
 #ifdef INET6
 		if (inp->inp_vflag & INP_IPV6PROTO)
 			ip->ip_ttl = in6_selecthlim(inp, NULL);
 #endif				/* INET6 */
 		rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
		/*
		 * If we do path MTU discovery, then we set DF on every
		 * packet. This might not be the best thing to do according
		 * to RFC3390 Section 2. However, the TCP hostcache mitigates
		 * the problem so it affects only the first TCP connection
		 * with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe
		 * fallback.
		 */
 		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
 			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
 			if (tp->t_port == 0 || len < V_tcp_minmss) {
 				ip->ip_off |= htons(IP_DF);
 			}
 		} else {
 			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
 		}
 
 		if (tp->t_state == TCPS_SYN_SENT)
 			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
 
 		TCP_PROBE5(send, NULL, tp, ip, tp, th);
 
 		error = ip_output(m,
 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
 				  inp->inp_options,
 #else
 				  NULL,
 #endif
 				  &inp->inp_route,
 				  ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
 				  inp);
 		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
 			mtu = inp->inp_route.ro_nh->nh_mtu;
 	}
 #endif				/* INET */
 
 out:
 	if (lgb) {
 		lgb->tlb_errno = error;
 		lgb = NULL;
 	}
 	/*
 	 * In transmit state, time the transmission and arrange for the
 	 * retransmit.  In persist state, just set snd_max.
 	 */
 	rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error,
 			rack_to_usec_ts(&tv),
 			rsm, add_flag, s_mb, s_moff, hw_tls, segsiz);
 	if (error == 0) {
 		if (rsm == NULL) {
 			if (rack->lt_bw_up == 0) {
 				rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv);
 				rack->r_ctl.lt_seq = tp->snd_una;
 				rack->lt_bw_up = 1;
 			} else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) {
 				/*
 				 * Need to record what we have since we are
 				 * approaching seq wrap.
 				 */
 				uint64_t tmark;
 
 				rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq);
 				rack->r_ctl.lt_seq = tp->snd_una;
 				tmark = tcp_tv_to_lusectick(&tv);
 				rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
 				rack->r_ctl.lt_timemark = tmark;
 			}
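			/*
			 * Note (assumption): lt_bw_bytes and lt_bw_time
			 * accumulate a long-term delivered-bytes/elapsed-time
			 * pair, presumably so a lifetime bandwidth estimate
			 * can later be derived as bytes / time.
			 */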
 		}
 		rack->forced_ack = 0;	/* If we send something zap the FA flag */
 		counter_u64_add(rack_total_bytes, len);
 		tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls);
 		if (rsm && doing_tlp) {
 			rack->rc_last_sent_tlp_past_cumack = 0;
 			rack->rc_last_sent_tlp_seq_valid = 1;
 			rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
 			rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
 		}
 		if (rack->rc_hw_nobuf) {
 			rack->rc_hw_nobuf = 0;
 			rack->r_ctl.rc_agg_delayed = 0;
 			rack->r_early = 0;
 			rack->r_late = 0;
 			rack->r_ctl.rc_agg_early = 0;
 		}
 		if (rsm && (doing_tlp == 0)) {
 			/* Set we retransmitted */
 			rack->rc_gp_saw_rec = 1;
 		} else {
 			if (cwnd_to_use > tp->snd_ssthresh) {
 				/* Set we sent in CA */
 				rack->rc_gp_saw_ca = 1;
 			} else {
 				/* Set we sent in SS */
 				rack->rc_gp_saw_ss = 1;
 			}
 		}
 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
 		    (tp->t_flags & TF_SACK_PERMIT) &&
 		    tp->rcv_numsacks > 0)
 			tcp_clean_dsack_blocks(tp);
 		tot_len_this_send += len;
 		if (len == 0) {
 			counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1);
 		} else {
 			int idx;
 
 			idx = (len / segsiz) + 3;
 			if (idx >= TCP_MSS_ACCT_ATIMER)
 				counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
 			else
 				counter_u64_add(rack_out_size[idx], 1);
 		}
 	}
 	if ((rack->rack_no_prr == 0) &&
 	    sub_from_prr &&
 	    (error == 0)) {
 		if (rack->r_ctl.rc_prr_sndcnt >= len)
 			rack->r_ctl.rc_prr_sndcnt -= len;
 		else
 			rack->r_ctl.rc_prr_sndcnt = 0;
 	}
 	sub_from_prr = 0;
 	if (doing_tlp) {
 		/* Make sure the TLP is added */
 		add_flag |= RACK_TLP;
 	} else if (rsm) {
		/* If it's a resend without TLP then it must not have the flag */
 		rsm->r_flags &= ~RACK_TLP;
 	}
 
 
 	if ((error == 0) &&
 	    (len > 0) &&
 	    (tp->snd_una == tp->snd_max))
 		rack->r_ctl.rc_tlp_rxt_last_time = cts;
 	{
 		tcp_seq startseq = tp->snd_nxt;
 
 		/* Track our lost count */
 		if (rsm && (doing_tlp == 0))
 			rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
 		/*
 		 * Advance snd_nxt over sequence space of this segment.
 		 */
 		if (error)
 			/* We don't log or do anything with errors */
 			goto nomore;
 		if (doing_tlp == 0) {
 			if (rsm == NULL) {
 				/*
 				 * Not a retransmission of some
 				 * sort, new data is going out so
 				 * clear our TLP count and flag.
 				 */
 				rack->rc_tlp_in_progress = 0;
 				rack->r_ctl.rc_tlp_cnt_out = 0;
 			}
 		} else {
 			/*
 			 * We have just sent a TLP, mark that it is true
 			 * and make sure our in progress is set so we
 			 * continue to check the count.
 			 */
 			rack->rc_tlp_in_progress = 1;
 			rack->r_ctl.rc_tlp_cnt_out++;
 		}
 		if (flags & (TH_SYN | TH_FIN)) {
 			if (flags & TH_SYN)
 				tp->snd_nxt++;
 			if (flags & TH_FIN) {
 				tp->snd_nxt++;
 				tp->t_flags |= TF_SENTFIN;
 			}
 		}
 		/* In the ENOBUFS case we do *not* update snd_max */
 		if (sack_rxmit)
 			goto nomore;
 
 		tp->snd_nxt += len;
 		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
 			if (tp->snd_una == tp->snd_max) {
 				/*
 				 * Update the time we just added data since
 				 * none was outstanding.
 				 */
 				rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__);
 				tp->t_acktime = ticks;
 			}
 			tp->snd_max = tp->snd_nxt;
 			if (rack->rc_new_rnd_needed) {
				/*
				 * Update the rnd to start ticking; note
				 * that from a time perspective all of
				 * the preceding idle time is "in the round".
				 */
 				rack->rc_new_rnd_needed = 0;
 				rack->r_ctl.roundends = tp->snd_max;
 			}
 			/*
 			 * Time this transmission if not a retransmission and
 			 * not currently timing anything.
 			 * This is only relevant in case of switching back to
 			 * the base stack.
 			 */
 			if (tp->t_rtttime == 0) {
 				tp->t_rtttime = ticks;
 				tp->t_rtseq = startseq;
 				KMOD_TCPSTAT_INC(tcps_segstimed);
 			}
 			if (len &&
 			    ((tp->t_flags & TF_GPUTINPROG) == 0))
 				rack_start_gp_measurement(tp, rack, startseq, sb_offset);
 		}
		/*
		 * If we are doing fast output (FO) we need to update the
		 * mbuf position and subtract what we just sent from
		 * left_to_send. The DSACK case happens when the peer sends
		 * us duplicate information and we thus want to send a DSACK.
		 *
		 * XXXRRS: This brings to mind a question: when we send a
		 * DSACK block, is TSO turned off? If not then we are going
		 * to echo multiple DSACK blocks out (with the TSO), which we
		 * should not be doing.
		 */
 		if (rack->r_fast_output && len) {
 			if (rack->r_ctl.fsb.left_to_send > len)
 				rack->r_ctl.fsb.left_to_send -= len;
 			else
 				rack->r_ctl.fsb.left_to_send = 0;
 			if (rack->r_ctl.fsb.left_to_send < segsiz)
 				rack->r_fast_output = 0;
 			if (rack->r_fast_output) {
 				rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
 				rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
 				rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
 			}
 		}
 	}
 nomore:
 	if (error) {
 		rack->r_ctl.rc_agg_delayed = 0;
 		rack->r_early = 0;
 		rack->r_late = 0;
 		rack->r_ctl.rc_agg_early = 0;
 		SOCKBUF_UNLOCK_ASSERT(sb);	/* Check gotos. */
 		/*
 		 * Failures do not advance the seq counter above. For the
 		 * case of ENOBUFS we will fall out and retry in 1ms with
 		 * the hpts. Everything else will just have to retransmit
 		 * with the timer.
 		 *
 		 * In any case, we do not want to loop around for another
 		 * send without a good reason.
 		 */
 		sendalot = 0;
 		switch (error) {
 		case EPERM:
 			tp->t_softerror = error;
 #ifdef TCP_ACCOUNTING
 			crtsc = get_cyclecount();
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
 			}
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
 			}
 			sched_unpin();
 #endif
 			return (error);
 		case ENOBUFS:
			/*
			 * Pace us right away so we retry in a short
			 * time.
			 */
 			if (rack->r_ctl.crte != NULL) {
 				tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
 				if (tcp_bblogging_on(rack->rc_tp))
 					rack_log_queue_level(tp, rack, len, &tv, cts);
 			} else
 				tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
 			slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
 			if (rack->rc_enobuf < 0x7f)
 				rack->rc_enobuf++;
 			if (slot < (10 * HPTS_USEC_IN_MSEC))
 				slot = 10 * HPTS_USEC_IN_MSEC;
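			/*
			 * Illustrative: the first ten ENOBUFS events all end
			 * up with a 10ms retry delay because of the floor
			 * above; after that the delay grows by 1ms per event,
			 * topping out at 128ms since rc_enobuf saturates at
			 * 0x7f.
			 */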
 			if (rack->r_ctl.crte != NULL) {
 				counter_u64_add(rack_saw_enobuf_hw, 1);
 				tcp_rl_log_enobuf(rack->r_ctl.crte);
 			}
 			counter_u64_add(rack_saw_enobuf, 1);
 			goto enobufs;
 		case EMSGSIZE:
			/*
			 * For some reason the interface we used initially
			 * to send segments changed to another or lowered
			 * its MTU. If TSO was active we either got an
			 * interface without TSO capabilities or TSO was
			 * turned off. If we obtained the MTU from ip_output()
			 * then update it and try again.
			 */
 			if (tso)
 				tp->t_flags &= ~TF_TSO;
 			if (mtu != 0) {
 				int saved_mtu;
 
 				saved_mtu = tp->t_maxseg;
 				tcp_mss_update(tp, -1, mtu, NULL, NULL);
 				if (saved_mtu > tp->t_maxseg) {
 					goto again;
 				}
 			}
 			slot = 10 * HPTS_USEC_IN_MSEC;
 			rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
 #ifdef TCP_ACCOUNTING
 			crtsc = get_cyclecount();
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
 			}
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
 			}
 			sched_unpin();
 #endif
 			return (error);
 		case ENETUNREACH:
 			counter_u64_add(rack_saw_enetunreach, 1);
 		case EHOSTDOWN:
 		case EHOSTUNREACH:
 		case ENETDOWN:
 			if (TCPS_HAVERCVDSYN(tp->t_state)) {
 				tp->t_softerror = error;
 			}
 			/* FALLTHROUGH */
 		default:
 			slot = 10 * HPTS_USEC_IN_MSEC;
 			rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
 #ifdef TCP_ACCOUNTING
 			crtsc = get_cyclecount();
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
 			}
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
 			}
 			sched_unpin();
 #endif
 			return (error);
 		}
 	} else {
 		rack->rc_enobuf = 0;
 		if (IN_FASTRECOVERY(tp->t_flags) && rsm)
 			rack->r_ctl.retran_during_recovery += len;
 	}
 	KMOD_TCPSTAT_INC(tcps_sndtotal);
 
 	/*
 	 * Data sent (as far as we can tell). If this advertises a larger
 	 * window than any other segment, then remember the size of the
 	 * advertised window. Any pending ACK has now been sent.
 	 */
 	if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
 		tp->rcv_adv = tp->rcv_nxt + recwin;
 
 	tp->last_ack_sent = tp->rcv_nxt;
 	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
 enobufs:
 	if (sendalot) {
 		/* Do we need to turn off sendalot? */
 		if (rack->r_ctl.rc_pace_max_segs &&
 		    (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) {
 			/* We hit our max. */
 			sendalot = 0;
 		} else if ((rack->rc_user_set_max_segs) &&
 			   (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) {
 			/* We hit the user defined max */
 			sendalot = 0;
 		}
 	}
 	if ((error == 0) && (flags & TH_FIN))
 		tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN);
 	if (flags & TH_RST) {
 		/*
 		 * We don't send again after sending a RST.
 		 */
 		slot = 0;
 		sendalot = 0;
 		if (error == 0)
 			tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
 	} else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
		/*
		 * Get our pacing rate. If an error occurred in sending
		 * (ENOBUFS) we would reach this else-if with slot already
		 * preset; other errors return.
		 */
 		slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz);
 	}
 	if (rsm &&
 	    (rsm->r_flags & RACK_HAS_SYN) == 0 &&
 	    rack->use_rack_rr) {
		/* It's a retransmit and we use the rack cheat? */
 		if ((slot == 0) ||
 		    (rack->rc_always_pace == 0) ||
 		    (rack->r_rr_config == 1)) {
 			/*
 			 * We have no pacing set or we
 			 * are using old-style rack or
 			 * we are overridden to use the old 1ms pacing.
 			 */
 			slot = rack->r_ctl.rc_min_to;
 		}
 	}
 	/* We have sent clear the flag */
 	rack->r_ent_rec_ns = 0;
 	if (rack->r_must_retran) {
 		if (rsm) {
 			rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
 			if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
 				/*
 				 * We have retransmitted all.
 				 */
 				rack->r_must_retran = 0;
 				rack->r_ctl.rc_out_at_rto = 0;
 			}
 		} else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
 			/*
 			 * Sending new data will also kill
 			 * the loop.
 			 */
 			rack->r_must_retran = 0;
 			rack->r_ctl.rc_out_at_rto = 0;
 		}
 	}
 	rack->r_ctl.fsb.recwin = recwin;
 	if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
 	    SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
		/*
		 * We hit an RTO and have now passed the snd_max we had
		 * at the RTO; clear all the WAS flags.
		 */
 		tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
 	}
 	if (slot) {
 		/* set the rack tcb into the slot N */
 		if ((error == 0) &&
 		    rack_use_rfo &&
 		    ((flags & (TH_SYN|TH_FIN)) == 0) &&
 		    (rsm == NULL) &&
 		    (tp->snd_nxt == tp->snd_max) &&
 		    (ipoptlen == 0) &&
 		    (tp->rcv_numsacks == 0) &&
 		    rack->r_fsb_inited &&
 		    TCPS_HAVEESTABLISHED(tp->t_state) &&
 		    ((IN_RECOVERY(tp->t_flags)) == 0) &&
 		    (rack->r_must_retran == 0) &&
 		    ((tp->t_flags & TF_NEEDFIN) == 0) &&
 		    (len > 0) && (orig_len > 0) &&
 		    (orig_len > len) &&
 		    ((orig_len - len) >= segsiz) &&
 		    ((optlen == 0) ||
 		     ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
 			/* We can send at least one more MSS using our fsb */
 			rack_setup_fast_output(tp, rack, sb, len, orig_len,
 					       segsiz, pace_max_seg, hw_tls, flags);
 		} else
 			rack->r_fast_output = 0;
 		rack_log_fsb(rack, tp, so, flags,
 			     ipoptlen, orig_len, len, error,
 			     (rsm == NULL), optlen, __LINE__, 2);
 	} else if (sendalot) {
 		int ret;
 
 		sack_rxmit = 0;
 		if ((error == 0) &&
 		    rack_use_rfo &&
 		    ((flags & (TH_SYN|TH_FIN)) == 0) &&
 		    (rsm == NULL) &&
 		    (ipoptlen == 0) &&
 		    (tp->rcv_numsacks == 0) &&
 		    (tp->snd_nxt == tp->snd_max) &&
 		    (rack->r_must_retran == 0) &&
 		    rack->r_fsb_inited &&
 		    TCPS_HAVEESTABLISHED(tp->t_state) &&
 		    ((IN_RECOVERY(tp->t_flags)) == 0) &&
 		    ((tp->t_flags & TF_NEEDFIN) == 0) &&
 		    (len > 0) && (orig_len > 0) &&
 		    (orig_len > len) &&
 		    ((orig_len - len) >= segsiz) &&
 		    ((optlen == 0) ||
 		     ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) {
 			/* we can use fast_output for more */
 			rack_setup_fast_output(tp, rack, sb, len, orig_len,
 					       segsiz, pace_max_seg, hw_tls, flags);
 			if (rack->r_fast_output) {
 				error = 0;
 				ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error);
 				if (ret >= 0)
 					return (ret);
				else if (error)
					goto nomore;
 			}
 		}
 		goto again;
 	}
 	/* Assure when we leave that snd_nxt will point to top */
 skip_all_send:
 	if (SEQ_GT(tp->snd_max, tp->snd_nxt))
 		tp->snd_nxt = tp->snd_max;
 	rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
 #ifdef TCP_ACCOUNTING
 	crtsc = get_cyclecount() - ts_val;
 	if (tot_len_this_send) {
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_cnt_counters[SND_OUT_DATA]++;
 		}
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
 		}
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz);
 		}
 	} else {
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_cnt_counters[SND_OUT_ACK]++;
 		}
 		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 			tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
 		}
 	}
 	sched_unpin();
 #endif
 	if (error == ENOBUFS)
 		error = 0;
 	return (error);
 }
 
 static void
 rack_update_seg(struct tcp_rack *rack)
 {
 	uint32_t orig_val;
 
 	orig_val = rack->r_ctl.rc_pace_max_segs;
 	rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
 	if (orig_val != rack->r_ctl.rc_pace_max_segs)
 		rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0);
 }
 
 static void
 rack_mtu_change(struct tcpcb *tp)
 {
 	/*
 	 * The MSS may have changed
 	 */
 	struct tcp_rack *rack;
 	struct rack_sendmap *rsm;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
		/*
		 * The MTU has changed; we need to resend everything
		 * since all we have sent is lost. We first fix
		 * up the MTU though.
		 */
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 		/* We treat this like a full retransmit timeout without the cwnd adjustment */
 		rack_remxt_tmr(tp);
 		rack->r_fast_output = 0;
 		rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
 						rack->r_ctl.rc_sacked);
 		rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
 		rack->r_must_retran = 1;
		/* Mark all inflight as needing to be rxt'd */
 		TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
 			rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG);
 		}
 	}
 	sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
 	/* We don't use snd_nxt to retransmit */
 	tp->snd_nxt = tp->snd_max;
 }
 
 static int
 rack_set_dgp(struct tcp_rack *rack)
 {
 	/* pace_always=1 */
 	if (rack->rc_always_pace == 0) {
 		if (tcp_can_enable_pacing() == 0)
 			return (EBUSY);
 	}
 	rack->rc_fillcw_apply_discount = 0;
 	rack->dgp_on = 1;
 	rack->rc_always_pace = 1;
 	rack->use_fixed_rate = 0;
 	if (rack->gp_ready)
 		rack_set_cc_pacing(rack);
 	rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	rack->rack_attempt_hdwr_pace = 0;
 	/* rxt settings */
 	rack->full_size_rxt = 1;
 	rack->shape_rxt_to_pacing_min  = 0;
 	/* cmpack=1 */
 	rack->r_use_cmp_ack = 1;
 	if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
 	    rack->r_use_cmp_ack)
 		rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
 	/* scwnd=1 */
 	rack->rack_enable_scwnd = 1;
 	/* dynamic=100 */
 	rack->rc_gp_dyn_mul = 1;
 	/* gp_inc_ca */
 	rack->r_ctl.rack_per_of_gp_ca = 100;
 	/* rrr_conf=3 */
 	rack->r_rr_config = 3;
 	/* npush=2 */
 	rack->r_ctl.rc_no_push_at_mrtt = 2;
 	/* fillcw=1 */
 	if (rack->r_cwnd_was_clamped == 0) {
 		rack->rc_pace_to_cwnd = 1;
 	} else {
 		rack->rc_pace_to_cwnd = 0;
		/* Reset all multipliers to 100.0 so we use just the measured bw */
 		rack->r_ctl.rack_per_of_gp_ss = 100;
 		rack->r_ctl.rack_per_of_gp_ca = 100;
 	}
 	rack->rc_pace_fill_if_rttin_range = 0;
 	rack->rtt_limit_mul = 0;
 	/* noprr=1 */
 	rack->rack_no_prr = 1;
 	/* lscwnd=1 */
 	rack->r_limit_scw = 1;
 	/* gp_inc_rec */
 	rack->r_ctl.rack_per_of_gp_rec = 90;
 	rack_client_buffer_level_set(rack);
 	return (0);
 }
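
/*
 * Note: rack_set_dgp() is the common core that the DGP profiles in
 * rack_set_profile() below build on; the profiles differ mainly in the
 * client-buffer-level aggressiveness (rc_dgp_bl_agg) they select before
 * calling it.
 */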
 
 
 
 static int
 rack_set_profile(struct tcp_rack *rack, int prof)
 {
 	int err = EINVAL;
 	if (prof == 1) {
 		/*
 		 * Profile 1 is "standard" DGP. It ignores
 		 * client buffer level.
 		 */
 		rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL0;
 		err = rack_set_dgp(rack);
 		if (err)
 			return (err);
 	} else if (prof == 2) {
 		/*
 		 * Profile 2 is DGP. Less aggressive with
 		 * respect to client buffer level.
 		 */
 		rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL1;
 		err = rack_set_dgp(rack);
 		if (err)
 			return (err);
 	} else if (prof == 3) {
		/*
		 * Profile 3 is DGP. Even less aggressive with
		 * respect to client buffer level.
		 */
 		rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL2;
 		err = rack_set_dgp(rack);
 		if (err)
 			return (err);
 	} else if (prof == 4) {
 		/*
 		 * Profile 4 is DGP with the most responsiveness
 		 * to client buffer level.
 		 */
 		rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL3;
 		err = rack_set_dgp(rack);
 		if (err)
 			return (err);
 	} else if (prof == 5) {
 		err = rack_set_dgp(rack);
 		if (err)
 			return (err);
		/*
		 * By turning DGP off we change the rate
		 * picked to be only the one that the cwnd and
		 * RTT give us.
		 */
 		rack->dgp_on = 0;
 	} else if (prof == 6) {
 		err = rack_set_dgp(rack);
 		if (err)
 			return (err);
		/*
		 * Profile 6 tweaks DGP so that it will apply to
		 * fill-cw the same settings that profile 5 does
		 * to replace DGP. It then gets max(dgp-rate, fillcw(discounted)).
		 */
 		rack->rc_fillcw_apply_discount = 1;
 	} else if (prof == 0) {
 		/* This changes things back to the default settings */
 		rack->dgp_on = 0;
 		rack->rc_hybrid_mode = 0;
 		err = 0;
 		if (rack_fill_cw_state)
 			rack->rc_pace_to_cwnd = 1;
 		else
 			rack->rc_pace_to_cwnd = 0;
 		if (rack->rc_always_pace) {
 			tcp_decrement_paced_conn();
 			rack_undo_cc_pacing(rack);
 			rack->rc_always_pace = 0;
 		}
 		if (rack_pace_every_seg && tcp_can_enable_pacing()) {
 			rack->rc_always_pace = 1;
 			if (rack->rack_hibeta)
 				rack_set_cc_pacing(rack);
 		} else
 			rack->rc_always_pace = 0;
 		if (rack_dsack_std_based & 0x1) {
 			/* Basically this means all rack timers are at least (srtt + 1/4 srtt) */
 			rack->rc_rack_tmr_std_based = 1;
 		}
 		if (rack_dsack_std_based & 0x2) {
 			/* Basically this means  rack timers are extended based on dsack by up to (2 * srtt) */
 			rack->rc_rack_use_dsack = 1;
 		}
 		if (rack_use_cmp_acks)
 			rack->r_use_cmp_ack = 1;
 		else
 			rack->r_use_cmp_ack = 0;
 		if (rack_disable_prr)
 			rack->rack_no_prr = 1;
 		else
 			rack->rack_no_prr = 0;
 		if (rack_gp_no_rec_chg)
 			rack->rc_gp_no_rec_chg = 1;
 		else
 			rack->rc_gp_no_rec_chg = 0;
 		if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
 			rack->r_mbuf_queue = 1;
 			if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
 				rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
 			rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 		} else {
 			rack->r_mbuf_queue = 0;
 			rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 		}
 		if (rack_enable_shared_cwnd)
 			rack->rack_enable_scwnd = 1;
 		else
 			rack->rack_enable_scwnd = 0;
 		if (rack_do_dyn_mul) {
			/* When dynamic adjustment is on, CA needs to start at 100% */
 			rack->rc_gp_dyn_mul = 1;
 			if (rack_do_dyn_mul >= 100)
 				rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
 		} else {
 			rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
 			rack->rc_gp_dyn_mul = 0;
 		}
 		rack->r_rr_config = 0;
 		rack->r_ctl.rc_no_push_at_mrtt = 0;
 		rack->rc_pace_to_cwnd = 0;
 		rack->rc_pace_fill_if_rttin_range = 0;
 		rack->rtt_limit_mul = 0;
 
 		if (rack_enable_hw_pacing)
 			rack->rack_hdw_pace_ena = 1;
 		else
 			rack->rack_hdw_pace_ena = 0;
 		if (rack_disable_prr)
 			rack->rack_no_prr = 1;
 		else
 			rack->rack_no_prr = 0;
 		if (rack_limits_scwnd)
 			rack->r_limit_scw  = 1;
 		else
 			rack->r_limit_scw  = 0;
 		rack_init_retransmit_value(rack, rack_rxt_controls);
 		err = 0;
 	}
 	return (err);
 }
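
/*
 * Illustrative usage (not part of the stack): a profile is selected from
 * userland with the TCP_RACK_PROFILE socket option once the connection is
 * on this stack, roughly:
 *
 *	int prof = 1;		// "standard" DGP, ignores client buffer level
 *	setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE, &prof, sizeof(prof));
 *
 * The value lands in rack_process_option() below; profile 0 restores the
 * sysctl-driven defaults.
 */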
 
 static int
 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval)
 {
 	struct deferred_opt_list *dol;
 
 	dol = malloc(sizeof(struct deferred_opt_list),
 		     M_TCPFSB, M_NOWAIT|M_ZERO);
 	if (dol == NULL) {
		/*
		 * No space, yikes -- fail out.
		 */
 		return (0);
 	}
 	dol->optname = sopt_name;
 	dol->optval = loptval;
 	TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
 	return (1);
 }
 
 static int
 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid)
 {
 #ifdef TCP_REQUEST_TRK
 	struct tcp_sendfile_track *sft;
 	struct timeval tv;
 	tcp_seq seq;
 	int err;
 
 	microuptime(&tv);
 
	/*
	 * If BB logging is not on we need to look at the DTL flag.
	 * If it's on already then those reasons override the DTL input.
	 * We do this with any request: you can turn DTL on, but it does
	 * not turn off, at least not from hybrid pacing requests.
	 */
 	if (tcp_bblogging_on(rack->rc_tp) == 0) {
 		if (hybrid->hybrid_flags & TCP_HYBRID_PACING_DTL) {
 			/* Turn on BB point logging  */
 			tcp_set_bblog_state(rack->rc_tp, TCP_LOG_VIA_BBPOINTS,
 					    TCP_BBPOINT_REQ_LEVEL_LOGGING);
 		}
 	}
 	/* Make sure no fixed rate is on */
 	rack->use_fixed_rate = 0;
 	rack->r_ctl.rc_fixed_pacing_rate_rec = 0;
 	rack->r_ctl.rc_fixed_pacing_rate_ca = 0;
 	rack->r_ctl.rc_fixed_pacing_rate_ss = 0;
 	/* Now allocate or find our entry that will have these settings */
 	sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0);
 	if (sft == NULL) {
 		rack->rc_tp->tcp_hybrid_error++;
 		/* no space, where would it have gone? */
 		seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc;
 		rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0);
 		return (ENOSPC);
 	}
 	/* The seq will be snd_una + everything in the buffer */
 	seq = sft->start_seq;
 	if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) {
 		/* Disabling hybrid pacing */
 		if (rack->rc_hybrid_mode) {
 			rack_set_profile(rack, 0);
 			rack->rc_tp->tcp_hybrid_stop++;
 		}
 		rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0);
 		return (0);
 	}
 	if (rack->dgp_on == 0) {
 		/*
 		 * If we have not yet turned DGP on, do so
 		 * now setting pure DGP mode, no buffer level
 		 * response.
 		 */
 		if ((err = rack_set_profile(rack, 1)) != 0){
 			/* Failed to turn pacing on */
 			rack->rc_tp->tcp_hybrid_error++;
 			rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0);
 			return (err);
 		}
 	}
 	/* Now set in our flags */
 	sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET;
 	if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR)
 		sft->cspr = hybrid->cspr;
 	else
 		sft->cspr = 0;
 	if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS)
 		sft->hint_maxseg = hybrid->hint_maxseg;
 	else
 		sft->hint_maxseg = 0;
 	rack->rc_hybrid_mode = 1;
 	rack->rc_tp->tcp_hybrid_start++;
 	rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0);
 	return (0);
 #else
 	return (ENOTSUP);
 #endif
 }
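
/*
 * Illustrative usage (assumption on exact field semantics): hybrid pacing
 * requests arrive via the TCP_HYBRID_PACING socket option as a
 * struct tcp_hybrid_req, roughly:
 *
 *	struct tcp_hybrid_req hr;
 *
 *	memset(&hr, 0, sizeof(hr));
 *	hr.hybrid_flags = TCP_HYBRID_PACING_ENABLE | TCP_HYBRID_PACING_CSPR;
 *	hr.cspr = rate_hint;	// client supplied pacing rate
 *	setsockopt(fd, IPPROTO_TCP, TCP_HYBRID_PACING, &hr, sizeof(hr));
 *
 * The fields used above mirror the ones process_hybrid_pacing() reads
 * (hybrid_flags, cspr, hint_maxseg and the embedded request).
 */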
 
 static int
 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
 		    uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid)
 
 {
 	struct epoch_tracker et;
 	struct sockopt sopt;
 	struct cc_newreno_opts opt;
 	uint64_t val;
 	int error = 0;
 	uint16_t ca, ss;
 
 	switch (sopt_name) {
 	case TCP_RACK_SET_RXT_OPTIONS:
 		if ((optval >= 0) && (optval <= 2)) {
 			rack_init_retransmit_value(rack, optval);
 		} else {
			/*
			 * You must send in 0, 1 or 2; all else is
			 * invalid.
			 */
 			error = EINVAL;
 		}
 		break;
 	case TCP_RACK_DSACK_OPT:
 		RACK_OPTS_INC(tcp_rack_dsack_opt);
 		if (optval & 0x1) {
 			rack->rc_rack_tmr_std_based = 1;
 		} else {
 			rack->rc_rack_tmr_std_based = 0;
 		}
 		if (optval & 0x2) {
 			rack->rc_rack_use_dsack = 1;
 		} else {
 			rack->rc_rack_use_dsack = 0;
 		}
 		rack_log_dsack_event(rack, 5, __LINE__, 0, 0);
 		break;
 	case TCP_RACK_PACING_DIVISOR:
 		RACK_OPTS_INC(tcp_rack_pacing_divisor);
 		if (optval == 0) {
 			rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
 		} else {
 			if (optval < RL_MIN_DIVISOR)
 				rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR;
 			else
 				rack->r_ctl.pace_len_divisor = optval;
 		}
 		break;
 	case TCP_RACK_HI_BETA:
 		RACK_OPTS_INC(tcp_rack_hi_beta);
 		if (optval > 0) {
 			rack->rack_hibeta = 1;
 			if ((optval >= 50) &&
 			    (optval <= 100)) {
 				/*
 				 * User wants to set a custom beta.
 				 */
 				rack->r_ctl.saved_hibeta = optval;
 				if (rack->rc_pacing_cc_set)
 					rack_undo_cc_pacing(rack);
 				rack->r_ctl.rc_saved_beta.beta = optval;
 			}
 			if (rack->rc_pacing_cc_set == 0)
 				rack_set_cc_pacing(rack);
 		} else {
 			rack->rack_hibeta = 0;
 			if (rack->rc_pacing_cc_set)
 				rack_undo_cc_pacing(rack);
 		}
 		break;
 	case TCP_RACK_PACING_BETA:
 		RACK_OPTS_INC(tcp_rack_beta);
 		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
 			/* This only works for newreno. */
 			error = EINVAL;
 			break;
 		}
 		if (rack->rc_pacing_cc_set) {
			/*
			 * Set them into the real CC module;
			 * what's in the rack pcb are the old values
			 * to be used on restoral.
			 */
 			sopt.sopt_dir = SOPT_SET;
 			opt.name = CC_NEWRENO_BETA;
 			opt.val = optval;
 			if (CC_ALGO(tp)->ctl_output != NULL)
 				error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
 			else {
 				error = ENOENT;
 				break;
 			}
 		} else {
 			/*
 			 * Not pacing yet so set it into our local
 			 * rack pcb storage.
 			 */
 			rack->r_ctl.rc_saved_beta.beta = optval;
 		}
 		break;
 	case TCP_RACK_TIMER_SLOP:
 		RACK_OPTS_INC(tcp_rack_timer_slop);
 		rack->r_ctl.timer_slop = optval;
 		if (rack->rc_tp->t_srtt) {
 			/*
 			 * If we have an SRTT lets update t_rxtcur
 			 * to have the new slop.
 			 */
 			RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
 					   rack_rto_min, rack_rto_max,
 					   rack->r_ctl.timer_slop);
 		}
 		break;
 	case TCP_RACK_PACING_BETA_ECN:
 		RACK_OPTS_INC(tcp_rack_beta_ecn);
 		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
 			/* This only works for newreno. */
 			error = EINVAL;
 			break;
 		}
 		if (rack->rc_pacing_cc_set) {
			/*
			 * Set them into the real CC module;
			 * what's in the rack pcb are the old values
			 * to be used on restoral.
			 */
 			sopt.sopt_dir = SOPT_SET;
 			opt.name = CC_NEWRENO_BETA_ECN;
 			opt.val = optval;
 			if (CC_ALGO(tp)->ctl_output != NULL)
 				error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
 			else
 				error = ENOENT;
 		} else {
 			/*
 			 * Not pacing yet so set it into our local
 			 * rack pcb storage.
 			 */
 			rack->r_ctl.rc_saved_beta.beta_ecn = optval;
 			rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
 		}
 		break;
 	case TCP_DEFER_OPTIONS:
 		RACK_OPTS_INC(tcp_defer_opt);
 		if (optval) {
 			if (rack->gp_ready) {
 				/* Too late */
 				error = EINVAL;
 				break;
 			}
 			rack->defer_options = 1;
 		} else
 			rack->defer_options = 0;
 		break;
 	case TCP_RACK_MEASURE_CNT:
 		RACK_OPTS_INC(tcp_rack_measure_cnt);
 		if (optval && (optval <= 0xff)) {
 			rack->r_ctl.req_measurements = optval;
 		} else
 			error = EINVAL;
 		break;
 	case TCP_REC_ABC_VAL:
 		RACK_OPTS_INC(tcp_rec_abc_val);
 		if (optval > 0)
 			rack->r_use_labc_for_rec = 1;
 		else
 			rack->r_use_labc_for_rec = 0;
 		break;
 	case TCP_RACK_ABC_VAL:
 		RACK_OPTS_INC(tcp_rack_abc_val);
 		if ((optval > 0) && (optval < 255))
 			rack->rc_labc = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_HDWR_UP_ONLY:
 		RACK_OPTS_INC(tcp_pacing_up_only);
 		if (optval)
 			rack->r_up_only = 1;
 		else
 			rack->r_up_only = 0;
 		break;
 	case TCP_PACING_RATE_CAP:
 		RACK_OPTS_INC(tcp_pacing_rate_cap);
 		rack->r_ctl.bw_rate_cap = loptval;
 		break;
 	case TCP_HYBRID_PACING:
 		if (hybrid == NULL) {
 			error = EINVAL;
 			break;
 		}
 		error = process_hybrid_pacing(rack, hybrid);
 		break;
 	case TCP_RACK_PROFILE:
 		RACK_OPTS_INC(tcp_profile);
 		error = rack_set_profile(rack, optval);
 		break;
 	case TCP_USE_CMP_ACKS:
 		RACK_OPTS_INC(tcp_use_cmp_acks);
 		if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) {
			/* You can't turn it off once it's on! */
 			error = EINVAL;
 		} else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
 			rack->r_use_cmp_ack = 1;
 			rack->r_mbuf_queue = 1;
 			tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 		}
 		if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
 			tp->t_flags2 |= TF2_MBUF_ACKCMP;
 		break;
 	case TCP_SHARED_CWND_TIME_LIMIT:
 		RACK_OPTS_INC(tcp_lscwnd);
 		if (optval)
 			rack->r_limit_scw = 1;
 		else
 			rack->r_limit_scw = 0;
 		break;
 	case TCP_RACK_DGP_IN_REC:
 		RACK_OPTS_INC(tcp_dgp_in_rec);
 		if (optval)
 			rack->r_ctl.full_dgp_in_rec = 1;
 		else
 			rack->r_ctl.full_dgp_in_rec = 0;
 		break;
 	case TCP_RXT_CLAMP:
 		RACK_OPTS_INC(tcp_rxt_clamp);
 		rack_translate_clamp_value(rack, optval);
 		break;
	case TCP_RACK_PACE_TO_FILL:
 		RACK_OPTS_INC(tcp_fillcw);
 		if (optval == 0)
 			rack->rc_pace_to_cwnd = 0;
 		else {
 			rack->rc_pace_to_cwnd = 1;
 			if (optval > 1)
 				rack->r_fill_less_agg = 1;
 		}
 		if ((optval >= rack_gp_rtt_maxmul) &&
 		    rack_gp_rtt_maxmul &&
 		    (optval < 0xf)) {
 			rack->rc_pace_fill_if_rttin_range = 1;
 			rack->rtt_limit_mul = optval;
 		} else {
 			rack->rc_pace_fill_if_rttin_range = 0;
 			rack->rtt_limit_mul = 0;
 		}
 		break;
 	case TCP_RACK_NO_PUSH_AT_MAX:
 		RACK_OPTS_INC(tcp_npush);
 		if (optval == 0)
 			rack->r_ctl.rc_no_push_at_mrtt = 0;
 		else if (optval < 0xff)
 			rack->r_ctl.rc_no_push_at_mrtt = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_SHARED_CWND_ENABLE:
 		RACK_OPTS_INC(tcp_rack_scwnd);
 		if (optval == 0)
 			rack->rack_enable_scwnd = 0;
 		else
 			rack->rack_enable_scwnd = 1;
 		break;
 	case TCP_RACK_MBUF_QUEUE:
 		/* Now do we use the LRO mbuf-queue feature */
 		RACK_OPTS_INC(tcp_rack_mbufq);
 		if (optval || rack->r_use_cmp_ack)
 			rack->r_mbuf_queue = 1;
 		else
 			rack->r_mbuf_queue = 0;
 		if  (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
 			tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 		else
 			tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 		break;
 	case TCP_RACK_NONRXT_CFG_RATE:
 		RACK_OPTS_INC(tcp_rack_cfg_rate);
 		if (optval == 0)
 			rack->rack_rec_nonrxt_use_cr = 0;
 		else
 			rack->rack_rec_nonrxt_use_cr = 1;
 		break;
 	case TCP_NO_PRR:
 		RACK_OPTS_INC(tcp_rack_noprr);
 		if (optval == 0)
 			rack->rack_no_prr = 0;
 		else if (optval == 1)
 			rack->rack_no_prr = 1;
 		else if (optval == 2)
 			rack->no_prr_addback = 1;
 		else
 			error = EINVAL;
 		break;
 	case TCP_TIMELY_DYN_ADJ:
 		RACK_OPTS_INC(tcp_timely_dyn);
 		if (optval == 0)
 			rack->rc_gp_dyn_mul = 0;
 		else {
 			rack->rc_gp_dyn_mul = 1;
 			if (optval >= 100) {
				/*
				 * If the user sets something 100 or more
				 * it's the gp_ca value.
				 */
 				rack->r_ctl.rack_per_of_gp_ca  = optval;
 			}
 		}
 		break;
 	case TCP_RACK_DO_DETECTION:
 		RACK_OPTS_INC(tcp_rack_do_detection);
 		if (optval == 0)
 			rack->do_detection = 0;
 		else
 			rack->do_detection = 1;
 		break;
 	case TCP_RACK_TLP_USE:
 		if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) {
 			error = EINVAL;
 			break;
 		}
 		RACK_OPTS_INC(tcp_tlp_use);
 		rack->rack_tlp_threshold_use = optval;
 		break;
 	case TCP_RACK_TLP_REDUCE:
 		/* RACK TLP cwnd reduction (bool) */
 		RACK_OPTS_INC(tcp_rack_tlp_reduce);
 		rack->r_ctl.rc_tlp_cwnd_reduce = optval;
 		break;
 		/*  Pacing related ones */
 	case TCP_RACK_PACE_ALWAYS:
		/*
		 * Zero is the old rack method, 1 is the new
		 * method using a pacing rate.
		 */
 		RACK_OPTS_INC(tcp_rack_pace_always);
 		if (optval > 0) {
 			if (rack->rc_always_pace) {
 				error = EALREADY;
 				break;
 			} else if (tcp_can_enable_pacing()) {
 				rack->rc_always_pace = 1;
 				if (rack->rack_hibeta)
 					rack_set_cc_pacing(rack);
 			}
 			else {
 				error = ENOSPC;
 				break;
 			}
 		} else {
 			if (rack->rc_always_pace) {
 				tcp_decrement_paced_conn();
 				rack->rc_always_pace = 0;
 				rack_undo_cc_pacing(rack);
 			}
 		}
 		if  (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
 			tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 		else
 			tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 		/* A rate may be set irate or other, if so set seg size */
 		rack_update_seg(rack);
 		break;
 	case TCP_BBR_RACK_INIT_RATE:
 		RACK_OPTS_INC(tcp_initial_rate);
 		val = optval;
 		/* Change from kbits per second to bytes per second */
 		val *= 1000;
 		val /= 8;
 		rack->r_ctl.init_rate = val;
 		if (rack->rc_init_win != rack_default_init_window) {
 			uint32_t win, snt;
 
			/*
			 * Options don't always get applied
			 * in the order you think. So in order
			 * to assure we update the cwnd we need
			 * to check and see if we are still
			 * at a point where we should raise the cwnd.
			 */
 			win = rc_init_window(rack);
 			if (SEQ_GT(tp->snd_max, tp->iss))
 				snt = tp->snd_max - tp->iss;
 			else
 				snt = 0;
 			if ((snt < win) &&
 			    (tp->snd_cwnd < win))
 				tp->snd_cwnd = win;
 		}
 		if (rack->rc_always_pace)
 			rack_update_seg(rack);
 		break;
 	case TCP_BBR_IWINTSO:
 		RACK_OPTS_INC(tcp_initial_win);
 		if (optval && (optval <= 0xff)) {
 			uint32_t win, snt;
 
 			rack->rc_init_win = optval;
 			win = rc_init_window(rack);
 			if (SEQ_GT(tp->snd_max, tp->iss))
 				snt = tp->snd_max - tp->iss;
 			else
 				snt = 0;
 			if ((snt < win) &&
 			    (tp->t_srtt |
 			     rack->r_ctl.init_rate)) {
				/*
				 * We are not past the initial window
				 * and we have some basis for pacing,
				 * so we may need to adjust up
				 * the cwnd. Note even if we don't set
				 * the cwnd, it's still ok to raise the rc_init_win
				 * which can be used coming out of idle when we
				 * would have a rate.
				 */
 				if (tp->snd_cwnd < win)
 					tp->snd_cwnd = win;
 			}
 			if (rack->rc_always_pace)
 				rack_update_seg(rack);
 		} else
 			error = EINVAL;
 		break;
 	case TCP_RACK_FORCE_MSEG:
 		RACK_OPTS_INC(tcp_rack_force_max_seg);
 		if (optval)
 			rack->rc_force_max_seg = 1;
 		else
 			rack->rc_force_max_seg = 0;
 		break;
 	case TCP_RACK_PACE_MIN_SEG:
 		RACK_OPTS_INC(tcp_rack_min_seg);
 		rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval);
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 		break;
 	case TCP_RACK_PACE_MAX_SEG:
 		/* Max segments size in a pace in bytes */
 		RACK_OPTS_INC(tcp_rack_max_seg);
 		if (optval <= MAX_USER_SET_SEG)
 			rack->rc_user_set_max_segs = optval;
 		else
 			rack->rc_user_set_max_segs = MAX_USER_SET_SEG;
 		rack_set_pace_segments(tp, rack, __LINE__, NULL);
 		break;
 	case TCP_RACK_PACE_RATE_REC:
		/* Set the fixed pacing rate in Bytes per second for recovery */
 		RACK_OPTS_INC(tcp_rack_pace_rate_rec);
 		rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
 		if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
 			rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
 		if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
 			rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
 		rack->use_fixed_rate = 1;
 		if (rack->rack_hibeta)
 			rack_set_cc_pacing(rack);
 		rack_log_pacing_delay_calc(rack,
 					   rack->r_ctl.rc_fixed_pacing_rate_ss,
 					   rack->r_ctl.rc_fixed_pacing_rate_ca,
 					   rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
 					   __LINE__, NULL,0);
 		break;
 
 	case TCP_RACK_PACE_RATE_SS:
		/* Set the fixed pacing rate in Bytes per second for slow start */
 		RACK_OPTS_INC(tcp_rack_pace_rate_ss);
 		rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
 		if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
 			rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
 		if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
 			rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
 		rack->use_fixed_rate = 1;
 		if (rack->rack_hibeta)
 			rack_set_cc_pacing(rack);
 		rack_log_pacing_delay_calc(rack,
 					   rack->r_ctl.rc_fixed_pacing_rate_ss,
 					   rack->r_ctl.rc_fixed_pacing_rate_ca,
 					   rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
 					   __LINE__, NULL, 0);
 		break;
 
 	case TCP_RACK_PACE_RATE_CA:
		/* Set the fixed pacing rate in Bytes per second for congestion avoidance */
 		RACK_OPTS_INC(tcp_rack_pace_rate_ca);
 		rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
 		if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
 			rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
 		if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
 			rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
 		rack->use_fixed_rate = 1;
 		if (rack->rack_hibeta)
 			rack_set_cc_pacing(rack);
 		rack_log_pacing_delay_calc(rack,
 					   rack->r_ctl.rc_fixed_pacing_rate_ss,
 					   rack->r_ctl.rc_fixed_pacing_rate_ca,
 					   rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8,
 					   __LINE__, NULL, 0);
 		break;
 	case TCP_RACK_GP_INCREASE_REC:
 		RACK_OPTS_INC(tcp_gp_inc_rec);
 		rack->r_ctl.rack_per_of_gp_rec = optval;
 		rack_log_pacing_delay_calc(rack,
 					   rack->r_ctl.rack_per_of_gp_ss,
 					   rack->r_ctl.rack_per_of_gp_ca,
 					   rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
 					   __LINE__, NULL, 0);
 		break;
 	case TCP_RACK_GP_INCREASE_CA:
 		RACK_OPTS_INC(tcp_gp_inc_ca);
 		ca = optval;
 		if (ca < 100) {
			/*
			 * We don't allow any reduction
			 * below the GP b/w.
			 */
 			error = EINVAL;
 			break;
 		}
 		rack->r_ctl.rack_per_of_gp_ca = ca;
 		rack_log_pacing_delay_calc(rack,
 					   rack->r_ctl.rack_per_of_gp_ss,
 					   rack->r_ctl.rack_per_of_gp_ca,
 					   rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
 					   __LINE__, NULL, 0);
 		break;
 	case TCP_RACK_GP_INCREASE_SS:
 		RACK_OPTS_INC(tcp_gp_inc_ss);
 		ss = optval;
 		if (ss < 100) {
			/*
			 * We don't allow any reduction
			 * below the GP b/w.
			 */
 			error = EINVAL;
 			break;
 		}
 		rack->r_ctl.rack_per_of_gp_ss = ss;
 		rack_log_pacing_delay_calc(rack,
 					   rack->r_ctl.rack_per_of_gp_ss,
 					   rack->r_ctl.rack_per_of_gp_ca,
 					   rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1,
 					   __LINE__, NULL, 0);
 		break;
 	case TCP_RACK_RR_CONF:
 		RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate);
 		if (optval && optval <= 3)
 			rack->r_rr_config = optval;
 		else
 			rack->r_rr_config = 0;
 		break;
 	case TCP_PACING_DND:			/*  URL:dnd */
 		if (optval > 0)
 			rack->rc_pace_dnd = 1;
 		else
 			rack->rc_pace_dnd = 0;
 		break;
 	case TCP_HDWR_RATE_CAP:
 		RACK_OPTS_INC(tcp_hdwr_rate_cap);
 		if (optval) {
 			if (rack->r_rack_hw_rate_caps == 0)
 				rack->r_rack_hw_rate_caps = 1;
 			else
 				error = EALREADY;
 		} else {
 			rack->r_rack_hw_rate_caps = 0;
 		}
 		break;
 	case TCP_RACK_SPLIT_LIMIT:
 		RACK_OPTS_INC(tcp_split_limit);
 		rack->r_ctl.rc_split_limit = optval;
 		break;
 	case TCP_BBR_HDWR_PACE:
 		RACK_OPTS_INC(tcp_hdwr_pacing);
 		if (optval){
 			if (rack->rack_hdrw_pacing == 0) {
 				rack->rack_hdw_pace_ena = 1;
 				rack->rack_attempt_hdwr_pace = 0;
 			} else
 				error = EALREADY;
 		} else {
 			rack->rack_hdw_pace_ena = 0;
 #ifdef RATELIMIT
 			if (rack->r_ctl.crte != NULL) {
 				rack->rack_hdrw_pacing = 0;
 				rack->rack_attempt_hdwr_pace = 0;
 				tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
 				rack->r_ctl.crte = NULL;
 			}
 #endif
 		}
 		break;
 		/*  End Pacing related ones */
 	case TCP_RACK_PRR_SENDALOT:
 		/* Allow PRR to send more than one seg */
 		RACK_OPTS_INC(tcp_rack_prr_sendalot);
 		rack->r_ctl.rc_prr_sendalot = optval;
 		break;
 	case TCP_RACK_MIN_TO:
 		/* Minimum time between rack t-o's in ms */
 		RACK_OPTS_INC(tcp_rack_min_to);
 		rack->r_ctl.rc_min_to = optval;
 		break;
 	case TCP_RACK_EARLY_SEG:
		/* Max segments in early recovery */
 		RACK_OPTS_INC(tcp_rack_early_seg);
 		rack->r_ctl.rc_early_recovery_segs = optval;
 		break;
 	case TCP_RACK_ENABLE_HYSTART:
 	{
 		if (optval) {
 			tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
 			if (rack_do_hystart > RACK_HYSTART_ON)
 				tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
 			if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
 				tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
 		} else {
 			tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
 		}
 	}
 	break;
 	case TCP_RACK_REORD_THRESH:
 		/* RACK reorder threshold (shift amount) */
 		RACK_OPTS_INC(tcp_rack_reord_thresh);
 		if ((optval > 0) && (optval < 31))
 			rack->r_ctl.rc_reorder_shift = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_RACK_REORD_FADE:
 		/* Does reordering fade after ms time */
 		RACK_OPTS_INC(tcp_rack_reord_fade);
 		rack->r_ctl.rc_reorder_fade = optval;
 		break;
 	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
 		RACK_OPTS_INC(tcp_rack_tlp_thresh);
 		if (optval)
 			rack->r_ctl.rc_tlp_threshold = optval;
 		else
 			error = EINVAL;
 		break;
 	case TCP_BBR_USE_RACK_RR:
 		RACK_OPTS_INC(tcp_rack_rr);
 		if (optval)
 			rack->use_rack_rr = 1;
 		else
 			rack->use_rack_rr = 0;
 		break;
 	case TCP_RACK_PKT_DELAY:
 		/* RACK added ms i.e. rack-rtt + reord + N */
 		RACK_OPTS_INC(tcp_rack_pkt_delay);
 		rack->r_ctl.rc_pkt_delay = optval;
 		break;
 	case TCP_DELACK:
 		RACK_OPTS_INC(tcp_rack_delayed_ack);
 		if (optval == 0)
 			tp->t_delayed_ack = 0;
 		else
 			tp->t_delayed_ack = 1;
 		if (tp->t_flags & TF_DELACK) {
 			tp->t_flags &= ~TF_DELACK;
 			tp->t_flags |= TF_ACKNOW;
 			NET_EPOCH_ENTER(et);
 			rack_output(tp);
 			NET_EPOCH_EXIT(et);
 		}
 		break;
 
 	case TCP_BBR_RACK_RTT_USE:
 		RACK_OPTS_INC(tcp_rack_rtt_use);
 		if ((optval != USE_RTT_HIGH) &&
 		    (optval != USE_RTT_LOW) &&
 		    (optval != USE_RTT_AVG))
 			error = EINVAL;
 		else
 			rack->r_ctl.rc_rate_sample_method = optval;
 		break;
 	case TCP_DATA_AFTER_CLOSE:
 		RACK_OPTS_INC(tcp_data_after_close);
 		if (optval)
 			rack->rc_allow_data_af_clo = 1;
 		else
 			rack->rc_allow_data_af_clo = 0;
 		break;
 	default:
 		break;
 	}
 	tcp_log_socket_option(tp, sopt_name, optval, error);
 	return (error);
 }
 
 
 static void
 rack_apply_deferred_options(struct tcp_rack *rack)
 {
 	struct deferred_opt_list *dol, *sdol;
 	uint32_t s_optval;
 
 	TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
 		TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
		/* Disadvantage of deferral is you lose the error return */
 		s_optval = (uint32_t)dol->optval;
 		(void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
 		free(dol, M_TCPDO);
 	}
 }
 
 static void
 rack_hw_tls_change(struct tcpcb *tp, int chg)
 {
 	/* Update HW tls state */
 	struct tcp_rack *rack;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (chg)
 		rack->r_ctl.fsb.hw_tls = 1;
 	else
 		rack->r_ctl.fsb.hw_tls = 0;
 }
 
 static int
 rack_pru_options(struct tcpcb *tp, int flags)
 {
 	if (flags & PRUS_OOB)
 		return (EOPNOTSUPP);
 	return (0);
 }
 
 static bool
 rack_wake_check(struct tcpcb *tp)
 {
 	struct tcp_rack *rack;
 	struct timeval tv;
 	uint32_t cts;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (rack->r_ctl.rc_hpts_flags) {
 		cts = tcp_get_usecs(&tv);
 		if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){
 			/*
 			 * Pacing timer is up, check if we are ready.
 			 */
 			if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to))
 				return (true);
 		} else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) {
 			/*
 			 * A timer is up, check if we are ready.
 			 */
 			if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp))
 				return (true);
 		}
 	}
 	return (false);
 }
 
 static struct tcp_function_block __tcp_rack = {
 	.tfb_tcp_block_name = __XSTRING(STACKNAME),
 	.tfb_tcp_output = rack_output,
 	.tfb_do_queued_segments = ctf_do_queued_segments,
 	.tfb_do_segment_nounlock = rack_do_segment_nounlock,
 	.tfb_tcp_do_segment = rack_do_segment,
 	.tfb_tcp_ctloutput = rack_ctloutput,
 	.tfb_tcp_fb_init = rack_init,
 	.tfb_tcp_fb_fini = rack_fini,
 	.tfb_tcp_timer_stop_all = rack_stopall,
 	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
 	.tfb_tcp_handoff_ok = rack_handoff_ok,
 	.tfb_tcp_mtu_chg = rack_mtu_change,
 	.tfb_pru_options = rack_pru_options,
 	.tfb_hwtls_change = rack_hw_tls_change,
 	.tfb_chg_query = rack_chg_query,
 	.tfb_switch_failed = rack_switch_failed,
 	.tfb_early_wake_check = rack_wake_check,
 	.tfb_compute_pipe = rack_compute_pipe,
 	.tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
 };
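
/*
 * Note: this function block is the table of callbacks the TCP
 * stack-switching framework uses for connections running the rack stack
 * (output, segment input, socket options, timers, etc.); rack_set_sockopt()
 * below checks tp->t_fb against it before acting on an option.
 */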
 
 /*
  * rack_ctloutput() must drop the inpcb lock before performing copyin on
  * socket option arguments.  When it re-acquires the lock after the copy, it
  * has to revalidate that the connection is still valid for the socket
  * option.
  */
 static int
 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 #ifdef INET
 	struct ip *ip;
 #endif
 	struct tcp_rack *rack;
 	struct tcp_hybrid_req hybrid;
 	uint64_t loptval;
 	int32_t error = 0, optval;
 
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (rack == NULL) {
 		INP_WUNLOCK(inp);
 		return (EINVAL);
 	}
 #ifdef INET
 	ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
 #endif
 
 	switch (sopt->sopt_level) {
 #ifdef INET6
 	case IPPROTO_IPV6:
 		MPASS(inp->inp_vflag & INP_IPV6PROTO);
 		switch (sopt->sopt_name) {
 		case IPV6_USE_MIN_MTU:
 			tcp6_use_min_mtu(tp);
 			break;
 		}
 		INP_WUNLOCK(inp);
 		return (0);
 #endif
 #ifdef INET
 	case IPPROTO_IP:
 		switch (sopt->sopt_name) {
 		case IP_TOS:
 			/*
 			 * The DSCP codepoint has changed, update the fsb.
 			 */
 			ip->ip_tos = rack->rc_inp->inp_ip_tos;
 			break;
 		case IP_TTL:
 			/*
 			 * The TTL has changed, update the fsb.
 			 */
 			ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
 			break;
 		}
 		INP_WUNLOCK(inp);
 		return (0);
 #endif
 #ifdef SO_PEERPRIO
 	case SOL_SOCKET:
 		switch (sopt->sopt_name) {
 		case SO_PEERPRIO:			/*  SC-URL:bs */
 			/* Already read in and sanity checked in sosetopt(). */
 			if (inp->inp_socket) {
 				rack->client_bufferlvl = inp->inp_socket->so_peerprio;
 				rack_client_buffer_level_set(rack);
 			}
 			break;
 		}
 		INP_WUNLOCK(inp);
 		return (0);
 #endif
 	case IPPROTO_TCP:
 		switch (sopt->sopt_name) {
 		case TCP_RACK_TLP_REDUCE:		/*  URL:tlp_reduce */
 		/*  Pacing related ones */
 		case TCP_RACK_PACE_ALWAYS:		/*  URL:pace_always */
 		case TCP_BBR_RACK_INIT_RATE:		/*  URL:irate */
 		case TCP_BBR_IWINTSO:			/*  URL:tso_iwin */
 		case TCP_RACK_PACE_MIN_SEG:		/*  URL:pace_min_seg */
 		case TCP_RACK_PACE_MAX_SEG:		/*  URL:pace_max_seg */
 		case TCP_RACK_FORCE_MSEG:		/*  URL:force_max_seg */
 		case TCP_RACK_PACE_RATE_CA:		/*  URL:pr_ca */
 		case TCP_RACK_PACE_RATE_SS:		/*  URL:pr_ss*/
 		case TCP_RACK_PACE_RATE_REC:		/*  URL:pr_rec */
 		case TCP_RACK_GP_INCREASE_CA:		/*  URL:gp_inc_ca */
 		case TCP_RACK_GP_INCREASE_SS:		/*  URL:gp_inc_ss */
 		case TCP_RACK_GP_INCREASE_REC:		/*  URL:gp_inc_rec */
 		case TCP_RACK_RR_CONF:			/*  URL:rrr_conf */
 		case TCP_BBR_HDWR_PACE:			/*  URL:hdwrpace */
 		case TCP_HDWR_RATE_CAP:			/*  URL:hdwrcap boolean */
 		case TCP_PACING_RATE_CAP:		/*  URL:cap  -- used by side-channel */
 		case TCP_HDWR_UP_ONLY:			/*  URL:uponly -- hardware pacing  boolean */
 		case TCP_RACK_PACING_BETA:		/*  URL:pacing_beta */
 		case TCP_RACK_PACING_BETA_ECN:		/*  URL:pacing_beta_ecn */
 		case TCP_RACK_PACE_TO_FILL:		/*  URL:fillcw */
 		case TCP_RACK_DGP_IN_REC:		/*  URL:dgpinrec */
 			/* End pacing related */
 		case TCP_RXT_CLAMP:			/*  URL:rxtclamp */
 		case TCP_DELACK:			/*  URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */
 		case TCP_RACK_PRR_SENDALOT:		/*  URL:prr_sendalot */
 		case TCP_RACK_MIN_TO:			/*  URL:min_to */
 		case TCP_RACK_EARLY_SEG:		/*  URL:early_seg */
 		case TCP_RACK_REORD_THRESH:		/*  URL:reord_thresh */
 		case TCP_RACK_REORD_FADE:		/*  URL:reord_fade */
 		case TCP_RACK_TLP_THRESH:		/*  URL:tlp_thresh */
 		case TCP_RACK_PKT_DELAY:		/*  URL:pkt_delay */
 		case TCP_RACK_TLP_USE:			/*  URL:tlp_use */
 		case TCP_BBR_RACK_RTT_USE:		/*  URL:rttuse */
 		case TCP_BBR_USE_RACK_RR:		/*  URL:rackrr */
 		case TCP_RACK_DO_DETECTION:		/*  URL:detect */
 		case TCP_NO_PRR:			/*  URL:noprr */
 		case TCP_TIMELY_DYN_ADJ:      		/*  URL:dynamic */
 		case TCP_DATA_AFTER_CLOSE:		/*  no URL */
 		case TCP_RACK_NONRXT_CFG_RATE:		/*  URL:nonrxtcr */
 		case TCP_SHARED_CWND_ENABLE:		/*  URL:scwnd */
 		case TCP_RACK_MBUF_QUEUE:		/*  URL:mqueue */
 		case TCP_RACK_NO_PUSH_AT_MAX:		/*  URL:npush */
 		case TCP_SHARED_CWND_TIME_LIMIT:	/*  URL:lscwnd */
 		case TCP_RACK_PROFILE:			/*  URL:profile */
 		case TCP_HYBRID_PACING:			/*  URL:hybrid */
 		case TCP_USE_CMP_ACKS:			/*  URL:cmpack */
 		case TCP_RACK_ABC_VAL:			/*  URL:labc */
 		case TCP_REC_ABC_VAL:			/*  URL:reclabc */
 		case TCP_RACK_MEASURE_CNT:		/*  URL:measurecnt */
 		case TCP_DEFER_OPTIONS:			/*  URL:defer */
 		case TCP_RACK_DSACK_OPT:		/*  URL:dsack */
 		case TCP_RACK_TIMER_SLOP:		/*  URL:timer_slop */
 		case TCP_RACK_ENABLE_HYSTART:		/*  URL:hystart */
 		case TCP_RACK_SET_RXT_OPTIONS:		/*  URL:rxtsz */
 		case TCP_RACK_HI_BETA:			/*  URL:hibeta */
 		case TCP_RACK_SPLIT_LIMIT:		/*  URL:split */
 		case TCP_RACK_PACING_DIVISOR:		/*  URL:divisor */
 		case TCP_PACING_DND:			/*  URL:dnd */
 			goto process_opt;
 			break;
 		default:
 			/* Filter off all unknown options to the base stack */
 			return (tcp_default_ctloutput(tp, sopt));
 			break;
 		}
 
 	default:
 		INP_WUNLOCK(inp);
 		return (0);
 	}
 process_opt:
 	INP_WUNLOCK(inp);
 	if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
 		error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
		/*
		 * We truncate it down to 32 bits for the socket-option trace;
		 * this means rates > 34Gbps won't show right, but that's
		 * probably ok.
		 */
 		optval = (uint32_t)loptval;
 	} else if (sopt->sopt_name == TCP_HYBRID_PACING) {
 		error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid));
 	} else {
 		error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 		/* Save it in 64 bit form too */
 		loptval = optval;
 	}
 	if (error)
 		return (error);
 	INP_WLOCK(inp);
 	if (tp->t_fb != &__tcp_rack) {
 		INP_WUNLOCK(inp);
 		return (ENOPROTOOPT);
 	}
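 	/*
 	 * With option deferral enabled and no goodput measurement made
 	 * yet, most options are queued and applied later; the handful of
 	 * options excluded below always take effect immediately.
 	 */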
 	if (rack->defer_options && (rack->gp_ready == 0) &&
 	    (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
 	    (sopt->sopt_name != TCP_HYBRID_PACING) &&
 	    (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
 	    (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) &&
 	    (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
 	    (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
 		/* Options are being deferred */
 		if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
 			INP_WUNLOCK(inp);
 			return (0);
 		} else {
 			/* No memory to defer, fail */
 			INP_WUNLOCK(inp);
 			return (ENOMEM);
 		}
 	}
 	error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid);
 	INP_WUNLOCK(inp);
 	return (error);
 }
 
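 /*
  * Fill in a tcp_info structure from the connection state. Most fields
  * come straight out of the tcpcb; the rtt related fields are already
  * kept in microseconds, so no conversion is needed.
  */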
 static void
 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
 {
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	bzero(ti, sizeof(*ti));
 
 	ti->tcpi_state = tp->t_state;
 	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
 		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
 	if (tp->t_flags & TF_SACK_PERMIT)
 		ti->tcpi_options |= TCPI_OPT_SACK;
 	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
 		ti->tcpi_options |= TCPI_OPT_WSCALE;
 		ti->tcpi_snd_wscale = tp->snd_scale;
 		ti->tcpi_rcv_wscale = tp->rcv_scale;
 	}
 	if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
 		ti->tcpi_options |= TCPI_OPT_ECN;
 	if (tp->t_flags & TF_FASTOPEN)
 		ti->tcpi_options |= TCPI_OPT_TFO;
 	/* t_rcvtime is still kept in ticks */
 	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
 	/* Since we hold everything in precise useconds this is easy */
 	ti->tcpi_rtt = tp->t_srtt;
 	ti->tcpi_rttvar = tp->t_rttvar;
 	ti->tcpi_rto = tp->t_rxtcur;
 	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
 	ti->tcpi_snd_cwnd = tp->snd_cwnd;
 	/*
 	 * FreeBSD-specific extension fields for tcp_info.
 	 */
 	ti->tcpi_rcv_space = tp->rcv_wnd;
 	ti->tcpi_rcv_nxt = tp->rcv_nxt;
 	ti->tcpi_snd_wnd = tp->snd_wnd;
 	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
 	ti->tcpi_snd_nxt = tp->snd_nxt;
 	ti->tcpi_snd_mss = tp->t_maxseg;
 	ti->tcpi_rcv_mss = tp->t_maxseg;
 	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
 	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
 	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
 	ti->tcpi_total_tlp = tp->t_sndtlppack;
 	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
 #ifdef NETFLIX_STATS
 	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
 #endif
 #ifdef TCP_OFFLOAD
 	if (tp->t_flags & TF_TOE) {
 		ti->tcpi_options |= TCPI_OPT_TOE;
 		tcp_offload_tcp_info(tp, ti);
 	}
 #endif
 }
 
 static int
 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct tcp_rack *rack;
 	int32_t error, optval;
 	uint64_t val, loptval;
 	struct	tcp_info ti;
 	/*
 	 * Because all our options are either boolean or an int, we can just
 	 * pull everything into optval and then unlock and copy. If we ever
 	 * add an option that is not an int, then this will have quite an
 	 * impact on this routine.
 	 */
 	error = 0;
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	if (rack == NULL) {
 		INP_WUNLOCK(inp);
 		return (EINVAL);
 	}
 	switch (sopt->sopt_name) {
 	case TCP_INFO:
 		/* First get the info filled */
 		rack_fill_info(tp, &ti);
 		/* Fix up the rtt related fields if needed */
 		INP_WUNLOCK(inp);
 		error = sooptcopyout(sopt, &ti, sizeof ti);
 		return (error);
 	/*
 	 * Beta is the congestion control value for NewReno that influences how
 	 * much of a backoff happens when loss is detected. It is normally set
 	 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value
 	 * when you exit recovery.
 	 */
 	case TCP_RACK_PACING_BETA:
 		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
 			error = EINVAL;
 		else if (rack->rc_pacing_cc_set == 0)
 			optval = rack->r_ctl.rc_saved_beta.beta;
 		else {
 			/*
 			 * Reach out into the CC data and report back what
 			 * I have previously set. Yeah it looks hackish but
 			 * we don't want to report the saved values.
 			 */
 			if (tp->t_ccv.cc_data)
 				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
 			else
 				error = EINVAL;
 		}
 		break;
 		/*
 		 * Beta_ecn is the congestion control value for NewReno that influences how
 		 * much of a backoff happens when an ECN mark is detected. It is normally set
 		 * to 80 for 80% i.e. the cwnd is reduced to 80% of its previous value when
 		 * you exit recovery. Note that classic ECN uses a beta of 50; it is only
 		 * ABE ECN that uses this lesser reduction, but we do too with pacing :)
 		 */
 
 	case TCP_RACK_PACING_BETA_ECN:
 		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
 			error = EINVAL;
 		else if (rack->rc_pacing_cc_set == 0)
 			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
 		else {
 			/*
 			 * Reach out into the CC data and report back what
 			 * I have previously set. Yeah it looks hackish but
 			 * we don't want to report the saved values.
 			 */
 			if (tp->t_ccv.cc_data)
 				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
 			else
 				error = EINVAL;
 		}
 		break;
 	case TCP_RACK_DSACK_OPT:
 		optval = 0;
 		if (rack->rc_rack_tmr_std_based) {
 			optval |= 1;
 		}
 		if (rack->rc_rack_use_dsack) {
 			optval |= 2;
 		}
 		break;
 	case TCP_RACK_ENABLE_HYSTART:
 	{
 		if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
 			optval = RACK_HYSTART_ON;
 			if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
 				optval = RACK_HYSTART_ON_W_SC;
 			if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
 				optval = RACK_HYSTART_ON_W_SC_C;
 		} else {
 			optval = RACK_HYSTART_OFF;
 		}
 	}
 	break;
 	case TCP_RACK_DGP_IN_REC:
 		optval = rack->r_ctl.full_dgp_in_rec;
 		break;
 	case TCP_RACK_HI_BETA:
 		optval = rack->rack_hibeta;
 		break;
 	case TCP_RXT_CLAMP:
 		optval = rack->r_ctl.saved_rxt_clamp_val;
 		break;
 	case TCP_DEFER_OPTIONS:
 		optval = rack->defer_options;
 		break;
 	case TCP_RACK_MEASURE_CNT:
 		optval = rack->r_ctl.req_measurements;
 		break;
 	case TCP_REC_ABC_VAL:
 		optval = rack->r_use_labc_for_rec;
 		break;
 	case TCP_RACK_ABC_VAL:
 		optval = rack->rc_labc;
 		break;
 	case TCP_HDWR_UP_ONLY:
 		optval = rack->r_up_only;
 		break;
 	case TCP_PACING_RATE_CAP:
 		loptval = rack->r_ctl.bw_rate_cap;
 		break;
 	case TCP_RACK_PROFILE:
 		/* You cannot retrieve a profile, it is write-only */
 		error = EINVAL;
 		break;
 	case TCP_HYBRID_PACING:
 		/* You cannot retrieve hybrid pacing information, it is write-only */
 		error = EINVAL;
 		break;
 	case TCP_USE_CMP_ACKS:
 		optval = rack->r_use_cmp_ack;
 		break;
 	case TCP_RACK_PACE_TO_FILL:
 		optval = rack->rc_pace_to_cwnd;
 		if (optval && rack->r_fill_less_agg)
 			optval++;
 		break;
 	case TCP_RACK_NO_PUSH_AT_MAX:
 		optval = rack->r_ctl.rc_no_push_at_mrtt;
 		break;
 	case TCP_SHARED_CWND_ENABLE:
 		optval = rack->rack_enable_scwnd;
 		break;
 	case TCP_RACK_NONRXT_CFG_RATE:
 		optval = rack->rack_rec_nonrxt_use_cr;
 		break;
 	case TCP_NO_PRR:
 		if (rack->rack_no_prr == 1)
 			optval = 1;
 		else if (rack->no_prr_addback == 1)
 			optval = 2;
 		else
 			optval = 0;
 		break;
 	case TCP_RACK_DO_DETECTION:
 		optval = rack->do_detection;
 		break;
 	case TCP_RACK_MBUF_QUEUE:
 		/* Now do we use the LRO mbuf-queue feature */
 		optval = rack->r_mbuf_queue;
 		break;
 	case TCP_TIMELY_DYN_ADJ:
 		optval = rack->rc_gp_dyn_mul;
 		break;
 	case TCP_BBR_IWINTSO:
 		optval = rack->rc_init_win;
 		break;
 	case TCP_RACK_TLP_REDUCE:
 		/* RACK TLP cwnd reduction (bool) */
 		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
 		break;
 	case TCP_BBR_RACK_INIT_RATE:
 		val = rack->r_ctl.init_rate;
 		/* convert to kbits per sec */
 		val *= 8;
 		val /= 1000;
 		optval = (uint32_t)val;
 		break;
 	case TCP_RACK_FORCE_MSEG:
 		optval = rack->rc_force_max_seg;
 		break;
 	case TCP_RACK_PACE_MIN_SEG:
 		optval = rack->r_ctl.rc_user_set_min_segs;
 		break;
 	case TCP_RACK_PACE_MAX_SEG:
 		/* Max segments in a pace */
 		optval = rack->rc_user_set_max_segs;
 		break;
 	case TCP_RACK_PACE_ALWAYS:
 		/* Use the always pace method */
 		optval = rack->rc_always_pace;
 		break;
 	case TCP_RACK_PRR_SENDALOT:
 		/* Allow PRR to send more than one seg */
 		optval = rack->r_ctl.rc_prr_sendalot;
 		break;
 	case TCP_RACK_MIN_TO:
 		/* Minimum time between rack t-o's in ms */
 		optval = rack->r_ctl.rc_min_to;
 		break;
 	case TCP_RACK_SPLIT_LIMIT:
 		optval = rack->r_ctl.rc_split_limit;
 		break;
 	case TCP_RACK_EARLY_SEG:
 		/* If early recovery max segments */
 		optval = rack->r_ctl.rc_early_recovery_segs;
 		break;
 	case TCP_RACK_REORD_THRESH:
 		/* RACK reorder threshold (shift amount) */
 		optval = rack->r_ctl.rc_reorder_shift;
 		break;
 	case TCP_RACK_REORD_FADE:
 		/* Does reordering fade after ms time */
 		optval = rack->r_ctl.rc_reorder_fade;
 		break;
 	case TCP_BBR_USE_RACK_RR:
 		/* Do we use the rack cheat for rxt */
 		optval = rack->use_rack_rr;
 		break;
 	case TCP_RACK_RR_CONF:
 		optval = rack->r_rr_config;
 		break;
 	case TCP_HDWR_RATE_CAP:
 		optval = rack->r_rack_hw_rate_caps;
 		break;
 	case TCP_BBR_HDWR_PACE:
 		optval = rack->rack_hdw_pace_ena;
 		break;
 	case TCP_RACK_TLP_THRESH:
 		/* RACK TLP threshold i.e. srtt+(srtt/N) */
 		optval = rack->r_ctl.rc_tlp_threshold;
 		break;
 	case TCP_RACK_PKT_DELAY:
 		/* RACK added ms i.e. rack-rtt + reord + N */
 		optval = rack->r_ctl.rc_pkt_delay;
 		break;
 	case TCP_RACK_TLP_USE:
 		optval = rack->rack_tlp_threshold_use;
 		break;
 	case TCP_PACING_DND:
 		optval = rack->rc_pace_dnd;
 		break;
 	case TCP_RACK_PACE_RATE_CA:
 		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
 		break;
 	case TCP_RACK_PACE_RATE_SS:
 		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
 		break;
 	case TCP_RACK_PACE_RATE_REC:
 		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
 		break;
 	case TCP_RACK_GP_INCREASE_SS:
 		optval = rack->r_ctl.rack_per_of_gp_ss;
 		break;
 	case TCP_RACK_GP_INCREASE_CA:
 		optval = rack->r_ctl.rack_per_of_gp_ca;
 		break;
 	case TCP_RACK_PACING_DIVISOR:
 		optval = rack->r_ctl.pace_len_divisor;
 		break;
 	case TCP_BBR_RACK_RTT_USE:
 		optval = rack->r_ctl.rc_rate_sample_method;
 		break;
 	case TCP_DELACK:
 		optval = tp->t_delayed_ack;
 		break;
 	case TCP_DATA_AFTER_CLOSE:
 		optval = rack->rc_allow_data_af_clo;
 		break;
 	case TCP_SHARED_CWND_TIME_LIMIT:
 		optval = rack->r_limit_scw;
 		break;
 	case TCP_RACK_TIMER_SLOP:
 		optval = rack->r_ctl.timer_slop;
 		break;
 	default:
 		return (tcp_default_ctloutput(tp, sopt));
 		break;
 	}
 	INP_WUNLOCK(inp);
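 	/*
 	 * TCP_PACING_RATE_CAP is the only option returned as a 64-bit
 	 * value; everything else is copied out as a 32-bit optval.
 	 */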
 	if (error == 0) {
 		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
 			error = sooptcopyout(sopt, &loptval, sizeof loptval);
 		else
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 	}
 	return (error);
 }
 
 static int
 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
 {
 	if (sopt->sopt_dir == SOPT_SET) {
 		return (rack_set_sockopt(tp, sopt));
 	} else if (sopt->sopt_dir == SOPT_GET) {
 		return (rack_get_sockopt(tp, sopt));
 	} else {
 		panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir);
 	}
 }
 
 static const char *rack_stack_names[] = {
 	__XSTRING(STACKNAME),
 #ifdef STACKALIAS
 	__XSTRING(STACKALIAS),
 #endif
 };
 
 static int
 rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
 {
 	memset(mem, 0, size);
 	return (0);
 }
 
 static void
 rack_dtor(void *mem, int32_t size, void *arg)
 {
 
 }
 
 static bool rack_mod_inited = false;
 
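 /*
  * Module event handler: on load, create the UMA zones and the sysctl
  * tree and register the stack names; on unload, tear everything down
  * again once the stack has been deregistered.
  */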
 static int
 tcp_addrack(module_t mod, int32_t type, void *data)
 {
 	int32_t err = 0;
 	int num_stacks;
 
 	switch (type) {
 	case MOD_LOAD:
 		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
 		    sizeof(struct rack_sendmap),
 		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
 
 		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
 		    sizeof(struct tcp_rack),
 		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
 
 		sysctl_ctx_init(&rack_sysctl_ctx);
 		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
 		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
 		    OID_AUTO,
 #ifdef STACKALIAS
 		    __XSTRING(STACKALIAS),
 #else
 		    __XSTRING(STACKNAME),
 #endif
 		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
 		    "");
 		if (rack_sysctl_root == NULL) {
 			printf("Failed to add sysctl node\n");
 			err = EFAULT;
 			goto free_uma;
 		}
 		rack_init_sysctls();
 		num_stacks = nitems(rack_stack_names);
 		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
 		    rack_stack_names, &num_stacks);
 		if (err) {
 			printf("Failed to register %s stack name for "
 			    "%s module\n", rack_stack_names[num_stacks],
 			    __XSTRING(MODNAME));
 			sysctl_ctx_free(&rack_sysctl_ctx);
 free_uma:
 			uma_zdestroy(rack_zone);
 			uma_zdestroy(rack_pcb_zone);
 			rack_counter_destroy();
 			printf("Failed to register rack module -- err:%d\n", err);
 			return (err);
 		}
 		tcp_lro_reg_mbufq();
 		rack_mod_inited = true;
 		break;
 	case MOD_QUIESCE:
 		err = deregister_tcp_functions(&__tcp_rack, true, false);
 		break;
 	case MOD_UNLOAD:
 		err = deregister_tcp_functions(&__tcp_rack, false, true);
 		if (err == EBUSY)
 			break;
 		if (rack_mod_inited) {
 			uma_zdestroy(rack_zone);
 			uma_zdestroy(rack_pcb_zone);
 			sysctl_ctx_free(&rack_sysctl_ctx);
 			rack_counter_destroy();
 			rack_mod_inited = false;
 		}
 		tcp_lro_dereg_mbufq();
 		err = 0;
 		break;
 	default:
 		return (EOPNOTSUPP);
 	}
 	return (err);
 }
 
 static moduledata_t tcp_rack = {
 	.name = __XSTRING(MODNAME),
 	.evhand = tcp_addrack,
 	.priv = 0
 };
 
 MODULE_VERSION(MODNAME, 1);
 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
 
 #endif /* #if !defined(INET) && !defined(INET6) */
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index b3f5375cb8cf..d951b5df938e 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -1,4700 +1,4696 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
  *	The Regents of the University of California.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 #include "opt_inet.h"
 #include "opt_inet6.h"
 #include "opt_ipsec.h"
 #include "opt_kern_tls.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/arb.h>
 #include <sys/callout.h>
 #include <sys/eventhandler.h>
 #ifdef TCP_HHOOK
 #include <sys/hhook.h>
 #endif
 #include <sys/kernel.h>
 #ifdef TCP_HHOOK
 #include <sys/khelp.h>
 #endif
 #ifdef KERN_TLS
 #include <sys/ktls.h>
 #endif
 #include <sys/qmath.h>
 #include <sys/stats.h>
 #include <sys/sysctl.h>
 #include <sys/jail.h>
 #include <sys/malloc.h>
 #include <sys/refcount.h>
 #include <sys/mbuf.h>
 #include <sys/priv.h>
 #include <sys/proc.h>
 #include <sys/sdt.h>
 #include <sys/socket.h>
 #include <sys/socketvar.h>
 #include <sys/protosw.h>
 #include <sys/random.h>
 
 #include <vm/uma.h>
 
 #include <net/route.h>
 #include <net/route/nhop.h>
 #include <net/if.h>
 #include <net/if_var.h>
 #include <net/if_private.h>
 #include <net/vnet.h>
 
 #include <netinet/in.h>
 #include <netinet/in_fib.h>
 #include <netinet/in_kdtrace.h>
 #include <netinet/in_pcb.h>
 #include <netinet/in_systm.h>
 #include <netinet/in_var.h>
 #include <netinet/ip.h>
 #include <netinet/ip_icmp.h>
 #include <netinet/ip_var.h>
 #ifdef INET6
 #include <netinet/icmp6.h>
 #include <netinet/ip6.h>
 #include <netinet6/in6_fib.h>
 #include <netinet6/in6_pcb.h>
 #include <netinet6/ip6_var.h>
 #include <netinet6/scope6_var.h>
 #include <netinet6/nd6.h>
 #endif
 
 #include <netinet/tcp.h>
 #ifdef INVARIANTS
 #define TCPSTATES
 #endif
 #include <netinet/tcp_fsm.h>
 #include <netinet/tcp_seq.h>
 #include <netinet/tcp_timer.h>
 #include <netinet/tcp_var.h>
 #include <netinet/tcp_ecn.h>
 #include <netinet/tcp_log_buf.h>
 #include <netinet/tcp_syncache.h>
 #include <netinet/tcp_hpts.h>
 #include <netinet/tcp_lro.h>
 #include <netinet/cc/cc.h>
 #include <netinet/tcpip.h>
 #include <netinet/tcp_fastopen.h>
 #include <netinet/tcp_accounting.h>
 #ifdef TCPPCAP
 #include <netinet/tcp_pcap.h>
 #endif
 #ifdef TCP_OFFLOAD
 #include <netinet/tcp_offload.h>
 #endif
 #include <netinet/udp.h>
 #include <netinet/udp_var.h>
 #ifdef INET6
 #include <netinet6/tcp6_var.h>
 #endif
 
 #include <netipsec/ipsec_support.h>
 
 #include <machine/in_cksum.h>
 #include <crypto/siphash/siphash.h>
 
 #include <security/mac/mac_framework.h>
 
 #ifdef INET6
 static ip6proto_ctlinput_t tcp6_ctlinput;
 static udp_tun_icmp_t tcp6_ctlinput_viaudp;
 #endif
 
 VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
 #ifdef INET6
 VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
 #endif
 
 #ifdef TCP_SAD_DETECTION
 /*  Sack attack detection thresholds and such */
 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack_attack,
     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
     "Sack Attack detection thresholds");
 int32_t tcp_force_detection = 0;
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, force_detection,
     CTLFLAG_RW,
     &tcp_force_detection, 0,
     "Do we force detection even if the INP has it off?");
 int32_t tcp_sad_limit = 10000;
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, limit,
     CTLFLAG_RW,
     &tcp_sad_limit, 10000,
     "If SaD is enabled, what is the limit to sendmap entries (0 = unlimited)?");
 int32_t tcp_sack_to_ack_thresh = 700;	/* 70 % */
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sack_to_ack_thresh,
     CTLFLAG_RW,
     &tcp_sack_to_ack_thresh, 700,
     "Percentage of sacks to acks we must see above (10.1 percent is 101)?");
 int32_t tcp_sack_to_move_thresh = 600;	/* 60 % */
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, move_thresh,
     CTLFLAG_RW,
     &tcp_sack_to_move_thresh, 600,
     "Percentage of sack moves we must see above (10.1 percent is 101)");
 int32_t tcp_restoral_thresh = 450;	/* 45 % (sack:2:ack -25%) (mv:ratio -15%) */
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, restore_thresh,
     CTLFLAG_RW,
     &tcp_restoral_thresh, 450,
     "Percentage of sack to ack percentage we must see below to restore(10.1 percent is 101)");
 int32_t tcp_sad_decay_val = 800;
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, decay_per,
     CTLFLAG_RW,
     &tcp_sad_decay_val, 800,
     "The decay percentage (10.1 percent equals 101 )");
 int32_t tcp_map_minimum = 500;
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, nummaps,
     CTLFLAG_RW,
     &tcp_map_minimum, 500,
     "Number of Map enteries before we start detection");
 int32_t tcp_sad_pacing_interval = 2000;
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_pacing_int,
     CTLFLAG_RW,
     &tcp_sad_pacing_interval, 2000,
     "What is the minimum pacing interval for a classified attacker?");
 
 int32_t tcp_sad_low_pps = 100;
 SYSCTL_INT(_net_inet_tcp_sack_attack, OID_AUTO, sad_low_pps,
     CTLFLAG_RW,
     &tcp_sad_low_pps, 100,
     "What is the input pps that below which we do not decay?");
 #endif
 uint32_t tcp_ack_war_time_window = 1000;
 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_timewindow,
     CTLFLAG_RW,
     &tcp_ack_war_time_window, 1000,
    "If the tcp_stack does ack-war prevention how many milliseconds are in its time window?");
 uint32_t tcp_ack_war_cnt = 5;
 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ack_war_cnt,
     CTLFLAG_RW,
     &tcp_ack_war_cnt, 5,
    "If the tcp_stack does ack-war prevention how many acks can be sent in its time window?");
 
 struct rwlock tcp_function_lock;
 
 static int
 sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
 {
 	int error, new;
 
 	new = V_tcp_mssdflt;
 	error = sysctl_handle_int(oidp, &new, 0, req);
 	if (error == 0 && req->newptr) {
 		if (new < TCP_MINMSS)
 			error = EINVAL;
 		else
 			V_tcp_mssdflt = new;
 	}
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
     &VNET_NAME(tcp_mssdflt), 0, &sysctl_net_inet_tcp_mss_check, "I",
     "Default TCP Maximum Segment Size");
 
 #ifdef INET6
 static int
 sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
 {
 	int error, new;
 
 	new = V_tcp_v6mssdflt;
 	error = sysctl_handle_int(oidp, &new, 0, req);
 	if (error == 0 && req->newptr) {
 		if (new < TCP_MINMSS)
 			error = EINVAL;
 		else
 			V_tcp_v6mssdflt = new;
 	}
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
     &VNET_NAME(tcp_v6mssdflt), 0, &sysctl_net_inet_tcp_mss_v6_check, "I",
    "Default TCP Maximum Segment Size for IPv6");
 #endif /* INET6 */
 
 /*
  * Minimum MSS we accept and use. This prevents DoS attacks where
  * we are forced to a ridiculously low MSS like 20 and send hundreds
  * of packets instead of one. The effect scales with the available
  * bandwidth and quickly saturates the CPU and network interface
  * with packet generation and sending. Set to zero to disable MINMSS
  * checking. This setting prevents us from sending overly small packets.
  */
 VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_VNET | CTLFLAG_RW,
      &VNET_NAME(tcp_minmss), 0,
     "Minimum TCP Maximum Segment Size");
 
 VNET_DEFINE(int, tcp_do_rfc1323) = 1;
 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_VNET | CTLFLAG_RW,
     &VNET_NAME(tcp_do_rfc1323), 0,
     "Enable rfc1323 (high performance TCP) extensions");
 
 /*
  * As of June 2021, several TCP stacks violate RFC 7323 from September 2014.
  * Some stacks negotiate TS, but never send them after connection setup. Some
  * stacks negotiate TS, but don't send them when sending keep-alive segments.
  * These include modern widely deployed TCP stacks.
  * Therefore tolerating violations for now...
  */
 VNET_DEFINE(int, tcp_tolerate_missing_ts) = 1;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tolerate_missing_ts, CTLFLAG_VNET | CTLFLAG_RW,
     &VNET_NAME(tcp_tolerate_missing_ts), 0,
     "Tolerate missing TCP timestamps");
 
 VNET_DEFINE(int, tcp_ts_offset_per_conn) = 1;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, ts_offset_per_conn, CTLFLAG_VNET | CTLFLAG_RW,
     &VNET_NAME(tcp_ts_offset_per_conn), 0,
     "Initialize TCP timestamps per connection instead of per host pair");
 
 /* How many connections are pacing */
 static volatile uint32_t number_of_tcp_connections_pacing = 0;
 static uint32_t shadow_num_connections = 0;
 static counter_u64_t tcp_pacing_failures;
 
 static int tcp_pacing_limit = 10000;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pacing_limit, CTLFLAG_RW,
     &tcp_pacing_limit, 1000,
     "If the TCP stack does pacing, is there a limit (-1 = no, 0 = no pacing N = number of connections)");
 
 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pacing_count, CTLFLAG_RD,
     &shadow_num_connections, 0, "Number of TCP connections being paced");
 
 SYSCTL_COUNTER_U64(_net_inet_tcp, OID_AUTO, pacing_failures, CTLFLAG_RD,
     &tcp_pacing_failures, "Number of times we failed to enable pacing to avoid exceeding the limit");
 
 static int	tcp_log_debug = 0;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
     &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
 
 /*
  * Target size of TCP PCB hash tables. Must be a power of two.
  *
  * Note that this can be overridden by the kernel environment
  * variable net.inet.tcp.tcbhashsize
  */
 #ifndef TCBHASHSIZE
 #define TCBHASHSIZE	0
 #endif
 static int	tcp_tcbhashsize = TCBHASHSIZE;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
     &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
 
 static int	do_tcpdrain = 1;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
     "Enable tcp_drain routine for extra help when low on mbufs");
 
 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_VNET | CTLFLAG_RD,
     &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
 
 VNET_DEFINE_STATIC(int, icmp_may_rst) = 1;
 #define	V_icmp_may_rst			VNET(icmp_may_rst)
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_VNET | CTLFLAG_RW,
     &VNET_NAME(icmp_may_rst), 0,
     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
 
 VNET_DEFINE_STATIC(int, tcp_isn_reseed_interval) = 0;
 #define	V_tcp_isn_reseed_interval	VNET(tcp_isn_reseed_interval)
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_VNET | CTLFLAG_RW,
     &VNET_NAME(tcp_isn_reseed_interval), 0,
     "Seconds between reseeding of ISN secret");
 
 static int	tcp_soreceive_stream;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, soreceive_stream, CTLFLAG_RDTUN,
     &tcp_soreceive_stream, 0, "Using soreceive_stream for TCP sockets");
 
 VNET_DEFINE(uma_zone_t, sack_hole_zone);
 #define	V_sack_hole_zone		VNET(sack_hole_zone)
 VNET_DEFINE(uint32_t, tcp_map_entries_limit) = 0;	/* unlimited */
 static int
 sysctl_net_inet_tcp_map_limit_check(SYSCTL_HANDLER_ARGS)
 {
 	int error;
 	uint32_t new;
 
 	new = V_tcp_map_entries_limit;
 	error = sysctl_handle_int(oidp, &new, 0, req);
 	if (error == 0 && req->newptr) {
 		/* only allow "0" and value > minimum */
 		if (new > 0 && new < TCP_MIN_MAP_ENTRIES_LIMIT)
 			error = EINVAL;
 		else
 			V_tcp_map_entries_limit = new;
 	}
 	return (error);
 }
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, map_limit,
     CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
     &VNET_NAME(tcp_map_entries_limit), 0,
     &sysctl_net_inet_tcp_map_limit_check, "IU",
     "Total sendmap entries limit");
 
 VNET_DEFINE(uint32_t, tcp_map_split_limit) = 0;	/* unlimited */
 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, split_limit, CTLFLAG_VNET | CTLFLAG_RW,
      &VNET_NAME(tcp_map_split_limit), 0,
     "Total sendmap split entries limit");
 
 #ifdef TCP_HHOOK
 VNET_DEFINE(struct hhook_head *, tcp_hhh[HHOOK_TCP_LAST+1]);
 #endif
 
 #define TS_OFFSET_SECRET_LENGTH SIPHASH_KEY_LENGTH
 VNET_DEFINE_STATIC(u_char, ts_offset_secret[TS_OFFSET_SECRET_LENGTH]);
 #define	V_ts_offset_secret	VNET(ts_offset_secret)
 
 static int	tcp_default_fb_init(struct tcpcb *tp, void **ptr);
 static void	tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged);
 static int	tcp_default_handoff_ok(struct tcpcb *tp);
 static struct inpcb *tcp_notify(struct inpcb *, int);
 static struct inpcb *tcp_mtudisc_notify(struct inpcb *, int);
 static struct inpcb *tcp_mtudisc(struct inpcb *, int);
 static struct inpcb *tcp_drop_syn_sent(struct inpcb *, int);
 static char *	tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
 		    const void *ip4hdr, const void *ip6hdr);
 static void	tcp_default_switch_failed(struct tcpcb *tp);
 static ipproto_ctlinput_t	tcp_ctlinput;
 static udp_tun_icmp_t		tcp_ctlinput_viaudp;
 
 static struct tcp_function_block tcp_def_funcblk = {
 	.tfb_tcp_block_name = "freebsd",
 	.tfb_tcp_output = tcp_default_output,
 	.tfb_tcp_do_segment = tcp_do_segment,
 	.tfb_tcp_ctloutput = tcp_default_ctloutput,
 	.tfb_tcp_handoff_ok = tcp_default_handoff_ok,
 	.tfb_tcp_fb_init = tcp_default_fb_init,
 	.tfb_tcp_fb_fini = tcp_default_fb_fini,
 	.tfb_switch_failed = tcp_default_switch_failed,
 };
 
 static int tcp_fb_cnt = 0;
 struct tcp_funchead t_functions;
 VNET_DEFINE_STATIC(struct tcp_function_block *, tcp_func_set_ptr) = &tcp_def_funcblk;
 #define	V_tcp_func_set_ptr VNET(tcp_func_set_ptr)
 
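 /*
  * Account for a received DSACK block; bytes covered by TLP-generated
  * DSACKs are kept in separate counters from ordinary ones.
  */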
 void
 tcp_record_dsack(struct tcpcb *tp, tcp_seq start, tcp_seq end, int tlp)
 {
 	TCPSTAT_INC(tcps_dsack_count);
 	tp->t_dsack_pack++;
 	if (tlp == 0) {
 		if (SEQ_GT(end, start)) {
 			tp->t_dsack_bytes += (end - start);
 			TCPSTAT_ADD(tcps_dsack_bytes, (end - start));
 		} else {
 			tp->t_dsack_tlp_bytes += (start - end);
 			TCPSTAT_ADD(tcps_dsack_bytes, (start - end));
 		}
 	} else {
 		if (SEQ_GT(end, start)) {
 			tp->t_dsack_bytes += (end - start);
 			TCPSTAT_ADD(tcps_dsack_tlp_bytes, (end - start));
 		} else {
 			tp->t_dsack_tlp_bytes += (start - end);
 			TCPSTAT_ADD(tcps_dsack_tlp_bytes, (start - end));
 		}
 	}
 }
 
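 /*
  * Look up a function block by its registered name. The caller must
  * hold the tcp_function_lock.
  */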
 static struct tcp_function_block *
 find_tcp_functions_locked(struct tcp_function_set *fs)
 {
 	struct tcp_function *f;
 	struct tcp_function_block *blk=NULL;
 
 	TAILQ_FOREACH(f, &t_functions, tf_next) {
 		if (strcmp(f->tf_name, fs->function_set_name) == 0) {
 			blk = f->tf_fb;
 			break;
 		}
 	}
 	return(blk);
 }
 
 static struct tcp_function_block *
 find_tcp_fb_locked(struct tcp_function_block *blk, struct tcp_function **s)
 {
 	struct tcp_function_block *rblk=NULL;
 	struct tcp_function *f;
 
 	TAILQ_FOREACH(f, &t_functions, tf_next) {
 		if (f->tf_fb == blk) {
 			rblk = blk;
 			if (s) {
 				*s = f;
 			}
 			break;
 		}
 	}
 	return (rblk);
 }
 
 struct tcp_function_block *
 find_and_ref_tcp_functions(struct tcp_function_set *fs)
 {
 	struct tcp_function_block *blk;
 
 	rw_rlock(&tcp_function_lock);
 	blk = find_tcp_functions_locked(fs);
 	if (blk)
 		refcount_acquire(&blk->tfb_refcnt);
 	rw_runlock(&tcp_function_lock);
 	return(blk);
 }
 
 struct tcp_function_block *
 find_and_ref_tcp_fb(struct tcp_function_block *blk)
 {
 	struct tcp_function_block *rblk;
 
 	rw_rlock(&tcp_function_lock);
 	rblk = find_tcp_fb_locked(blk, NULL);
 	if (rblk)
 		refcount_acquire(&rblk->tfb_refcnt);
 	rw_runlock(&tcp_function_lock);
 	return(rblk);
 }
 
 /* Find a matching alias for the given tcp_function_block. */
 int
 find_tcp_function_alias(struct tcp_function_block *blk,
     struct tcp_function_set *fs)
 {
 	struct tcp_function *f;
 	int found;
 
 	found = 0;
 	rw_rlock(&tcp_function_lock);
 	TAILQ_FOREACH(f, &t_functions, tf_next) {
 		if ((f->tf_fb == blk) &&
 		    (strncmp(f->tf_name, blk->tfb_tcp_block_name,
 		        TCP_FUNCTION_NAME_LEN_MAX) != 0)) {
 			/* Matching function block with different name. */
 			strncpy(fs->function_set_name, f->tf_name,
 			    TCP_FUNCTION_NAME_LEN_MAX);
 			found = 1;
 			break;
 		}
 	}
 	/* Null terminate the string appropriately. */
 	if (found) {
 		fs->function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
 	} else {
 		fs->function_set_name[0] = '\0';
 	}
 	rw_runlock(&tcp_function_lock);
 	return (found);
 }
 
 static struct tcp_function_block *
 find_and_ref_tcp_default_fb(void)
 {
 	struct tcp_function_block *rblk;
 
 	rw_rlock(&tcp_function_lock);
 	rblk = V_tcp_func_set_ptr;
 	refcount_acquire(&rblk->tfb_refcnt);
 	rw_runlock(&tcp_function_lock);
 	return (rblk);
 }
 
 void
 tcp_switch_back_to_default(struct tcpcb *tp)
 {
 	struct tcp_function_block *tfb;
 	void *ptr = NULL;
 
 	KASSERT(tp->t_fb != &tcp_def_funcblk,
 	    ("%s: called by the built-in default stack", __func__));
 
 	/*
 	 * Now, we'll find a new function block to use.
 	 * Start by trying the current user-selected
 	 * default, unless this stack is the user-selected
 	 * default.
 	 */
 	tfb = find_and_ref_tcp_default_fb();
 	if (tfb == tp->t_fb) {
 		refcount_release(&tfb->tfb_refcnt);
 		tfb = NULL;
 	}
 	/* Does the stack accept this connection? */
 	if (tfb != NULL && tfb->tfb_tcp_handoff_ok != NULL &&
 	    (*tfb->tfb_tcp_handoff_ok)(tp)) {
 		refcount_release(&tfb->tfb_refcnt);
 		tfb = NULL;
 	}
 	/* Try to use that stack. */
 	if (tfb != NULL) {
 		/* Initialize the new stack. If it succeeds, we are done. */
 		if (tfb->tfb_tcp_fb_init == NULL ||
 		    (*tfb->tfb_tcp_fb_init)(tp, &ptr) == 0) {
 			/* Release the old stack */
 			if (tp->t_fb->tfb_tcp_fb_fini != NULL)
 				(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
 			refcount_release(&tp->t_fb->tfb_refcnt);
 			/* Now set in all the pointers */
 			tp->t_fb = tfb;
 			tp->t_fb_ptr = ptr;
 			return;
 		}
 		/*
 		 * Initialization failed. Release the reference count on
 		 * the looked up default stack.
 		 */
 		refcount_release(&tfb->tfb_refcnt);
 	}
 
 	/*
 	 * If that wasn't feasible, use the built-in default
 	 * stack which is not allowed to reject anyone.
 	 */
 	tfb = find_and_ref_tcp_fb(&tcp_def_funcblk);
 	if (tfb == NULL) {
 		/* there should always be a default */
 		panic("Can't refer to tcp_def_funcblk");
 	}
 	if (tfb->tfb_tcp_handoff_ok != NULL) {
 		if ((*tfb->tfb_tcp_handoff_ok) (tp)) {
 			/* The default stack cannot say no */
 			panic("Default stack rejects a new session?");
 		}
 	}
 	if (tfb->tfb_tcp_fb_init != NULL &&
 	    (*tfb->tfb_tcp_fb_init)(tp, &ptr)) {
 		/* The default stack cannot fail */
 		panic("Default stack initialization failed");
 	}
 	/* Now release the old stack */
 	if (tp->t_fb->tfb_tcp_fb_fini != NULL)
 		(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
 	refcount_release(&tp->t_fb->tfb_refcnt);
 	/* And set in the pointers to the new */
 	tp->t_fb = tfb;
 	tp->t_fb_ptr = ptr;
 }
 
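 /*
  * Input handler for TCP-over-UDP tunneled packets: strip the UDP
  * header out of the mbuf, adjust the IP/IPv6 length, record the
  * tunnel source port in the pkthdr and hand the packet to the
  * normal TCP input path.
  */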
 static bool
 tcp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
     const struct sockaddr *sa, void *ctx)
 {
 	struct ip *iph;
 #ifdef INET6
 	struct ip6_hdr *ip6;
 #endif
 	struct udphdr *uh;
 	struct tcphdr *th;
 	int thlen;
 	uint16_t port;
 
 	TCPSTAT_INC(tcps_tunneled_pkts);
 	if ((m->m_flags & M_PKTHDR) == 0) {
 		/* Can't handle one that is not a pkt hdr */
 		TCPSTAT_INC(tcps_tunneled_errs);
 		goto out;
 	}
 	thlen = sizeof(struct tcphdr);
 	if (m->m_len < off + sizeof(struct udphdr) + thlen &&
 	    (m = m_pullup(m, off + sizeof(struct udphdr) + thlen)) == NULL) {
 		TCPSTAT_INC(tcps_tunneled_errs);
 		goto out;
 	}
 	iph = mtod(m, struct ip *);
 	uh = (struct udphdr *)((caddr_t)iph + off);
 	th = (struct tcphdr *)(uh + 1);
 	thlen = th->th_off << 2;
 	if (m->m_len < off + sizeof(struct udphdr) + thlen) {
 		m = m_pullup(m, off + sizeof(struct udphdr) + thlen);
 		if (m == NULL) {
 			TCPSTAT_INC(tcps_tunneled_errs);
 			goto out;
 		} else {
 			iph = mtod(m, struct ip *);
 			uh = (struct udphdr *)((caddr_t)iph + off);
 			th = (struct tcphdr *)(uh + 1);
 		}
 	}
 	m->m_pkthdr.tcp_tun_port = port = uh->uh_sport;
 	bcopy(th, uh, m->m_len - off);
 	m->m_len -= sizeof(struct udphdr);
 	m->m_pkthdr.len -= sizeof(struct udphdr);
 	/*
 	 * UDP and TCP use the same checksum algorithm, so the code in
 	 * tcp_input will skip the checksum. Therefore we do nothing
 	 * with the flags in m->m_pkthdr.csum_flags.
 	 */
 	switch (iph->ip_v) {
 #ifdef INET
 	case IPVERSION:
 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
 		tcp_input_with_port(&m, &off, IPPROTO_TCP, port);
 		break;
 #endif
 #ifdef INET6
 	case IPV6_VERSION >> 4:
 		ip6 = mtod(m, struct ip6_hdr *);
 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
 		tcp6_input_with_port(&m, &off, IPPROTO_TCP, port);
 		break;
 #endif
 	default:
 		goto out;
 		break;
 	}
 	return (true);
 out:
 	m_freem(m);
 
 	return (true);
 }
 
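 /*
  * sysctl handler for net.inet.tcp.functions_default: report the name
  * of the current default stack and, on a write, switch the per-VNET
  * default to the named stack if it exists and is not being removed.
  */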
 static int
 sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
 {
 	int error=ENOENT;
 	struct tcp_function_set fs;
 	struct tcp_function_block *blk;
 
 	memset(&fs, 0, sizeof(fs));
 	rw_rlock(&tcp_function_lock);
 	blk = find_tcp_fb_locked(V_tcp_func_set_ptr, NULL);
 	if (blk) {
 		/* Found him */
 		strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
 		fs.pcbcnt = blk->tfb_refcnt;
 	}
 	rw_runlock(&tcp_function_lock);
 	error = sysctl_handle_string(oidp, fs.function_set_name,
 				     sizeof(fs.function_set_name), req);
 
 	/* Check for error or no change */
 	if (error != 0 || req->newptr == NULL)
 		return(error);
 
 	rw_wlock(&tcp_function_lock);
 	blk = find_tcp_functions_locked(&fs);
 	if ((blk == NULL) ||
 	    (blk->tfb_flags & TCP_FUNC_BEING_REMOVED)) {
 		error = ENOENT;
 		goto done;
 	}
 	V_tcp_func_set_ptr = blk;
 done:
 	rw_wunlock(&tcp_function_lock);
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_default,
     CTLFLAG_VNET | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
     NULL, 0, sysctl_net_inet_default_tcp_functions, "A",
     "Set/get the default TCP functions");
 
 static int
 sysctl_net_inet_list_available(SYSCTL_HANDLER_ARGS)
 {
 	int error, cnt, linesz;
 	struct tcp_function *f;
 	char *buffer, *cp;
 	size_t bufsz, outsz;
 	bool alias;
 
 	cnt = 0;
 	rw_rlock(&tcp_function_lock);
 	TAILQ_FOREACH(f, &t_functions, tf_next) {
 		cnt++;
 	}
 	rw_runlock(&tcp_function_lock);
 
 	bufsz = (cnt+2) * ((TCP_FUNCTION_NAME_LEN_MAX * 2) + 13) + 1;
 	buffer = malloc(bufsz, M_TEMP, M_WAITOK);
 
 	error = 0;
 	cp = buffer;
 
 	linesz = snprintf(cp, bufsz, "\n%-32s%c %-32s %s\n", "Stack", 'D',
 	    "Alias", "PCB count");
 	cp += linesz;
 	bufsz -= linesz;
 	outsz = linesz;
 
 	rw_rlock(&tcp_function_lock);
 	TAILQ_FOREACH(f, &t_functions, tf_next) {
 		alias = (f->tf_name != f->tf_fb->tfb_tcp_block_name);
 		linesz = snprintf(cp, bufsz, "%-32s%c %-32s %u\n",
 		    f->tf_fb->tfb_tcp_block_name,
 		    (f->tf_fb == V_tcp_func_set_ptr) ? '*' : ' ',
 		    alias ? f->tf_name : "-",
 		    f->tf_fb->tfb_refcnt);
 		if (linesz >= bufsz) {
 			error = EOVERFLOW;
 			break;
 		}
 		cp += linesz;
 		bufsz -= linesz;
 		outsz += linesz;
 	}
 	rw_runlock(&tcp_function_lock);
 	if (error == 0)
 		error = sysctl_handle_string(oidp, buffer, outsz + 1, req);
 	free(buffer, M_TEMP);
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, functions_available,
     CTLFLAG_VNET | CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
     NULL, 0, sysctl_net_inet_list_available, "A",
     "list available TCP Function sets");
 
 VNET_DEFINE(int, tcp_udp_tunneling_port) = TCP_TUNNELING_PORT_DEFAULT;
 
 #ifdef INET
 VNET_DEFINE(struct socket *, udp4_tun_socket) = NULL;
 #define	V_udp4_tun_socket	VNET(udp4_tun_socket)
 #endif
 #ifdef INET6
 VNET_DEFINE(struct socket *, udp6_tun_socket) = NULL;
 #define	V_udp6_tun_socket	VNET(udp6_tun_socket)
 #endif
 
 static struct sx tcpoudp_lock;
 
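 /*
  * Close any open TCP-over-UDP tunneling sockets. Called with the
  * tcpoudp_lock exclusively held.
  */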
 static void
 tcp_over_udp_stop(void)
 {
 
 	sx_assert(&tcpoudp_lock, SA_XLOCKED);
 
 #ifdef INET
 	if (V_udp4_tun_socket != NULL) {
 		soclose(V_udp4_tun_socket);
 		V_udp4_tun_socket = NULL;
 	}
 #endif
 #ifdef INET6
 	if (V_udp6_tun_socket != NULL) {
 		soclose(V_udp6_tun_socket);
 		V_udp6_tun_socket = NULL;
 	}
 #endif
 }
 
 static int
 tcp_over_udp_start(void)
 {
 	uint16_t port;
 	int ret;
 #ifdef INET
 	struct sockaddr_in sin;
 #endif
 #ifdef INET6
 	struct sockaddr_in6 sin6;
 #endif
 
 	sx_assert(&tcpoudp_lock, SA_XLOCKED);
 
 	port = V_tcp_udp_tunneling_port;
 	if (ntohs(port) == 0) {
 		/* Must have a port set */
 		return (EINVAL);
 	}
 #ifdef INET
 	if (V_udp4_tun_socket != NULL) {
 		/* Already running -- must stop first */
 		return (EALREADY);
 	}
 #endif
 #ifdef INET6
 	if (V_udp6_tun_socket != NULL) {
 		/* Already running -- must stop first */
 		return (EALREADY);
 	}
 #endif
 #ifdef INET
 	if ((ret = socreate(PF_INET, &V_udp4_tun_socket,
 	    SOCK_DGRAM, IPPROTO_UDP,
 	    curthread->td_ucred, curthread))) {
 		tcp_over_udp_stop();
 		return (ret);
 	}
 	/* Call the special UDP hook. */
 	if ((ret = udp_set_kernel_tunneling(V_udp4_tun_socket,
 	    tcp_recv_udp_tunneled_packet,
 	    tcp_ctlinput_viaudp,
 	    NULL))) {
 		tcp_over_udp_stop();
 		return (ret);
 	}
 	/* Ok, we have a socket, bind it to the port. */
 	memset(&sin, 0, sizeof(struct sockaddr_in));
 	sin.sin_len = sizeof(struct sockaddr_in);
 	sin.sin_family = AF_INET;
 	sin.sin_port = htons(port);
 	if ((ret = sobind(V_udp4_tun_socket,
 	    (struct sockaddr *)&sin, curthread))) {
 		tcp_over_udp_stop();
 		return (ret);
 	}
 #endif
 #ifdef INET6
 	if ((ret = socreate(PF_INET6, &V_udp6_tun_socket,
 	    SOCK_DGRAM, IPPROTO_UDP,
 	    curthread->td_ucred, curthread))) {
 		tcp_over_udp_stop();
 		return (ret);
 	}
 	/* Call the special UDP hook. */
 	if ((ret = udp_set_kernel_tunneling(V_udp6_tun_socket,
 	    tcp_recv_udp_tunneled_packet,
 	    tcp6_ctlinput_viaudp,
 	    NULL))) {
 		tcp_over_udp_stop();
 		return (ret);
 	}
 	/* Ok, we have a socket, bind it to the port. */
 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
 	sin6.sin6_len = sizeof(struct sockaddr_in6);
 	sin6.sin6_family = AF_INET6;
 	sin6.sin6_port = htons(port);
 	if ((ret = sobind(V_udp6_tun_socket,
 	    (struct sockaddr *)&sin6, curthread))) {
 		tcp_over_udp_stop();
 		return (ret);
 	}
 #endif
 	return (0);
 }
 
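 /*
  * sysctl handler for the tunneling port: validate the new port number
  * and restart the tunneling sockets on it while holding tcpoudp_lock.
  */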
 static int
 sysctl_net_inet_tcp_udp_tunneling_port_check(SYSCTL_HANDLER_ARGS)
 {
 	int error;
 	uint32_t old, new;
 
 	old = V_tcp_udp_tunneling_port;
 	new = old;
 	error = sysctl_handle_int(oidp, &new, 0, req);
 	if ((error == 0) &&
 	    (req->newptr != NULL)) {
 		if ((new < TCP_TUNNELING_PORT_MIN) ||
 		    (new > TCP_TUNNELING_PORT_MAX)) {
 			error = EINVAL;
 		} else {
 			sx_xlock(&tcpoudp_lock);
 			V_tcp_udp_tunneling_port = new;
 			if (old != 0) {
 				tcp_over_udp_stop();
 			}
 			if (new != 0) {
 				error = tcp_over_udp_start();
 				if (error != 0) {
 					V_tcp_udp_tunneling_port = 0;
 				}
 			}
 			sx_xunlock(&tcpoudp_lock);
 		}
 	}
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_port,
     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
     &VNET_NAME(tcp_udp_tunneling_port),
     0, &sysctl_net_inet_tcp_udp_tunneling_port_check, "IU",
     "Tunneling port for tcp over udp");
 
 VNET_DEFINE(int, tcp_udp_tunneling_overhead) = TCP_TUNNELING_OVERHEAD_DEFAULT;
 
 static int
 sysctl_net_inet_tcp_udp_tunneling_overhead_check(SYSCTL_HANDLER_ARGS)
 {
 	int error, new;
 
 	new = V_tcp_udp_tunneling_overhead;
 	error = sysctl_handle_int(oidp, &new, 0, req);
 	if (error == 0 && req->newptr) {
 		if ((new < TCP_TUNNELING_OVERHEAD_MIN) ||
 		    (new > TCP_TUNNELING_OVERHEAD_MAX))
 			error = EINVAL;
 		else
 			V_tcp_udp_tunneling_overhead = new;
 	}
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, udp_tunneling_overhead,
     CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
     &VNET_NAME(tcp_udp_tunneling_overhead),
     0, &sysctl_net_inet_tcp_udp_tunneling_overhead_check, "IU",
     "MSS reduction when using tcp over udp");
 
 /*
  * Exports one (struct tcp_function_info) for each alias/name.
  */
 static int
 sysctl_net_inet_list_func_info(SYSCTL_HANDLER_ARGS)
 {
 	int cnt, error;
 	struct tcp_function *f;
 	struct tcp_function_info tfi;
 
 	/*
 	 * We don't allow writes.
 	 */
 	if (req->newptr != NULL)
 		return (EINVAL);
 
 	/*
 	 * Wire the old buffer so we can directly copy the functions to
 	 * user space without dropping the lock.
 	 */
 	if (req->oldptr != NULL) {
 		error = sysctl_wire_old_buffer(req, 0);
 		if (error)
 			return (error);
 	}
 
 	/*
 	 * Walk the list and copy out matching entries. If INVARIANTS
 	 * is compiled in, also walk the list to verify the length of
 	 * the list matches what we have recorded.
 	 */
 	rw_rlock(&tcp_function_lock);
 
 	cnt = 0;
 #ifndef INVARIANTS
 	if (req->oldptr == NULL) {
 		cnt = tcp_fb_cnt;
 		goto skip_loop;
 	}
 #endif
 	TAILQ_FOREACH(f, &t_functions, tf_next) {
 #ifdef INVARIANTS
 		cnt++;
 #endif
 		if (req->oldptr != NULL) {
 			bzero(&tfi, sizeof(tfi));
 			tfi.tfi_refcnt = f->tf_fb->tfb_refcnt;
 			tfi.tfi_id = f->tf_fb->tfb_id;
 			(void)strlcpy(tfi.tfi_alias, f->tf_name,
 			    sizeof(tfi.tfi_alias));
 			(void)strlcpy(tfi.tfi_name,
 			    f->tf_fb->tfb_tcp_block_name, sizeof(tfi.tfi_name));
 			error = SYSCTL_OUT(req, &tfi, sizeof(tfi));
 			/*
 			 * Don't stop on error, as that is the
 			 * mechanism we use to accumulate length
 			 * information if the buffer was too short.
 			 */
 		}
 	}
 	KASSERT(cnt == tcp_fb_cnt,
 	    ("%s: cnt (%d) != tcp_fb_cnt (%d)", __func__, cnt, tcp_fb_cnt));
 #ifndef INVARIANTS
 skip_loop:
 #endif
 	rw_runlock(&tcp_function_lock);
 	if (req->oldptr == NULL)
 		error = SYSCTL_OUT(req, NULL,
 		    (cnt + 1) * sizeof(struct tcp_function_info));
 
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, function_info,
 	    CTLTYPE_OPAQUE | CTLFLAG_SKIP | CTLFLAG_RD | CTLFLAG_MPSAFE,
 	    NULL, 0, sysctl_net_inet_list_func_info, "S,tcp_function_info",
 	    "List TCP function block name-to-ID mappings");
 
 /*
  * tfb_tcp_handoff_ok() function for the default stack.
  * Note that we'll basically try to take all comers.
  */
 static int
 tcp_default_handoff_ok(struct tcpcb *tp)
 {
 
 	return (0);
 }
 
 /*
  * tfb_tcp_fb_init() function for the default stack.
  *
  * This handles making sure we have appropriate timers set if you are
  * transitioning a socket that has some amount of setup done.
  *
  * The init() function for the default stack can *never* return non-zero,
  * i.e. it is required to always succeed since it is the stack of last resort!
  */
 static int
 tcp_default_fb_init(struct tcpcb *tp, void **ptr)
 {
 	struct socket *so = tptosocket(tp);
 	int rexmt;
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	/* We don't use the pointer */
 	*ptr = NULL;
 
 	KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT,
 	    ("%s: connection %p in unexpected state %d", __func__, tp,
 	    tp->t_state));
 
 	/*
 	 * Make sure we get no interesting mbuf queuing behavior; all mbuf
 	 * queue/ack compress flags should be off.
 	 */
 	tcp_lro_features_off(tp);
 
 	/* Cancel the GP measurement in progress */
 	tp->t_flags &= ~TF_GPUTINPROG;
 	/* Validate the timers are not in usec, if they are convert */
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
 	if ((tp->t_state == TCPS_SYN_SENT) ||
 	    (tp->t_state == TCPS_SYN_RECEIVED))
 		rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift];
 	else
 		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
 	if (tp->t_rxtshift == 0)
 		tp->t_rxtcur = rexmt;
 	else
 		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX);
 
 	/*
 	 * Nothing to do for ESTABLISHED or LISTEN states. And, we don't
 	 * know what to do for unexpected states (which includes TIME_WAIT).
 	 */
 	if (tp->t_state <= TCPS_LISTEN || tp->t_state >= TCPS_TIME_WAIT)
 		return (0);
 
 	/*
 	 * Make sure some kind of transmission timer is set if there is
 	 * outstanding data.
 	 */
 	if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) ||
 	    tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) ||
 	    tcp_timer_active(tp, TT_PERSIST))) {
 		/*
 		 * If the session is established and it looks like it should
 		 * be in the persist state, set the persist timer. Otherwise,
 		 * set the retransmit timer.
 		 */
 		if (TCPS_HAVEESTABLISHED(tp->t_state) && tp->snd_wnd == 0 &&
 		    (int32_t)(tp->snd_nxt - tp->snd_una) <
 		    (int32_t)sbavail(&so->so_snd))
 			tcp_setpersist(tp);
 		else
 			tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
 	}
 
 	/* All non-embryonic sessions get a keepalive timer. */
 	if (!tcp_timer_active(tp, TT_KEEP))
 		tcp_timer_activate(tp, TT_KEEP,
 		    TCPS_HAVEESTABLISHED(tp->t_state) ? TP_KEEPIDLE(tp) :
 		    TP_KEEPINIT(tp));
 
 	/*
 	 * Make sure critical variables are initialized
 	 * if transitioning while in Recovery.
 	 */
 	if (IN_FASTRECOVERY(tp->t_flags)) {
 		if (tp->sackhint.recover_fs == 0)
 			tp->sackhint.recover_fs = max(1,
 			    tp->snd_nxt - tp->snd_una);
 	}
 
 	return (0);
 }
 
 /*
  * tfb_tcp_fb_fini() function for the default stack.
  *
  * This changes state as necessary (or prudent) to prepare for another stack
  * to assume responsibility for the connection.
  */
 static void
 tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged)
 {
 
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 #ifdef TCP_BLACKBOX
 	tcp_log_flowend(tp);
 #endif
 	tp->t_acktime = 0;
 	return;
 }
 
 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
 MALLOC_DEFINE(M_TCPFUNCTIONS, "tcpfunc", "TCP function set memory");
 
 static struct mtx isn_mtx;
 
 #define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
 #define	ISN_LOCK()	mtx_lock(&isn_mtx)
 #define	ISN_UNLOCK()	mtx_unlock(&isn_mtx)
 
 INPCBSTORAGE_DEFINE(tcpcbstor, tcpcb, "tcpinp", "tcp_inpcb", "tcp", "tcphash");
 
 /*
  * Take a value and get the next power of 2 that doesn't overflow.
  * Used to size the tcp_inpcb hash buckets.
  */
 static int
 maketcp_hashsize(int size)
 {
 	int hashsize;
 
 	/*
 	 * Auto tune: get the next power of 2 higher than the
 	 * requested size.
 	 */
 	hashsize = 1 << fls(size);
 	/* catch overflow, and just go one power of 2 smaller */
 	if (hashsize < size) {
 		hashsize = 1 << (fls(size) - 1);
 	}
 	return (hashsize);
 }
 
 static volatile int next_tcp_stack_id = 1;
 
 /*
  * Register a TCP function block with the name provided in the names
  * array.  (Note that this function does NOT automatically register
  * blk->tfb_tcp_block_name as a stack name.  Therefore, you should
  * explicitly include blk->tfb_tcp_block_name in the list of names if
  * you wish to register the stack with that name.)
  *
  * Either all name registrations will succeed or all will fail.  If
  * a name registration fails, the function will update the num_names
  * argument to point to the array index of the name that encountered
  * the failure.
  *
  * Returns 0 on success, or an error code on failure.
  */
 int
 register_tcp_functions_as_names(struct tcp_function_block *blk, int wait,
     const char *names[], int *num_names)
 {
 	struct tcp_function *n;
 	struct tcp_function_set fs;
 	int error, i;
 
 	KASSERT(names != NULL && *num_names > 0,
 	    ("%s: Called with 0-length name list", __func__));
 	KASSERT(names != NULL, ("%s: Called with NULL name list", __func__));
 	KASSERT(rw_initialized(&tcp_function_lock),
 	    ("%s: called too early", __func__));
 
 	if ((blk->tfb_tcp_output == NULL) ||
 	    (blk->tfb_tcp_do_segment == NULL) ||
 	    (blk->tfb_tcp_ctloutput == NULL) ||
 	    (strlen(blk->tfb_tcp_block_name) == 0)) {
 		/*
 		 * These functions are required and you
 		 * need a name.
 		 */
 		*num_names = 0;
 		return (EINVAL);
 	}
 
 	if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
 		*num_names = 0;
 		return (EINVAL);
 	}
 
 	refcount_init(&blk->tfb_refcnt, 0);
 	blk->tfb_id = atomic_fetchadd_int(&next_tcp_stack_id, 1);
 	for (i = 0; i < *num_names; i++) {
 		n = malloc(sizeof(struct tcp_function), M_TCPFUNCTIONS, wait);
 		if (n == NULL) {
 			error = ENOMEM;
 			goto cleanup;
 		}
 		n->tf_fb = blk;
 
 		(void)strlcpy(fs.function_set_name, names[i],
 		    sizeof(fs.function_set_name));
 		rw_wlock(&tcp_function_lock);
 		if (find_tcp_functions_locked(&fs) != NULL) {
 			/* Duplicate name space not allowed */
 			rw_wunlock(&tcp_function_lock);
 			free(n, M_TCPFUNCTIONS);
 			error = EALREADY;
 			goto cleanup;
 		}
 		(void)strlcpy(n->tf_name, names[i], sizeof(n->tf_name));
 		TAILQ_INSERT_TAIL(&t_functions, n, tf_next);
 		tcp_fb_cnt++;
 		rw_wunlock(&tcp_function_lock);
 	}
 	return(0);
 
 cleanup:
 	/*
 	 * Deregister the names we just added. Because registration failed
 	 * for names[i], we don't need to deregister that name.
 	 */
 	*num_names = i;
 	rw_wlock(&tcp_function_lock);
 	while (--i >= 0) {
 		TAILQ_FOREACH(n, &t_functions, tf_next) {
 			if (!strncmp(n->tf_name, names[i],
 			    TCP_FUNCTION_NAME_LEN_MAX)) {
 				TAILQ_REMOVE(&t_functions, n, tf_next);
 				tcp_fb_cnt--;
 				n->tf_fb = NULL;
 				free(n, M_TCPFUNCTIONS);
 				break;
 			}
 		}
 	}
 	rw_wunlock(&tcp_function_lock);
 	return (error);
 }
 
 /*
  * Register a TCP function block using the name provided in the name
  * argument.
  *
  * Returns 0 on success, or an error code on failure.
  */
 int
 register_tcp_functions_as_name(struct tcp_function_block *blk, const char *name,
     int wait)
 {
 	const char *name_list[1];
 	int num_names, rv;
 
 	num_names = 1;
 	if (name != NULL)
 		name_list[0] = name;
 	else
 		name_list[0] = blk->tfb_tcp_block_name;
 	rv = register_tcp_functions_as_names(blk, wait, name_list, &num_names);
 	return (rv);
 }
 
 /*
  * Register a TCP function block using the name defined in
  * blk->tfb_tcp_block_name.
  *
  * Returns 0 on success, or an error code on failure.
  */
 int
 register_tcp_functions(struct tcp_function_block *blk, int wait)
 {
 
 	return (register_tcp_functions_as_name(blk, NULL, wait));
 }
 
 /*
  * Deregister all names associated with a function block. This
  * functionally removes the function block from use within the system.
  *
  * When called with a true quiesce argument, mark the function block
  * as being removed so no more stacks will use it and determine
  * whether the removal would succeed.
  *
  * When called with a false quiesce argument, actually attempt the
  * removal.
  *
  * When called with a force argument, attempt to switch all TCBs to
  * use the default stack instead of returning EBUSY.
  *
  * Returns 0 on success (or if the removal would succeed), or an error
  * code on failure.
  */
 int
 deregister_tcp_functions(struct tcp_function_block *blk, bool quiesce,
     bool force)
 {
 	struct tcp_function *f;
 	VNET_ITERATOR_DECL(vnet_iter);
 
 	if (blk == &tcp_def_funcblk) {
 		/* You can't un-register the default */
 		return (EPERM);
 	}
 	rw_wlock(&tcp_function_lock);
 	VNET_LIST_RLOCK_NOSLEEP();
 	VNET_FOREACH(vnet_iter) {
 		CURVNET_SET(vnet_iter);
 		if (blk == V_tcp_func_set_ptr) {
 			/* You can't free the current default in some vnet. */
 			CURVNET_RESTORE();
 			VNET_LIST_RUNLOCK_NOSLEEP();
 			rw_wunlock(&tcp_function_lock);
 			return (EBUSY);
 		}
 		CURVNET_RESTORE();
 	}
 	VNET_LIST_RUNLOCK_NOSLEEP();
 	/* Mark the block so no more stacks can use it. */
 	blk->tfb_flags |= TCP_FUNC_BEING_REMOVED;
 	/*
 	 * If TCBs are still attached to the stack, attempt to switch them
 	 * to the default stack.
 	 */
 	if (force && blk->tfb_refcnt) {
 		struct inpcb *inp;
 		struct tcpcb *tp;
 		VNET_ITERATOR_DECL(vnet_iter);
 
 		rw_wunlock(&tcp_function_lock);
 
 		VNET_LIST_RLOCK();
 		VNET_FOREACH(vnet_iter) {
 			CURVNET_SET(vnet_iter);
 			struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
 			    INPLOOKUP_WLOCKPCB);
 
 			while ((inp = inp_next(&inpi)) != NULL) {
 				tp = intotcpcb(inp);
 				if (tp == NULL || tp->t_fb != blk)
 					continue;
 				tcp_switch_back_to_default(tp);
 			}
 			CURVNET_RESTORE();
 		}
 		VNET_LIST_RUNLOCK();
 
 		rw_wlock(&tcp_function_lock);
 	}
 	if (blk->tfb_refcnt) {
 		/* TCBs still attached. */
 		rw_wunlock(&tcp_function_lock);
 		return (EBUSY);
 	}
 	if (quiesce) {
 		/* Skip removal. */
 		rw_wunlock(&tcp_function_lock);
 		return (0);
 	}
 	/* Remove any function names that map to this function block. */
 	while (find_tcp_fb_locked(blk, &f) != NULL) {
 		TAILQ_REMOVE(&t_functions, f, tf_next);
 		tcp_fb_cnt--;
 		f->tf_fb = NULL;
 		free(f, M_TCPFUNCTIONS);
 	}
 	rw_wunlock(&tcp_function_lock);
 	return (0);
 }
 
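 /*
  * Low-memory handler (registered for vm_lowmem and mbuf_lowmem): walk
  * every connection in every vnet and flush reassembly queues, SACK
  * reports and other per-connection caches to free up mbufs.
  */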
 static void
 tcp_drain(void)
 {
 	struct epoch_tracker et;
 	VNET_ITERATOR_DECL(vnet_iter);
 
 	if (!do_tcpdrain)
 		return;
 
 	NET_EPOCH_ENTER(et);
 	VNET_LIST_RLOCK_NOSLEEP();
 	VNET_FOREACH(vnet_iter) {
 		CURVNET_SET(vnet_iter);
 		struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
 		    INPLOOKUP_WLOCKPCB);
 		struct inpcb *inpb;
 		struct tcpcb *tcpb;
 
 	/*
 	 * Walk the tcpbs, if existing, and flush the reassembly queue,
 	 * if there is one...
 	 * XXX: The "Net/3" implementation doesn't imply that the TCP
 	 *      reassembly queue should be flushed, but in a situation
 	 *	where we're really low on mbufs, this is potentially
 	 *	useful.
 	 */
 		while ((inpb = inp_next(&inpi)) != NULL) {
 			if ((tcpb = intotcpcb(inpb)) != NULL) {
 				tcp_reass_flush(tcpb);
 				tcp_clean_sackreport(tcpb);
 #ifdef TCP_BLACKBOX
 				tcp_log_drain(tcpb);
 #endif
 #ifdef TCPPCAP
 				if (tcp_pcap_aggressive_free) {
 					/* Free the TCP PCAP queues. */
 					tcp_pcap_drain(&(tcpb->t_inpkts));
 					tcp_pcap_drain(&(tcpb->t_outpkts));
 				}
 #endif
 			}
 		}
 		CURVNET_RESTORE();
 	}
 	VNET_LIST_RUNLOCK_NOSLEEP();
 	NET_EPOCH_EXIT(et);
 }
 
 static void
 tcp_vnet_init(void *arg __unused)
 {
 
 #ifdef TCP_HHOOK
 	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
 	    &V_tcp_hhh[HHOOK_TCP_EST_IN], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
 		printf("%s: WARNING: unable to register helper hook\n", __func__);
 	if (hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT,
 	    &V_tcp_hhh[HHOOK_TCP_EST_OUT], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
 		printf("%s: WARNING: unable to register helper hook\n", __func__);
 #endif
 #ifdef STATS
 	if (tcp_stats_init())
 		printf("%s: WARNING: unable to initialise TCP stats\n",
 		    __func__);
 #endif
 	in_pcbinfo_init(&V_tcbinfo, &tcpcbstor, tcp_tcbhashsize,
 	    tcp_tcbhashsize);
 
 	syncache_init();
 	tcp_hc_init();
 
 	TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
 	V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 
 	tcp_fastopen_init();
 
 	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
 	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
 
 	V_tcp_msl = TCPTV_MSL;
 }
 VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH,
     tcp_vnet_init, NULL);
 
 static void
 tcp_init(void *arg __unused)
 {
 	int hashsize;
 
 	tcp_reass_global_init();
 
 	/* XXX virtualize those below? */
 	tcp_delacktime = TCPTV_DELACK;
 	tcp_keepinit = TCPTV_KEEP_INIT;
 	tcp_keepidle = TCPTV_KEEP_IDLE;
 	tcp_keepintvl = TCPTV_KEEPINTVL;
 	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
 	tcp_rexmit_initial = TCPTV_RTOBASE;
 	if (tcp_rexmit_initial < 1)
 		tcp_rexmit_initial = 1;
 	tcp_rexmit_min = TCPTV_MIN;
 	if (tcp_rexmit_min < 1)
 		tcp_rexmit_min = 1;
 	tcp_persmin = TCPTV_PERSMIN;
 	tcp_persmax = TCPTV_PERSMAX;
 	tcp_rexmit_slop = TCPTV_CPU_VAR;
 	tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
 
 	/* Setup the tcp function block list */
 	TAILQ_INIT(&t_functions);
 	rw_init(&tcp_function_lock, "tcp_func_lock");
 	register_tcp_functions(&tcp_def_funcblk, M_WAITOK);
 	sx_init(&tcpoudp_lock, "TCP over UDP configuration");
 #ifdef TCP_BLACKBOX
 	/* Initialize the TCP logging data. */
 	tcp_log_init();
 #endif
 	arc4rand(&V_ts_offset_secret, sizeof(V_ts_offset_secret), 0);
 
 	if (tcp_soreceive_stream) {
 #ifdef INET
 		tcp_protosw.pr_soreceive = soreceive_stream;
 #endif
 #ifdef INET6
 		tcp6_protosw.pr_soreceive = soreceive_stream;
 #endif /* INET6 */
 	}
 
 #ifdef INET6
 	max_protohdr_grow(sizeof(struct ip6_hdr) + sizeof(struct tcphdr));
 #else /* INET6 */
 	max_protohdr_grow(sizeof(struct tcpiphdr));
 #endif /* INET6 */
 
 	ISN_LOCK_INIT();
 	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
 		SHUTDOWN_PRI_DEFAULT);
 	EVENTHANDLER_REGISTER(vm_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);
 	EVENTHANDLER_REGISTER(mbuf_lowmem, tcp_drain, NULL, LOWMEM_PRI_DEFAULT);
 
 	tcp_inp_lro_direct_queue = counter_u64_alloc(M_WAITOK);
 	tcp_inp_lro_wokeup_queue = counter_u64_alloc(M_WAITOK);
 	tcp_inp_lro_compressed = counter_u64_alloc(M_WAITOK);
 	tcp_inp_lro_locks_taken = counter_u64_alloc(M_WAITOK);
 	tcp_extra_mbuf = counter_u64_alloc(M_WAITOK);
 	tcp_would_have_but = counter_u64_alloc(M_WAITOK);
 	tcp_comp_total = counter_u64_alloc(M_WAITOK);
 	tcp_uncomp_total = counter_u64_alloc(M_WAITOK);
 	tcp_bad_csums = counter_u64_alloc(M_WAITOK);
 	tcp_pacing_failures = counter_u64_alloc(M_WAITOK);
 #ifdef TCPPCAP
 	tcp_pcap_init();
 #endif
 
 	hashsize = tcp_tcbhashsize;
 	if (hashsize == 0) {
 		/*
 		 * Auto tune the hash size based on maxsockets.
 		 * A perfect hash would have a 1:1 mapping
 		 * (hashsize = maxsockets); however, it's been
 		 * suggested that an O(2) average is better.
 		 */
 		hashsize = maketcp_hashsize(maxsockets / 4);
 		/*
 		 * Our historical default is 512;
 		 * do not autotune lower than this.
 		 */
 		if (hashsize < 512)
 			hashsize = 512;
 		if (bootverbose)
 			printf("%s: %s auto tuned to %d\n", __func__,
 			    "net.inet.tcp.tcbhashsize", hashsize);
 	}
 	/*
 	 * We require the hashsize to be a power of two.
 	 * Previously, if it was not a power of two, we would simply reset it
 	 * back to 512, which could be a nasty surprise if you did not notice
 	 * the error message.
 	 * Instead, we clip it to the closest power of two below the
 	 * specified hash value.
 	 */
 	if (!powerof2(hashsize)) {
 		int oldhashsize = hashsize;
 
 		hashsize = maketcp_hashsize(hashsize);
 		/* prevent absurdly low value */
 		if (hashsize < 16)
 			hashsize = 16;
 		printf("%s: WARNING: TCB hash size not a power of 2, "
 		    "clipped from %d to %d.\n", __func__, oldhashsize,
 		    hashsize);
 	}
 	tcp_tcbhashsize = hashsize;
 
 #ifdef INET
 	IPPROTO_REGISTER(IPPROTO_TCP, tcp_input, tcp_ctlinput);
 #endif
 #ifdef INET6
 	IP6PROTO_REGISTER(IPPROTO_TCP, tcp6_input, tcp6_ctlinput);
 #endif
 }
 SYSINIT(tcp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, tcp_init, NULL);
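 
 /*
  * Editor's illustrative sketch (hypothetical helper, not part of the
  * source tree): one way to round a requested TCB hash size down to a
  * power of two, mirroring the clipping behaviour described in
  * tcp_init() above.  The in-tree maketcp_hashsize() may differ in
  * detail; the floor of 16 matches the "absurdly low value" check.
  */
 static inline int
 example_hashsize_round(int request)
 {
 	int hashsize = 1;
 
 	/* Largest power of two that does not exceed the request. */
 	while ((hashsize << 1) > 0 && (hashsize << 1) <= request)
 		hashsize <<= 1;
 	/* Same floor that tcp_init() applies. */
 	if (hashsize < 16)
 		hashsize = 16;
 	return (hashsize);
 }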
 
 #ifdef VIMAGE
 static void
 tcp_destroy(void *unused __unused)
 {
 	int n;
 #ifdef TCP_HHOOK
 	int error;
 #endif
 
 	/*
 	 * All our processes are gone and all our sockets should be cleaned
 	 * up, which means we should be past the tcp_discardcb() calls.
 	 * Sleep to let all tcpcb timers really disappear and clean up.
 	 */
 	for (;;) {
 		INP_INFO_WLOCK(&V_tcbinfo);
 		n = V_tcbinfo.ipi_count;
 		INP_INFO_WUNLOCK(&V_tcbinfo);
 		if (n == 0)
 			break;
 		pause("tcpdes", hz / 10);
 	}
 	tcp_hc_destroy();
 	syncache_destroy();
 	in_pcbinfo_destroy(&V_tcbinfo);
 	/* tcp_discardcb() clears the sack_holes up. */
 	uma_zdestroy(V_sack_hole_zone);
 
 	/*
 	 * Cannot free the zone until all tcpcbs are released as we attach
 	 * the allocations to them.
 	 */
 	tcp_fastopen_destroy();
 
 	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
 	VNET_PCPUSTAT_FREE(tcpstat);
 
 #ifdef TCP_HHOOK
 	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_IN]);
 	if (error != 0) {
 		printf("%s: WARNING: unable to deregister helper hook "
 		    "type=%d, id=%d: error %d returned\n", __func__,
 		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN, error);
 	}
 	error = hhook_head_deregister(V_tcp_hhh[HHOOK_TCP_EST_OUT]);
 	if (error != 0) {
 		printf("%s: WARNING: unable to deregister helper hook "
 		    "type=%d, id=%d: error %d returned\n", __func__,
 		    HHOOK_TYPE_TCP, HHOOK_TCP_EST_OUT, error);
 	}
 #endif
 }
 VNET_SYSUNINIT(tcp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, tcp_destroy, NULL);
 #endif
 
 void
 tcp_fini(void *xtp)
 {
 
 }
 
 /*
  * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
  * tcp_template used to store this data in mbufs, but we now recopy it out
  * of the tcpcb each time to conserve mbufs.
  */
 void
 tcpip_fillheaders(struct inpcb *inp, uint16_t port, void *ip_ptr, void *tcp_ptr)
 {
 	struct tcphdr *th = (struct tcphdr *)tcp_ptr;
 
 	INP_WLOCK_ASSERT(inp);
 
 #ifdef INET6
 	if ((inp->inp_vflag & INP_IPV6) != 0) {
 		struct ip6_hdr *ip6;
 
 		ip6 = (struct ip6_hdr *)ip_ptr;
 		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
 			(inp->inp_flow & IPV6_FLOWINFO_MASK);
 		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
 			(IPV6_VERSION & IPV6_VERSION_MASK);
 		if (port == 0)
 			ip6->ip6_nxt = IPPROTO_TCP;
 		else
 			ip6->ip6_nxt = IPPROTO_UDP;
 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
 		ip6->ip6_src = inp->in6p_laddr;
 		ip6->ip6_dst = inp->in6p_faddr;
 	}
 #endif /* INET6 */
 #if defined(INET6) && defined(INET)
 	else
 #endif
 #ifdef INET
 	{
 		struct ip *ip;
 
 		ip = (struct ip *)ip_ptr;
 		ip->ip_v = IPVERSION;
 		ip->ip_hl = 5;
 		ip->ip_tos = inp->inp_ip_tos;
 		ip->ip_len = 0;
 		ip->ip_id = 0;
 		ip->ip_off = 0;
 		ip->ip_ttl = inp->inp_ip_ttl;
 		ip->ip_sum = 0;
 		if (port == 0)
 			ip->ip_p = IPPROTO_TCP;
 		else
 			ip->ip_p = IPPROTO_UDP;
 		ip->ip_src = inp->inp_laddr;
 		ip->ip_dst = inp->inp_faddr;
 	}
 #endif /* INET */
 	th->th_sport = inp->inp_lport;
 	th->th_dport = inp->inp_fport;
 	th->th_seq = 0;
 	th->th_ack = 0;
 	th->th_off = 5;
 	tcp_set_flags(th, 0);
 	th->th_win = 0;
 	th->th_urp = 0;
 	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
 }
 
 /*
  * Create a template to be used to send TCP packets on a connection.
  * Allocates a buffer and fills in a skeletal TCP/IP header.  The only
  * use for this function is in keepalives, which use tcp_respond.
  */
 struct tcptemp *
 tcpip_maketemplate(struct inpcb *inp)
 {
 	struct tcptemp *t;
 
 	t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
 	if (t == NULL)
 		return (NULL);
 	tcpip_fillheaders(inp, 0, (void *)&t->tt_ipgen, (void *)&t->tt_t);
 	return (t);
 }
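 
 /*
  * Editor's illustrative sketch (hypothetical helper, not part of the
  * source tree): the keepalive-style usage that tcpip_maketemplate()
  * and tcp_respond() are intended for.  Assumes the caller holds the
  * inpcb write lock and runs inside a network epoch, as tcp_respond()
  * requires.
  */
 static inline void
 example_send_keepalive_probe(struct tcpcb *tp, struct inpcb *inp)
 {
 	struct tcptemp *t;
 
 	t = tcpip_maketemplate(inp);
 	if (t != NULL) {
 		/*
 		 * An ACK carrying an old sequence number (snd_una - 1)
 		 * provokes the peer into acknowledging, which shows the
 		 * connection is still alive.
 		 */
 		tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 		    tp->rcv_nxt, tp->snd_una - 1, 0);
 		free(t, M_TEMP);
 	}
 }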
 
 /*
  * Send a single message to the TCP at address specified by
  * the given TCP/IP header.  If m == NULL, then we make a copy
  * of the tcpiphdr at th and send directly to the addressed host.
  * This is used to force keepalive messages out using the TCP
  * template for a connection.  If flags are given then we send
  * a message back to the TCP which originated the segment th,
  * and discard the mbuf containing it and any other attached mbufs.
  *
  * In any case the ack and sequence number of the transmitted
  * segment are as specified by the parameters.
  *
  * NOTE: If m != NULL, then th must point to *inside* the mbuf.
  */
 void
 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
     tcp_seq ack, tcp_seq seq, uint16_t flags)
 {
 	struct tcpopt to;
 	struct inpcb *inp;
 	struct ip *ip;
 	struct mbuf *optm;
 	struct udphdr *uh = NULL;
 	struct tcphdr *nth;
 	struct tcp_log_buffer *lgb;
 	u_char *optp;
 #ifdef INET6
 	struct ip6_hdr *ip6;
 	int isipv6;
 #endif /* INET6 */
 	int optlen, tlen, win, ulen;
 	int ect = 0;
 	bool incl_opts;
 	uint16_t port;
 	int output_ret;
 #ifdef INVARIANTS
 	int thflags = tcp_get_flags(th);
 #endif
 
 	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
 	NET_EPOCH_ASSERT();
 
 #ifdef INET6
 	isipv6 = ((struct ip *)ipgen)->ip_v == (IPV6_VERSION >> 4);
 	ip6 = ipgen;
 #endif /* INET6 */
 	ip = ipgen;
 
 	if (tp != NULL) {
 		inp = tptoinpcb(tp);
 		INP_LOCK_ASSERT(inp);
 	} else
 		inp = NULL;
 
 	if (m != NULL) {
 #ifdef INET6
 		if (isipv6 && ip6 && (ip6->ip6_nxt == IPPROTO_UDP))
 			port = m->m_pkthdr.tcp_tun_port;
 		else
 #endif
 		if (ip && (ip->ip_p == IPPROTO_UDP))
 			port = m->m_pkthdr.tcp_tun_port;
 		else
 			port = 0;
 	} else
 		port = tp->t_port;
 
 	incl_opts = false;
 	win = 0;
 	if (tp != NULL) {
 		if (!(flags & TH_RST)) {
 			win = sbspace(&inp->inp_socket->so_rcv);
 			if (win > TCP_MAXWIN << tp->rcv_scale)
 				win = TCP_MAXWIN << tp->rcv_scale;
 		}
 		if ((tp->t_flags & TF_NOOPT) == 0)
 			incl_opts = true;
 	}
 	if (m == NULL) {
 		m = m_gethdr(M_NOWAIT, MT_DATA);
 		if (m == NULL)
 			return;
 		m->m_data += max_linkhdr;
 #ifdef INET6
 		if (isipv6) {
 			bcopy((caddr_t)ip6, mtod(m, caddr_t),
 			      sizeof(struct ip6_hdr));
 			ip6 = mtod(m, struct ip6_hdr *);
 			nth = (struct tcphdr *)(ip6 + 1);
 			if (port) {
 				/* Insert a UDP header */
 				uh = (struct udphdr *)nth;
 				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
 				uh->uh_dport = port;
 				nth = (struct tcphdr *)(uh + 1);
 			}
 		} else
 #endif /* INET6 */
 		{
 			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
 			ip = mtod(m, struct ip *);
 			nth = (struct tcphdr *)(ip + 1);
 			if (port) {
 				/* Insert a UDP header */
 				uh = (struct udphdr *)nth;
 				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
 				uh->uh_dport = port;
 				nth = (struct tcphdr *)(uh + 1);
 			}
 		}
 		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
 		flags = TH_ACK;
 	} else if ((!M_WRITABLE(m)) || (port != 0)) {
 		struct mbuf *n;
 
 		/* Can't reuse 'm', allocate a new mbuf. */
 		n = m_gethdr(M_NOWAIT, MT_DATA);
 		if (n == NULL) {
 			m_freem(m);
 			return;
 		}
 
 		if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
 			m_freem(m);
 			m_freem(n);
 			return;
 		}
 
 		n->m_data += max_linkhdr;
 		/* m_len is set later */
 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
 #ifdef INET6
 		if (isipv6) {
 			bcopy((caddr_t)ip6, mtod(n, caddr_t),
 			      sizeof(struct ip6_hdr));
 			ip6 = mtod(n, struct ip6_hdr *);
 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
 			nth = (struct tcphdr *)(ip6 + 1);
 			if (port) {
 				/* Insert a UDP header */
 				uh = (struct udphdr *)nth;
 				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
 				uh->uh_dport = port;
 				nth = (struct tcphdr *)(uh + 1);
 			}
 		} else
 #endif /* INET6 */
 		{
 			bcopy((caddr_t)ip, mtod(n, caddr_t), sizeof(struct ip));
 			ip = mtod(n, struct ip *);
 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
 			nth = (struct tcphdr *)(ip + 1);
 			if (port) {
 				/* Insert a UDP header */
 				uh = (struct udphdr *)nth;
 				uh->uh_sport = htons(V_tcp_udp_tunneling_port);
 				uh->uh_dport = port;
 				nth = (struct tcphdr *)(uh + 1);
 			}
 		}
 		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
 		xchg(nth->th_dport, nth->th_sport, uint16_t);
 		th = nth;
 		m_freem(m);
 		m = n;
 	} else {
 		/*
 		 * Reuse the mbuf.
 		 * XXX MRT We inherit the FIB, which is lucky.
 		 */
 		m_freem(m->m_next);
 		m->m_next = NULL;
 		m->m_data = (caddr_t)ipgen;
 		/* clear any receive flags for proper bpf timestamping */
 		m->m_flags &= ~(M_TSTMP | M_TSTMP_LRO);
 		/* m_len is set later */
 #ifdef INET6
 		if (isipv6) {
 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
 			nth = (struct tcphdr *)(ip6 + 1);
 		} else
 #endif /* INET6 */
 		{
 			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
 			nth = (struct tcphdr *)(ip + 1);
 		}
 		if (th != nth) {
 			/*
 			 * This is usually the case when an extension header
 			 * exists between the IPv6 header and the
 			 * TCP header.
 			 */
 			nth->th_sport = th->th_sport;
 			nth->th_dport = th->th_dport;
 		}
 		xchg(nth->th_dport, nth->th_sport, uint16_t);
 #undef xchg
 	}
 	tlen = 0;
 #ifdef INET6
 	if (isipv6)
 		tlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
 #endif
 #if defined(INET) && defined(INET6)
 	else
 #endif
 #ifdef INET
 		tlen = sizeof (struct tcpiphdr);
 #endif
 	if (port)
 		tlen += sizeof (struct udphdr);
 #ifdef INVARIANTS
 	m->m_len = 0;
 	KASSERT(M_TRAILINGSPACE(m) >= tlen,
 	    ("Not enough trailing space for message (m=%p, need=%d, have=%ld)",
 	    m, tlen, (long)M_TRAILINGSPACE(m)));
 #endif
 	m->m_len = tlen;
 	to.to_flags = 0;
 	if (incl_opts) {
 		ect = tcp_ecn_output_established(tp, &flags, 0, false);
 		/* Make sure we have room. */
 		if (M_TRAILINGSPACE(m) < TCP_MAXOLEN) {
 			m->m_next = m_get(M_NOWAIT, MT_DATA);
 			if (m->m_next) {
 				optp = mtod(m->m_next, u_char *);
 				optm = m->m_next;
 			} else
 				incl_opts = false;
 		} else {
 			optp = (u_char *) (nth + 1);
 			optm = m;
 		}
 	}
 	if (incl_opts) {
 		/* Timestamps. */
 		if (tp->t_flags & TF_RCVD_TSTMP) {
 			to.to_tsval = tcp_ts_getticks() + tp->ts_offset;
 			to.to_tsecr = tp->ts_recent;
 			to.to_flags |= TOF_TS;
 		}
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		/* TCP-MD5 (RFC2385). */
 		if (tp->t_flags & TF_SIGNATURE)
 			to.to_flags |= TOF_SIGNATURE;
 #endif
 		/* Add the options. */
 		tlen += optlen = tcp_addoptions(&to, optp);
 
 		/* Update m_len in the correct mbuf. */
 		optm->m_len += optlen;
 	} else
 		optlen = 0;
 #ifdef INET6
 	if (isipv6) {
 		if (uh) {
 			ulen = tlen - sizeof(struct ip6_hdr);
 			uh->uh_ulen = htons(ulen);
 		}
 		ip6->ip6_flow = htonl(ect << IPV6_FLOWLABEL_LEN);
 		ip6->ip6_vfc = IPV6_VERSION;
 		if (port)
 			ip6->ip6_nxt = IPPROTO_UDP;
 		else
 			ip6->ip6_nxt = IPPROTO_TCP;
 		ip6->ip6_plen = htons(tlen - sizeof(*ip6));
 	}
 #endif
 #if defined(INET) && defined(INET6)
 	else
 #endif
 #ifdef INET
 	{
 		if (uh) {
 			ulen = tlen - sizeof(struct ip);
 			uh->uh_ulen = htons(ulen);
 		}
 		ip->ip_len = htons(tlen);
 		if (inp != NULL) {
 			ip->ip_tos = inp->inp_ip_tos & ~IPTOS_ECN_MASK;
 			ip->ip_ttl = inp->inp_ip_ttl;
 		} else {
 			ip->ip_tos = 0;
 			ip->ip_ttl = V_ip_defttl;
 		}
 		ip->ip_tos |= ect;
 		if (port) {
 			ip->ip_p = IPPROTO_UDP;
 		} else {
 			ip->ip_p = IPPROTO_TCP;
 		}
 		if (V_path_mtu_discovery)
 			ip->ip_off |= htons(IP_DF);
 	}
 #endif
 	m->m_pkthdr.len = tlen;
 	m->m_pkthdr.rcvif = NULL;
 #ifdef MAC
 	if (inp != NULL) {
 		/*
 		 * Packet is associated with a socket, so allow the
 		 * label of the response to reflect the socket label.
 		 */
 		INP_LOCK_ASSERT(inp);
 		mac_inpcb_create_mbuf(inp, m);
 	} else {
 		/*
 		 * Packet is not associated with a socket, so possibly
 		 * update the label in place.
 		 */
 		mac_netinet_tcp_reply(m);
 	}
 #endif
 	nth->th_seq = htonl(seq);
 	nth->th_ack = htonl(ack);
 	nth->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
 	tcp_set_flags(nth, flags);
 	if (tp && (flags & TH_RST)) {
 		/* Log the reset */
 		tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
 	}
 	if (tp != NULL)
 		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
 	else
 		nth->th_win = htons((u_short)win);
 	nth->th_urp = 0;
 
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 	if (to.to_flags & TOF_SIGNATURE) {
 		if (!TCPMD5_ENABLED() ||
 		    TCPMD5_OUTPUT(m, nth, to.to_signature) != 0) {
 			m_freem(m);
 			return;
 		}
 	}
 #endif
 
 #ifdef INET6
 	if (isipv6) {
 		if (port) {
 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			uh->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
 			nth->th_sum = 0;
 		} else {
 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			nth->th_sum = in6_cksum_pseudo(ip6,
 			    tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0);
 		}
 		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
 	}
 #endif /* INET6 */
 #if defined(INET6) && defined(INET)
 	else
 #endif
 #ifdef INET
 	{
 		if (port) {
 			uh->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 			    htons(ulen + IPPROTO_UDP));
 			m->m_pkthdr.csum_flags = CSUM_UDP;
 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
 			nth->th_sum = 0;
 		} else {
 			m->m_pkthdr.csum_flags = CSUM_TCP;
 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
 			nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
 			    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
 		}
 	}
 #endif /* INET */
 	TCP_PROBE3(debug__output, tp, th, m);
 	if (flags & TH_RST)
 		TCP_PROBE5(accept__refused, NULL, NULL, m, tp, nth);
 	lgb = NULL;
 	if ((tp != NULL) && tcp_bblogging_on(tp)) {
 		if (INP_WLOCKED(inp)) {
 			union tcp_log_stackspecific log;
 			struct timeval tv;
 
 			memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 			log.u_bbr.inhpts = tcp_in_hpts(tp);
 			log.u_bbr.flex8 = 4;
 			log.u_bbr.pkts_out = tp->t_maxseg;
 			log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 			log.u_bbr.delivered = 0;
 			lgb = tcp_log_event(tp, nth, NULL, NULL, TCP_LOG_OUT,
 			    ERRNO_UNK, 0, &log, false, NULL, NULL, 0, &tv);
 		} else {
 			/*
 			 * We cannot log the packet, since we only own the
 			 * read lock, but a write lock is needed.  The read
 			 * lock is not upgraded to a write lock, since only
 			 * taking the read lock was done intentionally to
 			 * improve the handling of SYN flooding attacks.
 			 * This happens only for pure SYN segments received in
 			 * the initial CLOSED state, or received in a state
 			 * more advanced than LISTEN when the UDP encapsulation
 			 * port is unexpected.
 			 * The incoming SYN segments do not really belong to
 			 * the TCP connection and the handling does not change
 			 * the state of the TCP connection.  Therefore, the
 			 * sending of the RST segments is not logged.  Note
 			 * that the incoming SYN segments are not logged
 			 * either.
 			 *
 			 * The following code ensures that the above
 			 * description is and stays correct.
 			 */
 			KASSERT((thflags & (TH_ACK|TH_SYN)) == TH_SYN &&
 			    (tp->t_state == TCPS_CLOSED ||
 			    (tp->t_state > TCPS_LISTEN && tp->t_port != port)),
 			    ("%s: Logging of TCP segment with flags 0x%b and "
 			    "UDP encapsulation port %u skipped in state %s",
 			    __func__, thflags, PRINT_TH_FLAGS,
 			    ntohs(port), tcpstates[tp->t_state]));
 		}
 	}
 
 	if (flags & TH_ACK)
 		TCPSTAT_INC(tcps_sndacks);
 	else if (flags & (TH_SYN|TH_FIN|TH_RST))
 		TCPSTAT_INC(tcps_sndctrl);
 	TCPSTAT_INC(tcps_sndtotal);
 
 #ifdef INET6
 	if (isipv6) {
 		TCP_PROBE5(send, NULL, tp, ip6, tp, nth);
 		output_ret = ip6_output(m, inp ? inp->in6p_outputopts : NULL,
 		    NULL, 0, NULL, NULL, inp);
 	}
 #endif /* INET6 */
 #if defined(INET) && defined(INET6)
 	else
 #endif
 #ifdef INET
 	{
 		TCP_PROBE5(send, NULL, tp, ip, tp, nth);
 		output_ret = ip_output(m, NULL, NULL, 0, NULL, inp);
 	}
 #endif
 	if (lgb != NULL)
 		lgb->tlb_errno = output_ret;
 }
 
 /*
  * Create a new TCP control block, making an empty reassembly queue and hooking
  * it to the argument protocol control block.  The `inp' parameter must have
  * come from the zone allocator set up by tcpcbstor declaration.
  */
 struct tcpcb *
 tcp_newtcpcb(struct inpcb *inp)
 {
 	struct tcpcb *tp = intotcpcb(inp);
 #ifdef INET6
 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
 #endif /* INET6 */
 
 	/*
 	 * Historically, allocation was done with M_ZERO.  There is a lot of
 	 * code that relies on that.  For now take the safe approach and zero
 	 * the whole tcpcb.  This definitely can be optimized.
 	 */
 	bzero(&tp->t_start_zero, t_zero_size);
 
 	/* Initialise cc_var struct for this tcpcb. */
 	tp->t_ccv.type = IPPROTO_TCP;
 	tp->t_ccv.ccvc.tcp = tp;
 	rw_rlock(&tcp_function_lock);
 	tp->t_fb = V_tcp_func_set_ptr;
 	refcount_acquire(&tp->t_fb->tfb_refcnt);
 	rw_runlock(&tcp_function_lock);
 	/*
 	 * Use the current system default CC algorithm.
 	 */
 	cc_attach(tp, CC_DEFAULT_ALGO());
 
 	if (CC_ALGO(tp)->cb_init != NULL)
 		if (CC_ALGO(tp)->cb_init(&tp->t_ccv, NULL) > 0) {
 			cc_detach(tp);
 			if (tp->t_fb->tfb_tcp_fb_fini)
 				(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
 			refcount_release(&tp->t_fb->tfb_refcnt);
 			return (NULL);
 		}
 
 #ifdef TCP_HHOOK
 	if (khelp_init_osd(HELPER_CLASS_TCP, &tp->t_osd)) {
 		if (tp->t_fb->tfb_tcp_fb_fini)
 			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
 		refcount_release(&tp->t_fb->tfb_refcnt);
 		return (NULL);
 	}
 #endif
 
 	TAILQ_INIT(&tp->t_segq);
 	STAILQ_INIT(&tp->t_inqueue);
 	tp->t_maxseg =
 #ifdef INET6
 		isipv6 ? V_tcp_v6mssdflt :
 #endif /* INET6 */
 		V_tcp_mssdflt;
 
 	/* All mbuf queue/ack compress flags should be off */
 	tcp_lro_features_off(tp);
 
 	callout_init_rw(&tp->t_callout, &inp->inp_lock, CALLOUT_RETURNUNLOCKED);
 	for (int i = 0; i < TT_N; i++)
 		tp->t_timers[i] = SBT_MAX;
 
 	switch (V_tcp_do_rfc1323) {
 		case 0:
 			break;
 		default:
 		case 1:
 			tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
 			break;
 		case 2:
 			tp->t_flags = TF_REQ_SCALE;
 			break;
 		case 3:
 			tp->t_flags = TF_REQ_TSTMP;
 			break;
 	}
 	if (V_tcp_do_sack)
 		tp->t_flags |= TF_SACK_PERMIT;
 	TAILQ_INIT(&tp->snd_holes);
 
 	/*
 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
 	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives a
 	 * reasonable initial retransmit time.
 	 */
 	tp->t_srtt = TCPTV_SRTTBASE;
 	tp->t_rttvar = ((tcp_rexmit_initial - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
 	tp->t_rttmin = tcp_rexmit_min;
 	tp->t_rxtcur = tcp_rexmit_initial;
 	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
 	tp->t_rcvtime = ticks;
 	/* We always start with ticks granularity */
 	tp->t_tmr_granularity = TCP_TMR_GRANULARITY_TICKS;
 	/*
 	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
 	 * because the socket may be bound to an IPv6 wildcard address,
 	 * which may match an IPv4-mapped IPv6 address.
 	 */
 	inp->inp_ip_ttl = V_ip_defttl;
 #ifdef TCPPCAP
 	/*
 	 * Init the TCP PCAP queues.
 	 */
 	tcp_pcap_tcpcb_init(tp);
 #endif
 #ifdef TCP_BLACKBOX
 	/* Initialize the per-TCPCB log data. */
 	tcp_log_tcpcbinit(tp);
 #endif
 	tp->t_pacing_rate = -1;
 	if (tp->t_fb->tfb_tcp_fb_init) {
 		if ((*tp->t_fb->tfb_tcp_fb_init)(tp, &tp->t_fb_ptr)) {
 			refcount_release(&tp->t_fb->tfb_refcnt);
 			return (NULL);
 		}
 	}
 #ifdef STATS
 	if (V_tcp_perconn_stats_enable == 1)
 		tp->t_stats = stats_blob_alloc(V_tcp_perconn_stats_dflt_tpl, 0);
 #endif
 	if (V_tcp_do_lrd)
 		tp->t_flags |= TF_LRD;
 
 	return (tp);
 }
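 
 /*
  * Editor's illustrative sketch (hypothetical helper, not part of the
  * source tree): the effect of the srtt/rttvar initialization in
  * tcp_newtcpcb() above.  With t_srtt starting at TCPTV_SRTTBASE (0),
  * the chosen rttvar makes the classic "srtt + 4 * rttvar" estimate
  * come out to roughly tcp_rexmit_initial once the fixed-point scaling
  * is removed.
  */
 static inline int
 example_initial_rto(int rexmit_initial)
 {
 	int srtt = TCPTV_SRTTBASE;		/* 0: no RTT estimate yet */
 	int rttvar_scaled = ((rexmit_initial - TCPTV_SRTTBASE) <<
 	    TCP_RTTVAR_SHIFT) / 4;
 	int rttvar = rttvar_scaled >> TCP_RTTVAR_SHIFT;
 
 	/* Approximately rexmit_initial, modulo integer truncation. */
 	return (srtt + 4 * rttvar);
 }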
 
 /*
  * Drop a TCP connection, reporting the specified error.  If the
  * connection is synchronized, then send a RST to the peer.
  */
 struct tcpcb *
 tcp_drop(struct tcpcb *tp, int errno)
 {
 	struct socket *so = tptosocket(tp);
 
 	NET_EPOCH_ASSERT();
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
 		tcp_state_change(tp, TCPS_CLOSED);
 		/* Don't use tcp_output() here due to possible recursion. */
 		(void)tcp_output_nodrop(tp);
 		TCPSTAT_INC(tcps_drops);
 	} else
 		TCPSTAT_INC(tcps_conndrops);
 	if (errno == ETIMEDOUT && tp->t_softerror)
 		errno = tp->t_softerror;
 	so->so_error = errno;
 	return (tcp_close(tp));
 }
 
 void
 tcp_discardcb(struct tcpcb *tp)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct socket *so = tptosocket(tp);
 	struct mbuf *m;
 #ifdef INET6
 	bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
 #endif
 
 	INP_WLOCK_ASSERT(inp);
 
 	tcp_timer_stop(tp);
-	if (tp->t_fb->tfb_tcp_timer_stop_all) {
-		tp->t_fb->tfb_tcp_timer_stop_all(tp);
-	}
 
 	/* free the reassembly queue, if any */
 	tcp_reass_flush(tp);
 
 #ifdef TCP_OFFLOAD
 	/* Disconnect offload device, if any. */
 	if (tp->t_flags & TF_TOE)
 		tcp_offload_detach(tp);
 #endif
 
 	tcp_free_sackholes(tp);
 
 #ifdef TCPPCAP
 	/* Free the TCP PCAP queues. */
 	tcp_pcap_drain(&(tp->t_inpkts));
 	tcp_pcap_drain(&(tp->t_outpkts));
 #endif
 
 	/* Allow the CC algorithm to clean up after itself. */
 	if (CC_ALGO(tp)->cb_destroy != NULL)
 		CC_ALGO(tp)->cb_destroy(&tp->t_ccv);
 	CC_DATA(tp) = NULL;
 	/* Detach from the CC algorithm */
 	cc_detach(tp);
 
 #ifdef TCP_HHOOK
 	khelp_destroy_osd(&tp->t_osd);
 #endif
 #ifdef STATS
 	stats_blob_destroy(tp->t_stats);
 #endif
 
 	CC_ALGO(tp) = NULL;
 	if ((m = STAILQ_FIRST(&tp->t_inqueue)) != NULL) {
 		struct mbuf *prev;
 
 		STAILQ_INIT(&tp->t_inqueue);
 		STAILQ_FOREACH_FROM_SAFE(m, &tp->t_inqueue, m_stailqpkt, prev)
 			m_freem(m);
 	}
 	TCPSTATES_DEC(tp->t_state);
 
 	if (tp->t_fb->tfb_tcp_fb_fini)
 		(*tp->t_fb->tfb_tcp_fb_fini)(tp, 1);
 	MPASS(!tcp_in_hpts(tp));
 #ifdef TCP_BLACKBOX
 	tcp_log_tcpcbfini(tp);
 #endif
 
 	/*
 	 * If we got enough samples through the srtt filter,
 	 * save the rtt and rttvar in the routing entry.
 	 * 'Enough' is arbitrarily defined as 4 rtt samples.
 	 * 4 samples is enough for the srtt filter to converge
 	 * to within a small percentage of the correct value; fewer
 	 * samples and we could save a bogus rtt.  The danger is not
 	 * high, as tcp quickly recovers from everything.
 	 * XXX: Works very well but needs some more statistics!
 	 *
 	 * XXXRRS: Updating must be done after the stack fini() since
 	 * that may be converting some internal representation of,
 	 * say, srtt etc. into the general one used by other stacks.
 	 * Let's also at least protect against so being NULL,
 	 * as RW stated below.
 	 */
 	if ((tp->t_rttupdated >= 4) && (so != NULL)) {
 		struct hc_metrics_lite metrics;
 		uint32_t ssthresh;
 
 		bzero(&metrics, sizeof(metrics));
 		/*
 		 * Always update the ssthresh when the conditions below
 		 * are satisfied.  This gives us a better starting value
 		 * for congestion avoidance on new connections.
 		 * ssthresh is only set if packet loss occurred on a session.
 		 *
 		 * XXXRW: 'so' may be NULL here, and/or the socket buffer may
 		 * be being torn down.  Ideally this code would not use 'so'.
 		 */
 		ssthresh = tp->snd_ssthresh;
 		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
 			/*
 			 * Convert the limit from user data bytes to
 			 * packets, then back to packet data bytes.
 			 */
 			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
 			if (ssthresh < 2)
 				ssthresh = 2;
 			ssthresh *= (tp->t_maxseg +
 #ifdef INET6
 			    (isipv6 ? sizeof (struct ip6_hdr) +
 			    sizeof (struct tcphdr) :
 #endif
 			    sizeof (struct tcpiphdr)
 #ifdef INET6
 			    )
 #endif
 			    );
 		} else
 			ssthresh = 0;
 		metrics.rmx_ssthresh = ssthresh;
 
 		metrics.rmx_rtt = tp->t_srtt;
 		metrics.rmx_rttvar = tp->t_rttvar;
 		metrics.rmx_cwnd = tp->snd_cwnd;
 		metrics.rmx_sendpipe = 0;
 		metrics.rmx_recvpipe = 0;
 
 		tcp_hc_update(&inp->inp_inc, &metrics);
 	}
 
 	refcount_release(&tp->t_fb->tfb_refcnt);
 }
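 
 /*
  * Editor's illustrative sketch (hypothetical helper, not part of the
  * source tree): the ssthresh conversion performed in tcp_discardcb()
  * above, in isolation.  The byte limit is rounded to whole segments,
  * clamped at a minimum of two, and scaled back up to packet data bytes
  * that include the per-segment TCP/IP header overhead (hdrlen).
  */
 static inline uint32_t
 example_hc_ssthresh(uint32_t ssthresh, u_int maxseg, u_int hdrlen)
 {
 	uint32_t segs;
 
 	segs = (ssthresh + maxseg / 2) / maxseg;	/* round to segments */
 	if (segs < 2)
 		segs = 2;				/* keep at least two */
 	return (segs * (maxseg + hdrlen));		/* back to bytes */
 }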
 
 /*
  * Attempt to close a TCP control block, marking it as dropped, and freeing
  * the socket if we hold the only reference.
  */
 struct tcpcb *
 tcp_close(struct tcpcb *tp)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct socket *so = tptosocket(tp);
 
 	INP_WLOCK_ASSERT(inp);
 
 #ifdef TCP_OFFLOAD
 	if (tp->t_state == TCPS_LISTEN)
 		tcp_offload_listen_stop(tp);
 #endif
 	/*
 	 * This releases the TFO pending counter resource for TFO listen
 	 * sockets as well as passively-created TFO sockets that transition
 	 * from SYN_RECEIVED to CLOSED.
 	 */
 	if (tp->t_tfo_pending) {
 		tcp_fastopen_decrement_counter(tp->t_tfo_pending);
 		tp->t_tfo_pending = NULL;
 	}
-#ifdef TCPHPTS
-	tcp_hpts_remove(tp);
-#endif
+	if (tp->t_fb->tfb_tcp_timer_stop_all != NULL)
+		tp->t_fb->tfb_tcp_timer_stop_all(tp);
 	in_pcbdrop(inp);
 	TCPSTAT_INC(tcps_closed);
 	if (tp->t_state != TCPS_CLOSED)
 		tcp_state_change(tp, TCPS_CLOSED);
 	KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
 	soisdisconnected(so);
 	if (inp->inp_flags & INP_SOCKREF) {
 		inp->inp_flags &= ~INP_SOCKREF;
 		INP_WUNLOCK(inp);
 		sorele(so);
 		return (NULL);
 	}
 	return (tp);
 }
 
 /*
  * Notify a tcp user of an asynchronous error;
  * store the error as a soft error.
  *
  * Do not wake up the user, since there is currently no mechanism for
  * reporting soft errors (yet - a kqueue filter may be added).
  */
 static struct inpcb *
 tcp_notify(struct inpcb *inp, int error)
 {
 	struct tcpcb *tp;
 
 	INP_WLOCK_ASSERT(inp);
 
 	tp = intotcpcb(inp);
 	KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
 
 	/*
 	 * Ignore some errors if we are hooked up.
 	 * If the connection hasn't completed, has retransmitted several
 	 * times, and receives a second error, give up now.  This is better
 	 * than waiting a long time to establish a connection that
 	 * can never complete.
 	 */
 	if (tp->t_state == TCPS_ESTABLISHED &&
 	    (error == EHOSTUNREACH || error == ENETUNREACH ||
 	     error == EHOSTDOWN)) {
 		if (inp->inp_route.ro_nh) {
 			NH_FREE(inp->inp_route.ro_nh);
 			inp->inp_route.ro_nh = (struct nhop_object *)NULL;
 		}
 		return (inp);
 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
 	    tp->t_softerror) {
 		tp = tcp_drop(tp, error);
 		if (tp != NULL)
 			return (inp);
 		else
 			return (NULL);
 	} else {
 		tp->t_softerror = error;
 		return (inp);
 	}
 #if 0
 	wakeup( &so->so_timeo);
 	sorwakeup(so);
 	sowwakeup(so);
 #endif
 }
 
 static int
 tcp_pcblist(SYSCTL_HANDLER_ARGS)
 {
 	struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
 	    INPLOOKUP_RLOCKPCB);
 	struct xinpgen xig;
 	struct inpcb *inp;
 	int error;
 
 	if (req->newptr != NULL)
 		return (EPERM);
 
 	if (req->oldptr == NULL) {
 		int n;
 
 		n = V_tcbinfo.ipi_count +
 		    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
 		n += imax(n / 8, 10);
 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
 		return (0);
 	}
 
 	if ((error = sysctl_wire_old_buffer(req, 0)) != 0)
 		return (error);
 
 	bzero(&xig, sizeof(xig));
 	xig.xig_len = sizeof xig;
 	xig.xig_count = V_tcbinfo.ipi_count +
 	    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
 	xig.xig_gen = V_tcbinfo.ipi_gencnt;
 	xig.xig_sogen = so_gencnt;
 	error = SYSCTL_OUT(req, &xig, sizeof xig);
 	if (error)
 		return (error);
 
 	error = syncache_pcblist(req);
 	if (error)
 		return (error);
 
 	while ((inp = inp_next(&inpi)) != NULL) {
 		if (inp->inp_gencnt <= xig.xig_gen &&
 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
 			struct xtcpcb xt;
 
 			tcp_inptoxtp(inp, &xt);
 			error = SYSCTL_OUT(req, &xt, sizeof xt);
 			if (error) {
 				INP_RUNLOCK(inp);
 				break;
 			} else
 				continue;
 		}
 	}
 
 	if (!error) {
 		/*
 		 * Give the user an updated idea of our state.
 		 * If the generation differs from what we told
 		 * her before, she knows that something happened
 		 * while we were processing this request, and it
 		 * might be necessary to retry.
 		 */
 		xig.xig_gen = V_tcbinfo.ipi_gencnt;
 		xig.xig_sogen = so_gencnt;
 		xig.xig_count = V_tcbinfo.ipi_count +
 		    counter_u64_fetch(V_tcps_states[TCPS_SYN_RECEIVED]);
 		error = SYSCTL_OUT(req, &xig, sizeof xig);
 	}
 
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
     NULL, 0, tcp_pcblist, "S,xtcpcb",
     "List of active TCP connections");
 
 #ifdef INET
 static int
 tcp_getcred(SYSCTL_HANDLER_ARGS)
 {
 	struct xucred xuc;
 	struct sockaddr_in addrs[2];
 	struct epoch_tracker et;
 	struct inpcb *inp;
 	int error;
 
 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
 	if (error)
 		return (error);
 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
 	if (error)
 		return (error);
 	NET_EPOCH_ENTER(et);
 	inp = in_pcblookup(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
 	    addrs[0].sin_addr, addrs[0].sin_port, INPLOOKUP_RLOCKPCB, NULL);
 	NET_EPOCH_EXIT(et);
 	if (inp != NULL) {
 		if (error == 0)
 			error = cr_canseeinpcb(req->td->td_ucred, inp);
 		if (error == 0)
 			cru2x(inp->inp_cred, &xuc);
 		INP_RUNLOCK(inp);
 	} else
 		error = ENOENT;
 	if (error == 0)
 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
     CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
     0, 0, tcp_getcred, "S,xucred",
     "Get the xucred of a TCP connection");
 #endif /* INET */
 
 #ifdef INET6
 static int
 tcp6_getcred(SYSCTL_HANDLER_ARGS)
 {
 	struct epoch_tracker et;
 	struct xucred xuc;
 	struct sockaddr_in6 addrs[2];
 	struct inpcb *inp;
 	int error;
 #ifdef INET
 	int mapped = 0;
 #endif
 
 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
 	if (error)
 		return (error);
 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
 	if (error)
 		return (error);
 	if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
 	    (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
 		return (error);
 	}
 	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
 #ifdef INET
 		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
 			mapped = 1;
 		else
 #endif
 			return (EINVAL);
 	}
 
 	NET_EPOCH_ENTER(et);
 #ifdef INET
 	if (mapped == 1)
 		inp = in_pcblookup(&V_tcbinfo,
 			*(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
 			addrs[1].sin6_port,
 			*(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
 			addrs[0].sin6_port, INPLOOKUP_RLOCKPCB, NULL);
 	else
 #endif
 		inp = in6_pcblookup(&V_tcbinfo,
 			&addrs[1].sin6_addr, addrs[1].sin6_port,
 			&addrs[0].sin6_addr, addrs[0].sin6_port,
 			INPLOOKUP_RLOCKPCB, NULL);
 	NET_EPOCH_EXIT(et);
 	if (inp != NULL) {
 		if (error == 0)
 			error = cr_canseeinpcb(req->td->td_ucred, inp);
 		if (error == 0)
 			cru2x(inp->inp_cred, &xuc);
 		INP_RUNLOCK(inp);
 	} else
 		error = ENOENT;
 	if (error == 0)
 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
     CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_NEEDGIANT,
     0, 0, tcp6_getcred, "S,xucred",
     "Get the xucred of a TCP6 connection");
 #endif /* INET6 */
 
 #ifdef INET
 /* Path MTU to try next when a fragmentation-needed message is received. */
 static inline int
 tcp_next_pmtu(const struct icmp *icp, const struct ip *ip)
 {
 	int mtu = ntohs(icp->icmp_nextmtu);
 
 	/* If no alternative MTU was proposed, try the next smaller one. */
 	if (!mtu)
 		mtu = ip_next_mtu(ntohs(ip->ip_len), 1);
 	if (mtu < V_tcp_minmss + sizeof(struct tcpiphdr))
 		mtu = V_tcp_minmss + sizeof(struct tcpiphdr);
 
 	return (mtu);
 }
 
 static void
 tcp_ctlinput_with_port(struct icmp *icp, uint16_t port)
 {
 	struct ip *ip;
 	struct tcphdr *th;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct inpcb *(*notify)(struct inpcb *, int);
 	struct in_conninfo inc;
 	tcp_seq icmp_tcp_seq;
 	int errno, mtu;
 
 	errno = icmp_errmap(icp);
 	switch (errno) {
 	case 0:
 		return;
 	case EMSGSIZE:
 		notify = tcp_mtudisc_notify;
 		break;
 	case ECONNREFUSED:
 		if (V_icmp_may_rst)
 			notify = tcp_drop_syn_sent;
 		else
 			notify = tcp_notify;
 		break;
 	case EHOSTUNREACH:
 		if (V_icmp_may_rst && icp->icmp_type == ICMP_TIMXCEED)
 			notify = tcp_drop_syn_sent;
 		else
 			notify = tcp_notify;
 		break;
 	default:
 		notify = tcp_notify;
 	}
 
 	ip = &icp->icmp_ip;
 	th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
 	icmp_tcp_seq = th->th_seq;
 	inp = in_pcblookup(&V_tcbinfo, ip->ip_dst, th->th_dport, ip->ip_src,
 	    th->th_sport, INPLOOKUP_WLOCKPCB, NULL);
 	if (inp != NULL)  {
 		tp = intotcpcb(inp);
 #ifdef TCP_OFFLOAD
 		if (tp->t_flags & TF_TOE && errno == EMSGSIZE) {
 			/*
 			 * MTU discovery for offloaded connections.  Let
 			 * the TOE driver verify seq# and process it.
 			 */
 			mtu = tcp_next_pmtu(icp, ip);
 			tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu);
 			goto out;
 		}
 #endif
 		if (tp->t_port != port)
 			goto out;
 		if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
 		    SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
 			if (errno == EMSGSIZE) {
 				/*
 				 * MTU discovery: we got a needfrag and
 				 * will potentially try a lower MTU.
 				 */
 				mtu = tcp_next_pmtu(icp, ip);
 
 				/*
 				 * Only process the offered MTU if it
 				 * is smaller than the current one.
 				 */
 				if (mtu < tp->t_maxseg +
 				    sizeof(struct tcpiphdr)) {
 					bzero(&inc, sizeof(inc));
 					inc.inc_faddr = ip->ip_dst;
 					inc.inc_fibnum =
 					    inp->inp_inc.inc_fibnum;
 					tcp_hc_updatemtu(&inc, mtu);
 					inp = tcp_mtudisc(inp, mtu);
 				}
 			} else
 				inp = (*notify)(inp, errno);
 		}
 	} else {
 		bzero(&inc, sizeof(inc));
 		inc.inc_fport = th->th_dport;
 		inc.inc_lport = th->th_sport;
 		inc.inc_faddr = ip->ip_dst;
 		inc.inc_laddr = ip->ip_src;
 		syncache_unreach(&inc, icmp_tcp_seq, port);
 	}
 out:
 	if (inp != NULL)
 		INP_WUNLOCK(inp);
 }
 
 static void
 tcp_ctlinput(struct icmp *icmp)
 {
 	tcp_ctlinput_with_port(icmp, htons(0));
 }
 
 static void
 tcp_ctlinput_viaudp(udp_tun_icmp_param_t param)
 {
 	/* It's a tunneled TCP-over-UDP ICMP message. */
 	struct icmp *icmp = param.icmp;
 	struct ip *outer_ip, *inner_ip;
 	struct udphdr *udp;
 	struct tcphdr *th, ttemp;
 	int i_hlen, o_len;
 	uint16_t port;
 
 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
 	inner_ip = &icmp->icmp_ip;
 	i_hlen = inner_ip->ip_hl << 2;
 	o_len = ntohs(outer_ip->ip_len);
 	if (o_len <
 	    (sizeof(struct ip) + 8 + i_hlen + sizeof(struct udphdr) + offsetof(struct tcphdr, th_ack))) {
 		/* Not enough data present */
 		return;
 	}
 	/*
 	 * OK, strip out the inner UDP header by copying the TCP header up
 	 * on top of it.
 	 */
 	udp = (struct udphdr *)(((caddr_t)inner_ip) + i_hlen);
 	if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
 		return;
 	}
 	port = udp->uh_dport;
 	th = (struct tcphdr *)(udp + 1);
 	memcpy(&ttemp, th, sizeof(struct tcphdr));
 	memcpy(udp, &ttemp, sizeof(struct tcphdr));
 	/* Now adjust down the length of the outer IP packet. */
 	o_len -= sizeof(struct udphdr);
 	outer_ip->ip_len = htons(o_len);
 	/* Now call in to the normal handling code */
 	tcp_ctlinput_with_port(icmp, port);
 }
 #endif /* INET */
 
 #ifdef INET6
 static inline int
 tcp6_next_pmtu(const struct icmp6_hdr *icmp6)
 {
 	int mtu = ntohl(icmp6->icmp6_mtu);
 
 	/*
 	 * If no alternative MTU was proposed, or the proposed MTU was too
 	 * small, set to the min.
 	 */
 	if (mtu < IPV6_MMTU)
 		mtu = IPV6_MMTU - 8;	/* XXXNP: what is the adjustment for? */
 	return (mtu);
 }
 
 static void
 tcp6_ctlinput_with_port(struct ip6ctlparam *ip6cp, uint16_t port)
 {
 	struct in6_addr *dst;
 	struct inpcb *(*notify)(struct inpcb *, int);
 	struct ip6_hdr *ip6;
 	struct mbuf *m;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct icmp6_hdr *icmp6;
 	struct in_conninfo inc;
 	struct tcp_ports {
 		uint16_t th_sport;
 		uint16_t th_dport;
 	} t_ports;
 	tcp_seq icmp_tcp_seq;
 	unsigned int mtu;
 	unsigned int off;
 	int errno;
 
 	icmp6 = ip6cp->ip6c_icmp6;
 	m = ip6cp->ip6c_m;
 	ip6 = ip6cp->ip6c_ip6;
 	off = ip6cp->ip6c_off;
 	dst = &ip6cp->ip6c_finaldst->sin6_addr;
 
 	errno = icmp6_errmap(icmp6);
 	switch (errno) {
 	case 0:
 		return;
 	case EMSGSIZE:
 		notify = tcp_mtudisc_notify;
 		break;
 	case ECONNREFUSED:
 		if (V_icmp_may_rst)
 			notify = tcp_drop_syn_sent;
 		else
 			notify = tcp_notify;
 		break;
 	case EHOSTUNREACH:
 		/*
 		 * There are only four ICMPs that may reset the connection:
 		 * - administratively prohibited
 		 * - port unreachable
 		 * - time exceeded in transit
 		 * - unknown next header
 		 */
 		if (V_icmp_may_rst &&
 		    ((icmp6->icmp6_type == ICMP6_DST_UNREACH &&
 		     (icmp6->icmp6_code == ICMP6_DST_UNREACH_ADMIN ||
 		      icmp6->icmp6_code == ICMP6_DST_UNREACH_NOPORT)) ||
 		    (icmp6->icmp6_type == ICMP6_TIME_EXCEEDED &&
 		      icmp6->icmp6_code == ICMP6_TIME_EXCEED_TRANSIT) ||
 		    (icmp6->icmp6_type == ICMP6_PARAM_PROB &&
 		      icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER)))
 			notify = tcp_drop_syn_sent;
 		else
 			notify = tcp_notify;
 		break;
 	default:
 		notify = tcp_notify;
 	}
 
 	/* Check if we can safely get the ports from the tcp hdr */
 	if (m == NULL ||
 	    (m->m_pkthdr.len <
 		(int32_t) (off + sizeof(struct tcp_ports)))) {
 		return;
 	}
 	bzero(&t_ports, sizeof(struct tcp_ports));
 	m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
 	inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
 	    &ip6->ip6_src, t_ports.th_sport, INPLOOKUP_WLOCKPCB, NULL);
 	off += sizeof(struct tcp_ports);
 	if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
 		goto out;
 	}
 	m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
 	if (inp != NULL)  {
 		tp = intotcpcb(inp);
 #ifdef TCP_OFFLOAD
 		if (tp->t_flags & TF_TOE && errno == EMSGSIZE) {
 			/* MTU discovery for offloaded connections. */
 			mtu = tcp6_next_pmtu(icmp6);
 			tcp_offload_pmtu_update(tp, icmp_tcp_seq, mtu);
 			goto out;
 		}
 #endif
 		if (tp->t_port != port)
 			goto out;
 		if (SEQ_GEQ(ntohl(icmp_tcp_seq), tp->snd_una) &&
 		    SEQ_LT(ntohl(icmp_tcp_seq), tp->snd_max)) {
 			if (errno == EMSGSIZE) {
 				/*
 				 * MTU discovery:
 				 * If we got a needfrag, set the MTU
 				 * in the route to the suggested new
 				 * value (if given) and then notify.
 				 */
 				mtu = tcp6_next_pmtu(icmp6);
 
 				bzero(&inc, sizeof(inc));
 				inc.inc_fibnum = M_GETFIB(m);
 				inc.inc_flags |= INC_ISIPV6;
 				inc.inc6_faddr = *dst;
 				if (in6_setscope(&inc.inc6_faddr,
 					m->m_pkthdr.rcvif, NULL))
 					goto out;
 				/*
 				 * Only process the offered MTU if it
 				 * is smaller than the current one.
 				 */
 				if (mtu < tp->t_maxseg +
 				    sizeof (struct tcphdr) +
 				    sizeof (struct ip6_hdr)) {
 					tcp_hc_updatemtu(&inc, mtu);
 					tcp_mtudisc(inp, mtu);
 					ICMP6STAT_INC(icp6s_pmtuchg);
 				}
 			} else
 				inp = (*notify)(inp, errno);
 		}
 	} else {
 		bzero(&inc, sizeof(inc));
 		inc.inc_fibnum = M_GETFIB(m);
 		inc.inc_flags |= INC_ISIPV6;
 		inc.inc_fport = t_ports.th_dport;
 		inc.inc_lport = t_ports.th_sport;
 		inc.inc6_faddr = *dst;
 		inc.inc6_laddr = ip6->ip6_src;
 		syncache_unreach(&inc, icmp_tcp_seq, port);
 	}
 out:
 	if (inp != NULL)
 		INP_WUNLOCK(inp);
 }
 
 static void
 tcp6_ctlinput(struct ip6ctlparam *ctl)
 {
 	tcp6_ctlinput_with_port(ctl, htons(0));
 }
 
 static void
 tcp6_ctlinput_viaudp(udp_tun_icmp_param_t param)
 {
 	struct ip6ctlparam *ip6cp = param.ip6cp;
 	struct mbuf *m;
 	struct udphdr *udp;
 	uint16_t port;
 
 	m = m_pulldown(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(struct udphdr), NULL);
 	if (m == NULL) {
 		return;
 	}
 	udp = mtod(m, struct udphdr *);
 	if (ntohs(udp->uh_sport) != V_tcp_udp_tunneling_port) {
 		return;
 	}
 	port = udp->uh_dport;
 	m_adj(m, sizeof(struct udphdr));
 	if ((m->m_flags & M_PKTHDR) == 0) {
 		ip6cp->ip6c_m->m_pkthdr.len -= sizeof(struct udphdr);
 	}
 	/* Now call in to the normal handling code */
 	tcp6_ctlinput_with_port(ip6cp, port);
 }
 
 #endif /* INET6 */
 
 static uint32_t
 tcp_keyed_hash(struct in_conninfo *inc, u_char *key, u_int len)
 {
 	SIPHASH_CTX ctx;
 	uint32_t hash[2];
 
 	KASSERT(len >= SIPHASH_KEY_LENGTH,
 	    ("%s: keylen %u too short ", __func__, len));
 	SipHash24_Init(&ctx);
 	SipHash_SetKey(&ctx, (uint8_t *)key);
 	SipHash_Update(&ctx, &inc->inc_fport, sizeof(uint16_t));
 	SipHash_Update(&ctx, &inc->inc_lport, sizeof(uint16_t));
 	switch (inc->inc_flags & INC_ISIPV6) {
 #ifdef INET
 	case 0:
 		SipHash_Update(&ctx, &inc->inc_faddr, sizeof(struct in_addr));
 		SipHash_Update(&ctx, &inc->inc_laddr, sizeof(struct in_addr));
 		break;
 #endif
 #ifdef INET6
 	case INC_ISIPV6:
 		SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(struct in6_addr));
 		SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(struct in6_addr));
 		break;
 #endif
 	}
 	SipHash_Final((uint8_t *)hash, &ctx);
 
 	return (hash[0] ^ hash[1]);
 }
 
 uint32_t
 tcp_new_ts_offset(struct in_conninfo *inc)
 {
 	struct in_conninfo inc_store, *local_inc;
 
 	if (!V_tcp_ts_offset_per_conn) {
 		memcpy(&inc_store, inc, sizeof(struct in_conninfo));
 		inc_store.inc_lport = 0;
 		inc_store.inc_fport = 0;
 		local_inc = &inc_store;
 	} else {
 		local_inc = inc;
 	}
 	return (tcp_keyed_hash(local_inc, V_ts_offset_secret,
 	    sizeof(V_ts_offset_secret)));
 }
 
 /*
  * Following is where TCP initial sequence number generation occurs.
  *
  * There are two places where we must use initial sequence numbers:
  * 1.  In SYN-ACK packets.
  * 2.  In SYN packets.
  *
  * All ISNs for SYN-ACK packets are generated by the syncache.  See
  * tcp_syncache.c for details.
  *
  * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
  * depends on this property.  In addition, these ISNs should be
  * unguessable so as to prevent connection hijacking.  To satisfy
  * the requirements of this situation, the algorithm outlined in
  * RFC 1948 is used, with only small modifications.
  *
  * Implementation details:
  *
  * Time is based on the system timer, and is corrected so that it
  * increases by one megabyte per second.  This allows for proper
  * recycling on high-speed LANs while still leaving over an hour
  * before rollover.
  *
  * As reading the *exact* system time is too expensive to be done
  * whenever setting up a TCP connection, we increment the time
  * offset in two ways.  First, a small random positive increment
  * is added to isn_offset for each connection that is set up.
  * Second, the function tcp_isn_tick fires once per clock tick
  * and increments isn_offset as necessary so that sequence numbers
  * are incremented at approximately ISN_BYTES_PER_SECOND.  The
  * random positive increments serve only to ensure that the same
  * exact sequence number is never sent out twice (as could otherwise
  * happen when a port is recycled in less than the system tick
  * interval).
  *
  * net.inet.tcp.isn_reseed_interval controls the number of seconds
  * between seeding of isn_secret.  This is normally set to zero,
  * as reseeding should not be necessary.
  *
  * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
  * isn_offset_old, and isn_ctx is performed using the ISN lock.  In
  * general, this means holding an exclusive (write) lock.
  */
 
 #define ISN_BYTES_PER_SECOND 1048576
 #define ISN_STATIC_INCREMENT 4096
 #define ISN_RANDOM_INCREMENT (4096 - 1)
 #define ISN_SECRET_LENGTH    SIPHASH_KEY_LENGTH
 
 VNET_DEFINE_STATIC(u_char, isn_secret[ISN_SECRET_LENGTH]);
 VNET_DEFINE_STATIC(int, isn_last);
 VNET_DEFINE_STATIC(int, isn_last_reseed);
 VNET_DEFINE_STATIC(u_int32_t, isn_offset);
 VNET_DEFINE_STATIC(u_int32_t, isn_offset_old);
 
 #define	V_isn_secret			VNET(isn_secret)
 #define	V_isn_last			VNET(isn_last)
 #define	V_isn_last_reseed		VNET(isn_last_reseed)
 #define	V_isn_offset			VNET(isn_offset)
 #define	V_isn_offset_old		VNET(isn_offset_old)
 
 tcp_seq
 tcp_new_isn(struct in_conninfo *inc)
 {
 	tcp_seq new_isn;
 	u_int32_t projected_offset;
 
 	ISN_LOCK();
 	/* Seed if this is the first use, reseed if requested. */
 	if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
 	     (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
 		< (u_int)ticks))) {
 		arc4rand(&V_isn_secret, sizeof(V_isn_secret), 0);
 		V_isn_last_reseed = ticks;
 	}
 
 	/* Compute the hash and return the ISN. */
 	new_isn = (tcp_seq)tcp_keyed_hash(inc, V_isn_secret,
 	    sizeof(V_isn_secret));
 	V_isn_offset += ISN_STATIC_INCREMENT +
 		(arc4random() & ISN_RANDOM_INCREMENT);
 	if (ticks != V_isn_last) {
 		projected_offset = V_isn_offset_old +
 		    ISN_BYTES_PER_SECOND / hz * (ticks - V_isn_last);
 		if (SEQ_GT(projected_offset, V_isn_offset))
 			V_isn_offset = projected_offset;
 		V_isn_offset_old = V_isn_offset;
 		V_isn_last = ticks;
 	}
 	new_isn += V_isn_offset;
 	ISN_UNLOCK();
 	return (new_isn);
 }
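 
 /*
  * Editor's illustrative sketch (hypothetical helper, not part of the
  * source tree): the offset projection used by tcp_new_isn() above.
  * Between clock ticks the offset grows only by the small random
  * per-connection increments; once the tick changes it is pushed
  * forward so that, overall, the sequence space advances at about
  * ISN_BYTES_PER_SECOND.
  */
 static inline uint32_t
 example_projected_isn_offset(uint32_t offset_old, int last_tick,
     int now_tick)
 {
 	return (offset_old + ISN_BYTES_PER_SECOND / hz *
 	    (uint32_t)(now_tick - last_tick));
 }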
 
 /*
  * When a specific ICMP unreachable message is received and the
  * connection state is SYN-SENT, drop the connection.  This behavior
  * is controlled by the icmp_may_rst sysctl.
  */
 static struct inpcb *
 tcp_drop_syn_sent(struct inpcb *inp, int errno)
 {
 	struct tcpcb *tp;
 
 	NET_EPOCH_ASSERT();
 	INP_WLOCK_ASSERT(inp);
 
 	tp = intotcpcb(inp);
 	if (tp->t_state != TCPS_SYN_SENT)
 		return (inp);
 
 	if (IS_FASTOPEN(tp->t_flags))
 		tcp_fastopen_disable_path(tp);
 
 	tp = tcp_drop(tp, errno);
 	if (tp != NULL)
 		return (inp);
 	else
 		return (NULL);
 }
 
 /*
  * When `need fragmentation' ICMP is received, update our idea of the MSS
  * based on the new value. Also nudge TCP to send something, since we
  * know the packet we just sent was dropped.
  * This duplicates some code in the tcp_mss() function in tcp_input.c.
  */
 static struct inpcb *
 tcp_mtudisc_notify(struct inpcb *inp, int error)
 {
 
 	return (tcp_mtudisc(inp, -1));
 }
 
 static struct inpcb *
 tcp_mtudisc(struct inpcb *inp, int mtuoffer)
 {
 	struct tcpcb *tp;
 	struct socket *so;
 
 	INP_WLOCK_ASSERT(inp);
 
 	tp = intotcpcb(inp);
 	KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
 
 	tcp_mss_update(tp, -1, mtuoffer, NULL, NULL);
 
 	so = inp->inp_socket;
 	SOCKBUF_LOCK(&so->so_snd);
 	/* If the mss is larger than the socket buffer, decrease the mss. */
 	if (so->so_snd.sb_hiwat < tp->t_maxseg)
 		tp->t_maxseg = so->so_snd.sb_hiwat;
 	SOCKBUF_UNLOCK(&so->so_snd);
 
 	TCPSTAT_INC(tcps_mturesent);
 	tp->t_rtttime = 0;
 	tp->snd_nxt = tp->snd_una;
 	tcp_free_sackholes(tp);
 	tp->snd_recover = tp->snd_max;
 	if (tp->t_flags & TF_SACK_PERMIT)
 		EXIT_FASTRECOVERY(tp->t_flags);
 	if (tp->t_fb->tfb_tcp_mtu_chg != NULL) {
 		/*
 		 * Conceptually, the snd_nxt setting
 		 * and the freeing of sack holes should
 		 * be done by the default stack's
 		 * own tfb_tcp_mtu_chg().
 		 */
 		tp->t_fb->tfb_tcp_mtu_chg(tp);
 	}
 	if (tcp_output(tp) < 0)
 		return (NULL);
 	else
 		return (inp);
 }
 
 #ifdef INET
 /*
  * Look up the routing entry to the peer of this inpcb.  If no route
  * is found and it cannot be allocated, then return 0.  This routine
  * is called by TCP routines that access the rmx structure and by
  * tcp_mss_update to get the peer/interface MTU.
  */
 uint32_t
 tcp_maxmtu(struct in_conninfo *inc, struct tcp_ifcap *cap)
 {
 	struct nhop_object *nh;
 	struct ifnet *ifp;
 	uint32_t maxmtu = 0;
 
 	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
 
 	if (inc->inc_faddr.s_addr != INADDR_ANY) {
 		nh = fib4_lookup(inc->inc_fibnum, inc->inc_faddr, 0, NHR_NONE, 0);
 		if (nh == NULL)
 			return (0);
 
 		ifp = nh->nh_ifp;
 		maxmtu = nh->nh_mtu;
 
 		/* Report additional interface capabilities. */
 		if (cap != NULL) {
 			if (ifp->if_capenable & IFCAP_TSO4 &&
 			    ifp->if_hwassist & CSUM_TSO) {
 				cap->ifcap |= CSUM_TSO;
 				cap->tsomax = ifp->if_hw_tsomax;
 				cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
 				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
 			}
 		}
 	}
 	return (maxmtu);
 }
 #endif /* INET */
 
 #ifdef INET6
 uint32_t
 tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
 {
 	struct nhop_object *nh;
 	struct in6_addr dst6;
 	uint32_t scopeid;
 	struct ifnet *ifp;
 	uint32_t maxmtu = 0;
 
 	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
 
 	if (inc->inc_flags & INC_IPV6MINMTU)
 		return (IPV6_MMTU);
 
 	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
 		in6_splitscope(&inc->inc6_faddr, &dst6, &scopeid);
 		nh = fib6_lookup(inc->inc_fibnum, &dst6, scopeid, NHR_NONE, 0);
 		if (nh == NULL)
 			return (0);
 
 		ifp = nh->nh_ifp;
 		maxmtu = nh->nh_mtu;
 
 		/* Report additional interface capabilities. */
 		if (cap != NULL) {
 			if (ifp->if_capenable & IFCAP_TSO6 &&
 			    ifp->if_hwassist & CSUM_TSO) {
 				cap->ifcap |= CSUM_TSO;
 				cap->tsomax = ifp->if_hw_tsomax;
 				cap->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
 				cap->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
 			}
 		}
 	}
 
 	return (maxmtu);
 }
 
 /*
  * Handle setsockopt(IPV6_USE_MIN_MTU) by a TCP stack.
  *
  * XXXGL: we are updating inpcb here with INC_IPV6MINMTU flag.
  * The right place to do that is ip6_setpktopt() that has just been
  * executed.  By the way it just filled ip6po_minmtu for us.
  */
 void
 tcp6_use_min_mtu(struct tcpcb *tp)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 
 	INP_WLOCK_ASSERT(inp);
 	/*
 	 * In case of the IPV6_USE_MIN_MTU socket
 	 * option, set the INC_IPV6MINMTU flag to announce
 	 * a corresponding MSS during the initial
 	 * handshake.  If the TCP connection is not in
 	 * the front states, just reduce the MSS being
 	 * used.  This avoids the sending of TCP
 	 * segments which will be fragmented at the
 	 * IPv6 layer.
 	 */
 	inp->inp_inc.inc_flags |= INC_IPV6MINMTU;
 	if ((tp->t_state >= TCPS_SYN_SENT) &&
 	    (inp->inp_inc.inc_flags & INC_ISIPV6)) {
 		struct ip6_pktopts *opt;
 
 		opt = inp->in6p_outputopts;
 		if (opt != NULL && opt->ip6po_minmtu == IP6PO_MINMTU_ALL &&
 		    tp->t_maxseg > TCP6_MSS)
 			tp->t_maxseg = TCP6_MSS;
 	}
 }
 #endif /* INET6 */
 
 /*
  * Calculate the effective SMSS per the RFC 5681 definition for a given TCP
  * connection at its current state, taking into account SACK, etc.
  */
 u_int
 tcp_maxseg(const struct tcpcb *tp)
 {
 	u_int optlen;
 
 	if (tp->t_flags & TF_NOOPT)
 		return (tp->t_maxseg);
 
 	/*
 	 * Here we have a simplified version of the code from
 	 * tcp_addoptions(), without a proper loop, and with most of the
 	 * paddings hardcoded.  We might make mistakes with padding here in
 	 * some edge cases, but this is harmless, since the result of
 	 * tcp_maxseg() is used only in cwnd and ssthresh estimations.
 	 */
 	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
 		if (tp->t_flags & TF_RCVD_TSTMP)
 			optlen = TCPOLEN_TSTAMP_APPA;
 		else
 			optlen = 0;
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		if (tp->t_flags & TF_SIGNATURE)
 			optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
 #endif
 		if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0) {
 			optlen += TCPOLEN_SACKHDR;
 			optlen += tp->rcv_numsacks * TCPOLEN_SACK;
 			optlen = PADTCPOLEN(optlen);
 		}
 	} else {
 		if (tp->t_flags & TF_REQ_TSTMP)
 			optlen = TCPOLEN_TSTAMP_APPA;
 		else
 			optlen = PADTCPOLEN(TCPOLEN_MAXSEG);
 		if (tp->t_flags & TF_REQ_SCALE)
 			optlen += PADTCPOLEN(TCPOLEN_WINDOW);
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		if (tp->t_flags & TF_SIGNATURE)
 			optlen += PADTCPOLEN(TCPOLEN_SIGNATURE);
 #endif
 		if (tp->t_flags & TF_SACK_PERMIT)
 			optlen += PADTCPOLEN(TCPOLEN_SACK_PERMITTED);
 	}
 	optlen = min(optlen, TCP_MAXOLEN);
 	return (tp->t_maxseg - optlen);
 }
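 
 /*
  * Editor's illustrative sketch (hypothetical values, not part of the
  * source tree): for an established connection that negotiated
  * timestamps but currently has no SACK blocks to report, tcp_maxseg()
  * above subtracts only the timestamp option overhead, e.g.
  * 1460 - TCPOLEN_TSTAMP_APPA (12) = 1448 bytes of effective SMSS.
  */
 static inline u_int
 example_effective_smss(u_int t_maxseg)
 {
 	return (t_maxseg - TCPOLEN_TSTAMP_APPA);
 }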
 
 u_int
 tcp_fixed_maxseg(const struct tcpcb *tp)
 {
 	int optlen;
 
 	if (tp->t_flags & TF_NOOPT)
 		return (tp->t_maxseg);
 
 	/*
 	 * Here we have a simplified version of the code from
 	 * tcp_addoptions(), without a proper loop, and with most of the
 	 * paddings hardcoded.  We only consider fixed options that we
 	 * would send every time, i.e. SACK is not considered.  This is
 	 * important for cc modules to figure out what the modulo of the
 	 * cwnd should be.
 	 */
 #define	PAD(len)	((((len) / 4) + !!((len) % 4)) * 4)
 	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
 		if (tp->t_flags & TF_RCVD_TSTMP)
 			optlen = TCPOLEN_TSTAMP_APPA;
 		else
 			optlen = 0;
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		if (tp->t_flags & TF_SIGNATURE)
 			optlen += PAD(TCPOLEN_SIGNATURE);
 #endif
 	} else {
 		if (tp->t_flags & TF_REQ_TSTMP)
 			optlen = TCPOLEN_TSTAMP_APPA;
 		else
 			optlen = PAD(TCPOLEN_MAXSEG);
 		if (tp->t_flags & TF_REQ_SCALE)
 			optlen += PAD(TCPOLEN_WINDOW);
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		if (tp->t_flags & TF_SIGNATURE)
 			optlen += PAD(TCPOLEN_SIGNATURE);
 #endif
 		if (tp->t_flags & TF_SACK_PERMIT)
 			optlen += PAD(TCPOLEN_SACK_PERMITTED);
 	}
 #undef PAD
 	optlen = min(optlen, TCP_MAXOLEN);
 	return (tp->t_maxseg - optlen);
 }
 
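 /*
  * sysctl handler for net.inet.tcp.drop: userland writes a pair of
  * sockaddrs (foreign, local); the matching TCP connection, if any, is
  * looked up and dropped with ECONNABORTED.
  */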
 static int
 sysctl_drop(SYSCTL_HANDLER_ARGS)
 {
 	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
 	struct sockaddr_storage addrs[2];
 	struct inpcb *inp;
 	struct tcpcb *tp;
 #ifdef INET
 	struct sockaddr_in *fin = NULL, *lin = NULL;
 #endif
 	struct epoch_tracker et;
 #ifdef INET6
 	struct sockaddr_in6 *fin6, *lin6;
 #endif
 	int error;
 
 	inp = NULL;
 #ifdef INET6
 	fin6 = lin6 = NULL;
 #endif
 	error = 0;
 
 	if (req->oldptr != NULL || req->oldlen != 0)
 		return (EINVAL);
 	if (req->newptr == NULL)
 		return (EPERM);
 	if (req->newlen < sizeof(addrs))
 		return (ENOMEM);
 	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
 	if (error)
 		return (error);
 
 	switch (addrs[0].ss_family) {
 #ifdef INET6
 	case AF_INET6:
 		fin6 = (struct sockaddr_in6 *)&addrs[0];
 		lin6 = (struct sockaddr_in6 *)&addrs[1];
 		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
 		    lin6->sin6_len != sizeof(struct sockaddr_in6))
 			return (EINVAL);
 		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
 			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
 				return (EINVAL);
 			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
 			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
 #ifdef INET
 			fin = (struct sockaddr_in *)&addrs[0];
 			lin = (struct sockaddr_in *)&addrs[1];
 #endif
 			break;
 		}
 		error = sa6_embedscope(fin6, V_ip6_use_defzone);
 		if (error)
 			return (error);
 		error = sa6_embedscope(lin6, V_ip6_use_defzone);
 		if (error)
 			return (error);
 		break;
 #endif
 #ifdef INET
 	case AF_INET:
 		fin = (struct sockaddr_in *)&addrs[0];
 		lin = (struct sockaddr_in *)&addrs[1];
 		if (fin->sin_len != sizeof(struct sockaddr_in) ||
 		    lin->sin_len != sizeof(struct sockaddr_in))
 			return (EINVAL);
 		break;
 #endif
 	default:
 		return (EINVAL);
 	}
 	NET_EPOCH_ENTER(et);
 	switch (addrs[0].ss_family) {
 #ifdef INET6
 	case AF_INET6:
 		inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
 		    fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
 		    INPLOOKUP_WLOCKPCB, NULL);
 		break;
 #endif
 #ifdef INET
 	case AF_INET:
 		inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
 		    lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
 		break;
 #endif
 	}
 	if (inp != NULL) {
 		if (!SOLISTENING(inp->inp_socket)) {
 			tp = intotcpcb(inp);
 			tp = tcp_drop(tp, ECONNABORTED);
 			if (tp != NULL)
 				INP_WUNLOCK(inp);
 		} else
 			INP_WUNLOCK(inp);
 	} else
 		error = ESRCH;
 	NET_EPOCH_EXIT(et);
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
     CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
     CTLFLAG_NEEDGIANT, NULL, 0, sysctl_drop, "",
     "Drop TCP connection");
 
 static int
 tcp_sysctl_setsockopt(SYSCTL_HANDLER_ARGS)
 {
 	return (sysctl_setsockopt(oidp, arg1, arg2, req, &V_tcbinfo,
 	    &tcp_ctloutput_set));
 }
 
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, setsockopt,
     CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
     CTLFLAG_MPSAFE, NULL, 0, tcp_sysctl_setsockopt, "",
     "Set socket option for TCP endpoint");
 
 #ifdef KERN_TLS
 static int
 sysctl_switch_tls(SYSCTL_HANDLER_ARGS)
 {
 	/* addrs[0] is a foreign socket, addrs[1] is a local one. */
 	struct sockaddr_storage addrs[2];
 	struct inpcb *inp;
 #ifdef INET
 	struct sockaddr_in *fin = NULL, *lin = NULL;
 #endif
 	struct epoch_tracker et;
 #ifdef INET6
 	struct sockaddr_in6 *fin6, *lin6;
 #endif
 	int error;
 
 	inp = NULL;
 #ifdef INET6
 	fin6 = lin6 = NULL;
 #endif
 	error = 0;
 
 	if (req->oldptr != NULL || req->oldlen != 0)
 		return (EINVAL);
 	if (req->newptr == NULL)
 		return (EPERM);
 	if (req->newlen < sizeof(addrs))
 		return (ENOMEM);
 	error = SYSCTL_IN(req, &addrs, sizeof(addrs));
 	if (error)
 		return (error);
 
 	switch (addrs[0].ss_family) {
 #ifdef INET6
 	case AF_INET6:
 		fin6 = (struct sockaddr_in6 *)&addrs[0];
 		lin6 = (struct sockaddr_in6 *)&addrs[1];
 		if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
 		    lin6->sin6_len != sizeof(struct sockaddr_in6))
 			return (EINVAL);
 		if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
 			if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
 				return (EINVAL);
 			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
 			in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
 #ifdef INET
 			fin = (struct sockaddr_in *)&addrs[0];
 			lin = (struct sockaddr_in *)&addrs[1];
 #endif
 			break;
 		}
 		error = sa6_embedscope(fin6, V_ip6_use_defzone);
 		if (error)
 			return (error);
 		error = sa6_embedscope(lin6, V_ip6_use_defzone);
 		if (error)
 			return (error);
 		break;
 #endif
 #ifdef INET
 	case AF_INET:
 		fin = (struct sockaddr_in *)&addrs[0];
 		lin = (struct sockaddr_in *)&addrs[1];
 		if (fin->sin_len != sizeof(struct sockaddr_in) ||
 		    lin->sin_len != sizeof(struct sockaddr_in))
 			return (EINVAL);
 		break;
 #endif
 	default:
 		return (EINVAL);
 	}
 	NET_EPOCH_ENTER(et);
 	switch (addrs[0].ss_family) {
 #ifdef INET6
 	case AF_INET6:
 		inp = in6_pcblookup(&V_tcbinfo, &fin6->sin6_addr,
 		    fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port,
 		    INPLOOKUP_WLOCKPCB, NULL);
 		break;
 #endif
 #ifdef INET
 	case AF_INET:
 		inp = in_pcblookup(&V_tcbinfo, fin->sin_addr, fin->sin_port,
 		    lin->sin_addr, lin->sin_port, INPLOOKUP_WLOCKPCB, NULL);
 		break;
 #endif
 	}
 	NET_EPOCH_EXIT(et);
 	if (inp != NULL) {
 		struct socket *so;
 
 		so = inp->inp_socket;
 		soref(so);
 		error = ktls_set_tx_mode(so,
 		    arg2 == 0 ? TCP_TLS_MODE_SW : TCP_TLS_MODE_IFNET);
 		INP_WUNLOCK(inp);
 		sorele(so);
 	} else
 		error = ESRCH;
 	return (error);
 }
 
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_sw_tls,
     CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
     CTLFLAG_NEEDGIANT, NULL, 0, sysctl_switch_tls, "",
     "Switch TCP connection to SW TLS");
 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, switch_to_ifnet_tls,
     CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_WR | CTLFLAG_SKIP |
     CTLFLAG_NEEDGIANT, NULL, 1, sysctl_switch_tls, "",
     "Switch TCP connection to ifnet TLS");
 #endif
 
 /*
  * Generate a standardized TCP log line for use throughout the
  * tcp subsystem.  Memory allocation is done with M_NOWAIT to
  * allow use in the interrupt context.
  *
  * NB: The caller MUST free(s, M_TCPLOG) the returned string.
  * NB: The function may return NULL if memory allocation failed.
  *
  * Due to header inclusion and ordering limitations the struct ip
  * and ip6_hdr pointers have to be passed as void pointers.
  */
 char *
 tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
     const void *ip6hdr)
 {
 
 	/* Is logging enabled? */
 	if (V_tcp_log_in_vain == 0)
 		return (NULL);
 
 	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
 }
 
 char *
 tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
     const void *ip6hdr)
 {
 
 	/* Is logging enabled? */
 	if (tcp_log_debug == 0)
 		return (NULL);
 
 	return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
 }
 
 static char *
 tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, const void *ip4hdr,
     const void *ip6hdr)
 {
 	char *s, *sp;
 	size_t size;
 #ifdef INET
 	const struct ip *ip = (const struct ip *)ip4hdr;
 #endif
 #ifdef INET6
 	const struct ip6_hdr *ip6 = (const struct ip6_hdr *)ip6hdr;
 #endif /* INET6 */
 
 	/*
 	 * The log line looks like this:
 	 * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
 	 */
 	size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
 	    sizeof(PRINT_TH_FLAGS) + 1 +
 #ifdef INET6
 	    2 * INET6_ADDRSTRLEN;
 #else
 	    2 * INET_ADDRSTRLEN;
 #endif /* INET6 */
 
 	s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
 	if (s == NULL)
 		return (NULL);
 
 	strcat(s, "TCP: [");
 	sp = s + strlen(s);
 
 	if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
 		inet_ntoa_r(inc->inc_faddr, sp);
 		sp = s + strlen(s);
 		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
 		sp = s + strlen(s);
 		inet_ntoa_r(inc->inc_laddr, sp);
 		sp = s + strlen(s);
 		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
 #ifdef INET6
 	} else if (inc) {
 		ip6_sprintf(sp, &inc->inc6_faddr);
 		sp = s + strlen(s);
 		sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
 		sp = s + strlen(s);
 		ip6_sprintf(sp, &inc->inc6_laddr);
 		sp = s + strlen(s);
 		sprintf(sp, "]:%i", ntohs(inc->inc_lport));
 	} else if (ip6 && th) {
 		ip6_sprintf(sp, &ip6->ip6_src);
 		sp = s + strlen(s);
 		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
 		sp = s + strlen(s);
 		ip6_sprintf(sp, &ip6->ip6_dst);
 		sp = s + strlen(s);
 		sprintf(sp, "]:%i", ntohs(th->th_dport));
 #endif /* INET6 */
 #ifdef INET
 	} else if (ip && th) {
 		inet_ntoa_r(ip->ip_src, sp);
 		sp = s + strlen(s);
 		sprintf(sp, "]:%i to [", ntohs(th->th_sport));
 		sp = s + strlen(s);
 		inet_ntoa_r(ip->ip_dst, sp);
 		sp = s + strlen(s);
 		sprintf(sp, "]:%i", ntohs(th->th_dport));
 #endif /* INET */
 	} else {
 		free(s, M_TCPLOG);
 		return (NULL);
 	}
 	sp = s + strlen(s);
 	if (th)
 		sprintf(sp, " tcpflags 0x%b", tcp_get_flags(th), PRINT_TH_FLAGS);
 	if (*(s + size - 1) != '\0')
 		panic("%s: string too long", __func__);
 	return (s);
 }
 
 /*
  * A subroutine which makes it easy to track TCP state changes with DTrace.
  * This function shouldn't be called for t_state initializations that don't
  * correspond to actual TCP state transitions.
  */
 void
 tcp_state_change(struct tcpcb *tp, int newstate)
 {
 #if defined(KDTRACE_HOOKS)
 	int pstate = tp->t_state;
 #endif
 
 	TCPSTATES_DEC(tp->t_state);
 	TCPSTATES_INC(newstate);
 	tp->t_state = newstate;
 	TCP_PROBE6(state__change, NULL, tp, NULL, tp, NULL, pstate);
 }
 
 /*
  * Create an external-format (``xtcpcb'') structure using the information in
  * the kernel-format tcpcb structure pointed to by tp.  This is done to
  * reduce the spew of irrelevant information over this interface, to isolate
  * user code from changes in the kernel structure, and potentially to provide
  * information-hiding if we decide that some of this information should be
  * hidden from users.
  */
 void
 tcp_inptoxtp(const struct inpcb *inp, struct xtcpcb *xt)
 {
 	struct tcpcb *tp = intotcpcb(inp);
 	sbintime_t now;
 
 	bzero(xt, sizeof(*xt));
 	xt->t_state = tp->t_state;
 	xt->t_logstate = tcp_get_bblog_state(tp);
 	xt->t_flags = tp->t_flags;
 	xt->t_sndzerowin = tp->t_sndzerowin;
 	xt->t_sndrexmitpack = tp->t_sndrexmitpack;
 	xt->t_rcvoopack = tp->t_rcvoopack;
 	xt->t_rcv_wnd = tp->rcv_wnd;
 	xt->t_snd_wnd = tp->snd_wnd;
 	xt->t_snd_cwnd = tp->snd_cwnd;
 	xt->t_snd_ssthresh = tp->snd_ssthresh;
 	xt->t_dsack_bytes = tp->t_dsack_bytes;
 	xt->t_dsack_tlp_bytes = tp->t_dsack_tlp_bytes;
 	xt->t_dsack_pack = tp->t_dsack_pack;
 	xt->t_maxseg = tp->t_maxseg;
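 	/*
 	 * xt_ecn: bit 0 is set when classic ECN is permitted, bit 1 when
 	 * Accurate ECN (ACE) is permitted.
 	 */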
 	xt->xt_ecn = ((tp->t_flags2 & TF2_ECN_PERMIT) ? 1 : 0) +
 		     ((tp->t_flags2 & TF2_ACE_PERMIT) ? 2 : 0);
 
 	now = getsbinuptime();
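 	/* Report the remaining time on each armed timer in milliseconds. */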
 #define	COPYTIMER(which,where)	do {					\
 	if (tp->t_timers[which] != SBT_MAX)				\
 		xt->where = (tp->t_timers[which] - now) / SBT_1MS;	\
 	else								\
 		xt->where = 0;						\
 } while (0)
 	COPYTIMER(TT_DELACK, tt_delack);
 	COPYTIMER(TT_REXMT, tt_rexmt);
 	COPYTIMER(TT_PERSIST, tt_persist);
 	COPYTIMER(TT_KEEP, tt_keep);
 	COPYTIMER(TT_2MSL, tt_2msl);
 #undef COPYTIMER
 	xt->t_rcvtime = 1000 * (ticks - tp->t_rcvtime) / hz;
 
 	xt->xt_encaps_port = tp->t_port;
 	bcopy(tp->t_fb->tfb_tcp_block_name, xt->xt_stack,
 	    TCP_FUNCTION_NAME_LEN_MAX);
 	bcopy(CC_ALGO(tp)->name, xt->xt_cc, TCP_CA_NAME_MAX);
 #ifdef TCP_BLACKBOX
 	(void)tcp_log_get_id(tp, xt->xt_logid);
 #endif
 
 	xt->xt_len = sizeof(struct xtcpcb);
 	in_pcbtoxinpcb(inp, &xt->xt_inp);
 	/*
 	 * TCP doesn't use the inp_ppcb pointer; we embed the inpcb in the
 	 * tcpcb.  Fix up the pointer that in_pcbtoxinpcb() has set.
 	 * netstat(1) used to rely on this pointer when printing TCP
 	 * connections, so this fixup needs to stay for stable/14.
 	 */
 	xt->xt_inp.inp_ppcb = (uintptr_t)tp;
 }
 
 void
 tcp_log_end_status(struct tcpcb *tp, uint8_t status)
 {
 	uint32_t bit, i;
 
 	if ((tp == NULL) ||
 	    (status > TCP_EI_STATUS_MAX_VALUE) ||
 	    (status == 0)) {
 		/* Invalid */
 		return;
 	}
 	if (status > (sizeof(uint32_t) * 8)) {
 		/* Should this be a KASSERT? */
 		return;
 	}
 	bit = 1U << (status - 1);
 	if (bit & tp->t_end_info_status) {
 		/* already logged */
 		return;
 	}
 	for (i = 0; i < TCP_END_BYTE_INFO; i++) {
 		if (tp->t_end_info_bytes[i] == TCP_EI_EMPTY_SLOT) {
 			tp->t_end_info_bytes[i] = status;
 			tp->t_end_info_status |= bit;
 			break;
 		}
 	}
 }
 
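 /*
  * Pacing admission control: a connection may start pacing only while the
  * global count of paced connections is below tcp_pacing_limit (-1 means
  * no limit).  On success the count is bumped; tcp_decrement_paced_conn()
  * releases the slot again.
  */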
 int
 tcp_can_enable_pacing(void)
 {
 
 	if ((tcp_pacing_limit == -1) ||
 	    (tcp_pacing_limit > number_of_tcp_connections_pacing)) {
 		atomic_fetchadd_int(&number_of_tcp_connections_pacing, 1);
 		shadow_num_connections = number_of_tcp_connections_pacing;
 		return (1);
 	} else {
 		counter_u64_add(tcp_pacing_failures, 1);
 		return (0);
 	}
 }
 
 static uint8_t tcp_pacing_warning = 0;
 
 void
 tcp_decrement_paced_conn(void)
 {
 	uint32_t ret;
 
 	ret = atomic_fetchadd_int(&number_of_tcp_connections_pacing, -1);
 	shadow_num_connections = number_of_tcp_connections_pacing;
 	KASSERT(ret != 0, ("tcp_paced_connection_exits -1 would cause wrap?"));
 	if (ret == 0) {
 		if (tcp_pacing_limit != -1) {
 			printf("Warning all pacing is now disabled, count decrements invalidly!\n");
 			tcp_pacing_limit = 0;
 		} else if (tcp_pacing_warning == 0) {
 			printf("Warning pacing count is invalid, invalid decrement\n");
 			tcp_pacing_warning = 1;
 		}
 	}
 }
 
 static void
 tcp_default_switch_failed(struct tcpcb *tp)
 {
 	/*
 	 * If a switch fails we only need to
 	 * care about two things:
 	 * a) The t_flags2
 	 * and
 	 * b) The timer granularity.
 	 * Timeouts, at least for now, don't use the
 	 * old callout system in the other stacks so
 	 * those are hopefully safe.
 	 */
 	tcp_lro_features_off(tp);
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
 }
 
 #ifdef TCP_ACCOUNTING
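 /*
  * Classify an incoming ACK for the TCP accounting counters: ACKs behind
  * snd_una are old ACKs or SACKs, ACKs equal to snd_una are SACKs, window
  * updates or duplicate ACKs, and ACKs advancing snd_una are cumulative
  * ACKs, possibly carrying SACK blocks as well.
  */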
 int
 tcp_do_ack_accounting(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to, uint32_t tiwin, int mss)
 {
 	if (SEQ_LT(th->th_ack, tp->snd_una)) {
 		/* Do we have a SACK? */
 		if (to->to_flags & TOF_SACK) {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[ACK_SACK]++;
 			}
 			return (ACK_SACK);
 		} else {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[ACK_BEHIND]++;
 			}
 			return (ACK_BEHIND);
 		}
 	} else if (th->th_ack == tp->snd_una) {
 		/* Do we have a SACK? */
 		if (to->to_flags & TOF_SACK) {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[ACK_SACK]++;
 			}
 			return (ACK_SACK);
 		} else if (tiwin != tp->snd_wnd) {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[ACK_RWND]++;
 			}
 			return (ACK_RWND);
 		} else {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[ACK_DUPACK]++;
 			}
 			return (ACK_DUPACK);
 		}
 	} else {
 		if (!SEQ_GT(th->th_ack, tp->snd_max)) {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((th->th_ack - tp->snd_una) + mss - 1)/mss);
 			}
 		}
 		if (to->to_flags & TOF_SACK) {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[ACK_CUMACK_SACK]++;
 			}
 			return (ACK_CUMACK_SACK);
 		} else {
 			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
 				tp->tcp_cnt_counters[ACK_CUMACK]++;
 			}
 			return (ACK_CUMACK);
 		}
 	}
 }
 #endif
 
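 /*
  * Convert the stored srtt/rttvar/rttlow of a connection between tick
  * granularity (shifted by TCP_RTT_SHIFT/TCP_RTTVAR_SHIFT) and microsecond
  * granularity, used when a connection moves to a stack that keeps its
  * timers in different units.
  */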
 void
 tcp_change_time_units(struct tcpcb *tp, int granularity)
 {
 	if (tp->t_tmr_granularity == granularity) {
 		/* We are there */
 		return;
 	}
 	if (granularity == TCP_TMR_GRANULARITY_USEC) {
 		KASSERT((tp->t_tmr_granularity == TCP_TMR_GRANULARITY_TICKS),
 			("Granularity is not TICKS its %u in tp:%p",
 			 tp->t_tmr_granularity, tp));
 		tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
 		if (tp->t_srtt > 1) {
 			uint32_t val, frac;
 
 			val = tp->t_srtt >> TCP_RTT_SHIFT;
 			frac = tp->t_srtt & 0x1f;
 			tp->t_srtt = TICKS_2_USEC(val);
 			/*
 			 * frac is the fractional part of the srtt (if any),
 			 * but it is stored in ticks with each bit
 			 * representing 1/32nd of a tick.
 			 */
 			if (frac) {
 				if (hz == 1000) {
 					frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
 				} else {
 					frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
 				}
 				tp->t_srtt += frac;
 			}
 		}
 		if (tp->t_rttvar) {
 			uint32_t val, frac;
 
 			val = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
 			frac = tp->t_rttvar & 0x1f;
 			tp->t_rttvar = TICKS_2_USEC(val);
 			/*
 			 * frac is the fractional part of the rttvar (if any),
 			 * but it is stored in ticks with each bit
 			 * representing 1/32nd of a tick.
 			 */
 			if (frac) {
 				if (hz == 1000) {
 					frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE);
 				} else {
 					frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE));
 				}
 				tp->t_rttvar += frac;
 			}
 		}
 		tp->t_tmr_granularity = TCP_TMR_GRANULARITY_USEC;
 	} else if (granularity == TCP_TMR_GRANULARITY_TICKS) {
 		/* Convert back to ticks, preserving the fractional part. */
 		KASSERT((tp->t_tmr_granularity == TCP_TMR_GRANULARITY_USEC),
 			("Granularity is not USEC its %u in tp:%p",
 			 tp->t_tmr_granularity, tp));
 		if (tp->t_srtt > 1) {
 			uint32_t val, frac;
 
 			val = USEC_2_TICKS(tp->t_srtt);
 			frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
 			tp->t_srtt = val << TCP_RTT_SHIFT;
 			/*
 			 * frac is the fractional part (in usec) left over
 			 * from converting to ticks.  We need to convert
 			 * it into the 5 bit shifted remainder.
 			 */
 			if (frac) {
 				if (hz == 1000) {
 					frac = (((uint64_t)frac *  (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
 				} else {
 					frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
 				}
 				tp->t_srtt += frac;
 			}
 		}
 		if (tp->t_rttvar) {
 			uint32_t val, frac;
 
 			val = USEC_2_TICKS(tp->t_rttvar);
 			frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz);
 			tp->t_rttvar = val <<  TCP_RTTVAR_SHIFT;
 			/*
 			 * frac is the fractional part (in usec) left over
 			 * from converting to ticks.  We need to convert
 			 * it into the 5 bit shifted remainder.
 			 */
 			if (frac) {
 				if (hz == 1000) {
 					frac = (((uint64_t)frac *  (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
 				} else {
 					frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
 				}
 				tp->t_rttvar += frac;
 			}
 		}
 		tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow);
 		tp->t_tmr_granularity = TCP_TMR_GRANULARITY_TICKS;
 	}
 #ifdef INVARIANTS
 	else {
 		panic("Unknown granularity:%d tp:%p",
 		      granularity, tp);
 	}
 #endif	
 }
 
 void
 tcp_handle_orphaned_packets(struct tcpcb *tp)
 {
 	struct mbuf *save, *m, *prev;
 	/*
 	 * Called when a stack switch is occurring, from the fini()
 	 * of the old stack.  We assume the init() of the new stack
 	 * has already been run and has set t_flags2 to what the new
 	 * stack supports.  This function then deals with any
 	 * differences, i.e. it cleans up any queued packets that
 	 * the new stack does not support.
 	 */
 
 	if (tp->t_flags2 & TF2_MBUF_L_ACKS)
 		return;
 	if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) == 0 &&
 	    !STAILQ_EMPTY(&tp->t_inqueue)) {
 		/*
 		 * It is unsafe to process the packets since a
 		 * reset may be lurking in them (it's rare but it
 		 * can occur).  If we were to find a RST, then we
 		 * would end up dropping the connection and the
 		 * INP lock, so when we return the caller (tcp_usrreq)
 		 * would blow up when it tries to unlock the inp.
 		 * This new stack does not do any fancy LRO features
 		 * so all we can do is toss the packets.
 		 */
 		m = STAILQ_FIRST(&tp->t_inqueue);
 		STAILQ_INIT(&tp->t_inqueue);
 		STAILQ_FOREACH_FROM_SAFE(m, &tp->t_inqueue, m_stailqpkt, save)
 			m_freem(m);
 	} else {
 		/*
 		 * Here we have a stack that does mbuf queuing but
 		 * does not support compressed ack's. We must
 		 * walk all the mbufs and discard any compressed acks.
 		 */
 		STAILQ_FOREACH_SAFE(m, &tp->t_inqueue, m_stailqpkt, save) {
 			if (m->m_flags & M_ACKCMP) {
 				if (m == STAILQ_FIRST(&tp->t_inqueue))
 					STAILQ_REMOVE_HEAD(&tp->t_inqueue,
 					    m_stailqpkt);
 				else
 					STAILQ_REMOVE_AFTER(&tp->t_inqueue,
 					    prev, m_stailqpkt);
 				m_freem(m);
 			} else
 				prev = m;
 		}
 	}
 }
 
 #ifdef TCP_REQUEST_TRK
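 /*
  * Estimate the extra bytes TLS framing adds on the wire for a given amount
  * of user payload: the number of TLS records times the per-record header
  * plus trailer overhead.
  */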
 uint32_t
 tcp_estimate_tls_overhead(struct socket *so, uint64_t tls_usr_bytes)
 {
 #ifdef KERN_TLS
 	struct ktls_session *tls;
 	uint32_t rec_oh, records;
 
 	tls = so->so_snd.sb_tls_info;
 	if (tls == NULL)
 	    return (0);
 
 	rec_oh = tls->params.tls_hlen + tls->params.tls_tlen;
 	records = ((tls_usr_bytes + tls->params.max_frame_len - 1)/tls->params.max_frame_len);
 	return (records * rec_oh);
 #else
 	return (0);
 #endif
 }
 
 extern uint32_t tcp_stale_entry_time;
 uint32_t tcp_stale_entry_time = 250000;
 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, usrlog_stale, CTLFLAG_RW,
     &tcp_stale_entry_time, 250000, "Time after which a tcpreq entry without a sendfile ages out");
 
 void
 tcp_req_log_req_info(struct tcpcb *tp, struct tcp_sendfile_track *req,
     uint16_t slot, uint8_t val, uint64_t offset, uint64_t nbytes)
 {
 	if (tcp_bblogging_on(tp)) {
 		union tcp_log_stackspecific log;
 		struct timeval tv;
 
 		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
 		log.u_bbr.inhpts = tcp_in_hpts(tp);
 		log.u_bbr.flex8 = val;
 		log.u_bbr.rttProp = req->timestamp;
 		log.u_bbr.delRate = req->start;
 		log.u_bbr.cur_del_rate = req->end;
 		log.u_bbr.flex1 = req->start_seq;
 		log.u_bbr.flex2 = req->end_seq;
 		log.u_bbr.flex3 = req->flags;
 		log.u_bbr.flex4 = ((req->localtime >> 32) & 0x00000000ffffffff);
 		log.u_bbr.flex5 = (req->localtime & 0x00000000ffffffff);
 		log.u_bbr.flex7 = slot;
 		log.u_bbr.bw_inuse = offset;
 		/* nbytes = flex6 | epoch */
 		log.u_bbr.flex6 = ((nbytes >> 32) & 0x00000000ffffffff);
 		log.u_bbr.epoch = (nbytes & 0x00000000ffffffff);
 		/* cspr =  lt_epoch | pkts_out */
 		log.u_bbr.lt_epoch = ((req->cspr >> 32) & 0x00000000ffffffff);
 		log.u_bbr.pkts_out |= (req->cspr & 0x00000000ffffffff);
 		log.u_bbr.applimited = tp->t_tcpreq_closed;
 		log.u_bbr.applimited <<= 8;
 		log.u_bbr.applimited |= tp->t_tcpreq_open;
 		log.u_bbr.applimited <<= 8;
 		log.u_bbr.applimited |= tp->t_tcpreq_req;
 		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
 		TCP_LOG_EVENTP(tp, NULL,
 		    &tptosocket(tp)->so_rcv,
 		    &tptosocket(tp)->so_snd,
 		    TCP_LOG_REQ_T, 0,
 		    0, &log, false, &tv);
 	}
 }
 
 void
 tcp_req_free_a_slot(struct tcpcb *tp, struct tcp_sendfile_track *ent)
 {
 	if (tp->t_tcpreq_req > 0)
 		tp->t_tcpreq_req--;
 	if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) {
 		if (tp->t_tcpreq_open > 0)
 			tp->t_tcpreq_open--;
 	} else {
 		if (tp->t_tcpreq_closed > 0)
 			tp->t_tcpreq_closed--;
 	}
 	ent->flags = TCP_TRK_TRACK_FLG_EMPTY;
 }
 
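 /*
  * Scan the request-tracking slots and free any closed-end entry that has
  * gone without a sendfile for longer than tcp_stale_entry_time.  If
  * nothing aged out and rm_oldest is set (i.e. the table is full), free
  * the oldest eligible entry instead.
  */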
 static void
 tcp_req_check_for_stale_entries(struct tcpcb *tp, uint64_t ts, int rm_oldest)
 {
 	struct tcp_sendfile_track *ent;
 	uint64_t time_delta, oldest_delta;
 	int i, oldest, oldest_set = 0, cnt_rm = 0;
 
 	for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
 		ent = &tp->t_tcpreq_info[i];
 		if (ent->flags != TCP_TRK_TRACK_FLG_USED) {
 			/*
 			 * We only care about closed end ranges
 			 * that are allocated and have no sendfile
 			 * ever touching them. They would be in
 			 * state USED.
 			 */
 			continue;
 		}
 		if (ts >= ent->localtime)
 			time_delta = ts - ent->localtime;
 		else
 			time_delta = 0;
 		if (time_delta &&
 		    ((oldest_set == 0) || (oldest_delta < time_delta))) {
 			oldest_set = 1;
 			oldest = i;
 			oldest_delta = time_delta;
 		}
 		if (tcp_stale_entry_time && (time_delta >= tcp_stale_entry_time)) {
 			/*
 			 * No sendfile within our time limit;
 			 * time to purge it.
 			 */
 			cnt_rm++;
 			tcp_req_log_req_info(tp, &tp->t_tcpreq_info[i], i, TCP_TRK_REQ_LOG_STALE,
 					      time_delta, 0);
 			tcp_req_free_a_slot(tp, ent);
 		}
 	}
 	if ((cnt_rm == 0) && rm_oldest && oldest_set) {
 		ent = &tp->t_tcpreq_info[oldest];
 		tcp_req_log_req_info(tp, ent, oldest, TCP_TRK_REQ_LOG_STALE,
 				      oldest_delta, 1);
 		tcp_req_free_a_slot(tp, ent);
 	}
 }
 
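 /*
  * Free any closed-end tracking entries whose end_seq has now been
  * cumulatively acknowledged; returns the number of entries released.
  */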
 int
 tcp_req_check_for_comp(struct tcpcb *tp, tcp_seq ack_point)
 {
 	int i, ret=0;
 	struct tcp_sendfile_track *ent;
 
 	/* Clean up any old closed end requests that are now completed */
 	if (tp->t_tcpreq_req == 0)
 		return(0);
 	if (tp->t_tcpreq_closed == 0)
 		return(0);
 	for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
 		ent = &tp->t_tcpreq_info[i];
 		/* Skip empty ones */
 		if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
 			continue;
 		/* Skip open ones */
 		if (ent->flags & TCP_TRK_TRACK_FLG_OPEN)
 			continue;
 		if (SEQ_GEQ(ack_point, ent->end_seq)) {
 			/* We are past it -- free it */
 			tcp_req_log_req_info(tp, ent,
 					      i, TCP_TRK_REQ_LOG_FREED, 0, 0);
 			tcp_req_free_a_slot(tp, ent);
 			ret++;
 		}
 	}
 	return (ret);
 }
 
 int
 tcp_req_is_entry_comp(struct tcpcb *tp, struct tcp_sendfile_track *ent, tcp_seq ack_point)
 {
 	if (tp->t_tcpreq_req == 0)
 		return(-1);
 	if (tp->t_tcpreq_closed == 0)
 		return(-1);
 	if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
 		return(-1);
 	if (SEQ_GEQ(ack_point, ent->end_seq)) {
 		return (1);
 	}
 	return (0);
 }
 
 struct tcp_sendfile_track *
 tcp_req_find_a_req_that_is_completed_by(struct tcpcb *tp, tcp_seq th_ack, int *ip)
 {
 	/*
 	 * Given an ack point (th_ack) walk through our entries and
 	 * return the first one found that th_ack goes past the
 	 * end_seq.
 	 */
 	struct tcp_sendfile_track *ent;
 	int i;
 
 	if (tp->t_tcpreq_req == 0) {
 		/* none open */
 		return (NULL);
 	}
 	for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
 		ent = &tp->t_tcpreq_info[i];
 		if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY)
 			continue;
 		if ((ent->flags & TCP_TRK_TRACK_FLG_OPEN) == 0) {
 			if (SEQ_GEQ(th_ack, ent->end_seq)) {
 				*ip = i;
 				return (ent);
 			}
 		}
 	}
 	return (NULL);
 }
 
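 /*
  * Map a TCP sequence number to the tracking entry that covers it.  An
  * open-ended entry matches anything at or past its start (or is returned
  * when it is all we have); a closed entry requires the sequence to fall
  * inside [start_seq, end_seq).
  */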
 struct tcp_sendfile_track *
 tcp_req_find_req_for_seq(struct tcpcb *tp, tcp_seq seq)
 {
 	struct tcp_sendfile_track *ent;
 	int i;
 
 	if (tp->t_tcpreq_req == 0) {
 		/* none open */
 		return (NULL);
 	}
 	for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
 		ent = &tp->t_tcpreq_info[i];
 		tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_SEARCH,
 				      (uint64_t)seq, 0);
 		if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) {
 			continue;
 		}
 		if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) {
 			/*
 			 * An open-ended request only needs to
 			 * match the beginning seq, or be
 			 * all we have (once we keep sending on
 			 * an open-ended request we may have a
 			 * seq wrap).
 			 */
 			if ((SEQ_GEQ(seq, ent->start_seq)) ||
 			    (tp->t_tcpreq_closed == 0))
 				return (ent);
 		} else {
 			/*
 			 * For a closed-ended request we need to
 			 * be a bit more careful: the sequence must
 			 * fall within the request's completed range.
 			 */
 			if ((SEQ_GEQ(seq, ent->start_seq)) &&
 			    (SEQ_LT(seq, ent->end_seq))) {
 				return (ent);
 			}
 		}
 	}
 	return (NULL);
 }
 
 /* Should this be in its own file tcp_req.c ? */
 struct tcp_sendfile_track *
 tcp_req_alloc_req_full(struct tcpcb *tp, struct tcp_snd_req *req, uint64_t ts, int rec_dups)
 {
 	struct tcp_sendfile_track *fil;
 	int i, allocated;
 
 	/* In case the stack does not check for completions do so now */
 	tcp_req_check_for_comp(tp, tp->snd_una);
 	/* Check for stale entries */
 	if (tp->t_tcpreq_req)
 		tcp_req_check_for_stale_entries(tp, ts,
 		    (tp->t_tcpreq_req >= MAX_TCP_TRK_REQ));
 	/* Check to see if this is a duplicate of one not started */
 	if (tp->t_tcpreq_req) {
 		for(i = 0, allocated = 0; i < MAX_TCP_TRK_REQ; i++) {
 			fil = &tp->t_tcpreq_info[i];
 			if (fil->flags != TCP_TRK_TRACK_FLG_USED)
 				continue;
 			if ((fil->timestamp == req->timestamp) &&
 			    (fil->start == req->start) &&
 			    ((fil->flags & TCP_TRK_TRACK_FLG_OPEN) ||
 			     (fil->end == req->end))) {
 				/*
 				 * We already have this request
 				 * and it has not been started with sendfile.
 				 * This probably means the user was returned
 				 * a 4xx of some sort and it is going to age
 				 * out; let's not duplicate it.
 				 */
 				return(fil);
 			}
 		}
 	}
 	/* Ok if there is no room at the inn we are in trouble */
 	if (tp->t_tcpreq_req >= MAX_TCP_TRK_REQ) {
 		tcp_trace_point(tp, TCP_TP_REQ_LOG_FAIL);
 		for(i = 0; i < MAX_TCP_TRK_REQ; i++) {
 			tcp_req_log_req_info(tp, &tp->t_tcpreq_info[i],
 			    i, TCP_TRK_REQ_LOG_ALLOCFAIL, 0, 0);
 		}
 		return (NULL);
 	}
 	for(i = 0, allocated = 0; i < MAX_TCP_TRK_REQ; i++) {
 		fil = &tp->t_tcpreq_info[i];
 		if (fil->flags == TCP_TRK_TRACK_FLG_EMPTY) {
 			allocated = 1;
 			fil->flags = TCP_TRK_TRACK_FLG_USED;
 			fil->timestamp = req->timestamp;
 			fil->localtime = ts;
 			fil->start = req->start;
 			if (req->flags & TCP_LOG_HTTPD_RANGE_END) {
 				fil->end = req->end;
 			} else {
 				fil->end = 0;
 				fil->flags |= TCP_TRK_TRACK_FLG_OPEN;
 			}
 			/*
 			 * We can set the min boundaries to the TCP Sequence space,
 			 * but it might be found to be further up when sendfile
 			 * actually runs on this range (if it ever does).
 			 */
 			fil->sbcc_at_s = tptosocket(tp)->so_snd.sb_ccc;
 			fil->start_seq = tp->snd_una +
 			    tptosocket(tp)->so_snd.sb_ccc;
 			fil->end_seq = (fil->start_seq + ((uint32_t)(fil->end - fil->start)));
 			if (tptosocket(tp)->so_snd.sb_tls_info) {
 				/*
 				 * This session is doing TLS. Take a swag guess
 				 * at the overhead.
 				 */
 				fil->end_seq += tcp_estimate_tls_overhead(
 				    tptosocket(tp), (fil->end - fil->start));
 			}
 			tp->t_tcpreq_req++;
 			if (fil->flags & TCP_TRK_TRACK_FLG_OPEN)
 				tp->t_tcpreq_open++;
 			else
 				tp->t_tcpreq_closed++;
 			tcp_req_log_req_info(tp, fil, i,
 			    TCP_TRK_REQ_LOG_NEW, 0, 0);
 			break;
 		} else
 			fil = NULL;
 	}
 	return (fil);
 }
 
 void
 tcp_req_alloc_req(struct tcpcb *tp, union tcp_log_userdata *user, uint64_t ts)
 {
 	(void)tcp_req_alloc_req_full(tp, &user->tcp_req, ts, 1);
 }
 #endif
 
 void
 tcp_log_socket_option(struct tcpcb *tp, uint32_t option_num, uint32_t option_val, int err)
 {
 	if (tcp_bblogging_on(tp)) {
 		struct tcp_log_buffer *l;
 
 		l = tcp_log_event(tp, NULL,
 		        &tptosocket(tp)->so_rcv,
 		        &tptosocket(tp)->so_snd,
 		        TCP_LOG_SOCKET_OPT,
 		        err, 0, NULL, 1,
 		        NULL, NULL, 0, NULL);
 		if (l) {
 			l->tlb_flex1 = option_num;
 			l->tlb_flex2 = option_val;
 		}
 	}
 }
 
 uint32_t
 tcp_get_srtt(struct tcpcb *tp, int granularity)
 {
 	uint32_t srtt;
 
 	KASSERT(granularity == TCP_TMR_GRANULARITY_USEC ||
 	    granularity == TCP_TMR_GRANULARITY_TICKS,
 	    ("%s: called with unexpected granularity %d", __func__,
 	    granularity));
 
 	srtt = tp->t_srtt;
 
 	/*
 	 * We only support two granularities. If the stored granularity
 	 * does not match the granularity requested by the caller,
 	 * convert the stored value to the requested unit of granularity.
 	 */
 	if (tp->t_tmr_granularity != granularity) {
 		if (granularity == TCP_TMR_GRANULARITY_USEC)
 			srtt = TICKS_2_USEC(srtt);
 		else
 			srtt = USEC_2_TICKS(srtt);
 	}
 
 	/*
 	 * If the srtt is stored with ticks granularity, we need to
 	 * unshift to get the actual value. We do this after the
 	 * conversion above (if one was necessary) in order to maximize
 	 * precision.
 	 */
 	if (tp->t_tmr_granularity == TCP_TMR_GRANULARITY_TICKS)
 		srtt = srtt >> TCP_RTT_SHIFT;
 
 	return (srtt);
 }
 
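 /*
  * Per-transmit accounting: tally TLP and retransmitted vs. new bytes, and
  * if this is a hardware (ifnet) TLS session whose retransmit percentage
  * has exceeded ktls_ifnet_max_rexmit_pct, switch it away from ifnet TLS.
  */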
 void
 tcp_account_for_send(struct tcpcb *tp, uint32_t len, uint8_t is_rxt,
     uint8_t is_tlp, bool hw_tls)
 {
 
 	if (is_tlp) {
 		tp->t_sndtlppack++;
 		tp->t_sndtlpbyte += len;
 	}
 	/* To get total bytes sent you must add t_snd_rxt_bytes to t_sndbytes */
 	if (is_rxt)
 		tp->t_snd_rxt_bytes += len;
 	else
 		tp->t_sndbytes += len;
 
 #ifdef KERN_TLS
 	if (hw_tls && is_rxt && len != 0) {
 		uint64_t rexmit_percent;
 
 		rexmit_percent = (1000ULL * tp->t_snd_rxt_bytes) /
 		    (10ULL * (tp->t_snd_rxt_bytes + tp->t_sndbytes));
 		if (rexmit_percent > ktls_ifnet_max_rexmit_pct)
 			ktls_disable_ifnet(tp);
 	}
 #endif
 }
diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c
index 93fdedc03c7b..d3ba42fd9d06 100644
--- a/sys/netinet/tcp_usrreq.c
+++ b/sys/netinet/tcp_usrreq.c
@@ -1,3136 +1,3134 @@
 /*-
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 1982, 1986, 1988, 1993
  *	The Regents of the University of California.
  * Copyright (c) 2006-2007 Robert N. M. Watson
  * Copyright (c) 2010-2011 Juniper Networks, Inc.
  * All rights reserved.
  *
  * Portions of this software were developed by Robert N. M. Watson under
  * contract to Juniper Networks, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 #include "opt_ddb.h"
 #include "opt_inet.h"
 #include "opt_inet6.h"
 #include "opt_ipsec.h"
 #include "opt_kern_tls.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/arb.h>
 #include <sys/limits.h>
 #include <sys/malloc.h>
 #include <sys/refcount.h>
 #include <sys/kernel.h>
 #include <sys/ktls.h>
 #include <sys/qmath.h>
 #include <sys/sysctl.h>
 #include <sys/mbuf.h>
 #ifdef INET6
 #include <sys/domain.h>
 #endif /* INET6 */
 #include <sys/socket.h>
 #include <sys/socketvar.h>
 #include <sys/protosw.h>
 #include <sys/proc.h>
 #include <sys/jail.h>
 #include <sys/stats.h>
 
 #ifdef DDB
 #include <ddb/ddb.h>
 #endif
 
 #include <net/if.h>
 #include <net/if_var.h>
 #include <net/route.h>
 #include <net/vnet.h>
 
 #include <netinet/in.h>
 #include <netinet/in_kdtrace.h>
 #include <netinet/in_pcb.h>
 #include <netinet/in_systm.h>
 #include <netinet/in_var.h>
 #include <netinet/ip.h>
 #include <netinet/ip_var.h>
 #ifdef INET6
 #include <netinet/ip6.h>
 #include <netinet6/in6_pcb.h>
 #include <netinet6/ip6_var.h>
 #include <netinet6/scope6_var.h>
 #endif
 #include <netinet/tcp.h>
 #include <netinet/tcp_fsm.h>
 #include <netinet/tcp_seq.h>
 #include <netinet/tcp_timer.h>
 #include <netinet/tcp_var.h>
 #include <netinet/tcp_log_buf.h>
 #include <netinet/tcpip.h>
 #include <netinet/cc/cc.h>
 #include <netinet/tcp_fastopen.h>
 #include <netinet/tcp_hpts.h>
 #ifdef TCPPCAP
 #include <netinet/tcp_pcap.h>
 #endif
 #ifdef TCP_OFFLOAD
 #include <netinet/tcp_offload.h>
 #endif
 #include <netipsec/ipsec_support.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/pmap.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
 
 /*
  * TCP protocol interface to socket abstraction.
  */
 #ifdef INET
 static int	tcp_connect(struct tcpcb *, struct sockaddr_in *,
 		    struct thread *td);
 #endif /* INET */
 #ifdef INET6
 static int	tcp6_connect(struct tcpcb *, struct sockaddr_in6 *,
 		    struct thread *td);
 #endif /* INET6 */
 static void	tcp_disconnect(struct tcpcb *);
 static void	tcp_usrclosed(struct tcpcb *);
 static void	tcp_fill_info(const struct tcpcb *, struct tcp_info *);
 
 static int	tcp_pru_options_support(struct tcpcb *tp, int flags);
 
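 /*
  * Log a protocol user request (PRU) to the black box log when BB logging
  * is enabled on the connection, recording the request type and its error.
  */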
 static void
 tcp_bblog_pru(struct tcpcb *tp, uint32_t pru, int error)
 {
 	struct tcp_log_buffer *lgb;
 
 	KASSERT(tp != NULL, ("tcp_bblog_pru: tp == NULL"));
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 	if (tcp_bblogging_on(tp)) {
 		lgb = tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_PRU, error,
 		    0, NULL, false, NULL, NULL, 0, NULL);
 	} else {
 		lgb = NULL;
 	}
 	if (lgb != NULL) {
 		if (error >= 0) {
 			lgb->tlb_errno = (uint32_t)error;
 		}
 		lgb->tlb_flex1 = pru;
 	}
 }
 
 /*
  * TCP attaches to socket via pru_attach(), reserving space,
  * and an internet control block.
  */
 static int
 tcp_usr_attach(struct socket *so, int proto, struct thread *td)
 {
 	struct inpcb *inp;
 	struct tcpcb *tp = NULL;
 	int error;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp == NULL, ("tcp_usr_attach: inp != NULL"));
 
 	error = soreserve(so, V_tcp_sendspace, V_tcp_recvspace);
 	if (error)
 		goto out;
 
 	so->so_rcv.sb_flags |= SB_AUTOSIZE;
 	so->so_snd.sb_flags |= SB_AUTOSIZE;
 	error = in_pcballoc(so, &V_tcbinfo);
 	if (error)
 		goto out;
 	inp = sotoinpcb(so);
 	tp = tcp_newtcpcb(inp);
 	if (tp == NULL) {
 		error = ENOBUFS;
 		in_pcbdetach(inp);
 		in_pcbfree(inp);
 		goto out;
 	}
 	tp->t_state = TCPS_CLOSED;
 	tcp_bblog_pru(tp, PRU_ATTACH, error);
 	INP_WUNLOCK(inp);
 	TCPSTATES_INC(TCPS_CLOSED);
 out:
 	TCP_PROBE2(debug__user, tp, PRU_ATTACH);
 	return (error);
 }
 
 /*
  * tcp_usr_detach is called when the socket layer loses its final reference
  * to the socket, be it a file descriptor reference, a reference from TCP,
  * etc.  At this point, there is only one case in which we will keep around
  * inpcb state: time wait.
  */
 static void
 tcp_usr_detach(struct socket *so)
 {
 	struct inpcb *inp;
 	struct tcpcb *tp;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
 	INP_WLOCK(inp);
 	KASSERT(so->so_pcb == inp && inp->inp_socket == so,
 		("%s: socket %p inp %p mismatch", __func__, so, inp));
 
 	tp = intotcpcb(inp);
 
 	KASSERT(inp->inp_flags & INP_DROPPED ||
 	    tp->t_state < TCPS_SYN_SENT,
 	    ("%s: inp %p not dropped or embryonic", __func__, inp));
 
 	tcp_discardcb(tp);
 	in_pcbdetach(inp);
 	in_pcbfree(inp);
 }
 
 #ifdef INET
 /*
  * Give the socket an address.
  */
 static int
 tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
 {
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct sockaddr_in *sinp;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_bind: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (EINVAL);
 	}
 	tp = intotcpcb(inp);
 
 	sinp = (struct sockaddr_in *)nam;
 	if (nam->sa_family != AF_INET) {
 		/*
 		 * Preserve compatibility with old programs.
 		 */
 		if (nam->sa_family != AF_UNSPEC ||
 		    nam->sa_len < offsetof(struct sockaddr_in, sin_zero) ||
 		    sinp->sin_addr.s_addr != INADDR_ANY) {
 			error = EAFNOSUPPORT;
 			goto out;
 		}
 		nam->sa_family = AF_INET;
 	}
 	if (nam->sa_len != sizeof(*sinp)) {
 		error = EINVAL;
 		goto out;
 	}
 	/*
 	 * Must check for multicast addresses and disallow binding
 	 * to them.
 	 */
 	if (IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
 		error = EAFNOSUPPORT;
 		goto out;
 	}
 	INP_HASH_WLOCK(&V_tcbinfo);
 	error = in_pcbbind(inp, sinp, td->td_ucred);
 	INP_HASH_WUNLOCK(&V_tcbinfo);
 out:
 	tcp_bblog_pru(tp, PRU_BIND, error);
 	TCP_PROBE2(debug__user, tp, PRU_BIND);
 	INP_WUNLOCK(inp);
 
 	return (error);
 }
 #endif /* INET */
 
 #ifdef INET6
 static int
 tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
 {
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct sockaddr_in6 *sin6;
 	u_char vflagsav;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp6_usr_bind: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (EINVAL);
 	}
 	tp = intotcpcb(inp);
 
 	vflagsav = inp->inp_vflag;
 
 	sin6 = (struct sockaddr_in6 *)nam;
 	if (nam->sa_family != AF_INET6) {
 		error = EAFNOSUPPORT;
 		goto out;
 	}
 	if (nam->sa_len != sizeof(*sin6)) {
 		error = EINVAL;
 		goto out;
 	}
 	/*
 	 * Must check for multicast addresses and disallow binding
 	 * to them.
 	 */
 	if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
 		error = EAFNOSUPPORT;
 		goto out;
 	}
 
 	INP_HASH_WLOCK(&V_tcbinfo);
 	inp->inp_vflag &= ~INP_IPV4;
 	inp->inp_vflag |= INP_IPV6;
 #ifdef INET
 	if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
 			inp->inp_vflag |= INP_IPV4;
 		else if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
 			struct sockaddr_in sin;
 
 			in6_sin6_2_sin(&sin, sin6);
 			if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
 				error = EAFNOSUPPORT;
 				INP_HASH_WUNLOCK(&V_tcbinfo);
 				goto out;
 			}
 			inp->inp_vflag |= INP_IPV4;
 			inp->inp_vflag &= ~INP_IPV6;
 			error = in_pcbbind(inp, &sin, td->td_ucred);
 			INP_HASH_WUNLOCK(&V_tcbinfo);
 			goto out;
 		}
 	}
 #endif
 	error = in6_pcbbind(inp, sin6, td->td_ucred);
 	INP_HASH_WUNLOCK(&V_tcbinfo);
 out:
 	if (error != 0)
 		inp->inp_vflag = vflagsav;
 	tcp_bblog_pru(tp, PRU_BIND, error);
 	TCP_PROBE2(debug__user, tp, PRU_BIND);
 	INP_WUNLOCK(inp);
 	return (error);
 }
 #endif /* INET6 */
 
 #ifdef INET
 /*
  * Prepare to accept connections.
  */
 static int
 tcp_usr_listen(struct socket *so, int backlog, struct thread *td)
 {
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_listen: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (EINVAL);
 	}
 	tp = intotcpcb(inp);
 
 	SOCK_LOCK(so);
 	error = solisten_proto_check(so);
 	if (error != 0) {
 		SOCK_UNLOCK(so);
 		goto out;
 	}
 	if (inp->inp_lport == 0) {
 		INP_HASH_WLOCK(&V_tcbinfo);
 		error = in_pcbbind(inp, NULL, td->td_ucred);
 		INP_HASH_WUNLOCK(&V_tcbinfo);
 	}
 	if (error == 0) {
 		tcp_state_change(tp, TCPS_LISTEN);
 		solisten_proto(so, backlog);
 #ifdef TCP_OFFLOAD
 		if ((so->so_options & SO_NO_OFFLOAD) == 0)
 			tcp_offload_listen_start(tp);
 #endif
 	} else {
 		solisten_proto_abort(so);
 	}
 	SOCK_UNLOCK(so);
 
 	if (IS_FASTOPEN(tp->t_flags))
 		tp->t_tfo_pending = tcp_fastopen_alloc_counter();
 
 out:
 	tcp_bblog_pru(tp, PRU_LISTEN, error);
 	TCP_PROBE2(debug__user, tp, PRU_LISTEN);
 	INP_WUNLOCK(inp);
 	return (error);
 }
 #endif /* INET */
 
 #ifdef INET6
 static int
 tcp6_usr_listen(struct socket *so, int backlog, struct thread *td)
 {
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	u_char vflagsav;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp6_usr_listen: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (EINVAL);
 	}
 	tp = intotcpcb(inp);
 
 	vflagsav = inp->inp_vflag;
 
 	SOCK_LOCK(so);
 	error = solisten_proto_check(so);
 	if (error != 0) {
 		SOCK_UNLOCK(so);
 		goto out;
 	}
 	INP_HASH_WLOCK(&V_tcbinfo);
 	if (inp->inp_lport == 0) {
 		inp->inp_vflag &= ~INP_IPV4;
 		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0)
 			inp->inp_vflag |= INP_IPV4;
 		error = in6_pcbbind(inp, NULL, td->td_ucred);
 	}
 	INP_HASH_WUNLOCK(&V_tcbinfo);
 	if (error == 0) {
 		tcp_state_change(tp, TCPS_LISTEN);
 		solisten_proto(so, backlog);
 #ifdef TCP_OFFLOAD
 		if ((so->so_options & SO_NO_OFFLOAD) == 0)
 			tcp_offload_listen_start(tp);
 #endif
 	} else {
 		solisten_proto_abort(so);
 	}
 	SOCK_UNLOCK(so);
 
 	if (IS_FASTOPEN(tp->t_flags))
 		tp->t_tfo_pending = tcp_fastopen_alloc_counter();
 
 	if (error != 0)
 		inp->inp_vflag = vflagsav;
 
 out:
 	tcp_bblog_pru(tp, PRU_LISTEN, error);
 	TCP_PROBE2(debug__user, tp, PRU_LISTEN);
 	INP_WUNLOCK(inp);
 	return (error);
 }
 #endif /* INET6 */
 
 #ifdef INET
 /*
  * Initiate connection to peer.
  * Create a template for use in transmissions on this connection.
  * Enter SYN_SENT state, and mark socket as connecting.
  * Start keep-alive timer, and seed output sequence space.
  * Send initial segment on connection.
  */
 static int
 tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
 {
 	struct epoch_tracker et;
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct sockaddr_in *sinp;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_connect: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNREFUSED);
 	}
 	tp = intotcpcb(inp);
 
 	sinp = (struct sockaddr_in *)nam;
 	if (nam->sa_family != AF_INET) {
 		error = EAFNOSUPPORT;
 		goto out;
 	}
 	if (nam->sa_len != sizeof (*sinp)) {
 		error = EINVAL;
 		goto out;
 	}
 	/*
 	 * Must disallow TCP ``connections'' to multicast addresses.
 	 */
 	if (IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
 		error = EAFNOSUPPORT;
 		goto out;
 	}
 	if (ntohl(sinp->sin_addr.s_addr) == INADDR_BROADCAST) {
 		error = EACCES;
 		goto out;
 	}
 	if ((error = prison_remote_ip4(td->td_ucred, &sinp->sin_addr)) != 0)
 		goto out;
 	if (SOLISTENING(so)) {
 		error = EOPNOTSUPP;
 		goto out;
 	}
 	NET_EPOCH_ENTER(et);
 	if ((error = tcp_connect(tp, sinp, td)) != 0)
 		goto out_in_epoch;
 #ifdef TCP_OFFLOAD
 	if (registered_toedevs > 0 &&
 	    (so->so_options & SO_NO_OFFLOAD) == 0 &&
 	    (error = tcp_offload_connect(so, nam)) == 0)
 		goto out_in_epoch;
 #endif
 	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
 	error = tcp_output(tp);
 	KASSERT(error >= 0, ("TCP stack %s requested tcp_drop(%p) at connect()"
 	    ", error code %d", tp->t_fb->tfb_tcp_block_name, tp, -error));
 out_in_epoch:
 	NET_EPOCH_EXIT(et);
 out:
 	tcp_bblog_pru(tp, PRU_CONNECT, error);
 	TCP_PROBE2(debug__user, tp, PRU_CONNECT);
 	INP_WUNLOCK(inp);
 	return (error);
 }
 #endif /* INET */
 
 #ifdef INET6
 static int
 tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
 {
 	struct epoch_tracker et;
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct sockaddr_in6 *sin6;
 	u_int8_t incflagsav;
 	u_char vflagsav;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp6_usr_connect: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNREFUSED);
 	}
 	tp = intotcpcb(inp);
 
 	vflagsav = inp->inp_vflag;
 	incflagsav = inp->inp_inc.inc_flags;
 
 	sin6 = (struct sockaddr_in6 *)nam;
 	if (nam->sa_family != AF_INET6) {
 		error = EAFNOSUPPORT;
 		goto out;
 	}
 	if (nam->sa_len != sizeof (*sin6)) {
 		error = EINVAL;
 		goto out;
 	}
 	/*
 	 * Must disallow TCP ``connections'' to multicast addresses.
 	 */
 	if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
 		error = EAFNOSUPPORT;
 		goto out;
 	}
 	if (SOLISTENING(so)) {
 		error = EINVAL;
 		goto out;
 	}
 #ifdef INET
 	/*
 	 * XXXRW: Some confusion: V4/V6 flags relate to binding, and
 	 * therefore probably require the hash lock, which isn't held here.
 	 * Is this a significant problem?
 	 */
 	if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
 		struct sockaddr_in sin;
 
 		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
 			error = EINVAL;
 			goto out;
 		}
 		if ((inp->inp_vflag & INP_IPV4) == 0) {
 			error = EAFNOSUPPORT;
 			goto out;
 		}
 
 		in6_sin6_2_sin(&sin, sin6);
 		if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
 			error = EAFNOSUPPORT;
 			goto out;
 		}
 		if (ntohl(sin.sin_addr.s_addr) == INADDR_BROADCAST) {
 			error = EACCES;
 			goto out;
 		}
 		if ((error = prison_remote_ip4(td->td_ucred,
 		    &sin.sin_addr)) != 0)
 			goto out;
 		inp->inp_vflag |= INP_IPV4;
 		inp->inp_vflag &= ~INP_IPV6;
 		NET_EPOCH_ENTER(et);
 		if ((error = tcp_connect(tp, &sin, td)) != 0)
 			goto out_in_epoch;
 #ifdef TCP_OFFLOAD
 		if (registered_toedevs > 0 &&
 		    (so->so_options & SO_NO_OFFLOAD) == 0 &&
 		    (error = tcp_offload_connect(so, nam)) == 0)
 			goto out_in_epoch;
 #endif
 		error = tcp_output(tp);
 		goto out_in_epoch;
 	} else {
 		if ((inp->inp_vflag & INP_IPV6) == 0) {
 			error = EAFNOSUPPORT;
 			goto out;
 		}
 	}
 #endif
 	if ((error = prison_remote_ip6(td->td_ucred, &sin6->sin6_addr)) != 0)
 		goto out;
 	inp->inp_vflag &= ~INP_IPV4;
 	inp->inp_vflag |= INP_IPV6;
 	inp->inp_inc.inc_flags |= INC_ISIPV6;
 	NET_EPOCH_ENTER(et);
 	if ((error = tcp6_connect(tp, sin6, td)) != 0)
 		goto out_in_epoch;
 #ifdef TCP_OFFLOAD
 	if (registered_toedevs > 0 &&
 	    (so->so_options & SO_NO_OFFLOAD) == 0 &&
 	    (error = tcp_offload_connect(so, nam)) == 0)
 		goto out_in_epoch;
 #endif
 	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
 	error = tcp_output(tp);
 out_in_epoch:
 	NET_EPOCH_EXIT(et);
 out:
 	KASSERT(error >= 0, ("TCP stack %s requested tcp_drop(%p) at connect()"
 	    ", error code %d", tp->t_fb->tfb_tcp_block_name, tp, -error));
 	/*
 	 * If the implicit bind in the connect call fails, restore
 	 * the flags we modified.
 	 */
 	if (error != 0 && inp->inp_lport == 0) {
 		inp->inp_vflag = vflagsav;
 		inp->inp_inc.inc_flags = incflagsav;
 	}
 
 	tcp_bblog_pru(tp, PRU_CONNECT, error);
 	TCP_PROBE2(debug__user, tp, PRU_CONNECT);
 	INP_WUNLOCK(inp);
 	return (error);
 }
 #endif /* INET6 */
 
 /*
  * Initiate disconnect from peer.
  * If connection never passed embryonic stage, just drop;
  * else if don't need to let data drain, then can just drop anyways,
  * else have to begin TCP shutdown process: mark socket disconnecting,
  * drain unread data, state switch to reflect user close, and
  * send segment (e.g. FIN) to peer.  Socket will be really disconnected
  * when peer sends FIN and acks ours.
  *
  * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
  */
 static int
 tcp_usr_disconnect(struct socket *so)
 {
 	struct inpcb *inp;
 	struct tcpcb *tp = NULL;
 	struct epoch_tracker et;
 	int error = 0;
 
 	NET_EPOCH_ENTER(et);
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_disconnect: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		NET_EPOCH_EXIT(et);
 		return (ECONNRESET);
 	}
 	tp = intotcpcb(inp);
 
 	if (tp->t_state == TCPS_TIME_WAIT)
 		goto out;
 	tcp_disconnect(tp);
 out:
 	tcp_bblog_pru(tp, PRU_DISCONNECT, error);
 	TCP_PROBE2(debug__user, tp, PRU_DISCONNECT);
 	INP_WUNLOCK(inp);
 	NET_EPOCH_EXIT(et);
 	return (error);
 }
 
 #ifdef INET
 /*
  * Accept a connection.  Essentially all the work is done at higher levels;
  * just return the address of the peer, storing through addr.
  */
 static int
 tcp_usr_accept(struct socket *so, struct sockaddr *sa)
 {
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	int error = 0;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_accept: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNABORTED);
 	}
 	tp = intotcpcb(inp);
 
 	if (so->so_state & SS_ISDISCONNECTED)
 		error = ECONNABORTED;
 	else
 		*(struct sockaddr_in *)sa = (struct sockaddr_in ){
 			.sin_family = AF_INET,
 			.sin_len = sizeof(struct sockaddr_in),
 			.sin_port = inp->inp_fport,
 			.sin_addr = inp->inp_faddr,
 		};
 	tcp_bblog_pru(tp, PRU_ACCEPT, error);
 	TCP_PROBE2(debug__user, tp, PRU_ACCEPT);
 	INP_WUNLOCK(inp);
 
 	return (error);
 }
 #endif /* INET */
 
 #ifdef INET6
 static int
 tcp6_usr_accept(struct socket *so, struct sockaddr *sa)
 {
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	int error = 0;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp6_usr_accept: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNABORTED);
 	}
 	tp = intotcpcb(inp);
 
 	if (so->so_state & SS_ISDISCONNECTED) {
 		error = ECONNABORTED;
 	} else {
 		if (inp->inp_vflag & INP_IPV4) {
 			struct sockaddr_in sin = {
 				.sin_family = AF_INET,
 				.sin_len = sizeof(struct sockaddr_in),
 				.sin_port = inp->inp_fport,
 				.sin_addr = inp->inp_faddr,
 			};
 			in6_sin_2_v4mapsin6(&sin, (struct sockaddr_in6 *)sa);
 		} else {
 			*(struct sockaddr_in6 *)sa = (struct sockaddr_in6 ){
 				.sin6_family = AF_INET6,
 				.sin6_len = sizeof(struct sockaddr_in6),
 				.sin6_port = inp->inp_fport,
 				.sin6_addr = inp->in6p_faddr,
 			};
 			/* XXX: should catch errors */
 			(void)sa6_recoverscope((struct sockaddr_in6 *)sa);
 		}
 	}
 
 	tcp_bblog_pru(tp, PRU_ACCEPT, error);
 	TCP_PROBE2(debug__user, tp, PRU_ACCEPT);
 	INP_WUNLOCK(inp);
 
 	return (error);
 }
 #endif /* INET6 */
 
 /*
  * Mark the connection as being incapable of further output.
  */
 static int
 tcp_usr_shutdown(struct socket *so)
 {
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct epoch_tracker et;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNRESET);
 	}
 	tp = intotcpcb(inp);
 
 	NET_EPOCH_ENTER(et);
 	socantsendmore(so);
 	tcp_usrclosed(tp);
 	if (!(inp->inp_flags & INP_DROPPED))
 		error = tcp_output_nodrop(tp);
 	tcp_bblog_pru(tp, PRU_SHUTDOWN, error);
 	TCP_PROBE2(debug__user, tp, PRU_SHUTDOWN);
 	error = tcp_unlock_or_drop(tp, error);
 	NET_EPOCH_EXIT(et);
 
 	return (error);
 }
 
 /*
  * After a receive, possibly send window update to peer.
  */
 static int
 tcp_usr_rcvd(struct socket *so, int flags)
 {
 	struct epoch_tracker et;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	int outrv = 0, error = 0;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_rcvd: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNRESET);
 	}
 	tp = intotcpcb(inp);
 
 	NET_EPOCH_ENTER(et);
 	/*
 	 * For passively-created TFO connections, don't attempt a window
 	 * update while still in SYN_RECEIVED as this may trigger an early
 	 * SYN|ACK.  It is preferable to have the SYN|ACK be sent along with
 	 * application response data, or failing that, when the DELACK timer
 	 * expires.
 	 */
 	if (IS_FASTOPEN(tp->t_flags) &&
 	    (tp->t_state == TCPS_SYN_RECEIVED))
 		goto out;
 #ifdef TCP_OFFLOAD
 	if (tp->t_flags & TF_TOE)
 		tcp_offload_rcvd(tp);
 	else
 #endif
 		outrv = tcp_output_nodrop(tp);
 out:
 	tcp_bblog_pru(tp, PRU_RCVD, error);
 	TCP_PROBE2(debug__user, tp, PRU_RCVD);
 	(void) tcp_unlock_or_drop(tp, outrv);
 	NET_EPOCH_EXIT(et);
 	return (error);
 }
 
 /*
  * Do a send by putting data in output queue and updating urgent
  * marker if URG set.  Possibly send more data.  Unlike the other
  * pru_*() routines, the mbuf chains are our responsibility.  We
  * must either enqueue them or free them.  The other pru_* routines
  * generally are caller-frees.
  */
 static int
 tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
     struct sockaddr *nam, struct mbuf *control, struct thread *td)
 {
 	struct epoch_tracker et;
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 #ifdef INET
 #ifdef INET6
 	struct sockaddr_in sin;
 #endif
 	struct sockaddr_in *sinp;
 #endif
 #ifdef INET6
 	struct sockaddr_in6 *sin6;
 	int isipv6;
 #endif
 	u_int8_t incflagsav;
 	u_char vflagsav;
 	bool restoreflags;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_send: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		if (m != NULL && (flags & PRUS_NOTREADY) == 0)
 			m_freem(m);
 		INP_WUNLOCK(inp);
 		return (ECONNRESET);
 	}
 	tp = intotcpcb(inp);
 
 	vflagsav = inp->inp_vflag;
 	incflagsav = inp->inp_inc.inc_flags;
 	restoreflags = false;
 
 	NET_EPOCH_ENTER(et);
 	if (control != NULL) {
 		/* TCP doesn't do control messages (rights, creds, etc) */
 		if (control->m_len > 0) {
 			m_freem(control);
 			error = EINVAL;
 			goto out;
 		}
 		m_freem(control);	/* empty control, just free it */
 	}
 
 	if ((flags & PRUS_OOB) != 0 &&
 	    (error = tcp_pru_options_support(tp, PRUS_OOB)) != 0)
 		goto out;
 
 	if (nam != NULL && tp->t_state < TCPS_SYN_SENT) {
 		if (tp->t_state == TCPS_LISTEN) {
 			error = EINVAL;
 			goto out;
 		}
 		switch (nam->sa_family) {
 #ifdef INET
 		case AF_INET:
 			sinp = (struct sockaddr_in *)nam;
 			if (sinp->sin_len != sizeof(struct sockaddr_in)) {
 				error = EINVAL;
 				goto out;
 			}
 			if ((inp->inp_vflag & INP_IPV6) != 0) {
 				error = EAFNOSUPPORT;
 				goto out;
 			}
 			if (IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
 				error = EAFNOSUPPORT;
 				goto out;
 			}
 			if (ntohl(sinp->sin_addr.s_addr) == INADDR_BROADCAST) {
 				error = EACCES;
 				goto out;
 			}
 			if ((error = prison_remote_ip4(td->td_ucred,
 			    &sinp->sin_addr)))
 				goto out;
 #ifdef INET6
 			isipv6 = 0;
 #endif
 			break;
 #endif /* INET */
 #ifdef INET6
 		case AF_INET6:
 			sin6 = (struct sockaddr_in6 *)nam;
 			if (sin6->sin6_len != sizeof(*sin6)) {
 				error = EINVAL;
 				goto out;
 			}
 			if ((inp->inp_vflag & INP_IPV6PROTO) == 0) {
 				error = EAFNOSUPPORT;
 				goto out;
 			}
 			if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
 				error = EAFNOSUPPORT;
 				goto out;
 			}
 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
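				/*
				 * An IPv4-mapped IPv6 address: with INET,
				 * convert it to a plain IPv4 sockaddr and
				 * temporarily treat the inpcb as IPv4 for
				 * the implied connect.
				 */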
 #ifdef INET
 				if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
 					error = EINVAL;
 					goto out;
 				}
 				if ((inp->inp_vflag & INP_IPV4) == 0) {
 					error = EAFNOSUPPORT;
 					goto out;
 				}
 				restoreflags = true;
 				inp->inp_vflag &= ~INP_IPV6;
 				sinp = &sin;
 				in6_sin6_2_sin(sinp, sin6);
 				if (IN_MULTICAST(
 				    ntohl(sinp->sin_addr.s_addr))) {
 					error = EAFNOSUPPORT;
 					goto out;
 				}
 				if ((error = prison_remote_ip4(td->td_ucred,
 				    &sinp->sin_addr)))
 					goto out;
 				isipv6 = 0;
 #else /* !INET */
 				error = EAFNOSUPPORT;
 				goto out;
 #endif /* INET */
 			} else {
 				if ((inp->inp_vflag & INP_IPV6) == 0) {
 					error = EAFNOSUPPORT;
 					goto out;
 				}
 				restoreflags = true;
 				inp->inp_vflag &= ~INP_IPV4;
 				inp->inp_inc.inc_flags |= INC_ISIPV6;
 				if ((error = prison_remote_ip6(td->td_ucred,
 				    &sin6->sin6_addr)))
 					goto out;
 				isipv6 = 1;
 			}
 			break;
 #endif /* INET6 */
 		default:
 			error = EAFNOSUPPORT;
 			goto out;
 		}
 	}
 	if (!(flags & PRUS_OOB)) {
 		if (tp->t_acktime == 0)
 			tp->t_acktime = ticks;
 		sbappendstream(&so->so_snd, m, flags);
 		m = NULL;
 		if (nam && tp->t_state < TCPS_SYN_SENT) {
 			KASSERT(tp->t_state == TCPS_CLOSED,
 			    ("%s: tp %p is listening", __func__, tp));
 
 			/*
 			 * Do implied connect if not yet connected,
 			 * initialize window to default value, and
 			 * initialize maxseg using peer's cached MSS.
 			 */
 #ifdef INET6
 			if (isipv6)
 				error = tcp6_connect(tp, sin6, td);
 #endif /* INET6 */
 #if defined(INET6) && defined(INET)
 			else
 #endif
 #ifdef INET
 				error = tcp_connect(tp, sinp, td);
 #endif
 			/*
 			 * The bind operation in tcp_connect succeeded. We
 			 * no longer want to restore the flags if later
 			 * operations fail.
 			 */
 			if (error == 0 || inp->inp_lport != 0)
 				restoreflags = false;
 
 			if (error) {
 				/* m is freed if PRUS_NOTREADY is unset. */
 				sbflush(&so->so_snd);
 				goto out;
 			}
 			if (IS_FASTOPEN(tp->t_flags))
 				tcp_fastopen_connect(tp);
 			else {
 				tp->snd_wnd = TTCP_CLIENT_SND_WND;
 				tcp_mss(tp, -1);
 			}
 		}
 		if (flags & PRUS_EOF) {
 			/*
 			 * Close the send side of the connection after
 			 * the data is sent.
 			 */
 			socantsendmore(so);
 			tcp_usrclosed(tp);
 		}
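		/*
		 * Record the time the first payload byte was queued to the
		 * send buffer (t_fbyte_out); once both first-byte-in and
		 * first-byte-out times are known, mark the measurement
		 * complete.
		 */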
 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
 		    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
 		    (tp->t_fbyte_out == 0) &&
 		    (so->so_snd.sb_ccc > 0)) {
 			tp->t_fbyte_out = ticks;
 			if (tp->t_fbyte_out == 0)
 				tp->t_fbyte_out = 1;
 			if (tp->t_fbyte_out && tp->t_fbyte_in)
 				tp->t_flags2 |= TF2_FBYTES_COMPLETE;
 		}
 		if (!(inp->inp_flags & INP_DROPPED) &&
 		    !(flags & PRUS_NOTREADY)) {
 			if (flags & PRUS_MORETOCOME)
 				tp->t_flags |= TF_MORETOCOME;
 			error = tcp_output_nodrop(tp);
 			if (flags & PRUS_MORETOCOME)
 				tp->t_flags &= ~TF_MORETOCOME;
 		}
 	} else {
 		/*
 		 * XXXRW: PRUS_EOF not implemented with PRUS_OOB?
 		 */
 		SOCKBUF_LOCK(&so->so_snd);
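		/*
		 * Urgent data may overcommit the send buffer, but by at
		 * most 512 bytes.
		 */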
 		if (sbspace(&so->so_snd) < -512) {
 			SOCKBUF_UNLOCK(&so->so_snd);
 			error = ENOBUFS;
 			goto out;
 		}
 		/*
 		 * According to RFC961 (Assigned Protocols),
 		 * the urgent pointer points to the last octet
 		 * of urgent data.  We continue, however,
 		 * to consider it to indicate the first octet
 		 * of data past the urgent section.
 		 * Otherwise, snd_up should be one lower.
 		 */
 		if (tp->t_acktime == 0)
 			tp->t_acktime = ticks;
 		sbappendstream_locked(&so->so_snd, m, flags);
 		SOCKBUF_UNLOCK(&so->so_snd);
 		m = NULL;
 		if (nam && tp->t_state < TCPS_SYN_SENT) {
 			/*
 			 * Do implied connect if not yet connected,
 			 * initialize window to default value, and
 			 * initialize maxseg using peer's cached MSS.
 			 */
 
 			/*
 			 * Not going to contemplate SYN|URG
 			 */
 			if (IS_FASTOPEN(tp->t_flags))
 				tp->t_flags &= ~TF_FASTOPEN;
 #ifdef INET6
 			if (isipv6)
 				error = tcp6_connect(tp, sin6, td);
 #endif /* INET6 */
 #if defined(INET6) && defined(INET)
 			else
 #endif
 #ifdef INET
 				error = tcp_connect(tp, sinp, td);
 #endif
 			/*
 			 * The bind operation in tcp_connect succeeded. We
 			 * no longer want to restore the flags if later
 			 * operations fail.
 			 */
 			if (error == 0 || inp->inp_lport != 0)
 				restoreflags = false;
 
 			if (error != 0) {
 				/* m is freed if PRUS_NOTREADY is unset. */
 				sbflush(&so->so_snd);
 				goto out;
 			}
 			tp->snd_wnd = TTCP_CLIENT_SND_WND;
 			tcp_mss(tp, -1);
 		}
 		tp->snd_up = tp->snd_una + sbavail(&so->so_snd);
 		if ((flags & PRUS_NOTREADY) == 0) {
 			tp->t_flags |= TF_FORCEDATA;
 			error = tcp_output_nodrop(tp);
 			tp->t_flags &= ~TF_FORCEDATA;
 		}
 	}
 	TCP_LOG_EVENT(tp, NULL,
 	    &inp->inp_socket->so_rcv,
 	    &inp->inp_socket->so_snd,
 	    TCP_LOG_USERSEND, error,
 	    0, NULL, false);
 
 out:
 	/*
 	 * In case of PRUS_NOTREADY, the caller or tcp_usr_ready() is
 	 * responsible for freeing memory.
 	 */
 	if (m != NULL && (flags & PRUS_NOTREADY) == 0)
 		m_freem(m);
 
 	/*
 	 * If the request was unsuccessful and we changed flags,
 	 * restore the original flags.
 	 */
 	if (error != 0 && restoreflags) {
 		inp->inp_vflag = vflagsav;
 		inp->inp_inc.inc_flags = incflagsav;
 	}
 	tcp_bblog_pru(tp, (flags & PRUS_OOB) ? PRU_SENDOOB :
 		      ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND), error);
 	TCP_PROBE2(debug__user, tp, (flags & PRUS_OOB) ? PRU_SENDOOB :
 		   ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
 	error = tcp_unlock_or_drop(tp, error);
 	NET_EPOCH_EXIT(et);
 	return (error);
 }
 
 static int
 tcp_usr_ready(struct socket *so, struct mbuf *m, int count)
 {
 	struct epoch_tracker et;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	int error;
 
 	inp = sotoinpcb(so);
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		mb_free_notready(m, count);
 		return (ECONNRESET);
 	}
 	tp = intotcpcb(inp);
 
 	SOCKBUF_LOCK(&so->so_snd);
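	/* Mark previously appended not-ready data as ready to send. */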
 	error = sbready(&so->so_snd, m, count);
 	SOCKBUF_UNLOCK(&so->so_snd);
 	if (error) {
 		INP_WUNLOCK(inp);
 		return (error);
 	}
 	NET_EPOCH_ENTER(et);
 	error = tcp_output_unlock(tp);
 	NET_EPOCH_EXIT(et);
 
 	return (error);
 }
 
 /*
  * Abort the TCP.  Drop the connection abruptly.
  */
 static void
 tcp_usr_abort(struct socket *so)
 {
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct epoch_tracker et;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_abort: inp == NULL"));
 
 	NET_EPOCH_ENTER(et);
 	INP_WLOCK(inp);
 	KASSERT(inp->inp_socket != NULL,
 	    ("tcp_usr_abort: inp_socket == NULL"));
 
 	/*
 	 * If we still have full TCP state, and we're not dropped, drop.
 	 */
 	if (!(inp->inp_flags & INP_DROPPED)) {
 		tp = intotcpcb(inp);
 		tp = tcp_drop(tp, ECONNABORTED);
 		if (tp == NULL)
 			goto dropped;
 		tcp_bblog_pru(tp, PRU_ABORT, 0);
 		TCP_PROBE2(debug__user, tp, PRU_ABORT);
 	}
 	if (!(inp->inp_flags & INP_DROPPED)) {
 		soref(so);
 		inp->inp_flags |= INP_SOCKREF;
 	}
 	INP_WUNLOCK(inp);
 dropped:
 	NET_EPOCH_EXIT(et);
 }
 
 /*
  * TCP socket is closed.  Start friendly disconnect.
  */
 static void
 tcp_usr_close(struct socket *so)
 {
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct epoch_tracker et;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_close: inp == NULL"));
 
 	NET_EPOCH_ENTER(et);
 	INP_WLOCK(inp);
 	KASSERT(inp->inp_socket != NULL,
 	    ("tcp_usr_close: inp_socket == NULL"));
 
 	/*
 	 * If we are still connected and we're not dropped, initiate
 	 * a disconnect.
 	 */
 	if (!(inp->inp_flags & INP_DROPPED)) {
 		tp = intotcpcb(inp);
 		if (tp->t_state != TCPS_TIME_WAIT) {
 			tp->t_flags |= TF_CLOSED;
 			tcp_disconnect(tp);
 			tcp_bblog_pru(tp, PRU_CLOSE, 0);
 			TCP_PROBE2(debug__user, tp, PRU_CLOSE);
 		}
 	}
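	/*
	 * The connection is not yet dropped: take a socket reference
	 * (INP_SOCKREF) so the socket stays around until the protocol
	 * finishes the shutdown.
	 */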
 	if (!(inp->inp_flags & INP_DROPPED)) {
 		soref(so);
 		inp->inp_flags |= INP_SOCKREF;
 	}
 	INP_WUNLOCK(inp);
 	NET_EPOCH_EXIT(et);
 }
 
 static int
 tcp_pru_options_support(struct tcpcb *tp, int flags)
 {
	/*
	 * If the TCP stack provides a pru_options hook, it may not
	 * support all of the PRU_XX options, so we must ask it.  If
	 * the hook is not provided, all of the PRU_XX options are
	 * supported.
	 */
 	int ret = 0;
 
 	if (tp->t_fb->tfb_pru_options) {
 		ret = (*tp->t_fb->tfb_pru_options)(tp, flags);
 	}
 	return (ret);
 }
 
 /*
  * Receive out-of-band data.
  */
 static int
 tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags)
 {
 	int error = 0;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_usr_rcvoob: inp == NULL"));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNRESET);
 	}
 	tp = intotcpcb(inp);
 
 	error = tcp_pru_options_support(tp, PRUS_OOB);
 	if (error) {
 		goto out;
 	}
 	if ((so->so_oobmark == 0 &&
 	     (so->so_rcv.sb_state & SBS_RCVATMARK) == 0) ||
 	    so->so_options & SO_OOBINLINE ||
 	    tp->t_oobflags & TCPOOB_HADDATA) {
 		error = EINVAL;
 		goto out;
 	}
 	if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
 		error = EWOULDBLOCK;
 		goto out;
 	}
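	/*
	 * Return the single out-of-band byte saved in t_iobc; unless
	 * peeking, mark it consumed.
	 */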
 	m->m_len = 1;
 	*mtod(m, caddr_t) = tp->t_iobc;
 	if ((flags & MSG_PEEK) == 0)
 		tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
 
 out:
 	tcp_bblog_pru(tp, PRU_RCVOOB, error);
 	TCP_PROBE2(debug__user, tp, PRU_RCVOOB);
 	INP_WUNLOCK(inp);
 	return (error);
 }
 
 #ifdef INET
 struct protosw tcp_protosw = {
 	.pr_type =		SOCK_STREAM,
 	.pr_protocol =		IPPROTO_TCP,
 	.pr_flags =		PR_CONNREQUIRED | PR_IMPLOPCL | PR_WANTRCVD |
 				    PR_CAPATTACH,
 	.pr_ctloutput =		tcp_ctloutput,
 	.pr_abort =		tcp_usr_abort,
 	.pr_accept =		tcp_usr_accept,
 	.pr_attach =		tcp_usr_attach,
 	.pr_bind =		tcp_usr_bind,
 	.pr_connect =		tcp_usr_connect,
 	.pr_control =		in_control,
 	.pr_detach =		tcp_usr_detach,
 	.pr_disconnect =	tcp_usr_disconnect,
 	.pr_listen =		tcp_usr_listen,
 	.pr_peeraddr =		in_getpeeraddr,
 	.pr_rcvd =		tcp_usr_rcvd,
 	.pr_rcvoob =		tcp_usr_rcvoob,
 	.pr_send =		tcp_usr_send,
 	.pr_ready =		tcp_usr_ready,
 	.pr_shutdown =		tcp_usr_shutdown,
 	.pr_sockaddr =		in_getsockaddr,
 	.pr_sosetlabel =	in_pcbsosetlabel,
 	.pr_close =		tcp_usr_close,
 };
 #endif /* INET */
 
 #ifdef INET6
 struct protosw tcp6_protosw = {
 	.pr_type =		SOCK_STREAM,
 	.pr_protocol =		IPPROTO_TCP,
	.pr_flags =		PR_CONNREQUIRED | PR_IMPLOPCL | PR_WANTRCVD |
 				    PR_CAPATTACH,
 	.pr_ctloutput =		tcp_ctloutput,
 	.pr_abort =		tcp_usr_abort,
 	.pr_accept =		tcp6_usr_accept,
 	.pr_attach =		tcp_usr_attach,
 	.pr_bind =		tcp6_usr_bind,
 	.pr_connect =		tcp6_usr_connect,
 	.pr_control =		in6_control,
 	.pr_detach =		tcp_usr_detach,
 	.pr_disconnect =	tcp_usr_disconnect,
 	.pr_listen =		tcp6_usr_listen,
 	.pr_peeraddr =		in6_mapped_peeraddr,
 	.pr_rcvd =		tcp_usr_rcvd,
 	.pr_rcvoob =		tcp_usr_rcvoob,
 	.pr_send =		tcp_usr_send,
 	.pr_ready =		tcp_usr_ready,
 	.pr_shutdown =		tcp_usr_shutdown,
 	.pr_sockaddr =		in6_mapped_sockaddr,
 	.pr_sosetlabel =	in_pcbsosetlabel,
 	.pr_close =		tcp_usr_close,
 };
 #endif /* INET6 */
 
 #ifdef INET
 /*
  * Common subroutine to open a TCP connection to remote host specified
  * by struct sockaddr_in.  Call in_pcbconnect() to choose local host address
  * and assign a local port number and install the inpcb into the hash.
  * Initialize connection parameters and enter SYN-SENT state.
  */
 static int
 tcp_connect(struct tcpcb *tp, struct sockaddr_in *sin, struct thread *td)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct socket *so = tptosocket(tp);
 	int error;
 
 	NET_EPOCH_ASSERT();
 	INP_WLOCK_ASSERT(inp);
 
 	if (__predict_false((so->so_state &
 	    (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING |
 	    SS_ISDISCONNECTED)) != 0))
 		return (EISCONN);
 
 	INP_HASH_WLOCK(&V_tcbinfo);
 	error = in_pcbconnect(inp, sin, td->td_ucred, true);
 	INP_HASH_WUNLOCK(&V_tcbinfo);
 	if (error != 0)
 		return (error);
 
 	/*
 	 * Compute window scaling to request:
 	 * Scale to fit into sweet spot.  See tcp_syncache.c.
 	 * XXX: This should move to tcp_output().
 	 */
 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
 	    (TCP_MAXWIN << tp->request_r_scale) < sb_max)
 		tp->request_r_scale++;
 
 	soisconnecting(so);
 	TCPSTAT_INC(tcps_connattempt);
 	tcp_state_change(tp, TCPS_SYN_SENT);
 	tp->iss = tcp_new_isn(&inp->inp_inc);
 	if (tp->t_flags & TF_REQ_TSTMP)
 		tp->ts_offset = tcp_new_ts_offset(&inp->inp_inc);
 	tcp_sendseqinit(tp);
 
 	return (0);
 }
 #endif /* INET */
 
 #ifdef INET6
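/*
 * IPv6 counterpart of tcp_connect(): choose a local address and port via
 * in6_pcbconnect(), install the inpcb, and enter SYN-SENT.
 */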
 static int
 tcp6_connect(struct tcpcb *tp, struct sockaddr_in6 *sin6, struct thread *td)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct socket *so = tptosocket(tp);
 	int error;
 
 	NET_EPOCH_ASSERT();
 	INP_WLOCK_ASSERT(inp);
 
 	if (__predict_false((so->so_state &
 	    (SS_ISCONNECTING | SS_ISCONNECTED)) != 0))
 		return (EISCONN);
 
 	INP_HASH_WLOCK(&V_tcbinfo);
 	error = in6_pcbconnect(inp, sin6, td->td_ucred, true);
 	INP_HASH_WUNLOCK(&V_tcbinfo);
 	if (error != 0)
 		return (error);
 
 	/* Compute window scaling to request.  */
 	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
 	    (TCP_MAXWIN << tp->request_r_scale) < sb_max)
 		tp->request_r_scale++;
 
 	soisconnecting(so);
 	TCPSTAT_INC(tcps_connattempt);
 	tcp_state_change(tp, TCPS_SYN_SENT);
 	tp->iss = tcp_new_isn(&inp->inp_inc);
 	if (tp->t_flags & TF_REQ_TSTMP)
 		tp->ts_offset = tcp_new_ts_offset(&inp->inp_inc);
 	tcp_sendseqinit(tp);
 
 	return (0);
 }
 #endif /* INET6 */
 
 /*
  * Export TCP internal state information via a struct tcp_info, based on the
  * Linux 2.6 API.  Not ABI compatible as our constants are mapped differently
  * (TCP state machine, etc).  We export all information using FreeBSD-native
  * constants -- for example, the numeric values for tcpi_state will differ
  * from Linux.
  */
 void
 tcp_fill_info(const struct tcpcb *tp, struct tcp_info *ti)
 {
 
 	INP_LOCK_ASSERT(tptoinpcb(tp));
 	bzero(ti, sizeof(*ti));
 
 	ti->tcpi_state = tp->t_state;
 	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
 		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
 	if (tp->t_flags & TF_SACK_PERMIT)
 		ti->tcpi_options |= TCPI_OPT_SACK;
 	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
 		ti->tcpi_options |= TCPI_OPT_WSCALE;
 		ti->tcpi_snd_wscale = tp->snd_scale;
 		ti->tcpi_rcv_wscale = tp->rcv_scale;
 	}
 	switch (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) {
 		case TF2_ECN_PERMIT:
 			ti->tcpi_options |= TCPI_OPT_ECN;
 			break;
 		case TF2_ACE_PERMIT:
 			/* FALLTHROUGH */
 		case TF2_ECN_PERMIT | TF2_ACE_PERMIT:
 			ti->tcpi_options |= TCPI_OPT_ACE;
 			break;
 		default:
 			break;
 	}
 	if (IS_FASTOPEN(tp->t_flags))
 		ti->tcpi_options |= TCPI_OPT_TFO;
 
 	ti->tcpi_rto = tp->t_rxtcur * tick;
 	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
 	ti->tcpi_rtt = ((u_int64_t)tp->t_srtt * tick) >> TCP_RTT_SHIFT;
 	ti->tcpi_rttvar = ((u_int64_t)tp->t_rttvar * tick) >> TCP_RTTVAR_SHIFT;
 
 	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
 	ti->tcpi_snd_cwnd = tp->snd_cwnd;
 
 	/*
 	 * FreeBSD-specific extension fields for tcp_info.
 	 */
 	ti->tcpi_rcv_space = tp->rcv_wnd;
 	ti->tcpi_rcv_nxt = tp->rcv_nxt;
 	ti->tcpi_snd_wnd = tp->snd_wnd;
 	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
 	ti->tcpi_snd_nxt = tp->snd_nxt;
 	ti->tcpi_snd_mss = tp->t_maxseg;
 	ti->tcpi_rcv_mss = tp->t_maxseg;
 	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
 	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
 	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
 	ti->tcpi_snd_una = tp->snd_una;
 	ti->tcpi_snd_max = tp->snd_max;
 	ti->tcpi_rcv_numsacks = tp->rcv_numsacks;
 	ti->tcpi_rcv_adv = tp->rcv_adv;
 	ti->tcpi_dupacks = tp->t_dupacks;
 #ifdef TCP_OFFLOAD
 	if (tp->t_flags & TF_TOE) {
 		ti->tcpi_options |= TCPI_OPT_TOE;
 		tcp_offload_tcp_info(tp, ti);
 	}
 #endif
 	/*
 	 * AccECN related counters.
 	 */
 	if ((tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) ==
 	    (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
 		/*
 		 * Internal counter starts at 5 for AccECN
 		 * but 0 for RFC3168 ECN.
 		 */
 		ti->tcpi_delivered_ce = tp->t_scep - 5;
 	else
 		ti->tcpi_delivered_ce = tp->t_scep;
 	ti->tcpi_received_ce = tp->t_rcep;
 }
 
 /*
  * tcp_ctloutput() must drop the inpcb lock before performing copyin on
  * socket option arguments.  When it re-acquires the lock after the copy, it
  * has to revalidate that the connection is still valid for the socket
  * option.
  */
 #define INP_WLOCK_RECHECK_CLEANUP(inp, cleanup) do {			\
 	INP_WLOCK(inp);							\
 	if (inp->inp_flags & INP_DROPPED) {				\
 		INP_WUNLOCK(inp);					\
 		cleanup;						\
 		return (ECONNRESET);					\
 	}								\
 	tp = intotcpcb(inp);						\
 } while(0)
 #define INP_WLOCK_RECHECK(inp) INP_WLOCK_RECHECK_CLEANUP((inp), /* noop */)
 
 int
 tcp_ctloutput_set(struct inpcb *inp, struct sockopt *sopt)
 {
 	struct socket *so = inp->inp_socket;
 	struct tcpcb *tp = intotcpcb(inp);
 	int error = 0;
 
 	MPASS(sopt->sopt_dir == SOPT_SET);
 	INP_WLOCK_ASSERT(inp);
 	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
 	    ("inp_flags == %x", inp->inp_flags));
 	KASSERT(so != NULL, ("inp_socket == NULL"));
 
 	if (sopt->sopt_level != IPPROTO_TCP) {
 		INP_WUNLOCK(inp);
 #ifdef INET6
 		if (inp->inp_vflag & INP_IPV6PROTO)
 			error = ip6_ctloutput(so, sopt);
 #endif
 #if defined(INET6) && defined(INET)
 		else
 #endif
 #ifdef INET
 			error = ip_ctloutput(so, sopt);
 #endif
		/*
		 * When an IP-level socket option affects TCP, pass control
		 * down to the stack's tfb_tcp_ctloutput; otherwise return
		 * what the IP level returned.
		 */
 		switch (sopt->sopt_level) {
 #ifdef INET6
 		case IPPROTO_IPV6:
 			if ((inp->inp_vflag & INP_IPV6PROTO) == 0)
 				return (error);
 			switch (sopt->sopt_name) {
 			case IPV6_TCLASS:
 				/* Notify tcp stacks that care (e.g. RACK). */
 				break;
 			case IPV6_USE_MIN_MTU:
 				/* Update t_maxseg accordingly. */
 				break;
 			default:
 				return (error);
 			}
 			break;
 #endif
 #ifdef INET
 		case IPPROTO_IP:
 			switch (sopt->sopt_name) {
 			case IP_TOS:
 				inp->inp_ip_tos &= ~IPTOS_ECN_MASK;
 				break;
 			case IP_TTL:
 				/* Notify tcp stacks that care (e.g. RACK). */
 				break;
 			default:
 				return (error);
 			}
 			break;
 #endif
 		default:
 			return (error);
 		}
 		INP_WLOCK(inp);
 		if (inp->inp_flags & INP_DROPPED) {
 			INP_WUNLOCK(inp);
 			return (ECONNRESET);
 		}
 	} else if (sopt->sopt_name == TCP_FUNCTION_BLK) {
 		/*
 		 * Protect the TCP option TCP_FUNCTION_BLK so
 		 * that a sub-function can *never* overwrite this.
 		 */
 		struct tcp_function_set fsn;
 		struct tcp_function_block *blk;
 		void *ptr = NULL;
 
 		INP_WUNLOCK(inp);
 		error = sooptcopyin(sopt, &fsn, sizeof fsn, sizeof fsn);
 		if (error)
 			return (error);
 
 		INP_WLOCK(inp);
 		tp = intotcpcb(inp);
 
 		blk = find_and_ref_tcp_functions(&fsn);
 		if (blk == NULL) {
 			INP_WUNLOCK(inp);
 			return (ENOENT);
 		}
 		if (tp->t_fb == blk) {
 			/* You already have this */
 			refcount_release(&blk->tfb_refcnt);
 			INP_WUNLOCK(inp);
 			return (0);
 		}
 		if (tp->t_state != TCPS_CLOSED) {
			/*
			 * The user has advanced the state
			 * past the initial point; we may not
			 * be able to switch.
			 */
 			if (blk->tfb_tcp_handoff_ok != NULL) {
				/*
				 * The stack provides a query
				 * mechanism; ask it whether the
				 * handoff is still possible.
				 */
 				error = (*blk->tfb_tcp_handoff_ok)(tp);
 			} else
 				error = EINVAL;
 			if (error) {
 				refcount_release(&blk->tfb_refcnt);
 				INP_WUNLOCK(inp);
				return (error);
 			}
 		}
 		if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
 			refcount_release(&blk->tfb_refcnt);
 			INP_WUNLOCK(inp);
 			return (ENOENT);
 		}
 		/*
 		 * Ensure the new stack takes ownership with a
 		 * clean slate on peak rate threshold.
 		 */
-#ifdef TCPHPTS
-		/* Assure that we are not on any hpts */
-		tcp_hpts_remove(tp);
-#endif
+		if (tp->t_fb->tfb_tcp_timer_stop_all != NULL)
+			tp->t_fb->tfb_tcp_timer_stop_all(tp);
 		if (blk->tfb_tcp_fb_init) {
 			error = (*blk->tfb_tcp_fb_init)(tp, &ptr);
 			if (error) {
 				/*
 				 * Release the ref count the lookup
 				 * acquired.
 				 */ 
 				refcount_release(&blk->tfb_refcnt);
				/*
				 * There is a chance that the init()
				 * function modified some state before
				 * it failed, such as hpts, inp_flags2
				 * or the timer granularity.  It should
				 * not have, but give the old stack a
				 * chance to reset to a known good
				 * state.
				 */
 				if (tp->t_fb->tfb_switch_failed) {
 					(*tp->t_fb->tfb_switch_failed)(tp);
 				}
 			 	goto err_out;
 			}
 		}
 		if (tp->t_fb->tfb_tcp_fb_fini) {
 			struct epoch_tracker et;
 			/*
 			 * Tell the stack to cleanup with 0 i.e.
 			 * the tcb is not going away.
 			 */
 			NET_EPOCH_ENTER(et);
 			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
 			NET_EPOCH_EXIT(et);
 		}
 		/*
 		 * Release the old refcnt, the
 		 * lookup acquired a ref on the
 		 * new one already.
 		 */
 		refcount_release(&tp->t_fb->tfb_refcnt);
		/*
		 * Install the new stack.
		 */
 		tp->t_fb = blk;
 		tp->t_fb_ptr = ptr;
 #ifdef TCP_OFFLOAD
 		if (tp->t_flags & TF_TOE) {
 			tcp_offload_ctloutput(tp, sopt->sopt_dir,
 			     sopt->sopt_name);
 		}
 #endif
 err_out:
 		INP_WUNLOCK(inp);
 		return (error);
 
 	}
 
 	/* Pass in the INP locked, callee must unlock it. */
 	return (tp->t_fb->tfb_tcp_ctloutput(tp, sopt));
 }
 
 static int
 tcp_ctloutput_get(struct inpcb *inp, struct sockopt *sopt)
 {
 	struct socket *so = inp->inp_socket;
 	struct tcpcb *tp = intotcpcb(inp);
 	int error = 0;
 
 	MPASS(sopt->sopt_dir == SOPT_GET);
 	INP_WLOCK_ASSERT(inp);
 	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
 	    ("inp_flags == %x", inp->inp_flags));
 	KASSERT(so != NULL, ("inp_socket == NULL"));
 
 	if (sopt->sopt_level != IPPROTO_TCP) {
 		INP_WUNLOCK(inp);
 #ifdef INET6
 		if (inp->inp_vflag & INP_IPV6PROTO)
 			error = ip6_ctloutput(so, sopt);
 #endif /* INET6 */
 #if defined(INET6) && defined(INET)
 		else
 #endif
 #ifdef INET
 			error = ip_ctloutput(so, sopt);
 #endif
 		return (error);
 	}
	if ((sopt->sopt_name == TCP_FUNCTION_BLK) ||
	    (sopt->sopt_name == TCP_FUNCTION_ALIAS)) {
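		/*
		 * Report the name (or alias) of the connection's TCP
		 * function block together with its current reference
		 * count.
		 */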
 		struct tcp_function_set fsn;
 
 		if (sopt->sopt_name == TCP_FUNCTION_ALIAS) {
 			memset(&fsn, 0, sizeof(fsn));
 			find_tcp_function_alias(tp->t_fb, &fsn);
 		} else {
 			strncpy(fsn.function_set_name,
 			    tp->t_fb->tfb_tcp_block_name,
 			    TCP_FUNCTION_NAME_LEN_MAX);
 			fsn.function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
 		}
 		fsn.pcbcnt = tp->t_fb->tfb_refcnt;
 		INP_WUNLOCK(inp);
 		error = sooptcopyout(sopt, &fsn, sizeof fsn);
 		return (error);
 	}
 
 	/* Pass in the INP locked, callee must unlock it. */
 	return (tp->t_fb->tfb_tcp_ctloutput(tp, sopt));
 }
 
 int
 tcp_ctloutput(struct socket *so, struct sockopt *sopt)
 {
 	struct	inpcb *inp;
 
 	inp = sotoinpcb(so);
 	KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
 
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		return (ECONNRESET);
 	}
 	if (sopt->sopt_dir == SOPT_SET)
 		return (tcp_ctloutput_set(inp, sopt));
 	else if (sopt->sopt_dir == SOPT_GET)
 		return (tcp_ctloutput_get(inp, sopt));
 	else
 		panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir);
 }
 
 /*
  * If this assert becomes untrue, we need to change the size of the buf
  * variable in tcp_default_ctloutput().
  */
 #ifdef CTASSERT
 CTASSERT(TCP_CA_NAME_MAX <= TCP_LOG_ID_LEN);
 CTASSERT(TCP_LOG_REASON_LEN <= TCP_LOG_ID_LEN);
 #endif
 
 #ifdef KERN_TLS
 static int
 copyin_tls_enable(struct sockopt *sopt, struct tls_enable *tls)
 {
 	struct tls_enable_v0 tls_v0;
 	int error;
 
 	if (sopt->sopt_valsize == sizeof(tls_v0)) {
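		/*
		 * The caller passed the legacy tls_enable_v0 layout;
		 * convert it to the current struct tls_enable.
		 */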
 		error = sooptcopyin(sopt, &tls_v0, sizeof(tls_v0),
 		    sizeof(tls_v0));
 		if (error)
 			return (error);
 		memset(tls, 0, sizeof(*tls));
 		tls->cipher_key = tls_v0.cipher_key;
 		tls->iv = tls_v0.iv;
 		tls->auth_key = tls_v0.auth_key;
 		tls->cipher_algorithm = tls_v0.cipher_algorithm;
 		tls->cipher_key_len = tls_v0.cipher_key_len;
 		tls->iv_len = tls_v0.iv_len;
 		tls->auth_algorithm = tls_v0.auth_algorithm;
 		tls->auth_key_len = tls_v0.auth_key_len;
 		tls->flags = tls_v0.flags;
 		tls->tls_vmajor = tls_v0.tls_vmajor;
 		tls->tls_vminor = tls_v0.tls_vminor;
 		return (0);
 	}
 
 	return (sooptcopyin(sopt, tls, sizeof(*tls), sizeof(*tls)));
 }
 #endif
 
 extern struct cc_algo newreno_cc_algo;
 
 static int
 tcp_set_cc_mod(struct inpcb *inp, struct sockopt *sopt)
 {
 	struct cc_algo *algo;
 	void *ptr = NULL;
 	struct tcpcb *tp;
 	struct cc_var cc_mem;
 	char	buf[TCP_CA_NAME_MAX];
 	size_t mem_sz;
 	int error;
 
 	INP_WUNLOCK(inp);
 	error = sooptcopyin(sopt, buf, TCP_CA_NAME_MAX - 1, 1);
 	if (error)
		return (error);
 	buf[sopt->sopt_valsize] = '\0';
 	CC_LIST_RLOCK();
 	STAILQ_FOREACH(algo, &cc_list, entries) {
 		if (strncmp(buf, algo->name,
 			    TCP_CA_NAME_MAX) == 0) {
 			if (algo->flags & CC_MODULE_BEING_REMOVED) {
 				/* We can't "see" modules being unloaded */
 				continue;
 			}
 			break;
 		}
 	}
 	if (algo == NULL) {
 		CC_LIST_RUNLOCK();
		return (ESRCH);
 	}
	/*
	 * Hold a reference on the algorithm so it cannot be removed
	 * while the change is in progress.
	 */
 	cc_refer(algo);
 	CC_LIST_RUNLOCK();
 	if (algo->cb_init != NULL) {
 		/* We can now pre-get the memory for the CC */
 		mem_sz = (*algo->cc_data_sz)();
 		if (mem_sz == 0) {
 			goto no_mem_needed;
 		}
 		ptr = malloc(mem_sz, M_CC_MEM, M_WAITOK);
 	} else {
 no_mem_needed:
 		mem_sz = 0;
 		ptr = NULL;
 	}
	/*
	 * Make sure it is all clean and zeroed, and also reacquire
	 * the inp lock.
	 */
 	memset(&cc_mem, 0, sizeof(cc_mem));
 	INP_WLOCK(inp);
 	if (inp->inp_flags & INP_DROPPED) {
 		INP_WUNLOCK(inp);
 		if (ptr)
 			free(ptr, M_CC_MEM);
 		/* Release our temp reference */
 		CC_LIST_RLOCK();
 		cc_release(algo);
 		CC_LIST_RUNLOCK();
 		return (ECONNRESET);
 	}
 	tp = intotcpcb(inp);
 	if (ptr != NULL)
 		memset(ptr, 0, mem_sz);
 	cc_mem.ccvc.tcp = tp;
	/*
	 * We once again hold a write lock over the tcb, so it is safe
	 * to do these things without ordering concerns.  Note that we
	 * initialize into the on-stack cc_mem here.
	 */
 	if (algo->cb_init != NULL)
 		error = algo->cb_init(&cc_mem, ptr);
 	else
 		error = 0;
	/*
	 * The CC algorithms, once given their memory, should not
	 * fail; in theory we could have a KASSERT here.
	 */
 	if (error == 0) {
		/*
		 * Touchdown: move the connection to the new CC module
		 * by copying in cc_mem after calling the old one's
		 * cleanup (if any).
		 */
 		if (CC_ALGO(tp)->cb_destroy != NULL)
 			CC_ALGO(tp)->cb_destroy(&tp->t_ccv);
 		/* Detach the old CC from the tcpcb  */
 		cc_detach(tp);
 		/* Copy in our temp memory that was inited */
 		memcpy(&tp->t_ccv, &cc_mem, sizeof(struct cc_var));
 		/* Now attach the new, which takes a reference */
 		cc_attach(tp, algo);
		/* Have we already gotten past any conn_init? */
 		if (TCPS_HAVEESTABLISHED(tp->t_state) && (CC_ALGO(tp)->conn_init != NULL)) {
			/* Yes; run the connection init for the new CC. */
 			CC_ALGO(tp)->conn_init(&tp->t_ccv);
 		}
 	} else if (ptr)
 		free(ptr, M_CC_MEM);
 	INP_WUNLOCK(inp);
	/* Now let's release our temp reference. */
 	CC_LIST_RLOCK();
 	cc_release(algo);
 	CC_LIST_RUNLOCK();
 	return (error);
 }
 
 int
 tcp_default_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	int	error, opt, optval;
 	u_int	ui;
 	struct	tcp_info ti;
 #ifdef KERN_TLS
 	struct tls_enable tls;
 	struct socket *so = inp->inp_socket;
 #endif
 	char	*pbuf, buf[TCP_LOG_ID_LEN];
 #ifdef STATS
 	struct statsblob *sbp;
 #endif
 	size_t	len;
 
 	INP_WLOCK_ASSERT(inp);
 	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
 	    ("inp_flags == %x", inp->inp_flags));
 	KASSERT(inp->inp_socket != NULL, ("inp_socket == NULL"));
 
 	switch (sopt->sopt_level) {
 #ifdef INET6
 	case IPPROTO_IPV6:
 		MPASS(inp->inp_vflag & INP_IPV6PROTO);
 		switch (sopt->sopt_name) {
 		case IPV6_USE_MIN_MTU:
 			tcp6_use_min_mtu(tp);
 			/* FALLTHROUGH */
 		}
 		INP_WUNLOCK(inp);
 		return (0);
 #endif
 #ifdef INET
 	case IPPROTO_IP:
 		INP_WUNLOCK(inp);
 		return (0);
 #endif
 	}
 
 	/*
 	 * For TCP_CCALGOOPT forward the control to CC module, for both
 	 * SOPT_SET and SOPT_GET.
 	 */
 	switch (sopt->sopt_name) {
 	case TCP_CCALGOOPT:
 		INP_WUNLOCK(inp);
 		if (sopt->sopt_valsize > CC_ALGOOPT_LIMIT)
 			return (EINVAL);
 		pbuf = malloc(sopt->sopt_valsize, M_TEMP, M_WAITOK | M_ZERO);
 		error = sooptcopyin(sopt, pbuf, sopt->sopt_valsize,
 		    sopt->sopt_valsize);
 		if (error) {
 			free(pbuf, M_TEMP);
 			return (error);
 		}
 		INP_WLOCK_RECHECK_CLEANUP(inp, free(pbuf, M_TEMP));
 		if (CC_ALGO(tp)->ctl_output != NULL)
 			error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, sopt, pbuf);
 		else
 			error = ENOENT;
 		INP_WUNLOCK(inp);
 		if (error == 0 && sopt->sopt_dir == SOPT_GET)
 			error = sooptcopyout(sopt, pbuf, sopt->sopt_valsize);
 		free(pbuf, M_TEMP);
 		return (error);
 	}
 
 	switch (sopt->sopt_dir) {
 	case SOPT_SET:
 		switch (sopt->sopt_name) {
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		case TCP_MD5SIG:
 			INP_WUNLOCK(inp);
 			if (!TCPMD5_ENABLED())
 				return (ENOPROTOOPT);
 			error = TCPMD5_PCBCTL(inp, sopt);
 			if (error)
 				return (error);
 			INP_WLOCK_RECHECK(inp);
 			goto unlock_and_done;
 #endif /* IPSEC */
 
 		case TCP_NODELAY:
 		case TCP_NOOPT:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &optval, sizeof optval,
 			    sizeof optval);
 			if (error)
 				return (error);
 
 			INP_WLOCK_RECHECK(inp);
 			switch (sopt->sopt_name) {
 			case TCP_NODELAY:
 				opt = TF_NODELAY;
 				break;
 			case TCP_NOOPT:
 				opt = TF_NOOPT;
 				break;
 			default:
 				opt = 0; /* dead code to fool gcc */
 				break;
 			}
 
 			if (optval)
 				tp->t_flags |= opt;
 			else
 				tp->t_flags &= ~opt;
 unlock_and_done:
 #ifdef TCP_OFFLOAD
 			if (tp->t_flags & TF_TOE) {
 				tcp_offload_ctloutput(tp, sopt->sopt_dir,
 				    sopt->sopt_name);
 			}
 #endif
 			INP_WUNLOCK(inp);
 			break;
 
 		case TCP_NOPUSH:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &optval, sizeof optval,
 			    sizeof optval);
 			if (error)
 				return (error);
 
 			INP_WLOCK_RECHECK(inp);
 			if (optval)
 				tp->t_flags |= TF_NOPUSH;
 			else if (tp->t_flags & TF_NOPUSH) {
 				tp->t_flags &= ~TF_NOPUSH;
 				if (TCPS_HAVEESTABLISHED(tp->t_state)) {
 					struct epoch_tracker et;
 
 					NET_EPOCH_ENTER(et);
 					error = tcp_output_nodrop(tp);
 					NET_EPOCH_EXIT(et);
 				}
 			}
 			goto unlock_and_done;
 
 		case TCP_REMOTE_UDP_ENCAPS_PORT:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &optval, sizeof optval,
 			    sizeof optval);
 			if (error)
 				return (error);
 			if ((optval < TCP_TUNNELING_PORT_MIN) ||
 			    (optval > TCP_TUNNELING_PORT_MAX)) {
				/* It has to be in range. */
 				return (EINVAL);
 			}
 			if ((V_tcp_udp_tunneling_port == 0) && (optval != 0)) {
 				/* You have to have enabled a UDP tunneling port first */
 				return (EINVAL);
 			}
 			INP_WLOCK_RECHECK(inp);
 			if (tp->t_state != TCPS_CLOSED) {
 				/* You can't change after you are connected */
 				error = EINVAL;
 			} else {
				/* OK, we are all good; set the port. */
 				tp->t_port = htons(optval);
 			}
 			goto unlock_and_done;
 
 		case TCP_MAXSEG:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &optval, sizeof optval,
 			    sizeof optval);
 			if (error)
 				return (error);
 
 			INP_WLOCK_RECHECK(inp);
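			/*
			 * Only allow shrinking the MSS, and not below
			 * V_tcp_minmss less 40 bytes of minimum IP/TCP
			 * header.
			 */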
 			if (optval > 0 && optval <= tp->t_maxseg &&
 			    optval + 40 >= V_tcp_minmss)
 				tp->t_maxseg = optval;
 			else
 				error = EINVAL;
 			goto unlock_and_done;
 
 		case TCP_INFO:
 			INP_WUNLOCK(inp);
 			error = EINVAL;
 			break;
 
 		case TCP_STATS:
 			INP_WUNLOCK(inp);
 #ifdef STATS
 			error = sooptcopyin(sopt, &optval, sizeof optval,
 			    sizeof optval);
 			if (error)
 				return (error);
 
 			if (optval > 0)
 				sbp = stats_blob_alloc(
 				    V_tcp_perconn_stats_dflt_tpl, 0);
 			else
 				sbp = NULL;
 
 			INP_WLOCK_RECHECK(inp);
 			if ((tp->t_stats != NULL && sbp == NULL) ||
 			    (tp->t_stats == NULL && sbp != NULL)) {
 				struct statsblob *t = tp->t_stats;
 				tp->t_stats = sbp;
 				sbp = t;
 			}
 			INP_WUNLOCK(inp);
 
 			stats_blob_destroy(sbp);
 #else
 			return (EOPNOTSUPP);
 #endif /* !STATS */
 			break;
 
 		case TCP_CONGESTION:
 			error = tcp_set_cc_mod(inp, sopt);
 			break;
 
 		case TCP_REUSPORT_LB_NUMA:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &optval, sizeof(optval),
 			    sizeof(optval));
 			INP_WLOCK_RECHECK(inp);
 			if (!error)
 				error = in_pcblbgroup_numa(inp, optval);
 			INP_WUNLOCK(inp);
 			break;
 
 #ifdef KERN_TLS
 		case TCP_TXTLS_ENABLE:
 			INP_WUNLOCK(inp);
 			error = copyin_tls_enable(sopt, &tls);
 			if (error)
 				break;
 			error = ktls_enable_tx(so, &tls);
 			break;
 		case TCP_TXTLS_MODE:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
 			if (error)
 				return (error);
 
 			INP_WLOCK_RECHECK(inp);
 			error = ktls_set_tx_mode(so, ui);
 			INP_WUNLOCK(inp);
 			break;
 		case TCP_RXTLS_ENABLE:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &tls, sizeof(tls),
 			    sizeof(tls));
 			if (error)
 				break;
 			error = ktls_enable_rx(so, &tls);
 			break;
 #endif
 		case TCP_MAXUNACKTIME:
 		case TCP_KEEPIDLE:
 		case TCP_KEEPINTVL:
 		case TCP_KEEPINIT:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
 			if (error)
 				return (error);
 
 			if (ui > (UINT_MAX / hz)) {
 				error = EINVAL;
 				break;
 			}
 			ui *= hz;
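			/* The option value was given in seconds; ui is now in ticks. */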
 
 			INP_WLOCK_RECHECK(inp);
 			switch (sopt->sopt_name) {
 			case TCP_MAXUNACKTIME:
 				tp->t_maxunacktime = ui;
 				break;
 
 			case TCP_KEEPIDLE:
 				tp->t_keepidle = ui;
 				/*
 				 * XXX: better check current remaining
 				 * timeout and "merge" it with new value.
 				 */
 				if ((tp->t_state > TCPS_LISTEN) &&
 				    (tp->t_state <= TCPS_CLOSING))
 					tcp_timer_activate(tp, TT_KEEP,
 					    TP_KEEPIDLE(tp));
 				break;
 			case TCP_KEEPINTVL:
 				tp->t_keepintvl = ui;
 				if ((tp->t_state == TCPS_FIN_WAIT_2) &&
 				    (TP_MAXIDLE(tp) > 0))
 					tcp_timer_activate(tp, TT_2MSL,
 					    TP_MAXIDLE(tp));
 				break;
 			case TCP_KEEPINIT:
 				tp->t_keepinit = ui;
 				if (tp->t_state == TCPS_SYN_RECEIVED ||
 				    tp->t_state == TCPS_SYN_SENT)
 					tcp_timer_activate(tp, TT_KEEP,
 					    TP_KEEPINIT(tp));
 				break;
 			}
 			goto unlock_and_done;
 
 		case TCP_KEEPCNT:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
 			if (error)
 				return (error);
 
 			INP_WLOCK_RECHECK(inp);
 			tp->t_keepcnt = ui;
 			if ((tp->t_state == TCPS_FIN_WAIT_2) &&
 			    (TP_MAXIDLE(tp) > 0))
 				tcp_timer_activate(tp, TT_2MSL,
 				    TP_MAXIDLE(tp));
 			goto unlock_and_done;
 
 #ifdef TCPPCAP
 		case TCP_PCAP_OUT:
 		case TCP_PCAP_IN:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &optval, sizeof optval,
 			    sizeof optval);
 			if (error)
 				return (error);
 
 			INP_WLOCK_RECHECK(inp);
 			if (optval >= 0)
 				tcp_pcap_set_sock_max(
 					(sopt->sopt_name == TCP_PCAP_OUT) ?
 					&(tp->t_outpkts) : &(tp->t_inpkts),
 					optval);
 			else
 				error = EINVAL;
 			goto unlock_and_done;
 #endif
 
 		case TCP_FASTOPEN: {
 			struct tcp_fastopen tfo_optval;
 
 			INP_WUNLOCK(inp);
 			if (!V_tcp_fastopen_client_enable &&
 			    !V_tcp_fastopen_server_enable)
 				return (EPERM);
 
 			error = sooptcopyin(sopt, &tfo_optval,
 				    sizeof(tfo_optval), sizeof(int));
 			if (error)
 				return (error);
 
 			INP_WLOCK_RECHECK(inp);
 			if ((tp->t_state != TCPS_CLOSED) &&
 			    (tp->t_state != TCPS_LISTEN)) {
 				error = EINVAL;
 				goto unlock_and_done;
 			}
 			if (tfo_optval.enable) {
 				if (tp->t_state == TCPS_LISTEN) {
 					if (!V_tcp_fastopen_server_enable) {
 						error = EPERM;
 						goto unlock_and_done;
 					}
 
 					if (tp->t_tfo_pending == NULL)
 						tp->t_tfo_pending =
 						    tcp_fastopen_alloc_counter();
 				} else {
 					/*
 					 * If a pre-shared key was provided,
 					 * stash it in the client cookie
 					 * field of the tcpcb for use during
 					 * connect.
 					 */
 					if (sopt->sopt_valsize ==
 					    sizeof(tfo_optval)) {
 						memcpy(tp->t_tfo_cookie.client,
 						       tfo_optval.psk,
 						       TCP_FASTOPEN_PSK_LEN);
 						tp->t_tfo_client_cookie_len =
 						    TCP_FASTOPEN_PSK_LEN;
 					}
 				}
 				tp->t_flags |= TF_FASTOPEN;
 			} else
 				tp->t_flags &= ~TF_FASTOPEN;
 			goto unlock_and_done;
 		}
 
 #ifdef TCP_BLACKBOX
 		case TCP_LOG:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, &optval, sizeof optval,
 			    sizeof optval);
 			if (error)
 				return (error);
 
 			INP_WLOCK_RECHECK(inp);
 			error = tcp_log_state_change(tp, optval);
 			goto unlock_and_done;
 
 		case TCP_LOGBUF:
 			INP_WUNLOCK(inp);
 			error = EINVAL;
 			break;
 
 		case TCP_LOGID:
 			INP_WUNLOCK(inp);
 			error = sooptcopyin(sopt, buf, TCP_LOG_ID_LEN - 1, 0);
 			if (error)
 				break;
 			buf[sopt->sopt_valsize] = '\0';
 			INP_WLOCK_RECHECK(inp);
 			error = tcp_log_set_id(tp, buf);
 			/* tcp_log_set_id() unlocks the INP. */
 			break;
 
 		case TCP_LOGDUMP:
 		case TCP_LOGDUMPID:
 			INP_WUNLOCK(inp);
 			error =
 			    sooptcopyin(sopt, buf, TCP_LOG_REASON_LEN - 1, 0);
 			if (error)
 				break;
 			buf[sopt->sopt_valsize] = '\0';
 			INP_WLOCK_RECHECK(inp);
 			if (sopt->sopt_name == TCP_LOGDUMP) {
 				error = tcp_log_dump_tp_logbuf(tp, buf,
 				    M_WAITOK, true);
 				INP_WUNLOCK(inp);
 			} else {
 				tcp_log_dump_tp_bucket_logbufs(tp, buf);
 				/*
 				 * tcp_log_dump_tp_bucket_logbufs() drops the
 				 * INP lock.
 				 */
 			}
 			break;
 #endif
 
 		default:
 			INP_WUNLOCK(inp);
 			error = ENOPROTOOPT;
 			break;
 		}
 		break;
 
 	case SOPT_GET:
 		tp = intotcpcb(inp);
 		switch (sopt->sopt_name) {
 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
 		case TCP_MD5SIG:
 			INP_WUNLOCK(inp);
 			if (!TCPMD5_ENABLED())
 				return (ENOPROTOOPT);
 			error = TCPMD5_PCBCTL(inp, sopt);
 			break;
 #endif
 
 		case TCP_NODELAY:
 			optval = tp->t_flags & TF_NODELAY;
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 			break;
 		case TCP_MAXSEG:
 			optval = tp->t_maxseg;
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 			break;
 		case TCP_REMOTE_UDP_ENCAPS_PORT:
 			optval = ntohs(tp->t_port);
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 			break;
 		case TCP_NOOPT:
 			optval = tp->t_flags & TF_NOOPT;
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 			break;
 		case TCP_NOPUSH:
 			optval = tp->t_flags & TF_NOPUSH;
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 			break;
 		case TCP_INFO:
 			tcp_fill_info(tp, &ti);
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &ti, sizeof ti);
 			break;
 		case TCP_STATS:
 			{
 #ifdef STATS
 			int nheld;
 			TYPEOF_MEMBER(struct statsblob, flags) sbflags = 0;
 
 			error = 0;
 			socklen_t outsbsz = sopt->sopt_valsize;
 			if (tp->t_stats == NULL)
 				error = ENOENT;
 			else if (outsbsz >= tp->t_stats->cursz)
 				outsbsz = tp->t_stats->cursz;
 			else if (outsbsz >= sizeof(struct statsblob))
 				outsbsz = sizeof(struct statsblob);
 			else
 				error = EINVAL;
 			INP_WUNLOCK(inp);
 			if (error)
 				break;
 
 			sbp = sopt->sopt_val;
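			/*
			 * Wire the pages backing the user's destination
			 * buffer so the snapshot can be copied out without
			 * faulting while the inpcb lock is held.
			 */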
 			nheld = atop(round_page(((vm_offset_t)sbp) +
 			    (vm_size_t)outsbsz) - trunc_page((vm_offset_t)sbp));
 			vm_page_t ma[nheld];
 			if (vm_fault_quick_hold_pages(
 			    &curproc->p_vmspace->vm_map, (vm_offset_t)sbp,
 			    outsbsz, VM_PROT_READ | VM_PROT_WRITE, ma,
 			    nheld) < 0) {
 				error = EFAULT;
 				break;
 			}
 
 			if ((error = copyin_nofault(&(sbp->flags), &sbflags,
 			    SIZEOF_MEMBER(struct statsblob, flags))))
 				goto unhold;
 
 			INP_WLOCK_RECHECK(inp);
 			error = stats_blob_snapshot(&sbp, outsbsz, tp->t_stats,
 			    sbflags | SB_CLONE_USRDSTNOFAULT);
 			INP_WUNLOCK(inp);
 			sopt->sopt_valsize = outsbsz;
 unhold:
 			vm_page_unhold_pages(ma, nheld);
 #else
 			INP_WUNLOCK(inp);
 			error = EOPNOTSUPP;
 #endif /* !STATS */
 			break;
 			}
 		case TCP_CONGESTION:
 			len = strlcpy(buf, CC_ALGO(tp)->name, TCP_CA_NAME_MAX);
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, buf, len + 1);
 			break;
 		case TCP_MAXUNACKTIME:
 		case TCP_KEEPIDLE:
 		case TCP_KEEPINTVL:
 		case TCP_KEEPINIT:
 		case TCP_KEEPCNT:
 			switch (sopt->sopt_name) {
 			case TCP_MAXUNACKTIME:
 				ui = TP_MAXUNACKTIME(tp) / hz;
 				break;
 			case TCP_KEEPIDLE:
 				ui = TP_KEEPIDLE(tp) / hz;
 				break;
 			case TCP_KEEPINTVL:
 				ui = TP_KEEPINTVL(tp) / hz;
 				break;
 			case TCP_KEEPINIT:
 				ui = TP_KEEPINIT(tp) / hz;
 				break;
 			case TCP_KEEPCNT:
 				ui = TP_KEEPCNT(tp);
 				break;
 			}
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &ui, sizeof(ui));
 			break;
 #ifdef TCPPCAP
 		case TCP_PCAP_OUT:
 		case TCP_PCAP_IN:
 			optval = tcp_pcap_get_sock_max(
 					(sopt->sopt_name == TCP_PCAP_OUT) ?
 					&(tp->t_outpkts) : &(tp->t_inpkts));
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 			break;
 #endif
 		case TCP_FASTOPEN:
 			optval = tp->t_flags & TF_FASTOPEN;
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 			break;
 #ifdef TCP_BLACKBOX
 		case TCP_LOG:
 			optval = tcp_get_bblog_state(tp);
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, &optval, sizeof(optval));
 			break;
 		case TCP_LOGBUF:
 			/* tcp_log_getlogbuf() does INP_WUNLOCK(inp) */
 			error = tcp_log_getlogbuf(sopt, tp);
 			break;
 		case TCP_LOGID:
 			len = tcp_log_get_id(tp, buf);
 			INP_WUNLOCK(inp);
 			error = sooptcopyout(sopt, buf, len + 1);
 			break;
 		case TCP_LOGDUMP:
 		case TCP_LOGDUMPID:
 			INP_WUNLOCK(inp);
 			error = EINVAL;
 			break;
 #endif
 #ifdef KERN_TLS
 		case TCP_TXTLS_MODE:
 			error = ktls_get_tx_mode(so, &optval);
 			INP_WUNLOCK(inp);
 			if (error == 0)
 				error = sooptcopyout(sopt, &optval,
 				    sizeof(optval));
 			break;
 		case TCP_RXTLS_MODE:
 			error = ktls_get_rx_mode(so, &optval);
 			INP_WUNLOCK(inp);
 			if (error == 0)
 				error = sooptcopyout(sopt, &optval,
 				    sizeof(optval));
 			break;
 #endif
 		default:
 			INP_WUNLOCK(inp);
 			error = ENOPROTOOPT;
 			break;
 		}
 		break;
 	}
 	return (error);
 }
 #undef INP_WLOCK_RECHECK
 #undef INP_WLOCK_RECHECK_CLEANUP
 
 /*
  * Initiate (or continue) disconnect.
  * If embryonic state, just send reset (once).
  * If in ``let data drain'' option and linger null, just drop.
  * Otherwise (hard), mark socket disconnecting and drop
  * current input data; switch states based on user close, and
  * send segment to peer (with FIN).
  */
 static void
 tcp_disconnect(struct tcpcb *tp)
 {
 	struct inpcb *inp = tptoinpcb(tp);
 	struct socket *so = tptosocket(tp);
 
 	NET_EPOCH_ASSERT();
 	INP_WLOCK_ASSERT(inp);
 
 	/*
 	 * Neither tcp_close() nor tcp_drop() should return NULL, as the
 	 * socket is still open.
 	 */
 	if (tp->t_state < TCPS_ESTABLISHED &&
 	    !(tp->t_state > TCPS_LISTEN && IS_FASTOPEN(tp->t_flags))) {
 		tp = tcp_close(tp);
 		KASSERT(tp != NULL,
 		    ("tcp_disconnect: tcp_close() returned NULL"));
 	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
 		tp = tcp_drop(tp, 0);
 		KASSERT(tp != NULL,
 		    ("tcp_disconnect: tcp_drop() returned NULL"));
 	} else {
 		soisdisconnecting(so);
 		sbflush(&so->so_rcv);
 		tcp_usrclosed(tp);
 		if (!(inp->inp_flags & INP_DROPPED))
			/* Ignore the stack's drop request; we are already at it. */
 			(void)tcp_output_nodrop(tp);
 	}
 }
 
 /*
  * User issued close, and wish to trail through shutdown states:
  * if never received SYN, just forget it.  If got a SYN from peer,
  * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
  * If already got a FIN from peer, then almost done; go to LAST_ACK
  * state.  In all other cases, have already sent FIN to peer (e.g.
  * after PRU_SHUTDOWN), and just have to play tedious game waiting
  * for peer to send FIN or not respond to keep-alives, etc.
  * We can let the user exit from the close as soon as the FIN is acked.
  */
 static void
 tcp_usrclosed(struct tcpcb *tp)
 {
 
 	NET_EPOCH_ASSERT();
 	INP_WLOCK_ASSERT(tptoinpcb(tp));
 
 	switch (tp->t_state) {
 	case TCPS_LISTEN:
 #ifdef TCP_OFFLOAD
 		tcp_offload_listen_stop(tp);
 #endif
 		tcp_state_change(tp, TCPS_CLOSED);
 		/* FALLTHROUGH */
 	case TCPS_CLOSED:
 		tp = tcp_close(tp);
 		/*
 		 * tcp_close() should never return NULL here as the socket is
 		 * still open.
 		 */
 		KASSERT(tp != NULL,
 		    ("tcp_usrclosed: tcp_close() returned NULL"));
 		break;
 
 	case TCPS_SYN_SENT:
 	case TCPS_SYN_RECEIVED:
 		tp->t_flags |= TF_NEEDFIN;
 		break;
 
 	case TCPS_ESTABLISHED:
 		tcp_state_change(tp, TCPS_FIN_WAIT_1);
 		break;
 
 	case TCPS_CLOSE_WAIT:
 		tcp_state_change(tp, TCPS_LAST_ACK);
 		break;
 	}
 	if (tp->t_acktime == 0)
 		tp->t_acktime = ticks;
 	if (tp->t_state >= TCPS_FIN_WAIT_2) {
 		soisdisconnected(tptosocket(tp));
 		/* Prevent the connection hanging in FIN_WAIT_2 forever. */
 		if (tp->t_state == TCPS_FIN_WAIT_2) {
 			int timeout;
 
 			timeout = (tcp_fast_finwait2_recycle) ?
 			    tcp_finwait2_timeout : TP_MAXIDLE(tp);
 			tcp_timer_activate(tp, TT_2MSL, timeout);
 		}
 	}
 }
 
 #ifdef DDB
 static void
 db_print_indent(int indent)
 {
 	int i;
 
 	for (i = 0; i < indent; i++)
 		db_printf(" ");
 }
 
 static void
 db_print_tstate(int t_state)
 {
 
 	switch (t_state) {
 	case TCPS_CLOSED:
 		db_printf("TCPS_CLOSED");
 		return;
 
 	case TCPS_LISTEN:
 		db_printf("TCPS_LISTEN");
 		return;
 
 	case TCPS_SYN_SENT:
 		db_printf("TCPS_SYN_SENT");
 		return;
 
 	case TCPS_SYN_RECEIVED:
 		db_printf("TCPS_SYN_RECEIVED");
 		return;
 
 	case TCPS_ESTABLISHED:
 		db_printf("TCPS_ESTABLISHED");
 		return;
 
 	case TCPS_CLOSE_WAIT:
 		db_printf("TCPS_CLOSE_WAIT");
 		return;
 
 	case TCPS_FIN_WAIT_1:
 		db_printf("TCPS_FIN_WAIT_1");
 		return;
 
 	case TCPS_CLOSING:
 		db_printf("TCPS_CLOSING");
 		return;
 
 	case TCPS_LAST_ACK:
 		db_printf("TCPS_LAST_ACK");
 		return;
 
 	case TCPS_FIN_WAIT_2:
 		db_printf("TCPS_FIN_WAIT_2");
 		return;
 
 	case TCPS_TIME_WAIT:
 		db_printf("TCPS_TIME_WAIT");
 		return;
 
 	default:
 		db_printf("unknown");
 		return;
 	}
 }
 
 static void
 db_print_tflags(u_int t_flags)
 {
 	int comma;
 
 	comma = 0;
 	if (t_flags & TF_ACKNOW) {
 		db_printf("%sTF_ACKNOW", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_DELACK) {
 		db_printf("%sTF_DELACK", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_NODELAY) {
 		db_printf("%sTF_NODELAY", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_NOOPT) {
 		db_printf("%sTF_NOOPT", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_SENTFIN) {
 		db_printf("%sTF_SENTFIN", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_REQ_SCALE) {
 		db_printf("%sTF_REQ_SCALE", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_RCVD_SCALE) {
 		db_printf("%sTF_RECVD_SCALE", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_REQ_TSTMP) {
 		db_printf("%sTF_REQ_TSTMP", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_RCVD_TSTMP) {
 		db_printf("%sTF_RCVD_TSTMP", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_SACK_PERMIT) {
 		db_printf("%sTF_SACK_PERMIT", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_NEEDSYN) {
 		db_printf("%sTF_NEEDSYN", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_NEEDFIN) {
 		db_printf("%sTF_NEEDFIN", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_NOPUSH) {
 		db_printf("%sTF_NOPUSH", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_PREVVALID) {
 		db_printf("%sTF_PREVVALID", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_MORETOCOME) {
 		db_printf("%sTF_MORETOCOME", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_SONOTCONN) {
 		db_printf("%sTF_SONOTCONN", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_LASTIDLE) {
 		db_printf("%sTF_LASTIDLE", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_RXWIN0SENT) {
 		db_printf("%sTF_RXWIN0SENT", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_FASTRECOVERY) {
 		db_printf("%sTF_FASTRECOVERY", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_CONGRECOVERY) {
 		db_printf("%sTF_CONGRECOVERY", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_WASFRECOVERY) {
 		db_printf("%sTF_WASFRECOVERY", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_WASCRECOVERY) {
 		db_printf("%sTF_WASCRECOVERY", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_SIGNATURE) {
 		db_printf("%sTF_SIGNATURE", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_FORCEDATA) {
 		db_printf("%sTF_FORCEDATA", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_TSO) {
 		db_printf("%sTF_TSO", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags & TF_FASTOPEN) {
 		db_printf("%sTF_FASTOPEN", comma ? ", " : "");
 		comma = 1;
 	}
 }
 
 static void
 db_print_tflags2(u_int t_flags2)
 {
 	int comma;
 
 	comma = 0;
 	if (t_flags2 & TF2_PLPMTU_BLACKHOLE) {
 		db_printf("%sTF2_PLPMTU_BLACKHOLE", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_PLPMTU_PMTUD) {
 		db_printf("%sTF2_PLPMTU_PMTUD", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_PLPMTU_MAXSEGSNT) {
 		db_printf("%sTF2_PLPMTU_MAXSEGSNT", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_LOG_AUTO) {
 		db_printf("%sTF2_LOG_AUTO", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_DROP_AF_DATA) {
 		db_printf("%sTF2_DROP_AF_DATA", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_ECN_PERMIT) {
 		db_printf("%sTF2_ECN_PERMIT", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_ECN_SND_CWR) {
 		db_printf("%sTF2_ECN_SND_CWR", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_ECN_SND_ECE) {
 		db_printf("%sTF2_ECN_SND_ECE", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_ACE_PERMIT) {
 		db_printf("%sTF2_ACE_PERMIT", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_flags2 & TF2_FBYTES_COMPLETE) {
 		db_printf("%sTF2_FBYTES_COMPLETE", comma ? ", " : "");
 		comma = 1;
 	}
 }
 
 static void
 db_print_toobflags(char t_oobflags)
 {
 	int comma;
 
 	comma = 0;
 	if (t_oobflags & TCPOOB_HAVEDATA) {
 		db_printf("%sTCPOOB_HAVEDATA", comma ? ", " : "");
 		comma = 1;
 	}
 	if (t_oobflags & TCPOOB_HADDATA) {
 		db_printf("%sTCPOOB_HADDATA", comma ? ", " : "");
 		comma = 1;
 	}
 }
 
 static void
 db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
 {
 
 	db_print_indent(indent);
 	db_printf("%s at %p\n", name, tp);
 
 	indent += 2;
 
 	db_print_indent(indent);
 	db_printf("t_segq first: %p   t_segqlen: %d   t_dupacks: %d\n",
 	   TAILQ_FIRST(&tp->t_segq), tp->t_segqlen, tp->t_dupacks);
 
 	db_print_indent(indent);
 	db_printf("t_callout: %p   t_timers: %p\n",
 	    &tp->t_callout, &tp->t_timers);
 
 	db_print_indent(indent);
 	db_printf("t_state: %d (", tp->t_state);
 	db_print_tstate(tp->t_state);
 	db_printf(")\n");
 
 	db_print_indent(indent);
 	db_printf("t_flags: 0x%x (", tp->t_flags);
 	db_print_tflags(tp->t_flags);
 	db_printf(")\n");
 
 	db_print_indent(indent);
 	db_printf("t_flags2: 0x%x (", tp->t_flags2);
 	db_print_tflags2(tp->t_flags2);
 	db_printf(")\n");
 
 	db_print_indent(indent);
 	db_printf("snd_una: 0x%08x   snd_max: 0x%08x   snd_nxt: 0x%08x\n",
 	    tp->snd_una, tp->snd_max, tp->snd_nxt);
 
 	db_print_indent(indent);
 	db_printf("snd_up: 0x%08x   snd_wl1: 0x%08x   snd_wl2: 0x%08x\n",
 	   tp->snd_up, tp->snd_wl1, tp->snd_wl2);
 
 	db_print_indent(indent);
 	db_printf("iss: 0x%08x   irs: 0x%08x   rcv_nxt: 0x%08x\n",
 	    tp->iss, tp->irs, tp->rcv_nxt);
 
 	db_print_indent(indent);
 	db_printf("rcv_adv: 0x%08x   rcv_wnd: %u   rcv_up: 0x%08x\n",
 	    tp->rcv_adv, tp->rcv_wnd, tp->rcv_up);
 
 	db_print_indent(indent);
 	db_printf("snd_wnd: %u   snd_cwnd: %u\n",
 	   tp->snd_wnd, tp->snd_cwnd);
 
 	db_print_indent(indent);
 	db_printf("snd_ssthresh: %u   snd_recover: "
 	    "0x%08x\n", tp->snd_ssthresh, tp->snd_recover);
 
 	db_print_indent(indent);
 	db_printf("t_rcvtime: %u   t_startime: %u\n",
 	    tp->t_rcvtime, tp->t_starttime);
 
 	db_print_indent(indent);
 	db_printf("t_rttime: %u   t_rtsq: 0x%08x\n",
 	    tp->t_rtttime, tp->t_rtseq);
 
 	db_print_indent(indent);
 	db_printf("t_rxtcur: %d   t_maxseg: %u   t_srtt: %d\n",
 	    tp->t_rxtcur, tp->t_maxseg, tp->t_srtt);
 
 	db_print_indent(indent);
 	db_printf("t_rttvar: %d   t_rxtshift: %d   t_rttmin: %u\n",
 	    tp->t_rttvar, tp->t_rxtshift, tp->t_rttmin);
 
 	db_print_indent(indent);
 	db_printf("t_rttupdated: %u   max_sndwnd: %u   t_softerror: %d\n",
 	    tp->t_rttupdated, tp->max_sndwnd, tp->t_softerror);
 
 	db_print_indent(indent);
 	db_printf("t_oobflags: 0x%x (", tp->t_oobflags);
 	db_print_toobflags(tp->t_oobflags);
 	db_printf(")   t_iobc: 0x%02x\n", tp->t_iobc);
 
 	db_print_indent(indent);
 	db_printf("snd_scale: %u   rcv_scale: %u   request_r_scale: %u\n",
 	    tp->snd_scale, tp->rcv_scale, tp->request_r_scale);
 
 	db_print_indent(indent);
 	db_printf("ts_recent: %u   ts_recent_age: %u\n",
 	    tp->ts_recent, tp->ts_recent_age);
 
 	db_print_indent(indent);
 	db_printf("ts_offset: %u   last_ack_sent: 0x%08x   snd_cwnd_prev: "
 	    "%u\n", tp->ts_offset, tp->last_ack_sent, tp->snd_cwnd_prev);
 
 	db_print_indent(indent);
 	db_printf("snd_ssthresh_prev: %u   snd_recover_prev: 0x%08x   "
 	    "t_badrxtwin: %u\n", tp->snd_ssthresh_prev,
 	    tp->snd_recover_prev, tp->t_badrxtwin);
 
 	db_print_indent(indent);
 	db_printf("snd_numholes: %d  snd_holes first: %p\n",
 	    tp->snd_numholes, TAILQ_FIRST(&tp->snd_holes));
 
 	db_print_indent(indent);
 	db_printf("snd_fack: 0x%08x   rcv_numsacks: %d\n",
 	    tp->snd_fack, tp->rcv_numsacks);
 
 	/* Skip sackblks, sackhint. */
 
 	db_print_indent(indent);
 	db_printf("t_rttlow: %d   rfbuf_ts: %u   rfbuf_cnt: %d\n",
 	    tp->t_rttlow, tp->rfbuf_ts, tp->rfbuf_cnt);
 }
 
 DB_SHOW_COMMAND(tcpcb, db_show_tcpcb)
 {
 	struct tcpcb *tp;
 
 	if (!have_addr) {
 		db_printf("usage: show tcpcb <addr>\n");
 		return;
 	}
 	tp = (struct tcpcb *)addr;
 
 	db_print_tcpcb(tp, "tcpcb", 0);
 }
 #endif