diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h
--- a/sys/netinet/in_pcb.h
+++ b/sys/netinet/in_pcb.h
@@ -590,8 +590,8 @@
 /*
  * Flags for inp_flags2.
  */
-#define	INP_MBUF_L_ACKS		0x00000001 /* We need large mbufs for ack compression */
-#define	INP_MBUF_ACKCMP		0x00000002 /* TCP mbuf ack compression ok */
+/*				0x00000001 */
+/*				0x00000002 */
 /*				0x00000004 */
 #define	INP_REUSEPORT		0x00000008 /* SO_REUSEPORT option is set */
 /*				0x00000010 */
@@ -602,11 +602,11 @@
 #define	INP_RECVRSSBUCKETID	0x00000200 /* populate recv datagram with bucket id */
 #define	INP_RATE_LIMIT_CHANGED	0x00000400 /* rate limit needs attention */
 #define	INP_ORIGDSTADDR		0x00000800 /* receive IP dst address/port */
-#define	INP_CANNOT_DO_ECN	0x00001000 /* The stack does not do ECN */
+/*				0x00001000 */
 #define	INP_REUSEPORT_LB	0x00002000 /* SO_REUSEPORT_LB option is set */
-#define	INP_SUPPORTS_MBUFQ	0x00004000 /* Supports the mbuf queue method of LRO */
-#define	INP_MBUF_QUEUE_READY	0x00008000 /* The transport is pacing, inputs can be queued */
-#define	INP_DONT_SACK_QUEUE	0x00010000 /* If a sack arrives do not wake me */
+/*				0x00004000 */
+/*				0x00008000 */
+/*				0x00010000 */
 #define	INP_2PCP_SET		0x00020000 /* If the Eth PCP should be set explicitly */
 #define	INP_2PCP_BIT0		0x00040000 /* Eth PCP Bit 0 */
 #define	INP_2PCP_BIT1		0x00080000 /* Eth PCP Bit 1 */
diff --git a/sys/netinet/tcp_hpts.c b/sys/netinet/tcp_hpts.c
--- a/sys/netinet/tcp_hpts.c
+++ b/sys/netinet/tcp_hpts.c
@@ -1370,7 +1370,7 @@
 	 * cause a call to output if it is needed.
 	 */
 	tp->t_flags2 |= TF2_HPTS_CALLS;
-	if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) &&
+	if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) &&
 	    !STAILQ_EMPTY(&tp->t_inqueue)) {
 		error = (*tp->t_fb->tfb_do_queued_segments)(tp, 0);
 		if (error) {
diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c
--- a/sys/netinet/tcp_lro.c
+++ b/sys/netinet/tcp_lro.c
@@ -91,7 +91,7 @@
 	    uint32_t csum, bool use_hash);
 
 #ifdef TCPHPTS
-static bool	do_bpf_strip_and_compress(struct inpcb *, struct lro_ctrl *,
+static bool	do_bpf_strip_and_compress(struct tcpcb *, struct lro_ctrl *,
 		struct lro_entry *, struct mbuf **, struct mbuf **, struct mbuf **,
 		bool *, bool, bool, struct ifnet *, bool);
 
@@ -1192,13 +1192,9 @@
 }
 
 static bool
-tcp_lro_check_wake_status(struct inpcb *inp)
+tcp_lro_check_wake_status(struct tcpcb *tp)
 {
-	struct tcpcb *tp;
 
-	tp = intotcpcb(inp);
-	if (__predict_false(tp == NULL))
-		return (true);
 	if (tp->t_fb->tfb_early_wake_check != NULL)
 		return ((tp->t_fb->tfb_early_wake_check)(tp));
 	return (false);
@@ -1206,15 +1202,10 @@
 
 static struct mbuf *
 tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le,
-    struct inpcb *inp, int32_t *new_m, bool can_append_old_cmp)
+    struct tcpcb *tp, int32_t *new_m, bool can_append_old_cmp)
 {
-	struct tcpcb *tp;
 	struct mbuf *m;
 
-	tp = intotcpcb(inp);
-	if (__predict_false(tp == NULL))
-		return (NULL);
-
 	/* Look at the last mbuf if any in queue */
 	if (can_append_old_cmp) {
 		m = STAILQ_LAST(&tp->t_inqueue, mbuf, m_stailqpkt);
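The rewrite is mechanical: every TCP-specific flag moves from inp_flags2 to t_flags2, and helpers that used to take an inpcb now take the tcpcb directly, which removes the intotcpcb() detour and its NULL check. A minimal before/after sketch of the pattern (illustrative only, helper names hypothetical):

static bool
supports_mbufq_old(struct inpcb *inp)
{
	/* Pre-patch: detour via the inpcb; the tcpcb may not exist. */
	struct tcpcb *tp = intotcpcb(inp);

	if (tp == NULL)
		return (false);
	return ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) != 0);
}

static bool
supports_mbufq_new(struct tcpcb *tp)
{
	/* Post-patch: one dereference, no NULL case to consider. */
	return ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) != 0);
}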
@@ -1226,13 +1217,13 @@
 			return (m);
 		} else {
 			/* Mark we ran out of space */
-			inp->inp_flags2 |= INP_MBUF_L_ACKS;
+			tp->t_flags2 |= TF2_MBUF_L_ACKS;
 		}
 	}
 	/* Decide mbuf size. */
 	tcp_lro_log(tp, lc, le, NULL, 21, 0, 0, 0, 0);
-	if (inp->inp_flags2 & INP_MBUF_L_ACKS)
+	if (tp->t_flags2 & TF2_MBUF_L_ACKS)
 		m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
 	else
 		m = m_gethdr(M_NOWAIT, MT_DATA);
@@ -1248,7 +1239,7 @@
 	return (m);
 }
 
-static struct inpcb *
+static struct tcpcb *
 tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
 {
 	struct inpcb *inp;
@@ -1277,10 +1268,10 @@
 		break;
 #endif
 	default:
-		inp = NULL;
-		break;
+		return (NULL);
 	}
-	return (inp);
+
+	return (intotcpcb(inp));
 }
 
 static inline bool
@@ -1335,7 +1326,6 @@
 static int
 tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct lro_entry *le)
 {
-	struct inpcb *inp;
 	struct tcpcb *tp;
 	struct mbuf **pp, *cmp, *mv_to;
 	struct ifnet *lagg_ifp;
@@ -1364,31 +1354,28 @@
 		    IN6_IS_ADDR_UNSPECIFIED(&le->inner.data.s_addr.v6)))
 			return (TCP_LRO_CANNOT);
 #endif
-	/* Lookup inp, if any. */
-	inp = tcp_lro_lookup(lc->ifp,
+	/* Lookup inp, if any.  Returns locked TCP inpcb. */
+	tp = tcp_lro_lookup(lc->ifp,
 	    (le->inner.data.lro_type == LRO_TYPE_NONE) ? &le->outer : &le->inner);
-	if (inp == NULL)
+	if (tp == NULL)
 		return (TCP_LRO_CANNOT);
 
 	counter_u64_add(tcp_inp_lro_locks_taken, 1);
 
-	/* Get TCP control structure. */
-	tp = intotcpcb(inp);
-
 	/* Check if the inp is dead, Jim. */
 	if (tp->t_state == TCPS_TIME_WAIT) {
-		INP_WUNLOCK(inp);
+		INP_WUNLOCK(tptoinpcb(tp));
 		return (TCP_LRO_CANNOT);
 	}
 	if (tp->t_lro_cpu == HPTS_CPU_NONE && lc->lro_cpu_is_set == 1)
 		tp->t_lro_cpu = lc->lro_last_cpu;
 	/* Check if the transport doesn't support the needed optimizations. */
-	if ((inp->inp_flags2 & (INP_SUPPORTS_MBUFQ | INP_MBUF_ACKCMP)) == 0) {
-		INP_WUNLOCK(inp);
+	if ((tp->t_flags2 & (TF2_SUPPORTS_MBUFQ | TF2_MBUF_ACKCMP)) == 0) {
+		INP_WUNLOCK(tptoinpcb(tp));
 		return (TCP_LRO_CANNOT);
 	}
-	if (inp->inp_flags2 & INP_MBUF_QUEUE_READY)
+	if (tp->t_flags2 & TF2_MBUF_QUEUE_READY)
 		should_wake = false;
 	else
 		should_wake = true;
@@ -1411,7 +1398,7 @@
 	cmp = NULL;
 	for (pp = &le->m_head; *pp != NULL; ) {
 		mv_to = NULL;
-		if (do_bpf_strip_and_compress(inp, lc, le, pp,
+		if (do_bpf_strip_and_compress(tp, lc, le, pp,
 		    &cmp, &mv_to, &should_wake, bpf_req, lagg_bpf_req,
 		    lagg_ifp, can_append_old_cmp) == false) {
 			/* Advance to next mbuf. */
@@ -1444,17 +1431,18 @@
 	/* Check if any data mbufs left. */
 	if (le->m_head != NULL) {
 		counter_u64_add(tcp_inp_lro_direct_queue, 1);
-		tcp_lro_log(tp, lc, le, NULL, 22, 1, inp->inp_flags2, 0, 1);
+		tcp_lro_log(tp, lc, le, NULL, 22, 1, tp->t_flags2, 0, 1);
 		tcp_queue_pkts(tp, le);
 	}
 	if (should_wake) {
 		/* Wakeup */
 		counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
 		if ((*tp->t_fb->tfb_do_queued_segments)(tp, 0))
-			inp = NULL;
+			/* TCP cb gone and unlocked. */
+			return (0);
 	}
-	if (inp != NULL)
-		INP_WUNLOCK(inp);
+	INP_WUNLOCK(tptoinpcb(tp));
+
 	return (0);	/* Success. */
 }
 #endif
@@ -1674,7 +1662,7 @@
  * and strip all, but the IPv4/IPv6 header.
  */
 static bool
-do_bpf_strip_and_compress(struct inpcb *inp, struct lro_ctrl *lc,
+do_bpf_strip_and_compress(struct tcpcb *tp, struct lro_ctrl *lc,
     struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp,
     struct mbuf **mv_to, bool *should_wake, bool bpf_req, bool lagg_bpf_req,
     struct ifnet *lagg_ifp, bool can_append_old_cmp)
 {
@@ -1751,7 +1739,7 @@
 
 	/* Now lets look at the should wake states */
 	if ((other_opts == true) &&
-	    ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) == 0)) {
+	    ((tp->t_flags2 & TF2_DONT_SACK_QUEUE) == 0)) {
 		/*
 		 * If there are other options (SACK?) and the
 		 * tcp endpoint has not expressly told us it does
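With tcp_lro_lookup() now returning a tcpcb (write-locked via its underlying inpcb), callers unlock through tptoinpcb(). A condensed sketch of the calling convention tcp_lro_flush_tcphpts() follows above (function name hypothetical, error paths trimmed):

static int
example_flush_one(struct lro_ctrl *lc, struct lro_entry *le)
{
	struct tcpcb *tp;

	tp = tcp_lro_lookup(lc->ifp, &le->outer); /* NULL, or locked */
	if (tp == NULL)
		return (TCP_LRO_CANNOT);
	if ((tp->t_flags2 & (TF2_SUPPORTS_MBUFQ | TF2_MBUF_ACKCMP)) == 0) {
		/* The lock still lives in the inpcb. */
		INP_WUNLOCK(tptoinpcb(tp));
		return (TCP_LRO_CANNOT);
	}
	/* ... queue mbufs onto tp->t_inqueue, maybe compress ACKs ... */
	if ((*tp->t_fb->tfb_do_queued_segments)(tp, 0))
		return (0);	/* connection dropped; already unlocked */
	INP_WUNLOCK(tptoinpcb(tp));
	return (0);
}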
@@ -1760,13 +1748,13 @@
 		*should_wake = true;
 	} else if (*should_wake == false) {
 		/* Wakeup override check if we are false here */
-		*should_wake = tcp_lro_check_wake_status(inp);
+		*should_wake = tcp_lro_check_wake_status(tp);
 	}
 	/* Is the ack compressable? */
 	if (can_compress == false)
 		goto done;
 	/* Does the TCP endpoint support ACK compression? */
-	if ((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)
+	if ((tp->t_flags2 & TF2_MBUF_ACKCMP) == 0)
 		goto done;
 
 	/* Lets get the TOS/traffic class field */
@@ -1785,7 +1773,8 @@
 	/* Now lets get space if we don't have some already */
 	if (*cmp == NULL) {
 new_one:
-		nm = tcp_lro_get_last_if_ackcmp(lc, le, inp, &n_mbuf, can_append_old_cmp);
+		nm = tcp_lro_get_last_if_ackcmp(lc, le, tp, &n_mbuf,
+		    can_append_old_cmp);
 		if (__predict_false(nm == NULL))
 			goto done;
 		*cmp = nm;
@@ -1812,7 +1801,7 @@
 		nm = *cmp;
 		if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
 			/* We ran out of space */
-			inp->inp_flags2 |= INP_MBUF_L_ACKS;
+			tp->t_flags2 |= TF2_MBUF_L_ACKS;
 			goto new_one;
 		}
 	}
diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c
--- a/sys/netinet/tcp_stacks/bbr.c
+++ b/sys/netinet/tcp_stacks/bbr.c
@@ -891,7 +891,7 @@
 		 * Tell LRO that it can queue packets while
 		 * we pace.
 		 */
-		bbr->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
+		bbr->rc_tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
 		if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
 		    (bbr->rc_cwnd_limited == 0)) {
 			/*
@@ -899,9 +899,9 @@
 			 * are running a rack timer we put on
 			 * the do not disturbe even for sack.
 			 */
-			inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
+			tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
 		} else
-			inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
+			tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
 		bbr->rc_pacer_started = cts;
 
 		(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot),
@@ -932,12 +932,12 @@
 			 * if a sack arrives as long as we are
 			 * not cwnd limited.
 			 */
-			bbr->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
-			inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
+			tp->t_flags2 |= (TF2_MBUF_QUEUE_READY |
+			    TF2_DONT_SACK_QUEUE);
 		} else {
 			/* All other timers wake us up */
-			bbr->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
-			inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
+			tp->t_flags2 &= ~(TF2_MBUF_QUEUE_READY |
+			    TF2_DONT_SACK_QUEUE);
 		}
 		bbr->bbr_timer_src = frm;
 		bbr_log_to_start(bbr, cts, hpts_timeout, slot, 0);
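The TF2_MBUF_L_ACKS handling in the tcp_lro.c hunks above acts as a one-way latch: compressed-ack mbufs start out as plain header mbufs, and the first overflow sets the flag so that later allocations use clusters. The protocol, collected into two hypothetical helpers (a sketch, not part of the patch):

static struct mbuf *
ackcmp_get_mbuf(struct tcpcb *tp)
{
	if (tp->t_flags2 & TF2_MBUF_L_ACKS)	/* latched: use clusters */
		return (m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR));
	return (m_gethdr(M_NOWAIT, MT_DATA));	/* first try: header mbuf */
}

static struct mbuf *
ackcmp_make_room(struct tcpcb *tp, struct mbuf *m)
{
	if (M_TRAILINGSPACE(m) >= sizeof(struct tcp_ackent))
		return (m);			/* current mbuf still fits */
	tp->t_flags2 |= TF2_MBUF_L_ACKS;	/* latch; LRO never clears */
	return (ackcmp_get_mbuf(tp));		/* retry with a cluster */
}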
@@ -2498,7 +2498,7 @@
 		log.u_bbr.flex4 = slot;
 		log.u_bbr.flex5 = bbr->rc_tp->t_hpts_slot;
 		log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
-		log.u_bbr.pkts_out = bbr->rc_inp->inp_flags2;
+		log.u_bbr.pkts_out = bbr->rc_tp->t_flags2;
 		log.u_bbr.flex8 = which;
 		TCP_LOG_EVENTP(bbr->rc_tp, NULL,
 		    &bbr->rc_inp->inp_socket->so_rcv,
@@ -9940,13 +9940,13 @@
 	}
 	bbr = (struct tcp_bbr *)*ptr;
 	bbr->rtt_valid = 0;
-	inp->inp_flags2 |= INP_CANNOT_DO_ECN;
-	inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+	tp->t_flags2 |= TF2_CANNOT_DO_ECN;
+	tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	/* Take off any undesired flags */
-	inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
-	inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
-	inp->inp_flags2 &= ~INP_MBUF_ACKCMP;
-	inp->inp_flags2 &= ~INP_MBUF_L_ACKS;
+	tp->t_flags2 &= ~TF2_MBUF_QUEUE_READY;
+	tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
+	tp->t_flags2 &= ~TF2_MBUF_ACKCMP;
+	tp->t_flags2 &= ~TF2_MBUF_L_ACKS;
 
 	TAILQ_INIT(&bbr->r_ctl.rc_map);
 	TAILQ_INIT(&bbr->r_ctl.rc_free);
@@ -11583,7 +11583,7 @@
 	}
 	/* Clear the flag, it may have been cleared by output but we may not have */
 	if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS))
-		tp->t_hpts_calls &= ~TF2_HPTS_CALLS;
+		tp->t_flags2 &= ~TF2_HPTS_CALLS;
 	/* Do we have a new state */
 	if (bbr->r_state != tp->t_state)
 		bbr_set_state(tp, bbr, tiwin);
@@ -12046,7 +12046,7 @@
 			return (retval < 0 ? retval : 0);
 		}
 	}
-	bbr->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
+	bbr->rc_tp->t_flags2 &= ~TF2_MBUF_QUEUE_READY;
 	if (hpts_calling &&
 	    (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
 		bbr->r_ctl.rc_last_delay_val = 0;
@@ -14078,15 +14078,14 @@
 	 * pacer (if our flags are up) if so we are good, if
 	 * not we need to get back into the pacer.
 	 */
-	struct inpcb *inp = tptoinpcb(tp);
 	struct timeval tv;
 	uint32_t cts;
 	uint32_t toval;
 	struct tcp_bbr *bbr;
 	struct hpts_diag diag;
 
-	inp->inp_flags2 |= INP_CANNOT_DO_ECN;
-	inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+	tp->t_flags2 |= TF2_CANNOT_DO_ECN;
+	tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
 	if (tp->t_in_hpts > IHPTS_NONE) {
 		return;
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -6811,12 +6811,12 @@
 	 * are not on then these flags won't have any effect since it
 	 * won't go through the queuing LRO path).
 	 *
-	 * INP_MBUF_QUEUE_READY - This flags says that I am busy
+	 * TF2_MBUF_QUEUE_READY - This flags says that I am busy
 	 *                        pacing output, so don't disturb. But
 	 *                        it also means LRO can wake me if there
 	 *                        is a SACK arrival.
 	 *
-	 * INP_DONT_SACK_QUEUE - This flag is used in conjunction
+	 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction
 	 *                       with the above flag (QUEUE_READY) and
 	 *                       when present it says don't even wake me
 	 *                       if a SACK arrives.
@@ -6831,7 +6831,7 @@
 	 * Other cases should usually have none of the flags set
 	 * so LRO can call into us.
 	 */
-	inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
+	tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY);
 	if (slot) {
 		rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
 		rack->r_ctl.rc_last_output_to = us_cts + slot;
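Read together with the tcp_lro.c changes earlier in this patch, the two flags documented above reduce to a small wake-up decision. A sketch of that decision as one hypothetical predicate (not in the patch):

static bool
lro_should_wake_stack(const struct tcpcb *tp, bool sack_arrived)
{
	if ((tp->t_flags2 & TF2_MBUF_QUEUE_READY) == 0)
		return (true);	/* not pacing: every input wakes the stack */
	if (sack_arrived && (tp->t_flags2 & TF2_DONT_SACK_QUEUE) == 0)
		return (true);	/* pacing, but SACKs may still interrupt */
	return (false);		/* pacing: just queue onto tp->t_inqueue */
}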
@@ -6843,7 +6843,7 @@
 		 * will be effective if mbuf queueing is on or
 		 * compressed acks are being processed.
 		 */
-		inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
+		tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
 		/*
 		 * But wait if we have a Rack timer running
 		 * even a SACK should not disturb us (with
@@ -6851,7 +6851,7 @@
 		 */
 		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) {
 			if (rack->r_rr_config != 3)
-				inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
+				tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
 			else if (rack->rc_pace_dnd) {
 				if (IN_RECOVERY(tp->t_flags)) {
 					/*
@@ -6862,13 +6862,14 @@
 					 * and let all sacks wake us up.
 					 *
 					 */
-					inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
+					tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
 				}
 			}
 		}
 		/* For sack attackers we want to ignore sack */
 		if (rack->sack_attack_disable == 1) {
-			inp->inp_flags2 |= (INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
+			tp->t_flags2 |= (TF2_DONT_SACK_QUEUE |
+			    TF2_MBUF_QUEUE_READY);
 		} else if (rack->rc_ack_can_sendout_data) {
 			/*
 			 * Ahh but wait, this is that special case
@@ -6876,7 +6877,8 @@
 			 * backout the changes (used for non-paced
 			 * burst limiting).
 			 */
-			inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
+			tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE |
+			    TF2_MBUF_QUEUE_READY);
 		}
 		if ((rack->use_rack_rr) &&
 		    (rack->r_rr_config < 2) &&
@@ -6897,7 +6899,7 @@
 		}
 	} else if (hpts_timeout) {
 		/*
-		 * With respect to inp_flags2 here, lets let any new acks wake
+		 * With respect to t_flags2(?) here, lets let any new acks wake
 		 * us up here. Since we are not pacing (no pacing timer), output
 		 * can happen so we should let it. If its a Rack timer, then any inbound
 		 * packet probably won't change the sending (we will be blocked)
@@ -8025,7 +8027,7 @@
 	 * no-sack wakeup on since we no longer have a PKT_OUTPUT
 	 * flag in place.
 	 */
-	rack->rc_inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
+	rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
 	ret = -3;
 	left = rack->r_ctl.rc_timer_exp - cts;
 	tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left));
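Both rack (above) and bbr follow the same arming discipline when a pacing slot or a rack timer is started. Schematically, with the booleans standing in for the stacks' real state (a sketch, not the patch's code):

static void
pacer_set_lro_flags(struct tcpcb *tp, bool pacing_slot_armed,
    bool rack_timer_armed, bool cwnd_limited)
{
	/* Start from a clean slate on every (re)arm. */
	tp->t_flags2 &= ~(TF2_MBUF_QUEUE_READY | TF2_DONT_SACK_QUEUE);
	if (pacing_slot_armed) {
		/* Output is blocked by the pacer: LRO may queue inputs. */
		tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
		if (rack_timer_armed && !cwnd_limited) {
			/* Not even a SACK should wake us early. */
			tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
		}
	}
}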
@@ -14555,9 +14557,8 @@
 	 * This method gets called if a stack switch was
 	 * attempted and it failed. We are left
 	 * but our hpts timers were stopped and we
-	 * need to validate time units and inp_flags2.
+	 * need to validate time units and t_flags2.
 	 */
-	struct inpcb *inp = tptoinpcb(tp);
 	struct tcp_rack *rack;
 	struct timeval tv;
 	uint32_t cts;
@@ -14567,11 +14568,11 @@
 	rack = (struct tcp_rack *)tp->t_fb_ptr;
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC);
 	if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
-		inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	else
-		inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
+		tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
-		rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
+		tp->t_flags2 |= TF2_MBUF_ACKCMP;
 	if (tp->t_in_hpts > IHPTS_NONE) {
 		/* Strange */
 		return;
@@ -15078,13 +15079,13 @@
 		}
 	}
 	rack_stop_all_timers(tp, rack);
-	/* Setup all the inp_flags2 */
+	/* Setup all the t_flags2 */
 	if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
-		tptoinpcb(tp)->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	else
-		tptoinpcb(tp)->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
+		tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
-		rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
+		tp->t_flags2 |= TF2_MBUF_ACKCMP;
 	/*
 	 * Timers in Rack are kept in microseconds so lets
 	 * convert any initial incoming variables
@@ -15406,7 +15407,7 @@
 		break;
 	};
 	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
-		rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
+		rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
 
 }
@@ -16508,7 +16509,7 @@
 		 * so should process the packets.
 		 */
 		slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
-		if (rack->rc_inp->inp_flags2 & INP_DONT_SACK_QUEUE) {
+		if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) {
 			no_output = 1;
 		} else {
 			/*
@@ -22399,7 +22400,7 @@
 		rack->use_fixed_rate = 0;
 		if (rack->gp_ready)
 			rack_set_cc_pacing(rack);
-		rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+		rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 		rack->rack_attempt_hdwr_pace = 0;
 		/* rxt settings */
 		rack->full_size_rxt = 1;
@@ -22408,7 +22409,7 @@
 		rack->r_use_cmp_ack = 1;
 		if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
 		    rack->r_use_cmp_ack)
-			rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
+			rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
 		/* scwnd=1 */
 		rack->rack_enable_scwnd = 1;
 		/* dynamic=100 */
@@ -22525,11 +22526,11 @@
 	if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
 		rack->r_mbuf_queue = 1;
 		if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
-			rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
-		rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+			rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
+		rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 	} else {
 		rack->r_mbuf_queue = 0;
-		rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
+		rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 	}
 	if (rack_enable_shared_cwnd)
 		rack->rack_enable_scwnd = 1;
@@ -22676,7 +22677,6 @@
 	struct epoch_tracker et;
 	struct sockopt sopt;
 	struct cc_newreno_opts opt;
-	struct inpcb *inp = tptoinpcb(tp);
 	uint64_t val;
 	int error = 0;
 	uint16_t ca, ss;
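rack re-derives the same flag invariant in several of the hunks above (stack-switch recovery, init, and the socket-option handlers). The repeated logic, collected into one hypothetical helper for clarity (a sketch, not part of the patch):

static void
rack_resync_flags2(struct tcpcb *tp, const struct tcp_rack *rack)
{
	if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
	else
		tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
	/* Compressed ACKs are only offered once the handshake is done. */
	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
		tp->t_flags2 |= TF2_MBUF_ACKCMP;
}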
@@ -22854,16 +22854,16 @@
 		break;
 	case TCP_USE_CMP_ACKS:
 		RACK_OPTS_INC(tcp_use_cmp_acks);
-		if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) {
+		if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) {
 			/* You can't turn it off once its on! */
 			error = EINVAL;
 		} else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
 			rack->r_use_cmp_ack = 1;
 			rack->r_mbuf_queue = 1;
-			inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+			tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 		}
 		if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
-			inp->inp_flags2 |= INP_MBUF_ACKCMP;
+			tp->t_flags2 |= TF2_MBUF_ACKCMP;
 		break;
 	case TCP_SHARED_CWND_TIME_LIMIT:
 		RACK_OPTS_INC(tcp_lscwnd);
@@ -22926,9 +22926,9 @@
 		else
 			rack->r_mbuf_queue = 0;
 		if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
-			inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+			tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 		else
-			inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
+			tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 		break;
 	case TCP_RACK_NONRXT_CFG_RATE:
 		RACK_OPTS_INC(tcp_rack_cfg_rate);
@@ -23011,9 +23011,9 @@
 			}
 		}
 		if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
-			inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
+			tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
 		else
-			inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
+			tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
 		/* A rate may be set irate or other, if so set seg size */
 		rack_update_seg(rack);
 		break;
diff --git a/sys/netinet/tcp_stacks/rack_bbr_common.c b/sys/netinet/tcp_stacks/rack_bbr_common.c
--- a/sys/netinet/tcp_stacks/rack_bbr_common.c
+++ b/sys/netinet/tcp_stacks/rack_bbr_common.c
@@ -445,8 +445,8 @@
 			 * been compressed. We assert the inp has
 			 * the flag set to enable this!
 			 */
-			KASSERT((inp->inp_flags2 & INP_MBUF_ACKCMP),
-			    ("tp:%p inp:%p no INP_MBUF_ACKCMP flags?", tp, inp));
+			KASSERT((tp->t_flags2 & TF2_MBUF_ACKCMP),
+			    ("tp:%p no TF2_MBUF_ACKCMP flags?", tp));
 			tlen = 0;
 			drop_hdrlen = 0;
 			th = NULL;
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -1073,7 +1073,7 @@
 
 	/* Make sure we get no interesting mbuf queuing behavior */
 	/* All mbuf queue/ack compress flags should be off */
-	tcp_lro_features_off(tptoinpcb(tp));
+	tcp_lro_features_off(tp);
 
 	/* Cancel the GP measurement in progress */
 	tp->t_flags &= ~TF_GPUTINPROG;
@@ -2270,7 +2270,7 @@
 		    V_tcp_mssdflt;
 
 	/* All mbuf queue/ack compress flags should be off */
-	tcp_lro_features_off(tptoinpcb(tp));
+	tcp_lro_features_off(tp);
 	callout_init_rw(&tp->t_callout, &inp->inp_lock,
 	    CALLOUT_RETURNUNLOCKED);
 	for (int i = 0; i < TT_N; i++)
@@ -4052,7 +4052,7 @@
 	 * old callout system in the other stacks so
 	 * those are hopefully safe.
 	 */
-	tcp_lro_features_off(tptoinpcb(tp));
+	tcp_lro_features_off(tp);
 	tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
 }
 
@@ -4236,9 +4236,9 @@
 	 * the newstack does not support.
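The KASSERT change above encodes the input contract: a stack may only be handed an M_ACKCMP mbuf if it advertised TF2_MBUF_ACKCMP. A sketch of the demux a common input path performs (the two process_* callees are hypothetical):

static int	process_compressed_acks(struct tcpcb *, struct mbuf *);
static int	process_plain_segment(struct tcpcb *, struct mbuf *);

static int
example_handle_queued(struct tcpcb *tp, struct mbuf *m)
{
	if (m->m_flags & M_ACKCMP) {
		/* Only legal when the stack opted in to compressed ACKs. */
		KASSERT((tp->t_flags2 & TF2_MBUF_ACKCMP),
		    ("tp:%p no TF2_MBUF_ACKCMP flags?", tp));
		return (process_compressed_acks(tp, m));
	}
	return (process_plain_segment(tp, m));
}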
	 */
-	if (tptoinpcb(tp)->inp_flags2 & INP_MBUF_L_ACKS)
+	if (tp->t_flags2 & TF2_MBUF_L_ACKS)
 		return;
 
-	if ((tptoinpcb(tp)->inp_flags2 & INP_SUPPORTS_MBUFQ) == 0 &&
+	if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) == 0 &&
 	    !STAILQ_EMPTY(&tp->t_inqueue)) {
 		/*
 		 * It is unsafe to process the packets since a
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -1724,7 +1724,7 @@
 	if (ltflags & TF_NOOPT)
 		sc->sc_flags |= SCF_NOOPT;
 	/* ECN Handshake */
-	if (V_tcp_do_ecn && (inp->inp_flags2 & INP_CANNOT_DO_ECN) == 0)
+	if (V_tcp_do_ecn && (tp->t_flags2 & TF2_CANNOT_DO_ECN) == 0)
 		sc->sc_flags |= tcp_ecn_syncache_add(tcp_get_flags(th), iptos);
 
 	if (V_tcp_syncookies)
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -677,16 +677,6 @@
 	return (rv);
 }
 
-static inline void
-tcp_lro_features_off(struct inpcb *inp)
-{
-	inp->inp_flags2 &= ~(INP_SUPPORTS_MBUFQ|
-	    INP_MBUF_QUEUE_READY|
-	    INP_DONT_SACK_QUEUE|
-	    INP_MBUF_ACKCMP|
-	    INP_MBUF_L_ACKS);
-}
-
 /*
  * tcp_output_unlock()
  * Always returns unlocked, handles drop request from advanced stacks.
@@ -853,6 +843,12 @@
 #define	TF2_ECN_USE_ECT1	0x00000800 /* Use ECT(1) marking on session */
 #define	TF2_TCP_ACCOUNTING	0x00010000 /* Do TCP accounting */
 #define	TF2_HPTS_CALLS		0x00020000 /* tcp_output() called via HPTS */
+#define	TF2_MBUF_L_ACKS		0x00040000 /* large mbufs for ack compression */
+#define	TF2_MBUF_ACKCMP		0x00080000 /* mbuf ack compression ok */
+#define	TF2_SUPPORTS_MBUFQ	0x00100000 /* Supports the mbuf queue method */
+#define	TF2_MBUF_QUEUE_READY	0x00200000 /* Inputs can be queued */
+#define	TF2_DONT_SACK_QUEUE	0x00400000 /* Don't wake on sack */
+#define	TF2_CANNOT_DO_ECN	0x00800000 /* The stack does not do ECN */
 
 /*
  * Structure to hold TCP options that are only used during segment
@@ -1543,6 +1539,15 @@
 int	tcp_do_ack_accounting(struct tcpcb *tp, struct tcphdr *th,
 	    struct tcpopt *to, uint32_t tiwin, int mss);
 #endif
+static inline void
+tcp_lro_features_off(struct tcpcb *tp)
+{
+	tp->t_flags2 &= ~(TF2_SUPPORTS_MBUFQ|
+	    TF2_MBUF_QUEUE_READY|
+	    TF2_DONT_SACK_QUEUE|
+	    TF2_MBUF_ACKCMP|
+	    TF2_MBUF_L_ACKS);
+}
 
 static inline void
 tcp_fields_to_host(struct tcphdr *th)
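For reference, the six migrated bits land in the gap above TF2_HPTS_CALLS. A compile-time check (not in the patch) that the new t_flags2 layout is collision-free:

_Static_assert((TF2_HPTS_CALLS | TF2_MBUF_L_ACKS | TF2_MBUF_ACKCMP |
    TF2_SUPPORTS_MBUFQ | TF2_MBUF_QUEUE_READY | TF2_DONT_SACK_QUEUE |
    TF2_CANNOT_DO_ECN) == 0x00fe0000,
    "t_flags2 LRO/pacing bits overlap an existing flag");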