diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c --- a/sys/dev/cxgbe/tom/t4_cpl_io.c +++ b/sys/dev/cxgbe/tom/t4_cpl_io.c @@ -306,7 +306,7 @@ assign_rxopt(struct tcpcb *tp, uint16_t opt) { struct toepcb *toep = tp->t_toe; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); struct adapter *sc = td_adapter(toep->td); INP_LOCK_ASSERT(inp); @@ -442,7 +442,7 @@ t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) { struct adapter *sc = tod->tod_softc; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); struct socket *so = inp->inp_socket; struct sockbuf *sb = &so->so_rcv; struct toepcb *toep = tp->t_toe; @@ -466,7 +466,7 @@ void t4_rcvd(struct toedev *tod, struct tcpcb *tp) { - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); struct socket *so = inp->inp_socket; struct sockbuf *sb = &so->so_rcv; @@ -1276,7 +1276,7 @@ { struct adapter *sc = tod->tod_softc; #ifdef INVARIANTS - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); #endif struct toepcb *toep = tp->t_toe; @@ -1295,7 +1295,7 @@ { struct adapter *sc = tod->tod_softc; #ifdef INVARIANTS - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); #endif struct toepcb *toep = tp->t_toe; @@ -1316,7 +1316,7 @@ { struct adapter *sc = tod->tod_softc; #if defined(INVARIANTS) - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); #endif struct toepcb *toep = tp->t_toe; diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c --- a/sys/dev/cxgbe/tom/t4_listen.c +++ b/sys/dev/cxgbe/tom/t4_listen.c @@ -519,7 +519,7 @@ struct adapter *sc = tod->tod_softc; struct vi_info *vi; struct port_info *pi; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); struct listen_ctx *lctx; int i, rc, v; struct offload_settings settings; @@ -615,7 +615,7 @@ { struct listen_ctx *lctx; struct adapter *sc = tod->tod_softc; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); INP_WLOCK_ASSERT(inp); diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c --- a/sys/dev/cxgbe/tom/t4_tom.c +++ b/sys/dev/cxgbe/tom/t4_tom.c @@ -366,7 +366,7 @@ t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp) { #if defined(KTR) || defined(INVARIANTS) - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); #endif struct toepcb *toep = tp->t_toe; @@ -820,7 +820,7 @@ struct adapter *sc = tod->tod_softc; struct toepcb *toep = tp->t_toe; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); MPASS(ti != NULL); fill_tcp_info(sc, toep->tid, ti); @@ -833,7 +833,7 @@ { struct toepcb *toep = tp->t_toe; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); MPASS(tls != NULL); return (tls_alloc_ktls(toep, tls, direction)); @@ -918,7 +918,7 @@ struct ulp_txpkt *ulpmc; int idx, len; struct wrq_cookie cookie; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); struct toepcb *toep = tp->t_toe; struct adapter *sc = td_adapter(toep->td); unsigned short *mtus = &sc->params.mtus[0]; diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c --- a/sys/kern/uipc_ktls.c +++ b/sys/kern/uipc_ktls.c @@ -3225,7 +3225,7 @@ struct ktls_session *tls; tp = arg; - inp = tp->t_inpcb; + inp = tptoinpcb(tp); INP_WLOCK_ASSERT(inp); so = inp->inp_socket; SOCK_LOCK(so); diff --git a/sys/netinet/cc/cc_cdg.c b/sys/netinet/cc/cc_cdg.c --- a/sys/netinet/cc/cc_cdg.c +++ b/sys/netinet/cc/cc_cdg.c @@ -297,7 +297,7 @@ { struct cdg *cdg_data; - 
INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp)); if (ptr == NULL) { cdg_data = malloc(sizeof(struct cdg), M_CC_MEM, M_NOWAIT); if (cdg_data == NULL) diff --git a/sys/netinet/cc/cc_chd.c b/sys/netinet/cc/cc_chd.c --- a/sys/netinet/cc/cc_chd.c +++ b/sys/netinet/cc/cc_chd.c @@ -324,7 +324,7 @@ { struct chd *chd_data; - INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp)); if (ptr == NULL) { chd_data = malloc(sizeof(struct chd), M_CC_MEM, M_NOWAIT); if (chd_data == NULL) diff --git a/sys/netinet/cc/cc_cubic.c b/sys/netinet/cc/cc_cubic.c --- a/sys/netinet/cc/cc_cubic.c +++ b/sys/netinet/cc/cc_cubic.c @@ -150,8 +150,8 @@ log.u_bbr.delivered = cubicd->css_lowrtt_fas; log.u_bbr.pkt_epoch = ccv->flags; TCP_LOG_EVENTP(tp, NULL, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, + &tptosocket(tp)->so_rcv, + &tptosocket(tp)->so_snd, TCP_HYSTART, 0, 0, &log, false, &tv); } @@ -387,7 +387,7 @@ { struct cubic *cubic_data; - INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp)); if (ptr == NULL) { cubic_data = malloc(sizeof(struct cubic), M_CC_MEM, M_NOWAIT|M_ZERO); if (cubic_data == NULL) diff --git a/sys/netinet/cc/cc_dctcp.c b/sys/netinet/cc/cc_dctcp.c --- a/sys/netinet/cc/cc_dctcp.c +++ b/sys/netinet/cc/cc_dctcp.c @@ -202,7 +202,7 @@ { struct dctcp *dctcp_data; - INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp)); if (ptr == NULL) { dctcp_data = malloc(sizeof(struct dctcp), M_CC_MEM, M_NOWAIT|M_ZERO); if (dctcp_data == NULL) diff --git a/sys/netinet/cc/cc_htcp.c b/sys/netinet/cc/cc_htcp.c --- a/sys/netinet/cc/cc_htcp.c +++ b/sys/netinet/cc/cc_htcp.c @@ -256,7 +256,7 @@ { struct htcp *htcp_data; - INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp)); if (ptr == NULL) { htcp_data = malloc(sizeof(struct htcp), M_CC_MEM, M_NOWAIT); if (htcp_data == NULL) diff --git a/sys/netinet/cc/cc_newreno.c b/sys/netinet/cc/cc_newreno.c --- a/sys/netinet/cc/cc_newreno.c +++ b/sys/netinet/cc/cc_newreno.c @@ -160,8 +160,8 @@ log.u_bbr.delivered = nreno->css_lowrtt_fas; log.u_bbr.pkt_epoch = ccv->flags; TCP_LOG_EVENTP(tp, NULL, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, + &tptosocket(tp)->so_rcv, + &tptosocket(tp)->so_snd, TCP_HYSTART, 0, 0, &log, false, &tv); } @@ -178,7 +178,7 @@ { struct newreno *nreno; - INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp)); if (ptr == NULL) { ccv->cc_data = malloc(sizeof(struct newreno), M_CC_MEM, M_NOWAIT); if (ccv->cc_data == NULL) diff --git a/sys/netinet/cc/cc_vegas.c b/sys/netinet/cc/cc_vegas.c --- a/sys/netinet/cc/cc_vegas.c +++ b/sys/netinet/cc/cc_vegas.c @@ -187,7 +187,7 @@ { struct vegas *vegas_data; - INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp)); if (ptr == NULL) { vegas_data = malloc(sizeof(struct vegas), M_CC_MEM, M_NOWAIT); if (vegas_data == NULL) diff --git a/sys/netinet/khelp/h_ertt.c b/sys/netinet/khelp/h_ertt.c --- a/sys/netinet/khelp/h_ertt.c +++ b/sys/netinet/khelp/h_ertt.c @@ -219,7 +219,7 @@ measurenext = measurenext_len = multiack = rts = rtt_bytes_adjust = 0; acked = th->th_ack - tp->snd_una; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); /* Packet has provided new acknowledgements. 
*/ if (acked > 0 || new_sacked_bytes) { @@ -452,7 +452,7 @@ len = thdp->len; tso = thdp->tso; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (len > 0) { txsi = uma_zalloc(txseginfo_zone, M_NOWAIT); diff --git a/sys/netinet/tcp_fastopen.c b/sys/netinet/tcp_fastopen.c --- a/sys/netinet/tcp_fastopen.c +++ b/sys/netinet/tcp_fastopen.c @@ -867,7 +867,7 @@ void tcp_fastopen_connect(struct tcpcb *tp) { - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); struct tcp_fastopen_ccache_bucket *ccb; struct tcp_fastopen_ccache_entry *cce; sbintime_t now; @@ -875,7 +875,6 @@ uint64_t psk_cookie; psk_cookie = 0; - inp = tp->t_inpcb; cce = tcp_fastopen_ccache_lookup(&inp->inp_inc, &ccb); if (cce) { if (cce->disable_time == 0) { @@ -955,7 +954,7 @@ void tcp_fastopen_disable_path(struct tcpcb *tp) { - struct in_conninfo *inc = &tp->t_inpcb->inp_inc; + struct in_conninfo *inc = &tptoinpcb(tp)->inp_inc; struct tcp_fastopen_ccache_bucket *ccb; struct tcp_fastopen_ccache_entry *cce; @@ -981,7 +980,7 @@ tcp_fastopen_update_cache(struct tcpcb *tp, uint16_t mss, uint8_t cookie_len, uint8_t *cookie) { - struct in_conninfo *inc = &tp->t_inpcb->inp_inc; + struct in_conninfo *inc = &tptoinpcb(tp)->inp_inc; struct tcp_fastopen_ccache_bucket *ccb; struct tcp_fastopen_ccache_entry *cce; diff --git a/sys/netinet/tcp_hpts.c b/sys/netinet/tcp_hpts.c --- a/sys/netinet/tcp_hpts.c +++ b/sys/netinet/tcp_hpts.c @@ -463,8 +463,8 @@ log.u_bbr.pkt_epoch = hpts->p_runningslot; log.u_bbr.use_lt_bw = 1; TCP_LOG_EVENTP(tp, NULL, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, + &tptosocket(tp)->so_rcv, + &tptosocket(tp)->so_snd, BBR_LOG_HPTSDIAG, 0, 0, &log, false, tv); } diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c --- a/sys/netinet/tcp_input.c +++ b/sys/netinet/tcp_input.c @@ -309,11 +309,12 @@ cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs, uint16_t type) { + struct inpcb *inp = tptoinpcb(tp); #ifdef STATS int32_t gput; #endif - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); tp->ccv->nsegs = nsegs; tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th); @@ -380,11 +381,11 @@ cc_conn_init(struct tcpcb *tp) { struct hc_metrics_lite metrics; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); u_int maxseg; int rtt; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); tcp_hc_get(&inp->inp_inc, &metrics); maxseg = tcp_maxseg(tp); @@ -435,7 +436,7 @@ void inline cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); #ifdef STATS stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); @@ -496,7 +497,7 @@ void inline cc_post_recovery(struct tcpcb *tp, struct tcphdr *th) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); /* XXXLAS: KASSERT that we're in recovery? 
*/ @@ -528,7 +529,7 @@ void inline cc_ecnpkt_handler_flags(struct tcpcb *tp, uint16_t flags, uint8_t iptos) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (CC_ALGO(tp)->ecnpkt_handler != NULL) { switch (iptos & IPTOS_ECN_MASK) { @@ -1515,10 +1516,10 @@ tcp_handle_wakeup(struct tcpcb *tp) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (tp->t_flags & TF_WAKESOR) { - struct socket *so = tp->t_inpcb->inp_socket; + struct socket *so = tptosocket(tp); tp->t_flags &= ~TF_WAKESOR; SOCKBUF_LOCK_ASSERT(&so->so_rcv); @@ -1536,7 +1537,8 @@ uint32_t tiwin; uint16_t nsegs; char *s; - struct in_conninfo *inc; + struct inpcb *inp = tptoinpcb(tp); + struct in_conninfo *inc = &inp->inp_inc; struct mbuf *mfree; struct tcpopt to; int tfo_syn; @@ -1552,13 +1554,12 @@ short ostate = 0; #endif thflags = tcp_get_flags(th); - inc = &tp->t_inpcb->inp_inc; tp->sackhint.last_sack_ack = 0; sack_changed = 0; nsegs = max(1, m->m_pkthdr.lro_nsegs); NET_EPOCH_ASSERT(); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", __func__)); KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", @@ -1695,7 +1696,7 @@ if (to.to_flags & TOF_MSS) mss = to.to_mss; else - if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) + if ((inp->inp_vflag & INP_IPV6) != 0) mss = TCP6_MSS; else mss = TCP_MSS; @@ -2860,7 +2861,7 @@ } process_ACK: - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); /* * Adjust for the SYN bit in sequence space, @@ -3055,7 +3056,7 @@ } step6: - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); /* * Update window information. @@ -3140,7 +3141,7 @@ tp->rcv_up = tp->rcv_nxt; } dodata: /* XXX */ - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); /* * Process the segment text, merging it into the TCP sequencing queue, @@ -3325,13 +3326,13 @@ (void) tcp_output(tp); check_delack: - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); if (tp->t_flags & TF_DELACK) { tp->t_flags &= ~TF_DELACK; tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); } - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); return; dropafterack: @@ -3365,14 +3366,14 @@ TCP_PROBE3(debug__input, tp, th, m); tp->t_flags |= TF_ACKNOW; (void) tcp_output(tp); - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); m_freem(m); return; dropwithreset: if (tp != NULL) { tcp_dropwithreset(m, th, tp, tlen, rstreason); - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); } else tcp_dropwithreset(m, th, NULL, tlen, rstreason); return; @@ -3382,13 +3383,13 @@ * Drop space held by incoming segment and return. */ #ifdef TCPDEBUG - if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) + if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG)) tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, &tcp_savetcp, 0); #endif TCP_PROBE3(debug__input, tp, th, m); if (tp != NULL) { - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); } m_freem(m); } @@ -3410,7 +3411,7 @@ #endif if (tp != NULL) { - INP_LOCK_ASSERT(tp->t_inpcb); + INP_LOCK_ASSERT(tptoinpcb(tp)); } /* Don't bother if destination was broadcast/multicast. 
*/ @@ -3582,7 +3583,7 @@ char *cp = mtod(m, caddr_t) + cnt; struct tcpcb *tp = sototcpcb(so); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tp->t_iobc = *cp; tp->t_oobflags |= TCPOOB_HAVEDATA; @@ -3609,7 +3610,7 @@ { int delta; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); TCPSTAT_INC(tcps_rttupdated); tp->t_rttupdated++; @@ -3712,7 +3713,7 @@ { int mss = 0; uint32_t maxmtu = 0; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); struct hc_metrics_lite metrics; #ifdef INET6 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; @@ -3723,7 +3724,7 @@ size_t min_protoh = sizeof(struct tcpiphdr); #endif - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); if (tp->t_port) min_protoh += V_tcp_udp_tunneling_overhead; @@ -3860,7 +3861,7 @@ { int mss; uint32_t bufsize; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); struct socket *so; struct hc_metrics_lite metrics; struct tcp_ifcap cap; @@ -3871,7 +3872,6 @@ tcp_mss_update(tp, offer, -1, &metrics, &cap); mss = tp->t_maxseg; - inp = tp->t_inpcb; /* * If there's a pipesize, change the socket buffer to that size, @@ -3977,7 +3977,7 @@ int snd_cnt = 0, limit = 0, del_data = 0, pipe = 0; int maxseg = tcp_maxseg(tp); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); /* * Compute the amount of data that this ACK is indicating @@ -4054,7 +4054,7 @@ uint32_t ocwnd = tp->snd_cwnd; u_int maxseg = tcp_maxseg(tp); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tcp_timer_activate(tp, TT_REXMT, 0); tp->t_rtttime = 0; diff --git a/sys/netinet/tcp_log_buf.c b/sys/netinet/tcp_log_buf.c --- a/sys/netinet/tcp_log_buf.c +++ b/sys/netinet/tcp_log_buf.c @@ -499,7 +499,7 @@ tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); #ifdef STATS if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL) @@ -522,20 +522,21 @@ int tcp_log_set_tag(struct tcpcb *tp, char *tag) { + struct inpcb *inp = tptoinpcb(tp); struct tcp_log_id_bucket *tlb; int tree_locked; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); tree_locked = TREE_UNLOCKED; tlb = tp->t_lib; if (tlb == NULL) { - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); return (EOPNOTSUPP); } TCPID_BUCKET_REF(tlb); - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); TCPID_BUCKET_LOCK(tlb); strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN); if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL)) @@ -562,13 +563,12 @@ { struct tcp_log_id_bucket *tlb, *tmp_tlb; struct tcp_log_id_node *tln; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); int tree_locked, rv; bool bucket_locked; tlb = NULL; tln = NULL; - inp = tp->t_inpcb; tree_locked = TREE_UNLOCKED; bucket_locked = false; @@ -922,7 +922,7 @@ { size_t len; - INP_LOCK_ASSERT(tp->t_inpcb); + INP_LOCK_ASSERT(tptoinpcb(tp)); if (tp->t_lib != NULL) { len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN); KASSERT(len < TCP_LOG_ID_LEN, @@ -944,18 +944,19 @@ size_t tcp_log_get_tag(struct tcpcb *tp, char *buf) { + struct inpcb *inp = tptoinpcb(tp); struct tcp_log_id_bucket *tlb; size_t len; int tree_locked; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); tree_locked = TREE_UNLOCKED; tlb = tp->t_lib; if (tlb != NULL) { TCPID_BUCKET_REF(tlb); - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); TCPID_BUCKET_LOCK(tlb); len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN); KASSERT(len < TCP_LOG_TAG_LEN, @@ -973,7 +974,7 @@ } else TCPID_TREE_UNLOCK_ASSERT(); } else { - INP_WUNLOCK(tp->t_inpcb); + 
INP_WUNLOCK(inp); *buf = '\0'; len = 0; } @@ -990,7 +991,7 @@ tcp_log_get_id_cnt(struct tcpcb *tp) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt); } @@ -1298,11 +1299,12 @@ static void tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln) { + struct inpcb *inp = tptoinpcb(tp); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); - tln->tln_ie = tp->t_inpcb->inp_inc.inc_ie; - if (tp->t_inpcb->inp_inc.inc_flags & INC_ISIPV6) + tln->tln_ie = inp->inp_inc.inc_ie; + if (inp->inp_inc.inc_flags & INC_ISIPV6) tln->tln_af = AF_INET6; else tln->tln_af = AF_INET; @@ -1323,7 +1325,7 @@ struct tcp_log_mem *log_entry; sbintime_t callouttime; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL, false); @@ -1383,11 +1385,13 @@ */ if (tp->t_lin != NULL) { + struct inpcb *inp = tptoinpcb(tp); + /* Copy the relevant information to the log entry. */ tln = tp->t_lin; - KASSERT(tln->tln_inp == tp->t_inpcb, - ("%s: Mismatched inp (tln->tln_inp=%p, tp->t_inpcb=%p)", - __func__, tln->tln_inp, tp->t_inpcb)); + KASSERT(tln->tln_inp == inp, + ("%s: Mismatched inp (tln->tln_inp=%p, tp inpcb=%p)", + __func__, tln->tln_inp, inp)); tcp_log_move_tp_to_node(tp, tln); /* Clear information from the PCB. */ @@ -1401,7 +1405,7 @@ * racing to lock this node when we move it to the expire * queue. */ - in_pcbref(tp->t_inpcb); + in_pcbref(inp); /* * Store the entry on the expiry list. The exact behavior @@ -1496,10 +1500,8 @@ tcp_log_purge_tp_logbuf(struct tcpcb *tp) { struct tcp_log_mem *log_entry; - struct inpcb *inp __diagused; - inp = tp->t_inpcb; - INP_WLOCK_ASSERT(inp); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (tp->t_lognum == 0) return; @@ -1533,7 +1535,7 @@ ("%s called with inconsistent func (%p) and line (%d) arguments", __func__, func, line)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (tcp_disable_all_bb_logs) { /* * The global shutdown logging @@ -1748,7 +1750,7 @@ { struct tcp_log_mem *log_entry; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); switch(state) { case TCP_LOG_STATE_CLEAR: while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) @@ -1786,7 +1788,7 @@ struct tcp_log_mem *log_entry, *next; int target, skip; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if ((target = tp->t_lognum / 2) == 0) return; @@ -1930,12 +1932,11 @@ struct tcp_log_stailq log_tailq; struct tcp_log_mem *log_entry, *log_next; struct tcp_log_buffer *out_entry; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); size_t outsize, entrysize; int error, outnum; - INP_WLOCK_ASSERT(tp->t_inpcb); - inp = tp->t_inpcb; + INP_WLOCK_ASSERT(inp); /* * Determine which log entries will fit in the buffer. As an @@ -2153,12 +2154,11 @@ tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force) { struct tcp_log_dev_log_queue *entry; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); #ifdef TCPLOG_DEBUG_COUNTERS int num_entries; #endif - inp = tp->t_inpcb; INP_WLOCK_ASSERT(inp); /* If there are no log entries, there is nothing to do. */ @@ -2586,11 +2586,12 @@ void tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason) { + struct inpcb *inp = tptoinpcb(tp); struct tcp_log_id_bucket *tlb; int tree_locked; /* Figure out our bucket and lock it. 
*/ - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); tlb = tp->t_lib; if (tlb == NULL) { /* @@ -2598,11 +2599,11 @@ * session's traces. */ (void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true); - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); return; } TCPID_BUCKET_REF(tlb); - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); TCPID_BUCKET_LOCK(tlb); /* If we are the last reference, we have nothing more to do here. */ @@ -2632,7 +2633,7 @@ tcp_log_flowend(struct tcpcb *tp) { if (tp->t_logstate != TCP_LOG_STATE_OFF) { - struct socket *so = tp->t_inpcb->inp_socket; + struct socket *so = tptosocket(tp); TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FLOWEND, 0, 0, NULL, false); } diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c --- a/sys/netinet/tcp_lro.c +++ b/sys/netinet/tcp_lro.c @@ -718,11 +718,9 @@ log.u_bbr.inhpts = 1; else log.u_bbr.inhpts = 0; - TCP_LOG_EVENTP(tp, NULL, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, - TCP_LOG_LRO, 0, - 0, &log, false, &tv); + TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, + &tptosocket(tp)->so_snd, + TCP_LOG_LRO, 0, 0, &log, false, &tv); } } #endif diff --git a/sys/netinet/tcp_offload.c b/sys/netinet/tcp_offload.c --- a/sys/netinet/tcp_offload.c +++ b/sys/netinet/tcp_offload.c @@ -114,7 +114,7 @@ tcp_offload_listen_start(struct tcpcb *tp) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); EVENTHANDLER_INVOKE(tcp_offload_listen_start, tp); } @@ -123,7 +123,7 @@ tcp_offload_listen_stop(struct tcpcb *tp) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); EVENTHANDLER_INVOKE(tcp_offload_listen_stop, tp); } @@ -134,7 +134,7 @@ struct toedev *tod = tp->tod; KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tod->tod_input(tod, tp, m); } @@ -146,7 +146,7 @@ int error, flags; KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); flags = tcp_outflags[tp->t_state]; @@ -170,7 +170,7 @@ struct toedev *tod = tp->tod; KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tod->tod_rcvd(tod, tp); } @@ -181,7 +181,7 @@ struct toedev *tod = tp->tod; KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tod->tod_ctloutput(tod, tp, sopt_dir, sopt_name); } @@ -192,7 +192,7 @@ struct toedev *tod = tp->tod; KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tod->tod_tcp_info(tod, tp, ti); } @@ -204,7 +204,7 @@ struct toedev *tod = tp->tod; KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); return (tod->tod_alloc_tls_session(tod, tp, tls, direction)); } @@ -215,7 +215,7 @@ struct toedev *tod = tp->tod; KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tod->tod_pcb_detach(tod, tp); } @@ -226,7 +226,7 @@ struct toedev *tod = tp->tod; KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp)); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tod->tod_pmtu_update(tod, tp, seq, mtu); } diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c --- a/sys/netinet/tcp_output.c +++ 
b/sys/netinet/tcp_output.c @@ -184,7 +184,7 @@ void cc_after_idle(struct tcpcb *tp) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (CC_ALGO(tp)->after_idle != NULL) CC_ALGO(tp)->after_idle(tp->ccv); @@ -196,7 +196,8 @@ int tcp_default_output(struct tcpcb *tp) { - struct socket *so = tp->t_inpcb->inp_socket; + struct socket *so = tptosocket(tp); + struct inpcb *inp = tptoinpcb(tp); int32_t len; uint32_t recwin, sendwin; uint16_t flags; @@ -230,7 +231,7 @@ struct ip6_hdr *ip6 = NULL; int isipv6; - isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; + isipv6 = (inp->inp_vflag & INP_IPV6) != 0; #endif #ifdef KERN_TLS const bool hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0; @@ -239,7 +240,7 @@ #endif NET_EPOCH_ASSERT(); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); #ifdef TCP_OFFLOAD if (tp->t_flags & TF_TOE) @@ -542,23 +543,23 @@ */ #ifdef INET6 if (isipv6 && IPSEC_ENABLED(ipv6)) - ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); + ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); #ifdef INET else #endif #endif /* INET6 */ #ifdef INET if (IPSEC_ENABLED(ipv4)) - ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); + ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); #endif /* INET */ #endif /* IPSEC */ #ifdef INET6 if (isipv6) - ipoptlen = ip6_optlen(tp->t_inpcb); + ipoptlen = ip6_optlen(inp); else #endif - if (tp->t_inpcb->inp_options) - ipoptlen = tp->t_inpcb->inp_options->m_len - + if (inp->inp_options) + ipoptlen = inp->inp_options->m_len - offsetof(struct ipoption, ipopt_list); else ipoptlen = 0; @@ -809,7 +810,7 @@ if ((tp->t_flags & TF_NOOPT) == 0) { /* Maximum segment size. */ if (flags & TH_SYN) { - to.to_mss = tcp_mssopt(&tp->t_inpcb->inp_inc); + to.to_mss = tcp_mssopt(&inp->inp_inc); if (tp->t_port) to.to_mss -= V_tcp_udp_tunneling_overhead; to.to_flags |= TOF_MSS; @@ -1154,7 +1155,7 @@ SOCKBUF_UNLOCK_ASSERT(&so->so_snd); m->m_pkthdr.rcvif = (struct ifnet *)0; #ifdef MAC - mac_inpcb_create_mbuf(tp->t_inpcb, m); + mac_inpcb_create_mbuf(inp, m); #endif #ifdef INET6 if (isipv6) { @@ -1169,7 +1170,7 @@ } else { th = (struct tcphdr *)(ip6 + 1); } - tcpip_fillheaders(tp->t_inpcb, tp->t_port, ip6, th); + tcpip_fillheaders(inp, tp->t_port, ip6, th); } else #endif /* INET6 */ { @@ -1186,7 +1187,7 @@ th = (struct tcphdr *)(udp + 1); } else th = (struct tcphdr *)(ip + 1); - tcpip_fillheaders(tp->t_inpcb, tp->t_port, ip, th); + tcpip_fillheaders(inp, tp->t_port, ip, th); } /* @@ -1467,7 +1468,7 @@ * Also, desired default hop limit might be changed via * Neighbor Discovery. */ - ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb, NULL); + ip6->ip6_hlim = in6_selecthlim(inp, NULL); /* * Set the packet size here for the benefit of DTrace probes. @@ -1492,13 +1493,12 @@ #endif /* TODO: IPv6 IP6TOS_ECT bit on */ - error = ip6_output(m, tp->t_inpcb->in6p_outputopts, - &tp->t_inpcb->inp_route6, + error = ip6_output(m, inp->in6p_outputopts, &inp->inp_route6, ((so->so_options & SO_DONTROUTE) ? 
IP_ROUTETOIF : 0), - NULL, NULL, tp->t_inpcb); + NULL, NULL, inp); - if (error == EMSGSIZE && tp->t_inpcb->inp_route6.ro_nh != NULL) - mtu = tp->t_inpcb->inp_route6.ro_nh->nh_mtu; + if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) + mtu = inp->inp_route6.ro_nh->nh_mtu; } #endif /* INET6 */ #if defined(INET) && defined(INET6) @@ -1508,8 +1508,8 @@ { ip->ip_len = htons(m->m_pkthdr.len); #ifdef INET6 - if (tp->t_inpcb->inp_vflag & INP_IPV6PROTO) - ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL); + if (inp->inp_vflag & INP_IPV6PROTO) + ip->ip_ttl = in6_selecthlim(inp, NULL); #endif /* INET6 */ /* * If we do path MTU discovery, then we set DF on every packet. @@ -1538,12 +1538,11 @@ tcp_pcap_add(th, m, &(tp->t_outpkts)); #endif - error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route, - ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, - tp->t_inpcb); + error = ip_output(m, inp->inp_options, &inp->inp_route, + ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, inp); - if (error == EMSGSIZE && tp->t_inpcb->inp_route.ro_nh != NULL) - mtu = tp->t_inpcb->inp_route.ro_nh->nh_mtu; + if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) + mtu = inp->inp_route.ro_nh->nh_mtu; } #endif /* INET */ diff --git a/sys/netinet/tcp_ratelimit.c b/sys/netinet/tcp_ratelimit.c --- a/sys/netinet/tcp_ratelimit.c +++ b/sys/netinet/tcp_ratelimit.c @@ -1317,14 +1317,15 @@ tcp_set_pacing_rate(struct tcpcb *tp, struct ifnet *ifp, uint64_t bytes_per_sec, int flags, int *error, uint64_t *lower_rate) { + struct inpcb *inp = tptoinpcb(tp); const struct tcp_hwrate_limit_table *rte; #ifdef KERN_TLS struct ktls_session *tls; #endif - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); - if (tp->t_inpcb->inp_snd_tag == NULL) { + if (inp->inp_snd_tag == NULL) { /* * We are setting up a rate for the first time. */ @@ -1336,8 +1337,8 @@ } #ifdef KERN_TLS tls = NULL; - if (tp->t_inpcb->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) { - tls = tp->t_inpcb->inp_socket->so_snd.sb_tls_info; + if (tptosocket(tp)->so_snd.sb_flags & SB_TLS_IFNET) { + tls = tptosocket(tp)->so_snd.sb_tls_info; if ((ifp->if_capenable & IFCAP_TXTLS_RTLMT) == 0 || tls->mode != TCP_TLS_MODE_IFNET) { @@ -1347,7 +1348,7 @@ } } #endif - rte = rt_setup_rate(tp->t_inpcb, ifp, bytes_per_sec, flags, error, lower_rate); + rte = rt_setup_rate(inp, ifp, bytes_per_sec, flags, error, lower_rate); if (rte) rl_increment_using(rte); #ifdef KERN_TLS @@ -1358,7 +1359,7 @@ * tag to a TLS ratelimit tag. 
*/ MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS); - ktls_output_eagain(tp->t_inpcb, tls); + ktls_output_eagain(inp, tls); } #endif } else { @@ -1381,6 +1382,7 @@ struct tcpcb *tp, struct ifnet *ifp, uint64_t bytes_per_sec, int flags, int *error, uint64_t *lower_rate) { + struct inpcb *inp = tptoinpcb(tp); const struct tcp_hwrate_limit_table *nrte; const struct tcp_rate_set *rs; #ifdef KERN_TLS @@ -1388,7 +1390,7 @@ #endif int err; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); if (crte == NULL) { /* Wrong interface */ @@ -1398,8 +1400,8 @@ } #ifdef KERN_TLS - if (tp->t_inpcb->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) { - tls = tp->t_inpcb->inp_socket->so_snd.sb_tls_info; + if (tptosocket(tp)->so_snd.sb_flags & SB_TLS_IFNET) { + tls = tptosocket(tp)->so_snd.sb_tls_info; if (tls->mode != TCP_TLS_MODE_IFNET) tls = NULL; else if (tls->snd_tag != NULL && @@ -1427,7 +1429,7 @@ } } #endif - if (tp->t_inpcb->inp_snd_tag == NULL) { + if (inp->inp_snd_tag == NULL) { /* Wrong interface */ tcp_rel_pacing_rate(crte, tp); if (error) @@ -1466,7 +1468,7 @@ err = ktls_modify_txrtlmt(tls, nrte->rate); else #endif - err = in_pcbmodify_txrtlmt(tp->t_inpcb, nrte->rate); + err = in_pcbmodify_txrtlmt(inp, nrte->rate); if (err) { struct tcp_rate_set *lrs; uint64_t pre; @@ -1475,8 +1477,8 @@ lrs = __DECONST(struct tcp_rate_set *, rs); pre = atomic_fetchadd_64(&lrs->rs_flows_using, -1); /* Do we still have a snd-tag attached? */ - if (tp->t_inpcb->inp_snd_tag) - in_pcbdetach_txrtlmt(tp->t_inpcb); + if (inp->inp_snd_tag) + in_pcbdetach_txrtlmt(inp); if (pre == 1) { struct epoch_tracker et; @@ -1508,11 +1510,12 @@ void tcp_rel_pacing_rate(const struct tcp_hwrate_limit_table *crte, struct tcpcb *tp) { + struct inpcb *inp = tptoinpcb(tp); const struct tcp_rate_set *crs; struct tcp_rate_set *rs; uint64_t pre; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); tp->t_pacing_rate = -1; crs = crte->ptbl; @@ -1543,7 +1546,7 @@ * ktls_output_eagain() to reset the send tag to a plain * TLS tag? 
*/ - in_pcbdetach_txrtlmt(tp->t_inpcb); + in_pcbdetach_txrtlmt(inp); } #define ONE_POINT_TWO_MEG 150000 /* 1.2 megabits in bytes */ @@ -1573,8 +1576,8 @@ log.u_bbr.cur_del_rate = bw; log.u_bbr.delRate = hw_rate; TCP_LOG_EVENTP(tp, NULL, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, + &tptosocket(tp)->so_rcv, + &tptosocket(tp)->so_snd, TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); } diff --git a/sys/netinet/tcp_reass.c b/sys/netinet/tcp_reass.c --- a/sys/netinet/tcp_reass.c +++ b/sys/netinet/tcp_reass.c @@ -203,6 +203,7 @@ tcp_log_reassm(struct tcpcb *tp, struct tseg_qent *q, struct tseg_qent *p, tcp_seq seq, int len, uint8_t action, int instance) { + struct socket *so = tptosocket(tp); uint32_t cts; struct timeval tv; @@ -230,9 +231,7 @@ log.u_bbr.flex7 = instance; log.u_bbr.flex8 = action; log.u_bbr.timeStamp = cts; - TCP_LOG_EVENTP(tp, NULL, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, + TCP_LOG_EVENTP(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_REASS, 0, len, &log, false, &tv); } @@ -305,7 +304,7 @@ { struct tseg_qent *qe; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) { TAILQ_REMOVE(&tp->t_segq, qe, tqe_q); @@ -530,12 +529,13 @@ struct tseg_qent *nq = NULL; struct tseg_qent *te = NULL; struct mbuf *mlast = NULL; - struct sockbuf *sb; - struct socket *so = tp->t_inpcb->inp_socket; + struct inpcb *inp = tptoinpcb(tp); + struct socket *so = tptosocket(tp); + struct sockbuf *sb = &so->so_rcv; char *s = NULL; int flags, i, lenofoh; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); /* * XXX: tcp_reass() is rather inefficient with its data structures * and should be rewritten (see NetBSD for optimizations). @@ -597,7 +597,6 @@ * Will it fit? 
*/ lenofoh = tcp_reass_overhead_of_chain(m, &mlast); - sb = &tp->t_inpcb->inp_socket->so_rcv; if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) && (sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) { /* No room */ @@ -608,7 +607,7 @@ #ifdef TCP_REASS_LOGGING tcp_log_reassm(tp, NULL, NULL, th->th_seq, lenofoh, TCP_R_LOG_LIMIT_REACHED, 0); #endif - if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) { + if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) { log(LOG_DEBUG, "%s; %s: mbuf count limit reached, " "segment dropped\n", s, __func__); free(s, M_TCPLOG); @@ -987,7 +986,7 @@ */ TCPSTAT_INC(tcps_rcvreassfull); *tlenp = 0; - if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) { + if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) { log(LOG_DEBUG, "%s; %s: queue limit reached, " "segment dropped\n", s, __func__); free(s, M_TCPLOG); @@ -1003,7 +1002,7 @@ tcp_reass_maxqueuelen)) { TCPSTAT_INC(tcps_rcvreassfull); *tlenp = 0; - if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) { + if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) { log(LOG_DEBUG, "%s; %s: queue limit reached, " "segment dropped\n", s, __func__); free(s, M_TCPLOG); @@ -1024,8 +1023,7 @@ TCPSTAT_INC(tcps_rcvmemdrop); m_freem(m); *tlenp = 0; - if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, - NULL))) { + if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) { log(LOG_DEBUG, "%s; %s: global zone limit " "reached, segment dropped\n", s, __func__); free(s, M_TCPLOG); diff --git a/sys/netinet/tcp_sack.c b/sys/netinet/tcp_sack.c --- a/sys/netinet/tcp_sack.c +++ b/sys/netinet/tcp_sack.c @@ -178,7 +178,7 @@ int i, j, n, identical; tcp_seq start, end; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end")); @@ -279,7 +279,7 @@ struct sackblk head_blk, saved_blks[MAX_SACK_BLKS]; int num_head, num_saved, i; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); /* Check arguments. */ KASSERT(SEQ_LEQ(rcv_start, rcv_end), ("rcv_start <= rcv_end")); @@ -410,7 +410,7 @@ struct sackblk saved_blks[MAX_SACK_BLKS]; int num_saved, i; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); /* * Clean up any DSACK blocks that * are in our queue of sack blocks. @@ -451,7 +451,7 @@ { int i; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tp->rcv_numsacks = 0; for (i = 0; i < MAX_SACK_BLKS; i++) tp->sackblks[i].start = tp->sackblks[i].end=0; @@ -561,7 +561,7 @@ int i, j, num_sack_blks, sack_changed; int delivered_data, left_edge_delta; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); num_sack_blks = 0; sack_changed = 0; @@ -830,7 +830,7 @@ { struct sackhole *q; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) tcp_sackhole_remove(tp, q); tp->sackhint.sack_bytes_rexmit = 0; @@ -854,7 +854,7 @@ int num_segs = 1; u_int maxseg = tcp_maxseg(tp); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tcp_timer_activate(tp, TT_REXMT, 0); tp->t_rtttime = 0; /* Send one or 2 segments based on how much new data was acked. 
*/ @@ -914,7 +914,7 @@ { struct sackhole *p; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); *sack_bytes_rexmt = 0; TAILQ_FOREACH(p, &tp->snd_holes, scblink) { if (SEQ_LT(p->rxmit, p->end)) { @@ -952,7 +952,7 @@ { struct sackhole *hole = NULL; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); *sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit; hole = tp->sackhint.nexthole; if (hole == NULL) @@ -995,7 +995,7 @@ { struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (cur == NULL) return; /* No holes */ if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack)) diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c --- a/sys/netinet/tcp_stacks/bbr.c +++ b/sys/netinet/tcp_stacks/bbr.c @@ -584,7 +584,8 @@ (tp->t_state < TCPS_ESTABLISHED)) { /* Nothing on the send map */ activate_rxt: - if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { + if (SEQ_LT(tp->snd_una, tp->snd_max) || + sbavail(&tptosocket(tp)->so_snd)) { uint64_t tov; time_since_sent = 0; @@ -734,7 +735,7 @@ static void bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_t frm, int32_t slot, uint32_t tot_len) { - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); struct hpts_diag diag; uint32_t delayed_ack = 0; uint32_t left = 0; @@ -743,7 +744,6 @@ int32_t delay_calc = 0; uint32_t prev_delay = 0; - inp = tp->t_inpcb; if (tcp_in_hpts(inp)) { /* A previous call is already set up */ return; @@ -909,14 +909,14 @@ inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; bbr->rc_pacer_started = cts; - (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), + (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot), __LINE__, &diag); bbr->rc_timer_first = 0; bbr->bbr_timer_src = frm; bbr_log_to_start(bbr, cts, hpts_timeout, slot, 1); bbr_log_hpts_diag(bbr, cts, &diag); } else if (hpts_timeout) { - (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), + (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), __LINE__, &diag); /* * We add the flag here as well if the slot is set, @@ -3607,11 +3607,12 @@ bbr_ack_received(struct tcpcb *tp, struct tcp_bbr *bbr, struct tcphdr *th, uint32_t bytes_this_ack, uint32_t sack_changed, uint32_t prev_acked, int32_t line, uint32_t losses) { - INP_WLOCK_ASSERT(tp->t_inpcb); uint64_t bw; uint32_t cwnd, target_cwnd, saved_bytes, maxseg; int32_t meth; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + #ifdef STATS if ((tp->t_flags & TF_GPUTINPROG) && SEQ_GEQ(th->th_ack, tp->gput_ack)) { @@ -3762,7 +3763,7 @@ struct tcp_bbr *bbr; bbr = (struct tcp_bbr *)tp->t_fb_ptr; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) <= tp->snd_cwnd) { @@ -3776,7 +3777,7 @@ struct tcp_bbr *bbr; uint32_t flight; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); bbr = (struct tcp_bbr *)tp->t_fb_ptr; /* * Here we just exit recovery. @@ -3931,7 +3932,7 @@ { struct tcp_bbr *bbr; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); #ifdef STATS stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); #endif @@ -4589,7 +4590,7 @@ * A TLP timer has expired. We have been idle for 2 rtts. So we now * need to figure out how to force a full MSS segment out. 
*/ - so = tp->t_inpcb->inp_socket; + so = tptosocket(tp); avail = sbavail(&so->so_snd); out = ctf_outstanding(tp); if (out > tp->snd_wnd) { @@ -4753,8 +4754,7 @@ } if (bbr->rc_in_persist == 0) return (0); - KASSERT(tp->t_inpcb != NULL, - ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); + /* * Persistence timer into zero window. Force a byte to be output, if * possible. @@ -4825,13 +4825,12 @@ bbr_timeout_keepalive(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) { struct tcptemp *t_template; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); if (bbr->rc_all_timers_stopped) { return (1); } bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; - inp = tp->t_inpcb; bbr_log_to_event(bbr, cts, BBR_TO_FRM_KEEP); /* * Keep-alive timer went off; send something or drop connection if @@ -4969,6 +4968,7 @@ static int bbr_timeout_rxt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) { + struct inpcb *inp = tptoinpcb(tp); int32_t rexmt; int32_t retval = 0; bool isipv6; @@ -5059,7 +5059,7 @@ * catch ESTABLISHED state. */ #ifdef INET6 - isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false; + isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; #else isipv6 = false; #endif @@ -5171,10 +5171,10 @@ if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { #ifdef INET6 if (bbr->r_is_v6) - in6_losing(tp->t_inpcb); + in6_losing(inp); else #endif - in_losing(tp->t_inpcb); + in_losing(inp); tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); tp->t_srtt = 0; } @@ -5221,7 +5221,7 @@ left = bbr->r_ctl.rc_timer_exp - cts; ret = -3; bbr_log_to_processing(bbr, cts, ret, left, hpts_calling); - tcp_hpts_insert(tp->t_inpcb, HPTS_USEC_TO_SLOTS(left)); + tcp_hpts_insert(tptoinpcb(tp), HPTS_USEC_TO_SLOTS(left)); return (1); } bbr->rc_tmr_stopped = 0; @@ -5928,7 +5928,7 @@ * slot (11). * */ - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (err) { /* * We don't log errors -- we could but snd_max does not @@ -7333,7 +7333,7 @@ uint32_t cts, acked, ack_point, sack_changed = 0; uint32_t p_maxseg, maxseg, p_acked = 0; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (tcp_get_flags(th) & TH_RST) { /* We don't log resets */ return (0); @@ -7791,7 +7791,7 @@ /* Send window already scaled. */ } } - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); acked = BYTES_THIS_ACK(tp, th); KMOD_TCPSTAT_ADD(tcps_rcvackpack, (int)nsegs); @@ -7839,7 +7839,7 @@ /* Nothing left outstanding */ nothing_left: bbr_log_progress_event(bbr, tp, ticks, PROGRESS_CLEAR, __LINE__); - if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) + if (sbavail(&so->so_snd) == 0) bbr->rc_tp->t_acktime = 0; if ((sbused(&so->so_snd) == 0) && (tp->t_flags & TF_SENTFIN)) { @@ -8150,7 +8150,7 @@ struct tcp_bbr *bbr; bbr = (struct tcp_bbr *)tp->t_fb_ptr; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); nsegs = max(1, m->m_pkthdr.lro_nsegs); if ((thflags & TH_ACK) && (SEQ_LT(tp->snd_wl1, th->th_seq) || @@ -8196,8 +8196,8 @@ (tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) && TCPS_HAVEESTABLISHED(tp->t_state) && (tp->snd_max == tp->snd_una) && - sbavail(&tp->t_inpcb->inp_socket->so_snd) && - (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { + sbavail(&so->so_snd) && + (sbavail(&so->so_snd) > tp->snd_wnd)) { /* No send window.. we must enter persist */ bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__); } @@ -8212,7 +8212,6 @@ * is surprised. 
*/ tp->rcv_up = tp->rcv_nxt; - INP_WLOCK_ASSERT(tp->t_inpcb); /* * Process the segment text, merging it into the TCP sequencing @@ -8406,7 +8405,6 @@ bbr->rc_timer_first = 1; bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime); - INP_WLOCK_ASSERT(tp->t_inpcb); tcp_twstart(tp); return (1); } @@ -8418,7 +8416,6 @@ (sbavail(&so->so_snd) > ctf_outstanding(tp))) { bbr->r_wanted_output = 1; } - INP_WLOCK_ASSERT(tp->t_inpcb); return (0); } @@ -8663,8 +8660,8 @@ (tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) && TCPS_HAVEESTABLISHED(tp->t_state) && (tp->snd_max == tp->snd_una) && - sbavail(&tp->t_inpcb->inp_socket->so_snd) && - (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { + sbavail(&so->so_snd) && + (sbavail(&so->so_snd) > tp->snd_wnd)) { /* No send window.. we must enter persist */ bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__); } @@ -8746,7 +8743,7 @@ if (tp->snd_una == tp->snd_max) { /* Nothing left outstanding */ bbr_log_progress_event(bbr, tp, ticks, PROGRESS_CLEAR, __LINE__); - if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) + if (sbavail(&so->so_snd) == 0) bbr->rc_tp->t_acktime = 0; bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime); if (bbr->rc_in_persist == 0) { @@ -8782,6 +8779,8 @@ struct tcp_bbr *bbr; int32_t ret_val = 0; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + bbr = (struct tcp_bbr *)tp->t_fb_ptr; ctf_calc_rwin(so, tp); /* @@ -8904,7 +8903,6 @@ tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); tcp_state_change(tp, TCPS_SYN_RECEIVED); } - INP_WLOCK_ASSERT(tp->t_inpcb); /* * Advance th->th_seq to correspond to first data byte. If data, * trim to stay within window, dropping FIN if necessary. @@ -8991,6 +8989,8 @@ int32_t ret_val; struct tcp_bbr *bbr; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + bbr = (struct tcp_bbr *)tp->t_fb_ptr; ctf_calc_rwin(so, tp); if ((thflags & TH_ACK) && @@ -9048,7 +9048,6 @@ ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); return (1); } - INP_WLOCK_ASSERT(tp->t_inpcb); if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { return (ret_val); } @@ -9210,6 +9209,8 @@ struct tcp_bbr *bbr; int32_t ret_val; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + /* * Header prediction: check for the two common cases of a * uni-directional data xfer. 
If the packet has no control flags, @@ -9271,7 +9272,6 @@ if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) return (ret_val); } - INP_WLOCK_ASSERT(tp->t_inpcb); if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { return (ret_val); } @@ -9345,6 +9345,8 @@ struct tcp_bbr *bbr; int32_t ret_val; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + bbr = (struct tcp_bbr *)tp->t_fb_ptr; ctf_calc_rwin(so, tp); if ((thflags & TH_RST) || @@ -9367,7 +9369,6 @@ if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) return (ret_val); } - INP_WLOCK_ASSERT(tp->t_inpcb); if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { return (ret_val); } @@ -9466,6 +9467,8 @@ int32_t ret_val; struct tcp_bbr *bbr; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + bbr = (struct tcp_bbr *)tp->t_fb_ptr; ctf_calc_rwin(so, tp); if ((thflags & TH_RST) || @@ -9488,7 +9491,6 @@ if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) return (ret_val); } - INP_WLOCK_ASSERT(tp->t_inpcb); if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { return (ret_val); } @@ -9590,6 +9592,8 @@ int32_t ret_val; struct tcp_bbr *bbr; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + bbr = (struct tcp_bbr *)tp->t_fb_ptr; ctf_calc_rwin(so, tp); if ((thflags & TH_RST) || @@ -9612,7 +9616,6 @@ if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) return (ret_val); } - INP_WLOCK_ASSERT(tp->t_inpcb); if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { return (ret_val); } @@ -9700,6 +9703,8 @@ int32_t ret_val; struct tcp_bbr *bbr; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + bbr = (struct tcp_bbr *)tp->t_fb_ptr; ctf_calc_rwin(so, tp); if ((thflags & TH_RST) || @@ -9722,7 +9727,6 @@ if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) return (ret_val); } - INP_WLOCK_ASSERT(tp->t_inpcb); if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { return (ret_val); } @@ -9810,6 +9814,8 @@ int32_t ret_val; struct tcp_bbr *bbr; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + bbr = (struct tcp_bbr *)tp->t_fb_ptr; ctf_calc_rwin(so, tp); /* Reset receive buffer auto scaling when not in bulk receive mode. */ @@ -9825,7 +9831,6 @@ ctf_challenge_ack(m, th, tp, iptos, &ret_val); return (ret_val); } - INP_WLOCK_ASSERT(tp->t_inpcb); /* * RFC 1323 PAWS: If we have a timestamp reply on this segment and * it's less than ts_recent, drop it. @@ -9835,7 +9840,6 @@ if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) return (ret_val); } - INP_WLOCK_ASSERT(tp->t_inpcb); if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { return (ret_val); } @@ -9863,7 +9867,6 @@ * p.869. In such cases, we can still calculate the RTT correctly * when RCV.NXT == Last.ACK.Sent. */ - INP_WLOCK_ASSERT(tp->t_inpcb); if ((to->to_flags & TOF_TS) != 0 && SEQ_LEQ(th->th_seq, tp->last_ack_sent) && SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + @@ -9892,7 +9895,6 @@ /* * Ack processing. 
*/ - INP_WLOCK_ASSERT(tp->t_inpcb); if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { return (ret_val); } @@ -9903,7 +9905,6 @@ return (1); } } - INP_WLOCK_ASSERT(tp->t_inpcb); return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, tiwin, thflags, nxt_pkt)); } @@ -9992,8 +9993,8 @@ static int bbr_init(struct tcpcb *tp) { + struct inpcb *inp = tptoinpcb(tp); struct tcp_bbr *bbr = NULL; - struct inpcb *inp; uint32_t cts; tp->t_fb_ptr = uma_zalloc(bbr_pcb_zone, (M_NOWAIT | M_ZERO)); @@ -10008,16 +10009,13 @@ } bbr = (struct tcp_bbr *)tp->t_fb_ptr; bbr->rtt_valid = 0; - inp = tp->t_inpcb; inp->inp_flags2 |= INP_CANNOT_DO_ECN; inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; TAILQ_INIT(&bbr->r_ctl.rc_map); TAILQ_INIT(&bbr->r_ctl.rc_free); TAILQ_INIT(&bbr->r_ctl.rc_tmap); bbr->rc_tp = tp; - if (tp->t_inpcb) { - bbr->rc_inp = tp->t_inpcb; - } + bbr->rc_inp = inp; cts = tcp_get_usecs(&bbr->rc_tv); tp->t_acktime = 0; bbr->rc_allow_data_af_clo = bbr_ignore_data_after_close; @@ -10238,6 +10236,7 @@ bbr_fini(struct tcpcb *tp, int32_t tcb_is_purged) { if (tp->t_fb_ptr) { + struct inpcb *inp = tptoinpcb(tp); uint32_t calc; struct tcp_bbr *bbr; struct bbr_sendmap *rsm; @@ -10247,12 +10246,10 @@ tcp_rel_pacing_rate(bbr->r_ctl.crte, bbr->rc_tp); bbr_log_flowend(bbr); bbr->rc_tp = NULL; - if (tp->t_inpcb) { - /* Backout any flags2 we applied */ - tp->t_inpcb->inp_flags2 &= ~INP_CANNOT_DO_ECN; - tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; - tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; - } + /* Backout any flags2 we applied */ + inp->inp_flags2 &= ~INP_CANNOT_DO_ECN; + inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; + inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; if (bbr->bbr_hdrw_pacing) counter_u64_add(bbr_flows_whdwr_pacing, -1); else @@ -11331,6 +11328,7 @@ struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, struct timeval *tv) { + struct inpcb *inp = tptoinpcb(tp); int32_t thflags, retval; uint32_t cts, lcts; uint32_t tiwin; @@ -11356,7 +11354,7 @@ * caller may have unnecessarily acquired a write lock due to a * race. */ - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", __func__)); @@ -11435,7 +11433,7 @@ * this is traditional behavior, may need to be cleaned up. 
*/ if (bbr->rc_inp == NULL) { - bbr->rc_inp = tp->t_inpcb; + bbr->rc_inp = inp; } /* * We need to init rc_inp here since its not init'd when @@ -11472,7 +11470,7 @@ if (to.to_flags & TOF_MSS) mss = to.to_mss; else - if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) + if ((inp->inp_vflag & INP_IPV6) != 0) mss = TCP6_MSS; else mss = TCP_MSS; @@ -11496,8 +11494,8 @@ return (1); } /* Set the flag */ - bbr->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; - tcp_set_hpts(tp->t_inpcb); + bbr->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; + tcp_set_hpts(inp); sack_filter_clear(&bbr->r_ctl.bbr_sf, th->th_ack); } if (thflags & TH_ACK) { @@ -11558,13 +11556,6 @@ } if (tiwin > bbr->r_ctl.rc_high_rwnd) bbr->r_ctl.rc_high_rwnd = tiwin; -#ifdef BBR_INVARIANTS - if ((tp->t_inpcb->inp_flags & INP_DROPPED) || - (tp->t_inpcb->inp_flags2 & INP_FREED)) { - panic("tp:%p bbr:%p given a dropped inp:%p", - tp, bbr, tp->t_inpcb); - } -#endif bbr->r_ctl.rc_flight_at_input = ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); bbr->rtt_valid = 0; @@ -11578,13 +11569,6 @@ retval = (*bbr->r_substate) (m, th, so, tp, &to, drop_hdrlen, tlen, tiwin, thflags, nxt_pkt, iptos); -#ifdef BBR_INVARIANTS - if ((retval == 0) && - (tp->t_inpcb == NULL)) { - panic("retval:%d tp:%p t_inpcb:NULL state:%d", - retval, tp, prev_state); - } -#endif if (nxt_pkt == 0) BBR_STAT_INC(bbr_rlock_left_ret0); else @@ -11594,7 +11578,7 @@ * If retval is 1 the tcb is unlocked and most likely the tp * is gone. */ - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); tcp_bbr_xmit_timer_commit(bbr, tp, cts); if (bbr->rc_is_pkt_epoch_now) bbr_set_pktepoch(bbr, cts, __LINE__); @@ -11662,13 +11646,6 @@ bbr_log_doseg_done(bbr, cts, nxt_pkt, did_out); if (did_out) bbr->r_wanted_output = 0; -#ifdef BBR_INVARIANTS - if (tp->t_inpcb == NULL) { - panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", - did_out, - retval, tp, prev_state); - } -#endif } return (retval); } @@ -11696,7 +11673,7 @@ retval = bbr_do_segment_nounlock(m, th, so, tp, drop_hdrlen, tlen, iptos, 0, &tv); if (retval == 0) { - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(tptoinpcb(tp)); } } @@ -12789,7 +12766,7 @@ if ((bbr->rc_in_persist == 0) && TCPS_HAVEESTABLISHED(tp->t_state) && (tp->snd_max == tp->snd_una) && - sbavail(&tp->t_inpcb->inp_socket->so_snd)) { + sbavail(&so->so_snd)) { /* No send window.. 
we must enter persist */ bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__); } @@ -13005,11 +12982,11 @@ } #ifdef INET6 if (isipv6) - ipoptlen = ip6_optlen(tp->t_inpcb); + ipoptlen = ip6_optlen(inp); else #endif - if (tp->t_inpcb->inp_options) - ipoptlen = tp->t_inpcb->inp_options->m_len - + if (inp->inp_options) + ipoptlen = inp->inp_options->m_len - offsetof(struct ipoption, ipopt_list); else ipoptlen = 0; @@ -14132,7 +14109,7 @@ NET_EPOCH_ASSERT(); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); (void)tcp_get_usecs(&tv); ret = bbr_output_wtime(tp, &tv); return (ret); diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c --- a/sys/netinet/tcp_stacks/rack.c +++ b/sys/netinet/tcp_stacks/rack.c @@ -2549,11 +2549,9 @@ log.u_bbr.applimited = rack->r_ctl.rc_sacked; log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; log.u_bbr.pacing_gain = rack->r_must_retran; - TCP_LOG_EVENTP(tp, NULL, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, - TCP_HDWR_PACE_SIZE, 0, - 0, &log, false, &tv); + TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, + &tptosocket(tp)->so_snd, + TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); } } @@ -4517,7 +4515,7 @@ * this means we need to have the data available * before we start a measurement. */ - if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) { + if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { /* Nope not enough data. */ return; } @@ -4578,7 +4576,7 @@ struct tcp_log_buffer *lgb = NULL; uint8_t labc_to_use, quality; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); tp->ccv->nsegs = nsegs; acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { @@ -4705,7 +4703,7 @@ struct tcp_rack *rack; rack = (struct tcp_rack *)tp->t_fb_ptr; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); /* * If we are doing PRR and have enough * room to send we are pacing and prr @@ -4725,7 +4723,7 @@ uint32_t orig_cwnd; orig_cwnd = tp->snd_cwnd; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); rack = (struct tcp_rack *)tp->t_fb_ptr; /* only alert CC if we alerted when we entered */ if (CC_ALGO(tp)->post_recovery != NULL) { @@ -4765,7 +4763,7 @@ * Suck the next prr cnt back into cwnd, but * only do that if we are not application limited. 
*/ - if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { + if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { /* * We are allowed to add back to the cwnd the amount we did * not get out if: @@ -4799,7 +4797,7 @@ struct tcp_rack *rack; uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); #ifdef STATS stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); #endif @@ -4891,7 +4889,7 @@ { uint32_t i_cwnd; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); #ifdef NETFLIX_STATS KMOD_TCPSTAT_INC(tcps_idle_restarts); @@ -5270,7 +5268,8 @@ if (TSTMP_GT(cts, tstmp_touse)) time_since_sent = cts - tstmp_touse; } - if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { + if (SEQ_LT(tp->snd_una, tp->snd_max) || + sbavail(&tptosocket(tp)->so_snd)) { rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; to = tp->t_rxtcur; if (to > time_since_sent) @@ -5608,7 +5607,7 @@ int32_t slot, uint32_t tot_len_this_send, int sup_rack) { struct hpts_diag diag; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); struct timeval tv; uint32_t delayed_ack = 0; uint32_t hpts_timeout; @@ -5617,7 +5616,6 @@ uint32_t left = 0; uint32_t us_cts; - inp = tp->t_inpcb; if ((tp->t_state == TCPS_CLOSED) || (tp->t_state == TCPS_LISTEN)) { return; @@ -5856,12 +5854,12 @@ * Arrange for the hpts to kick back in after the * t-o if the t-o does not cause a send. */ - (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), + (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), __LINE__, &diag); rack_log_hpts_diag(rack, us_cts, &diag, &tv); rack_log_to_start(rack, cts, hpts_timeout, slot, 0); } else { - (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), + (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot), __LINE__, &diag); rack_log_hpts_diag(rack, us_cts, &diag, &tv); rack_log_to_start(rack, cts, hpts_timeout, slot, 1); @@ -5875,7 +5873,7 @@ * but it may change the prr stats so letting it in (the set defaults * at the start of this block) are good enough. */ - (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), + (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), __LINE__, &diag); rack_log_hpts_diag(rack, us_cts, &diag, &tv); rack_log_to_start(rack, cts, hpts_timeout, slot, 0); @@ -6120,7 +6118,7 @@ #ifdef INVARIANTS struct rack_sendmap *insret; #endif - struct socket *so; + struct socket *so = tptosocket(tp); uint32_t amm; uint32_t out, avail; int collapsed_win = 0; @@ -6146,7 +6144,6 @@ counter_u64_add(rack_tlp_tot, 1); if (rack->r_state && (rack->r_state != tp->t_state)) rack_set_state(tp, rack); - so = tp->t_inpcb->inp_socket; avail = sbavail(&so->so_snd); out = tp->snd_max - tp->snd_una; if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { @@ -6338,9 +6335,6 @@ rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) { struct tcptemp *t_template; -#ifdef INVARIANTS - struct inpcb *inp = tp->t_inpcb; -#endif int32_t retval = 1; if (tp->t_timers->tt_flags & TT_STOPPED) { @@ -6354,7 +6348,6 @@ counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); return (-ETIMEDOUT); /* tcp_drop() */ } - KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); /* * Persistence timer into zero window. Force a byte to be output, if * possible. 
@@ -6430,13 +6423,12 @@ rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) { struct tcptemp *t_template; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); if (tp->t_timers->tt_flags & TT_STOPPED) { return (1); } rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; - inp = tp->t_inpcb; rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); /* * Keep-alive timer went off; send something or drop connection if @@ -6657,6 +6649,7 @@ static int rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) { + struct inpcb *inp = tptoinpcb(tp); int32_t rexmt; int32_t retval = 0; bool isipv6; @@ -6796,7 +6789,7 @@ * catch ESTABLISHED state. */ #ifdef INET6 - isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false; + isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; #else isipv6 = false; #endif @@ -6904,11 +6897,11 @@ */ if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { #ifdef INET6 - if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) - in6_losing(tp->t_inpcb); + if ((inp->inp_vflag & INP_IPV6) != 0) + in6_losing(inp); else #endif - in_losing(tp->t_inpcb); + in_losing(inp); tp->t_rttvar += tp->t_srtt; tp->t_srtt = 0; } @@ -6940,7 +6933,7 @@ bytes = tp->gput_ack - tp->gput_seq; if (SEQ_GT(tp->gput_seq, tp->snd_una)) bytes += tp->gput_seq - tp->snd_una; - if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { + if (bytes > sbavail(&tptosocket(tp)->so_snd)) { /* * There are not enough bytes in the socket * buffer that have been sent to cover this @@ -7001,7 +6994,7 @@ */ ret = -3; left = rack->r_ctl.rc_timer_exp - cts; - tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); + tcp_hpts_insert(tptoinpcb(tp), HPTS_MS_TO_SLOTS(left)); rack_log_to_processing(rack, cts, ret, left); return (1); } @@ -7282,7 +7275,7 @@ * -- i.e. return if err != 0 or should we pretend we sent it? -- * i.e. proceed with add ** do this for now. 
*/ - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (err) /* * We don't log errors -- we could but snd_max does not @@ -8198,7 +8191,7 @@ uint32_t ideal_amount; ideal_amount = rack_get_measure_window(tp, rack); - if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { + if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { /* * There is no sense of continuing this measurement * because its too small to gain us anything we @@ -9488,7 +9481,7 @@ uint32_t tsused; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (tcp_get_flags(th) & TH_RST) { /* We don't log resets */ return; @@ -10145,6 +10138,8 @@ int32_t under_pacing = 0; int32_t recovery = 0; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + rack = (struct tcp_rack *)tp->t_fb_ptr; if (SEQ_GT(th->th_ack, tp->snd_max)) { __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, @@ -10206,7 +10201,6 @@ } } nsegs = max(1, m->m_pkthdr.lro_nsegs); - INP_WLOCK_ASSERT(tp->t_inpcb); acked = BYTES_THIS_ACK(tp, th); if (acked) { @@ -10333,7 +10327,7 @@ if (rack->r_ctl.rc_went_idle_time == 0) rack->r_ctl.rc_went_idle_time = 1; rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); - if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) + if (sbavail(&tptosocket(tp)->so_snd) == 0) tp->t_acktime = 0; rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); /* Set need output so persist might get set */ @@ -10557,8 +10551,9 @@ int32_t tfo_syn; struct tcp_rack *rack; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + rack = (struct tcp_rack *)tp->t_fb_ptr; - INP_WLOCK_ASSERT(tp->t_inpcb); nsegs = max(1, m->m_pkthdr.lro_nsegs); if ((thflags & TH_ACK) && (SEQ_LT(tp->snd_wl1, th->th_seq) || @@ -10605,8 +10600,8 @@ (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && TCPS_HAVEESTABLISHED(tp->t_state) && ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && - sbavail(&tp->t_inpcb->inp_socket->so_snd) && - (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { + sbavail(&tptosocket(tp)->so_snd) && + (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { /* * Here the rwnd is less than * the pacing size, we are established, @@ -10624,7 +10619,6 @@ * along the up. 
*/ tp->rcv_up = tp->rcv_nxt; - INP_WLOCK_ASSERT(tp->t_inpcb); /* * Process the segment text, merging it into the TCP sequencing @@ -10830,7 +10824,6 @@ (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { rack->r_wanted_output = 1; } - INP_WLOCK_ASSERT(tp->t_inpcb); return (0); } @@ -11061,8 +11054,8 @@ (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && TCPS_HAVEESTABLISHED(tp->t_state) && ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && - sbavail(&tp->t_inpcb->inp_socket->so_snd) && - (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { + sbavail(&tptosocket(tp)->so_snd) && + (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { /* * Here the rwnd is less than * the pacing size, we are established, @@ -11189,7 +11182,7 @@ if (rack->r_ctl.rc_went_idle_time == 0) rack->r_ctl.rc_went_idle_time = 1; rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); - if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) + if (sbavail(&tptosocket(tp)->so_snd) == 0) tp->t_acktime = 0; rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); } @@ -11216,6 +11209,8 @@ int32_t ourfinisacked = 0; struct tcp_rack *rack; + INP_WLOCK_ASSERT(tptoinpcb(tp)); + ctf_calc_rwin(so, tp); /* * If the state is SYN_SENT: if seg contains an ACK, but not for our @@ -11335,7 +11330,6 @@ tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); tcp_state_change(tp, TCPS_SYN_RECEIVED); } - INP_WLOCK_ASSERT(tp->t_inpcb); /* * Advance th->th_seq to correspond to first data byte. If data, * trim to stay within window, dropping FIN if necessary. @@ -12508,6 +12502,7 @@ static int rack_init(struct tcpcb *tp) { + struct inpcb *inp = tptoinpcb(tp); struct tcp_rack *rack = NULL; #ifdef INVARIANTS struct rack_sendmap *insret; @@ -12532,9 +12527,9 @@ TAILQ_INIT(&rack->r_ctl.rc_free); TAILQ_INIT(&rack->r_ctl.rc_tmap); rack->rc_tp = tp; - rack->rc_inp = tp->t_inpcb; + rack->rc_inp = inp; /* Set the flag */ - rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; + rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; /* Probably not needed but lets be sure */ rack_clear_rate_sample(rack); /* @@ -12599,9 +12594,9 @@ else rack->r_mbuf_queue = 0; if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) - tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; + inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; else - tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; + inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; rack_set_pace_segments(tp, rack, __LINE__, NULL); if (rack_limits_scwnd) rack->r_limit_scw = 1; @@ -12825,6 +12820,8 @@ static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) { + struct inpcb *inp = tptoinpcb(tp); + if (tp->t_fb_ptr) { struct tcp_rack *rack; struct rack_sendmap *rsm, *nrsm; @@ -12963,15 +12960,13 @@ uma_zfree(rack_pcb_zone, tp->t_fb_ptr); tp->t_fb_ptr = NULL; } - if (tp->t_inpcb) { - tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; - tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; - tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; - tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; - /* Cancel the GP measurement in progress */ - tp->t_flags &= ~TF_GPUTINPROG; - tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; - } + inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; + inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; + inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; + inp->inp_flags2 &= ~INP_MBUF_ACKCMP; + /* Cancel the GP measurement in progress */ + tp->t_flags &= ~TF_GPUTINPROG; + inp->inp_flags2 &= ~INP_MBUF_L_ACKS; /* Make sure snd_nxt is correctly set */ tp->snd_nxt = tp->snd_max; } @@ 
-12980,7 +12975,7 @@ rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) { if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { - rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; + rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; } switch (tp->t_state) { case TCPS_SYN_SENT: @@ -13059,7 +13054,7 @@ if (tmr_up == PACE_TMR_DELACK) /* We are supposed to have delayed ack up and we do */ return; - } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { + } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { /* * if we hit enobufs then we would expect the possibility * of nothing outstanding and the RXT up (and the hptsi timer). @@ -13109,7 +13104,7 @@ } rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; } - tcp_hpts_remove(tp->t_inpcb); + tcp_hpts_remove(rack->rc_inp); } rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); @@ -13152,8 +13147,8 @@ (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && TCPS_HAVEESTABLISHED(tp->t_state) && ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && - sbavail(&tp->t_inpcb->inp_socket->so_snd) && - (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { + sbavail(&tptosocket(tp)->so_snd) && + (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { /* * Here the rwnd is less than * the pacing size, we are established, @@ -13169,6 +13164,7 @@ { if (tp->t_logstate != TCP_LOG_STATE_OFF) { + struct inpcb *inp = tptoinpcb(tp); union tcp_log_stackspecific log; struct timeval ltv; char tcp_hdr_buf[60]; @@ -13256,8 +13252,8 @@ th->th_ack = ae->ack; th->th_win = ae->win; /* Now fill in the ports */ - th->th_sport = tp->t_inpcb->inp_fport; - th->th_dport = tp->t_inpcb->inp_lport; + th->th_sport = inp->inp_fport; + th->th_dport = inp->inp_lport; tcp_set_flags(th, ae->flags); /* Now do we have a timestamp option? */ if (ae->flags & HAS_TSTMP) { @@ -13300,8 +13296,8 @@ } else xx = 0; TCP_LOG_EVENTP(tp, th, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0, + &tptosocket(tp)->so_rcv, + &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 0, &log, true, &ltv); if (xx) { tp->snd_una = orig_snd_una; @@ -13423,7 +13419,7 @@ bytes = tp->gput_ack - tp->gput_seq; if (SEQ_GT(tp->gput_seq, tp->snd_una)) bytes += tp->gput_seq - tp->snd_una; - if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { + if (bytes > sbavail(&tptosocket(tp)->so_snd)) { /* * There are not enough bytes in the socket * buffer that have been sent to cover this @@ -13836,7 +13832,7 @@ if (rack->r_ctl.rc_went_idle_time == 0) rack->r_ctl.rc_went_idle_time = 1; rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); - if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) + if (sbavail(&tptosocket(tp)->so_snd) == 0) tp->t_acktime = 0; /* Set so we might enter persists... */ rack->r_wanted_output = 1; @@ -14059,6 +14055,7 @@ struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, struct timeval *tv) { + struct inpcb *inp = tptoinpcb(tp); #ifdef TCP_ACCOUNTING uint64_t ts_val; #endif @@ -14080,6 +14077,10 @@ int ack_val_set = 0xf; #endif int nsegs; + + NET_EPOCH_ASSERT(); + INP_WLOCK_ASSERT(inp); + /* * tv passed from common code is from either M_TSTMP_LRO or * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. @@ -14150,8 +14151,6 @@ tcp_dooptions(&to, (u_char *)(th + 1), (th->th_off << 2) - sizeof(struct tcphdr), (thflags & TH_SYN) ? 
TO_SYN : 0); - NET_EPOCH_ASSERT(); - INP_WLOCK_ASSERT(tp->t_inpcb); KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", __func__)); @@ -14168,7 +14167,7 @@ bytes = tp->gput_ack - tp->gput_seq; if (SEQ_GT(tp->gput_seq, tp->snd_una)) bytes += tp->gput_seq - tp->snd_una; - if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { + if (bytes > sbavail(&tptosocket(tp)->so_snd)) { /* * There are not enough bytes in the socket * buffer that have been sent to cover this @@ -14341,7 +14340,7 @@ KASSERT(rack->rc_inp != NULL, ("%s: rack->rc_inp unexpectedly NULL", __func__)); if (rack->rc_inp == NULL) { - rack->rc_inp = tp->t_inpcb; + rack->rc_inp = inp; } /* @@ -14386,7 +14385,7 @@ if (to.to_flags & TOF_MSS) mss = to.to_mss; else - if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) + if ((inp->inp_vflag & INP_IPV6) != 0) mss = TCP6_MSS; else mss = TCP_MSS; @@ -14415,7 +14414,7 @@ #endif return (1); } - tcp_set_hpts(tp->t_inpcb); + tcp_set_hpts(inp); sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); } if (thflags & TH_FIN) @@ -14447,19 +14446,12 @@ retval = (*rack->r_substate) (m, th, so, tp, &to, drop_hdrlen, tlen, tiwin, thflags, nxt_pkt, iptos); -#ifdef INVARIANTS - if ((retval == 0) && - (tp->t_inpcb == NULL)) { - panic("retval:%d tp:%p t_inpcb:NULL state:%d", - retval, tp, prev_state); - } -#endif if (retval == 0) { /* * If retval is 1 the tcb is unlocked and most likely the tp * is gone. */ - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); if ((rack->rc_gp_dyn_mul) && (rack->rc_always_pace) && (rack->use_fixed_rate == 0) && @@ -14554,7 +14546,7 @@ ; } else { int late = 0; - if (tcp_in_hpts(rack->rc_inp)) { + if (tcp_in_hpts(inp)) { if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { us_cts = tcp_get_usecs(NULL); if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { @@ -14564,7 +14556,7 @@ late = 1; rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; } - tcp_hpts_remove(tp->t_inpcb); + tcp_hpts_remove(inp); } if (late && (did_out == 0)) { /* @@ -14586,13 +14578,6 @@ rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); if (did_out) rack->r_wanted_output = 0; -#ifdef INVARIANTS - if (tp->t_inpcb == NULL) { - panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", - did_out, - retval, tp, prev_state); - } -#endif #ifdef TCP_ACCOUNTING } else { /* @@ -14637,7 +14622,7 @@ } if (rack_do_segment_nounlock(m, th, so, tp, drop_hdrlen, tlen, iptos, 0, &tv) == 0) { - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(tptoinpcb(tp)); } } @@ -15247,7 +15232,7 @@ * before we start a measurement. 
*/ - if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < + if (sbavail(&tptosocket(tp)->so_snd) < max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { /* Nope not enough data */ @@ -16728,7 +16713,7 @@ struct timeval tv; int32_t prefetch_so_done = 0; struct tcp_log_buffer *lgb; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); struct sockbuf *sb; uint64_t ts_val = 0; #ifdef TCP_ACCOUNTING @@ -16740,15 +16725,16 @@ #endif bool hw_tls = false; + NET_EPOCH_ASSERT(); + INP_WLOCK_ASSERT(inp); + /* setup and take the cache hits here */ rack = (struct tcp_rack *)tp->t_fb_ptr; #ifdef TCP_ACCOUNTING sched_pin(); ts_val = get_cyclecount(); #endif - hpts_calling = rack->rc_inp->inp_hpts_calls; - NET_EPOCH_ASSERT(); - INP_WLOCK_ASSERT(rack->rc_inp); + hpts_calling = inp->inp_hpts_calls; #ifdef TCP_OFFLOAD if (tp->t_flags & TF_TOE) { #ifdef TCP_ACCOUNTING @@ -17578,14 +17564,14 @@ */ #ifdef INET6 if (isipv6 && IPSEC_ENABLED(ipv6)) - ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); + ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); #ifdef INET else #endif #endif /* INET6 */ #ifdef INET if (IPSEC_ENABLED(ipv4)) - ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); + ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); #endif /* INET */ #endif @@ -18183,11 +18169,11 @@ } #ifdef INET6 if (isipv6) - ipoptlen = ip6_optlen(tp->t_inpcb); + ipoptlen = ip6_optlen(inp); else #endif - if (tp->t_inpcb->inp_options) - ipoptlen = tp->t_inpcb->inp_options->m_len - + if (inp->inp_options) + ipoptlen = inp->inp_options->m_len - offsetof(struct ipoption, ipopt_list); else ipoptlen = 0; @@ -19649,6 +19635,7 @@ struct epoch_tracker et; struct sockopt sopt; struct cc_newreno_opts opt; + struct inpcb *inp = tptoinpcb(tp); uint64_t val; int error = 0; uint16_t ca, ss; @@ -19797,10 +19784,10 @@ } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { rack->r_use_cmp_ack = 1; rack->r_mbuf_queue = 1; - tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; + inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; } if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) - rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; + inp->inp_flags2 |= INP_MBUF_ACKCMP; break; case TCP_SHARED_CWND_TIME_LIMIT: RACK_OPTS_INC(tcp_lscwnd); @@ -19852,9 +19839,9 @@ else rack->r_mbuf_queue = 0; if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) - tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; + inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; else - tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; + inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; break; case TCP_RACK_NONRXT_CFG_RATE: RACK_OPTS_INC(tcp_rack_cfg_rate); @@ -19937,9 +19924,9 @@ } } if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) - tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; + inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; else - tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; + inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; /* A rate may be set irate or other, if so set seg size */ rack_update_seg(rack); break; @@ -20513,7 +20500,7 @@ rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); bzero(ti, sizeof(*ti)); ti->tcpi_state = tp->t_state; diff --git a/sys/netinet/tcp_stacks/rack_bbr_common.c b/sys/netinet/tcp_stacks/rack_bbr_common.c --- a/sys/netinet/tcp_stacks/rack_bbr_common.c +++ b/sys/netinet/tcp_stacks/rack_bbr_common.c @@ -403,7 +403,10 @@ uint16_t drop_hdrlen; uint8_t iptos, no_vn=0; + inp = tptoinpcb(tp); + INP_WLOCK_ASSERT(inp); NET_EPOCH_ASSERT(); + if (m) ifp = m_rcvif(m); else @@ -480,8 +483,8 @@ * been compressed. 
We assert the inp has * the flag set to enable this! */ - KASSERT((tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP), - ("tp:%p inp:%p no INP_MBUF_ACKCMP flags?", tp, tp->t_inpcb)); + KASSERT((inp->inp_flags2 & INP_MBUF_ACKCMP), + ("tp:%p inp:%p no INP_MBUF_ACKCMP flags?", tp, inp)); tlen = 0; drop_hdrlen = 0; th = NULL; @@ -496,8 +499,6 @@ KMOD_TCPSTAT_INC(tcps_rcvtotal); else KMOD_TCPSTAT_ADD(tcps_rcvtotal, (m->m_len / sizeof(struct tcp_ackent))); - inp = tp->t_inpcb; - INP_WLOCK_ASSERT(inp); retval = (*tp->t_fb->tfb_do_segment_nounlock)(m, th, so, tp, drop_hdrlen, tlen, iptos, nxt_pkt, &tv); if (retval) { @@ -571,7 +572,7 @@ { if (tp != NULL) { tcp_dropwithreset(m, th, tp, tlen, rstreason); - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(tptoinpcb(tp)); } else tcp_dropwithreset(m, th, NULL, tlen, rstreason); } @@ -759,7 +760,7 @@ * Drop space held by incoming segment and return. */ if (tp != NULL) - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(tptoinpcb(tp)); if (m) m_freem(m); } @@ -974,7 +975,7 @@ tcp_dropwithreset(m, th, tp, tlen, rstreason); tp = tcp_drop(tp, ETIMEDOUT); if (tp) - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(tptoinpcb(tp)); } uint32_t @@ -1010,8 +1011,8 @@ log.u_bbr.pkts_out = sack_blocks[3].end; } TCP_LOG_EVENTP(tp, NULL, - &tp->t_inpcb->inp_socket->so_rcv, - &tp->t_inpcb->inp_socket->so_snd, + &tptosocket(tp)->so_rcv, + &tptosocket(tp)->so_snd, TCP_SACK_FILTER_RES, 0, 0, &log, false, &tv); } diff --git a/sys/netinet/tcp_stats.c b/sys/netinet/tcp_stats.c --- a/sys/netinet/tcp_stats.c +++ b/sys/netinet/tcp_stats.c @@ -199,7 +199,7 @@ rm_runlock(&tcp_stats_tpl_sampling_lock, &tracker); if (tpl >= 0) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (tp->t_stats != NULL) stats_blob_destroy(tp->t_stats); tp->t_stats = stats_blob_alloc(tpl, 0); diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c --- a/sys/netinet/tcp_subr.c +++ b/sys/netinet/tcp_subr.c @@ -1044,10 +1044,9 @@ static int tcp_default_fb_init(struct tcpcb *tp) { + struct socket *so = tptosocket(tp); - struct socket *so; - - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT, ("%s: connection %p in unexpected state %d", __func__, tp, @@ -1064,7 +1063,6 @@ * Make sure some kind of transmission timer is set if there is * outstanding data. */ - so = tp->t_inpcb->inp_socket; if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) || tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) || tcp_timer_active(tp, TT_PERSIST))) { @@ -1110,8 +1108,7 @@ tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged) { - INP_WLOCK_ASSERT(tp->t_inpcb); - return; + INP_WLOCK_ASSERT(tptoinpcb(tp)); } /* @@ -1812,8 +1809,7 @@ ip = ipgen; if (tp != NULL) { - inp = tp->t_inpcb; - KASSERT(inp != NULL, ("tcp control block w/o inpcb")); + inp = tptoinpcb(tp); INP_LOCK_ASSERT(inp); } else inp = NULL; @@ -2102,8 +2098,7 @@ nth->th_sum = in6_cksum_pseudo(ip6, tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0); } - ip6->ip6_hlim = in6_selecthlim(tp != NULL ? 
tp->t_inpcb : - NULL, NULL); + ip6->ip6_hlim = in6_selecthlim(inp, NULL); } #endif /* INET6 */ #if defined(INET6) && defined(INET) @@ -2139,7 +2134,7 @@ struct timeval tv; memset(&log.u_bbr, 0, sizeof(log.u_bbr)); - log.u_bbr.inhpts = tp->t_inpcb->inp_in_hpts; + log.u_bbr.inhpts = inp->inp_in_hpts; log.u_bbr.flex8 = 4; log.u_bbr.pkts_out = tp->t_maxseg; log.u_bbr.timeStamp = tcp_get_usecs(&tv); @@ -2356,10 +2351,10 @@ struct tcpcb * tcp_drop(struct tcpcb *tp, int errno) { - struct socket *so = tp->t_inpcb->inp_socket; + struct socket *so = tptosocket(tp); NET_EPOCH_ASSERT(); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); if (TCPS_HAVERCVDSYN(tp->t_state)) { tcp_state_change(tp, TCPS_CLOSED); @@ -2377,7 +2372,7 @@ void tcp_discardcb(struct tcpcb *tp) { - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); INP_WLOCK_ASSERT(inp); @@ -2452,8 +2447,8 @@ bool tcp_freecb(struct tcpcb *tp) { - struct inpcb *inp = tp->t_inpcb; - struct socket *so = inp->inp_socket; + struct inpcb *inp = tptoinpcb(tp); + struct socket *so = tptosocket(tp); #ifdef INET6 bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0; #endif @@ -2544,8 +2539,8 @@ struct tcpcb * tcp_close(struct tcpcb *tp) { - struct inpcb *inp = tp->t_inpcb; - struct socket *so; + struct inpcb *inp = tptoinpcb(tp); + struct socket *so = tptosocket(tp); INP_WLOCK_ASSERT(inp); @@ -2570,7 +2565,6 @@ if (tp->t_state != TCPS_CLOSED) tcp_state_change(tp, TCPS_CLOSED); KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL")); - so = inp->inp_socket; soisdisconnected(so); if (inp->inp_flags & INP_SOCKREF) { inp->inp_flags &= ~INP_SOCKREF; @@ -3463,7 +3457,7 @@ void tcp6_use_min_mtu(struct tcpcb *tp) { - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); INP_WLOCK_ASSERT(inp); /* diff --git a/sys/netinet/tcp_timer.c b/sys/netinet/tcp_timer.c --- a/sys/netinet/tcp_timer.c +++ b/sys/netinet/tcp_timer.c @@ -250,11 +250,10 @@ { struct epoch_tracker et; struct tcpcb *tp = xtp; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); + CURVNET_SET(tp->t_vnet); - inp = tp->t_inpcb; - KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); INP_WLOCK(inp); if (callout_pending(&tp->t_timers->tt_delack) || !callout_active(&tp->t_timers->tt_delack)) { @@ -283,7 +282,7 @@ tcp_timer_close(struct tcpcb *tp) { struct epoch_tracker et; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); INP_WLOCK_ASSERT(inp); @@ -301,7 +300,7 @@ tcp_timer_drop(struct tcpcb *tp) { struct epoch_tracker et; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); INP_WLOCK_ASSERT(inp); @@ -316,21 +315,20 @@ tcp_timer_2msl(void *xtp) { struct tcpcb *tp = xtp; - struct inpcb *inp; + struct inpcb *inp = tptoinpcb(tp); CURVNET_SET(tp->t_vnet); #ifdef TCPDEBUG int ostate; ostate = tp->t_state; #endif - inp = tp->t_inpcb; - KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); + INP_WLOCK(inp); tcp_log_end_status(tp, TCP_EI_STATUS_2MSL); tcp_free_sackholes(tp); if (callout_pending(&tp->t_timers->tt_2msl) || !callout_active(&tp->t_timers->tt_2msl)) { - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); CURVNET_RESTORE(); return; } @@ -359,8 +357,8 @@ CURVNET_RESTORE(); return; } else if (tp->t_state == TCPS_FIN_WAIT_2 && - tcp_fast_finwait2_recycle && tp->t_inpcb->inp_socket && - (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) { + tcp_fast_finwait2_recycle && inp->inp_socket && + (inp->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) { 
TCPSTAT_INC(tcps_finwait2_drops); tcp_timer_close(tp); CURVNET_RESTORE(); @@ -377,7 +375,7 @@ } #ifdef TCPDEBUG - if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) + if (tptosocket(tp)->so_options & SO_DEBUG) tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, PRU_SLOWTIMO); #endif @@ -390,18 +388,17 @@ void tcp_timer_keep(void *xtp) { + struct epoch_tracker et; struct tcpcb *tp = xtp; + struct inpcb *inp = tptoinpcb(tp); struct tcptemp *t_template; - struct inpcb *inp; - struct epoch_tracker et; CURVNET_SET(tp->t_vnet); #ifdef TCPDEBUG int ostate; ostate = tp->t_state; #endif - inp = tp->t_inpcb; - KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); + INP_WLOCK(inp); if (callout_pending(&tp->t_timers->tt_keep) || !callout_active(&tp->t_timers->tt_keep)) { @@ -494,7 +491,7 @@ tp = tcp_drop(tp, ETIMEDOUT); #ifdef TCPDEBUG - if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) + if (tp != NULL && (tptosocket(tp)->so_options & SO_DEBUG)) tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, PRU_SLOWTIMO); #endif @@ -533,9 +530,9 @@ void tcp_timer_persist(void *xtp) { - struct tcpcb *tp = xtp; - struct inpcb *inp; struct epoch_tracker et; + struct tcpcb *tp = xtp; + struct inpcb *inp = tptoinpcb(tp); bool progdrop; int outrv; CURVNET_SET(tp->t_vnet); @@ -544,8 +541,7 @@ ostate = tp->t_state; #endif - inp = tp->t_inpcb; - KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); + INP_WLOCK(inp); if (callout_pending(&tp->t_timers->tt_persist) || !callout_active(&tp->t_timers->tt_persist)) { @@ -605,7 +601,7 @@ tp->t_flags &= ~TF_FORCEDATA; #ifdef TCPDEBUG - if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG) + if (tp != NULL && tptosocket(tp)->so_options & SO_DEBUG) tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO); #endif TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); @@ -617,19 +613,18 @@ void tcp_timer_rexmt(void * xtp) { + struct epoch_tracker et; struct tcpcb *tp = xtp; CURVNET_SET(tp->t_vnet); + struct inpcb *inp = tptoinpcb(tp); int rexmt, outrv; - struct inpcb *inp; - struct epoch_tracker et; bool isipv6; #ifdef TCPDEBUG int ostate; ostate = tp->t_state; #endif - inp = tp->t_inpcb; - KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); + INP_WLOCK(inp); if (callout_pending(&tp->t_timers->tt_rexmt) || !callout_active(&tp->t_timers->tt_rexmt)) { @@ -722,7 +717,7 @@ * ESTABLISHED state. */ #ifdef INET6 - isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false; + isipv6 = (inp->inp_vflag & INP_IPV6) ? 
true : false; #else isipv6 = false; #endif @@ -865,11 +860,11 @@ */ if (tp->t_rxtshift > TCP_RTT_INVALIDATE) { #ifdef INET6 - if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) - in6_losing(tp->t_inpcb); + if ((inp->inp_vflag & INP_IPV6) != 0) + in6_losing(inp); else #endif - in_losing(tp->t_inpcb); + in_losing(inp); } tp->snd_nxt = tp->snd_una; tp->snd_recover = tp->snd_max; @@ -886,7 +881,7 @@ NET_EPOCH_ENTER(et); outrv = tcp_output_nodrop(tp); #ifdef TCPDEBUG - if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) + if (tp != NULL && (tptosocket(tp)->so_options & SO_DEBUG)) tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, PRU_SLOWTIMO); #endif @@ -901,7 +896,7 @@ { struct callout *t_callout; callout_func_t *f_callout; - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); int cpu = inp_to_cpuid(inp); #ifdef TCP_OFFLOAD @@ -1066,10 +1061,12 @@ break; case TT_2MSL: if (tp->t_timers->tt_flags &= TT_2MSL_SUS) { + struct socket *so = tptosocket(tp); + tp->t_timers->tt_flags &= ~TT_2MSL_SUS; if ((tp->t_state == TCPS_FIN_WAIT_2) && - ((tp->t_inpcb->inp_socket == NULL) || - (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE))) { + (so == NULL || /* XXXGL: needed? */ + (so->so_rcv.sb_state & SBS_CANTRCVMORE))) { /* Star the 2MSL timer */ tcp_timer_activate(tp, TT_2MSL, (tcp_fast_finwait2_recycle) ? @@ -1085,17 +1082,14 @@ static void tcp_timer_discard(void *ptp) { - struct inpcb *inp; - struct tcpcb *tp; struct epoch_tracker et; + struct tcpcb *tp = (struct tcpcb *)ptp; + struct inpcb *inp = tptoinpcb(tp); - tp = (struct tcpcb *)ptp; CURVNET_SET(tp->t_vnet); - NET_EPOCH_ENTER(et); - inp = tp->t_inpcb; - KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", - __func__, tp)); INP_WLOCK(inp); + NET_EPOCH_ENTER(et); + KASSERT((tp->t_timers->tt_flags & TT_STOPPED) != 0, ("%s: tcpcb has to be stopped here", __func__)); if (--tp->t_timers->tt_draincnt > 0 || diff --git a/sys/netinet/tcp_timewait.c b/sys/netinet/tcp_timewait.c --- a/sys/netinet/tcp_timewait.c +++ b/sys/netinet/tcp_timewait.c @@ -115,7 +115,7 @@ void tcp_twstart(struct tcpcb *tp) { - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); #ifdef INET6 bool isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6; #endif diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c --- a/sys/netinet/tcp_usrreq.c +++ b/sys/netinet/tcp_usrreq.c @@ -1476,8 +1476,8 @@ static int tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td) { - struct inpcb *inp = tp->t_inpcb, *oinp; - struct socket *so = inp->inp_socket; + struct inpcb *inp = tptoinpcb(tp), *oinp; + struct socket *so = tptosocket(tp); struct in_addr laddr; u_short lport; int error; @@ -1549,7 +1549,7 @@ static int tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td) { - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); int error; INP_WLOCK_ASSERT(inp); @@ -1597,7 +1597,7 @@ tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti) { - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); bzero(ti, sizeof(*ti)); ti->tcpi_state = tp->t_state; @@ -1802,7 +1802,7 @@ } #ifdef TCPHPTS /* Assure that we are not on any hpts */ - tcp_hpts_remove(tp->t_inpcb); + tcp_hpts_remove(tptoinpcb(tp)); #endif if (blk->tfb_tcp_fb_init) { error = (*blk->tfb_tcp_fb_init)(tp); @@ -2713,8 +2713,8 @@ static void tcp_disconnect(struct tcpcb *tp) { - struct inpcb *inp = tp->t_inpcb; - struct socket *so = inp->inp_socket; + struct inpcb *inp = tptoinpcb(tp); + struct socket *so = tptosocket(tp); 
NET_EPOCH_ASSERT(); INP_WLOCK_ASSERT(inp); @@ -2757,7 +2757,7 @@ { NET_EPOCH_ASSERT(); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(tptoinpcb(tp)); switch (tp->t_state) { case TCPS_LISTEN: @@ -2792,7 +2792,7 @@ if (tp->t_acktime == 0) tp->t_acktime = ticks; if (tp->t_state >= TCPS_FIN_WAIT_2) { - soisdisconnected(tp->t_inpcb->inp_socket); + soisdisconnected(tptosocket(tp)); /* Prevent the connection hanging in FIN_WAIT_2 forever. */ if (tp->t_state == TCPS_FIN_WAIT_2) { int timeout; diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h --- a/sys/netinet/tcp_var.h +++ b/sys/netinet/tcp_var.h @@ -392,6 +392,11 @@ struct tcpcb * tcp_drop(struct tcpcb *, int); #ifdef _NETINET_IN_PCB_H_ +#define intotcpcb(inp) ((struct tcpcb *)(inp)->inp_ppcb) +#define sototcpcb(so) intotcpcb(sotoinpcb(so)) +#define tptoinpcb(tp) tp->t_inpcb +#define tptosocket(tp) tp->t_inpcb->inp_socket + /* * tcp_output() * Handles tcp_drop request from advanced stacks and reports that inpcb is @@ -401,9 +406,10 @@ static inline int tcp_output(struct tcpcb *tp) { + struct inpcb *inp = tptoinpcb(tp); int rv; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); rv = tp->t_fb->tfb_tcp_output(tp); if (rv < 0) { @@ -412,7 +418,7 @@ tp->t_fb->tfb_tcp_block_name, tp)); tp = tcp_drop(tp, -rv); if (tp) - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); } return (rv); @@ -426,9 +432,10 @@ static inline int tcp_output_unlock(struct tcpcb *tp) { + struct inpcb *inp = tptoinpcb(tp); int rv; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); rv = tp->t_fb->tfb_tcp_output(tp); if (rv < 0) { @@ -438,9 +445,9 @@ rv = -rv; tp = tcp_drop(tp, rv); if (tp) - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); } else - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); return (rv); } @@ -458,9 +465,10 @@ static inline int tcp_output_nodrop(struct tcpcb *tp) { + struct inpcb *inp = tptoinpcb(tp); int rv; - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); rv = tp->t_fb->tfb_tcp_output(tp); KASSERT(rv >= 0 || tp->t_fb->tfb_flags & TCP_FUNC_OUTPUT_CANDROP, @@ -477,15 +485,16 @@ static inline int tcp_unlock_or_drop(struct tcpcb *tp, int tcp_output_retval) { + struct inpcb *inp = tptoinpcb(tp); - INP_WLOCK_ASSERT(tp->t_inpcb); + INP_WLOCK_ASSERT(inp); if (tcp_output_retval < 0) { tcp_output_retval = -tcp_output_retval; if (tcp_drop(tp, tcp_output_retval) != NULL) - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); } else - INP_WUNLOCK(tp->t_inpcb); + INP_WUNLOCK(inp); return (tcp_output_retval); } @@ -631,9 +640,6 @@ struct in_conninfo; #endif /* _NETINET_IN_PCB_H_ */ -#define intotcpcb(ip) ((struct tcpcb *)(ip)->inp_ppcb) -#define sototcpcb(so) (intotcpcb(sotoinpcb(so))) - /* * The smoothed round-trip time and estimated variance * are stored as fixed point numbers scaled by the values below. diff --git a/sys/netinet/toecore.c b/sys/netinet/toecore.c --- a/sys/netinet/toecore.c +++ b/sys/netinet/toecore.c @@ -239,7 +239,7 @@ static void toe_listen_start_event(void *arg __unused, struct tcpcb *tp) { - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); INP_WLOCK_ASSERT(inp); KASSERT(tp->t_state == TCPS_LISTEN, @@ -253,7 +253,7 @@ { struct toedev *tod; #ifdef INVARIANTS - struct inpcb *inp = tp->t_inpcb; + struct inpcb *inp = tptoinpcb(tp); #endif INP_WLOCK_ASSERT(inp);
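[Editorial note, not part of the patch] The tcp_var.h hunk above adds the tptoinpcb()/tptosocket() accessors and reworks the inline tcp_output*() wrappers so the inpcb pointer is captured once, up front, and the drop-path unlock goes through that cached pointer instead of re-dereferencing tp->t_inpcb. The sketch below restates that idiom in isolation. It is an illustration under assumptions: example_try_send() is a hypothetical helper that does not exist in the tree, and the usual kernel headers (sys/socketvar.h, netinet/in_pcb.h, netinet/tcp_var.h) are assumed to be in scope.

/*
 * Editor's sketch only -- hypothetical helper, not added by this change.
 * Mirrors the shape of the tcp_output() inline wrapper above: resolve the
 * inpcb and socket once through the new accessors, assert the lock on the
 * cached inp, and unlock through it on the drop path.
 */
static inline int
example_try_send(struct tcpcb *tp)
{
	struct inpcb *inp = tptoinpcb(tp);	/* single point of access */
	struct socket *so = tptosocket(tp);
	int rv;

	INP_WLOCK_ASSERT(inp);

	if (sbavail(&so->so_snd) == 0)
		return (0);			/* nothing queued to send */

	rv = tp->t_fb->tfb_tcp_output(tp);
	if (rv < 0) {
		/*
		 * A negative return is a drop request from the stack; if
		 * the connection survives tcp_drop(), release the lock
		 * through the cached inp rather than through tp->t_inpcb.
		 */
		if (tcp_drop(tp, -rv) != NULL)
			INP_WUNLOCK(inp);
	}
	return (rv);
}

Funnelling every dereference through the two accessors keeps the tp-to-inpcb relationship defined in one place (the two macros), which is what the mechanical conversions throughout this diff rely on.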