Index: sys/netinet/tcp_lro.h
===================================================================
--- sys/netinet/tcp_lro.h
+++ sys/netinet/tcp_lro.h
@@ -114,6 +114,12 @@
 int tcp_lro_rx(struct lro_ctrl *, struct mbuf *, uint32_t);
 void tcp_lro_queue_mbuf(struct lro_ctrl *, struct mbuf *);
 
+/* append version of functions above */
+void tcp_lro_flush_inactive_append(struct lro_ctrl *, struct mbuf ***, const struct timeval *);
+void tcp_lro_flush_append(struct lro_ctrl *, struct mbuf ***, struct lro_entry *);
+void tcp_lro_flush_all_append(struct lro_ctrl *, struct mbuf ***);
+void tcp_lro_queue_mbuf_append(struct lro_ctrl *, struct mbuf ***, struct mbuf *);
+
 #define TCP_LRO_NO_ENTRIES -2
 #define TCP_LRO_CANNOT -1
 #define TCP_LRO_NOT_SUPPORTED 1
Index: sys/netinet/tcp_lro.c
===================================================================
--- sys/netinet/tcp_lro.c
+++ sys/netinet/tcp_lro.c
@@ -69,9 +69,9 @@
 #define TCP_LRO_INVALID_CSUM 0x0000
 #endif
 
-static void tcp_lro_rx_done(struct lro_ctrl *lc);
-static int tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m,
-    uint32_t csum, int use_hash);
+static void tcp_lro_rx_done_append(struct lro_ctrl *lc, struct mbuf ***pppmb);
+static int tcp_lro_rx_append(struct lro_ctrl *lc, struct mbuf *m, struct mbuf ***pppmb,
+    uint32_t csum, int use_hash);
 
 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
     "TCP LRO");
@@ -82,6 +82,26 @@
     "default number of LRO entries");
 
 static __inline void
+tcp_lro_append_mbuf(struct mbuf *mb, struct mbuf ***pppmb)
+{
+
+	/* update last m_next pointer to point to new mbuf */
+	**pppmb = mb;
+	*pppmb = &mb->m_next;
+}
+
+static __inline void
+tcp_lro_input_head(struct lro_ctrl *lc, struct mbuf *mb)
+{
+
+	if (__predict_false(mb == NULL))
+		return;
+
+	/* input packet(s) to network layer */
+	(*lc->ifp->if_input)(lc->ifp, mb);
+}
+
+static __inline void
 tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
     struct lro_entry *le)
 {
@@ -275,18 +295,18 @@
 #endif
 
 static void
-tcp_lro_rx_done(struct lro_ctrl *lc)
+tcp_lro_rx_done_append(struct lro_ctrl *lc, struct mbuf ***pppmb)
 {
 	struct lro_entry *le;
 
 	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
 		tcp_lro_active_remove(le);
-		tcp_lro_flush(lc, le);
+		tcp_lro_flush_append(lc, pppmb, le);
 	}
 }
 
 void
-tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
+tcp_lro_flush_inactive_append(struct lro_ctrl *lc, struct mbuf ***pppmb, const struct timeval *timeout)
 {
 	struct lro_entry *le, *le_tmp;
 	struct timeval tv;
@@ -299,13 +319,24 @@
 	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
 		if (timevalcmp(&tv, &le->mtime, >=)) {
 			tcp_lro_active_remove(le);
-			tcp_lro_flush(lc, le);
+			tcp_lro_flush_append(lc, pppmb, le);
 		}
 	}
 }
 
 void
-tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
+tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
+{
+	struct mbuf *pmb = NULL;
+	struct mbuf **ppmb = &pmb;
+
+	tcp_lro_flush_inactive_append(lc, &ppmb, timeout);
+	tcp_lro_input_head(lc, pmb);
+}
+
+void
+tcp_lro_flush_append(struct lro_ctrl *lc, struct mbuf ***pppmb,
+    struct lro_entry *le)
 {
 
 	if (le->append_cnt > 0) {
@@ -391,13 +422,25 @@
 	}
 
 	le->m_head->m_pkthdr.lro_nsegs = le->append_cnt + 1;
-	(*lc->ifp->if_input)(lc->ifp, le->m_head);
+
+	/* queue mbuf */
+	tcp_lro_append_mbuf(le->m_head, pppmb);
 	lc->lro_queued += le->append_cnt + 1;
 	lc->lro_flushed++;
 	bzero(le, sizeof(*le));
 	LIST_INSERT_HEAD(&lc->lro_free, le, next);
 }
 
+void
+tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
+{
+	struct mbuf *pmb = NULL;
+	struct mbuf **ppmb = &pmb;
+
+	tcp_lro_flush_append(lc, &ppmb, le);
+	tcp_lro_input_head(lc, pmb);
+}
+
 #ifdef HAVE_INLINE_FLSLL
 #define tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
 #else
@@ -487,7 +530,7 @@
 }
 
 void
-tcp_lro_flush_all(struct lro_ctrl *lc)
+tcp_lro_flush_all_append(struct lro_ctrl *lc, struct mbuf ***pppmb)
 {
 	uint64_t seq;
 	uint64_t nseq;
@@ -516,24 +559,33 @@
 			seq = nseq;
 
 			/* flush active streams */
-			tcp_lro_rx_done(lc);
+			tcp_lro_rx_done_append(lc, pppmb);
 		}
 
 		/* add packet to LRO engine */
-		if (tcp_lro_rx2(lc, mb, 0, 0) != 0) {
-			/* input packet to network layer */
-			(*lc->ifp->if_input)(lc->ifp, mb);
+		if (tcp_lro_rx_append(lc, mb, pppmb, 0, 0) != 0) {
+			tcp_lro_append_mbuf(mb, pppmb);
 			lc->lro_queued++;
 			lc->lro_flushed++;
 		}
 	}
 done:
 	/* flush active streams */
-	tcp_lro_rx_done(lc);
+	tcp_lro_rx_done_append(lc, pppmb);
 
 	lc->lro_mbuf_count = 0;
 }
 
+void
+tcp_lro_flush_all(struct lro_ctrl *lc)
+{
+	struct mbuf *pmb = NULL;
+	struct mbuf **ppmb = &pmb;
+
+	tcp_lro_flush_all_append(lc, &ppmb);
+	tcp_lro_input_head(lc, pmb);
+}
+
 #ifdef INET6
 static int
 tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,
@@ -595,7 +647,7 @@
 #endif
 
 static int
-tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
+tcp_lro_rx_append(struct lro_ctrl *lc, struct mbuf *m, struct mbuf ***pppmb, uint32_t csum, int use_hash)
 {
 	struct lro_entry *le;
 	struct ether_header *eh;
@@ -779,14 +831,14 @@
 		if (force_flush) {
 			/* Timestamps mismatch; this is a FIN, etc */
 			tcp_lro_active_remove(le);
-			tcp_lro_flush(lc, le);
+			tcp_lro_flush_append(lc, pppmb, le);
 			return (TCP_LRO_CANNOT);
 		}
 
 		/* Flush now if appending will result in overflow. */
 		if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
 			tcp_lro_active_remove(le);
-			tcp_lro_flush(lc, le);
+			tcp_lro_flush_append(lc, pppmb, le);
 			break;
 		}
 
@@ -795,7 +847,7 @@
 		    (tcp_data_len == 0 && le->ack_seq == th->th_ack))) {
 			/* Out of order packet or duplicate ACK. */
 			tcp_lro_active_remove(le);
-			tcp_lro_flush(lc, le);
+			tcp_lro_flush_append(lc, pppmb, le);
 			return (TCP_LRO_CANNOT);
 		}
 
@@ -828,7 +880,7 @@
 			 */
 			if (le->append_cnt >= lc->lro_ackcnt_lim) {
 				tcp_lro_active_remove(le);
-				tcp_lro_flush(lc, le);
+				tcp_lro_flush_append(lc, pppmb, le);
 			}
 			return (0);
 		}
@@ -852,7 +904,7 @@
 		 */
 		if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) {
 			tcp_lro_active_remove(le);
-			tcp_lro_flush(lc, le);
+			tcp_lro_flush_append(lc, pppmb, le);
 		} else
 			getmicrotime(&le->mtime);
 
@@ -935,12 +987,18 @@
 int
 tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
 {
+	struct mbuf *pmb = NULL;
+	struct mbuf **ppmb = &pmb;
+	int retval;
+
+	retval = tcp_lro_rx_append(lc, m, &ppmb, csum, 1);
+	tcp_lro_input_head(lc, pmb);
 
-	return tcp_lro_rx2(lc, m, csum, 1);
+	return (retval);
 }
 
 void
-tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
+tcp_lro_queue_mbuf_append(struct lro_ctrl *lc, struct mbuf ***pppmb, struct mbuf *mb)
 {
 	/* sanity checks */
 	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
@@ -954,8 +1012,8 @@
 
 	if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
 	    (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
-		/* input packet to network layer */
-		(*lc->ifp->if_input) (lc->ifp, mb);
+		/* queue mbuf */
+		tcp_lro_append_mbuf(mb, pppmb);
 		return;
 	}
 
@@ -970,7 +1028,17 @@
 
 	/* flush if array is full */
 	if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
-		tcp_lro_flush_all(lc);
+		tcp_lro_flush_all_append(lc, pppmb);
+}
+
+void
+tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
+{
+	struct mbuf *pmb = NULL;
+	struct mbuf **ppmb = &pmb;
+
+	tcp_lro_queue_mbuf_append(lc, &ppmb, mb);
+	tcp_lro_input_head(lc, pmb);
+}
 
 /* end */
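
For reference, here is a minimal sketch of how a driver receive path might consume the new *_append interface. The driver name, the mydrv_softc layout, mydrv_rxeof() and the budget loop are hypothetical and exist only for illustration; only the tcp_lro_queue_mbuf_append()/tcp_lro_flush_all_append() calls and the single if_input() hand-off of the collected m_next chain mirror what the patch itself does in tcp_lro_input_head(). It assumes lc->ifp was set up earlier via tcp_lro_init_args() at attach time.

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/tcp_lro.h>

/* hypothetical per-device softc; only the LRO context matters here */
struct mydrv_softc {
	struct lro_ctrl lro;
};

/* hypothetical helper returning the next completed RX mbuf, or NULL */
struct mbuf *mydrv_rxeof(struct mydrv_softc *sc);

static void
mydrv_rx_poll(struct mydrv_softc *sc, int budget)
{
	struct lro_ctrl *lc = &sc->lro;
	struct mbuf *head = NULL;	/* head of collected packet chain */
	struct mbuf **tail = &head;	/* points at the last m_next slot */
	struct mbuf *mb;
	int i;

	for (i = 0; i < budget; i++) {
		mb = mydrv_rxeof(sc);
		if (mb == NULL)
			break;
		/* LRO the packet; anything flushed is appended at *tail */
		tcp_lro_queue_mbuf_append(lc, &tail, mb);
	}

	/* flush the remaining active entries onto the same chain */
	tcp_lro_flush_all_append(lc, &tail);

	/* hand the whole m_next-linked chain to the network layer at once */
	if (head != NULL)
		(*lc->ifp->if_input)(lc->ifp, head);
}

The apparent intent of the split into *_append functions plus tcp_lro_input_head() is to let a caller batch all packets completed during one poll and make a single if_input() hand-off, instead of one call per flushed LRO entry as the existing tcp_lro_flush()/tcp_lro_flush_all() wrappers do.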