Changeset View
Changeset View
Standalone View
Standalone View
sys/netinet/tcp_lro.c
Show First 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | |||||
#include <sys/socket.h> | #include <sys/socket.h> | ||||
#include <sys/socketvar.h> | #include <sys/socketvar.h> | ||||
#include <sys/sockbuf.h> | #include <sys/sockbuf.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <net/if.h> | #include <net/if.h> | ||||
#include <net/if_var.h> | #include <net/if_var.h> | ||||
#include <net/ethernet.h> | #include <net/ethernet.h> | ||||
#include <net/bpf.h> | |||||
#include <net/vnet.h> | #include <net/vnet.h> | ||||
#include <netinet/in_systm.h> | #include <netinet/in_systm.h> | ||||
#include <netinet/in.h> | #include <netinet/in.h> | ||||
#include <netinet/ip6.h> | #include <netinet/ip6.h> | ||||
#include <netinet/ip.h> | #include <netinet/ip.h> | ||||
#include <netinet/ip_var.h> | #include <netinet/ip_var.h> | ||||
#include <netinet/in_pcb.h> | #include <netinet/in_pcb.h> | ||||
#include <netinet6/in6_pcb.h> | #include <netinet6/in6_pcb.h> | ||||
#include <netinet/tcp.h> | #include <netinet/tcp.h> | ||||
#include <netinet/tcp_seq.h> | #include <netinet/tcp_seq.h> | ||||
#include <netinet/tcp_lro.h> | #include <netinet/tcp_lro.h> | ||||
#include <netinet/tcp_var.h> | #include <netinet/tcp_var.h> | ||||
#include <netinet/tcpip.h> | |||||
#include <netinet/tcp_hpts.h> | #include <netinet/tcp_hpts.h> | ||||
#include <netinet/tcp_log_buf.h> | #include <netinet/tcp_log_buf.h> | ||||
#include <netinet6/ip6_var.h> | #include <netinet6/ip6_var.h> | ||||
#include <machine/in_cksum.h> | #include <machine/in_cksum.h> | ||||
static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures"); | static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures"); | ||||
Show All 11 Lines | |||||
static long tcplro_stacks_wanting_mbufq = 0; | static long tcplro_stacks_wanting_mbufq = 0; | ||||
counter_u64_t tcp_inp_lro_direct_queue; | counter_u64_t tcp_inp_lro_direct_queue; | ||||
counter_u64_t tcp_inp_lro_wokeup_queue; | counter_u64_t tcp_inp_lro_wokeup_queue; | ||||
counter_u64_t tcp_inp_lro_compressed; | counter_u64_t tcp_inp_lro_compressed; | ||||
counter_u64_t tcp_inp_lro_single_push; | counter_u64_t tcp_inp_lro_single_push; | ||||
counter_u64_t tcp_inp_lro_locks_taken; | counter_u64_t tcp_inp_lro_locks_taken; | ||||
counter_u64_t tcp_inp_lro_sack_wake; | counter_u64_t tcp_inp_lro_sack_wake; | ||||
counter_u64_t tcp_extra_mbuf; | |||||
counter_u64_t tcp_would_have_but; | |||||
counter_u64_t tcp_comp_total; | |||||
counter_u64_t tcp_uncomp_total; | |||||
counter_u64_t tcp_csum_hardware; | |||||
counter_u64_t tcp_csum_hardware_w_ph; | |||||
counter_u64_t tcp_csum_software; | |||||
static unsigned tcp_lro_entries = TCP_LRO_ENTRIES; | static unsigned tcp_lro_entries = TCP_LRO_ENTRIES; | ||||
static int32_t hold_lock_over_compress = 0; | |||||
SYSCTL_INT(_net_inet_tcp_lro, OID_AUTO, hold_lock, CTLFLAG_RW, | |||||
&hold_lock_over_compress, 0, | |||||
"Do we hold the lock over the compress of mbufs?"); | |||||
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries, | SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries, | ||||
CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0, | CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0, | ||||
"default number of LRO entries"); | "default number of LRO entries"); | ||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD, | ||||
&tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport"); | &tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport"); | ||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD, | ||||
&tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts"); | &tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts"); | ||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD, | ||||
&tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport"); | &tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport"); | ||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, single, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, single, CTLFLAG_RD, | ||||
&tcp_inp_lro_single_push, "Number of lro's sent with single segment"); | &tcp_inp_lro_single_push, "Number of lro's sent with single segment"); | ||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD, | ||||
&tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken"); | &tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken"); | ||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, sackwakeups, CTLFLAG_RD, | SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, sackwakeups, CTLFLAG_RD, | ||||
&tcp_inp_lro_sack_wake, "Number of wakeups caused by sack/fin"); | &tcp_inp_lro_sack_wake, "Number of wakeups caused by sack/fin"); | ||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, extra_mbuf, CTLFLAG_RD, | |||||
&tcp_extra_mbuf, "Number of times we had an extra compressed ack dropped into the tp"); | |||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, would_have_but, CTLFLAG_RD, | |||||
&tcp_would_have_but, "Number of times we would have had an extra compressed but out of room"); | |||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, with_m_ackcmp, CTLFLAG_RD, | |||||
&tcp_comp_total, "Number of mbufs queued with M_ACKCMP flags set"); | |||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, without_m_ackcmp, CTLFLAG_RD, | |||||
&tcp_uncomp_total, "Number of mbufs queued without M_ACKCMP"); | |||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, csum_hw, CTLFLAG_RD, | |||||
&tcp_csum_hardware, "Number of checksums processed in hardware"); | |||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, csum_hw_ph, CTLFLAG_RD, | |||||
&tcp_csum_hardware_w_ph, "Number of checksums processed in hardware with pseudo header"); | |||||
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, csum_sw, CTLFLAG_RD, | |||||
&tcp_csum_software, "Number of checksums processed in software"); | |||||
void | void | ||||
tcp_lro_reg_mbufq(void) | tcp_lro_reg_mbufq(void) | ||||
{ | { | ||||
atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1); | atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1); | ||||
} | } | ||||
void | void | ||||
tcp_lro_dereg_mbufq(void) | tcp_lro_dereg_mbufq(void) | ||||
▲ Show 20 Lines • Show All 104 Lines • ▼ Show 20 Lines | case ETHERTYPE_IP: | ||||
ip4 = (struct ip *)(eh + 1); | ip4 = (struct ip *)(eh + 1); | ||||
th = (struct tcphdr *)(ip4 + 1); | th = (struct tcphdr *)(ip4 + 1); | ||||
break; | break; | ||||
#endif | #endif | ||||
} | } | ||||
return (th); | return (th); | ||||
} | } | ||||
static void | |||||
lro_free_mbuf_chain(struct mbuf *m) | |||||
{ | |||||
struct mbuf *save; | |||||
while (m) { | |||||
save = m->m_nextpkt; | |||||
m->m_nextpkt = NULL; | |||||
m_freem(m); | |||||
m = save; | |||||
} | |||||
} | |||||
void | void | ||||
tcp_lro_free(struct lro_ctrl *lc) | tcp_lro_free(struct lro_ctrl *lc) | ||||
{ | { | ||||
struct lro_entry *le; | struct lro_entry *le; | ||||
unsigned x; | unsigned x; | ||||
/* reset LRO free list */ | /* reset LRO free list */ | ||||
LIST_INIT(&lc->lro_free); | LIST_INIT(&lc->lro_free); | ||||
/* free active mbufs, if any */ | /* free active mbufs, if any */ | ||||
while ((le = LIST_FIRST(&lc->lro_active)) != NULL) { | while ((le = LIST_FIRST(&lc->lro_active)) != NULL) { | ||||
tcp_lro_active_remove(le); | tcp_lro_active_remove(le); | ||||
m_freem(le->m_head); | lro_free_mbuf_chain(le->m_head); | ||||
} | } | ||||
/* free hash table */ | /* free hash table */ | ||||
free(lc->lro_hash, M_LRO); | free(lc->lro_hash, M_LRO); | ||||
lc->lro_hash = NULL; | lc->lro_hash = NULL; | ||||
lc->lro_hashsz = 0; | lc->lro_hashsz = 0; | ||||
/* free mbuf array, if any */ | /* free mbuf array, if any */ | ||||
▲ Show 20 Lines • Show All 192 Lines • ▼ Show 20 Lines | if (tp->t_logstate != TCP_LOG_STATE_OFF) { | ||||
log.u_bbr.flex8 = frm; | log.u_bbr.flex8 = frm; | ||||
log.u_bbr.flex1 = tcp_data_len; | log.u_bbr.flex1 = tcp_data_len; | ||||
if (m) | if (m) | ||||
log.u_bbr.flex2 = m->m_pkthdr.len; | log.u_bbr.flex2 = m->m_pkthdr.len; | ||||
else | else | ||||
log.u_bbr.flex2 = 0; | log.u_bbr.flex2 = 0; | ||||
log.u_bbr.flex3 = le->append_cnt; | log.u_bbr.flex3 = le->append_cnt; | ||||
log.u_bbr.flex4 = le->p_len; | log.u_bbr.flex4 = le->p_len; | ||||
if (le->m_head) { | |||||
log.u_bbr.flex5 = le->m_head->m_pkthdr.len; | log.u_bbr.flex5 = le->m_head->m_pkthdr.len; | ||||
log.u_bbr.delRate = le->m_head->m_flags; | log.u_bbr.delRate = le->m_head->m_flags; | ||||
log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp; | log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp; | ||||
log.u_bbr.flex6 = lc->lro_length_lim; | } | ||||
log.u_bbr.flex7 = lc->lro_ackcnt_lim; | |||||
log.u_bbr.inflight = th_seq; | log.u_bbr.inflight = th_seq; | ||||
log.u_bbr.timeStamp = cts; | log.u_bbr.timeStamp = cts; | ||||
log.u_bbr.epoch = le->next_seq; | log.u_bbr.epoch = le->next_seq; | ||||
log.u_bbr.delivered = th_ack; | log.u_bbr.delivered = th_ack; | ||||
log.u_bbr.lt_epoch = le->ack_seq; | log.u_bbr.lt_epoch = le->ack_seq; | ||||
log.u_bbr.pacing_gain = th_win; | log.u_bbr.pacing_gain = th_win; | ||||
log.u_bbr.cwnd_gain = le->window; | log.u_bbr.cwnd_gain = le->window; | ||||
log.u_bbr.cur_del_rate = (uintptr_t)m; | log.u_bbr.cur_del_rate = (uintptr_t)m; | ||||
log.u_bbr.bw_inuse = (uintptr_t)le->m_head; | log.u_bbr.bw_inuse = (uintptr_t)le->m_head; | ||||
log.u_bbr.pkts_out = le->mbuf_cnt; /* Total mbufs added */ | log.u_bbr.pkts_out = le->mbuf_cnt; /* Total mbufs added */ | ||||
log.u_bbr.applimited = le->ulp_csum; | log.u_bbr.applimited = le->ulp_csum; | ||||
log.u_bbr.lost = le->mbuf_appended; | log.u_bbr.lost = le->mbuf_appended; | ||||
log.u_bbr.pkt_epoch = le->cmp_ack_cnt; | |||||
log.u_bbr.flex6 = tcp_tv_to_usectick(&lc->lro_last_flush); | |||||
if (in_epoch(net_epoch_preempt)) | |||||
log.u_bbr.inhpts = 1; | |||||
else | |||||
log.u_bbr.inhpts = 0; | |||||
TCP_LOG_EVENTP(tp, NULL, | TCP_LOG_EVENTP(tp, NULL, | ||||
&tp->t_inpcb->inp_socket->so_rcv, | &tp->t_inpcb->inp_socket->so_rcv, | ||||
&tp->t_inpcb->inp_socket->so_snd, | &tp->t_inpcb->inp_socket->so_snd, | ||||
TCP_LOG_LRO, 0, | TCP_LOG_LRO, 0, | ||||
0, &log, false, &tv); | 0, &log, false, &tv); | ||||
} | } | ||||
} | } | ||||
static void | static void | ||||
tcp_flush_out_le(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, int locked) | tcp_flush_out_le(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le) | ||||
{ | { | ||||
if (le->append_cnt > 1) { | if (le->append_cnt > 1) { | ||||
struct tcphdr *th; | struct tcphdr *th; | ||||
uint16_t p_len; | uint16_t p_len; | ||||
p_len = htons(le->p_len); | p_len = htons(le->p_len); | ||||
switch (le->eh_type) { | switch (le->eh_type) { | ||||
#ifdef INET6 | #ifdef INET6 | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | #endif | ||||
/* Update the TCP header checksum. */ | /* Update the TCP header checksum. */ | ||||
le->ulp_csum += p_len; | le->ulp_csum += p_len; | ||||
le->ulp_csum += tcp_lro_csum_th(th); | le->ulp_csum += tcp_lro_csum_th(th); | ||||
while (le->ulp_csum > 0xffff) | while (le->ulp_csum > 0xffff) | ||||
le->ulp_csum = (le->ulp_csum >> 16) + | le->ulp_csum = (le->ulp_csum >> 16) + | ||||
(le->ulp_csum & 0xffff); | (le->ulp_csum & 0xffff); | ||||
th->th_sum = (le->ulp_csum & 0xffff); | th->th_sum = (le->ulp_csum & 0xffff); | ||||
th->th_sum = ~th->th_sum; | th->th_sum = ~th->th_sum; | ||||
if (tp && locked) { | |||||
tcp_lro_log(tp, lc, le, NULL, 7, 0, 0, 0, 0); | |||||
} | } | ||||
} | |||||
/* | /* | ||||
* Break any chain, this is not set to NULL on the singleton | * Break any chain, this is not set to NULL on the singleton | ||||
* case m_nextpkt points to m_head. Other case set them | * case m_nextpkt points to m_head. Other case set them | ||||
* m_nextpkt to NULL in push_and_replace. | * m_nextpkt to NULL in push_and_replace. | ||||
*/ | */ | ||||
le->m_head->m_nextpkt = NULL; | le->m_head->m_nextpkt = NULL; | ||||
le->m_head->m_pkthdr.lro_nsegs = le->append_cnt; | le->m_head->m_pkthdr.lro_nsegs = le->append_cnt; | ||||
if (tp && locked) { | |||||
tcp_lro_log(tp, lc, le, le->m_head, 8, 0, 0, 0, 0); | |||||
} | |||||
(*lc->ifp->if_input)(lc->ifp, le->m_head); | (*lc->ifp->if_input)(lc->ifp, le->m_head); | ||||
lc->lro_queued += le->append_cnt; | lc->lro_queued += le->append_cnt; | ||||
} | } | ||||
static void | static void | ||||
tcp_set_le_to_m(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m) | tcp_set_le_to_m(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m) | ||||
{ | { | ||||
struct ether_header *eh; | struct ether_header *eh; | ||||
▲ Show 20 Lines • Show All 70 Lines • ▼ Show 20 Lines | #endif | ||||
le->append_cnt = 0; | le->append_cnt = 0; | ||||
le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len, | le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len, | ||||
~csum); | ~csum); | ||||
le->append_cnt++; | le->append_cnt++; | ||||
th->th_sum = csum; /* Restore checksum on first packet. */ | th->th_sum = csum; /* Restore checksum on first packet. */ | ||||
} | } | ||||
static void | static void | ||||
tcp_push_and_replace(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m, int locked) | tcp_push_and_replace(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m) | ||||
{ | { | ||||
/* | /* | ||||
* Push up the stack the current le and replace | * Push up the stack the current le and replace | ||||
* it with m. | * it with m. | ||||
*/ | */ | ||||
struct mbuf *msave; | struct mbuf *msave; | ||||
/* Grab off the next and save it */ | /* Grab off the next and save it */ | ||||
msave = le->m_head->m_nextpkt; | msave = le->m_head->m_nextpkt; | ||||
le->m_head->m_nextpkt = NULL; | le->m_head->m_nextpkt = NULL; | ||||
/* Now push out the old le entry */ | /* Now push out the old le entry */ | ||||
tcp_flush_out_le(tp, lc, le, locked); | tcp_flush_out_le(tp, lc, le); | ||||
/* | /* | ||||
* Now to replace the data properly in the le | * Now to replace the data properly in the le | ||||
* we have to reset the tcp header and | * we have to reset the tcp header and | ||||
* other fields. | * other fields. | ||||
*/ | */ | ||||
tcp_set_le_to_m(lc, le, m); | tcp_set_le_to_m(lc, le, m); | ||||
/* Restore the next list */ | /* Restore the next list */ | ||||
m->m_nextpkt = msave; | m->m_nextpkt = msave; | ||||
} | } | ||||
static void | static void | ||||
tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, int locked) | tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le) | ||||
{ | { | ||||
/* | /* | ||||
* Walk through the mbuf chain we | * Walk through the mbuf chain we | ||||
* have on tap and compress/condense | * have on tap and compress/condense | ||||
* as required. | * as required. | ||||
*/ | */ | ||||
uint32_t *ts_ptr; | uint32_t *ts_ptr; | ||||
struct mbuf *m; | struct mbuf *m; | ||||
Show All 9 Lines | tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le) | ||||
*/ | */ | ||||
again: | again: | ||||
m = le->m_head->m_nextpkt; | m = le->m_head->m_nextpkt; | ||||
if (m == NULL) { | if (m == NULL) { | ||||
/* Just the one left */ | /* Just the one left */ | ||||
return; | return; | ||||
} | } | ||||
if (m->m_flags & M_ACKCMP) | |||||
panic("LRO condense lc:%p le:%p reaches with mbuf:%p ackcmp", | |||||
lc, le, m); | |||||
th = tcp_lro_get_th(le, le->m_head); | th = tcp_lro_get_th(le, le->m_head); | ||||
KASSERT(th != NULL, | KASSERT(th != NULL, | ||||
("le:%p m:%p th comes back NULL?", le, le->m_head)); | ("le:%p m:%p th comes back NULL?", le, le->m_head)); | ||||
l = (th->th_off << 2); | l = (th->th_off << 2); | ||||
l -= sizeof(*th); | l -= sizeof(*th); | ||||
ts_ptr = (uint32_t *)(th + 1); | ts_ptr = (uint32_t *)(th + 1); | ||||
if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) || | if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) || | ||||
(*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16| | (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16| | ||||
TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) { | TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) { | ||||
/* | /* | ||||
* Its not the timestamp. We can't | * Its not the timestamp. We can't | ||||
* use this guy as the head. | * use this guy as the head. | ||||
*/ | */ | ||||
le->m_head->m_nextpkt = m->m_nextpkt; | le->m_head->m_nextpkt = m->m_nextpkt; | ||||
tcp_push_and_replace(tp, lc, le, m, locked); | tcp_push_and_replace(tp, lc, le, m); | ||||
goto again; | goto again; | ||||
} | } | ||||
if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) { | if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) { | ||||
/* | /* | ||||
* Make sure that previously seen segements/ACKs are delivered | * Make sure that previously seen segements/ACKs are delivered | ||||
* before this segment, e.g. FIN. | * before this segment, e.g. FIN. | ||||
*/ | */ | ||||
le->m_head->m_nextpkt = m->m_nextpkt; | le->m_head->m_nextpkt = m->m_nextpkt; | ||||
tcp_push_and_replace(tp, lc, le, m, locked); | KASSERT(((m->m_flags & M_LRO_EHDRSTRP) == 0) , | ||||
("tp:%p mbuf:%p has stripped ethernet flags:0x%x", tp, m, m->m_flags)); | |||||
tcp_push_and_replace(tp, lc, le, m); | |||||
goto again; | goto again; | ||||
} | } | ||||
while((m = le->m_head->m_nextpkt) != NULL) { | while((m = le->m_head->m_nextpkt) != NULL) { | ||||
/* | /* | ||||
* condense m into le, first | * condense m into le, first | ||||
* pull m out of the list. | * pull m out of the list. | ||||
*/ | */ | ||||
KASSERT(((m->m_flags & M_LRO_EHDRSTRP) == 0) , | |||||
("tp:%p mbuf:%p has stripped ethernet flags:0x%x", tp, m, m->m_flags)); | |||||
KASSERT(((m->m_flags & M_ACKCMP) == 0), | |||||
("LRO condense lc:%p le:%p reaches with mbuf:%p ackcmp", lc, le, m)); | |||||
le->m_head->m_nextpkt = m->m_nextpkt; | le->m_head->m_nextpkt = m->m_nextpkt; | ||||
m->m_nextpkt = NULL; | m->m_nextpkt = NULL; | ||||
/* Setup my data */ | /* Setup my data */ | ||||
tcp_data_len = m->m_pkthdr.lro_len; | tcp_data_len = m->m_pkthdr.lro_len; | ||||
th = tcp_lro_get_th(le, m); | th = tcp_lro_get_th(le, m); | ||||
KASSERT(th != NULL, | KASSERT(th != NULL, | ||||
("le:%p m:%p th comes back NULL?", le, m)); | ("le:%p m:%p th comes back NULL?", le, m)); | ||||
ts_ptr = (uint32_t *)(th + 1); | ts_ptr = (uint32_t *)(th + 1); | ||||
l = (th->th_off << 2); | l = (th->th_off << 2); | ||||
l -= sizeof(*th); | l -= sizeof(*th); | ||||
if (tp && locked) { | |||||
tcp_lro_log(tp, lc, le, m, 1, 0, 0, 0, 0); | |||||
} | |||||
if (le->append_cnt >= lc->lro_ackcnt_lim) { | if (le->append_cnt >= lc->lro_ackcnt_lim) { | ||||
if (tp && locked) { | tcp_push_and_replace(tp, lc, le, m); | ||||
tcp_lro_log(tp, lc, le, m, 2, 0, 0, 0, 0); | |||||
} | |||||
tcp_push_and_replace(tp, lc, le, m, locked); | |||||
goto again; | goto again; | ||||
} | } | ||||
if (le->p_len > (lc->lro_length_lim - tcp_data_len)) { | if (le->p_len > (lc->lro_length_lim - tcp_data_len)) { | ||||
/* Flush now if appending will result in overflow. */ | /* Flush now if appending will result in overflow. */ | ||||
if (tp && locked) { | tcp_push_and_replace(tp, lc, le, m); | ||||
tcp_lro_log(tp, lc, le, m, 3, tcp_data_len, 0, 0, 0); | |||||
} | |||||
tcp_push_and_replace(tp, lc, le, m, locked); | |||||
goto again; | goto again; | ||||
} | } | ||||
if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) || | if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) || | ||||
(*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16| | (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16| | ||||
TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) { | TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) { | ||||
/* | /* | ||||
* Maybe a sack in the new one? We need to | * Maybe a sack in the new one? We need to | ||||
* start all over after flushing the | * start all over after flushing the | ||||
* current le. We will go up to the beginning | * current le. We will go up to the beginning | ||||
* and flush it (calling the replace again possibly | * and flush it (calling the replace again possibly | ||||
* or just returning). | * or just returning). | ||||
*/ | */ | ||||
tcp_push_and_replace(tp, lc, le, m, locked); | tcp_push_and_replace(tp, lc, le, m); | ||||
goto again; | goto again; | ||||
} | } | ||||
if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) { | if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) { | ||||
tcp_push_and_replace(tp, lc, le, m, locked); | tcp_push_and_replace(tp, lc, le, m); | ||||
goto again; | goto again; | ||||
} | } | ||||
if (l != 0) { | if (l != 0) { | ||||
uint32_t tsval = ntohl(*(ts_ptr + 1)); | uint32_t tsval = ntohl(*(ts_ptr + 1)); | ||||
/* Make sure timestamp values are increasing. */ | /* Make sure timestamp values are increasing. */ | ||||
if (TSTMP_GT(le->tsval, tsval)) { | if (TSTMP_GT(le->tsval, tsval)) { | ||||
tcp_push_and_replace(tp, lc, le, m, locked); | tcp_push_and_replace(tp, lc, le, m); | ||||
goto again; | goto again; | ||||
} | } | ||||
le->tsval = tsval; | le->tsval = tsval; | ||||
le->tsecr = *(ts_ptr + 2); | le->tsecr = *(ts_ptr + 2); | ||||
} | } | ||||
/* Try to append the new segment. */ | /* Try to append the new segment. */ | ||||
if (__predict_false(ntohl(th->th_seq) != le->next_seq || | if (__predict_false(ntohl(th->th_seq) != le->next_seq || | ||||
(tcp_data_len == 0 && | (tcp_data_len == 0 && | ||||
le->ack_seq == th->th_ack && | le->ack_seq == th->th_ack && | ||||
le->window == th->th_win))) { | le->window == th->th_win))) { | ||||
/* Out of order packet or duplicate ACK. */ | /* Out of order packet or duplicate ACK. */ | ||||
if (tp && locked) { | tcp_push_and_replace(tp, lc, le, m); | ||||
tcp_lro_log(tp, lc, le, m, 4, tcp_data_len, | |||||
ntohl(th->th_seq), | |||||
th->th_ack, | |||||
th->th_win); | |||||
} | |||||
tcp_push_and_replace(tp, lc, le, m, locked); | |||||
goto again; | goto again; | ||||
} | } | ||||
if (tcp_data_len || SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) { | if (tcp_data_len || SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) { | ||||
le->next_seq += tcp_data_len; | le->next_seq += tcp_data_len; | ||||
le->ack_seq = th->th_ack; | le->ack_seq = th->th_ack; | ||||
le->window = th->th_win; | le->window = th->th_win; | ||||
} else if (th->th_ack == le->ack_seq) { | } else if (th->th_ack == le->ack_seq) { | ||||
le->window = WIN_MAX(le->window, th->th_win); | le->window = WIN_MAX(le->window, th->th_win); | ||||
} | } | ||||
csum_upd = m->m_pkthdr.lro_csum; | csum_upd = m->m_pkthdr.lro_csum; | ||||
le->ulp_csum += csum_upd; | le->ulp_csum += csum_upd; | ||||
if (tcp_data_len == 0) { | if (tcp_data_len == 0) { | ||||
le->append_cnt++; | le->append_cnt++; | ||||
le->mbuf_cnt--; | le->mbuf_cnt--; | ||||
if (tp && locked) { | |||||
tcp_lro_log(tp, lc, le, m, 5, tcp_data_len, | |||||
ntohl(th->th_seq), | |||||
th->th_ack, | |||||
th->th_win); | |||||
} | |||||
m_freem(m); | m_freem(m); | ||||
continue; | continue; | ||||
} | } | ||||
le->append_cnt++; | le->append_cnt++; | ||||
le->mbuf_appended++; | le->mbuf_appended++; | ||||
le->p_len += tcp_data_len; | le->p_len += tcp_data_len; | ||||
/* | /* | ||||
* Adjust the mbuf so that m_data points to the first byte of | * Adjust the mbuf so that m_data points to the first byte of | ||||
* the ULP payload. Adjust the mbuf to avoid complications and | * the ULP payload. Adjust the mbuf to avoid complications and | ||||
* append new segment to existing mbuf chain. | * append new segment to existing mbuf chain. | ||||
*/ | */ | ||||
m_adj(m, m->m_pkthdr.len - tcp_data_len); | m_adj(m, m->m_pkthdr.len - tcp_data_len); | ||||
if (tp && locked) { | |||||
tcp_lro_log(tp, lc, le, m, 6, tcp_data_len, | |||||
ntohl(th->th_seq), | |||||
th->th_ack, | |||||
th->th_win); | |||||
} | |||||
m_demote_pkthdr(m); | m_demote_pkthdr(m); | ||||
le->m_tail->m_next = m; | le->m_tail->m_next = m; | ||||
le->m_tail = m_last(m); | le->m_tail = m_last(m); | ||||
} | } | ||||
} | } | ||||
#ifdef TCPHPTS | #ifdef TCPHPTS | ||||
static void | static void | ||||
tcp_queue_pkts(struct tcpcb *tp, struct lro_entry *le) | tcp_queue_pkts(struct tcpcb *tp, struct lro_entry *le) | ||||
{ | { | ||||
if (tp->t_in_pkt == NULL) { | if (tp->t_in_pkt == NULL) { | ||||
/* Nothing yet there */ | /* Nothing yet there */ | ||||
tp->t_in_pkt = le->m_head; | tp->t_in_pkt = le->m_head; | ||||
tp->t_tail_pkt = le->m_last_mbuf; | tp->t_tail_pkt = le->m_last_mbuf; | ||||
} else { | } else { | ||||
/* Already some there */ | /* Already some there */ | ||||
tp->t_tail_pkt->m_nextpkt = le->m_head; | tp->t_tail_pkt->m_nextpkt = le->m_head; | ||||
tp->t_tail_pkt = le->m_last_mbuf; | tp->t_tail_pkt = le->m_last_mbuf; | ||||
} | } | ||||
le->m_head = NULL; | le->m_head = NULL; | ||||
le->m_last_mbuf = NULL; | le->m_last_mbuf = NULL; | ||||
} | } | ||||
#endif | |||||
void | static struct mbuf * | ||||
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le) | tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le, struct inpcb *inp) | ||||
{ | { | ||||
struct tcpcb *tp = NULL; | struct mbuf *m = NULL; | ||||
int locked = 0; | struct tcpcb *tp; | ||||
#ifdef TCPHPTS | |||||
tp = intotcpcb(inp); | |||||
if (tp) { | |||||
/* Look at the last mbuf if any in queue */ | |||||
if ((tp->t_tail_pkt) && | |||||
(tp->t_tail_pkt->m_flags & M_ACKCMP)) { | |||||
if (M_TRAILINGSPACE(tp->t_tail_pkt) >= sizeof(struct tcp_ackent)) { | |||||
tcp_lro_log(tp, lc, le, NULL, 23, 0, 0, 0, 0); | |||||
m = tp->t_tail_pkt; | |||||
} else { | |||||
if ((inp->inp_flags2 & INP_MBUF_L_ACKS) == 0) { | |||||
counter_u64_add(tcp_would_have_but, 1); | |||||
inp->inp_flags2 |= INP_MBUF_L_ACKS; | |||||
} | |||||
} | |||||
} | |||||
} | |||||
return (m); | |||||
} | |||||
static struct inpcb * | |||||
tcp_lro_lookup(struct lro_ctrl *lc, struct lro_entry *le) | |||||
{ | |||||
struct inpcb *inp = NULL; | struct inpcb *inp = NULL; | ||||
int need_wakeup = 0, can_queue = 0; | |||||
struct epoch_tracker et; | |||||
/* Now lets lookup the inp first */ | NET_EPOCH_ASSERT(); | ||||
CURVNET_SET(lc->ifp->if_vnet); | |||||
/* | |||||
* XXXRRS Currently the common input handler for | |||||
* mbuf queuing cannot handle VLAN Tagged. This needs | |||||
* to be fixed and the or condition removed (i.e. the | |||||
* common code should do the right lookup for the vlan | |||||
* tag and anything else that the vlan_input() does). | |||||
*/ | |||||
if ((tcplro_stacks_wanting_mbufq == 0) || (le->m_head->m_flags & M_VLANTAG)) | |||||
goto skip_lookup; | |||||
NET_EPOCH_ENTER(et); | |||||
switch (le->eh_type) { | switch (le->eh_type) { | ||||
#ifdef INET6 | #ifdef INET6 | ||||
case ETHERTYPE_IPV6: | case ETHERTYPE_IPV6: | ||||
inp = in6_pcblookup(&V_tcbinfo, &le->source_ip6, | inp = in6_pcblookup(&V_tcbinfo, &le->source_ip6, | ||||
le->source_port, &le->dest_ip6,le->dest_port, | le->source_port, &le->dest_ip6,le->dest_port, | ||||
INPLOOKUP_WLOCKPCB, | INPLOOKUP_WLOCKPCB, | ||||
lc->ifp); | lc->ifp); | ||||
break; | break; | ||||
#endif | #endif | ||||
#ifdef INET | #ifdef INET | ||||
case ETHERTYPE_IP: | case ETHERTYPE_IP: | ||||
inp = in_pcblookup(&V_tcbinfo, le->le_ip4->ip_src, | inp = in_pcblookup(&V_tcbinfo, le->le_ip4->ip_src, | ||||
le->source_port, le->le_ip4->ip_dst, le->dest_port, | le->source_port, le->le_ip4->ip_dst, le->dest_port, | ||||
INPLOOKUP_WLOCKPCB, | INPLOOKUP_WLOCKPCB, | ||||
lc->ifp); | lc->ifp); | ||||
break; | break; | ||||
#endif | #endif | ||||
} | } | ||||
NET_EPOCH_EXIT(et); | return (inp); | ||||
} | |||||
#endif | |||||
#ifdef NO | |||||
static void | |||||
stack_guard_prep(uint32_t *sg, int len) | |||||
{ | |||||
int i; | |||||
for (i = 0; i < len; i++) { | |||||
sg[i] = 0xdeadc0de; | |||||
} | |||||
} | |||||
static void | |||||
stack_guard_check(struct lro_ctrl *lc, struct lro_entry *le, uint32_t *sg, int len) | |||||
{ | |||||
int i; | |||||
for (i = 0; i < len; i++) { | |||||
if (sg[i] != 0xdeadc0de) | |||||
panic("Stack guard fails sg[%d] = 0x%x le:%p lc:%p sg:%p\n", | |||||
i, sg[i], le, lc, sg); | |||||
} | |||||
} | |||||
#endif | |||||
void | |||||
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le) | |||||
{ | |||||
struct tcpcb *tp = NULL; | |||||
#ifdef TCPHPTS | |||||
struct inpcb *inp = NULL; | |||||
int need_wakeup = 0, can_queue = 0; | |||||
/* Now lets lookup the inp first */ | |||||
CURVNET_SET(lc->ifp->if_vnet); | |||||
/* | |||||
* XXXRRS Currently the common input handler for | |||||
* mbuf queuing cannot handle VLAN Tagged. This needs | |||||
* to be fixed and the or condition removed (i.e. the | |||||
* common code should do the right lookup for the vlan | |||||
* tag and anything else that the vlan_input() does). | |||||
*/ | |||||
if (le->m_head == NULL) { | |||||
/* | |||||
* Everything was pushed up to the stack nothing to do | |||||
* but release the reference and be done. | |||||
*/ | |||||
if (le->inp) { | |||||
INP_WLOCK(le->inp); | |||||
if (in_pcbrele_wlocked(le->inp) == 0) { | |||||
/* | |||||
* We released it and still | |||||
* have the lock. | |||||
*/ | |||||
INP_WUNLOCK(le->inp); | |||||
} | |||||
} | |||||
goto done; | |||||
} | |||||
if ((tcplro_stacks_wanting_mbufq == 0) || (le->m_head->m_flags & M_VLANTAG)) | |||||
goto skip_lookup; | |||||
if (le->inp == NULL) { | |||||
le->inp = inp = tcp_lro_lookup(lc, le); | |||||
if (inp && ((inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) || | if (inp && ((inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) || | ||||
(inp->inp_flags2 & INP_FREED))) { | (inp->inp_flags2 & INP_FREED))) { | ||||
/* We don't want this guy */ | /* | ||||
* We can't present these to the inp since | |||||
* it will not support the stripped ethernet | |||||
* header that these have nor if a compressed | |||||
* ack is presnet. | |||||
*/ | |||||
INP_WUNLOCK(inp); | INP_WUNLOCK(inp); | ||||
inp = NULL; | lro_free_mbuf_chain(le->m_head); | ||||
goto done; | |||||
} | } | ||||
if (inp && (inp->inp_flags2 & INP_SUPPORTS_MBUFQ)) { | if ((le->flags & HAS_COMP_ENTRIES) && | ||||
((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)) { | |||||
/* | |||||
* It swapped to off, must be a stack | |||||
* switch. We need to ditch all the packets | |||||
* and the peer will just have to retransmit. | |||||
*/ | |||||
INP_WUNLOCK(inp); | |||||
lro_free_mbuf_chain(le->m_head); | |||||
goto done; | |||||
} | |||||
} else { | |||||
/* We have a reference on the inp lets lock and release it */ | |||||
inp = le->inp; | |||||
INP_WLOCK(inp); | |||||
if (in_pcbrele_wlocked(inp)) { | |||||
/* | |||||
* We lost the inp. We can't present these to the inp since | |||||
* it will not support the stripped off etherent header. | |||||
*/ | |||||
lro_free_mbuf_chain(le->m_head); | |||||
goto done; | |||||
} | |||||
if (inp && ((inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) || | |||||
(inp->inp_flags2 & INP_FREED))) { | |||||
/* | |||||
* We can't present these to the inp since | |||||
* it may not support them. | |||||
*/ | |||||
INP_WUNLOCK(inp); | |||||
lro_free_mbuf_chain(le->m_head); | |||||
goto done; | |||||
} | |||||
if ((le->flags & HAS_COMP_ENTRIES) && | |||||
((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)) { | |||||
/* | |||||
* It swapped to off, must be a stack | |||||
* switch. We need to ditch all the packets | |||||
* and the peer will just have to retransmit. | |||||
*/ | |||||
INP_WUNLOCK(inp); | |||||
lro_free_mbuf_chain(le->m_head); | |||||
goto done; | |||||
} | |||||
} | |||||
if (inp && ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) || | |||||
(inp->inp_flags2 & INP_MBUF_ACKCMP))) { | |||||
/* The transport supports mbuf queuing */ | /* The transport supports mbuf queuing */ | ||||
can_queue = 1; | can_queue = 1; | ||||
if (le->need_wakeup || | if (le->need_wakeup || | ||||
((inp->inp_in_input == 0) && | ((inp->inp_in_input == 0) && | ||||
((inp->inp_flags2 & INP_MBUF_QUEUE_READY) == 0))) { | ((inp->inp_flags2 & INP_MBUF_QUEUE_READY) == 0))) { | ||||
/* | /* | ||||
* Either the transport is off on a keep-alive | * Either the transport is off on a keep-alive | ||||
* (it has the queue_ready flag clear and its | * (it has the queue_ready flag clear and its | ||||
Show All 11 Lines | if (le->need_wakeup || | ||||
need_wakeup = 0; | need_wakeup = 0; | ||||
} | } | ||||
} | } | ||||
/* Do we need to be awoken due to lots of data or acks? */ | /* Do we need to be awoken due to lots of data or acks? */ | ||||
if ((le->tcp_tot_p_len >= lc->lro_length_lim) || | if ((le->tcp_tot_p_len >= lc->lro_length_lim) || | ||||
(le->mbuf_cnt >= lc->lro_ackcnt_lim)) | (le->mbuf_cnt >= lc->lro_ackcnt_lim)) | ||||
need_wakeup = 1; | need_wakeup = 1; | ||||
} | } | ||||
if (inp) { | if (inp) | ||||
tp = intotcpcb(inp); | tp = intotcpcb(inp); | ||||
locked = 1; | else | ||||
} else | |||||
tp = NULL; | tp = NULL; | ||||
if (can_queue) { | if (can_queue) { | ||||
counter_u64_add(tcp_inp_lro_direct_queue, 1); | counter_u64_add(tcp_inp_lro_direct_queue, 1); | ||||
tcp_lro_log(tp, lc, le, NULL, 22, need_wakeup, | tcp_lro_log(tp, lc, le, NULL, 22, need_wakeup, | ||||
inp->inp_flags2, inp->inp_in_input, le->need_wakeup); | inp->inp_flags2, inp->inp_in_input, le->need_wakeup); | ||||
tcp_queue_pkts(tp, le); | tcp_queue_pkts(tp, le); | ||||
if (need_wakeup) { | if (need_wakeup) { | ||||
/* | /* | ||||
* We must get the guy to wakeup via | * We must get the guy to wakeup via | ||||
* hpts. | * hpts. | ||||
*/ | */ | ||||
counter_u64_add(tcp_inp_lro_wokeup_queue, 1); | NET_EPOCH_ASSERT(); | ||||
if (le->need_wakeup) | if (le->need_wakeup == 2) { | ||||
/* | |||||
* The value 2 is set if the | |||||
* options are unrecognized i.e. | |||||
* not just a timestamp. So really | |||||
* sack is usually what it is but | |||||
* it might be some other option (CWR | |||||
* etc). | |||||
*/ | |||||
counter_u64_add(tcp_inp_lro_sack_wake, 1); | counter_u64_add(tcp_inp_lro_sack_wake, 1); | ||||
tcp_queue_to_input(inp); | |||||
} | } | ||||
counter_u64_add(tcp_inp_lro_wokeup_queue, 1); | |||||
if ((*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0)) { | |||||
inp = NULL; | |||||
} | } | ||||
if (inp && (hold_lock_over_compress == 0)) { | } | ||||
} | |||||
if (inp) { | |||||
/* Unlock it */ | /* Unlock it */ | ||||
locked = 0; | |||||
tp = NULL; | tp = NULL; | ||||
counter_u64_add(tcp_inp_lro_locks_taken, 1); | counter_u64_add(tcp_inp_lro_locks_taken, 1); | ||||
INP_WUNLOCK(inp); | INP_WUNLOCK(inp); | ||||
} | } | ||||
if (can_queue == 0) { | if (can_queue == 0) { | ||||
skip_lookup: | skip_lookup: | ||||
if (le->strip_cnt) { | |||||
/* | |||||
* We have stripped mbufs, the connection | |||||
* must have changed underneath us. You | |||||
* loose the packets as a penalty. | |||||
*/ | |||||
lro_free_mbuf_chain(le->m_head); | |||||
goto done; | |||||
} | |||||
#endif /* TCPHPTS */ | #endif /* TCPHPTS */ | ||||
/* Old fashioned lro method */ | /* Old fashioned lro method */ | ||||
if (le->m_head != le->m_last_mbuf) { | if (le->m_head != le->m_last_mbuf) { | ||||
counter_u64_add(tcp_inp_lro_compressed, 1); | counter_u64_add(tcp_inp_lro_compressed, 1); | ||||
tcp_lro_condense(tp, lc, le, locked); | tcp_lro_condense(tp, lc, le); | ||||
} else | } else | ||||
counter_u64_add(tcp_inp_lro_single_push, 1); | counter_u64_add(tcp_inp_lro_single_push, 1); | ||||
tcp_flush_out_le(tp, lc, le, locked); | tcp_flush_out_le(tp, lc, le); | ||||
#ifdef TCPHPTS | #ifdef TCPHPTS | ||||
} | } | ||||
if (inp && locked) { | done: | ||||
counter_u64_add(tcp_inp_lro_locks_taken, 1); | |||||
INP_WUNLOCK(inp); | |||||
} | |||||
CURVNET_RESTORE(); | CURVNET_RESTORE(); | ||||
#endif | #endif | ||||
lc->lro_flushed++; | lc->lro_flushed++; | ||||
bzero(le, sizeof(*le)); | bzero(le, sizeof(*le)); | ||||
LIST_INSERT_HEAD(&lc->lro_free, le, next); | LIST_INSERT_HEAD(&lc->lro_free, le, next); | ||||
} | } | ||||
#ifdef HAVE_INLINE_FLSLL | #ifdef HAVE_INLINE_FLSLL | ||||
▲ Show 20 Lines • Show All 90 Lines • ▼ Show 20 Lines | tcp_lro_flush_all(struct lro_ctrl *lc) | ||||
uint64_t seq; | uint64_t seq; | ||||
uint64_t nseq; | uint64_t nseq; | ||||
unsigned x; | unsigned x; | ||||
/* check if no mbufs to flush */ | /* check if no mbufs to flush */ | ||||
if (lc->lro_mbuf_count == 0) | if (lc->lro_mbuf_count == 0) | ||||
goto done; | goto done; | ||||
microuptime(&lc->lro_last_flush); | |||||
/* sort all mbufs according to stream */ | /* sort all mbufs according to stream */ | ||||
tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count); | tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count); | ||||
/* input data into LRO engine, stream by stream */ | /* input data into LRO engine, stream by stream */ | ||||
seq = 0; | seq = 0; | ||||
for (x = 0; x != lc->lro_mbuf_count; x++) { | for (x = 0; x != lc->lro_mbuf_count; x++) { | ||||
struct mbuf *mb; | struct mbuf *mb; | ||||
Show All 28 Lines | |||||
static void
lro_set_mtime(struct timeval *tv, struct timespec *ts)
{
	/* Convert a nanosecond-resolution timespec to a microsecond timeval. */
	time_t sec;
	long usec;

	sec = ts->tv_sec;
	usec = ts->tv_nsec / 1000;
	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
static void | |||||
build_ack_entry(struct tcp_ackent *ae, struct tcphdr *th, struct mbuf *m, uint16_t hdr_len, uint16_t iptos) | |||||
{ | |||||
/* | |||||
* Given a TCP ack, summarize it down into the small tcp | |||||
* ack entry. | |||||
*/ | |||||
u_char *cp; | |||||
KASSERT(((th->th_flags & ~(TH_ACK | TH_PUSH | TH_CWR | TH_ECE)) == 0), | |||||
("tcphdr:%p mbuf:%p has unallowed bits %x", th, m, th->th_flags)); | |||||
ae->timestamp = m->m_pkthdr.rcv_tstmp; | |||||
if (m->m_flags & M_TSTMP_LRO) | |||||
ae->flags = TSTMP_LRO; | |||||
else if (m->m_flags & M_TSTMP) | |||||
ae->flags = TSTMP_HDWR; | |||||
ae->seq = ntohl(th->th_seq); | |||||
ae->ack = ntohl(th->th_ack); | |||||
ae->flags |= th->th_flags; | |||||
if (hdr_len) { | |||||
/* We have a timestamp options get out the bits */ | |||||
cp = (u_char *)(th + 1); | |||||
/* Skip the two NOP's at the front */ | |||||
while (*cp == TCPOPT_NOP) | |||||
cp++; | |||||
KASSERT(((*cp == TCPOPT_TIMESTAMP) && | |||||
(cp[1] == TCPOLEN_TIMESTAMP)), | |||||
("At %p in tcphdr:%p options of %d not timestamp", | |||||
cp, th, hdr_len)); | |||||
bcopy((char *)cp + 2, | |||||
(char *)&ae->ts_value, sizeof(uint32_t)); | |||||
ae->ts_value = ntohl(ae->ts_value); | |||||
bcopy((char *)cp + 6, | |||||
(char *)&ae->ts_echo, sizeof(uint32_t)); | |||||
ae->ts_echo = ntohl(ae->ts_echo); | |||||
ae->flags |= HAS_TSTMP; | |||||
} | |||||
ae->win = ntohs(th->th_win); | |||||
ae->codepoint = iptos; | |||||
} | |||||
static struct mbuf * | |||||
do_bpf_and_csum(struct inpcb *inp, struct lro_ctrl *lc, struct lro_entry *le, | |||||
struct ether_header *eh, struct mbuf *m, int bpf_req, int locked) | |||||
{ | |||||
/* | |||||
* Do TCP/IP checksum and BPF tap for either ACK_CMP packets or | |||||
* MBUF QUEUE type packets. | |||||
*/ | |||||
struct tcphdr *th; | |||||
#ifdef INET6 | |||||
struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */ | |||||
#endif | |||||
#ifdef INET | |||||
struct ip *ip = NULL; /* Keep compiler happy. */ | |||||
#endif | |||||
uint16_t drop_hdrlen; | |||||
int etype, tlen; | |||||
uint8_t iptos; | |||||
/* Let the BPF see the packet */ | |||||
if (bpf_req && lc->ifp) | |||||
ETHER_BPF_MTAP(lc->ifp, m); | |||||
/* Get type and Trim off the ethernet header */ | |||||
m->m_pkthdr.lro_etype = etype = ntohs(eh->ether_type); | |||||
m_adj(m, sizeof(*eh)); | |||||
m->m_flags |= M_LRO_EHDRSTRP; | |||||
switch (etype) { | |||||
#ifdef INET6 | |||||
case ETHERTYPE_IPV6: | |||||
{ | |||||
if (m->m_len < (sizeof(*ip6) + sizeof(*th))) { | |||||
m = m_pullup(m, sizeof(*ip6) + sizeof(*th)); | |||||
if (m == NULL) { | |||||
TCPSTAT_INC(tcps_rcvshort); | |||||
m_freem(m); | |||||
return (NULL); | |||||
} | |||||
} | |||||
ip6 = (struct ip6_hdr *)(eh + 1); | |||||
th = (struct tcphdr *)(ip6 + 1); | |||||
tlen = ntohs(ip6->ip6_plen); | |||||
drop_hdrlen = sizeof(*ip6); | |||||
if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) { | |||||
if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { | |||||
counter_u64_add(tcp_csum_hardware_w_ph, 1); | |||||
th->th_sum = m->m_pkthdr.csum_data; | |||||
} else { | |||||
counter_u64_add(tcp_csum_hardware, 1); | |||||
th->th_sum = in6_cksum_pseudo(ip6, tlen, | |||||
IPPROTO_TCP, m->m_pkthdr.csum_data); | |||||
} | |||||
th->th_sum ^= 0xffff; | |||||
} else { | |||||
counter_u64_add(tcp_csum_software, 1); | |||||
th->th_sum = in6_cksum(m, IPPROTO_TCP, drop_hdrlen, tlen); | |||||
} | |||||
if (th->th_sum) { | |||||
TCPSTAT_INC(tcps_rcvbadsum); | |||||
if (locked) { | |||||
/* Log the bad news */ | |||||
struct tcpcb *tp = intotcpcb(inp); | |||||
tcp_lro_log(tp, lc, le, m, 13, tlen, m->m_pkthdr.csum_flags, drop_hdrlen, th->th_sum); | |||||
} | |||||
m_freem(m); | |||||
return (NULL); | |||||
} | |||||
/* | |||||
* Be proactive about unspecified IPv6 address in source. | |||||
* As we use all-zero to indicate unbounded/unconnected pcb, | |||||
* unspecified IPv6 address can be used to confuse us. | |||||
* | |||||
* Note that packets with unspecified IPv6 destination is | |||||
* already dropped in ip6_input. | |||||
*/ | |||||
if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { | |||||
/* XXX stat */ | |||||
m_freem(m); | |||||
return (NULL); | |||||
} | |||||
break; | |||||
} | |||||
#endif | |||||
#ifdef INET | |||||
case ETHERTYPE_IP: | |||||
{ | |||||
if (m->m_len < sizeof (struct tcpiphdr)) { | |||||
if ((m = m_pullup(m, sizeof (struct tcpiphdr))) | |||||
== NULL) { | |||||
TCPSTAT_INC(tcps_rcvshort); | |||||
m_freem(m); | |||||
return (NULL); | |||||
} | |||||
} | |||||
ip = (struct ip *)(eh + 1); | |||||
th = (struct tcphdr *)(ip + 1); | |||||
iptos = ip->ip_tos; | |||||
drop_hdrlen = sizeof(*ip); | |||||
tlen = ntohs(ip->ip_len) - sizeof(struct ip); | |||||
if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { | |||||
if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { | |||||
counter_u64_add(tcp_csum_hardware_w_ph, 1); | |||||
th->th_sum = m->m_pkthdr.csum_data; | |||||
} else { | |||||
counter_u64_add(tcp_csum_hardware, 1); | |||||
th->th_sum = in_pseudo(ip->ip_src.s_addr, | |||||
ip->ip_dst.s_addr, | |||||
htonl(m->m_pkthdr.csum_data + tlen + | |||||
IPPROTO_TCP)); | |||||
} | |||||
th->th_sum ^= 0xffff; | |||||
} else { | |||||
int len; | |||||
struct ipovly *ipov = (struct ipovly *)ip; | |||||
/* | |||||
* Checksum extended TCP header and data. | |||||
*/ | |||||
counter_u64_add(tcp_csum_software, 1); | |||||
len = drop_hdrlen + tlen; | |||||
bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); | |||||
ipov->ih_len = htons(tlen); | |||||
th->th_sum = in_cksum(m, len); | |||||
/* Reset length for SDT probes. */ | |||||
ip->ip_len = htons(len); | |||||
/* Reset TOS bits */ | |||||
ip->ip_tos = iptos; | |||||
/* Re-initialization for later version check */ | |||||
ip->ip_v = IPVERSION; | |||||
ip->ip_hl = sizeof(*ip) >> 2; | |||||
} | |||||
if (th->th_sum) { | |||||
TCPSTAT_INC(tcps_rcvbadsum); | |||||
if (locked) { | |||||
/* Log the bad news */ | |||||
struct tcpcb *tp = intotcpcb(inp); | |||||
tcp_lro_log(tp, lc, le, m, 13, tlen, m->m_pkthdr.csum_flags, drop_hdrlen, th->th_sum); | |||||
} | |||||
m_freem(m); | |||||
return (NULL); | |||||
} | |||||
break; | |||||
} | |||||
#endif | |||||
} /* end switch */ | |||||
return (m); | |||||
} | |||||
static int | static int | ||||
tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash) | tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash) | ||||
{ | { | ||||
struct lro_entry *le; | struct lro_entry *le; | ||||
struct ether_header *eh; | struct ether_header *eh; | ||||
#ifdef INET6 | #ifdef INET6 | ||||
struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */ | struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */ | ||||
#endif | #endif | ||||
#ifdef INET | #ifdef INET | ||||
struct ip *ip4 = NULL; /* Keep compiler happy. */ | struct ip *ip4 = NULL; /* Keep compiler happy. */ | ||||
#endif | #endif | ||||
struct tcphdr *th; | struct tcphdr *th; | ||||
void *l3hdr = NULL; /* Keep compiler happy. */ | void *l3hdr = NULL; /* Keep compiler happy. */ | ||||
uint32_t *ts_ptr; | uint32_t *ts_ptr; | ||||
tcp_seq seq; | tcp_seq seq; | ||||
int error, ip_len, l; | int error, ip_len, hdr_len, locked = 0; | ||||
uint16_t eh_type, tcp_data_len, need_flush; | uint16_t eh_type, tcp_data_len, need_flush; | ||||
#ifdef TCPHPTS | |||||
uint16_t iptos; | |||||
#endif | |||||
struct lro_head *bucket; | struct lro_head *bucket; | ||||
struct timespec arrv; | struct timespec arrv; | ||||
/* Clear the flags we may use to communicate with TCP */ | |||||
m->m_flags &= ~(M_ACKCMP|M_LRO_EHDRSTRP); | |||||
/* We expect a contiguous header [eh, ip, tcp]. */ | /* We expect a contiguous header [eh, ip, tcp]. */ | ||||
if ((m->m_flags & (M_TSTMP_LRO|M_TSTMP)) == 0) { | if ((m->m_flags & (M_TSTMP_LRO|M_TSTMP)) == 0) { | ||||
/* If no hardware or arrival stamp on the packet add arrival */ | /* If no hardware or arrival stamp on the packet add arrival */ | ||||
nanouptime(&arrv); | nanouptime(&arrv); | ||||
m->m_pkthdr.rcv_tstmp = (arrv.tv_sec * 1000000000) + arrv.tv_nsec; | m->m_pkthdr.rcv_tstmp = (arrv.tv_sec * 1000000000) + arrv.tv_nsec; | ||||
m->m_flags |= M_TSTMP_LRO; | m->m_flags |= M_TSTMP_LRO; | ||||
} | } | ||||
eh = mtod(m, struct ether_header *); | eh = mtod(m, struct ether_header *); | ||||
Show All 9 Lines | if (V_ip6_forwarding != 0) { | ||||
return (TCP_LRO_CANNOT); | return (TCP_LRO_CANNOT); | ||||
} | } | ||||
CURVNET_RESTORE(); | CURVNET_RESTORE(); | ||||
l3hdr = ip6 = (struct ip6_hdr *)(eh + 1); | l3hdr = ip6 = (struct ip6_hdr *)(eh + 1); | ||||
error = tcp_lro_rx_ipv6(lc, m, ip6, &th); | error = tcp_lro_rx_ipv6(lc, m, ip6, &th); | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
tcp_data_len = ntohs(ip6->ip6_plen); | tcp_data_len = ntohs(ip6->ip6_plen); | ||||
#ifdef TCPHPTS | |||||
iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; | |||||
#endif | |||||
ip_len = sizeof(*ip6) + tcp_data_len; | ip_len = sizeof(*ip6) + tcp_data_len; | ||||
break; | break; | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef INET | #ifdef INET | ||||
case ETHERTYPE_IP: | case ETHERTYPE_IP: | ||||
{ | { | ||||
CURVNET_SET(lc->ifp->if_vnet); | CURVNET_SET(lc->ifp->if_vnet); | ||||
if (V_ipforwarding != 0) { | if (V_ipforwarding != 0) { | ||||
/* XXX-BZ stats but changing lro_ctrl is a problem. */ | /* XXX-BZ stats but changing lro_ctrl is a problem. */ | ||||
CURVNET_RESTORE(); | CURVNET_RESTORE(); | ||||
return (TCP_LRO_CANNOT); | return (TCP_LRO_CANNOT); | ||||
} | } | ||||
CURVNET_RESTORE(); | CURVNET_RESTORE(); | ||||
l3hdr = ip4 = (struct ip *)(eh + 1); | l3hdr = ip4 = (struct ip *)(eh + 1); | ||||
error = tcp_lro_rx_ipv4(lc, m, ip4, &th); | error = tcp_lro_rx_ipv4(lc, m, ip4, &th); | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
ip_len = ntohs(ip4->ip_len); | ip_len = ntohs(ip4->ip_len); | ||||
#ifdef TCPHPTS | |||||
iptos = ip4->ip_tos; | |||||
#endif | |||||
tcp_data_len = ip_len - sizeof(*ip4); | tcp_data_len = ip_len - sizeof(*ip4); | ||||
break; | break; | ||||
} | } | ||||
#endif | #endif | ||||
/* XXX-BZ what happens in case of VLAN(s)? */ | /* XXX-BZ what happens in case of VLAN(s)? */ | ||||
default: | default: | ||||
return (TCP_LRO_NOT_SUPPORTED); | return (TCP_LRO_NOT_SUPPORTED); | ||||
} | } | ||||
/* | /* | ||||
* If the frame is padded beyond the end of the IP packet, then we must | * If the frame is padded beyond the end of the IP packet, then we must | ||||
* trim the extra bytes off. | * trim the extra bytes off. | ||||
*/ | */ | ||||
l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len); | hdr_len = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len); | ||||
if (l != 0) { | if (hdr_len != 0) { | ||||
if (l < 0) | if (hdr_len < 0) | ||||
/* Truncated packet. */ | /* Truncated packet. */ | ||||
return (TCP_LRO_CANNOT); | return (TCP_LRO_CANNOT); | ||||
m_adj(m, -l); | m_adj(m, -hdr_len); | ||||
} | } | ||||
/* | /* | ||||
* Check TCP header constraints. | * Check TCP header constraints. | ||||
*/ | */ | ||||
hdr_len = (th->th_off << 2); | |||||
ts_ptr = (uint32_t *)(th + 1); | |||||
tcp_data_len -= hdr_len; | |||||
hdr_len -= sizeof(*th); | |||||
if (th->th_flags & TH_SYN) | if (th->th_flags & TH_SYN) | ||||
return (TCP_LRO_CANNOT); | return (TCP_LRO_CANNOT); | ||||
if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) | if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) { | ||||
need_flush = 1; | need_flush = 1; | ||||
else | } else | ||||
need_flush = 0; | need_flush = 0; | ||||
l = (th->th_off << 2); | if (hdr_len != 0 && (__predict_false(hdr_len != TCPOLEN_TSTAMP_APPA) || | ||||
ts_ptr = (uint32_t *)(th + 1); | |||||
tcp_data_len -= l; | |||||
l -= sizeof(*th); | |||||
if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) || | |||||
(*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16| | (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16| | ||||
TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) { | TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) { | ||||
/* | /* | ||||
* We have an option besides Timestamps, maybe | * We have an option besides Timestamps, maybe | ||||
* it is a sack (most likely) which means we | * it is a sack (most likely) which means we | ||||
* will probably need to wake up a sleeper (if | * will probably need to wake up a sleeper (if | ||||
* the guy does queueing). | * the guy does queueing). | ||||
*/ | */ | ||||
need_flush = 2; | need_flush = 2; | ||||
} | } | ||||
/* If the driver did not pass in the checksum, set it now. */ | /* If the driver did not pass in the checksum, set it now. */ | ||||
if (csum == 0x0000) | if (csum == 0x0000) | ||||
csum = th->th_sum; | csum = th->th_sum; | ||||
seq = ntohl(th->th_seq); | seq = ntohl(th->th_seq); | ||||
if (!use_hash) { | if (!use_hash) { | ||||
bucket = &lc->lro_hash[0]; | bucket = &lc->lro_hash[0]; | ||||
} else if (M_HASHTYPE_ISHASH(m)) { | } else if (M_HASHTYPE_ISHASH(m)) { | ||||
bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz]; | bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz]; | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | #endif | ||||
if (tcp_data_len || SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq)) || | if (tcp_data_len || SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq)) || | ||||
(th->th_ack == le->ack_seq)) { | (th->th_ack == le->ack_seq)) { | ||||
m->m_pkthdr.lro_len = tcp_data_len; | m->m_pkthdr.lro_len = tcp_data_len; | ||||
} else { | } else { | ||||
/* no data and old ack */ | /* no data and old ack */ | ||||
m_freem(m); | m_freem(m); | ||||
return (0); | return (0); | ||||
} | } | ||||
#ifdef TCPHPTS | |||||
if ((tcplro_stacks_wanting_mbufq == 0) || (m->m_flags & M_VLANTAG)) | |||||
goto skip_lookup_a; | |||||
if (le->inp == NULL) { | |||||
CURVNET_SET(lc->ifp->if_vnet); | |||||
le->inp = tcp_lro_lookup(lc, le); | |||||
if (le->inp) { | |||||
in_pcbref(le->inp); | |||||
locked = 1; | |||||
} | |||||
CURVNET_RESTORE(); | |||||
} else if (le->inp) { | |||||
INP_WLOCK(le->inp); | |||||
locked = 1; | |||||
} | |||||
if (locked && ((le->inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) || | |||||
(le->inp->inp_flags2 & INP_FREED))) { | |||||
/* We can't present these to the inp since | |||||
* its dead Jim. | |||||
*/ | |||||
int ret; | |||||
ret = in_pcbrele_wlocked(le->inp); | |||||
if (ret == 0) | |||||
INP_WUNLOCK(le->inp); | |||||
le->inp = NULL; | |||||
locked = 0; | |||||
tcp_lro_active_remove(le); | |||||
if (le->strip_cnt && le->m_head) { | |||||
/* | |||||
* If we have any stripped packets we | |||||
* just dump the whole chain. The | |||||
* tcp_lro_flush code knows how | |||||
* to handle things when le->m_head is NULL | |||||
* and even le->inp is NULL. | |||||
*/ | |||||
lro_free_mbuf_chain(le->m_head); | |||||
le->m_head = NULL; | |||||
} | |||||
tcp_lro_flush(lc, le); | |||||
return (TCP_LRO_CANNOT); | |||||
} | |||||
/* See if it has been switched on */ | |||||
if (le->inp && (le->inp->inp_flags2 & INP_MBUF_ACKCMP)) | |||||
le->flags |= CAN_USE_ACKCMP; | |||||
if ((need_flush == 1) && | |||||
le->inp && | |||||
(le->inp->inp_flags2 & (INP_MBUF_ACKCMP|INP_SUPPORTS_MBUFQ)) && | |||||
((th->th_flags & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) == 0)) { | |||||
/* | |||||
* For MBUF queuing or ACKCMP we can accept ECE and CWR | |||||
* since each packet is sent to the transport (or the | |||||
* compressed state including the ECN bits). | |||||
*/ | |||||
need_flush = 0; | |||||
} | |||||
skip_lookup_a: | |||||
#endif | |||||
if (need_flush) | if (need_flush) | ||||
le->need_wakeup = need_flush; | le->need_wakeup = need_flush; | ||||
/* Save of the data only csum */ | /* Save of the data only csum */ | ||||
m->m_pkthdr.rcvif = lc->ifp; | m->m_pkthdr.rcvif = lc->ifp; | ||||
m->m_pkthdr.lro_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, | m->m_pkthdr.lro_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, | ||||
tcp_data_len, ~csum); | tcp_data_len, ~csum); | ||||
th->th_sum = csum; /* Restore checksum */ | th->th_sum = csum; /* Restore checksum */ | ||||
#ifdef TCPHPTS | |||||
if ((le->flags & CAN_USE_ACKCMP) || | |||||
(le->inp && | |||||
(le->inp->inp_flags2 & (INP_MBUF_ACKCMP|INP_SUPPORTS_MBUFQ)))) { | |||||
/* | |||||
* Mbuf queued and ACKCMP packets have their BPF and csum | |||||
* done here in LRO. They will still end up looking at the | |||||
* headers and such (IP/TCP) but we don't want to proceed | |||||
* with any bad csum! | |||||
*/ | |||||
m = do_bpf_and_csum(le->inp, lc, le, eh, m, bpf_peers_present(lc->ifp->if_bpf), locked); | |||||
if (m == NULL) { | |||||
/* Bad csum, accounting already done */ | |||||
if (locked) { | |||||
INP_WUNLOCK(le->inp); | |||||
} | |||||
return (0); | |||||
} | |||||
le->strip_cnt++; | |||||
} | |||||
if ((need_flush == 0) && | |||||
(th->th_flags & TH_ACK) && | |||||
(tcp_data_len == 0) && | |||||
(le->flags & CAN_USE_ACKCMP)) { | |||||
/* | |||||
* Ok this is a pure ack lets find out if our | |||||
* last packet already has one of these. | |||||
*/ | |||||
struct mbuf *nm; | |||||
struct tcp_ackent *ack_ent; | |||||
int idx; | |||||
INP_WLOCK_ASSERT(le->inp); | |||||
if (le->m_head == NULL) { | |||||
/* Ok can we still use the end of the inp's? */ | |||||
nm = tcp_lro_get_last_if_ackcmp(lc, le, le->inp); | |||||
if (nm == NULL) { | |||||
/* gone or full */ | |||||
goto new_one; | |||||
} | |||||
/* We can add in to the one on the tail */ | |||||
ack_ent = mtod(nm, struct tcp_ackent *); | |||||
idx = (nm->m_len / sizeof(struct tcp_ackent)); | |||||
build_ack_entry(&ack_ent[idx], th, m, hdr_len, iptos); | |||||
/* Bump the size of both pkt-hdr and len */ | |||||
nm->m_len += sizeof(struct tcp_ackent); | |||||
nm->m_pkthdr.len += sizeof(struct tcp_ackent); | |||||
le->ack_seq = th->th_ack; | |||||
le->window = th->th_win; | |||||
m_freem(m); | |||||
counter_u64_add(tcp_extra_mbuf, 1); | |||||
INP_WUNLOCK(le->inp); | |||||
return (0); | |||||
} else if (le->m_last_mbuf->m_flags & M_ACKCMP) { | |||||
/* Yes we might be able to be appended to */ | |||||
nm = le->m_last_mbuf; | |||||
if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) { | |||||
if ((le->inp->inp_flags2 & INP_MBUF_L_ACKS) == 0) { | |||||
counter_u64_add(tcp_would_have_but, 1); | |||||
le->inp->inp_flags2 |= INP_MBUF_L_ACKS; | |||||
} | |||||
goto new_one; | |||||
} | |||||
/* we have room */ | |||||
ack_ent = mtod(nm, struct tcp_ackent *); | |||||
idx = (nm->m_len / sizeof(struct tcp_ackent)); | |||||
build_ack_entry(&ack_ent[idx], th, m, hdr_len, iptos); | |||||
/* Bump the size of both pkt-hdr and len */ | |||||
nm->m_len += sizeof(struct tcp_ackent); | |||||
nm->m_pkthdr.len += sizeof(struct tcp_ackent); | |||||
m_freem(m); | |||||
le->flags |= HAS_COMP_ENTRIES; | |||||
le->cmp_ack_cnt++; | |||||
goto compressed; | |||||
} else { | |||||
/* Nope we need a new one */ | |||||
new_one: | |||||
if (le->inp->inp_flags2 & INP_MBUF_L_ACKS) | |||||
nm = m_getcl(M_NOWAIT, MT_DATA, (M_ACKCMP|M_PKTHDR)); | |||||
else { | |||||
nm = m_gethdr(M_NOWAIT, MT_DATA); | |||||
nm->m_flags |= M_ACKCMP; | |||||
} | |||||
if (nm) { | |||||
nm->m_pkthdr.rcvif = lc->ifp; | |||||
ack_ent = mtod(nm, struct tcp_ackent *); | |||||
build_ack_entry(ack_ent, th, m, hdr_len, iptos); | |||||
m_freem(m); | |||||
m = nm; | |||||
m->m_pkthdr.len = m->m_len = sizeof(struct tcp_ackent); | |||||
le->flags |= HAS_COMP_ENTRIES; | |||||
le->cmp_ack_cnt++; | |||||
} | |||||
/* We fall through and append */ | |||||
} | |||||
} | |||||
if (m->m_flags & M_ACKCMP) { | |||||
counter_u64_add(tcp_comp_total, 1); | |||||
} else { | |||||
counter_u64_add(tcp_uncomp_total, 1); | |||||
} | |||||
#endif | |||||
/* Save off the tail I am appending too (prev) */ | /* Save off the tail I am appending too (prev) */ | ||||
m->m_nextpkt = NULL; | |||||
if (le->m_head == NULL) { | |||||
/* | |||||
* Case where we wer chaining off the inp | |||||
* and now no-longer can. | |||||
*/ | |||||
le->m_head = m; | |||||
le->m_tail = m_last(m); | |||||
le->m_last_mbuf = m; | |||||
le->m_prev_last = NULL; | |||||
} else { | |||||
le->m_prev_last = le->m_last_mbuf; | le->m_prev_last = le->m_last_mbuf; | ||||
/* Mark me in the last spot */ | /* Mark me in the last spot */ | ||||
le->m_last_mbuf->m_nextpkt = m; | le->m_last_mbuf->m_nextpkt = m; | ||||
/* Now set the tail to me */ | /* Now set the tail to me */ | ||||
le->m_last_mbuf = m; | le->m_last_mbuf = m; | ||||
le->tcp_tot_p_len += tcp_data_len; | |||||
} | |||||
#ifdef TCPHPTS | |||||
compressed: | |||||
#endif | |||||
le->mbuf_cnt++; | le->mbuf_cnt++; | ||||
m->m_nextpkt = NULL; | |||||
/* Add to the total size of data */ | /* Add to the total size of data */ | ||||
le->tcp_tot_p_len += tcp_data_len; | |||||
lro_set_mtime(&le->mtime, &arrv); | lro_set_mtime(&le->mtime, &arrv); | ||||
if (locked) | |||||
INP_WUNLOCK(le->inp); | |||||
return (0); | return (0); | ||||
} | } | ||||
/* Try to find an empty slot. */ | /* Try to find an empty slot. */ | ||||
if (LIST_EMPTY(&lc->lro_free)) | if (LIST_EMPTY(&lc->lro_free)) | ||||
return (TCP_LRO_NO_ENTRIES); | return (TCP_LRO_NO_ENTRIES); | ||||
/* Start a new segment chain. */ | /* Start a new segment chain. */ | ||||
le = LIST_FIRST(&lc->lro_free); | le = LIST_FIRST(&lc->lro_free); | ||||
Show All 22 Lines | case ETHERTYPE_IP: | ||||
break; | break; | ||||
#endif | #endif | ||||
} | } | ||||
le->source_port = th->th_sport; | le->source_port = th->th_sport; | ||||
le->dest_port = th->th_dport; | le->dest_port = th->th_dport; | ||||
le->next_seq = seq + tcp_data_len; | le->next_seq = seq + tcp_data_len; | ||||
le->ack_seq = th->th_ack; | le->ack_seq = th->th_ack; | ||||
le->window = th->th_win; | le->window = th->th_win; | ||||
if (l != 0) { | if (hdr_len != 0) { | ||||
le->timestamp = 1; | le->timestamp = 1; | ||||
le->tsval = ntohl(*(ts_ptr + 1)); | le->tsval = ntohl(*(ts_ptr + 1)); | ||||
le->tsecr = *(ts_ptr + 2); | le->tsecr = *(ts_ptr + 2); | ||||
} | } | ||||
KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n", | KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n", | ||||
__func__, le, le->ulp_csum)); | __func__, le, le->ulp_csum)); | ||||
le->append_cnt = 0; | le->append_cnt = 0; | ||||
le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len, | le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len, | ||||
~csum); | ~csum); | ||||
le->append_cnt++; | le->append_cnt++; | ||||
th->th_sum = csum; /* Restore checksum */ | th->th_sum = csum; /* Restore checksum */ | ||||
le->m_head = m; | |||||
m->m_pkthdr.rcvif = lc->ifp; | m->m_pkthdr.rcvif = lc->ifp; | ||||
m->m_pkthdr.lro_len = tcp_data_len; | |||||
le->mbuf_cnt = 1; | le->mbuf_cnt = 1; | ||||
le->cmp_ack_cnt = 0; | |||||
le->flags = 0; | |||||
#ifdef TCPHPTS | |||||
/* | |||||
* Lets find out if we can use the mbuf-compression. | |||||
*/ | |||||
if ((tcplro_stacks_wanting_mbufq == 0) || (m->m_flags & M_VLANTAG)) | |||||
goto skip_lookup_b; | |||||
CURVNET_SET(lc->ifp->if_vnet); | |||||
le->inp = tcp_lro_lookup(lc, le); | |||||
if (le->inp && ((le->inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) || | |||||
(le->inp->inp_flags2 & INP_FREED))) { | |||||
INP_WUNLOCK(le->inp); | |||||
le->inp = NULL; | |||||
} | |||||
if (le->inp) { | |||||
if ((need_flush == 1) && | |||||
(le->inp->inp_flags2 & (INP_MBUF_ACKCMP|INP_SUPPORTS_MBUFQ)) && | |||||
((th->th_flags & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) == 0)) { | |||||
/* | |||||
* For MBUF queuing or ACKCMP we can accept ECE and CWR | |||||
* since each packet is sent to the transport (or the | |||||
* compressed state including the ECN bits). | |||||
*/ | |||||
need_flush = 0; | |||||
} | |||||
locked = 1; | |||||
if (le->inp->inp_flags2 & INP_MBUF_ACKCMP) | |||||
le->flags |= CAN_USE_ACKCMP; | |||||
if ((le->flags & CAN_USE_ACKCMP) || | |||||
(le->inp && | |||||
(le->inp->inp_flags2 & (INP_MBUF_ACKCMP|INP_SUPPORTS_MBUFQ)))) { | |||||
m = do_bpf_and_csum(le->inp, lc, le, eh, m, bpf_peers_present(lc->ifp->if_bpf), locked); | |||||
if (m == NULL) { | |||||
/* Bad csum, accounting already done */ | |||||
INP_WUNLOCK(le->inp); | |||||
le->inp = NULL; | |||||
return (0); | |||||
} | |||||
le->strip_cnt++; | |||||
} | |||||
in_pcbref(le->inp); | |||||
} | |||||
CURVNET_RESTORE(); | |||||
if ((need_flush == 0) && | |||||
(th->th_flags & TH_ACK) && | |||||
(tcp_data_len == 0) && | |||||
(le->flags & CAN_USE_ACKCMP)) { | |||||
/* Ok this is a pure ack lets build our special COMPRESS mbuf */ | |||||
struct mbuf *nm; | |||||
struct tcp_ackent *ack_ent; | |||||
/* Question what is going on with the last mbuf on the inp queue, can we use it? */ | |||||
INP_WLOCK_ASSERT(le->inp); | |||||
nm = tcp_lro_get_last_if_ackcmp(lc, le, le->inp); | |||||
if (nm) { | |||||
int idx; | |||||
/* We can add in to the one on the tail */ | |||||
ack_ent = mtod(nm, struct tcp_ackent *); | |||||
idx = (nm->m_len / sizeof(struct tcp_ackent)); | |||||
build_ack_entry(&ack_ent[idx], th, m, hdr_len, iptos); | |||||
nm->m_len += sizeof(struct tcp_ackent); | |||||
nm->m_pkthdr.len += sizeof(struct tcp_ackent); | |||||
le->ack_seq = th->th_ack; | |||||
le->window = th->th_win; | |||||
m_freem(m); | |||||
counter_u64_add(tcp_extra_mbuf, 1); | |||||
le->m_head = NULL; | |||||
le->m_tail = NULL; | |||||
le->m_last_mbuf = NULL; | |||||
le->m_prev_last = NULL; | |||||
INP_WUNLOCK(le->inp); | |||||
return (0); | |||||
} else { | |||||
if (le->inp->inp_flags2 & INP_MBUF_L_ACKS) | |||||
nm = m_getcl(M_NOWAIT, MT_DATA, (M_ACKCMP|M_PKTHDR)); | |||||
else { | |||||
nm = m_gethdr(M_NOWAIT, MT_DATA); | |||||
nm->m_flags |= M_ACKCMP; | |||||
} | |||||
if (nm) { | |||||
nm->m_pkthdr.rcvif = lc->ifp; | |||||
ack_ent = mtod(nm, struct tcp_ackent *); | |||||
build_ack_entry(ack_ent, th, m, hdr_len, iptos); | |||||
m_freem(m); | |||||
m = nm; | |||||
m->m_pkthdr.len = m->m_len = sizeof(struct tcp_ackent); | |||||
le->flags |= HAS_COMP_ENTRIES; | |||||
le->cmp_ack_cnt++; | |||||
} | |||||
} | |||||
} | |||||
if (m->m_flags & M_ACKCMP) { | |||||
counter_u64_add(tcp_comp_total, 1); | |||||
} else { | |||||
counter_u64_add(tcp_uncomp_total, 1); | |||||
} | |||||
skip_lookup_b: | |||||
#endif | |||||
if (need_flush) | if (need_flush) | ||||
le->need_wakeup = need_flush; | le->need_wakeup = need_flush; | ||||
else | else | ||||
le->need_wakeup = 0; | le->need_wakeup = 0; | ||||
m->m_nextpkt = NULL; | |||||
le->m_head = m; | |||||
le->m_tail = m_last(m); | le->m_tail = m_last(m); | ||||
le->m_last_mbuf = m; | le->m_last_mbuf = m; | ||||
m->m_nextpkt = NULL; | |||||
le->m_prev_last = NULL; | le->m_prev_last = NULL; | ||||
/* | /* | ||||
* We keep the total size here for cross checking when we may need | * We keep the total size here for cross checking when we may need | ||||
* to flush/wakeup in the MBUF_QUEUE case. | * to flush/wakeup in the MBUF_QUEUE case. | ||||
*/ | */ | ||||
le->tcp_tot_p_len = tcp_data_len; | le->tcp_tot_p_len = tcp_data_len; | ||||
m->m_pkthdr.lro_len = tcp_data_len; | if (locked) | ||||
INP_WUNLOCK(le->inp); | |||||
return (0); | return (0); | ||||
} | } | ||||
int | int | ||||
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum) | tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum) | ||||
{ | { | ||||
return tcp_lro_rx2(lc, m, csum, 1); | return tcp_lro_rx2(lc, m, csum, 1); | ||||
▲ Show 20 Lines • Show All 46 Lines • Show Last 20 Lines |