Changeset View
Standalone View
sys/netinet/ip_input.c
Show All 31 Lines | |||||
#include <sys/cdefs.h> | #include <sys/cdefs.h> | ||||
__FBSDID("$FreeBSD$"); | __FBSDID("$FreeBSD$"); | ||||
#include "opt_bootp.h" | #include "opt_bootp.h" | ||||
#include "opt_ipfw.h" | #include "opt_ipfw.h" | ||||
#include "opt_ipstealth.h" | #include "opt_ipstealth.h" | ||||
#include "opt_ipsec.h" | #include "opt_ipsec.h" | ||||
#include "opt_route.h" | #include "opt_route.h" | ||||
#include "opt_rss.h" | |||||
#include <sys/param.h> | #include <sys/param.h> | ||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#include <sys/mbuf.h> | #include <sys/mbuf.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/domain.h> | #include <sys/domain.h> | ||||
#include <sys/protosw.h> | #include <sys/protosw.h> | ||||
#include <sys/socket.h> | #include <sys/socket.h> | ||||
Show All 24 Lines | |||||
#include <netinet/ip_fw.h> | #include <netinet/ip_fw.h> | ||||
#include <netinet/ip_icmp.h> | #include <netinet/ip_icmp.h> | ||||
#include <netinet/ip_options.h> | #include <netinet/ip_options.h> | ||||
#include <machine/in_cksum.h> | #include <machine/in_cksum.h> | ||||
#include <netinet/ip_carp.h> | #include <netinet/ip_carp.h> | ||||
#ifdef IPSEC | #ifdef IPSEC | ||||
#include <netinet/ip_ipsec.h> | #include <netinet/ip_ipsec.h> | ||||
#endif /* IPSEC */ | #endif /* IPSEC */ | ||||
#include <netinet/in_rss.h> | |||||
#include <sys/socketvar.h> | #include <sys/socketvar.h> | ||||
#include <security/mac/mac_framework.h> | #include <security/mac/mac_framework.h> | ||||
#ifdef CTASSERT | #ifdef CTASSERT | ||||
CTASSERT(sizeof(struct ip) == 20); | CTASSERT(sizeof(struct ip) == 20); | ||||
#endif | #endif | ||||
Show All 33 Lines | |||||
/* | /* | ||||
* XXX - Setting ip_checkinterface mostly implements the receive side of | * XXX - Setting ip_checkinterface mostly implements the receive side of | ||||
* the Strong ES model described in RFC 1122, but since the routing table | * the Strong ES model described in RFC 1122, but since the routing table | ||||
* and transmit implementation do not implement the Strong ES model, | * and transmit implementation do not implement the Strong ES model, | ||||
* setting this to 1 results in an odd hybrid. | * setting this to 1 results in an odd hybrid. | ||||
* | * | ||||
* XXX - ip_checkinterface currently must be disabled if you use ipnat | * XXX - ip_checkinterface currently must be disabled if you use ipnat | ||||
* to translate the destination address to another local interface. | * to translate the destination address to another local interface. | ||||
grehan: If RSS is active, there is no option to direct-dispatch RSS-capable traffic or you risk sending… | |||||
* | * | ||||
* XXX - ip_checkinterface must be disabled if you add IP aliases | * XXX - ip_checkinterface must be disabled if you add IP aliases | ||||
* to the loopback interface instead of the interface where the | * to the loopback interface instead of the interface where the | ||||
* packets for those addresses are received. | * packets for those addresses are received. | ||||
*/ | */ | ||||
static VNET_DEFINE(int, ip_checkinterface); | static VNET_DEFINE(int, ip_checkinterface); | ||||
#define V_ip_checkinterface VNET(ip_checkinterface) | #define V_ip_checkinterface VNET(ip_checkinterface) | ||||
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW, | SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW, | ||||
&VNET_NAME(ip_checkinterface), 0, | &VNET_NAME(ip_checkinterface), 0, | ||||
"Verify packet arrives on correct interface"); | "Verify packet arrives on correct interface"); | ||||
VNET_DEFINE(struct pfil_head, inet_pfil_hook); /* Packet filter hooks */ | VNET_DEFINE(struct pfil_head, inet_pfil_hook); /* Packet filter hooks */ | ||||
/* | |||||
* We may need to re-inject packets into the IP stack for further work. | |||||
* In this instance, use the CPU policy and query the RSS layer for the | |||||
Not Done Inline ActionsFragment should not be resubmitted to the stack: they have already been accounted for in terms of statistics. It would seem you need an additional netisr that is going to submit the already verified/accounted frame directly to the inetsw (even the M_FASTFWD_OURS bypass will still double-count ips_delivered) grehan: Fragment should not be resubmitted to the stack: they have already been accounted for in terms… | |||||
Not Done Inline ActionsOn further thought, I think the FASTFWD is what you want, modulo the byte order/length expectations in the first conditional in ip_input. ips_delivered won't be double counted since the frame is netisr'd prior to reaching inetsw. grehan: On further thought, I think the FASTFWD is what you want, modulo the byte order/length… | |||||
Not Done Inline ActionsI think that yes, perhaps we do need an ip reinject netisr that things get queued to. I'm just worried about unintended queue behaviour between the IP and IP-reinject queues. Thanks for picking that up though! I didn't even think about it double-accounting things. So hm, does that mean that the various IP tunnel de-encapsulation paths are falling similarly afoul? They also re-queue into the NETISR_IP queue. adrian: I think that yes, perhaps we do need an ip reinject netisr that things get queued to. I'm just… | |||||
Not Done Inline ActionsTunnel decaps is entirely different: you're dealing with a completely new packet once the outer layer has been stripped: it hasn't had any input processing. The requeueing is problematic. There has already been work done to process the packet, and you don't want to drop it because a netisr queue is full. That smells a lot like a DoS attack. A possible way to do this is to reserve a netisr queue slot for a reassembly entry, and only initiate a new reassembly if there is an available slot, so it is guaranteed that a successful reassembly will result in delivery to the transport layer. Seems like it's easy to spin out of control with complexity in this area :( grehan: Tunnel decaps is entirely different: you're dealing with a completely new packet once the outer… | |||||
* relevant CPU ID to use. | |||||
*/ | |||||
static struct netisr_handler ip_nh = { | static struct netisr_handler ip_nh = { | ||||
.nh_name = "ip", | .nh_name = "ip", | ||||
.nh_handler = ip_input, | .nh_handler = ip_input, | ||||
.nh_proto = NETISR_IP, | .nh_proto = NETISR_IP, | ||||
#ifdef RSS | |||||
.nh_m2cpuid = rss_m2cpuid, | |||||
.nh_policy = NETISR_POLICY_CPU, | |||||
#else | |||||
.nh_policy = NETISR_POLICY_FLOW, | .nh_policy = NETISR_POLICY_FLOW, | ||||
#endif | |||||
}; | }; | ||||
#ifdef RSS | |||||
/*
 * RSS direct-dispatch netisr handler: reinjected IP packets (e.g.
 * completed reassemblies) are queued here so ip_direct_input() runs
 * on the CPU selected from the mbuf's RSS hash, keeping the flow on
 * the same CPU/PCB group as its original fragments.
 */
static struct netisr_handler ip_direct_nh = {
	.nh_name = "ip_direct",
	.nh_handler = ip_direct_input,
	.nh_proto = NETISR_IP_DIRECT,
	.nh_m2cpuid = rss_m2cpuid,	/* map mbuf RSS hash -> CPU id */
	.nh_policy = NETISR_POLICY_CPU,	/* pin dispatch to that CPU */
};
#endif | |||||
extern struct domain inetdomain; | extern struct domain inetdomain; | ||||
extern struct protosw inetsw[]; | extern struct protosw inetsw[]; | ||||
u_char ip_protox[IPPROTO_MAX]; | u_char ip_protox[IPPROTO_MAX]; | ||||
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead); /* first inet address */ | VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead); /* first inet address */ | ||||
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */ | VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */ | ||||
VNET_DEFINE(u_long, in_ifaddrhmask); /* mask for hash table */ | VNET_DEFINE(u_long, in_ifaddrhmask); /* mask for hash table */ | ||||
static VNET_DEFINE(uma_zone_t, ipq_zone); | static VNET_DEFINE(uma_zone_t, ipq_zone); | ||||
▲ Show 20 Lines • Show All 103 Lines • ▼ Show 20 Lines | sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS) | ||||
netisr_clearqdrops(&ip_nh); | netisr_clearqdrops(&ip_nh); | ||||
return (0); | return (0); | ||||
} | } | ||||
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, | SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, | ||||
CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I", | CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I", | ||||
"Number of packets dropped from the IP input queue"); | "Number of packets dropped from the IP input queue"); | ||||
#ifdef RSS | |||||
static int | |||||
sysctl_netinet_intr_direct_queue_maxlen(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
int error, qlimit; | |||||
netisr_getqlimit(&ip_direct_nh, &qlimit); | |||||
error = sysctl_handle_int(oidp, &qlimit, 0, req); | |||||
if (error || !req->newptr) | |||||
return (error); | |||||
if (qlimit < 1) | |||||
return (EINVAL); | |||||
return (netisr_setqlimit(&ip_direct_nh, qlimit)); | |||||
} | |||||
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_direct_queue_maxlen, | |||||
CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_direct_queue_maxlen, "I", | |||||
"Maximum size of the IP direct input queue"); | |||||
static int | |||||
sysctl_netinet_intr_direct_queue_drops(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
u_int64_t qdrops_long; | |||||
int error, qdrops; | |||||
netisr_getqdrops(&ip_direct_nh, &qdrops_long); | |||||
qdrops = qdrops_long; | |||||
error = sysctl_handle_int(oidp, &qdrops, 0, req); | |||||
if (error || !req->newptr) | |||||
return (error); | |||||
if (qdrops != 0) | |||||
return (EINVAL); | |||||
netisr_clearqdrops(&ip_direct_nh); | |||||
return (0); | |||||
} | |||||
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_direct_queue_drops, | |||||
CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_direct_queue_drops, "I", | |||||
"Number of packets dropped from the IP direct input queue"); | |||||
#endif /* RSS */ | |||||
/* | /* | ||||
* IP initialization: fill in IP protocol switch table. | * IP initialization: fill in IP protocol switch table. | ||||
* All protocols not implemented in kernel go to raw IP protocol handler. | * All protocols not implemented in kernel go to raw IP protocol handler. | ||||
*/ | */ | ||||
void | void | ||||
ip_init(void) | ip_init(void) | ||||
{ | { | ||||
struct protosw *pr; | struct protosw *pr; | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | for (pr = inetdomain.dom_protosw; | ||||
} | } | ||||
EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change, | EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change, | ||||
NULL, EVENTHANDLER_PRI_ANY); | NULL, EVENTHANDLER_PRI_ANY); | ||||
/* Initialize various other remaining things. */ | /* Initialize various other remaining things. */ | ||||
IPQ_LOCK_INIT(); | IPQ_LOCK_INIT(); | ||||
netisr_register(&ip_nh); | netisr_register(&ip_nh); | ||||
#ifdef RSS | |||||
netisr_register(&ip_direct_nh); | |||||
#endif | |||||
} | } | ||||
#ifdef VIMAGE | #ifdef VIMAGE | ||||
void | void | ||||
ip_destroy(void) | ip_destroy(void) | ||||
{ | { | ||||
int i; | int i; | ||||
if ((i = pfil_head_unregister(&V_inet_pfil_hook)) != 0) | if ((i = pfil_head_unregister(&V_inet_pfil_hook)) != 0) | ||||
printf("%s: WARNING: unable to unregister pfil hook, " | printf("%s: WARNING: unable to unregister pfil hook, " | ||||
"error %d\n", __func__, i); | "error %d\n", __func__, i); | ||||
/* Cleanup in_ifaddr hash table; should be empty. */ | /* Cleanup in_ifaddr hash table; should be empty. */ | ||||
hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask); | hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask); | ||||
IPQ_LOCK(); | IPQ_LOCK(); | ||||
ip_drain_locked(); | ip_drain_locked(); | ||||
IPQ_UNLOCK(); | IPQ_UNLOCK(); | ||||
uma_zdestroy(V_ipq_zone); | uma_zdestroy(V_ipq_zone); | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef RSS | |||||
/* | /* | ||||
* IP direct input routine. | |||||
* | |||||
* This is called when reinjecting completed fragments where | |||||
* all of the previous checking and book-keeping has been done. | |||||
*/ | |||||
void | |||||
ip_direct_input(struct mbuf *m) | |||||
{ | |||||
struct ip *ip; | |||||
int hlen; | |||||
ip = mtod(m, struct ip *); | |||||
hlen = ip->ip_hl << 2; | |||||
IPSTAT_INC(ips_delivered); | |||||
(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen); | |||||
return; | |||||
} | |||||
#endif | |||||
/* | |||||
* Ip input routine. Checksum and byte swap header. If fragmented | * Ip input routine. Checksum and byte swap header. If fragmented | ||||
* try to reassemble. Process options. Pass to next level. | * try to reassemble. Process options. Pass to next level. | ||||
*/ | */ | ||||
void | void | ||||
ip_input(struct mbuf *m) | ip_input(struct mbuf *m) | ||||
{ | { | ||||
struct ip *ip = NULL; | struct ip *ip = NULL; | ||||
struct in_ifaddr *ia = NULL; | struct in_ifaddr *ia = NULL; | ||||
struct ifaddr *ifa; | struct ifaddr *ifa; | ||||
struct ifnet *ifp; | struct ifnet *ifp; | ||||
int checkif, hlen = 0; | int checkif, hlen = 0; | ||||
uint16_t sum, ip_len; | uint16_t sum, ip_len; | ||||
int dchg = 0; /* dest changed after fw */ | int dchg = 0; /* dest changed after fw */ | ||||
struct in_addr odst; /* original dst address */ | struct in_addr odst; /* original dst address */ | ||||
M_ASSERTPKTHDR(m); | M_ASSERTPKTHDR(m); | ||||
if (m->m_flags & M_FASTFWD_OURS) { | if (m->m_flags & M_FASTFWD_OURS) { | ||||
Not Done Inline ActionsYou're now making changes that are outside of the RSS conditional, so it seems like this needs additional review. My suggestion is that collapse the 2 tests of *_OURS into 1 to avoid the extra conditional test in a common path if (m->m_flags & M_FASTFWD_OURS|M_REINJECT_OURS) { /* put individual tests here */ } Also, the use of the reinjected flag doesn't seem to be necessary. There is no need to do *any* tests again: the fragment code has always passed reassembled fragments directly to the stack, and all the intercept points knew this. Seems less invasive to have a label just prior to the inetsw dispatch and goto that, rather than a seemingly arbitrary set of tests bracketed with the reinjected flag, and some without. The real fix is to have a netisr (or other) dispatch that goes directly to the protocols. I'm not sure what the "unintended queue behaviour" is that you are referring to. Certainly the proprietary implementation I worked on did have a dispatch direct to protocols without any side effects. grehan: You're now making changes that are outside of the RSS conditional, so it seems like this needs… | |||||
m->m_flags &= ~M_FASTFWD_OURS; | |||||
/* Set up some basics that will be used later. */ | /* Set up some basics that will be used later. */ | ||||
m->m_flags &= ~M_FASTFWD_OURS; | |||||
ip = mtod(m, struct ip *); | ip = mtod(m, struct ip *); | ||||
hlen = ip->ip_hl << 2; | hlen = ip->ip_hl << 2; | ||||
ip_len = ntohs(ip->ip_len); | |||||
goto ours; | goto ours; | ||||
} | } | ||||
IPSTAT_INC(ips_total); | IPSTAT_INC(ips_total); | ||||
if (m->m_pkthdr.len < sizeof(struct ip)) | if (m->m_pkthdr.len < sizeof(struct ip)) | ||||
goto tooshort; | goto tooshort; | ||||
▲ Show 20 Lines • Show All 73 Lines • ▼ Show 20 Lines | tooshort: | ||||
} | } | ||||
if (m->m_pkthdr.len > ip_len) { | if (m->m_pkthdr.len > ip_len) { | ||||
if (m->m_len == m->m_pkthdr.len) { | if (m->m_len == m->m_pkthdr.len) { | ||||
m->m_len = ip_len; | m->m_len = ip_len; | ||||
m->m_pkthdr.len = ip_len; | m->m_pkthdr.len = ip_len; | ||||
} else | } else | ||||
m_adj(m, ip_len - m->m_pkthdr.len); | m_adj(m, ip_len - m->m_pkthdr.len); | ||||
} | } | ||||
#ifdef IPSEC | #ifdef IPSEC | ||||
/* | /* | ||||
* Bypass packet filtering for packets previously handled by IPsec. | * Bypass packet filtering for packets previously handled by IPsec. | ||||
*/ | */ | ||||
if (ip_ipsec_filtertunnel(m)) | if (ip_ipsec_filtertunnel(m)) | ||||
goto passin; | goto passin; | ||||
#endif /* IPSEC */ | #endif /* IPSEC */ | ||||
▲ Show 20 Lines • Show All 242 Lines • ▼ Show 20 Lines | #ifdef IPSEC | ||||
* enforce IPsec policy checking if we are seeing last header. | * enforce IPsec policy checking if we are seeing last header. | ||||
* note that we do not visit this with protocols with pcb layer | * note that we do not visit this with protocols with pcb layer | ||||
* code - like udp/tcp/raw ip. | * code - like udp/tcp/raw ip. | ||||
*/ | */ | ||||
if (ip_ipsec_input(m)) | if (ip_ipsec_input(m)) | ||||
goto bad; | goto bad; | ||||
#endif /* IPSEC */ | #endif /* IPSEC */ | ||||
/* | /* | ||||
Not Done Inline Actionsgoto label for reinjected frames should go here. grehan: goto label for reinjected frames should go here. | |||||
Not Done Inline ActionsRight, but we should also at least run it through pfil once more, right? Hm, thinking about it, it _isn't_ doing that for reassembled frames anyway, so. Ok. I think I'll do this for now. adrian: Right, but we should also at least run it through pfil once more, right?
Hm, thinking about it…
* Switch out to protocol's input routine. | * Switch out to protocol's input routine. | ||||
*/ | */ | ||||
IPSTAT_INC(ips_delivered); | IPSTAT_INC(ips_delivered); | ||||
(*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p); | (*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p); | ||||
return; | return; | ||||
bad: | bad: | ||||
m_freem(m); | m_freem(m); | ||||
▲ Show 20 Lines • Show All 68 Lines • ▼ Show 20 Lines | |||||
* Take incoming datagram fragment and try to reassemble it into | * Take incoming datagram fragment and try to reassemble it into | ||||
* whole datagram. If the argument is the first fragment or one | * whole datagram. If the argument is the first fragment or one | ||||
* in between the function will return NULL and store the mbuf | * in between the function will return NULL and store the mbuf | ||||
* in the fragment chain. If the argument is the last fragment | * in the fragment chain. If the argument is the last fragment | ||||
* the packet will be reassembled and the pointer to the new | * the packet will be reassembled and the pointer to the new | ||||
* mbuf returned for further processing. Only m_tags attached | * mbuf returned for further processing. Only m_tags attached | ||||
* to the first packet/fragment are preserved. | * to the first packet/fragment are preserved. | ||||
* The IP header is *NOT* adjusted out of iplen. | * The IP header is *NOT* adjusted out of iplen. | ||||
* | |||||
* XXX TODO: re-calculate the RSS flowid upon completing the received | |||||
* IP packet. | |||||
*/ | */ | ||||
struct mbuf * | struct mbuf * | ||||
ip_reass(struct mbuf *m) | ip_reass(struct mbuf *m) | ||||
{ | { | ||||
struct ip *ip; | struct ip *ip; | ||||
struct mbuf *p, *q, *nq, *t; | struct mbuf *p, *q, *nq, *t; | ||||
struct ipq *fp = NULL; | struct ipq *fp = NULL; | ||||
struct ipqhead *head; | struct ipqhead *head; | ||||
int i, hlen, next; | int i, hlen, next; | ||||
u_int8_t ecn, ecn0; | u_int8_t ecn, ecn0; | ||||
u_short hash; | u_short hash; | ||||
#ifdef RSS | |||||
uint32_t rss_hash, rss_type; | |||||
#endif | |||||
/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */ | /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */ | ||||
if (V_maxnipq == 0 || V_maxfragsperpacket == 0) { | if (V_maxnipq == 0 || V_maxfragsperpacket == 0) { | ||||
IPSTAT_INC(ips_fragments); | IPSTAT_INC(ips_fragments); | ||||
IPSTAT_INC(ips_fragdropped); | IPSTAT_INC(ips_fragdropped); | ||||
m_freem(m); | m_freem(m); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 273 Lines • ▼ Show 20 Lines | #endif | ||||
uma_zfree(V_ipq_zone, fp); | uma_zfree(V_ipq_zone, fp); | ||||
m->m_len += (ip->ip_hl << 2); | m->m_len += (ip->ip_hl << 2); | ||||
m->m_data -= (ip->ip_hl << 2); | m->m_data -= (ip->ip_hl << 2); | ||||
/* some debugging cruft by sklower, below, will go away soon */ | /* some debugging cruft by sklower, below, will go away soon */ | ||||
if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */ | if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */ | ||||
m_fixhdr(m); | m_fixhdr(m); | ||||
IPSTAT_INC(ips_reassembled); | IPSTAT_INC(ips_reassembled); | ||||
IPQ_UNLOCK(); | IPQ_UNLOCK(); | ||||
#ifdef RSS | |||||
/* | |||||
* Query the RSS layer for the flowid / flowtype for the | |||||
* mbuf payload. | |||||
* | |||||
* We then queue into the relevant netisr so it can be dispatched | |||||
* to the correct CPU. | |||||
* | |||||
* Note - this may return 1, which means the flowid in the mbuf | |||||
* is correct for the configured RSS hash types and can be used. | |||||
*/ | |||||
if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) { | |||||
m->m_pkthdr.flowid = rss_hash; | |||||
M_HASHTYPE_SET(m, rss_type); | |||||
m->m_flags |= M_FLOWID; | |||||
} | |||||
/* | |||||
* Queue/dispatch for reprocessing. | |||||
* | |||||
* Note: this is much slower than just handling the frame in the | |||||
* current receive context. It's likely worth investigating | |||||
* why this is. | |||||
*/ | |||||
netisr_dispatch(NETISR_IP_DIRECT, m); | |||||
return (NULL); | |||||
#endif | |||||
/* Handle in-line */ | |||||
return (m); | return (m); | ||||
Not Done Inline ActionsThis should either be a panic if RSS is defined and the packet can be hashed, or perhaps verify if the current PCB context is the one the packet belongs to. grehan: This should either be a panic if RSS is defined and the packet can be hashed, or perhaps verify… | |||||
Not Done Inline ActionsThis was mostly for debugging/evaluation purposes for the (current) UDP situation where it's 2-tuple hashed. You're right though - if RSS is enabled then it should just netisr_dispatch() and not be configurable. adrian: This was mostly for debugging/evaluation purposes for the (current) UDP situation where it's 2… | |||||
dropfrag: | dropfrag: | ||||
IPSTAT_INC(ips_fragdropped); | IPSTAT_INC(ips_fragdropped); | ||||
if (fp != NULL) | if (fp != NULL) | ||||
fp->ipq_nfrags--; | fp->ipq_nfrags--; | ||||
m_freem(m); | m_freem(m); | ||||
done: | done: | ||||
IPQ_UNLOCK(); | IPQ_UNLOCK(); | ||||
▲ Show 20 Lines • Show All 633 Lines • Show Last 20 Lines |
If RSS is active, there is no option to direct-dispatch RSS-capable traffic or you risk sending a flow to the wrong PCB group.