diff --git a/sys/netinet/sctp_pcb.c b/sys/netinet/sctp_pcb.c index d00e8604bc16..24c2ba5ed0fe 100644 --- a/sys/netinet/sctp_pcb.c +++ b/sys/netinet/sctp_pcb.c @@ -1,5293 +1,5293 @@ /*- * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* $KAME: sctp_pcb.c,v 1.38 2005/03/06 16:04:18 itojun Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_ipsec.h" #include "opt_compat.h" #include "opt_inet6.h" #include "opt_inet.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #include #endif /* INET6 */ #ifdef IPSEC #include #include #endif /* IPSEC */ #include #include #include #include #include #include #include #include #include #ifdef SCTP_DEBUG uint32_t sctp_debug_on = 0; #endif /* SCTP_DEBUG */ extern int sctp_pcbtblsize; extern int sctp_hashtblsize; extern int sctp_chunkscale; struct sctp_epinfo sctppcbinfo; /* FIX: we don't handle multiple link local scopes */ /* "scopeless" replacement IN6_ARE_ADDR_EQUAL */ int SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b) { struct in6_addr tmp_a, tmp_b; /* use a copy of a and b */ tmp_a = *a; tmp_b = *b; in6_clearscope(&tmp_a); in6_clearscope(&tmp_b); return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b)); } void sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb) { /* * We really don't need to lock this, but I will just because it * does not hurt. */ SCTP_INP_INFO_RLOCK(); spcb->ep_count = sctppcbinfo.ipi_count_ep; spcb->asoc_count = sctppcbinfo.ipi_count_asoc; spcb->laddr_count = sctppcbinfo.ipi_count_laddr; spcb->raddr_count = sctppcbinfo.ipi_count_raddr; spcb->chk_count = sctppcbinfo.ipi_count_chunk; spcb->readq_count = sctppcbinfo.ipi_count_readq; spcb->stream_oque = sctppcbinfo.ipi_count_strmoq; spcb->free_chunks = sctppcbinfo.ipi_free_chunks; SCTP_INP_INFO_RUNLOCK(); } /* * Notes on locks for FreeBSD 5 and up. All association lookups that have a * definte ep, the INP structure is assumed to be locked for reading. 
If we * need to go find the INP (ususally when a **inp is passed) then we must * lock the INFO structure first and if needed lock the INP too. Note that if * we lock it we must * */ /* * Given a endpoint, look and find in its association list any association * with the "to" address given. This can be a "from" address, too, for * inbound packets. For outbound packets it is a true "to" address. */ static struct sctp_tcb * sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, struct sockaddr *to, struct sctp_nets **netp) { /**** ASSUMSES THE CALLER holds the INP_INFO_RLOCK */ /* * Note for this module care must be taken when observing what to is * for. In most of the rest of the code the TO field represents my * peer and the FROM field represents my address. For this module it * is reversed of that. */ /* * If we support the TCP model, then we must now dig through to see * if we can find our endpoint in the list of tcp ep's. */ uint16_t lport, rport; struct sctppcbhead *ephead; struct sctp_inpcb *inp; struct sctp_laddr *laddr; struct sctp_tcb *stcb; struct sctp_nets *net; if ((to == NULL) || (from == NULL)) { return (NULL); } if (to->sa_family == AF_INET && from->sa_family == AF_INET) { lport = ((struct sockaddr_in *)to)->sin_port; rport = ((struct sockaddr_in *)from)->sin_port; } else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) { lport = ((struct sockaddr_in6 *)to)->sin6_port; rport = ((struct sockaddr_in6 *)from)->sin6_port; } else { return NULL; } ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR( (lport + rport), sctppcbinfo.hashtcpmark)]; /* * Ok now for each of the guys in this bucket we must look and see: * - Does the remote port match. - Does there single association's * addresses match this address (to). If so we update p_ep to point * to this ep and return the tcb from it. 
*/ LIST_FOREACH(inp, ephead, sctp_hash) { if (lport != inp->sctp_lport) { continue; } SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(inp); continue; } /* check to see if the ep has one of the addresses */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* We are NOT bound all, so look further */ int match = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("An ounce of prevention is worth a pound of cure\n"); } #endif continue; } if (laddr->ifa->ifa_addr == NULL) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("ifa with a NULL address\n"); } #endif continue; } if (laddr->ifa->ifa_addr->sa_family == to->sa_family) { /* see if it matches */ struct sockaddr_in *intf_addr, *sin; intf_addr = (struct sockaddr_in *) laddr->ifa->ifa_addr; sin = (struct sockaddr_in *)to; if (from->sa_family == AF_INET) { if (sin->sin_addr.s_addr == intf_addr->sin_addr.s_addr) { match = 1; break; } } else { struct sockaddr_in6 *intf_addr6; struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *) to; intf_addr6 = (struct sockaddr_in6 *) laddr->ifa->ifa_addr; if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, &intf_addr6->sin6_addr)) { match = 1; break; } } } } if (match == 0) { /* This endpoint does not have this address */ SCTP_INP_RUNLOCK(inp); continue; } } /* * Ok if we hit here the ep has the address, does it hold * the tcb? */ stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_RUNLOCK(inp); continue; } SCTP_TCB_LOCK(stcb); if (stcb->rport != rport) { /* remote port does not match. */ SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); continue; } /* Does this TCB have a matching address? 
*/ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->ro._l_addr.sa.sa_family != from->sa_family) { /* not the same family, can't be a match */ continue; } if (from->sa_family == AF_INET) { struct sockaddr_in *sin, *rsin; sin = (struct sockaddr_in *)&net->ro._l_addr; rsin = (struct sockaddr_in *)from; if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { /* found it */ if (netp != NULL) { *netp = net; } /* Update the endpoint pointer */ *inp_p = inp; SCTP_INP_RUNLOCK(inp); return (stcb); } } else { struct sockaddr_in6 *sin6, *rsin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; rsin6 = (struct sockaddr_in6 *)from; if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, &rsin6->sin6_addr)) { /* found it */ if (netp != NULL) { *netp = net; } /* Update the endpoint pointer */ *inp_p = inp; SCTP_INP_RUNLOCK(inp); return (stcb); } } } SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); } return (NULL); } /* * rules for use * * 1) If I return a NULL you must decrement any INP ref cnt. 2) If I find an * stcb, both will be locked (locked_tcb and stcb) but decrement will be done * (if locked == NULL). 3) Decrement happens on return ONLY if locked == * NULL. */ struct sctp_tcb * sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote, struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb) { struct sctpasochead *head; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; uint16_t rport; inp = *inp_p; if (remote->sa_family == AF_INET) { rport = (((struct sockaddr_in *)remote)->sin_port); } else if (remote->sa_family == AF_INET6) { rport = (((struct sockaddr_in6 *)remote)->sin6_port); } else { return (NULL); } if (locked_tcb) { /* * UN-lock so we can do proper locking here this occurs when * called from load_addresses_from_init. */ SCTP_TCB_UNLOCK(locked_tcb); } SCTP_INP_INFO_RLOCK(); if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { /* * Now either this guy is our listener or it's the * connector. 
If it is the one that issued the connect, then * it's only chance is to be the first TCB in the list. If * it is the acceptor, then do the special_lookup to hash * and find the real inp. */ if ((inp->sctp_socket) && (inp->sctp_socket->so_qlimit)) { /* to is peer addr, from is my addr */ stcb = sctp_tcb_special_locate(inp_p, remote, local, netp); if ((stcb != NULL) && (locked_tcb == NULL)) { /* we have a locked tcb, lower refcount */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } if ((locked_tcb != NULL) && (locked_tcb != stcb)) { SCTP_INP_RLOCK(locked_tcb->sctp_ep); SCTP_TCB_LOCK(locked_tcb); SCTP_INP_RUNLOCK(locked_tcb->sctp_ep); } SCTP_INP_INFO_RUNLOCK(); return (stcb); } else { SCTP_INP_WLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { goto null_return; } stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { goto null_return; } SCTP_TCB_LOCK(stcb); if (stcb->rport != rport) { /* remote port does not match. */ SCTP_TCB_UNLOCK(stcb); goto null_return; } /* now look at the list of remote addresses */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { #ifdef INVARIENTS if (net == (TAILQ_NEXT(net, sctp_next))) { panic("Corrupt net list"); } #endif if (net->ro._l_addr.sa.sa_family != remote->sa_family) { /* not the same family */ continue; } if (remote->sa_family == AF_INET) { struct sockaddr_in *sin, *rsin; sin = (struct sockaddr_in *) &net->ro._l_addr; rsin = (struct sockaddr_in *)remote; if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { /* found it */ if (netp != NULL) { *netp = net; } if (locked_tcb == NULL) { SCTP_INP_DECR_REF(inp); } else if (locked_tcb != stcb) { SCTP_TCB_LOCK(locked_tcb); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return (stcb); } } else if (remote->sa_family == AF_INET6) { struct sockaddr_in6 *sin6, *rsin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; rsin6 = (struct sockaddr_in6 *)remote; if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, &rsin6->sin6_addr)) { /* found it */ if (netp != 
NULL) { *netp = net; } if (locked_tcb == NULL) { SCTP_INP_DECR_REF(inp); } else if (locked_tcb != stcb) { SCTP_TCB_LOCK(locked_tcb); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return (stcb); } } } SCTP_TCB_UNLOCK(stcb); } } else { SCTP_INP_WLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { goto null_return; } head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport, inp->sctp_hashmark)]; if (head == NULL) { goto null_return; } LIST_FOREACH(stcb, head, sctp_tcbhash) { if (stcb->rport != rport) { /* remote port does not match */ continue; } /* now look at the list of remote addresses */ SCTP_TCB_LOCK(stcb); TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { #ifdef INVARIENTS if (net == (TAILQ_NEXT(net, sctp_next))) { panic("Corrupt net list"); } #endif if (net->ro._l_addr.sa.sa_family != remote->sa_family) { /* not the same family */ continue; } if (remote->sa_family == AF_INET) { struct sockaddr_in *sin, *rsin; sin = (struct sockaddr_in *) &net->ro._l_addr; rsin = (struct sockaddr_in *)remote; if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { /* found it */ if (netp != NULL) { *netp = net; } if (locked_tcb == NULL) { SCTP_INP_DECR_REF(inp); } else if (locked_tcb != stcb) { SCTP_TCB_LOCK(locked_tcb); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return (stcb); } } else if (remote->sa_family == AF_INET6) { struct sockaddr_in6 *sin6, *rsin6; sin6 = (struct sockaddr_in6 *) &net->ro._l_addr; rsin6 = (struct sockaddr_in6 *)remote; if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, &rsin6->sin6_addr)) { /* found it */ if (netp != NULL) { *netp = net; } if (locked_tcb == NULL) { SCTP_INP_DECR_REF(inp); } else if (locked_tcb != stcb) { SCTP_TCB_LOCK(locked_tcb); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return (stcb); } } } SCTP_TCB_UNLOCK(stcb); } } null_return: /* clean up for returning null */ if (locked_tcb) { SCTP_TCB_LOCK(locked_tcb); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); /* not found */ return (NULL); } /* * Find an association for 
a specific endpoint using the association id given * out in the COMM_UP notification */ struct sctp_tcb * sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock) { /* * Use my the assoc_id to find a endpoint */ struct sctpasochead *head; struct sctp_tcb *stcb; uint32_t id; if (asoc_id == 0 || inp == NULL) { return (NULL); } SCTP_INP_INFO_RLOCK(); id = (uint32_t) asoc_id; head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(id, sctppcbinfo.hashasocmark)]; if (head == NULL) { /* invalid id TSNH */ SCTP_INP_INFO_RUNLOCK(); return (NULL); } LIST_FOREACH(stcb, head, sctp_asocs) { SCTP_INP_RLOCK(stcb->sctp_ep); if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(stcb->sctp_ep); SCTP_INP_INFO_RUNLOCK(); return (NULL); } if (stcb->asoc.assoc_id == id) { /* candidate */ if (inp != stcb->sctp_ep) { /* * some other guy has the same id active (id * collision ??). */ SCTP_INP_RUNLOCK(stcb->sctp_ep); continue; } if (want_lock) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(stcb->sctp_ep); SCTP_INP_INFO_RUNLOCK(); return (stcb); } SCTP_INP_RUNLOCK(stcb->sctp_ep); } /* Ok if we missed here, lets try the restart hash */ head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(id, sctppcbinfo.hashrestartmark)]; if (head == NULL) { /* invalid id TSNH */ SCTP_INP_INFO_RUNLOCK(); return (NULL); } LIST_FOREACH(stcb, head, sctp_tcbrestarhash) { SCTP_INP_RLOCK(stcb->sctp_ep); if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(stcb->sctp_ep); SCTP_INP_INFO_RUNLOCK(); return (NULL); } SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(stcb->sctp_ep); if (stcb->asoc.assoc_id == id) { /* candidate */ if (inp != stcb->sctp_ep) { /* * some other guy has the same id active (id * collision ??). 
*/ SCTP_TCB_UNLOCK(stcb); continue; } SCTP_INP_INFO_RUNLOCK(); return (stcb); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_INFO_RUNLOCK(); return (NULL); } static struct sctp_inpcb * sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head, uint16_t lport) { struct sctp_inpcb *inp; struct sockaddr_in *sin; struct sockaddr_in6 *sin6; struct sctp_laddr *laddr; /* * Endpoing probe expects that the INP_INFO is locked. */ if (nam->sa_family == AF_INET) { sin = (struct sockaddr_in *)nam; sin6 = NULL; } else if (nam->sa_family == AF_INET6) { sin6 = (struct sockaddr_in6 *)nam; sin = NULL; } else { /* unsupported family */ return (NULL); } if (head == NULL) return (NULL); LIST_FOREACH(inp, head, sctp_hash) { SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(inp); continue; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) && (inp->sctp_lport == lport)) { /* got it */ if ((nam->sa_family == AF_INET) && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY) ) { /* IPv4 on a IPv6 socket with ONLY IPv6 set */ SCTP_INP_RUNLOCK(inp); continue; } /* A V6 address and the endpoint is NOT bound V6 */ if (nam->sa_family == AF_INET6 && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { SCTP_INP_RUNLOCK(inp); continue; } SCTP_INP_RUNLOCK(inp); return (inp); } SCTP_INP_RUNLOCK(inp); } if ((nam->sa_family == AF_INET) && (sin->sin_addr.s_addr == INADDR_ANY)) { /* Can't hunt for one that has no address specified */ return (NULL); } else if ((nam->sa_family == AF_INET6) && (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) { /* Can't hunt for one that has no address specified */ return (NULL); } /* * ok, not bound to all so see if we can find a EP bound to this * address. 
*/ LIST_FOREACH(inp, head, sctp_hash) { SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(inp); continue; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) { SCTP_INP_RUNLOCK(inp); continue; } /* * Ok this could be a likely candidate, look at all of its * addresses */ if (inp->sctp_lport != lport) { SCTP_INP_RUNLOCK(inp); continue; } LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("An ounce of prevention is worth a pound of cure\n"); } #endif continue; } #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("Ok laddr->ifa:%p is possible, ", laddr->ifa); } #endif if (laddr->ifa->ifa_addr == NULL) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("Huh IFA as an ifa_addr=NULL, "); } #endif continue; } if (laddr->ifa->ifa_addr->sa_family == nam->sa_family) { /* possible, see if it matches */ struct sockaddr_in *intf_addr; intf_addr = (struct sockaddr_in *) laddr->ifa->ifa_addr; if (nam->sa_family == AF_INET) { if (sin->sin_addr.s_addr == intf_addr->sin_addr.s_addr) { SCTP_INP_RUNLOCK(inp); return (inp); } } else if (nam->sa_family == AF_INET6) { struct sockaddr_in6 *intf_addr6; intf_addr6 = (struct sockaddr_in6 *) laddr->ifa->ifa_addr; if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, &intf_addr6->sin6_addr)) { SCTP_INP_RUNLOCK(inp); return (inp); } } } } SCTP_INP_RUNLOCK(inp); } return (NULL); } struct sctp_inpcb * sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock) { /* * First we check the hash table to see if someone has this port * bound with just the port. 
*/ struct sctp_inpcb *inp; struct sctppcbhead *head; struct sockaddr_in *sin; struct sockaddr_in6 *sin6; int lport; if (nam->sa_family == AF_INET) { sin = (struct sockaddr_in *)nam; lport = ((struct sockaddr_in *)nam)->sin_port; } else if (nam->sa_family == AF_INET6) { sin6 = (struct sockaddr_in6 *)nam; lport = ((struct sockaddr_in6 *)nam)->sin6_port; } else { /* unsupported family */ return (NULL); } /* * I could cheat here and just cast to one of the types but we will * do it right. It also provides the check against an Unsupported * type too. */ /* Find the head of the ALLADDR chain */ if (have_lock == 0) { SCTP_INP_INFO_RLOCK(); } head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport, sctppcbinfo.hashmark)]; inp = sctp_endpoint_probe(nam, head, lport); /* * If the TCP model exists it could be that the main listening * endpoint is gone but there exists a connected socket for this guy * yet. If so we can return the first one that we find. This may NOT * be the correct one but the sctp_findassociation_ep_addr has * further code to look at all TCP models. */ if (inp == NULL && find_tcp_pool) { unsigned int i; for (i = 0; i < sctppcbinfo.hashtblsize; i++) { /* * This is real gross, but we do NOT have a remote * port at this point depending on who is calling. * We must therefore look for ANY one that matches * our local port :/ */ head = &sctppcbinfo.sctp_tcpephash[i]; if (LIST_FIRST(head)) { inp = sctp_endpoint_probe(nam, head, lport); if (inp) { /* Found one */ break; } } } } if (inp) { SCTP_INP_INCR_REF(inp); } if (have_lock == 0) { SCTP_INP_INFO_RUNLOCK(); } return (inp); } /* * Find an association for an endpoint with the pointer to whom you want to * send to and the endpoint pointer. The address can be IPv4 or IPv6. We may * need to change the *to to some other struct like a mbuf... 
*/
struct sctp_tcb *
sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from,
    struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *retval;

	/* The global INP-INFO read lock is held across the whole lookup. */
	SCTP_INP_INFO_RLOCK();
	if (find_tcp_pool) {
		/*
		 * Try the one-to-one (TCP model) hash first; on a hit
		 * sctp_tcb_special_locate() fills in *inp_p (when given)
		 * and *netp, and we return its stcb directly.
		 */
		if (inp_p != NULL) {
			retval = sctp_tcb_special_locate(inp_p, from, to,
			    netp);
		} else {
			retval = sctp_tcb_special_locate(&inp, from, to,
			    netp);
		}
		if (retval != NULL) {
			SCTP_INP_INFO_RUNLOCK();
			return (retval);
		}
	}
	/*
	 * have_lock=1: we already hold the INP-INFO lock.  On success
	 * sctp_pcb_findep() bumps the inp's reference count before
	 * returning it.
	 */
	inp = sctp_pcb_findep(to, 0, 1);
	if (inp_p != NULL) {
		*inp_p = inp;
	}
	SCTP_INP_INFO_RUNLOCK();

	if (inp == NULL) {
		return (NULL);
	}
	/*
	 * ok, we have an endpoint, now lets find the assoc for it (if any)
	 * we now place the source address or from in the to of the find
	 * endpoint call. Since in reality this chain is used from the
	 * inbound packet side.
	 */
	if (inp_p != NULL) {
		retval = sctp_findassociation_ep_addr(inp_p, from, netp, to,
		    NULL);
	} else {
		retval = sctp_findassociation_ep_addr(&inp, from, netp, to,
		    NULL);
	}
	return retval;
}

/*
 * This routine will grub through the mbuf that is a INIT or INIT-ACK and
 * find all addresses that the sender has specified in any address list. Each
 * address will be used to lookup the TCB and see if one exists.
*/
static struct sctp_tcb *
sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
    struct sockaddr *dest)
{
	struct sockaddr_in sin4;
	struct sockaddr_in6 sin6;
	struct sctp_paramhdr *phdr, parm_buf;
	struct sctp_tcb *retval;
	uint32_t ptype, plen;

	/*
	 * Pre-build template sockaddrs carrying the sender's port; only the
	 * address field is filled in as each parameter is examined.
	 */
	memset(&sin4, 0, sizeof(sin4));
	memset(&sin6, 0, sizeof(sin6));
	sin4.sin_len = sizeof(sin4);
	sin4.sin_family = AF_INET;
	sin4.sin_port = sh->src_port;
	sin6.sin6_len = sizeof(sin6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = sh->src_port;

	retval = NULL;
	/* "offset" pointed at the chunk header; skip to the parameters. */
	offset += sizeof(struct sctp_init_chunk);

	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
	while (phdr != NULL) {
		/* now we must see if we want the parameter */
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (plen == 0) {
			/* zero-length parameter: malformed, stop scanning */
			break;
		}
		if (ptype == SCTP_IPV4_ADDRESS &&
		    plen == sizeof(struct sctp_ipv4addr_param)) {
			/* Get the rest of the address */
			struct sctp_ipv4addr_param ip4_parm, *p4;

			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&ip4_parm, plen);
			if (phdr == NULL) {
				return (NULL);
			}
			p4 = (struct sctp_ipv4addr_param *)phdr;
			memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
			/* look it up */
			retval = sctp_findassociation_ep_addr(inp_p,
			    (struct sockaddr *)&sin4, netp, dest, NULL);
			if (retval != NULL) {
				return (retval);
			}
		} else if (ptype == SCTP_IPV6_ADDRESS &&
		    plen == sizeof(struct sctp_ipv6addr_param)) {
			/* Get the rest of the address */
			struct sctp_ipv6addr_param ip6_parm, *p6;

			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&ip6_parm, plen);
			if (phdr == NULL) {
				return (NULL);
			}
			p6 = (struct sctp_ipv6addr_param *)phdr;
			memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
			/* look it up */
			retval = sctp_findassociation_ep_addr(inp_p,
			    (struct sockaddr *)&sin6, netp, dest, NULL);
			if (retval != NULL) {
				return (retval);
			}
		}
		offset += SCTP_SIZE32(plen);
		phdr = sctp_get_next_param(m, offset, &parm_buf,
		    sizeof(parm_buf));
	}
	return (NULL);
}

/*
 * Hash on the verification tag and return a matching stcb with *inp_p and
 * *netp filled in, or NULL.  When skip_src_check is set the source-address
 * verification (sctp_findnet()) is bypassed and *netp is left NULL.
 * NOTE(review): every successful return path leaves the stcb TCB-locked —
 * confirm callers expect to unlock it.
 */
static struct sctp_tcb *
sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
    struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
    uint16_t lport, int skip_src_check)
{
	/*
	 * Use my vtag to hash. If we find it we then verify the source addr
	 * is in the assoc. If all goes well we save a bit on rec of a
	 * packet.
	 */
	struct sctpasochead *head;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;

	/* Clear both outputs up front; they are set only on success. */
	*netp = NULL;
	*inp_p = NULL;
	SCTP_INP_INFO_RLOCK();
	head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
	    sctppcbinfo.hashasocmark)];
	if (head == NULL) {
		/* invalid vtag (defensive: &array[i] cannot be NULL) */
		SCTP_INP_INFO_RUNLOCK();
		return (NULL);
	}
	LIST_FOREACH(stcb, head, sctp_asocs) {
		SCTP_INP_RLOCK(stcb->sctp_ep);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
			/* endpoint is being torn down; give up entirely */
			SCTP_INP_RUNLOCK(stcb->sctp_ep);
			SCTP_INP_INFO_RUNLOCK();
			return (NULL);
		}
		SCTP_TCB_LOCK(stcb);
		SCTP_INP_RUNLOCK(stcb->sctp_ep);
		if (stcb->asoc.my_vtag == vtag) {
			/* candidate */
			if (stcb->rport != rport) {
				/*
				 * we could remove this if vtags are unique
				 * across the system.
				 */
				SCTP_TCB_UNLOCK(stcb);
				continue;
			}
			if (stcb->sctp_ep->sctp_lport != lport) {
				/*
				 * we could remove this if vtags are unique
				 * across the system.
				 */
				SCTP_TCB_UNLOCK(stcb);
				continue;
			}
			if (skip_src_check) {
				*netp = NULL;	/* unknown */
				*inp_p = stcb->sctp_ep;
				SCTP_INP_INFO_RUNLOCK();
				return (stcb);
			}
			net = sctp_findnet(stcb, from);
			if (net) {
				/* yep its him. */
				*netp = net;
				SCTP_STAT_INCR(sctps_vtagexpress);
				*inp_p = stcb->sctp_ep;
				SCTP_INP_INFO_RUNLOCK();
				return (stcb);
			} else {
				/*
				 * not him, this should only happen in rare
				 * cases so I peg it.
				 */
				SCTP_STAT_INCR(sctps_vtagbogus);
			}
		}
		SCTP_TCB_UNLOCK(stcb);
	}
	SCTP_INP_INFO_RUNLOCK();
	return (NULL);
}

/*
 * Find an association with the pointer to the inbound IP packet. This can be
 * a IPv4 or IPv6 packet.
*/ struct sctp_tcb * sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb **inp_p, struct sctp_nets **netp) { int find_tcp_pool; struct ip *iph; struct sctp_tcb *retval; struct sockaddr_storage to_store, from_store; struct sockaddr *to = (struct sockaddr *)&to_store; struct sockaddr *from = (struct sockaddr *)&from_store; struct sctp_inpcb *inp; iph = mtod(m, struct ip *); if (iph->ip_v == IPVERSION) { /* its IPv4 */ struct sockaddr_in *from4; from4 = (struct sockaddr_in *)&from_store; bzero(from4, sizeof(*from4)); from4->sin_family = AF_INET; from4->sin_len = sizeof(struct sockaddr_in); from4->sin_addr.s_addr = iph->ip_src.s_addr; from4->sin_port = sh->src_port; } else if (iph->ip_v == (IPV6_VERSION >> 4)) { /* its IPv6 */ struct ip6_hdr *ip6; struct sockaddr_in6 *from6; ip6 = mtod(m, struct ip6_hdr *); from6 = (struct sockaddr_in6 *)&from_store; bzero(from6, sizeof(*from6)); from6->sin6_family = AF_INET6; from6->sin6_len = sizeof(struct sockaddr_in6); from6->sin6_addr = ip6->ip6_src; from6->sin6_port = sh->src_port; /* Get the scopes in properly to the sin6 addr's */ /* we probably don't need these operations */ (void)sa6_recoverscope(from6); sa6_embedscope(from6, ip6_use_defzone); } else { /* Currently not supported. 
*/ return (NULL); } if (sh->v_tag) { /* we only go down this path if vtag is non-zero */ retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag), inp_p, netp, sh->src_port, sh->dest_port, 0); if (retval) { return (retval); } } if (iph->ip_v == IPVERSION) { /* its IPv4 */ struct sockaddr_in *to4; to4 = (struct sockaddr_in *)&to_store; bzero(to4, sizeof(*to4)); to4->sin_family = AF_INET; to4->sin_len = sizeof(struct sockaddr_in); to4->sin_addr.s_addr = iph->ip_dst.s_addr; to4->sin_port = sh->dest_port; } else if (iph->ip_v == (IPV6_VERSION >> 4)) { /* its IPv6 */ struct ip6_hdr *ip6; struct sockaddr_in6 *to6; ip6 = mtod(m, struct ip6_hdr *); to6 = (struct sockaddr_in6 *)&to_store; bzero(to6, sizeof(*to6)); to6->sin6_family = AF_INET6; to6->sin6_len = sizeof(struct sockaddr_in6); to6->sin6_addr = ip6->ip6_dst; to6->sin6_port = sh->dest_port; /* Get the scopes in properly to the sin6 addr's */ /* we probably don't need these operations */ (void)sa6_recoverscope(to6); sa6_embedscope(to6, ip6_use_defzone); } find_tcp_pool = 0; /* * FIX FIX?, I think we only need to look in the TCP pool if its an * INIT or COOKIE-ECHO, We really don't need to find it that way if * its a INIT-ACK or COOKIE_ACK since these in bot one-2-one and * one-2-N would be in the main pool anyway. */ if ((ch->chunk_type != SCTP_INITIATION) && (ch->chunk_type != SCTP_INITIATION_ACK) && (ch->chunk_type != SCTP_COOKIE_ACK) && (ch->chunk_type != SCTP_COOKIE_ECHO)) { /* Other chunk types go to the tcp pool. 
*/ find_tcp_pool = 1; } if (inp_p) { retval = sctp_findassociation_addr_sa(to, from, inp_p, netp, find_tcp_pool); inp = *inp_p; } else { retval = sctp_findassociation_addr_sa(to, from, &inp, netp, find_tcp_pool); } #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("retval:%p inp:%p\n", retval, inp); } #endif if (retval == NULL && inp) { /* Found a EP but not this address */ if ((ch->chunk_type == SCTP_INITIATION) || (ch->chunk_type == SCTP_INITIATION_ACK)) { /* * special hook, we do NOT return linp or an * association that is linked to an existing * association that is under the TCP pool (i.e. no * listener exists). The endpoint finding routine * will always find a listner before examining the * TCP pool. */ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { if (inp_p) { *inp_p = NULL; } return (NULL); } retval = sctp_findassociation_special_addr(m, iphlen, offset, sh, &inp, netp, to); if (inp_p != NULL) { *inp_p = inp; } } } #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("retval is %p\n", retval); } #endif return (retval); } /* * lookup an association by an ASCONF lookup address. * if the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup */ struct sctp_tcb * sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp) { struct sctp_tcb *stcb; struct sockaddr_in *sin; struct sockaddr_in6 *sin6; struct sockaddr_storage local_store, remote_store; struct ip *iph; struct sctp_paramhdr parm_buf, *phdr; int ptype; int zero_address = 0; memset(&local_store, 0, sizeof(local_store)); memset(&remote_store, 0, sizeof(remote_store)); /* First get the destination address setup too. 
*/ iph = mtod(m, struct ip *); if (iph->ip_v == IPVERSION) { /* its IPv4 */ sin = (struct sockaddr_in *)&local_store; sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_port = sh->dest_port; sin->sin_addr.s_addr = iph->ip_dst.s_addr; } else if (iph->ip_v == (IPV6_VERSION >> 4)) { /* its IPv6 */ struct ip6_hdr *ip6; ip6 = mtod(m, struct ip6_hdr *); sin6 = (struct sockaddr_in6 *)&local_store; sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); sin6->sin6_port = sh->dest_port; sin6->sin6_addr = ip6->ip6_dst; } else { return NULL; } phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk), &parm_buf, sizeof(struct sctp_paramhdr)); if (phdr == NULL) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_INPUT3) { printf("findassociation_ep_asconf: failed to get asconf lookup addr\n"); } #endif /* SCTP_DEBUG */ return NULL; } ptype = (int)((uint32_t) ntohs(phdr->param_type)); /* get the correlation address */ if (ptype == SCTP_IPV6_ADDRESS) { /* ipv6 address param */ struct sctp_ipv6addr_param *p6, p6_buf; if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) { return NULL; } p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk), &p6_buf.ph, sizeof(*p6)); if (p6 == NULL) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_INPUT3) { printf("findassociation_ep_asconf: failed to get asconf v6 lookup addr\n"); } #endif /* SCTP_DEBUG */ return (NULL); } sin6 = (struct sockaddr_in6 *)&remote_store; sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); sin6->sin6_port = sh->src_port; memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr)); if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) zero_address = 1; } else if (ptype == SCTP_IPV4_ADDRESS) { /* ipv4 address param */ struct sctp_ipv4addr_param *p4, p4_buf; if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) { return NULL; } p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m, offset + sizeof(struct 
sctp_asconf_chunk), &p4_buf.ph, sizeof(*p4)); if (p4 == NULL) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_INPUT3) { printf("findassociation_ep_asconf: failed to get asconf v4 lookup addr\n"); } #endif /* SCTP_DEBUG */ return (NULL); } sin = (struct sockaddr_in *)&remote_store; sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_port = sh->src_port; memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr)); if (sin->sin_addr.s_addr == INADDR_ANY) zero_address = 1; } else { /* invalid address param type */ return NULL; } if (zero_address) { stcb = sctp_findassoc_by_vtag(NULL, ntohl(sh->v_tag), inp_p, netp, sh->src_port, sh->dest_port, 1); /* * printf("findassociation_ep_asconf: zero lookup address * finds stcb 0x%x\n", (uint32_t)stcb); */ } else { stcb = sctp_findassociation_ep_addr(inp_p, (struct sockaddr *)&remote_store, netp, (struct sockaddr *)&local_store, NULL); } return (stcb); } extern int sctp_max_burst_default; extern unsigned int sctp_delayed_sack_time_default; extern unsigned int sctp_heartbeat_interval_default; extern unsigned int sctp_pmtu_raise_time_default; extern unsigned int sctp_shutdown_guard_time_default; extern unsigned int sctp_secret_lifetime_default; extern unsigned int sctp_rto_max_default; extern unsigned int sctp_rto_min_default; extern unsigned int sctp_rto_initial_default; extern unsigned int sctp_init_rto_max_default; extern unsigned int sctp_valid_cookie_life_default; extern unsigned int sctp_init_rtx_max_default; extern unsigned int sctp_assoc_rtx_max_default; extern unsigned int sctp_path_rtx_max_default; extern unsigned int sctp_nr_outgoing_streams_default; /* * allocate a sctp_inpcb and setup a temporary binding to a port/all * addresses. This way if we don't get a bind we by default pick a ephemeral * port with all addresses bound. */ int sctp_inpcb_alloc(struct socket *so) { /* * we get called when a new endpoint starts up. We need to allocate * the sctp_inpcb structure from the zone and init it. 
Mark it as * unbound and find a port that we can use as an ephemeral with * INADDR_ANY. If the user binds later no problem we can then add in * the specific addresses. And setup the default parameters for the * EP. */ int i, error; struct sctp_inpcb *inp; struct sctp_pcb *m; struct timeval time; sctp_sharedkey_t *null_key; error = 0; SCTP_INP_INFO_WLOCK(); inp = (struct sctp_inpcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep); if (inp == NULL) { printf("Out of SCTP-INPCB structures - no resources\n"); SCTP_INP_INFO_WUNLOCK(); return (ENOBUFS); } /* zap it */ bzero(inp, sizeof(*inp)); /* bump generations */ /* setup socket pointers */ inp->sctp_socket = so; inp->ip_inp.inp.inp_socket = so; inp->partial_delivery_point = so->so_rcv.sb_hiwat >> SCTP_PARTIAL_DELIVERY_SHIFT; inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; #ifdef IPSEC { struct inpcbpolicy *pcb_sp = NULL; error = ipsec_init_pcbpolicy(so, &pcb_sp); /* Arrange to share the policy */ inp->ip_inp.inp.inp_sp = pcb_sp; ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp; } if (error != 0) { SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp); SCTP_INP_INFO_WUNLOCK(); return error; } #endif /* IPSEC */ SCTP_INCR_EP_COUNT(); inp->ip_inp.inp.inp_ip_ttl = ip_defttl; SCTP_INP_INFO_WUNLOCK(); so->so_pcb = (caddr_t)inp; if ((so->so_type == SOCK_DGRAM) || (so->so_type == SOCK_SEQPACKET)) { /* UDP style socket */ inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE | SCTP_PCB_FLAGS_UNBOUND); sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); /* Be sure it is NON-BLOCKING IO for UDP */ /* so->so_state |= SS_NBIO; */ } else if (so->so_type == SOCK_STREAM) { /* TCP style socket */ inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_UNBOUND); sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); /* Be sure we have blocking IO by default */ so->so_state &= ~SS_NBIO; } else { /* * unsupported socket type (RAW, etc)- in case we missed it * in protosw */ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp); return (EOPNOTSUPP); } 
inp->sctp_tcbhash = hashinit(sctp_pcbtblsize, M_PCB, &inp->sctp_hashmark); if (inp->sctp_tcbhash == NULL) { printf("Out of SCTP-INPCB->hashinit - no resources\n"); SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp); return (ENOBUFS); } SCTP_INP_INFO_WLOCK(); SCTP_INP_LOCK_INIT(inp); SCTP_INP_READ_INIT(inp); SCTP_ASOC_CREATE_LOCK_INIT(inp); /* lock the new ep */ SCTP_INP_WLOCK(inp); /* add it to the info area */ LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list); SCTP_INP_INFO_WUNLOCK(); TAILQ_INIT(&inp->read_queue); LIST_INIT(&inp->sctp_addr_list); LIST_INIT(&inp->sctp_asoc_list); /* Init the timer structure for signature change */ callout_init(&inp->sctp_ep.signature_change.timer, 1); inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE; /* now init the actual endpoint default data */ m = &inp->sctp_ep; /* setup the base timeout information */ m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */ m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? 
*/ m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default); m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(sctp_heartbeat_interval_default); m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default); m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default); m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default); /* all max/min max are in ms */ m->sctp_maxrto = sctp_rto_max_default; m->sctp_minrto = sctp_rto_min_default; m->initial_rto = sctp_rto_initial_default; m->initial_init_rto_max = sctp_init_rto_max_default; m->max_open_streams_intome = MAX_SCTP_STREAMS; m->max_init_times = sctp_init_rtx_max_default; m->max_send_times = sctp_assoc_rtx_max_default; m->def_net_failure = sctp_path_rtx_max_default; m->sctp_sws_sender = SCTP_SWS_SENDER_DEF; m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF; m->max_burst = sctp_max_burst_default; /* number of streams to pre-open on a association */ m->pre_open_stream_count = sctp_nr_outgoing_streams_default; /* Add adaptation cookie */ m->adaptation_layer_indicator = 0x504C5253; /* seed random number generator */ m->random_counter = 1; m->store_at = SCTP_SIGNATURE_SIZE; sctp_read_random(m->random_numbers, sizeof(m->random_numbers)); sctp_fill_random_store(m); /* Minimum cookie size */ m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) + sizeof(struct sctp_state_cookie); m->size_of_a_cookie += SCTP_SIGNATURE_SIZE; /* Setup the initial secret */ SCTP_GETTIME_TIMEVAL(&time); m->time_of_secret_change = time.tv_sec; for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { m->secret_key[0][i] = sctp_select_initial_TSN(m); } sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); /* How long is a cookie good for ? 
*/ m->def_cookie_life = sctp_valid_cookie_life_default; /* * Initialize authentication parameters */ m->local_hmacs = sctp_default_supported_hmaclist(); m->local_auth_chunks = sctp_alloc_chunklist(); sctp_auth_set_default_chunks(m->local_auth_chunks); LIST_INIT(&m->shared_keys); /* add default NULL key as key id 0 */ null_key = sctp_alloc_sharedkey(); sctp_insert_sharedkey(&m->shared_keys, null_key); SCTP_INP_WUNLOCK(inp); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 12); #endif return (error); } void sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp, struct sctp_tcb *stcb) { struct sctp_nets *net; uint16_t lport, rport; struct sctppcbhead *head; struct sctp_laddr *laddr, *oladdr; SCTP_TCB_UNLOCK(stcb); SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(old_inp); SCTP_INP_WLOCK(new_inp); SCTP_TCB_LOCK(stcb); new_inp->sctp_ep.time_of_secret_change = old_inp->sctp_ep.time_of_secret_change; memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key, sizeof(old_inp->sctp_ep.secret_key)); new_inp->sctp_ep.current_secret_number = old_inp->sctp_ep.current_secret_number; new_inp->sctp_ep.last_secret_number = old_inp->sctp_ep.last_secret_number; new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie; /* make it so new data pours into the new socket */ stcb->sctp_socket = new_inp->sctp_socket; stcb->sctp_ep = new_inp; /* Copy the port across */ lport = new_inp->sctp_lport = old_inp->sctp_lport; rport = stcb->rport; /* Pull the tcb from the old association */ LIST_REMOVE(stcb, sctp_tcbhash); LIST_REMOVE(stcb, sctp_tcblist); /* Now insert the new_inp into the TCP connected hash */ head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport), sctppcbinfo.hashtcpmark)]; LIST_INSERT_HEAD(head, new_inp, sctp_hash); /* Now move the tcb into the endpoint list */ LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist); /* * Question, do we even need to worry about the ep-hash since we * only have one connection? 
Probably not :> so lets get rid of it * and not suck up any kernel memory in that. */ /* Ok. Let's restart timer. */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp, stcb, net); } SCTP_INP_INFO_WUNLOCK(); if (new_inp->sctp_tcbhash != NULL) { SCTP_FREE(new_inp->sctp_tcbhash); new_inp->sctp_tcbhash = NULL; } if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* Subset bound, so copy in the laddr list from the old_inp */ LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) { laddr = (struct sctp_laddr *)SCTP_ZONE_GET( sctppcbinfo.ipi_zone_laddr); if (laddr == NULL) { /* * Gak, what can we do? This assoc is really * HOSED. We probably should send an abort * here. */ #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("Association hosed in TCP model, out of laddr memory\n"); } #endif /* SCTP_DEBUG */ continue; } SCTP_INCR_LADDR_COUNT(); bzero(laddr, sizeof(*laddr)); laddr->ifa = oladdr->ifa; LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr, sctp_nxt_addr); new_inp->laddr_count++; } } /* * Now any running timers need to be adjusted since we really don't * care if they are running or not just blast in the new_inp into * all of them. */ stcb->asoc.hb_timer.ep = (void *)new_inp; stcb->asoc.dack_timer.ep = (void *)new_inp; stcb->asoc.asconf_timer.ep = (void *)new_inp; stcb->asoc.strreset_timer.ep = (void *)new_inp; stcb->asoc.shut_guard_timer.ep = (void *)new_inp; stcb->asoc.autoclose_timer.ep = (void *)new_inp; stcb->asoc.delayed_event_timer.ep = (void *)new_inp; /* now what about the nets? 
 */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/* repoint every per-net timer at the new endpoint */
		net->pmtu_timer.ep = (void *)new_inp;
		net->rxt_timer.ep = (void *)new_inp;
		net->fr_timer.ep = (void *)new_inp;
	}
	SCTP_INP_WUNLOCK(new_inp);
	SCTP_INP_WUNLOCK(old_inp);
}

/*
 * Check whether local port 'lport' (network byte order, as callers pass
 * sin_port or htons() values) is already bound by another endpoint in
 * the main ep hash, honoring the v4/v6 and IPV6_V6ONLY binding rules.
 * Returns 1 on conflict, 0 when the port is free for 'inp'.
 * NOTE(review): callers in this file invoke this with the INP-info
 * write lock held — confirm before adding new callers.
 */
static int
sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport)
{
	struct sctppcbhead *head;
	struct sctp_inpcb *t_inp;

	head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport, sctppcbinfo.hashmark)];
	LIST_FOREACH(t_inp, head, sctp_hash) {
		if (t_inp->sctp_lport != lport) {
			continue;
		}
		/* This one is in use. */
		/* check the v6/v4 binding issue */
		if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    (((struct inpcb *)t_inp)->inp_flags & IN6P_IPV6_V6ONLY)
		    ) {
			/* existing binding is v6-only */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
				/* collision in V6 space */
				return (1);
			} else {
				/* inp is BOUND_V4 no conflict */
				continue;
			}
		} else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			/* t_inp is bound v4 and v6, conflict always */
			return (1);
		} else {
			/* t_inp is bound only V4 */
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
			    ) {
				/* no conflict */
				continue;
			}
			/* else fall through to conflict */
		}
		return (1);
	}
	return (0);
}

/*
 * Bind the endpoint to the address/port in 'addr', or pick an ephemeral
 * port when no port (or no addr) is supplied.  Returns 0 or an errno.
 * A second bind on an already-bound endpoint is rejected with EINVAL.
 */
int
sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
{
	/* bind a ep to a socket address */
	struct sctppcbhead *head;
	struct sctp_inpcb *inp, *inp_tmp;
	struct inpcb *ip_inp;
	int bindall;
	uint16_t lport;
	int error;

	lport = 0;
	error = 0;
	bindall = 1;
	inp = (struct sctp_inpcb *)so->so_pcb;
	ip_inp = (struct inpcb *)so->so_pcb;
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		if (addr) {
			printf("Bind called port:%d\n",
			    ntohs(((struct sockaddr_in *)addr)->sin_port));
			printf("Addr :");
			sctp_print_address(addr);
		}
	}
#endif				/* SCTP_DEBUG */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
		/* already did a bind, subsequent binds NOT allowed ! */
		return (EINVAL);
	}
	if (addr != NULL) {
		if (addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			/* IPV6_V6ONLY socket?
*/ if ( (ip_inp->inp_flags & IN6P_IPV6_V6ONLY) ) { return (EINVAL); } if (addr->sa_len != sizeof(*sin)) return (EINVAL); sin = (struct sockaddr_in *)addr; lport = sin->sin_port; if (sin->sin_addr.s_addr != INADDR_ANY) { bindall = 0; } } else if (addr->sa_family == AF_INET6) { /* Only for pure IPv6 Address. (No IPv4 Mapped!) */ struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (addr->sa_len != sizeof(*sin6)) return (EINVAL); lport = sin6->sin6_port; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { bindall = 0; /* KAME hack: embed scopeid */ if (sa6_embedscope(sin6, ip6_use_defzone) != 0) return (EINVAL); } /* this must be cleared for ifa_ifwithaddr() */ sin6->sin6_scope_id = 0; } else { return (EAFNOSUPPORT); } } SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); /* increase our count due to the unlock we do */ SCTP_INP_INCR_REF(inp); if (lport) { /* * Did the caller specify a port? if so we must see if a ep * already has this one bound. */ /* got to be root to get at low ports */ if (ntohs(lport) < IPPORT_RESERVED) { - if (p && (error = - suser_cred(p->td_ucred, 0) - )) { + if (p && (error = priv_check(p, + PRIV_NETINET_RESERVEDPORT))) { SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (error); } } if (p == NULL) { SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (error); } SCTP_INP_WUNLOCK(inp); inp_tmp = sctp_pcb_findep(addr, 0, 1); if (inp_tmp != NULL) { /* * lock guy returned and lower count note that we * are not bound so inp_tmp should NEVER be inp. And * it is this inp (inp_tmp) that gets the reference * bump, so we must lower it. 
*/ SCTP_INP_DECR_REF(inp_tmp); SCTP_INP_DECR_REF(inp); /* unlock info */ SCTP_INP_INFO_WUNLOCK(); return (EADDRNOTAVAIL); } SCTP_INP_WLOCK(inp); if (bindall) { /* verify that no lport is not used by a singleton */ if (sctp_isport_inuse(inp, lport)) { /* Sorry someone already has this one bound */ SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (EADDRNOTAVAIL); } } } else { /* * get any port but lets make sure no one has any address * with this port bound */ /* * setup the inp to the top (I could use the union but this * is just as easy */ uint32_t port_guess; uint16_t port_attempt; int not_done = 1; while (not_done) { port_guess = sctp_select_initial_TSN(&inp->sctp_ep); port_attempt = (port_guess & 0x0000ffff); if (port_attempt == 0) { goto next_half; } if (port_attempt < IPPORT_RESERVED) { port_attempt += IPPORT_RESERVED; } if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) { /* got a port we can use */ not_done = 0; continue; } /* try upper half */ next_half: port_attempt = ((port_guess >> 16) & 0x0000ffff); if (port_attempt == 0) { goto last_try; } if (port_attempt < IPPORT_RESERVED) { port_attempt += IPPORT_RESERVED; } if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) { /* got a port we can use */ not_done = 0; continue; } /* try two half's added together */ last_try: port_attempt = (((port_guess >> 16) & 0x0000ffff) + (port_guess & 0x0000ffff)); if (port_attempt == 0) { /* get a new random number */ continue; } if (port_attempt < IPPORT_RESERVED) { port_attempt += IPPORT_RESERVED; } if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) { /* got a port we can use */ not_done = 0; continue; } } /* we don't get out of the loop until we have a port */ lport = htons(port_attempt); } SCTP_INP_DECR_REF(inp); if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { /* * this really should not happen. The guy did a non-blocking * bind and then did a close at the same time. 
*/ SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (EINVAL); } /* ok we look clear to give out this port, so lets setup the binding */ if (bindall) { /* binding to all addresses, so just set in the proper flags */ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL; sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); /* set the automatic addr changes from kernel flag */ if (sctp_auto_asconf == 0) { sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); } else { sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); } } else { /* * bind specific, make sure flags is off and add a new * address structure to the sctp_addr_list inside the ep * structure. * * We will need to allocate one and insert it at the head. The * socketopt call can just insert new addresses in there as * well. It will also have to do the embed scope kame hack * too (before adding). */ struct ifaddr *ifa; struct sockaddr_storage store_sa; memset(&store_sa, 0, sizeof(store_sa)); if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)&store_sa; memcpy(sin, addr, sizeof(struct sockaddr_in)); sin->sin_port = 0; } else if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&store_sa; memcpy(sin6, addr, sizeof(struct sockaddr_in6)); sin6->sin6_port = 0; } /* * first find the interface with the bound address need to * zero out the port to find the address! yuck! can't do * this earlier since need port for sctp_pcb_findep() */ ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa); if (ifa == NULL) { /* Can't find an interface with that address */ SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (EADDRNOTAVAIL); } if (addr->sa_family == AF_INET6) { struct in6_ifaddr *ifa6; ifa6 = (struct in6_ifaddr *)ifa; /* * allow binding of deprecated addresses as per RFC * 2462 and ipng discussion */ if (ifa6->ia6_flags & (IN6_IFF_DETACHED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY)) { /* Can't bind a non-existent addr. 
*/ SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (EINVAL); } } /* we're not bound all */ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL; /* set the automatic addr changes from kernel flag */ if (sctp_auto_asconf == 0) { sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); } else { sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); } /* allow bindx() to send ASCONF's for binding changes */ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); /* add this address to the endpoint list */ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa); if (error != 0) { SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (error); } inp->laddr_count++; } /* find the bucket */ head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport, sctppcbinfo.hashmark)]; /* put it in the bucket */ LIST_INSERT_HEAD(head, inp, sctp_hash); #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("Main hash to bind at head:%p, bound port:%d\n", head, ntohs(lport)); } #endif /* set in the port */ inp->sctp_lport = lport; /* turn off just the unbound flag */ inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (0); } static void sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_next) { struct sctp_iterator *it; /* * We enter with the only the ITERATOR_LOCK in place and a write * lock on the inp_info stuff. */ /* * Go through all iterators, we must do this since it is possible * that some iterator does NOT have the lock, but is waiting for it. * And the one that had the lock has either moved in the last * iteration or we just cleared it above. We need to find all of * those guys. The list of iterators should never be very big * though. */ LIST_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) { if (it == inp->inp_starting_point_for_iterator) /* skip this guy, he's special */ continue; if (it->inp == inp) { /* * This is tricky and we DON'T lock the iterator. 
* Reason is he's running but waiting for me since * inp->inp_starting_point_for_iterator has the lock * on me (the guy above we skipped). This tells us * its is not running but waiting for * inp->inp_starting_point_for_iterator to be * released by the guy that does have our INP in a * lock. */ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { it->inp = NULL; it->stcb = NULL; } else { /* set him up to do the next guy not me */ it->inp = inp_next; it->stcb = NULL; } } } it = inp->inp_starting_point_for_iterator; if (it) { if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { it->inp = NULL; } else { it->inp = inp_next; } it->stcb = NULL; } } /* release sctp_inpcb unbind the port */ void sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) { /* * Here we free a endpoint. We must find it (if it is in the Hash * table) and remove it from there. Then we must also find it in the * overall list and remove it from there. After all removals are * complete then any timer has to be stopped. Then start the actual * freeing. a) Any local lists. b) Any associations. c) The hash of * all associations. d) finally the ep itself. */ struct sctp_pcb *m; struct sctp_inpcb *inp_save; struct sctp_tcb *asoc, *nasoc; struct sctp_laddr *laddr, *nladdr; struct inpcb *ip_pcb; struct socket *so; struct sctp_queued_to_read *sq; int s, cnt; sctp_sharedkey_t *shared_key; s = splnet(); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 0); #endif SCTP_ITERATOR_LOCK(); so = inp->sctp_socket; if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { /* been here before.. eeks.. get out of here */ splx(s); printf("This conflict in free SHOULD not be happening!\n"); SCTP_ITERATOR_UNLOCK(); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 1); #endif return; } SCTP_ASOC_CREATE_LOCK(inp); SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); /* * First time through we have the socket lock, after that no more. 
*/ if (from == 1) { /* * Once we are in we can remove the flag from = 1 is only * passed from the actual closing routines that are called * via the sockets layer. */ inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP; } sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); if (inp->control) { sctp_m_freem(inp->control); inp->control = NULL; } if (inp->pkt) { sctp_m_freem(inp->pkt); inp->pkt = NULL; } m = &inp->sctp_ep; ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer * here but I will be nice :> (i.e. * ip_pcb = ep;) */ if (immediate == 0) { int cnt_in_sd; cnt_in_sd = 0; for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL; asoc = nasoc) { nasoc = LIST_NEXT(asoc, sctp_tcblist); if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { /* Skip guys being freed */ asoc->sctp_socket = NULL; cnt_in_sd++; continue; } if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) || (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) { /* Just abandon things in the front states */ if (asoc->asoc.total_output_queue_size == 0) { sctp_free_assoc(inp, asoc, 1); continue; } } SCTP_TCB_LOCK(asoc); /* Disconnect the socket please */ asoc->sctp_socket = NULL; asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET; if ((asoc->asoc.size_on_reasm_queue > 0) || (asoc->asoc.control_pdapi) || (asoc->asoc.size_on_all_streams > 0) || (so && (so->so_rcv.sb_cc > 0)) ) { /* Left with Data unread */ struct mbuf *op_err; op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 0, M_DONTWAIT, 1, MT_DATA); if (op_err) { /* Fill in the user initiated abort */ struct sctp_paramhdr *ph; uint32_t *ippp; op_err->m_len = sizeof(struct sctp_paramhdr) + sizeof(uint32_t); ph = mtod(op_err, struct sctp_paramhdr *); ph->param_type = htons( SCTP_CAUSE_USER_INITIATED_ABT); ph->param_length = htons(op_err->m_len); ippp = (uint32_t *) (ph + 1); *ippp = htonl(0x30000004); } sctp_send_abort_tcb(asoc, op_err); SCTP_STAT_INCR_COUNTER32(sctps_aborted); if 
((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } sctp_free_assoc(inp, asoc, 1); continue; } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) && TAILQ_EMPTY(&asoc->asoc.sent_queue) && (asoc->asoc.stream_queue_cnt == 0) ) { if (asoc->asoc.locked_on_sending) { goto abort_anyway; } if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { /* * there is nothing queued to send, * so I send shutdown */ sctp_send_shutdown(asoc, asoc->asoc.primary_destination); asoc->asoc.state = SCTP_STATE_SHUTDOWN_SENT; SCTP_STAT_DECR_GAUGE32(sctps_currestab); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc, asoc->asoc.primary_destination); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, asoc->asoc.primary_destination); sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR); } } else { /* mark into shutdown pending */ struct sctp_stream_queue_pending *sp; asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING; sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, asoc->asoc.primary_destination); if (asoc->asoc.locked_on_sending) { sp = TAILQ_LAST(&((asoc->asoc.locked_on_sending)->outqueue), sctp_streamhead); if (sp == NULL) { printf("Error, sp is NULL, locked on sending is %p strm:%d\n", asoc->asoc.locked_on_sending, asoc->asoc.locked_on_sending->stream_no); } else { if ((sp->length == 0) && (sp->msg_is_complete == 0)) asoc->asoc.state |= SCTP_STATE_PARTIAL_MSG_LEFT; } } if (TAILQ_EMPTY(&asoc->asoc.send_queue) && TAILQ_EMPTY(&asoc->asoc.sent_queue) && (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; abort_anyway: op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 0, M_DONTWAIT, 1, MT_DATA); if (op_err) { /* * Fill in the user * initiated abort */ struct sctp_paramhdr *ph; uint32_t *ippp; op_err->m_len = 
(sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); ph = mtod(op_err, struct sctp_paramhdr *); ph->param_type = htons( SCTP_CAUSE_USER_INITIATED_ABT); ph->param_length = htons(op_err->m_len); ippp = (uint32_t *) (ph + 1); *ippp = htonl(0x30000005); } sctp_send_abort_tcb(asoc, op_err); SCTP_STAT_INCR_COUNTER32(sctps_aborted); if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } sctp_free_assoc(inp, asoc, 1); continue; } } cnt_in_sd++; SCTP_TCB_UNLOCK(asoc); } /* now is there some left in our SHUTDOWN state? */ if (cnt_in_sd) { if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) != SCTP_PCB_FLAGS_UNBOUND) { /* * ok, this guy has been bound. It's port is * somewhere in the sctppcbinfo hash table. * Remove it! * * Note we are depending on lookup by vtag to * find associations that are dieing. This * free's the port so we don't have to block * its useage. The SCTP_PCB_FLAGS_UNBOUND * flags will prevent us from doing this * again. */ LIST_REMOVE(inp, sctp_hash); inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND; } splx(s); SCTP_INP_WUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_ITERATOR_UNLOCK(); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 2); #endif return; } } inp->sctp_socket = NULL; if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) != SCTP_PCB_FLAGS_UNBOUND) { /* * ok, this guy has been bound. It's port is somewhere in * the sctppcbinfo hash table. Remove it! */ LIST_REMOVE(inp, sctp_hash); inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND; } /* * If there is a timer running to kill us, forget it, since it may * have a contest on the INP lock.. which would cause us to die ... 
*/ cnt = 0; for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL; asoc = nasoc) { nasoc = LIST_NEXT(asoc, sctp_tcblist); if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { cnt++; continue; } /* Free associations that are NOT killing us */ SCTP_TCB_LOCK(asoc); if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) && ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) { struct mbuf *op_err; uint32_t *ippp; op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 0, M_DONTWAIT, 1, MT_DATA); if (op_err) { /* Fill in the user initiated abort */ struct sctp_paramhdr *ph; op_err->m_len = (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); ph = mtod(op_err, struct sctp_paramhdr *); ph->param_type = htons( SCTP_CAUSE_USER_INITIATED_ABT); ph->param_length = htons(op_err->m_len); ippp = (uint32_t *) (ph + 1); *ippp = htonl(0x30000006); } sctp_send_abort_tcb(asoc, op_err); SCTP_STAT_INCR_COUNTER32(sctps_aborted); } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { cnt++; SCTP_TCB_UNLOCK(asoc); continue; } if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } sctp_free_assoc(inp, asoc, 2); } if (cnt) { /* Ok we have someone out there that will kill us */ callout_stop(&inp->sctp_ep.signature_change.timer); SCTP_INP_WUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_ITERATOR_UNLOCK(); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 3); #endif return; } if ((inp->refcount) || (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) { callout_stop(&inp->sctp_ep.signature_change.timer); sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL); SCTP_INP_WUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_ITERATOR_UNLOCK(); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 4); #endif return; } callout_stop(&inp->sctp_ep.signature_change.timer); inp->sctp_ep.signature_change.type 
= 0; inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE; #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 5); #endif callout_stop(&inp->sctp_ep.signature_change.timer); inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NONE; /* Clear the read queue */ while ((sq = TAILQ_FIRST(&inp->read_queue)) != NULL) { TAILQ_REMOVE(&inp->read_queue, sq, next); sctp_free_remote_addr(sq->whoFrom); if (so) so->so_rcv.sb_cc -= sq->length; if (sq->data) { sctp_m_freem(sq->data); sq->data = NULL; } /* * no need to free the net count, since at this point all * assoc's are gone. */ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq); SCTP_DECR_READQ_COUNT(); } /* Now the sctp_pcb things */ /* * free each asoc if it is not already closed/free. we can't use the * macro here since le_next will get freed as part of the * sctp_free_assoc() call. */ cnt = 0; if (so) { #ifdef IPSEC ipsec4_delete_pcbpolicy(ip_pcb); #endif /* IPSEC */ /* Unlocks not needed since the socket is gone now */ } if (ip_pcb->inp_options) { (void)sctp_m_free(ip_pcb->inp_options); ip_pcb->inp_options = 0; } if (ip_pcb->inp_moptions) { ip_freemoptions(ip_pcb->inp_moptions); ip_pcb->inp_moptions = 0; } #ifdef INET6 if (ip_pcb->inp_vflag & INP_IPV6) { struct in6pcb *in6p; in6p = (struct in6pcb *)inp; ip6_freepcbopts(in6p->in6p_outputopts); } #endif /* INET6 */ ip_pcb->inp_vflag = 0; /* free up authentication fields */ if (inp->sctp_ep.local_auth_chunks != NULL) sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); if (inp->sctp_ep.local_hmacs != NULL) sctp_free_hmaclist(inp->sctp_ep.local_hmacs); shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys); while (shared_key) { LIST_REMOVE(shared_key, next); sctp_free_sharedkey(shared_key); shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys); } inp_save = LIST_NEXT(inp, sctp_list); LIST_REMOVE(inp, sctp_list); /* fix any iterators only after out of the list */ sctp_iterator_inp_being_freed(inp, inp_save); /* * if we have an address list the following will free the list of * ifaddr's 
 that are set into this ep. Again macro limitations here,
 * since the LIST_FOREACH could be a bad idea.
 */
	for ((laddr = LIST_FIRST(&inp->sctp_addr_list)); laddr != NULL;
	    laddr = nladdr) {
		/* grab the next pointer first; laddr is freed below */
		nladdr = LIST_NEXT(laddr, sctp_nxt_addr);
		LIST_REMOVE(laddr, sctp_nxt_addr);
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
		SCTP_DECR_LADDR_COUNT();
	}
	/* Now lets see about freeing the EP hash table. */
	if (inp->sctp_tcbhash != NULL) {
		SCTP_FREE(inp->sctp_tcbhash);
		inp->sctp_tcbhash = 0;
	}
	/* Now we must put the ep memory back into the zone pool */
	SCTP_INP_LOCK_DESTROY(inp);
	SCTP_INP_READ_DESTROY(inp);
	SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
	SCTP_INP_INFO_WUNLOCK();
	SCTP_ITERATOR_UNLOCK();
	SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
	SCTP_DECR_EP_COUNT();
	splx(s);
}

/*
 * Look up the sctp_nets entry for 'addr' on this association.
 * Returns the matching net, or NULL if 'addr' is not a known peer
 * address (comparison is done by sctp_cmpaddr()).
 */
struct sctp_nets *
sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
{
	struct sctp_nets *net;

	/* locate the address */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
			return (net);
	}
	return (NULL);
}

/*
 * add's a remote endpoint address, done with the INIT/INIT-ACK as well as
 * when a ASCONF arrives that adds it. It will also initialize all the cwnd
 * stats of stuff.
 */
/*
 * (The comment above describes sctp_add_remote_addr() below.)
 *
 * Return 1 if 'addr' matches an address configured on any local
 * interface — i.e. the address belongs to this host — else 0.
 * Only AF_INET and AF_INET6 addresses can match.
 */
int
sctp_is_address_on_local_host(struct sockaddr *addr)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	/* walk every interface, and every address on each interface */
	TAILQ_FOREACH(ifn, &ifnet, if_list) {
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (addr->sa_family == ifa->ifa_addr->sa_family) {
				/* same family */
				if (addr->sa_family == AF_INET) {
					struct sockaddr_in *sin, *sin_c;

					sin = (struct sockaddr_in *)addr;
					sin_c = (struct sockaddr_in *)
					    ifa->ifa_addr;
					if (sin->sin_addr.s_addr ==
					    sin_c->sin_addr.s_addr) {
						/*
						 * we are on the same
						 * machine
						 */
						return (1);
					}
				} else if (addr->sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6, *sin_c6;

					sin6 = (struct sockaddr_in6 *)addr;
					sin_c6 = (struct sockaddr_in6 *)
					    ifa->ifa_addr;
					/* scope-less compare, see SCTP6_ARE_ADDR_EQUAL */
					if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
					    &sin_c6->sin6_addr)) {
						/*
						 * we are on the same
						 * machine
						 */
						return (1);
					}
				}
			}
		}
	}
	return (0);
}

int
sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
    int set_scope, int from)
{
	/*
	 * The following is redundant to the same lines in the
	 * sctp_aloc_assoc() but is needed since other's call the add
	 * address function
	 */
	struct sctp_nets *net, *netfirst;
	int addr_inscope;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
		printf("Adding an address (from:%d) to the peer: ", from);
		sctp_print_address(newaddr);
	}
#endif
	netfirst = sctp_findnet(stcb, newaddr);
	if (netfirst) {
		/*
		 * Lie and return ok, we don't want to make the association
		 * go away for this behavior. It will happen in the TCP
		 * model in a connected socket. It does not reach the hash
		 * table until after the association is built so it can't be
		 * found. Mark as reachable, since the initial creation will
		 * have been cleared and the NOT_IN_ASSOC flag will have
		 * been added... and we don't want to end up removing it
		 * back out.
*/ if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) { netfirst->dest_state = (SCTP_ADDR_REACHABLE | SCTP_ADDR_UNCONFIRMED); } else { netfirst->dest_state = SCTP_ADDR_REACHABLE; } return (0); } addr_inscope = 1; if (newaddr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)newaddr; if (sin->sin_addr.s_addr == 0) { /* Invalid address */ return (-1); } /* zero out the bzero area */ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); /* assure len is set */ sin->sin_len = sizeof(struct sockaddr_in); if (set_scope) { #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE stcb->ipv4_local_scope = 1; #else if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { stcb->asoc.ipv4_local_scope = 1; } #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ if (sctp_is_address_on_local_host(newaddr)) { stcb->asoc.loopback_scope = 1; stcb->asoc.ipv4_local_scope = 1; stcb->asoc.local_scope = 1; stcb->asoc.site_scope = 1; } } else { if (from == 8) { /* From connectx */ if (sctp_is_address_on_local_host(newaddr)) { stcb->asoc.loopback_scope = 1; stcb->asoc.ipv4_local_scope = 1; stcb->asoc.local_scope = 1; stcb->asoc.site_scope = 1; } } /* Validate the address is in scope */ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) && (stcb->asoc.ipv4_local_scope == 0)) { addr_inscope = 0; } } } else if (newaddr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)newaddr; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* Invalid address */ return (-1); } /* assure len is set */ sin6->sin6_len = sizeof(struct sockaddr_in6); if (set_scope) { if (sctp_is_address_on_local_host(newaddr)) { stcb->asoc.loopback_scope = 1; stcb->asoc.local_scope = 1; stcb->asoc.ipv4_local_scope = 1; stcb->asoc.site_scope = 1; } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { /* * If the new destination is a LINK_LOCAL we * must have common site scope. Don't set * the local scope since we may not share * all links, only loopback can do this. 
* Links on the local network would also be * on our private network for v4 too. */ stcb->asoc.ipv4_local_scope = 1; stcb->asoc.site_scope = 1; } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { /* * If the new destination is SITE_LOCAL then * we must have site scope in common. */ stcb->asoc.site_scope = 1; } } else { if (from == 8) { /* From connectx */ if (sctp_is_address_on_local_host(newaddr)) { stcb->asoc.loopback_scope = 1; stcb->asoc.ipv4_local_scope = 1; stcb->asoc.local_scope = 1; stcb->asoc.site_scope = 1; } } /* Validate the address is in scope */ if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) && (stcb->asoc.loopback_scope == 0)) { addr_inscope = 0; } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) && (stcb->asoc.local_scope == 0)) { addr_inscope = 0; } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) && (stcb->asoc.site_scope == 0)) { addr_inscope = 0; } } } else { /* not supported family type */ return (-1); } net = (struct sctp_nets *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_net); if (net == NULL) { return (-1); } SCTP_INCR_RADDR_COUNT(); bzero(net, sizeof(*net)); memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len); if (newaddr->sa_family == AF_INET) { ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport; } else if (newaddr->sa_family == AF_INET6) { ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport; } net->addr_is_local = sctp_is_address_on_local_host(newaddr); net->failure_threshold = stcb->asoc.def_net_failure; if (addr_inscope == 0) { net->dest_state = (SCTP_ADDR_REACHABLE | SCTP_ADDR_OUT_OF_SCOPE); } else { if (from == 8) /* 8 is passed by connect_x */ net->dest_state = SCTP_ADDR_REACHABLE; else net->dest_state = SCTP_ADDR_REACHABLE | SCTP_ADDR_UNCONFIRMED; } net->RTO = stcb->asoc.initial_rto; stcb->asoc.numnets++; *(&net->ref_count) = 1; net->tos_flowlabel = 0; #ifdef AF_INET if (newaddr->sa_family == AF_INET) net->tos_flowlabel = stcb->asoc.default_tos; #endif #ifdef AF_INET6 if (newaddr->sa_family == AF_INET6) 
net->tos_flowlabel = stcb->asoc.default_flowlabel; #endif /* Init the timer structure */ callout_init(&net->rxt_timer.timer, 1); callout_init(&net->fr_timer.timer, 1); callout_init(&net->pmtu_timer.timer, 1); /* Now generate a route for this guy */ /* KAME hack: embed scopeid */ if (newaddr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; (void)sa6_embedscope(sin6, ip6_use_defzone); sin6->sin6_scope_id = 0; } rtalloc_ign((struct route *)&net->ro, 0UL); if (newaddr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; (void)sa6_recoverscope(sin6); } if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) { net->mtu = net->ro.ro_rt->rt_ifp->if_mtu; if (from == 1) { stcb->asoc.smallest_mtu = net->mtu; } /* start things off to match mtu of interface please. */ net->ro.ro_rt->rt_rmx.rmx_mtu = net->ro.ro_rt->rt_ifp->if_mtu; } else { net->mtu = stcb->asoc.smallest_mtu; } if (stcb->asoc.smallest_mtu > net->mtu) { stcb->asoc.smallest_mtu = net->mtu; } /* * We take the max of the burst limit times a MTU or the * INITIAL_CWND. We then limit this to 4 MTU's of sending. */ net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND)); /* we always get at LEAST 2 MTU's */ if (net->cwnd < (2 * net->mtu)) { net->cwnd = 2 * net->mtu; } net->ssthresh = stcb->asoc.peers_rwnd; #if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING) sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION); #endif /* * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning * of assoc (2005/06/27, iyengar@cis.udel.edu) */ net->find_pseudo_cumack = 1; net->find_rtx_pseudo_cumack = 1; net->src_addr_selected = 0; netfirst = TAILQ_FIRST(&stcb->asoc.nets); if (net->ro.ro_rt == NULL) { /* Since we have no route put it at the back */ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); } else if (netfirst == NULL) { /* We are the first one in the pool. 
*/ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); } else if (netfirst->ro.ro_rt == NULL) { /* * First one has NO route. Place this one ahead of the first * one. */ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) { /* * This one has a different interface than the one at the * top of the list. Place it ahead. */ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); } else { /* * Ok we have the same interface as the first one. Move * forward until we find either a) one with a NULL route... * insert ahead of that b) one with a different ifp.. insert * after that. c) end of the list.. insert at the tail. */ struct sctp_nets *netlook; do { netlook = TAILQ_NEXT(netfirst, sctp_next); if (netlook == NULL) { /* End of the list */ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); break; } else if (netlook->ro.ro_rt == NULL) { /* next one has NO route */ TAILQ_INSERT_BEFORE(netfirst, net, sctp_next); break; } else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp) { TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook, net, sctp_next); break; } /* Shift forward */ netfirst = netlook; } while (netlook != NULL); } /* got to have a primary set */ if (stcb->asoc.primary_destination == 0) { stcb->asoc.primary_destination = net; } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) && (net->ro.ro_rt) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { /* No route to current primary adopt new primary */ stcb->asoc.primary_destination = net; } sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net); /* Validate primary is first */ net = TAILQ_FIRST(&stcb->asoc.nets); if ((net != stcb->asoc.primary_destination) && (stcb->asoc.primary_destination)) { /* * first one on the list is NOT the primary sctp_cmpaddr() * is much more efficent if the primary is the first on the * list, make it so. 
*/ TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); } return (0); } /* * allocate an association and add it to the endpoint. The caller must be * careful to add all additional addresses once they are know right away or * else the assoc will be may experience a blackout scenario. */ struct sctp_tcb * sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, int for_a_init, int *error, uint32_t override_tag) { struct sctp_tcb *stcb; struct sctp_association *asoc; struct sctpasochead *head; uint16_t rport; int err; /* * Assumption made here: Caller has done a * sctp_findassociation_ep_addr(ep, addr's); to make sure the * address does not exist already. */ if (sctppcbinfo.ipi_count_asoc >= SCTP_MAX_NUM_OF_ASOC) { /* Hit max assoc, sorry no more */ *error = ENOBUFS; return (NULL); } SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { /* * If its in the TCP pool, its NOT allowed to create an * association. The parent listener needs to call * sctp_aloc_assoc.. or the one-2-many socket. If a peeled * off, or connected one does this.. its an error. 
*/ SCTP_INP_RUNLOCK(inp); *error = EINVAL; return (NULL); } #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB3) { printf("Allocate an association for peer:"); if (firstaddr) sctp_print_address(firstaddr); else printf("None\n"); printf("Port:%d\n", ntohs(((struct sockaddr_in *)firstaddr)->sin_port)); } #endif /* SCTP_DEBUG */ if (firstaddr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)firstaddr; if ((sin->sin_port == 0) || (sin->sin_addr.s_addr == 0)) { /* Invalid address */ SCTP_INP_RUNLOCK(inp); *error = EINVAL; return (NULL); } rport = sin->sin_port; } else if (firstaddr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)firstaddr; if ((sin6->sin6_port == 0) || (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) { /* Invalid address */ SCTP_INP_RUNLOCK(inp); *error = EINVAL; return (NULL); } rport = sin6->sin6_port; } else { /* not supported family type */ SCTP_INP_RUNLOCK(inp); *error = EINVAL; return (NULL); } SCTP_INP_RUNLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { /* * If you have not performed a bind, then we need to do the * ephemerial bind for you. */ if ((err = sctp_inpcb_bind(inp->sctp_socket, (struct sockaddr *)NULL, (struct thread *)NULL ))) { /* bind error, probably perm */ *error = err; return (NULL); } } stcb = (struct sctp_tcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asoc); if (stcb == NULL) { /* out of memory? 
*/ *error = ENOMEM; return (NULL); } SCTP_INCR_ASOC_COUNT(); bzero(stcb, sizeof(*stcb)); asoc = &stcb->asoc; SCTP_TCB_LOCK_INIT(stcb); SCTP_TCB_SEND_LOCK_INIT(stcb); /* setup back pointer's */ stcb->sctp_ep = inp; stcb->sctp_socket = inp->sctp_socket; if ((err = sctp_init_asoc(inp, asoc, for_a_init, override_tag))) { /* failed */ SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb); SCTP_DECR_ASOC_COUNT(); *error = err; return (NULL); } /* and the port */ stcb->rport = rport; SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { /* inpcb freed while alloc going on */ SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_DECR_ASOC_COUNT(); *error = EINVAL; return (NULL); } SCTP_TCB_LOCK(stcb); /* now that my_vtag is set, add it to the hash */ head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, sctppcbinfo.hashasocmark)]; /* put it in the bucket in the vtag hash of assoc's for the system */ LIST_INSERT_HEAD(head, stcb, sctp_asocs); SCTP_INP_INFO_WUNLOCK(); if ((err = sctp_add_remote_addr(stcb, firstaddr, 1, 1))) { /* failure.. memory error? 
 */
        if (asoc->strmout)
            SCTP_FREE(asoc->strmout);
        if (asoc->mapping_array)
            SCTP_FREE(asoc->mapping_array);
        SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
        SCTP_DECR_ASOC_COUNT();
        SCTP_TCB_LOCK_DESTROY(stcb);
        SCTP_TCB_SEND_LOCK_DESTROY(stcb);
        *error = ENOBUFS;
        return (NULL);
    }
    /* Init all the timers */
    callout_init(&asoc->hb_timer.timer, 1);
    callout_init(&asoc->dack_timer.timer, 1);
    callout_init(&asoc->asconf_timer.timer, 1);
    callout_init(&asoc->strreset_timer.timer, 1);
    callout_init(&asoc->shut_guard_timer.timer, 1);
    callout_init(&asoc->autoclose_timer.timer, 1);
    callout_init(&asoc->delayed_event_timer.timer, 1);
    LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
    /* now file the port under the hash as well */
    if (inp->sctp_tcbhash != NULL) {
        head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
            inp->sctp_hashmark)];
        LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
    }
    SCTP_INP_WUNLOCK(inp);
#ifdef SCTP_DEBUG
    if (sctp_debug_on & SCTP_DEBUG_PCB1) {
        printf("Association %p now allocated\n", stcb);
    }
#endif
    return (stcb);
}

/*
 * Unlink 'net' from the association's nets list, drop the list's reference
 * on it, and repair the cached shortcuts (primary_destination,
 * last_data_chunk_from, last_control_chunk_from) if they pointed at the
 * removed net.
 *
 * NOTE(review): 'net' is still compared against the cached pointers AFTER
 * sctp_free_remote_addr() has dropped a reference -- presumably the
 * refcount cannot reach zero here; confirm against sctp_free_remote_addr().
 */
void
sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sctp_association *asoc;

    asoc = &stcb->asoc;
    asoc->numnets--;
    TAILQ_REMOVE(&asoc->nets, net, sctp_next);
    sctp_free_remote_addr(net);
    if (net == asoc->primary_destination) {
        /* Reset primary */
        struct sctp_nets *lnet;

        lnet = TAILQ_FIRST(&asoc->nets);
        /* Try to find a confirmed primary */
        asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0);
    }
    if (net == asoc->last_data_chunk_from) {
        /* Reset primary */
        asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets);
    }
    if (net == asoc->last_control_chunk_from) {
        /* Clear net */
        asoc->last_control_chunk_from = NULL;
    }
/*	if (net == asoc->asconf_last_sent_to) {*/
    /* Reset primary */
/*		asoc->asconf_last_sent_to = TAILQ_FIRST(&asoc->nets);*/
/*	}*/
}

/*
 * remove a remote endpoint address from an association, it will fail if the
 * address does not exist.
*/ int sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr) { /* * Here we need to remove a remote address. This is quite simple, we * first find it in the list of address for the association * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE * on that item. Note we do not allow it to be removed if there are * no other addresses. */ struct sctp_association *asoc; struct sctp_nets *net, *net_tmp; asoc = &stcb->asoc; /* locate the address */ for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) { net_tmp = TAILQ_NEXT(net, sctp_next); if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) { continue; } if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr, remaddr)) { /* we found the guy */ if (asoc->numnets < 2) { /* Must have at LEAST two remote addresses */ return (-1); } else { sctp_remove_net(stcb, net); return (0); } } } /* not found. */ return (-2); } static void sctp_add_vtag_to_timewait(struct sctp_inpcb *inp, uint32_t tag) { struct sctpvtaghead *chain; struct sctp_tagblock *twait_block; struct timeval now; int set, i; SCTP_GETTIME_TIMEVAL(&now); chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; set = 0; if (!LIST_EMPTY(chain)) { /* Block(s) present, lets find space, and expire on the fly */ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { if ((twait_block->vtag_block[i].v_tag == 0) && !set) { twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT; twait_block->vtag_block[i].v_tag = tag; set = 1; } else if ((twait_block->vtag_block[i].v_tag) && ((long)twait_block->vtag_block[i].tv_sec_at_expire > now.tv_sec)) { /* Audit expires this guy */ twait_block->vtag_block[i].tv_sec_at_expire = 0; twait_block->vtag_block[i].v_tag = 0; if (set == 0) { /* Reuse it for my new tag */ twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT; twait_block->vtag_block[0].v_tag = tag; set = 1; } } } if (set) { /* * We only do 
up to the block where we can * place our tag for audits */ break; } } } /* Need to add a new block to chain */ if (!set) { SCTP_MALLOC(twait_block, struct sctp_tagblock *, sizeof(struct sctp_tagblock), "TimeWait"); if (twait_block == NULL) { return; } memset(twait_block, 0, sizeof(struct sctp_timewait)); LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock); twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT; twait_block->vtag_block[0].v_tag = tag; } } static void sctp_iterator_asoc_being_freed(struct sctp_inpcb *inp, struct sctp_tcb *stcb) { struct sctp_iterator *it; /* * Unlock the tcb lock we do this so we avoid a dead lock scenario * where the iterator is waiting on the TCB lock and the TCB lock is * waiting on the iterator lock. */ it = stcb->asoc.stcb_starting_point_for_iterator; if (it == NULL) { return; } if (it->inp != stcb->sctp_ep) { /* hmm, focused on the wrong one? */ return; } if (it->stcb != stcb) { return; } it->stcb = LIST_NEXT(stcb, sctp_tcblist); if (it->stcb == NULL) { /* done with all asoc's in this assoc */ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { it->inp = NULL; } else { it->inp = LIST_NEXT(inp, sctp_list); } } } /* * Free the association after un-hashing the remote port. */ int sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree) { int i; struct sctp_association *asoc; struct sctp_nets *net, *prev; struct sctp_laddr *laddr; struct sctp_tmit_chunk *chk; struct sctp_asconf_addr *aparam; struct sctp_stream_reset_list *liste; struct sctp_queued_to_read *sq; struct sctp_stream_queue_pending *sp; sctp_sharedkey_t *shared_key; struct socket *so; int ccnt = 0; int s, cnt = 0; /* first, lets purge the entry from the hash table. 
*/ s = splnet(); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 6); #endif if (stcb->asoc.state == 0) { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 7); #endif splx(s); /* there is no asoc, really TSNH :-0 */ return (1); } asoc = &stcb->asoc; if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) /* nothing around */ so = NULL; else so = inp->sctp_socket; /* * We used timer based freeing if a reader or writer is in the way. * So we first check if we are actually being called from a timer, * if so we abort early if a reader or writer is still in the way. */ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && (from_inpcbfree == 0)) { /* * is it the timer driving us? if so are the reader/writers * gone? */ if (stcb->asoc.refcnt) { /* nope, reader or writer in the way */ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); /* no asoc destroyed */ SCTP_TCB_UNLOCK(stcb); splx(s); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 8); #endif return (0); } } /* now clean up any other timers */ callout_stop(&asoc->hb_timer.timer); callout_stop(&asoc->dack_timer.timer); callout_stop(&asoc->strreset_timer.timer); callout_stop(&asoc->asconf_timer.timer); callout_stop(&asoc->autoclose_timer.timer); callout_stop(&asoc->shut_guard_timer.timer); callout_stop(&asoc->delayed_event_timer.timer); TAILQ_FOREACH(net, &asoc->nets, sctp_next) { callout_stop(&net->fr_timer.timer); callout_stop(&net->rxt_timer.timer); callout_stop(&net->pmtu_timer.timer); } /* Now the read queue needs to be cleaned up (only once) */ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) { SCTP_INP_READ_LOCK(inp); TAILQ_FOREACH(sq, &inp->read_queue, next) { if (sq->stcb == stcb) { sq->do_not_ref_stcb = 1; sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn; if ((from_inpcbfree == 0) && so) { /* * Only if we have a socket lock do * we do this */ if ((sq->held_length) || (sq->end_added == 0) || ((sq->length == 0) && (sq->end_added == 
0))) { /* Held for PD-API */ sq->held_length = 0; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) { /* * need to change to * PD-API aborted */ stcb->asoc.control_pdapi = sq; sctp_notify_partial_delivery_indication(stcb, SCTP_PARTIAL_DELIVERY_ABORTED, 1); stcb->asoc.control_pdapi = NULL; } else { /* * need to get the * reader to remove * it */ sq->length = 0; if (sq->data) { struct mbuf *m; m = sq->data; while (m) { sctp_sbfree(sq, stcb, &stcb->sctp_socket->so_rcv, m); m = sctp_m_free(m); } sq->data = NULL; sq->tail_mbuf = NULL; } } } } sq->end_added = 1; cnt++; } } SCTP_INP_READ_UNLOCK(inp); if (stcb->block_entry) { stcb->block_entry->error = ECONNRESET; stcb->block_entry = NULL; } } stcb->asoc.state |= SCTP_STATE_ABOUT_TO_BE_FREED; if ((from_inpcbfree != 2) && (stcb->asoc.refcnt)) { /* * reader or writer in the way, we have hopefully given him * something to chew on above. */ if (so) { /* Wake any reader/writers */ sctp_sorwakeup(inp, so); sctp_sowwakeup(inp, so); } sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); SCTP_TCB_UNLOCK(stcb); splx(s); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 9); #endif /* no asoc destroyed */ return (0); } #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 10); #endif if ((from_inpcbfree == 0) && so) { sctp_sorwakeup(inp, so); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* * For TCP type we need special handling when we are * connected. We also include the peel'ed off ones to. */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED; inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED; if (so) { SOCK_LOCK(so); if (so->so_rcv.sb_cc == 0) { so->so_state &= ~(SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING | SS_ISCONNECTED); } SOCK_UNLOCK(so); sctp_sowwakeup(inp, so); sctp_sorwakeup(inp, so); wakeup(&so->so_timeo); } } } /* * When I reach here, no others want to kill the assoc yet.. and I * own the lock. 
Now its possible an abort comes in when I do the * lock exchange below to grab all the locks to do the final take * out. to prevent this we increment the count, which will start a * timer and blow out above thus assuring us that we hold exclusive * killing of the asoc. Note that after getting back the TCB lock we * will go ahead and increment the counter back up and stop any * timer a passing stranger may have started :-S */ if (from_inpcbfree == 0) { atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_ITERATOR_LOCK(); SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); SCTP_TCB_LOCK(stcb); } /* Stop any timer someone may have started */ callout_stop(&asoc->strreset_timer.timer); /* * Make it invalid too, that way if its about to run it will abort * and return. */ asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE; sctp_iterator_asoc_being_freed(inp, stcb); /* re-increment the lock */ if (from_inpcbfree == 0) { atomic_add_int(&stcb->asoc.refcnt, -1); } /* now restop the timers to be sure - this is paranoia at is finest! 
*/ callout_stop(&asoc->hb_timer.timer); callout_stop(&asoc->dack_timer.timer); callout_stop(&asoc->strreset_timer.timer); callout_stop(&asoc->asconf_timer.timer); callout_stop(&asoc->shut_guard_timer.timer); callout_stop(&asoc->autoclose_timer.timer); callout_stop(&asoc->delayed_event_timer.timer); TAILQ_FOREACH(net, &asoc->nets, sctp_next) { callout_stop(&net->fr_timer.timer); callout_stop(&net->rxt_timer.timer); callout_stop(&net->pmtu_timer.timer); } asoc->state = 0; if (inp->sctp_tcbhash) { LIST_REMOVE(stcb, sctp_tcbhash); } if (stcb->asoc.in_restart_hash) { LIST_REMOVE(stcb, sctp_tcbrestarhash); } /* Now lets remove it from the list of ALL associations in the EP */ LIST_REMOVE(stcb, sctp_tcblist); if (from_inpcbfree == 0) { SCTP_INP_INCR_REF(inp); SCTP_INP_WUNLOCK(inp); SCTP_ITERATOR_UNLOCK(); } /* pull from vtag hash */ LIST_REMOVE(stcb, sctp_asocs); sctp_add_vtag_to_timewait(inp, asoc->my_vtag); if (from_inpcbfree == 0) { SCTP_INP_INFO_WUNLOCK(); } prev = NULL; /* * The chunk lists and such SHOULD be empty but we check them just * in case. 
*/ /* anything on the wheel needs to be removed */ for (i = 0; i < asoc->streamoutcnt; i++) { struct sctp_stream_out *outs; outs = &asoc->strmout[i]; /* now clean up any chunks here */ sp = TAILQ_FIRST(&outs->outqueue); while (sp) { TAILQ_REMOVE(&outs->outqueue, sp, next); if (sp->data) { sctp_m_freem(sp->data); sp->data = NULL; sp->tail_mbuf = NULL; } sctp_free_remote_addr(sp->net); sctp_free_spbufspace(stcb, asoc, sp); /* Free the zone stuff */ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp); SCTP_DECR_STRMOQ_COUNT(); sp = TAILQ_FIRST(&outs->outqueue); } } while ((sp = TAILQ_FIRST(&asoc->free_strmoq)) != NULL) { TAILQ_REMOVE(&asoc->free_strmoq, sp, next); if (sp->data) { sctp_m_freem(sp->data); sp->data = NULL; sp->tail_mbuf = NULL; } /* Free the zone stuff */ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp); SCTP_DECR_STRMOQ_COUNT(); atomic_add_int(&sctppcbinfo.ipi_free_strmoq, -1); } while ((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) { TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); SCTP_FREE(liste); } sq = TAILQ_FIRST(&asoc->pending_reply_queue); while (sq) { TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next); if (sq->data) { sctp_m_freem(sq->data); sq->data = NULL; } sctp_free_remote_addr(sq->whoFrom); sq->whoFrom = NULL; sq->stcb = NULL; /* Free the ctl entry */ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq); SCTP_DECR_READQ_COUNT(); sq = TAILQ_FIRST(&asoc->pending_reply_queue); } chk = TAILQ_FIRST(&asoc->free_chunks); while (chk) { TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } ccnt++; SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); SCTP_DECR_CHK_COUNT(); atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1); asoc->free_chunk_cnt--; chk = TAILQ_FIRST(&asoc->free_chunks); } /* pending send queue SHOULD be empty */ if (!TAILQ_EMPTY(&asoc->send_queue)) { chk = TAILQ_FIRST(&asoc->send_queue); while (chk) { TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); if (chk->data) { 
sctp_m_freem(chk->data); chk->data = NULL; } ccnt++; sctp_free_remote_addr(chk->whoTo); SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); SCTP_DECR_CHK_COUNT(); chk = TAILQ_FIRST(&asoc->send_queue); } } /* if(ccnt) { printf("Freed %d from send_queue\n", ccnt); ccnt = 0; } */ /* sent queue SHOULD be empty */ if (!TAILQ_EMPTY(&asoc->sent_queue)) { chk = TAILQ_FIRST(&asoc->sent_queue); while (chk) { TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } ccnt++; sctp_free_remote_addr(chk->whoTo); SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); SCTP_DECR_CHK_COUNT(); chk = TAILQ_FIRST(&asoc->sent_queue); } } /* if(ccnt) { printf("Freed %d from sent_queue\n", ccnt); ccnt = 0; } */ /* control queue MAY not be empty */ if (!TAILQ_EMPTY(&asoc->control_send_queue)) { chk = TAILQ_FIRST(&asoc->control_send_queue); while (chk) { TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } ccnt++; sctp_free_remote_addr(chk->whoTo); SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); SCTP_DECR_CHK_COUNT(); chk = TAILQ_FIRST(&asoc->control_send_queue); } } /* if(ccnt) { printf("Freed %d from ctrl_queue\n", ccnt); ccnt = 0; } */ if (!TAILQ_EMPTY(&asoc->reasmqueue)) { chk = TAILQ_FIRST(&asoc->reasmqueue); while (chk) { TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } sctp_free_remote_addr(chk->whoTo); ccnt++; SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); SCTP_DECR_CHK_COUNT(); chk = TAILQ_FIRST(&asoc->reasmqueue); } } /* if(ccnt) { printf("Freed %d from reasm_queue\n", ccnt); ccnt = 0; } */ if (asoc->mapping_array) { SCTP_FREE(asoc->mapping_array); asoc->mapping_array = NULL; } /* the stream outs */ if (asoc->strmout) { SCTP_FREE(asoc->strmout); asoc->strmout = NULL; } asoc->streamoutcnt = 0; if (asoc->strmin) { struct sctp_queued_to_read *ctl; int i; for (i = 0; i < asoc->streamincnt; i++) { if 
(!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) { /* We have somethings on the streamin queue */ ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); while (ctl) { TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); sctp_free_remote_addr(ctl->whoFrom); if (ctl->data) { sctp_m_freem(ctl->data); ctl->data = NULL; } /* * We don't free the address here * since all the net's were freed * above. */ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl); SCTP_DECR_READQ_COUNT(); ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); } } } SCTP_FREE(asoc->strmin); asoc->strmin = NULL; } asoc->streamincnt = 0; while (!TAILQ_EMPTY(&asoc->nets)) { net = TAILQ_FIRST(&asoc->nets); /* pull from list */ if ((sctppcbinfo.ipi_count_raddr == 0) || (prev == net)) { #ifdef INVARIENTS panic("no net's left alloc'ed, or list points to itself"); #endif break; } prev = net; TAILQ_REMOVE(&asoc->nets, net, sctp_next); sctp_free_remote_addr(net); } /* local addresses, if any */ while (!LIST_EMPTY(&asoc->sctp_local_addr_list)) { laddr = LIST_FIRST(&asoc->sctp_local_addr_list); LIST_REMOVE(laddr, sctp_nxt_addr); SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr); SCTP_DECR_LADDR_COUNT(); } /* pending asconf (address) parameters */ while (!TAILQ_EMPTY(&asoc->asconf_queue)) { aparam = TAILQ_FIRST(&asoc->asconf_queue); TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); SCTP_FREE(aparam); } if (asoc->last_asconf_ack_sent != NULL) { sctp_m_freem(asoc->last_asconf_ack_sent); asoc->last_asconf_ack_sent = NULL; } /* clean up auth stuff */ if (asoc->local_hmacs) sctp_free_hmaclist(asoc->local_hmacs); if (asoc->peer_hmacs) sctp_free_hmaclist(asoc->peer_hmacs); if (asoc->local_auth_chunks) sctp_free_chunklist(asoc->local_auth_chunks); if (asoc->peer_auth_chunks) sctp_free_chunklist(asoc->peer_auth_chunks); sctp_free_authinfo(&asoc->authinfo); shared_key = LIST_FIRST(&asoc->shared_keys); while (shared_key) { LIST_REMOVE(shared_key, next); sctp_free_sharedkey(shared_key); shared_key = LIST_FIRST(&asoc->shared_keys); } /* Insert new items here 
:> */ /* Get rid of LOCK */ SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); /* now clean up the tasoc itself */ SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb); SCTP_DECR_ASOC_COUNT(); if (from_inpcbfree == 0) { SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { /* * If its NOT the inp_free calling us AND sctp_close * as been called, we call back... */ SCTP_INP_RUNLOCK(inp); /* * This will start the kill timer (if we are the * lastone) since we hold an increment yet. But this * is the only safe way to do this since otherwise * if the socket closes at the same time we are here * we might collide in the cleanup. */ sctp_inpcb_free(inp, 0, 0); SCTP_INP_DECR_REF(inp); } else { /* The socket is still open. */ SCTP_INP_DECR_REF(inp); SCTP_INP_RUNLOCK(inp); } } splx(s); /* destroyed the asoc */ #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 11); #endif return (1); } /* * determine if a destination is "reachable" based upon the addresses bound * to the current endpoint (e.g. only v4 or v6 currently bound) */ /* * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use * assoc level v4/v6 flags, as the assoc *may* not have the same address * types bound as its endpoint */ int sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr) { struct sctp_inpcb *inp; int answer; /* * No locks here, the TCB, in all cases is already locked and an * assoc is up. There is either a INP lock by the caller applied (in * asconf case when deleting an address) or NOT in the HB case, * however if HB then the INP increment is up and the INP will not * be removed (on top of the fact that we have a TCB lock). So we * only want to read the sctp_flags, which is either bound-all or * not.. no protection needed since once an assoc is up you can't be * changing your binding. 
*/ inp = stcb->sctp_ep; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* if bound all, destination is not restricted */ /* * RRS: Question during lock work: Is this correct? If you * are bound-all you still might need to obey the V4--V6 * flags??? IMO this bound-all stuff needs to be removed! */ return (1); } /* NOTE: all "scope" checks are done when local addresses are added */ if (destaddr->sa_family == AF_INET6) { answer = inp->ip_inp.inp.inp_vflag & INP_IPV6; } else if (destaddr->sa_family == AF_INET) { answer = inp->ip_inp.inp.inp_vflag & INP_IPV4; } else { /* invalid family, so it's unreachable */ answer = 0; } return (answer); } /* * update the inp_vflags on an endpoint */ static void sctp_update_ep_vflag(struct sctp_inpcb *inp) { struct sctp_laddr *laddr; /* first clear the flag */ inp->ip_inp.inp.inp_vflag = 0; /* set the flag based on addresses on the ep list */ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("An ounce of prevention is worth a pound of cure\n"); } #endif /* SCTP_DEBUG */ continue; } if (laddr->ifa->ifa_addr) { continue; } if (laddr->ifa->ifa_addr->sa_family == AF_INET6) { inp->ip_inp.inp.inp_vflag |= INP_IPV6; } else if (laddr->ifa->ifa_addr->sa_family == AF_INET) { inp->ip_inp.inp.inp_vflag |= INP_IPV4; } } } /* * Add the address to the endpoint local address list There is nothing to be * done if we are bound to all addresses */ int sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa) { struct sctp_laddr *laddr; int fnd, error; fnd = 0; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* You are already bound to all. You have it already */ return (0); } if (ifa->ifa_addr->sa_family == AF_INET6) { struct in6_ifaddr *ifa6; ifa6 = (struct in6_ifaddr *)ifa; if (ifa6->ia6_flags & (IN6_IFF_DETACHED | IN6_IFF_DEPRECATED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY)) /* Can't bind a non-existent addr. 
*/
			return (-1);
	}
	/* first, is it already present? */
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == ifa) {
			fnd = 1;
			break;
		}
	}
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd == 0)) {
		/* Not bound to all */
		error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
		if (error != 0)
			return (error);
		inp->laddr_count++;
		/* update inp_vflag flags */
		if (ifa->ifa_addr->sa_family == AF_INET6) {
			inp->ip_inp.inp.inp_vflag |= INP_IPV6;
		} else if (ifa->ifa_addr->sa_family == AF_INET) {
			inp->ip_inp.inp.inp_vflag |= INP_IPV4;
		}
	}
	return (0);
}

/*
 * select a new (hopefully reachable) destination net (should only be used
 * when we deleted an ep addr that is the only usable source address to reach
 * the destination net)
 */
static void
sctp_select_primary_destination(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/* for now, we'll just pick the first reachable one we find */
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
			continue;
		if (sctp_destination_is_reachable(stcb,
		    (struct sockaddr *)&net->ro._l_addr)) {
			/* found a reachable destination */
			stcb->asoc.primary_destination = net;
		}
	}
	/* I can't there from here! ...we're gonna die shortly... */
}

/*
 * Delete the address from the endpoint local address list There is nothing
 * to be done if we are bound to all addresses
 */
int
sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
{
	struct sctp_laddr *laddr;
	int fnd;

	fnd = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* You are already bound to all. You have it already */
		return (EINVAL);
	}
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == ifa) {
			fnd = 1;
			break;
		}
	}
	if (fnd && (inp->laddr_count < 2)) {
		/* can't delete unless there are at LEAST 2 addresses */
		return (-1);
	}
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd)) {
		/*
		 * clean up any use of this address go through our
		 * associations and clear any last_used_address that match
		 * this one for each assoc, see if a new primary_destination
		 * is needed
		 */
		struct sctp_tcb *stcb;

		/* clean up "next_addr_touse" */
		if (inp->next_addr_touse == laddr)
			/* delete this address */
			inp->next_addr_touse = NULL;

		/* clean up "last_used_address" */
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			if (stcb->asoc.last_used_address == laddr)
				/* delete this address */
				stcb->asoc.last_used_address = NULL;
		}		/* for each tcb */

		/* remove it from the ep list */
		sctp_remove_laddr(laddr);
		inp->laddr_count--;
		/* update inp_vflag flags */
		sctp_update_ep_vflag(inp);
		/* select a new primary destination if needed */
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			/*
			 * presume caller (sctp_asconf.c) already owns INP
			 * lock
			 */
			SCTP_TCB_LOCK(stcb);
			if (sctp_destination_is_reachable(stcb,
			    (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr) == 0) {
				sctp_select_primary_destination(stcb);
			}
			SCTP_TCB_UNLOCK(stcb);
		}		/* for each tcb */
	}
	return (0);
}

/*
 * Add the addr to the TCB local address list For the BOUNDALL or dynamic
 * case, this is a "pending" address list (eg. addresses waiting for an
 * ASCONF-ACK response) For the subset binding, static case, this is a
 * "valid" address list
 */
int
sctp_add_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
{
	struct sctp_inpcb *inp;
	struct sctp_laddr *laddr;
	int error;

	/*
	 * Assumes TCP is locked.. and possiblye the INP. May need to
	 * confirm/fix that if we need it and is not the case.
	 */
	inp = stcb->sctp_ep;
	if (ifa->ifa_addr->sa_family == AF_INET6) {
		struct in6_ifaddr *ifa6;

		ifa6 = (struct in6_ifaddr *)ifa;
		if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
		/* IN6_IFF_DEPRECATED | */
		    IN6_IFF_ANYCAST |
		    IN6_IFF_NOTREADY))
			/* Can't bind a non-existent addr. */
			return (-1);
	}
	/* does the address already exist? */
	LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == ifa) {
			return (-1);
		}
	}

	/* add to the list */
	error = sctp_insert_laddr(&stcb->asoc.sctp_local_addr_list, ifa);
	if (error != 0)
		return (error);
	return (0);
}

/*
 * insert an laddr entry with the given ifa for the desired list
 */
int
sctp_insert_laddr(struct sctpladdr *list, struct ifaddr *ifa)
{
	struct sctp_laddr *laddr;
	int s;

	s = splnet();
	laddr = (struct sctp_laddr *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr);
	if (laddr == NULL) {
		/*
		 * out of memory?  NOTE(review): EINVAL (not ENOMEM) is what
		 * callers currently expect here.
		 */
		splx(s);
		return (EINVAL);
	}
	SCTP_INCR_LADDR_COUNT();
	bzero(laddr, sizeof(*laddr));
	laddr->ifa = ifa;
	/* insert it */
	LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
	splx(s);
	return (0);
}

/*
 * Remove an laddr entry from the local address list (on an assoc)
 */
void
sctp_remove_laddr(struct sctp_laddr *laddr)
{
	int s;

	s = splnet();
	/* remove from the list */
	LIST_REMOVE(laddr, sctp_nxt_addr);
	SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
	SCTP_DECR_LADDR_COUNT();
	splx(s);
}

/*
 * Remove an address from the TCB local address list
 */
int
sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
{
	struct sctp_inpcb *inp;
	struct sctp_laddr *laddr;

	/*
	 * This is called by asconf work. It is assumed that a) The TCB is
	 * locked and b) The INP is locked. This is true in as much as I can
	 * trace through the entry asconf code where I did these locks.
	 * Again, the ASCONF code is a bit different in that it does lock
	 * the INP during its work often times. This must be since we don't
	 * want other proc's looking up things while what they are looking
	 * up is changing :-D
	 */
	inp = stcb->sctp_ep;
	/* if subset bound and don't allow ASCONF's, can't delete last */
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
		if (stcb->asoc.numnets < 2) {
			/* can't delete last address */
			return (-1);
		}
	}
	LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
		/* remove the address if it exists */
		if (laddr->ifa == NULL)
			continue;
		if (laddr->ifa == ifa) {
			sctp_remove_laddr(laddr);
			return (0);
		}
	}

	/* address not found! */
	return (-1);
}

/*
 * Remove an address from the TCB local address list lookup using a sockaddr
 * addr
 */
int
sctp_del_local_addr_assoc_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
{
	struct sctp_inpcb *inp;
	struct sctp_laddr *laddr;
	struct sockaddr *l_sa;

	/*
	 * This function I find does not seem to have a caller. As such we
	 * NEED TO DELETE this code. If we do find a caller, the caller MUST
	 * have locked the TCB at the least and probably the INP as well.
	 */
	inp = stcb->sctp_ep;
	/* if subset bound and don't allow ASCONF's, can't delete last */
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
		if (stcb->asoc.numnets < 2) {
			/* can't delete last address */
			return (-1);
		}
	}
	LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
		/* make sure the address exists */
		if (laddr->ifa == NULL)
			continue;
		if (laddr->ifa->ifa_addr == NULL)
			continue;

		l_sa = laddr->ifa->ifa_addr;
		if (l_sa->sa_family == AF_INET6) {
			/* IPv6 address */
			struct sockaddr_in6 *sin1, *sin2;

			sin1 = (struct sockaddr_in6 *)l_sa;
			sin2 = (struct sockaddr_in6 *)sa;
			if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
			    sizeof(struct in6_addr)) == 0) {
				/* matched */
				sctp_remove_laddr(laddr);
				return (0);
			}
		} else if (l_sa->sa_family == AF_INET) {
			/* IPv4 address */
			struct sockaddr_in *sin1, *sin2;

			sin1 = (struct sockaddr_in *)l_sa;
			sin2 = (struct sockaddr_in *)sa;
			if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
				/* matched */
				sctp_remove_laddr(laddr);
				return (0);
			}
		} else {
			/* invalid family */
			return (-1);
		}
	}			/* end foreach */
	/* address not found! */
	return (-1);
}

static char sctp_pcb_initialized = 0;

/*
 * Temporarily remove for __APPLE__ until we use the Tiger equivalents
 */
/* sysctl */
static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;

void
sctp_pcb_init()
{
	/*
	 * SCTP initialization for the PCB structures should be called by
	 * the sctp_init() funciton.
	 */
	int i;

	if (sctp_pcb_initialized != 0) {
		/* error I was called twice */
		return;
	}
	sctp_pcb_initialized = 1;

	/* global stats are zeroed once at boot */
	bzero(&sctpstat, sizeof(struct sctpstat));

	/* init the empty list of (All) Endpoints */
	LIST_INIT(&sctppcbinfo.listhead);

	/* init the iterator head */
	LIST_INIT(&sctppcbinfo.iteratorhead);

	/* init the hash table of endpoints */
	TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &sctp_hashtblsize);
	TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize);
	TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale);

	sctppcbinfo.sctp_asochash = hashinit((sctp_hashtblsize * 31),
	    M_PCB,
	    &sctppcbinfo.hashasocmark);
	sctppcbinfo.sctp_ephash = hashinit(sctp_hashtblsize,
	    M_PCB,
	    &sctppcbinfo.hashmark);
	sctppcbinfo.sctp_tcpephash = hashinit(sctp_hashtblsize,
	    M_PCB,
	    &sctppcbinfo.hashtcpmark);
	sctppcbinfo.hashtblsize = sctp_hashtblsize;

	/*
	 * init the small hash table we use to track restarted asoc's
	 */
	sctppcbinfo.sctp_restarthash = hashinit(SCTP_STACK_VTAG_HASH_SIZE,
	    M_PCB,
	    &sctppcbinfo.hashrestartmark);

	/* init the zones */
	/*
	 * FIX ME: Should check for NULL returns, but if it does fail we are
	 * doomed to panic anyways... add later maybe.
	 */
	SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_ep, "sctp_ep",
	    sizeof(struct sctp_inpcb), maxsockets);

	SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asoc, "sctp_asoc",
	    sizeof(struct sctp_tcb), sctp_max_number_of_assoc);

	SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_laddr, "sctp_laddr",
	    sizeof(struct sctp_laddr),
	    (sctp_max_number_of_assoc * sctp_scale_up_for_address));

	SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_net, "sctp_raddr",
	    sizeof(struct sctp_nets),
	    (sctp_max_number_of_assoc * sctp_scale_up_for_address));

	SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_chunk, "sctp_chunk",
	    sizeof(struct sctp_tmit_chunk),
	    (sctp_max_number_of_assoc * sctp_chunkscale));

	SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_readq, "sctp_readq",
	    sizeof(struct sctp_queued_to_read),
	    (sctp_max_number_of_assoc * sctp_chunkscale));

	SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_strmoq, "sctp_stream_msg_out",
	    sizeof(struct sctp_stream_queue_pending),
	    (sctp_max_number_of_assoc * sctp_chunkscale));

	/* Master Lock INIT for info structure */
	SCTP_INP_INFO_LOCK_INIT();
	SCTP_STATLOG_INIT_LOCK();
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_COUNT_INIT();
	SCTP_IPI_ADDR_INIT();
	LIST_INIT(&sctppcbinfo.addr_wq);

	/* not sure if we need all the counts */
	sctppcbinfo.ipi_count_ep = 0;
	/* assoc/tcb zone info */
	sctppcbinfo.ipi_count_asoc = 0;
	/* local addrlist zone info */
	sctppcbinfo.ipi_count_laddr = 0;
	/* remote addrlist zone info */
	sctppcbinfo.ipi_count_raddr = 0;
	/* chunk info */
	sctppcbinfo.ipi_count_chunk = 0;

	/* socket queue zone info */
	sctppcbinfo.ipi_count_readq = 0;

	/* stream out queue cont */
	sctppcbinfo.ipi_count_strmoq = 0;

	sctppcbinfo.ipi_free_strmoq = 0;
	sctppcbinfo.ipi_free_chunks = 0;

	callout_init(&sctppcbinfo.addr_wq_timer.timer, 1);

	/* port stuff */
	sctppcbinfo.lastlow = ipport_firstauto;
	/* Init the TIMEWAIT list */
	for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
		LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
	}

}

int
sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
    int iphlen, int offset, int limit, struct sctphdr *sh,
    struct sockaddr *altsa)
{
	/*
* grub through the INIT pulling addresses and loading them to the
	 * nets structure in the asoc. The from address in the mbuf should
	 * also be loaded (if it is not already). This routine can be called
	 * with either INIT or INIT-ACK's as long as the m points to the IP
	 * packet and the offset points to the beginning of the parameters.
	 */
	struct sctp_inpcb *inp, *l_inp;
	struct sctp_nets *net, *net_tmp;
	struct ip *iph;
	struct sctp_paramhdr *phdr, parm_buf;
	struct sctp_tcb *stcb_tmp;
	uint16_t ptype, plen;
	struct sockaddr *sa;
	struct sockaddr_storage dest_store;
	struct sockaddr *local_sa = (struct sockaddr *)&dest_store;
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	uint8_t store[384];
	struct sctp_auth_random *random = NULL;
	uint16_t random_len = 0;
	struct sctp_auth_hmac_algo *hmacs = NULL;
	uint16_t hmacs_len = 0;
	struct sctp_auth_chunk_list *chunks = NULL;
	uint16_t num_chunks = 0;
	sctp_key_t *new_key;
	uint32_t keylen;
	int got_random = 0, got_hmacs = 0, got_chklist = 0;

	/* First get the destination address setup too. */
	memset(&sin, 0, sizeof(sin));
	memset(&sin6, 0, sizeof(sin6));

	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = stcb->rport;

	sin6.sin6_family = AF_INET6;
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_port = stcb->rport;
	if (altsa == NULL) {
		iph = mtod(m, struct ip *);
		if (iph->ip_v == IPVERSION) {
			/* its IPv4 */
			struct sockaddr_in *sin_2;

			sin_2 = (struct sockaddr_in *)(local_sa);
			memset(sin_2, 0, sizeof(sin));
			sin_2->sin_family = AF_INET;
			sin_2->sin_len = sizeof(sin);
			sin_2->sin_port = sh->dest_port;
			sin_2->sin_addr.s_addr = iph->ip_dst.s_addr;
			sin.sin_addr = iph->ip_src;
			sa = (struct sockaddr *)&sin;
		} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
			/* its IPv6 */
			struct ip6_hdr *ip6;
			struct sockaddr_in6 *sin6_2;

			ip6 = mtod(m, struct ip6_hdr *);
			sin6_2 = (struct sockaddr_in6 *)(local_sa);
			memset(sin6_2, 0, sizeof(sin6));
			sin6_2->sin6_family = AF_INET6;
			sin6_2->sin6_len = sizeof(struct sockaddr_in6);
			sin6_2->sin6_port = sh->dest_port;
			/*
			 * NOTE(review): unlike the IPv4 branch above, the
			 * destination address (ip6->ip6_dst) is never copied
			 * into sin6_2, so local_sa carries an unspecified v6
			 * address here.  Looks like a missing
			 * "sin6_2->sin6_addr = ip6->ip6_dst;" -- confirm.
			 */
			sin6.sin6_addr = ip6->ip6_src;
			sa = (struct sockaddr *)&sin6;
		} else {
			sa = NULL;
		}
	} else {
		/*
		 * For cookies we use the src address NOT from the packet
		 * but from the original INIT
		 */
		sa = altsa;
	}
	/* Turn off ECN until we get through all params */
	stcb->asoc.ecn_allowed = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/* mark all addresses that we have currently on the list */
		net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
	}
	/* does the source address already exist? if so skip it */
	l_inp = inp = stcb->sctp_ep;

	atomic_add_int(&stcb->asoc.refcnt, 1);
	stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
	atomic_add_int(&stcb->asoc.refcnt, -1);

	if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
		/* we must add the source address */
		/* no scope set here since we have a tcb already. */
		if ((sa->sa_family == AF_INET) &&
		    (stcb->asoc.ipv4_addr_legal)) {
			if (sctp_add_remote_addr(stcb, sa, 0, 2)) {
				return (-1);
			}
		} else if ((sa->sa_family == AF_INET6) &&
		    (stcb->asoc.ipv6_addr_legal)) {
			if (sctp_add_remote_addr(stcb, sa, 0, 3)) {
				return (-2);
			}
		}
	} else {
		if (net_tmp != NULL && stcb_tmp == stcb) {
			net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
		} else if (stcb_tmp != stcb) {
			/* It belongs to another association? */
			SCTP_TCB_UNLOCK(stcb_tmp);
			return (-3);
		}
	}
	if (stcb->asoc.state == 0) {
		/* the assoc was freed? */
		return (-4);
	}
	/* now we must go through each of the params. */
	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
	while (phdr) {
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		/*
		 * printf("ptype => %0x, plen => %d\n", (uint32_t)ptype,
		 * (int)plen);
		 */
		if (offset + plen > limit) {
			break;
		}
		if (plen == 0) {
			break;
		}
		if (ptype == SCTP_IPV4_ADDRESS) {
			if (stcb->asoc.ipv4_addr_legal) {
				struct sctp_ipv4addr_param *p4, p4_buf;

				/* ok get the v4 address and check/add */
				phdr = sctp_get_next_param(m, offset,
				    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
				if (plen != sizeof(struct sctp_ipv4addr_param) ||
				    phdr == NULL) {
					return (-5);
				}
				p4 = (struct sctp_ipv4addr_param *)phdr;
				sin.sin_addr.s_addr = p4->addr;
				sa = (struct sockaddr *)&sin;
				inp = stcb->sctp_ep;
				atomic_add_int(&stcb->asoc.refcnt, 1);
				stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
				    local_sa, stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);

				if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
				    inp == NULL) {
					/* we must add the source address */
					/*
					 * no scope set since we have a tcb
					 * already
					 */

					/*
					 * we must validate the state again
					 * here
					 */
					if (stcb->asoc.state == 0) {
						/* the assoc was freed? */
						return (-7);
					}
					if (sctp_add_remote_addr(stcb, sa, 0, 4)) {
						return (-8);
					}
				} else if (stcb_tmp == stcb) {
					if (stcb->asoc.state == 0) {
						/* the assoc was freed? */
						return (-10);
					}
					if (net != NULL) {
						/* clear flag */
						net->dest_state &=
						    ~SCTP_ADDR_NOT_IN_ASSOC;
					}
				} else {
					/*
					 * strange, address is in another
					 * assoc? straighten out locks.
					 */
					if (stcb->asoc.state == 0) {
						/* the assoc was freed? */
						return (-12);
					}
					return (-13);
				}
			}
		} else if (ptype == SCTP_IPV6_ADDRESS) {
			if (stcb->asoc.ipv6_addr_legal) {
				/* ok get the v6 address and check/add */
				struct sctp_ipv6addr_param *p6, p6_buf;

				phdr = sctp_get_next_param(m, offset,
				    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
				if (plen != sizeof(struct sctp_ipv6addr_param) ||
				    phdr == NULL) {
					return (-14);
				}
				p6 = (struct sctp_ipv6addr_param *)phdr;
				memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
				    sizeof(p6->addr));
				sa = (struct sockaddr *)&sin6;
				inp = stcb->sctp_ep;
				atomic_add_int(&stcb->asoc.refcnt, 1);
				stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
				    local_sa, stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
				    inp == NULL)) {
					/*
					 * we must validate the state again
					 * here
					 */
					if (stcb->asoc.state == 0) {
						/* the assoc was freed? */
						return (-16);
					}
					/*
					 * we must add the address, no scope
					 * set
					 */
					if (sctp_add_remote_addr(stcb, sa, 0, 5)) {
						return (-17);
					}
				} else if (stcb_tmp == stcb) {
					/*
					 * we must validate the state again
					 * here
					 */
					if (stcb->asoc.state == 0) {
						/* the assoc was freed? */
						return (-19);
					}
					if (net != NULL) {
						/* clear flag */
						net->dest_state &=
						    ~SCTP_ADDR_NOT_IN_ASSOC;
					}
				} else {
					/*
					 * strange, address is in another
					 * assoc? straighten out locks.
					 */
					if (stcb->asoc.state == 0) {
						/* the assoc was freed? */
						return (-21);
					}
					return (-22);
				}
			}
		} else if (ptype == SCTP_ECN_CAPABLE) {
			stcb->asoc.ecn_allowed = 1;
		} else if (ptype == SCTP_ULP_ADAPTATION) {
			if (stcb->asoc.state != SCTP_STATE_OPEN) {
				struct sctp_adaptation_layer_indication ai, *aip;

				phdr = sctp_get_next_param(m, offset,
				    (struct sctp_paramhdr *)&ai, sizeof(ai));
				aip = (struct sctp_adaptation_layer_indication *)phdr;
				sctp_ulp_notify(SCTP_NOTIFY_ADAPTATION_INDICATION,
				    stcb, ntohl(aip->indication), NULL);
			}
		} else if (ptype == SCTP_SET_PRIM_ADDR) {
			struct sctp_asconf_addr_param lstore, *fee;
			struct sctp_asconf_addrv4_param *fii;
			int lptype;
			struct sockaddr *lsa = NULL;

			stcb->asoc.peer_supports_asconf = 1;
			if (plen > sizeof(lstore)) {
				return (-23);
			}
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&lstore, plen);
			if (phdr == NULL) {
				return (-24);
			}
			fee = (struct sctp_asconf_addr_param *)phdr;
			lptype = ntohs(fee->addrp.ph.param_type);
			if (lptype == SCTP_IPV4_ADDRESS) {
				if (plen !=
				    sizeof(struct sctp_asconf_addrv4_param)) {
					printf("Sizeof setprim in init/init ack not %d but %d - ignored\n",
					    (int)sizeof(struct sctp_asconf_addrv4_param),
					    plen);
				} else {
					fii = (struct sctp_asconf_addrv4_param *)fee;
					sin.sin_addr.s_addr = fii->addrp.addr;
					lsa = (struct sockaddr *)&sin;
				}
			} else if (lptype == SCTP_IPV6_ADDRESS) {
				if (plen !=
				    sizeof(struct sctp_asconf_addr_param)) {
					printf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
					    (int)sizeof(struct sctp_asconf_addr_param),
					    plen);
				} else {
					memcpy(sin6.sin6_addr.s6_addr,
					    fee->addrp.addr,
					    sizeof(fee->addrp.addr));
					lsa = (struct sockaddr *)&sin6;
				}
			}
			if (lsa) {
				/*
				 * NOTE(review): the parsed set-primary
				 * address is lsa, but sa is passed here.
				 * They only coincide when sa already points
				 * at &sin/&sin6 -- confirm intent.
				 */
				sctp_set_primary_addr(stcb, sa, NULL);
			}
		} else if (ptype == SCTP_PRSCTP_SUPPORTED) {
			/* Peer supports pr-sctp */
			stcb->asoc.peer_supports_prsctp = 1;
		} else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
			/* A supported extension chunk */
			struct sctp_supported_chunk_types_param *pr_supported;
			uint8_t local_store[128];
			int num_ent, i;

			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)&local_store, plen);
			if (phdr == NULL) {
				return (-25);
			}
			stcb->asoc.peer_supports_asconf = 0;
			stcb->asoc.peer_supports_prsctp = 0;
			stcb->asoc.peer_supports_pktdrop = 0;
			stcb->asoc.peer_supports_strreset = 0;
			stcb->asoc.peer_supports_auth = 0;
			pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
			num_ent = plen - sizeof(struct sctp_paramhdr);
			for (i = 0; i < num_ent; i++) {
				switch (pr_supported->chunk_types[i]) {
				case SCTP_ASCONF:
				case SCTP_ASCONF_ACK:
					stcb->asoc.peer_supports_asconf = 1;
					break;
				case SCTP_FORWARD_CUM_TSN:
					stcb->asoc.peer_supports_prsctp = 1;
					break;
				case SCTP_PACKET_DROPPED:
					stcb->asoc.peer_supports_pktdrop = 1;
					break;
				case SCTP_STREAM_RESET:
					stcb->asoc.peer_supports_strreset = 1;
					break;
				case SCTP_AUTHENTICATION:
					stcb->asoc.peer_supports_auth = 1;
					break;
				default:
					/* one I have not learned yet */
					break;
				}
			}
		} else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
			/* Peer supports ECN-nonce */
			stcb->asoc.peer_supports_ecn_nonce = 1;
			stcb->asoc.ecn_nonce_allowed = 1;
		} else if (ptype == SCTP_RANDOM) {
			if (plen > sizeof(store))
				break;
			if (got_random) {
				/* already processed a RANDOM */
				goto next_param;
			}
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)store, plen);
			if (phdr == NULL)
				return (-26);
			random = (struct sctp_auth_random *)phdr;
			random_len = plen - sizeof(*random);
			/* enforce the random length */
			if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) {
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_AUTH1)
					printf("SCTP: invalid RANDOM len\n");
#endif
				return (-27);
			}
			got_random = 1;
		} else if (ptype == SCTP_HMAC_LIST) {
			int num_hmacs;
			int i;

			if (plen > sizeof(store))
				break;
			if (got_hmacs) {
				/* already processed a HMAC list */
				goto next_param;
			}
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)store, plen);
			if (phdr == NULL)
				return (-28);
			hmacs = (struct sctp_auth_hmac_algo *)phdr;
			hmacs_len = plen - sizeof(*hmacs);
			num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
			/* validate the hmac list */
			if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
				return (-29);
			}
			if (stcb->asoc.peer_hmacs != NULL)
				sctp_free_hmaclist(stcb->asoc.peer_hmacs);
			stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs);
			if (stcb->asoc.peer_hmacs != NULL) {
				for (i = 0; i < num_hmacs; i++) {
					sctp_auth_add_hmacid(stcb->asoc.peer_hmacs,
					    ntohs(hmacs->hmac_ids[i]));
				}
			}
			got_hmacs = 1;
		} else if (ptype == SCTP_CHUNK_LIST) {
			int i;

			if (plen > sizeof(store))
				break;
			if (got_chklist) {
				/* already processed a Chunks list */
				goto next_param;
			}
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)store, plen);
			if (phdr == NULL)
				return (-30);
			chunks = (struct sctp_auth_chunk_list *)phdr;
			num_chunks = plen - sizeof(*chunks);
			if (stcb->asoc.peer_auth_chunks != NULL)
				sctp_clear_chunklist(stcb->asoc.peer_auth_chunks);
			else
				stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist();
			for (i = 0; i < num_chunks; i++) {
				sctp_auth_add_chunk(chunks->chunk_types[i],
				    stcb->asoc.peer_auth_chunks);
			}
			got_chklist = 1;
		} else if ((ptype == SCTP_HEARTBEAT_INFO) ||
			    (ptype == SCTP_STATE_COOKIE) ||
			    (ptype == SCTP_UNRECOG_PARAM) ||
			    (ptype == SCTP_COOKIE_PRESERVE) ||
			    (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
			    (ptype == SCTP_ADD_IP_ADDRESS) ||
			    (ptype == SCTP_DEL_IP_ADDRESS) ||
			    (ptype == SCTP_ERROR_CAUSE_IND) ||
		    (ptype == SCTP_SUCCESS_REPORT)) {
			/* don't care */ ;
		} else {
			if ((ptype & 0x8000) == 0x0000) {
				/*
				 * must stop processing the rest of the
				 * param's. Any report bits were handled
				 * with the call to
				 * sctp_arethere_unrecognized_parameters()
				 * when the INIT or INIT-ACK was first seen.
				 */
				break;
			}
		}
next_param:
		offset += SCTP_SIZE32(plen);
		if (offset >= limit) {
			break;
		}
		phdr = sctp_get_next_param(m, offset, &parm_buf,
		    sizeof(parm_buf));
	}
	/* Now check to see if we need to purge any addresses */
	for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
		net_tmp = TAILQ_NEXT(net, sctp_next);
		if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
		    SCTP_ADDR_NOT_IN_ASSOC) {
			/* This address has been removed from the asoc */
			/* remove and free it */
			stcb->asoc.numnets--;
			TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
			sctp_free_remote_addr(net);
			if (net == stcb->asoc.primary_destination) {
				stcb->asoc.primary_destination = NULL;
				sctp_select_primary_destination(stcb);
			}
		}
	}
	/* validate authentication required parameters */
	if (got_random && got_hmacs) {
		stcb->asoc.peer_supports_auth = 1;
	} else {
		stcb->asoc.peer_supports_auth = 0;
	}
	if (!sctp_asconf_auth_nochk && stcb->asoc.peer_supports_asconf &&
	    !stcb->asoc.peer_supports_auth) {
		return (-31);
	}
	/* concatenate the full random key */
	keylen = random_len + num_chunks + hmacs_len;
	new_key = sctp_alloc_key(keylen);
	if (new_key != NULL) {
		/* copy in the RANDOM */
		if (random != NULL)
			bcopy(random->random_data, new_key->key, random_len);
		/* append in the AUTH chunks */
		if (chunks != NULL)
			bcopy(chunks->chunk_types, new_key->key + random_len,
			    num_chunks);
		/* append in the HMACs */
		if (hmacs != NULL)
			bcopy(hmacs->hmac_ids, new_key->key + random_len + num_chunks,
			    hmacs_len);
	} else {
		return (-32);
	}
	if (stcb->asoc.authinfo.peer_random != NULL)
		sctp_free_key(stcb->asoc.authinfo.peer_random);
	stcb->asoc.authinfo.peer_random = new_key;
#ifdef SCTP_AUTH_DRAFT_04
	/* don't include the chunks and hmacs for draft -04 */
	stcb->asoc.authinfo.peer_random->keylen = random_len;
#endif
	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);

	return (0);
}

int
sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
    struct sctp_nets *net)
{
	/*
make sure the requested primary address exists in the assoc */
	if (net == NULL && sa)
		net = sctp_findnet(stcb, sa);

	if (net == NULL) {
		/* didn't find the requested primary address! */
		return (-1);
	} else {
		/* set the primary address */
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
			/* Must be confirmed */
			return (-1);
		}
		stcb->asoc.primary_destination = net;
		net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
		net = TAILQ_FIRST(&stcb->asoc.nets);
		if (net != stcb->asoc.primary_destination) {
			/*
			 * first one on the list is NOT the primary
			 * sctp_cmpaddr() is much more efficent if the
			 * primary is the first on the list, make it so.
			 */
			TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
			TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
		}
		return (0);
	}
}

int
sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now)
{
	/*
	 * This function serves two purposes. It will see if a TAG can be
	 * re-used and return 1 for yes it is ok and 0 for don't use that
	 * tag. A secondary function it will do is purge out old tags that
	 * can be removed.
	 */
	struct sctpasochead *head;
	struct sctpvtaghead *chain;
	struct sctp_tagblock *twait_block;
	struct sctp_tcb *stcb;
	int i;

	SCTP_INP_INFO_WLOCK();
	chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
	/* First is the vtag in use ? */

	head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
	    sctppcbinfo.hashasocmark)];
	if (head == NULL) {
		goto check_restart;
	}
	LIST_FOREACH(stcb, head, sctp_asocs) {
		if (stcb->asoc.my_vtag == tag) {
			/*
			 * We should remove this if and return 0 always if
			 * we want vtags unique across all endpoints. For
			 * now within a endpoint is ok.
			 */
			if (inp == stcb->sctp_ep) {
				/* bad tag, in use */
				SCTP_INP_INFO_WUNLOCK();
				return (0);
			}
		}
	}
check_restart:
	/* Now lets check the restart hash */
	head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(tag,
	    sctppcbinfo.hashrestartmark)];
	if (head == NULL) {
		goto check_time_wait;
	}
	LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
		if (stcb->asoc.assoc_id == tag) {
			/* candidate */
			if (inp == stcb->sctp_ep) {
				/* bad tag, in use */
				SCTP_INP_INFO_WUNLOCK();
				return (0);
			}
		}
	}
check_time_wait:
	/* Now what about timed wait ? */
	if (!LIST_EMPTY(chain)) {
		/*
		 * Block(s) are present, lets see if we have this tag in the
		 * list
		 */
		LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
			for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
				if (twait_block->vtag_block[i].v_tag == 0) {
					/* not used */
					continue;
				} else if ((long)twait_block->vtag_block[i].tv_sec_at_expire >
				    now->tv_sec) {
					/*
					 * Audit expires this guy
					 *
					 * NOTE(review): this clears entries
					 * whose expire time is still in the
					 * future (expire > now); expiry
					 * purging normally wants the
					 * opposite comparison -- confirm.
					 */
					twait_block->vtag_block[i].tv_sec_at_expire = 0;
					twait_block->vtag_block[i].v_tag = 0;
				} else if (twait_block->vtag_block[i].v_tag ==
				    tag) {
					/* Bad tag, sorry :< */
					SCTP_INP_INFO_WUNLOCK();
					return (0);
				}
			}
		}
	}
	/* Not found, ok to use the tag */
	SCTP_INP_INFO_WUNLOCK();
	return (1);
}


/*
 * Delete the address from the endpoint local address list Lookup using a
 * sockaddr address (ie. not an ifaddr)
 */
int
sctp_del_local_addr_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa)
{
	struct sctp_laddr *laddr;
	struct sockaddr *l_sa;
	int found = 0;

	/*
	 * Here is another function I cannot find a caller for. As such we
	 * SHOULD delete it if we have no users. If we find a user that user
	 * MUST have the INP locked.
	 *
	 */

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* You are already bound to all. You have it already */
		return (EINVAL);
	}
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		/* make sure the address exists */
		if (laddr->ifa == NULL)
			continue;
		if (laddr->ifa->ifa_addr == NULL)
			continue;

		l_sa = laddr->ifa->ifa_addr;
		if (l_sa->sa_family == AF_INET6) {
			/* IPv6 address */
			struct sockaddr_in6 *sin1, *sin2;

			sin1 = (struct sockaddr_in6 *)l_sa;
			sin2 = (struct sockaddr_in6 *)sa;
			if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
			    sizeof(struct in6_addr)) == 0) {
				/* matched */
				found = 1;
				break;
			}
		} else if (l_sa->sa_family == AF_INET) {
			/* IPv4 address */
			struct sockaddr_in *sin1, *sin2;

			sin1 = (struct sockaddr_in *)l_sa;
			sin2 = (struct sockaddr_in *)sa;
			if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
				/* matched */
				found = 1;
				break;
			}
		} else {
			/* invalid family */
			return (-1);
		}
	}

	if (found && inp->laddr_count < 2) {
		/* can't delete unless there are at LEAST 2 addresses */
		return (-1);
	}
	if (found && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
		/*
		 * remove it from the ep list, this should NOT be done until
		 * its really gone from the interface list and we won't be
		 * receiving more of these. Probably right away. If we do
		 * allow a removal of an address from an association
		 * (sub-set bind) than this should NOT be called until the
		 * all ASCONF come back from this association.
		 */
		sctp_remove_laddr(laddr);
		return (0);
	} else {
		return (-1);
	}
}

/* ring buffer of recently-reneged assoc ids; uint8_t index wraps at 256 */
static sctp_assoc_t reneged_asoc_ids[256];
static uint8_t reneged_at = 0;

extern int sctp_do_drain;

static void
sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	/*
	 * We must hunt this association for MBUF's past the cumack (i.e.
	 * out of order data that we can renege on).
*/ struct sctp_association *asoc; struct sctp_tmit_chunk *chk, *nchk; uint32_t cumulative_tsn_p1, tsn; struct sctp_queued_to_read *ctl, *nctl; int cnt, strmat, gap; /* We look for anything larger than the cum-ack + 1 */ if (sctp_do_drain == 0) { return; } asoc = &stcb->asoc; if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) { /* none we can reneg on. */ return; } cumulative_tsn_p1 = asoc->cumulative_tsn + 1; cnt = 0; /* First look in the re-assembly queue */ chk = TAILQ_FIRST(&asoc->reasmqueue); while (chk) { /* Get the next one */ nchk = TAILQ_NEXT(chk, sctp_next); if (compare_with_wrap(chk->rec.data.TSN_seq, cumulative_tsn_p1, MAX_TSN)) { /* Yep it is above cum-ack */ cnt++; tsn = chk->rec.data.TSN_seq; if (tsn >= asoc->mapping_array_base_tsn) { gap = tsn - asoc->mapping_array_base_tsn; } else { gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1; } asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size); sctp_ucount_decr(asoc->cnt_on_reasm_queue); SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } sctp_free_remote_addr(chk->whoTo); sctp_free_a_chunk(stcb, chk); } chk = nchk; } /* Ok that was fun, now we will drain all the inbound streams? 
*/ for (strmat = 0; strmat < asoc->streamincnt; strmat++) { ctl = TAILQ_FIRST(&asoc->strmin[strmat].inqueue); while (ctl) { nctl = TAILQ_NEXT(ctl, next); if (compare_with_wrap(ctl->sinfo_tsn, cumulative_tsn_p1, MAX_TSN)) { /* Yep it is above cum-ack */ cnt++; tsn = ctl->sinfo_tsn; if (tsn >= asoc->mapping_array_base_tsn) { gap = tsn - asoc->mapping_array_base_tsn; } else { gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1; } asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length); sctp_ucount_decr(asoc->cnt_on_all_streams); SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, ctl, next); if (ctl->data) { sctp_m_freem(ctl->data); ctl->data = NULL; } sctp_free_remote_addr(ctl->whoFrom); SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl); SCTP_DECR_READQ_COUNT(); } ctl = nctl; } } /* * Question, should we go through the delivery queue? The only * reason things are on here is the app not reading OR a p-d-api up. * An attacker COULD send enough in to initiate the PD-API and then * send a bunch of stuff to other streams... these would wind up on * the delivery queue.. and then we would not get to them. But in * order to do this I then have to back-track and un-deliver * sequence numbers in streams.. el-yucko. I think for now we will * NOT look at the delivery queue and leave it to be something to * consider later. An alternative would be to abort the P-D-API with * a notification and then deliver the data.... Or another method * might be to keep track of how many times the situation occurs and * if we see a possible attack underway just abort the association. */ #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { if (cnt) { printf("Freed %d chunks from reneg harvest\n", cnt); } } #endif /* SCTP_DEBUG */ if (cnt) { /* * Now do we need to find a new * asoc->highest_tsn_inside_map? 
*/ if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { gap = asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn; } else { gap = (MAX_TSN - asoc->mapping_array_base_tsn) + asoc->highest_tsn_inside_map + 1; } if (gap >= (asoc->mapping_array_size << 3)) { /* * Something bad happened or cum-ack and high were * behind the base, but if so earlier checks should * have found NO data... wierd... we will start at * end of mapping array. */ printf("Gap was larger than array?? %d set to max:%d maparraymax:%x\n", (int)gap, (int)(asoc->mapping_array_size << 3), (int)asoc->highest_tsn_inside_map); gap = asoc->mapping_array_size << 3; } while (gap > 0) { if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { /* found the new highest */ asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn + gap; break; } gap--; } if (gap == 0) { /* Nothing left in map */ memset(asoc->mapping_array, 0, asoc->mapping_array_size); asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; asoc->highest_tsn_inside_map = asoc->cumulative_tsn; } asoc->last_revoke_count = cnt; callout_stop(&stcb->asoc.dack_timer.timer); sctp_send_sack(stcb); reneged_asoc_ids[reneged_at] = sctp_get_associd(stcb); reneged_at++; } /* * Another issue, in un-setting the TSN's in the mapping array we * DID NOT adjust the higest_tsn marker. This will cause one of two * things to occur. It may cause us to do extra work in checking for * our mapping array movement. More importantly it may cause us to * SACK every datagram. This may not be a bad thing though since we * will recover once we get our cum-ack above and all this stuff we * dumped recovered. */ } void sctp_drain() { /* * We must walk the PCB lists for ALL associations here. The system * is LOW on MBUF's and needs help. This is where reneging will * occur. We really hope this does NOT happen! 
*/
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	/* Walk every endpoint and every association, reneging as we go. */
	SCTP_INP_INFO_RLOCK();
	LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
		/* For each endpoint */
		SCTP_INP_RLOCK(inp);
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			/* For each association */
			SCTP_TCB_LOCK(stcb);
			sctp_drain_mbufs(inp, stcb);
			SCTP_TCB_UNLOCK(stcb);
		}
		SCTP_INP_RUNLOCK(inp);
	}
	SCTP_INP_INFO_RUNLOCK();
}

/*
 * start a new iterator
 * iterates through all endpoints and associations based on the pcb_state
 * flags and asoc_state.  "af" (mandatory) is executed for all matching
 * assocs and "ef" (optional) is executed when the iterator completes.
 * "inpf" (optional) is executed for each new endpoint as it is being
 * iterated through.
 *
 * Returns 0 on success, -1 if no assoc function was supplied, or ENOMEM
 * if the iterator structure could not be allocated.  The iterator itself
 * runs from a timer (SCTP_TIMER_TYPE_ITERATOR) started below.
 */
int
sctp_initiate_iterator(inp_func inpf, asoc_func af, uint32_t pcb_state,
    uint32_t pcb_features, uint32_t asoc_state, void *argp, uint32_t argi,
    end_func ef, struct sctp_inpcb *s_inp, uint8_t chunk_output_off)
{
	struct sctp_iterator *it = NULL;
	int s;

	if (af == NULL) {
		/* the per-assoc callback is mandatory */
		return (-1);
	}
	SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
	    "Iterator");
	if (it == NULL) {
		return (ENOMEM);
	}
	memset(it, 0, sizeof(*it));
	it->function_assoc = af;
	it->function_inp = inpf;
	it->function_atend = ef;
	it->pointer = argp;
	it->val = argi;
	it->pcb_flags = pcb_state;
	it->pcb_features = pcb_features;
	it->asoc_state = asoc_state;
	it->no_chunk_output = chunk_output_off;
	if (s_inp) {
		/* caller restricted us to a single endpoint */
		it->inp = s_inp;
		it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_FIRST(&sctppcbinfo.listhead);
		SCTP_INP_INFO_RUNLOCK();
		it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
	}
	/* Init the timer */
	callout_init(&it->tmr.timer, 1);
	/* add to the list of all iterators */
	SCTP_INP_INFO_WLOCK();
	LIST_INSERT_HEAD(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
	SCTP_INP_INFO_WUNLOCK();
	s = splnet();
	sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it,
	    NULL, NULL);
	splx(s);
	return (0);
}

/*
 * Callout/Timer routines for OS that doesn't have them
*/ diff --git a/sys/netinet/sctp_usrreq.c b/sys/netinet/sctp_usrreq.c index 63097a84e90e..25d639f678c8 100644 --- a/sys/netinet/sctp_usrreq.c +++ b/sys/netinet/sctp_usrreq.c @@ -1,4850 +1,4857 @@ /*- * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_ipsec.h" #include "opt_inet6.h" #include "opt_inet.h" #include "opt_sctp.h" #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IPSEC #include #include #endif /* IPSEC */ #ifndef in6pcb #define in6pcb inpcb #endif #ifndef sotoin6pcb #define sotoin6pcb sotoinpcb #endif /* * sysctl tunable variables */ int sctp_sendspace = (128 * 1024); int sctp_recvspace = 128 * (1024 + #ifdef INET6 sizeof(struct sockaddr_in6) #else sizeof(struct sockaddr_in) #endif ); int sctp_mbuf_threshold_count = SCTP_DEFAULT_MBUFS_IN_CHAIN; int sctp_auto_asconf = SCTP_DEFAULT_AUTO_ASCONF; int sctp_ecn_enable = 1; int sctp_ecn_nonce = 0; int sctp_strict_sacks = 0; int sctp_no_csum_on_loopback = 1; int sctp_strict_init = 1; int sctp_abort_if_one_2_one_hits_limit = 0; int sctp_strict_data_order = 0; int sctp_peer_chunk_oh = sizeof(struct mbuf); int sctp_max_burst_default = SCTP_DEF_MAX_BURST; int sctp_use_cwnd_based_maxburst = 1; int sctp_do_drain = 1; int sctp_warm_the_crc32_table = 0; unsigned int sctp_max_chunks_on_queue = SCTP_ASOC_MAX_CHUNKS_ON_QUEUE; unsigned int sctp_delayed_sack_time_default = SCTP_RECV_MSEC; unsigned int sctp_heartbeat_interval_default = SCTP_HB_DEFAULT_MSEC; unsigned int sctp_pmtu_raise_time_default = SCTP_DEF_PMTU_RAISE_SEC; unsigned int sctp_shutdown_guard_time_default = SCTP_DEF_MAX_SHUTDOWN_SEC; unsigned int sctp_secret_lifetime_default = SCTP_DEFAULT_SECRET_LIFE_SEC; unsigned int sctp_rto_max_default = SCTP_RTO_UPPER_BOUND; unsigned int sctp_rto_min_default = SCTP_RTO_LOWER_BOUND; unsigned int sctp_rto_initial_default = 
SCTP_RTO_INITIAL; unsigned int sctp_init_rto_max_default = SCTP_RTO_UPPER_BOUND; unsigned int sctp_valid_cookie_life_default = SCTP_DEFAULT_COOKIE_LIFE; unsigned int sctp_init_rtx_max_default = SCTP_DEF_MAX_INIT; unsigned int sctp_assoc_rtx_max_default = SCTP_DEF_MAX_SEND; unsigned int sctp_path_rtx_max_default = SCTP_DEF_MAX_PATH_RTX; unsigned int sctp_nr_outgoing_streams_default = SCTP_OSTREAM_INITIAL; unsigned int sctp_add_more_threshold = SCTP_DEFAULT_ADD_MORE; uint32_t sctp_asoc_free_resc_limit = SCTP_DEF_ASOC_RESC_LIMIT; uint32_t sctp_system_free_resc_limit = SCTP_DEF_SYSTEM_RESC_LIMIT; int sctp_min_split_point = SCTP_DEFAULT_SPLIT_POINT_MIN; int sctp_pcbtblsize = SCTP_PCBHASHSIZE; int sctp_hashtblsize = SCTP_TCBHASHSIZE; int sctp_chunkscale = SCTP_CHUNKQUEUE_SCALE; unsigned int sctp_cmt_on_off = 0; unsigned int sctp_cmt_sockopt_on_off = 0; unsigned int sctp_cmt_use_dac = 0; unsigned int sctp_cmt_sockopt_use_dac = 0; int sctp_L2_abc_variable = 1; unsigned int sctp_early_fr = 0; unsigned int sctp_early_fr_msec = SCTP_MINFR_MSEC_TIMER; unsigned int sctp_use_rttvar_cc = 0; int sctp_says_check_for_deadlock = 0; unsigned int sctp_asconf_auth_nochk = 0; unsigned int sctp_auth_disable = 0; unsigned int sctp_auth_random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT; unsigned int sctp_auth_hmac_id_default = SCTP_AUTH_HMAC_ID_SHA1; struct sctpstat sctpstat; #ifdef SCTP_DEBUG extern uint32_t sctp_debug_on; #endif /* SCTP_DEBUG */ void sctp_init(void) { /* Init the SCTP pcb in sctp_pcb.c */ u_long sb_max_adj; sctp_pcb_init(); if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) sctp_max_chunks_on_queue = (nmbclusters / 8); /* * Allow a user to take no more than 1/2 the number of clusters or * the SB_MAX whichever is smaller for the send window. */ sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); sctp_sendspace = min((min(SB_MAX, sb_max_adj)), ((nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT)); /* * Now for the recv window, should we take the same amount? 
* or should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
	 * now I will just copy.
	 */
	sctp_recvspace = sctp_sendspace;
}

#ifdef INET6
/*
 * Build an IPv4-mapped IPv6 header from an IPv4 header so that v4
 * packets can be fed through the common v6 handling code.
 */
void
ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
{
	bzero(ip6, sizeof(*ip6));
	ip6->ip6_vfc = IPV6_VERSION;
	ip6->ip6_plen = ip->ip_len;
	ip6->ip6_nxt = ip->ip_p;
	ip6->ip6_hlim = ip->ip_ttl;
	/* v4-mapped prefix word, then the original v4 addresses */
	ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
	    IPV6_ADDR_INT32_SMP;
	ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
	ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
}

#endif				/* INET6 */

/*
 * Lower the association's smallest known path MTU to nxtsz and mark
 * queued chunks that no longer fit as fragmentable; chunks already on
 * the sent queue are additionally flagged for immediate retransmission
 * and backed out of the flight-size accounting.
 */
static void
sctp_pathmtu_adustment(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint16_t nxtsz)
{
	struct sctp_tmit_chunk *chk;

	/* Adjust that too */
	stcb->asoc.smallest_mtu = nxtsz;
	/* now off to subtract IP_DF flag if needed */
	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
			/*
			 * For this guy we also mark for immediate resend
			 * since we sent to big of chunk
			 */
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
			/* back the chunk out of the flight accounting */
			if (stcb->asoc.total_flight >= chk->book_size)
				stcb->asoc.total_flight -= chk->book_size;
			else
				stcb->asoc.total_flight = 0;
			if (stcb->asoc.total_flight_count > 0)
				stcb->asoc.total_flight_count--;
			if (net->flight_size >= chk->book_size)
				net->flight_size -= chk->book_size;
			else
				net->flight_size = 0;
		}
	}
}

/*
 * Handle an ICMP "fragmentation needed" message for this association:
 * clamp the destination's MTU to the reported next-hop MTU and adjust
 * queued chunks via sctp_pathmtu_adustment().  Called with the TCB
 * locked; the TCB lock is released on every return path.
 */
static void
sctp_notify_mbuf(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct ip *ip,
    struct sctphdr *sh)
{
	struct icmp *icmph;
	int totsz, tmr_stopped = 0;
	uint16_t nxtsz;

	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (ip == NULL) || (sh == NULL)) {
		if
		    (stcb != NULL)
			SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* the ICMP header precedes the quoted inner IP header */
	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
	    sizeof(struct ip)));
	if (icmph->icmp_type != ICMP_UNREACH) {
		/* We only care about unreachable */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
		/* not a unreachable message due to frag. */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	totsz = ip->ip_len;
	nxtsz = ntohs(icmph->icmp_seq);
	if (nxtsz == 0) {
		/*
		 * old type router that does not tell us what the next size
		 * mtu is. Rats we will have to guess (in a educated fashion
		 * of course)
		 */
		nxtsz = find_next_best_mtu(totsz);
	}
	/* Stop any PMTU timer */
	if (callout_pending(&net->pmtu_timer.timer)) {
		tmr_stopped = 1;
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
	}
	/* Adjust destination size limit */
	if (net->mtu > nxtsz) {
		net->mtu = nxtsz;
	}
	/* now what about the ep? */
	if (stcb->asoc.smallest_mtu > nxtsz) {
		sctp_pathmtu_adustment(inp, stcb, net, nxtsz);
	}
	if (tmr_stopped)
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
	SCTP_TCB_UNLOCK(stcb);
}

/*
 * Dispatch a non-PMTU ICMP error to the association.  Reachability
 * errors mark the path down; "no such protocol" style errors are
 * treated as an out-of-the-blue abort; everything else is passed to
 * the application via so_error.
 * NOTE(review): the "errno" parameter shadows the global errno symbol.
 */
void
sctp_notify(struct sctp_inpcb *inp, int errno, struct sctphdr *sh,
    struct sockaddr *to, struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* protection */
	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
	    (sh == NULL) || (to == NULL)) {
		return;
	}
	/* First job is to verify the vtag matches what I would send */
	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
		return;
	}
	/* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
	if ((errno == EHOSTUNREACH) ||	/* Host is not reachable */
	    (errno == EHOSTDOWN) ||	/* Host is down */
	    (errno == ECONNREFUSED) ||	/* Host refused the connection, (not
					 * an abort?) */
	    (errno == ENOPROTOOPT)	/* SCTP is not present on host */
	    ) {
		/*
		 * Hmm reachablity problems we must examine closely.
* If its not reachable, we may have lost a network. Or if there is
		 * NO protocol at the other end named SCTP. well we consider
		 * it a OOTB abort.
		 */
		if ((errno == EHOSTUNREACH) || (errno == EHOSTDOWN)) {
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				/* Ok that destination is NOT reachable */
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				/* force past the failure threshold */
				net->error_count = net->failure_threshold + 1;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb, SCTP_FAILED_THRESHOLD,
				    (void *)net);
			}
			if (stcb)
				SCTP_TCB_UNLOCK(stcb);
		} else {
			/*
			 * Here the peer is either playing tricks on us,
			 * including an address that belongs to someone who
			 * does not support SCTP OR was a userland
			 * implementation that shutdown and now is dead. In
			 * either case treat it like a OOTB abort with no
			 * TCB
			 */
			sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
			sctp_free_assoc(inp, stcb, 0);
			/* no need to unlock here, since the TCB is gone */
		}
	} else {
		/* Send all others to the app */
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);

		if (inp->sctp_socket) {
#ifdef SCTP_LOCK_LOGGING
			sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCK);
#endif
			SOCK_LOCK(inp->sctp_socket);
			inp->sctp_socket->so_error = errno;
			sctp_sowwakeup(inp, inp->sctp_socket);
			SOCK_UNLOCK(inp->sctp_socket);
		}
	}
}

/*
 * Protocol control-input hook: translate ICMP errors delivered for SCTP
 * packets into per-association notifications.  (K&R-style definition
 * preserved as in the original.)
 */
void
sctp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct sctphdr *sh;
	int s;

	if (sa->sa_family != AF_INET ||
	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
		return;
	}
	if (PRC_IS_REDIRECT(cmd)) {
		/* redirects carry no inner packet we care about */
		ip = 0;
	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
		return;
	}
	if (ip) {
		struct sctp_inpcb *inp = NULL;
		struct sctp_tcb *stcb = NULL;
		struct sctp_nets *net = NULL;
		struct sockaddr_in to, from;

		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		bzero(&to, sizeof(to));
		bzero(&from, sizeof(from));
		from.sin_family = to.sin_family = AF_INET;
		from.sin_len = to.sin_len = sizeof(to);
		from.sin_port = sh->src_port;
		from.sin_addr = ip->ip_src;
		to.sin_port = sh->dest_port;
		to.sin_addr = ip->ip_dst;

		/*
		 * 'to' holds the dest of the packet that failed to be sent.
		 * 'from' holds our local endpoint address. Thus we reverse
		 * the to and the from in the lookup.
		 */
		s = splnet();
		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
		    (struct sockaddr *)&to,
		    &inp, &net, 1);
		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
			if (cmd != PRC_MSGSIZE) {
				int cm;

				if (cmd == PRC_HOSTDEAD) {
					cm = EHOSTUNREACH;
				} else {
					cm = inetctlerrmap[cmd];
				}
				sctp_notify(inp, cm, sh,
				    (struct sockaddr *)&to, stcb,
				    net);
			} else {
				/* handle possible ICMP size messages */
				sctp_notify_mbuf(inp, stcb, net, ip, sh);
			}
		} else {
			if ((stcb == NULL) && (inp != NULL)) {
				/* reduce ref-count */
				SCTP_INP_WLOCK(inp);
				SCTP_INP_DECR_REF(inp);
				SCTP_INP_WUNLOCK(inp);
			}
		}
		splx(s);
	}
	return;
}

/*
 * sysctl handler: given a pair of sockaddr_in addresses, look up the
 * matching association and copy out the owning socket's ucred.
 * (Residual diff hunk below preserved verbatim: suser() is replaced by
 * a priv_check_cred() privilege check.)
 */
static int
sctp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct sctp_inpcb *inp;
	struct sctp_nets *net;
	struct sctp_tcb *stcb;
	int error, s;

-	error = suser(req->td);
+	/*
+	 * XXXRW: Other instances of getcred use SUSER_ALLOWJAIL, as socket
+	 * visibility is scoped using cr_canseesocket(), which it is not
+	 * here.
+ */
+	error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_GETCRED, 0);
	if (error)
		return (error);
+
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	/* look up the association named by the two addresses */
	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
	    sintosa(&addrs[1]),
	    &inp, &net, 1);
	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
		if ((inp != NULL) && (stcb == NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->sctp_socket->so_cred,
	    sizeof(struct ucred));
	SCTP_TCB_UNLOCK(stcb);
out:
	splx(s);
	return (error);
}

SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
    0, 0,
    sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");

/*
 * sysctl definitions
 */
SYSCTL_INT(_net_inet_sctp, OID_AUTO, sendspace, CTLFLAG_RW,
    &sctp_sendspace, 0, "Maximum outgoing SCTP buffer size");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, recvspace, CTLFLAG_RW,
    &sctp_recvspace, 0, "Maximum incoming SCTP buffer size");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, auto_asconf, CTLFLAG_RW,
    &sctp_auto_asconf, 0, "Enable SCTP Auto-ASCONF");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_enable, CTLFLAG_RW,
    &sctp_ecn_enable, 0, "Enable SCTP ECN");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLFLAG_RW,
    &sctp_ecn_nonce, 0, "Enable SCTP ECN Nonce");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_sacks, CTLFLAG_RW,
    &sctp_strict_sacks, 0, "Enable SCTP Strict SACK checking");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLFLAG_RW,
    &sctp_no_csum_on_loopback, 0,
    "Enable NO Csum on packets sent on loopback");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_init, CTLFLAG_RW,
    &sctp_strict_init, 0,
    "Enable strict INIT/INIT-ACK singleton enforcement");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLFLAG_RW,
    &sctp_peer_chunk_oh, 0,
    "Amount to debit peers rwnd per chunk sent");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxburst, CTLFLAG_RW,
    &sctp_max_burst_default, 0,
    "Default max
burst for sctp endpoints"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxchunks, CTLFLAG_RW, &sctp_max_chunks_on_queue, 0, "Default max chunks on queue per asoc"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, tcbhashsize, CTLFLAG_RW, &sctp_hashtblsize, 0, "Tuneable for Hash table sizes"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, min_split_point, CTLFLAG_RW, &sctp_min_split_point, 0, "Minimum size when splitting a chunk"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, pcbhashsize, CTLFLAG_RW, &sctp_pcbtblsize, 0, "Tuneable for PCB Hash table sizes"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, sys_resource, CTLFLAG_RW, &sctp_system_free_resc_limit, 0, "Max number of cached resources in the system"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, asoc_resource, CTLFLAG_RW, &sctp_asoc_free_resc_limit, 0, "Max number of cached resources in an asoc"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, chunkscale, CTLFLAG_RW, &sctp_chunkscale, 0, "Tuneable for Scaling of number of chunks and messages"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLFLAG_RW, &sctp_delayed_sack_time_default, 0, "Default delayed SACK timer in msec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLFLAG_RW, &sctp_heartbeat_interval_default, 0, "Default heartbeat interval in msec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLFLAG_RW, &sctp_pmtu_raise_time_default, 0, "Default PMTU raise timer in sec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLFLAG_RW, &sctp_shutdown_guard_time_default, 0, "Default shutdown guard timer in sec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLFLAG_RW, &sctp_secret_lifetime_default, 0, "Default secret lifetime in sec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_max, CTLFLAG_RW, &sctp_rto_max_default, 0, "Default maximum retransmission timeout in msec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_min, CTLFLAG_RW, &sctp_rto_min_default, 0, "Default minimum retransmission timeout in msec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_initial, 
CTLFLAG_RW, &sctp_rto_initial_default, 0, "Default initial retransmission timeout in msec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rto_max, CTLFLAG_RW, &sctp_init_rto_max_default, 0, "Default maximum retransmission timeout during association setup in msec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLFLAG_RW, &sctp_valid_cookie_life_default, 0, "Default cookie lifetime in sec"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLFLAG_RW, &sctp_init_rtx_max_default, 0, "Default maximum number of retransmission for INIT chunks"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLFLAG_RW, &sctp_assoc_rtx_max_default, 0, "Default maximum number of retransmissions per association"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLFLAG_RW, &sctp_path_rtx_max_default, 0, "Default maximum of retransmissions per path"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, add_more_on_output, CTLFLAG_RW, &sctp_add_more_threshold, 0, "When space wise is it worthwhile to try to add more to a socket send buffer"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nr_outgoing_streams, CTLFLAG_RW, &sctp_nr_outgoing_streams_default, 0, "Default number of outgoing streams"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cmt_on_off, CTLFLAG_RW, &sctp_cmt_on_off, 0, "CMT ON/OFF flag"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cwnd_maxburst, CTLFLAG_RW, &sctp_use_cwnd_based_maxburst, 0, "Use a CWND adjusting maxburst"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, early_fast_retran, CTLFLAG_RW, &sctp_early_fr, 0, "Early Fast Retransmit with Timer"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, use_rttvar_congctrl, CTLFLAG_RW, &sctp_use_rttvar_cc, 0, "Use congestion control via rtt variation"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, deadlock_detect, CTLFLAG_RW, &sctp_says_check_for_deadlock, 0, "SMP Deadlock detection on/off"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, early_fast_retran_msec, CTLFLAG_RW, &sctp_early_fr_msec, 0, "Early Fast Retransmit minimum timer value"); SYSCTL_UINT(_net_inet_sctp, 
OID_AUTO, asconf_auth_nochk, CTLFLAG_RW, &sctp_asconf_auth_nochk, 0, "Disable SCTP ASCONF AUTH requirement"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_disable, CTLFLAG_RW, &sctp_auth_disable, 0, "Disable SCTP AUTH chunk requirement/function"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_random_len, CTLFLAG_RW, &sctp_auth_random_len, 0, "Length of AUTH RANDOMs"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, auth_hmac_id, CTLFLAG_RW, &sctp_auth_hmac_id_default, 0, "Default HMAC Id for SCTP AUTHenthication"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, abc_l_var, CTLFLAG_RW, &sctp_L2_abc_variable, 0, "SCTP ABC max increase per SACK (L)"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, max_chained_mbufs, CTLFLAG_RW, &sctp_mbuf_threshold_count, 0, "Default max number of small mbufs on a chain"); SYSCTL_UINT(_net_inet_sctp, OID_AUTO, cmt_use_dac, CTLFLAG_RW, &sctp_cmt_use_dac, 0, "CMT DAC ON/OFF flag"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, do_sctp_drain, CTLFLAG_RW, &sctp_do_drain, 0, "Should SCTP respond to the drain calls"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, warm_crc_table, CTLFLAG_RW, &sctp_warm_the_crc32_table, 0, "Should the CRC32c tables be warmed before checksum?"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, abort_at_limit, CTLFLAG_RW, &sctp_abort_if_one_2_one_hits_limit, 0, "When one-2-one hits qlimit abort"); SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_data_order, CTLFLAG_RW, &sctp_strict_data_order, 0, "Enforce strict data ordering, abort if control inside data"); SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_RW, &sctpstat, sctpstat, "SCTP statistics (struct sctps_stat, netinet/sctp.h"); #ifdef SCTP_DEBUG SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW, &sctp_debug_on, 0, "Configure debug output"); #endif /* SCTP_DEBUG */ static void sctp_abort(struct socket *so) { struct sctp_inpcb *inp; int s; uint32_t flags; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) return; s = splnet(); sctp_must_try_again: flags = inp->sctp_flags; #ifdef SCTP_LOG_CLOSING 
sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
		sctp_log_closing(inp, NULL, 16);
#endif
		sctp_inpcb_free(inp, 1, 0);
		SOCK_LOCK(so);
		so->so_snd.sb_cc = 0;
		so->so_snd.sb_mb = NULL;
		so->so_snd.sb_mbcnt = 0;
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		so->so_rcv.sb_cc = 0;
		so->so_rcv.sb_mb = NULL;
		so->so_rcv.sb_mbcnt = 0;
		/*
		 * Now null out the reference, we are completely detached.
		 */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		/* someone else raced us; retry until the flag is set */
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	splx(s);
	return;
}

/*
 * pru_attach: allocate and initialize an IPv4 SCTP inpcb for a newly
 * created socket, reserving send/receive buffer space.
 */
static int
sctp_attach(struct socket *so, int proto, struct thread *p)
{
	struct sctp_inpcb *inp;
	struct inpcb *ip_inp;
	int s, error;

#ifdef IPSEC
	uint32_t flags;
#endif
	s = splnet();
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp != 0) {
		/* already attached */
		splx(s);
		return EINVAL;
	}
	error = soreserve(so, sctp_sendspace, sctp_recvspace);
	if (error) {
		splx(s);
		return error;
	}
	error = sctp_inpcb_alloc(so);
	if (error) {
		splx(s);
		return error;
	}
	inp = (struct sctp_inpcb *)so->so_pcb;
	SCTP_INP_WLOCK(inp);

	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
	ip_inp = &inp->ip_inp.inp;
	ip_inp->inp_vflag |= INP_IPV4;
	ip_inp->inp_ip_ttl = ip_defttl;

#ifdef IPSEC
	error = ipsec_init_pcbpolicy(so, &ip_inp->inp_sp);
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (error != 0) {
		/* IPsec policy setup failed; tear the pcb down again */
		flags = inp->sctp_flags;
		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 15);
#endif
			sctp_inpcb_free(inp, 1, 0);
		}
		return error;
	}
#endif				/* IPSEC */
	SCTP_INP_WUNLOCK(inp);
	splx(s);
	return 0;
}

/*
 * pru_bind: bind the socket to a local IPv4 address.
 */
static int
sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
{
	struct sctp_inpcb *inp;
	int s, error;

#ifdef INET6
	if (addr && addr->sa_family != AF_INET)
		/* must be a v4 address! */
		return EINVAL;
#endif				/* INET6 */

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return EINVAL;
	s = splnet();
	error = sctp_inpcb_bind(so, addr, p);
	splx(s);
	return error;
}

/*
 * pru_close: mark the endpoint gone and free it; an immediate (abortive)
 * free is used when SO_LINGER with zero timeout is set or unread data
 * remains, otherwise a graceful free.
 */
static void
sctp_close(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return;

	/*
	 * Inform all the lower layer assoc that we are done.
	 */
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
		    (so->so_rcv.sb_cc > 0)) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 13);
#endif
			sctp_inpcb_free(inp, 1, 1);
		} else {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 14);
#endif
			sctp_inpcb_free(inp, 0, 1);
		}
		/*
		 * The socket is now detached, no matter what the state of
		 * the SCTP association.
		 */
		SOCK_LOCK(so);
		so->so_snd.sb_cc = 0;
		so->so_snd.sb_mb = NULL;
		so->so_snd.sb_mbcnt = 0;
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
*/
		so->so_rcv.sb_cc = 0;
		so->so_rcv.sb_mb = NULL;
		so->so_rcv.sb_mbcnt = 0;
		/*
		 * Now null out the reference, we are completely detached.
		 */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		/* raced with another closer; retry until GONE is set */
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}

int sctp_sendm(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *p);

/*
 * pru_send-style entry point: accumulate the user's mbuf chain (and any
 * control mbuf) on the endpoint and hand it to sctp_output() once no
 * more data is coming (PRUS_MORETOCOME clear).
 */
int
sctp_sendm(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *p)
{
	struct sctp_inpcb *inp;
	int error;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0) {
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		sctp_m_freem(m);
		return EINVAL;
	}
	/* Got to have an to address if we are NOT a connected socket */
	if ((addr == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
	    ) {
		goto connected_type;
	} else if (addr == NULL) {
		error = EDESTADDRREQ;
		sctp_m_freem(m);
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		return (error);
	}
#ifdef INET6
	if (addr->sa_family != AF_INET) {
		/* must be a v4 address! */
		sctp_m_freem(m);
		if (control) {
			sctp_m_freem(control);
			control = NULL;
		}
		error = EDESTADDRREQ;
		return EINVAL;
	}
#endif				/* INET6 */
connected_type:
	/* now what about control */
	if (control) {
		if (inp->control) {
			printf("huh? control set?\n");
			sctp_m_freem(inp->control);
			inp->control = NULL;
		}
		inp->control = control;
	}
	/* add it in possibly */
	if ((inp->pkt) && (inp->pkt->m_flags & M_PKTHDR)) {
		struct mbuf *x;
		int c_len;

		c_len = 0;
		/* How big is it */
		for (x = m; x; x = x->m_next) {
			c_len += x->m_len;
		}
		inp->pkt->m_pkthdr.len += c_len;
	}
	/* Place the data */
	if (inp->pkt) {
		inp->pkt_last->m_next = m;
		inp->pkt_last = m;
	} else {
		inp->pkt_last = inp->pkt = m;
	}
	if (			/* FreeBSD uses a flag passed */
	    ((flags & PRUS_MORETOCOME) == 0)
	    ) {
		/*
		 * note with the current version this code will only be used
		 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
		 * re-defining sosend to use the sctp_sosend. One can
		 * optionally switch back to this code (by changing back the
		 * definitions) but this is not advisable. This code is used
		 * by FreeBSD when sending a file with sendfile() though.
		 */
		int ret;

		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
		inp->pkt = NULL;
		inp->control = NULL;
		return (ret);
	} else {
		return (0);
	}
}

/*
 * pru_disconnect (1-to-1 / TCP-model sockets only): gracefully shut
 * down the single association on this endpoint, or abort it when
 * SO_LINGER with zero timeout is set or unread data remains.
 */
static int
sctp_disconnect(struct socket *so)
{
	struct sctp_inpcb *inp;
	int s;

	s = splnet();
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		splx(s);
		return (ENOTCONN);
	}
	SCTP_INP_RLOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* No connection */
			splx(s);
			SCTP_INP_RUNLOCK(inp);
			return (0);
		} else {
			struct sctp_association *asoc;
			struct sctp_tcb *stcb;

			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			if (stcb == NULL) {
				splx(s);
				SCTP_INP_RUNLOCK(inp);
				return (EINVAL);
			}
			SCTP_TCB_LOCK(stcb);
			asoc = &stcb->asoc;
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				/* We are about to be freed, out of here */
				SCTP_TCB_UNLOCK(stcb);
				SCTP_INP_RUNLOCK(inp);
				return (0);
			}
			if (((so->so_options & SO_LINGER) &&
			    (so->so_linger == 0)) ||
			    (so->so_rcv.sb_cc > 0)) {
				if (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_COOKIE_WAIT) {
					/* Left with Data unread */
					struct mbuf *err;

					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0,
M_DONTWAIT, 1, MT_DATA); if (err) { /* * Fill in the user * initiated abort */ struct sctp_paramhdr *ph; ph = mtod(err, struct sctp_paramhdr *); err->m_len = sizeof(struct sctp_paramhdr); ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); ph->param_length = htons(err->m_len); } sctp_send_abort_tcb(stcb, err); SCTP_STAT_INCR_COUNTER32(sctps_aborted); } SCTP_INP_RUNLOCK(inp); if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } sctp_free_assoc(inp, stcb, 0); /* No unlock tcb assoc is gone */ splx(s); return (0); } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->stream_queue_cnt == 0)) { /* there is nothing queued to send, so done */ if (asoc->locked_on_sending) { goto abort_anyway; } if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { /* only send SHUTDOWN 1st time thru */ sctp_stop_timers_for_shutdown(stcb); sctp_send_shutdown(stcb, stcb->asoc.primary_destination); sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3); asoc->state = SCTP_STATE_SHUTDOWN_SENT; SCTP_STAT_DECR_GAUGE32(sctps_currestab); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, asoc->primary_destination); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, asoc->primary_destination); } } else { /* * we still got (or just got) data to send, * so set SHUTDOWN_PENDING */ /* * XXX sockets draft says that SCTP_EOF * should be sent with no data. 
currently, * we will allow user data to be sent first * and move to SHUTDOWN-PENDING */ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, asoc->primary_destination); if (asoc->locked_on_sending) { /* Locked to send out the data */ struct sctp_stream_queue_pending *sp; sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); if (sp == NULL) { printf("Error, sp is NULL, locked on sending is non-null strm:%d\n", asoc->locked_on_sending->stream_no); } else { if ((sp->length == 0) && (sp->msg_is_complete == 0)) asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; } } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; abort_anyway: op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 0, M_DONTWAIT, 1, MT_DATA); if (op_err) { /* * Fill in the user * initiated abort */ struct sctp_paramhdr *ph; uint32_t *ippp; op_err->m_len = (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)); ph = mtod(op_err, struct sctp_paramhdr *); ph->param_type = htons( SCTP_CAUSE_USER_INITIATED_ABT); ph->param_length = htons(op_err->m_len); ippp = (uint32_t *) (ph + 1); *ippp = htonl(0x30000007); } sctp_send_abort_tcb(stcb, op_err); SCTP_STAT_INCR_COUNTER32(sctps_aborted); if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } SCTP_INP_RUNLOCK(inp); sctp_free_assoc(inp, stcb, 0); splx(s); return (0); } } SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); splx(s); return (0); } /* not reached */ } else { /* UDP model does not support this */ SCTP_INP_RUNLOCK(inp); splx(s); return EOPNOTSUPP; } } int sctp_shutdown(struct socket *so) { struct sctp_inpcb *inp; int s; s = splnet(); inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) { splx(s); return EINVAL; } SCTP_INP_RLOCK(inp); /* For UDP model this is a invalid call 
*/ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { /* Restore the flags that the soshutdown took away. */ so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; /* This proc will wakeup for read and do nothing (I hope) */ splx(s); SCTP_INP_RUNLOCK(inp); return (EOPNOTSUPP); } /* * Ok if we reach here its the TCP model and it is either a SHUT_WR * or SHUT_RDWR. This means we put the shutdown flag against it. */ { struct sctp_tcb *stcb; struct sctp_association *asoc; socantsendmore(so); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { /* * Ok we hit the case that the shutdown call was * made after an abort or something. Nothing to do * now. */ splx(s); return (0); } SCTP_TCB_LOCK(stcb); asoc = &stcb->asoc; if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->stream_queue_cnt == 0)) { if (asoc->locked_on_sending) { goto abort_anyway; } /* there is nothing queued to send, so I'm done... */ if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { /* only send SHUTDOWN the first time through */ sctp_stop_timers_for_shutdown(stcb); sctp_send_shutdown(stcb, stcb->asoc.primary_destination); sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3); asoc->state = SCTP_STATE_SHUTDOWN_SENT; SCTP_STAT_DECR_GAUGE32(sctps_currestab); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, asoc->primary_destination); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, asoc->primary_destination); } } else { /* * we still got (or just got) data to send, so set * SHUTDOWN_PENDING */ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, asoc->primary_destination); if (asoc->locked_on_sending) { /* Locked to send out the data */ struct sctp_stream_queue_pending *sp; sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); if (sp == NULL) { printf("Error, sp is NULL, locked on sending is non-null strm:%d\n", asoc->locked_on_sending->stream_no); } else { if 
((sp->length == 0) && (sp->msg_is_complete == 0)) { asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; } } } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; abort_anyway: op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 0, M_DONTWAIT, 1, MT_DATA); if (op_err) { /* Fill in the user initiated abort */ struct sctp_paramhdr *ph; uint32_t *ippp; op_err->m_len = sizeof(struct sctp_paramhdr) + sizeof(uint32_t); ph = mtod(op_err, struct sctp_paramhdr *); ph->param_type = htons( SCTP_CAUSE_USER_INITIATED_ABT); ph->param_length = htons(op_err->m_len); ippp = (uint32_t *) (ph + 1); *ippp = htonl(0x30000008); } sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, op_err); goto skip_unlock; } } SCTP_TCB_UNLOCK(stcb); } skip_unlock: SCTP_INP_RUNLOCK(inp); splx(s); return 0; } /* * copies a "user" presentable address and removes embedded scope, etc. * returns 0 on success, 1 on error */ static uint32_t sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) { struct sockaddr_in6 lsa6; sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, &lsa6); memcpy(ss, sa, sa->sa_len); return (0); } static int sctp_fill_up_addresses(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int limit, struct sockaddr_storage *sas) { struct ifnet *ifn; struct ifaddr *ifa; int loopback_scope, ipv4_local_scope, local_scope, site_scope, actual; int ipv4_addr_legal, ipv6_addr_legal; actual = 0; if (limit <= 0) return (actual); if (stcb) { /* Turn on all the appropriate scope */ loopback_scope = stcb->asoc.loopback_scope; ipv4_local_scope = stcb->asoc.ipv4_local_scope; local_scope = stcb->asoc.local_scope; site_scope = stcb->asoc.site_scope; } else { /* Turn on ALL scope, since we look at the EP */ loopback_scope = ipv4_local_scope = local_scope = site_scope = 1; } ipv4_addr_legal = ipv6_addr_legal = 0; if (inp->sctp_flags & 
SCTP_PCB_FLAGS_BOUND_V6) { ipv6_addr_legal = 1; if ( (((struct in6pcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY) == 0) { ipv4_addr_legal = 1; } } else { ipv4_addr_legal = 1; } if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { TAILQ_FOREACH(ifn, &ifnet, if_list) { if ((loopback_scope == 0) && (ifn->if_type == IFT_LOOP)) { /* Skip loopback if loopback_scope not set */ continue; } TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) { if (stcb) { /* * For the BOUND-ALL case, the list * associated with a TCB is Always * considered a reverse list.. i.e. * it lists addresses that are NOT * part of the association. If this * is one of those we must skip it. */ if (sctp_is_addr_restricted(stcb, ifa->ifa_addr)) { continue; } } if ((ifa->ifa_addr->sa_family == AF_INET) && (ipv4_addr_legal)) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)ifa->ifa_addr; if (sin->sin_addr.s_addr == 0) { /* * we skip unspecifed * addresses */ continue; } if ((ipv4_local_scope == 0) && (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { continue; } if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) { in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); actual += sizeof(sizeof(struct sockaddr_in6)); } else { memcpy(sas, sin, sizeof(*sin)); ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin)); actual += sizeof(*sin); } if (actual >= limit) { return (actual); } } else if ((ifa->ifa_addr->sa_family == AF_INET6) && (ipv6_addr_legal)) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)ifa->ifa_addr; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* * we skip unspecifed * addresses */ continue; } if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { if (local_scope == 0) continue; if (sin6->sin6_scope_id == 0) { if (sa6_recoverscope(sin6) != 0) /* * bad link * local * address */ continue; } } if 
((site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { continue; } memcpy(sas, sin6, sizeof(*sin6)); ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6)); actual += sizeof(*sin6); if (actual >= limit) { return (actual); } } } } } else { struct sctp_laddr *laddr; /* * If we have a TCB and we do NOT support ASCONF (it's * turned off or otherwise) then the list is always the true * list of addresses (the else case below). Otherwise the * list on the association is a list of addresses that are * NOT part of the association. */ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) { /* The list is a NEGATIVE list */ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (stcb) { if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr)) { continue; } } if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr)) continue; ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + laddr->ifa->ifa_addr->sa_len); actual += laddr->ifa->ifa_addr->sa_len; if (actual >= limit) { return (actual); } } } else { /* The list is a positive list if present */ if (stcb) { /* Must use the specific association list */ LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) { if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr)) continue; ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + laddr->ifa->ifa_addr->sa_len); actual += laddr->ifa->ifa_addr->sa_len; if (actual >= limit) { return (actual); } } } else { /* * No endpoint so use the endpoints * individual list */ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr)) continue; ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + laddr->ifa->ifa_addr->sa_len); actual += laddr->ifa->ifa_addr->sa_len; if (actual >= limit) { 
return (actual); } } } } } return (actual); } static int sctp_count_max_addresses(struct sctp_inpcb *inp) { int cnt = 0; /* * In both sub-set bound an bound_all cases we return the MAXIMUM * number of addresses that you COULD get. In reality the sub-set * bound may have an exclusion list for a given TCB OR in the * bound-all case a TCB may NOT include the loopback or other * addresses as well. */ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { struct ifnet *ifn; struct ifaddr *ifa; TAILQ_FOREACH(ifn, &ifnet, if_list) { TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) { /* Count them if they are the right type */ if (ifa->ifa_addr->sa_family == AF_INET) { if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) cnt += sizeof(struct sockaddr_in6); else cnt += sizeof(struct sockaddr_in); } else if (ifa->ifa_addr->sa_family == AF_INET6) cnt += sizeof(struct sockaddr_in6); } } } else { struct sctp_laddr *laddr; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa->ifa_addr->sa_family == AF_INET) { if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) cnt += sizeof(struct sockaddr_in6); else cnt += sizeof(struct sockaddr_in); } else if (laddr->ifa->ifa_addr->sa_family == AF_INET6) cnt += sizeof(struct sockaddr_in6); } } return (cnt); } static int sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, struct mbuf *m, struct thread *p, int delay ) { int s = splnet(); int error = 0; int creat_lock_on = 0; struct sctp_tcb *stcb = NULL; struct sockaddr *sa; int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr, i, incr, at; #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_PCB1) { printf("Connectx called\n"); } #endif /* SCTP_DEBUG */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ splx(s); return (EADDRINUSE); } if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { splx(s); return (EINVAL); } if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); 
stcb = LIST_FIRST(&inp->sctp_asoc_list); SCTP_INP_RUNLOCK(inp); } if (stcb) { splx(s); return (EALREADY); } SCTP_INP_INCR_REF(inp); SCTP_ASOC_CREATE_LOCK(inp); creat_lock_on = 1; if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { error = EFAULT; goto out_now; } totaddrp = mtod(m, int *); totaddr = *totaddrp; sa = (struct sockaddr *)(totaddrp + 1); at = incr = 0; /* account and validate addresses */ for (i = 0; i < totaddr; i++) { if (sa->sa_family == AF_INET) { num_v4++; incr = sizeof(struct sockaddr_in); } else if (sa->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)sa; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { /* Must be non-mapped for connectx */ error = EINVAL; goto out_now; } num_v6++; incr = sizeof(struct sockaddr_in6); } else { totaddr = i; break; } stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); if (stcb != NULL) { /* Already have or am bring up an association */ SCTP_ASOC_CREATE_UNLOCK(inp); creat_lock_on = 0; SCTP_TCB_UNLOCK(stcb); error = EALREADY; goto out_now; } if ((at + incr) > m->m_len) { totaddr = i; break; } sa = (struct sockaddr *)((caddr_t)sa + incr); } sa = (struct sockaddr *)(totaddrp + 1); #ifdef INET6 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && (num_v6 > 0)) { splx(s); error = EINVAL; goto out_now; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && (num_v4 > 0)) { struct in6pcb *inp6; inp6 = (struct in6pcb *)inp; if ( (inp6->inp_flags & IN6P_IPV6_V6ONLY) ) { /* * if IPV6_V6ONLY flag, ignore connections destined * to a v4 addr or v4-mapped addr */ error = EINVAL; goto out_now; } } #endif /* INET6 */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == SCTP_PCB_FLAGS_UNBOUND) { /* Bind a ephemeral port */ SCTP_INP_WUNLOCK(inp); error = sctp_inpcb_bind(so, NULL, p); if (error) { goto out_now; } } else { SCTP_INP_WUNLOCK(inp); } /* We are GOOD to go */ stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0); if (stcb == NULL) { /* 
Gak! no memory */ error = ENOMEM; goto out_now; } /* move to second address */ if (sa->sa_family == AF_INET) sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); else sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); for (i = 1; i < totaddr; i++) { if (sa->sa_family == AF_INET) { incr = sizeof(struct sockaddr_in); if (sctp_add_remote_addr(stcb, sa, 0, 8)) { /* assoc gone no un-lock */ sctp_free_assoc(inp, stcb, 0); error = ENOBUFS; goto out_now; } } else if (sa->sa_family == AF_INET6) { incr = sizeof(struct sockaddr_in6); if (sctp_add_remote_addr(stcb, sa, 0, 8)) { /* assoc gone no un-lock */ sctp_free_assoc(inp, stcb, 0); error = ENOBUFS; goto out_now; } } sa = (struct sockaddr *)((caddr_t)sa + incr); } stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; /* initialize authentication parameters for the assoc */ sctp_initialize_auth_params(inp, stcb); if (delay) { /* doing delayed connection */ stcb->asoc.delayed_connection = 1; sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); } else { SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_send_initiate(inp, stcb); } SCTP_TCB_UNLOCK(stcb); if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; /* Set the connected flag so we can queue data */ soisconnecting(so); } out_now: if (creat_lock_on) SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_DECR_REF(inp); splx(s); return error; } static int sctp_optsget(struct socket *so, int opt, struct mbuf **mp, struct thread *p ) { struct sctp_inpcb *inp; struct mbuf *m; int error, optval = 0; struct sctp_tcb *stcb = NULL; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) return EINVAL; error = 0; if (mp == NULL) { return (EINVAL); } m = *mp; if (m == NULL) { /* Got to have a mbuf */ return (EINVAL); } switch (opt) { case SCTP_NODELAY: case SCTP_AUTOCLOSE: case SCTP_EXPLICIT_EOR: case SCTP_AUTO_ASCONF: case SCTP_DISABLE_FRAGMENTS: case SCTP_I_WANT_MAPPED_V4_ADDR: case 
SCTP_USE_EXT_RCVINFO: SCTP_INP_RLOCK(inp); switch (opt) { case SCTP_DISABLE_FRAGMENTS: optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); break; case SCTP_I_WANT_MAPPED_V4_ADDR: optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); break; case SCTP_AUTO_ASCONF: optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); break; case SCTP_EXPLICIT_EOR: optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); break; case SCTP_NODELAY: optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); break; case SCTP_USE_EXT_RCVINFO: optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); break; case SCTP_AUTOCLOSE: if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) optval = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); else optval = 0; break; default: error = ENOPROTOOPT; } /* end switch (sopt->sopt_name) */ if (opt != SCTP_AUTOCLOSE) { /* make it an "on/off" value */ optval = (optval != 0); } if ((size_t)m->m_len < sizeof(int)) { error = EINVAL; } SCTP_INP_RUNLOCK(inp); if (error == 0) { /* return the option value */ *mtod(m, int *)= optval; m->m_len = sizeof(optval); } break; case SCTP_PARTIAL_DELIVERY_POINT: { if ((size_t)m->m_len < sizeof(unsigned int)) { error = EINVAL; break; } *mtod(m, unsigned int *)= inp->partial_delivery_point; m->m_len = sizeof(unsigned int); } break; case SCTP_FRAGMENT_INTERLEAVE: { if ((size_t)m->m_len < sizeof(unsigned int)) { error = EINVAL; break; } *mtod(m, unsigned int *)= sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); m->m_len = sizeof(unsigned int); } break; case SCTP_CMT_ON_OFF: { if ((size_t)m->m_len < sizeof(unsigned int)) { error = EINVAL; break; } *mtod(m, unsigned int *)= sctp_cmt_sockopt_on_off; m->m_len = sizeof(unsigned int); } break; case SCTP_CMT_USE_DAC: { *mtod(m, unsigned int *)= sctp_cmt_sockopt_use_dac; m->m_len = sizeof(unsigned int); } break; case SCTP_GET_ADDR_LEN: { struct sctp_assoc_value *av; if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) { error = EINVAL; 
break; } av = mtod(m, struct sctp_assoc_value *); error = EINVAL; #ifdef AF_INET if (av->assoc_value == AF_INET) { av->assoc_value = sizeof(struct sockaddr_in); error = 0; } #endif #ifdef AF_INET6 if (av->assoc_value == AF_INET6) { av->assoc_value = sizeof(struct sockaddr_in6); error = 0; } #endif } break; case SCTP_GET_ASOC_ID_LIST: { struct sctp_assoc_ids *ids; int cnt, at; uint16_t orig; if ((size_t)m->m_len < sizeof(struct sctp_assoc_ids)) { error = EINVAL; break; } ids = mtod(m, struct sctp_assoc_ids *); cnt = 0; SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { none_out_now: ids->asls_numb_present = 0; ids->asls_more_to_get = 0; SCTP_INP_RUNLOCK(inp); break; } orig = ids->asls_assoc_start; stcb = LIST_FIRST(&inp->sctp_asoc_list); while (orig) { stcb = LIST_NEXT(stcb, sctp_tcblist); orig--; cnt--; if (stcb == NULL) goto none_out_now; } if (stcb == NULL) goto none_out_now; at = 0; ids->asls_numb_present = 0; ids->asls_more_to_get = 1; while (at < MAX_ASOC_IDS_RET) { ids->asls_assoc_id[at] = sctp_get_associd(stcb); at++; ids->asls_numb_present++; stcb = LIST_NEXT(stcb, sctp_tcblist); if (stcb == NULL) { ids->asls_more_to_get = 0; break; } } SCTP_INP_RUNLOCK(inp); } break; case SCTP_CONTEXT: { struct sctp_assoc_value *av; if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) { error = EINVAL; break; } av = mtod(m, struct sctp_assoc_value *); if (av->assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, av->assoc_id, 1); if (stcb == NULL) { error = ENOTCONN; } else { av->assoc_value = stcb->asoc.context; SCTP_TCB_UNLOCK(stcb); } } else { av->assoc_value = inp->sctp_context; } } break; case SCTP_GET_NONCE_VALUES: { struct sctp_get_nonce_values *gnv; if ((size_t)m->m_len < sizeof(struct sctp_get_nonce_values)) { error = EINVAL; break; } gnv = mtod(m, struct sctp_get_nonce_values *); stcb = sctp_findassociation_ep_asocid(inp, gnv->gn_assoc_id, 1); if (stcb == NULL) { error = ENOTCONN; } else { gnv->gn_peers_tag = 
stcb->asoc.peer_vtag; gnv->gn_local_tag = stcb->asoc.my_vtag; SCTP_TCB_UNLOCK(stcb); } } break; case SCTP_DELAYED_ACK_TIME: { struct sctp_assoc_value *tm; if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) { error = EINVAL; break; } tm = mtod(m, struct sctp_assoc_value *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); tm->assoc_value = stcb->asoc.delayed_ack; SCTP_TCB_UNLOCK(stcb); } else { tm->assoc_value = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); } SCTP_INP_RUNLOCK(inp); } else { stcb = sctp_findassociation_ep_asocid(inp, tm->assoc_id, 1); if (stcb == NULL) { error = ENOTCONN; tm->assoc_value = 0; } else { stcb->asoc.delayed_ack = tm->assoc_value; SCTP_TCB_UNLOCK(stcb); } } } break; case SCTP_GET_SNDBUF_USE: if ((size_t)m->m_len < sizeof(struct sctp_sockstat)) { error = EINVAL; } else { struct sctp_sockstat *ss; struct sctp_tcb *stcb; struct sctp_association *asoc; ss = mtod(m, struct sctp_sockstat *); stcb = sctp_findassociation_ep_asocid(inp, ss->ss_assoc_id, 1); if (stcb == NULL) { error = ENOTCONN; } else { asoc = &stcb->asoc; ss->ss_total_sndbuf = (uint32_t) asoc->total_output_queue_size; ss->ss_total_recv_buf = (uint32_t) (asoc->size_on_reasm_queue + asoc->size_on_all_streams); SCTP_TCB_UNLOCK(stcb); error = 0; m->m_len = sizeof(struct sctp_sockstat); } } break; case SCTP_MAXBURST: { uint8_t *burst; burst = mtod(m, uint8_t *); SCTP_INP_RLOCK(inp); *burst = inp->sctp_ep.max_burst; SCTP_INP_RUNLOCK(inp); m->m_len = sizeof(uint8_t); } break; case SCTP_MAXSEG: { uint32_t *segsize; sctp_assoc_t *assoc_id; int ovh; if ((size_t)m->m_len < sizeof(uint32_t)) { error = EINVAL; break; } if ((size_t)m->m_len < sizeof(sctp_assoc_t)) { error = EINVAL; break; } assoc_id = mtod(m, sctp_assoc_t *); segsize = mtod(m, uint32_t *); m->m_len = sizeof(uint32_t); if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & 
SCTP_PCB_FLAGS_CONNECTED)) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { struct sctp_tcb *stcb; SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); *segsize = sctp_get_frag_point(stcb, &stcb->asoc); SCTP_TCB_UNLOCK(stcb); } else { SCTP_INP_RUNLOCK(inp); goto skipit; } } else { stcb = sctp_findassociation_ep_asocid(inp, *assoc_id, 1); if (stcb) { *segsize = sctp_get_frag_point(stcb, &stcb->asoc); SCTP_TCB_UNLOCK(stcb); break; } skipit: /* * default is to get the max, if I can't * calculate from an existing association. */ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { ovh = SCTP_MED_OVERHEAD; } else { ovh = SCTP_MED_V4_OVERHEAD; } *segsize = inp->sctp_frag_point - ovh; } } break; case SCTP_SET_DEBUG_LEVEL: #ifdef SCTP_DEBUG { uint32_t *level; if ((size_t)m->m_len < sizeof(uint32_t)) { error = EINVAL; break; } level = mtod(m, uint32_t *); error = 0; *level = sctp_debug_on; m->m_len = sizeof(uint32_t); printf("Returning DEBUG LEVEL %x is set\n", (uint32_t) sctp_debug_on); } #else /* SCTP_DEBUG */ error = EOPNOTSUPP; #endif break; case SCTP_GET_STAT_LOG: #ifdef SCTP_STAT_LOGGING error = sctp_fill_stat_log(m); #else /* SCTP_DEBUG */ error = EOPNOTSUPP; #endif break; case SCTP_EVENTS: { struct sctp_event_subscribe *events; if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) { error = EINVAL; break; } events = mtod(m, struct sctp_event_subscribe *); memset(events, 0, sizeof(*events)); SCTP_INP_RLOCK(inp); if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) events->sctp_data_io_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) events->sctp_association_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) events->sctp_address_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) events->sctp_send_failure_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) events->sctp_peer_error_event = 1; if 
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) events->sctp_shutdown_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) events->sctp_partial_delivery_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) events->sctp_adaptation_layer_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) events->sctp_authentication_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) events->sctp_stream_reset_events = 1; SCTP_INP_RUNLOCK(inp); m->m_len = sizeof(struct sctp_event_subscribe); } break; case SCTP_ADAPTATION_LAYER: if ((size_t)m->m_len < sizeof(int)) { error = EINVAL; break; } SCTP_INP_RLOCK(inp); *mtod(m, int *)= inp->sctp_ep.adaptation_layer_indicator; SCTP_INP_RUNLOCK(inp); m->m_len = sizeof(int); break; case SCTP_SET_INITIAL_DBG_SEQ: if ((size_t)m->m_len < sizeof(int)) { error = EINVAL; break; } SCTP_INP_RLOCK(inp); *mtod(m, int *)= inp->sctp_ep.initial_sequence_debug; SCTP_INP_RUNLOCK(inp); m->m_len = sizeof(int); break; case SCTP_GET_LOCAL_ADDR_SIZE: if ((size_t)m->m_len < sizeof(int)) { error = EINVAL; break; } SCTP_INP_RLOCK(inp); *mtod(m, int *)= sctp_count_max_addresses(inp); SCTP_INP_RUNLOCK(inp); m->m_len = sizeof(int); break; case SCTP_GET_REMOTE_ADDR_SIZE: { sctp_assoc_t *assoc_id; uint32_t *val, sz; struct sctp_nets *net; if ((size_t)m->m_len < sizeof(sctp_assoc_t)) { error = EINVAL; break; } stcb = NULL; val = mtod(m, uint32_t *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } if (stcb == NULL) { assoc_id = mtod(m, sctp_assoc_t *); stcb = sctp_findassociation_ep_asocid(inp, *assoc_id, 1); } if (stcb == NULL) { error = EINVAL; break; } *val = 0; sz = 0; /* Count the sizes */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) || (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { sz += 
sizeof(struct sockaddr_in6); } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { sz += sizeof(struct sockaddr_in); } else { /* huh */ break; } } SCTP_TCB_UNLOCK(stcb); *val = sz; m->m_len = sizeof(uint32_t); } break; case SCTP_GET_PEER_ADDRESSES: /* * Get the address information, an array is passed in to * fill up we pack it. */ { int cpsz, left; struct sockaddr_storage *sas; struct sctp_nets *net; struct sctp_getaddresses *saddr; if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) { error = EINVAL; break; } left = m->m_len - sizeof(struct sctp_getaddresses); saddr = mtod(m, struct sctp_getaddresses *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, saddr->sget_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } m->m_len = sizeof(struct sctp_getaddresses); sas = (struct sockaddr_storage *)&saddr->addr[0]; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) || (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) { cpsz = sizeof(struct sockaddr_in6); } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { cpsz = sizeof(struct sockaddr_in); } else { /* huh */ break; } if (left < cpsz) { /* not enough room. 
*/ break; } if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) { /* Must map the address */ in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr, (struct sockaddr_in6 *)sas); } else { memcpy(sas, &net->ro._l_addr, cpsz); } ((struct sockaddr_in *)sas)->sin_port = stcb->rport; sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); left -= cpsz; m->m_len += cpsz; } SCTP_TCB_UNLOCK(stcb); } break; case SCTP_GET_LOCAL_ADDRESSES: { int limit, actual; struct sockaddr_storage *sas; struct sctp_getaddresses *saddr; if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) { error = EINVAL; break; } saddr = mtod(m, struct sctp_getaddresses *); if (saddr->sget_assoc_id) { if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, saddr->sget_assoc_id, 1); } else { stcb = NULL; } /* * assure that the TCP model does not need a assoc * id once connected. 
*/ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) && (stcb == NULL)) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } sas = (struct sockaddr_storage *)&saddr->addr[0]; limit = m->m_len - sizeof(sctp_assoc_t); actual = sctp_fill_up_addresses(inp, stcb, limit, sas); if (stcb) SCTP_TCB_UNLOCK(stcb); m->m_len = sizeof(struct sockaddr_storage) + actual; } break; case SCTP_PEER_ADDR_PARAMS: { struct sctp_paddrparams *paddrp; struct sctp_nets *net; if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) { error = EINVAL; break; } paddrp = mtod(m, struct sctp_paddrparams *); net = NULL; if (paddrp->spp_assoc_id) { if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); } SCTP_INP_RLOCK(inp); } else { stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id, 1); } if (stcb == NULL) { error = ENOENT; break; } } if ((stcb == NULL) && ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) || (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) { /* Lookup via address */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); } SCTP_INP_RUNLOCK(inp); } else { SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if (stcb == NULL) { error = ENOENT; break; } } if (stcb) { /* Applys to the specific association */ paddrp->spp_flags = 0; if (net) { paddrp->spp_pathmaxrxt = net->failure_threshold; paddrp->spp_pathmtu = net->mtu; /* get flags for HB */ if (net->dest_state & SCTP_ADDR_NOHB) paddrp->spp_flags |= SPP_HB_DISABLE; else paddrp->spp_flags |= 
SPP_HB_ENABLE; /* get flags for PMTU */ if (callout_pending(&net->pmtu_timer.timer)) { paddrp->spp_flags |= SPP_PMTUD_ENABLE; } else { paddrp->spp_flags |= SPP_PMTUD_DISABLE; } #ifdef AF_INET if (net->ro._l_addr.sin.sin_family == AF_INET) { paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc; paddrp->spp_flags |= SPP_IPV4_TOS; } #endif #ifdef AF_INET6 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { paddrp->spp_ipv6_flowlabel = net->tos_flowlabel; paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; } #endif } else { /* * No destination so return default * value */ paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc); #ifdef AF_INET paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc; paddrp->spp_flags |= SPP_IPV4_TOS; #endif #ifdef AF_INET6 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel; paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; #endif /* default settings should be these */ if (sctp_is_hb_timer_running(stcb)) { paddrp->spp_flags |= SPP_HB_ENABLE; } } paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; paddrp->spp_sackdelay = stcb->asoc.delayed_ack; /* * Currently we don't support no sack delay * aka SPP_SACKDELAY_DISABLE. 
*/ paddrp->spp_flags |= SPP_SACKDELAY_ENABLE; paddrp->spp_assoc_id = sctp_get_associd(stcb); SCTP_TCB_UNLOCK(stcb); } else { /* Use endpoint defaults */ SCTP_INP_RLOCK(inp); paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); paddrp->spp_sackdelay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); paddrp->spp_assoc_id = (sctp_assoc_t) 0; /* get inp's default */ #ifdef AF_INET paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos; paddrp->spp_flags |= SPP_IPV4_TOS; #endif #ifdef AF_INET6 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; } #endif /* can't return this */ paddrp->spp_pathmaxrxt = 0; paddrp->spp_pathmtu = 0; /* default behavior, no stcb */ paddrp->spp_flags = SPP_HB_ENABLE | SPP_SACKDELAY_ENABLE | SPP_PMTUD_ENABLE; SCTP_INP_RUNLOCK(inp); } m->m_len = sizeof(struct sctp_paddrparams); } break; case SCTP_GET_PEER_ADDR_INFO: { struct sctp_paddrinfo *paddri; struct sctp_nets *net; if ((size_t)m->m_len < sizeof(struct sctp_paddrinfo)) { error = EINVAL; break; } paddri = mtod(m, struct sctp_paddrinfo *); net = NULL; if ((((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET) || (((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET6)) { /* Lookup via address */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address); } SCTP_INP_RUNLOCK(inp); } else { SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } } else { stcb = NULL; } if ((stcb == NULL) || (net == NULL)) { if (stcb) { SCTP_TCB_UNLOCK(stcb); } error = ENOENT; break; } m->m_len = 
sizeof(struct sctp_paddrinfo); paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB); paddri->spinfo_cwnd = net->cwnd; paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1; paddri->spinfo_rto = net->RTO; paddri->spinfo_assoc_id = sctp_get_associd(stcb); SCTP_TCB_UNLOCK(stcb); } break; case SCTP_PCB_STATUS: { struct sctp_pcbinfo *spcb; if ((size_t)m->m_len < sizeof(struct sctp_pcbinfo)) { error = EINVAL; break; } spcb = mtod(m, struct sctp_pcbinfo *); sctp_fill_pcbinfo(spcb); m->m_len = sizeof(struct sctp_pcbinfo); } break; case SCTP_STATUS: { struct sctp_nets *net; struct sctp_status *sstat; if ((size_t)m->m_len < sizeof(struct sctp_status)) { error = EINVAL; break; } sstat = mtod(m, struct sctp_status *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, sstat->sstat_assoc_id, 1); if (stcb == NULL) { error = EINVAL; break; } /* * I think passing the state is fine since * sctp_constants.h will be available to the user * land. */ sstat->sstat_state = stcb->asoc.state; sstat->sstat_rwnd = stcb->asoc.peers_rwnd; sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt; /* * We can't include chunks that have been passed to * the socket layer. Only things in queue. 
*/ sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + stcb->asoc.cnt_on_all_streams); sstat->sstat_instrms = stcb->asoc.streamincnt; sstat->sstat_outstrms = stcb->asoc.streamoutcnt; sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); memcpy(&sstat->sstat_primary.spinfo_address, &stcb->asoc.primary_destination->ro._l_addr, ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); net = stcb->asoc.primary_destination; ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; /* * Again the user can get info from sctp_constants.h * for what the state of the network is. */ sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK; sstat->sstat_primary.spinfo_cwnd = net->cwnd; sstat->sstat_primary.spinfo_srtt = net->lastsa; sstat->sstat_primary.spinfo_rto = net->RTO; sstat->sstat_primary.spinfo_mtu = net->mtu; sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); SCTP_TCB_UNLOCK(stcb); m->m_len = sizeof(*sstat); } break; case SCTP_RTOINFO: { struct sctp_rtoinfo *srto; if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) { error = EINVAL; break; } srto = mtod(m, struct sctp_rtoinfo *); if (srto->srto_assoc_id == 0) { /* Endpoint only please */ SCTP_INP_RLOCK(inp); srto->srto_initial = inp->sctp_ep.initial_rto; srto->srto_max = inp->sctp_ep.sctp_maxrto; srto->srto_min = inp->sctp_ep.sctp_minrto; SCTP_INP_RUNLOCK(inp); break; } if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id, 1); if (stcb == NULL) { error = EINVAL; break; } srto->srto_initial = stcb->asoc.initial_rto; srto->srto_max = stcb->asoc.maxrto; srto->srto_min = stcb->asoc.minrto; SCTP_TCB_UNLOCK(stcb); m->m_len = sizeof(*srto); } break; case SCTP_ASSOCINFO: { struct sctp_assocparams *sasoc; if ((size_t)m->m_len < sizeof(struct 
sctp_assocparams)) { error = EINVAL; break; } sasoc = mtod(m, struct sctp_assocparams *); stcb = NULL; if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else if (sasoc->sasoc_assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, sasoc->sasoc_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } } else { stcb = NULL; } if (stcb) { sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; sasoc->sasoc_cookie_life = stcb->asoc.cookie_life; SCTP_TCB_UNLOCK(stcb); } else { SCTP_INP_RLOCK(inp); sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; sasoc->sasoc_number_peer_destinations = 0; sasoc->sasoc_peer_rwnd = 0; sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life; SCTP_INP_RUNLOCK(inp); } m->m_len = sizeof(*sasoc); } break; case SCTP_DEFAULT_SEND_PARAM: { struct sctp_sndrcvinfo *s_info; if (m->m_len != sizeof(struct sctp_sndrcvinfo)) { error = EINVAL; break; } s_info = mtod(m, struct sctp_sndrcvinfo *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } /* Copy it out */ *s_info = stcb->asoc.def_send; SCTP_TCB_UNLOCK(stcb); m->m_len = sizeof(*s_info); } break; case SCTP_INITMSG: { struct sctp_initmsg *sinit; if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) { error = EINVAL; break; } sinit = mtod(m, struct sctp_initmsg *); SCTP_INP_RLOCK(inp); sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; 
sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; SCTP_INP_RUNLOCK(inp); m->m_len = sizeof(*sinit); } break; case SCTP_PRIMARY_ADDR: /* we allow a "get" operation on this */ { struct sctp_setprim *ssp; if ((size_t)m->m_len < sizeof(struct sctp_setprim)) { error = EINVAL; break; } ssp = mtod(m, struct sctp_setprim *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else { stcb = sctp_findassociation_ep_asocid(inp, ssp->ssp_assoc_id, 1); if (stcb == NULL) { /* * one last shot, try it by the * address in */ struct sctp_nets *net; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&ssp->ssp_addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if (stcb == NULL) { error = EINVAL; break; } } /* simply copy out the sockaddr_storage... */ memcpy(&ssp->ssp_addr, &stcb->asoc.primary_destination->ro._l_addr, ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len); SCTP_TCB_UNLOCK(stcb); m->m_len = sizeof(*ssp); } break; case SCTP_HMAC_IDENT: { struct sctp_hmacalgo *shmac; sctp_hmaclist_t *hmaclist; uint32_t size; int i; if ((size_t)(m->m_len) < sizeof(*shmac)) { error = EINVAL; break; } shmac = mtod(m, struct sctp_hmacalgo *); SCTP_INP_RLOCK(inp); hmaclist = inp->sctp_ep.local_hmacs; if (hmaclist == NULL) { /* no HMACs to return */ m->m_len = sizeof(*shmac); break; } /* is there room for all of the hmac ids? 
*/ size = sizeof(*shmac) + (hmaclist->num_algo * sizeof(shmac->shmac_idents[0])); if ((size_t)(m->m_len) < size) { error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } /* copy in the list */ for (i = 0; i < hmaclist->num_algo; i++) shmac->shmac_idents[i] = hmaclist->hmac[i]; SCTP_INP_RUNLOCK(inp); m->m_len = size; break; } case SCTP_AUTH_ACTIVE_KEY: { struct sctp_authkeyid *scact; if ((size_t)(m->m_len) < sizeof(*scact)) { error = EINVAL; break; } scact = mtod(m, struct sctp_authkeyid *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { /* * if one-to-one, get from the connected * assoc; else endpoint */ SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else if (scact->scact_assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, scact->scact_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } } if (stcb != NULL) { /* get the active key on the assoc */ scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid; SCTP_TCB_UNLOCK(stcb); } else { /* get the endpoint active key */ SCTP_INP_RLOCK(inp); scact->scact_keynumber = inp->sctp_ep.default_keyid; SCTP_INP_RUNLOCK(inp); } m->m_len = sizeof(*scact); break; } case SCTP_LOCAL_AUTH_CHUNKS: { struct sctp_authchunks *sac; sctp_auth_chklist_t *chklist = NULL; int size = 0; if ((size_t)(m->m_len) < sizeof(*sac)) { error = EINVAL; break; } sac = mtod(m, struct sctp_authchunks *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { /* * if one-to-one, get from the connected * assoc; else endpoint */ SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb != NULL) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else if (sac->gauth_assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, sac->gauth_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } } if (stcb != NULL) { /* get off the assoc */ chklist = stcb->asoc.local_auth_chunks; if (chklist == NULL) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } /* is there enough space? 
*/ size = sctp_auth_get_chklist_size(chklist); if ((size_t)m->m_len < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } /* copy in the chunks */ sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); SCTP_TCB_UNLOCK(stcb); } else { /* get off the endpoint */ SCTP_INP_RLOCK(inp); chklist = inp->sctp_ep.local_auth_chunks; if (chklist == NULL) { error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } /* is there enough space? */ size = sctp_auth_get_chklist_size(chklist); if ((size_t)m->m_len < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } /* copy in the chunks */ sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); SCTP_INP_RUNLOCK(inp); } m->m_len = sizeof(struct sctp_authchunks) + size; break; } case SCTP_PEER_AUTH_CHUNKS: { struct sctp_authchunks *sac; sctp_auth_chklist_t *chklist = NULL; int size = 0; if ((size_t)(m->m_len) < sizeof(*sac)) { error = EINVAL; break; } sac = mtod(m, struct sctp_authchunks *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { /* * if one-to-one, get from the connected * assoc, else endpoint */ SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb != NULL) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else if (sac->gauth_assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, sac->gauth_assoc_id, 1); } if (stcb == NULL) { error = ENOENT; break; } /* get off the assoc */ chklist = stcb->asoc.peer_auth_chunks; if (chklist == NULL) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } /* is there enough space? 
*/ size = sctp_auth_get_chklist_size(chklist); if ((size_t)m->m_len < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } /* copy in the chunks */ sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); SCTP_TCB_UNLOCK(stcb); m->m_len = sizeof(struct sctp_authchunks) + size; break; } default: error = ENOPROTOOPT; m->m_len = 0; break; } /* end switch (sopt->sopt_name) */ return (error); } static int sctp_optsset(struct socket *so, int opt, struct mbuf **mp, struct thread *p ) { int error, *mopt, set_opt, s; struct mbuf *m; struct sctp_tcb *stcb = NULL; struct sctp_inpcb *inp; if (mp == NULL) { return (EINVAL); } m = *mp; if (m == NULL) return (EINVAL); inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) return EINVAL; error = 0; switch (opt) { case SCTP_NODELAY: case SCTP_AUTOCLOSE: case SCTP_AUTO_ASCONF: case SCTP_EXPLICIT_EOR: case SCTP_DISABLE_FRAGMENTS: case SCTP_USE_EXT_RCVINFO: case SCTP_I_WANT_MAPPED_V4_ADDR: /* copy in the option value */ if ((size_t)m->m_len < sizeof(int)) { error = EINVAL; break; } mopt = mtod(m, int *); set_opt = 0; if (error) break; switch (opt) { case SCTP_DISABLE_FRAGMENTS: set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; break; case SCTP_AUTO_ASCONF: set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF; break; case SCTP_EXPLICIT_EOR: set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR; break; case SCTP_USE_EXT_RCVINFO: set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO; break; case SCTP_I_WANT_MAPPED_V4_ADDR: if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; } else { return (EINVAL); } break; case SCTP_NODELAY: set_opt = SCTP_PCB_FLAGS_NODELAY; break; case SCTP_AUTOCLOSE: set_opt = SCTP_PCB_FLAGS_AUTOCLOSE; /* * The value is in ticks. Note this does not effect * old associations, only new ones. 
*/ inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); break; } SCTP_INP_WLOCK(inp); if (*mopt != 0) { sctp_feature_on(inp, set_opt); } else { sctp_feature_off(inp, set_opt); } SCTP_INP_WUNLOCK(inp); break; case SCTP_PARTIAL_DELIVERY_POINT: { if ((size_t)m->m_len < sizeof(unsigned int)) { error = EINVAL; break; } inp->partial_delivery_point = *mtod(m, unsigned int *); m->m_len = sizeof(unsigned int); } break; case SCTP_FRAGMENT_INTERLEAVE: /* not yet until we re-write sctp_recvmsg() */ { int on_off; if ((size_t)m->m_len < sizeof(int)) { error = EINVAL; break; } on_off = *(mtod(m, int *)); if (on_off) { sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); } } break; case SCTP_CMT_ON_OFF: { struct sctp_assoc_value *av; if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) { error = EINVAL; break; } av = mtod(m, struct sctp_assoc_value *); stcb = sctp_findassociation_ep_asocid(inp, av->assoc_id, 1); if (stcb == NULL) { error = ENOTCONN; } else { if (sctp_cmt_on_off) { stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value; } else { if ((stcb->asoc.sctp_cmt_on_off) && (av->assoc_value == 0)) { stcb->asoc.sctp_cmt_on_off = 0; } else { error = EACCES; } } SCTP_TCB_UNLOCK(stcb); } } break; case SCTP_CMT_USE_DAC: { if ((size_t)m->m_len < sizeof(unsigned int)) { error = EINVAL; break; } sctp_cmt_sockopt_use_dac = *mtod(m, unsigned int *); if (sctp_cmt_sockopt_use_dac != 0) sctp_cmt_sockopt_use_dac = 1; } break; case SCTP_CLR_STAT_LOG: #ifdef SCTP_STAT_LOGGING sctp_clr_stat_log(); #else error = EOPNOTSUPP; #endif break; case SCTP_CONTEXT: { struct sctp_assoc_value *av; if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) { error = EINVAL; break; } av = mtod(m, struct sctp_assoc_value *); if (av->assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, av->assoc_id, 1); if (stcb == NULL) { error = ENOTCONN; } else { stcb->asoc.context = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } } else { inp->sctp_context = 
av->assoc_value; } } break; case SCTP_DELAYED_ACK_TIME: { struct sctp_assoc_value *tm; if ((size_t)m->m_len < sizeof(struct sctp_assoc_value)) { error = EINVAL; break; } tm = mtod(m, struct sctp_assoc_value *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_WLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); stcb->asoc.delayed_ack = tm->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(tm->assoc_value); } SCTP_INP_WUNLOCK(inp); } else { if (tm->assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, tm->assoc_id, 1); if (stcb == NULL) { error = ENOTCONN; } else { stcb->asoc.delayed_ack = tm->assoc_value; SCTP_TCB_UNLOCK(stcb); } } else { SCTP_INP_WLOCK(inp); inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(tm->assoc_value); SCTP_INP_WUNLOCK(inp); } } } break; case SCTP_AUTH_CHUNK: { struct sctp_authchunk *sauth; if ((size_t)m->m_len < sizeof(*sauth)) { error = EINVAL; break; } sauth = mtod(m, struct sctp_authchunk *); if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) error = EINVAL; break; } case SCTP_AUTH_KEY: { struct sctp_authkey *sca; struct sctp_keyhead *shared_keys; sctp_sharedkey_t *shared_key; sctp_key_t *key = NULL; int size; size = m->m_len - sizeof(*sca); if (size < 0) { error = EINVAL; break; } sca = mtod(m, struct sctp_authkey *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { /* * if one-to-one, set it on the connected * assoc; else endpoint */ SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else if (sca->sca_assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, sca->sca_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } } if (stcb != NULL) { /* set it on the assoc */ shared_keys = &stcb->asoc.shared_keys; /* clear the cached keys for this key id */ sctp_clear_cachedkeys(stcb, sca->sca_keynumber); /* * create the new shared key 
and * insert/replace it */ if (size > 0) { key = sctp_set_key(sca->sca_key, (uint32_t) size); if (key == NULL) { error = ENOMEM; SCTP_TCB_UNLOCK(stcb); break; } } shared_key = sctp_alloc_sharedkey(); if (shared_key == NULL) { sctp_free_key(key); error = ENOMEM; SCTP_TCB_UNLOCK(stcb); break; } shared_key->key = key; shared_key->keyid = sca->sca_keynumber; sctp_insert_sharedkey(shared_keys, shared_key); SCTP_TCB_UNLOCK(stcb); } else { /* ste it on the endpoint */ SCTP_INP_WLOCK(inp); shared_keys = &inp->sctp_ep.shared_keys; /* * clear the cached keys on all assocs for * this key id */ sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); /* * create the new shared key and * insert/replace it */ if (size > 0) { key = sctp_set_key(sca->sca_key, (uint32_t) size); if (key == NULL) { error = ENOMEM; SCTP_INP_WUNLOCK(inp); break; } } shared_key = sctp_alloc_sharedkey(); if (shared_key == NULL) { sctp_free_key(key); error = ENOMEM; SCTP_INP_WUNLOCK(inp); break; } shared_key->key = key; shared_key->keyid = sca->sca_keynumber; sctp_insert_sharedkey(shared_keys, shared_key); SCTP_INP_WUNLOCK(inp); } break; } case SCTP_HMAC_IDENT: { struct sctp_hmacalgo *shmac; sctp_hmaclist_t *hmaclist; uint32_t hmacid; int size, i; size = m->m_len - sizeof(*shmac); if (size < 0) { error = EINVAL; break; } shmac = mtod(m, struct sctp_hmacalgo *); size = size / sizeof(shmac->shmac_idents[0]); hmaclist = sctp_alloc_hmaclist(size); if (hmaclist == NULL) { error = ENOMEM; break; } for (i = 0; i < size; i++) { hmacid = shmac->shmac_idents[i]; if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) { /* invalid HMACs were found */ ; error = EINVAL; goto sctp_set_hmac_done; } } /* set it on the endpoint */ SCTP_INP_WLOCK(inp); if (inp->sctp_ep.local_hmacs) sctp_free_hmaclist(inp->sctp_ep.local_hmacs); inp->sctp_ep.local_hmacs = hmaclist; SCTP_INP_WUNLOCK(inp); sctp_set_hmac_done: break; } case SCTP_AUTH_ACTIVE_KEY: { struct sctp_authkeyid *scact; if ((size_t)m->m_len < sizeof(*scact)) { error = EINVAL; 
break; } scact = mtod(m, struct sctp_authkeyid *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { /* * if one-to-one, set it on the connected * assoc; else endpoint */ SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else if (scact->scact_assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, scact->scact_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } } /* set the active key on the right place */ if (stcb != NULL) { /* set the active key on the assoc */ if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { /* set the active key on the endpoint */ SCTP_INP_WLOCK(inp); if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) error = EINVAL; SCTP_INP_WUNLOCK(inp); } break; } case SCTP_AUTH_DELETE_KEY: { struct sctp_authkeyid *scdel; if ((size_t)m->m_len < sizeof(*scdel)) { error = EINVAL; break; } scdel = mtod(m, struct sctp_authkeyid *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { /* * if one-to-one, delete from the connected * assoc; else endpoint */ SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else if (scdel->scact_assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, scdel->scact_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } } /* delete the key from the right place */ if (stcb != NULL) { if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { SCTP_INP_WLOCK(inp); if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) error = EINVAL; SCTP_INP_WUNLOCK(inp); } break; } case SCTP_RESET_STREAMS: { struct sctp_stream_reset *strrst; uint8_t send_in = 0, send_tsn = 0, send_out = 0; int i; if ((size_t)m->m_len < sizeof(struct sctp_stream_reset)) { error = EINVAL; break; } strrst = mtod(m, struct sctp_stream_reset *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = 
LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, strrst->strrst_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } if (stcb->asoc.peer_supports_strreset == 0) { /* * Peer does not support it, we return * protocol not supported since this is true * for this feature and this peer, not the * socket request in general. */ error = EPROTONOSUPPORT; SCTP_TCB_UNLOCK(stcb); break; } if (stcb->asoc.stream_reset_outstanding) { error = EALREADY; SCTP_TCB_UNLOCK(stcb); break; } if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) { send_in = 1; } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) { send_out = 1; } else if (strrst->strrst_flags == SCTP_RESET_BOTH) { send_in = 1; send_out = 1; } else if (strrst->strrst_flags == SCTP_RESET_TSN) { send_tsn = 1; } else { error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } for (i = 0; i < strrst->strrst_num_streams; i++) { if ((send_in) && (strrst->strrst_list[i] > stcb->asoc.streamincnt)) { error = EINVAL; goto get_out; } if ((send_out) && (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) { error = EINVAL; goto get_out; } } if (error) { get_out: SCTP_TCB_UNLOCK(stcb); break; } error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams, strrst->strrst_list, send_out, (stcb->asoc.str_reset_seq_in - 3), send_in, send_tsn); s = splnet(); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ); SCTP_TCB_UNLOCK(stcb); splx(s); } break; case SCTP_CONNECT_X: if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) { error = EINVAL; break; } error = sctp_do_connect_x(so, inp, m, p, 0); break; case SCTP_CONNECT_X_DELAYED: if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) { error = EINVAL; break; } error = sctp_do_connect_x(so, inp, m, p, 1); break; case SCTP_CONNECT_X_COMPLETE: { struct sockaddr *sa; struct sctp_nets *net; if ((size_t)m->m_len < sizeof(struct sockaddr_in)) { error = EINVAL; break; } sa = mtod(m, 
struct sockaddr *); /* find tcb */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); net = sctp_findnet(stcb, sa); } SCTP_INP_RUNLOCK(inp); } else { SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if (stcb == NULL) { error = ENOENT; break; } if (stcb->asoc.delayed_connection == 1) { stcb->asoc.delayed_connection = 0; SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); sctp_send_initiate(inp, stcb); } else { /* * already expired or did not use delayed * connectx */ error = EALREADY; } SCTP_TCB_UNLOCK(stcb); } break; case SCTP_MAXBURST: { uint8_t *burst; SCTP_INP_WLOCK(inp); burst = mtod(m, uint8_t *); if (*burst) { inp->sctp_ep.max_burst = *burst; } SCTP_INP_WUNLOCK(inp); } break; case SCTP_MAXSEG: { uint32_t *segsize; int ovh; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { ovh = SCTP_MED_OVERHEAD; } else { ovh = SCTP_MED_V4_OVERHEAD; } segsize = mtod(m, uint32_t *); if (*segsize < 1) { error = EINVAL; break; } SCTP_INP_WLOCK(inp); inp->sctp_frag_point = (*segsize + ovh); if (inp->sctp_frag_point < MHLEN) { inp->sctp_frag_point = MHLEN; } SCTP_INP_WUNLOCK(inp); } break; case SCTP_SET_DEBUG_LEVEL: #ifdef SCTP_DEBUG { uint32_t *level; if ((size_t)m->m_len < sizeof(uint32_t)) { error = EINVAL; break; } level = mtod(m, uint32_t *); error = 0; sctp_debug_on = (*level & (SCTP_DEBUG_ALL | SCTP_DEBUG_NOISY)); printf("SETTING DEBUG LEVEL to %x\n", (uint32_t) sctp_debug_on); } #else error = EOPNOTSUPP; #endif /* SCTP_DEBUG */ break; case SCTP_EVENTS: { struct sctp_event_subscribe *events; if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) { error = EINVAL; break; } SCTP_INP_WLOCK(inp); events = mtod(m, struct sctp_event_subscribe *); if (events->sctp_data_io_event) { sctp_feature_on(inp, 
SCTP_PCB_FLAGS_RECVDATAIOEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); } if (events->sctp_association_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); } if (events->sctp_address_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); } if (events->sctp_send_failure_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } if (events->sctp_peer_error_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); } if (events->sctp_shutdown_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } if (events->sctp_partial_delivery_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); } if (events->sctp_adaptation_layer_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } if (events->sctp_authentication_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); } if (events->sctp_stream_reset_events) { sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } SCTP_INP_WUNLOCK(inp); } break; case SCTP_ADAPTATION_LAYER: { struct sctp_setadaptation *adap_bits; if ((size_t)m->m_len < sizeof(struct sctp_setadaptation)) { error = EINVAL; break; } SCTP_INP_WLOCK(inp); adap_bits = mtod(m, struct sctp_setadaptation *); inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; SCTP_INP_WUNLOCK(inp); } break; case SCTP_SET_INITIAL_DBG_SEQ: { uint32_t *vvv; if ((size_t)m->m_len < sizeof(uint32_t)) { error = EINVAL; break; } SCTP_INP_WLOCK(inp); vvv = 
mtod(m, uint32_t *); inp->sctp_ep.initial_sequence_debug = *vvv; SCTP_INP_WUNLOCK(inp); } break; case SCTP_DEFAULT_SEND_PARAM: { struct sctp_sndrcvinfo *s_info; if (m->m_len != sizeof(struct sctp_sndrcvinfo)) { error = EINVAL; break; } s_info = mtod(m, struct sctp_sndrcvinfo *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else { if (s_info->sinfo_assoc_id) { stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id, 1); } else { stcb = NULL; } } if ((s_info->sinfo_assoc_id == 0) && (stcb == NULL)) { inp->def_send = *s_info; } else if (stcb == NULL) { error = ENOENT; break; } /* Validate things */ if (s_info->sinfo_stream > stcb->asoc.streamoutcnt) { SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } /* Copy it in */ stcb->asoc.def_send = *s_info; SCTP_TCB_UNLOCK(stcb); } break; case SCTP_PEER_ADDR_PARAMS: /* Applys to the specific association */ { struct sctp_paddrparams *paddrp; struct sctp_nets *net; if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) { error = EINVAL; break; } paddrp = mtod(m, struct sctp_paddrparams *); net = NULL; if (paddrp->spp_assoc_id) { if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); } SCTP_INP_RUNLOCK(inp); } else { stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id, 1); } if (stcb == NULL) { error = ENOENT; break; } } if ((stcb == NULL) && ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) || (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) { /* Lookup via address */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address); } 
SCTP_INP_RUNLOCK(inp); } else { SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } } if (stcb) { /************************TCB SPECIFIC SET ******************/ /* sack delay first */ if (paddrp->spp_flags & SPP_SACKDELAY_ENABLE) { /* * we do NOT support turning it off * (yet). only setting the delay. */ if (paddrp->spp_sackdelay >= SCTP_CLOCK_GRANULARITY) stcb->asoc.delayed_ack = paddrp->spp_sackdelay; else stcb->asoc.delayed_ack = SCTP_CLOCK_GRANULARITY; } else if (paddrp->spp_flags & SPP_SACKDELAY_DISABLE) { stcb->asoc.delayed_ack = 0; } /* * do we change the timer for HB, we run * only one? */ if (paddrp->spp_hbinterval) stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) stcb->asoc.heart_beat_delay = 0; /* network sets ? */ if (net) { /************************NET SPECIFIC SET ******************/ if (paddrp->spp_flags & SPP_HB_DEMAND) { /* on demand HB */ sctp_send_hb(stcb, 1, net); } if (paddrp->spp_flags & SPP_HB_DISABLE) { net->dest_state |= SCTP_ADDR_NOHB; } if (paddrp->spp_flags & SPP_HB_ENABLE) { net->dest_state &= ~SCTP_ADDR_NOHB; } if (paddrp->spp_flags & SPP_PMTUD_DISABLE) { if (callout_pending(&net->pmtu_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) { net->mtu = paddrp->spp_pathmtu; if (net->mtu < stcb->asoc.smallest_mtu) sctp_pathmtu_adustment(inp, stcb, net, net->mtu); } } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { if (callout_pending(&net->pmtu_timer.timer)) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } } if (paddrp->spp_pathmaxrxt) net->failure_threshold = paddrp->spp_pathmaxrxt; #ifdef AF_INET if (paddrp->spp_flags & SPP_IPV4_TOS) { if (net->ro._l_addr.sin.sin_family == AF_INET) { net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc; } } #endif #ifdef AF_INET6 if 
(paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { net->tos_flowlabel = paddrp->spp_ipv6_flowlabel; } } #endif } else { /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ if (paddrp->spp_pathmaxrxt) stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; if (paddrp->spp_flags & SPP_HB_ENABLE) { /* Turn back on the timer */ stcb->asoc.hb_is_disabled = 0; sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); } if (paddrp->spp_flags & SPP_HB_DISABLE) { int cnt_of_unconf = 0; struct sctp_nets *lnet; stcb->asoc.hb_is_disabled = 1; TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) { cnt_of_unconf++; } } /* * stop the timer ONLY if we * have no unconfirmed * addresses */ if (cnt_of_unconf == 0) { sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); } } if (paddrp->spp_flags & SPP_HB_ENABLE) { /* start up the timer. */ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); } #ifdef AF_INET if (paddrp->spp_flags & SPP_IPV4_TOS) stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc; #endif #ifdef AF_INET6 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel; #endif } SCTP_TCB_UNLOCK(stcb); } else { /************************NO TCB, SET TO default stuff ******************/ SCTP_INP_WLOCK(inp); /* * For the TOS/FLOWLABEL stuff you set it * with the options on the socket */ if (paddrp->spp_pathmaxrxt) { inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; } if (paddrp->spp_flags & SPP_HB_ENABLE) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } else if (paddrp->spp_flags & SPP_HB_DISABLE) { sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } if (paddrp->spp_flags & SPP_SACKDELAY_ENABLE) { if (paddrp->spp_sackdelay > SCTP_CLOCK_GRANULARITY) 
inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(paddrp->spp_sackdelay); else inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_CLOCK_GRANULARITY); } else if (paddrp->spp_flags & SPP_SACKDELAY_DISABLE) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = 0; } SCTP_INP_WUNLOCK(inp); } } break; case SCTP_RTOINFO: { struct sctp_rtoinfo *srto; if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) { error = EINVAL; break; } srto = mtod(m, struct sctp_rtoinfo *); if (srto->srto_assoc_id == 0) { SCTP_INP_WLOCK(inp); /* * If we have a null asoc, its default for * the endpoint */ if (srto->srto_initial > 10) inp->sctp_ep.initial_rto = srto->srto_initial; if (srto->srto_max > 10) inp->sctp_ep.sctp_maxrto = srto->srto_max; if (srto->srto_min > 10) inp->sctp_ep.sctp_minrto = srto->srto_min; SCTP_INP_WUNLOCK(inp); break; } if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id, 1); if (stcb == NULL) { error = EINVAL; break; } /* Set in ms we hope :-) */ if (srto->srto_initial > 10) stcb->asoc.initial_rto = srto->srto_initial; if (srto->srto_max > 10) stcb->asoc.maxrto = srto->srto_max; if (srto->srto_min > 10) stcb->asoc.minrto = srto->srto_min; SCTP_TCB_UNLOCK(stcb); } break; case SCTP_ASSOCINFO: { struct sctp_assocparams *sasoc; if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) { error = EINVAL; break; } sasoc = mtod(m, struct sctp_assocparams *); if (sasoc->sasoc_assoc_id) { if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, sasoc->sasoc_assoc_id, 1); if (stcb == NULL) { error = ENOENT; break; } } else { stcb = NULL; } if (stcb) { if (sasoc->sasoc_asocmaxrxt) stcb->asoc.max_send_times = 
sasoc->sasoc_asocmaxrxt; sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; sasoc->sasoc_peer_rwnd = 0; sasoc->sasoc_local_rwnd = 0; if (stcb->asoc.cookie_life) stcb->asoc.cookie_life = sasoc->sasoc_cookie_life; SCTP_TCB_UNLOCK(stcb); } else { SCTP_INP_WLOCK(inp); if (sasoc->sasoc_asocmaxrxt) inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; sasoc->sasoc_number_peer_destinations = 0; sasoc->sasoc_peer_rwnd = 0; sasoc->sasoc_local_rwnd = 0; if (sasoc->sasoc_cookie_life) inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life; SCTP_INP_WUNLOCK(inp); } } break; case SCTP_INITMSG: { struct sctp_initmsg *sinit; if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) { error = EINVAL; break; } sinit = mtod(m, struct sctp_initmsg *); SCTP_INP_WLOCK(inp); if (sinit->sinit_num_ostreams) inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; if (sinit->sinit_max_instreams) inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; if (sinit->sinit_max_attempts) inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; if (sinit->sinit_max_init_timeo > 10) /* * We must be at least a 100ms (we set in * ticks) */ inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; SCTP_INP_WUNLOCK(inp); } break; case SCTP_PRIMARY_ADDR: { struct sctp_setprim *spa; struct sctp_nets *net, *lnet; if ((size_t)m->m_len < sizeof(struct sctp_setprim)) { error = EINVAL; break; } spa = mtod(m, struct sctp_setprim *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } else { error = EINVAL; break; } SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, spa->ssp_assoc_id, 1); if (stcb == NULL) { /* One last shot */ SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&spa->ssp_addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); error = EINVAL; break; } } else { /* * find the net, associd or connected lookup 
* type */ net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr); if (net == NULL) { SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } if ((net != stcb->asoc.primary_destination) && (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) { /* Ok we need to set it */ lnet = stcb->asoc.primary_destination; if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH; } net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY; } } SCTP_TCB_UNLOCK(stcb); } break; case SCTP_SET_PEER_PRIMARY_ADDR: { struct sctp_setpeerprim *sspp; if ((size_t)m->m_len < sizeof(struct sctp_setpeerprim)) { error = EINVAL; break; } sspp = mtod(m, struct sctp_setpeerprim *); if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); } else stcb = sctp_findassociation_ep_asocid(inp, sspp->sspp_assoc_id, 1); if (stcb == NULL) { error = EINVAL; break; } if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) { error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } break; case SCTP_BINDX_ADD_ADDR: { struct sctp_getaddresses *addrs; struct sockaddr *addr_touse; struct sockaddr_in sin; /* see if we're bound all already! 
*/ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { error = EINVAL; break; } if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) { error = EINVAL; break; } addrs = mtod(m, struct sctp_getaddresses *); addr_touse = addrs->addr; if (addrs->addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr_touse; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin, sin6); addr_touse = (struct sockaddr *)&sin; } } if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { if (p == NULL) { /* Can't get proc for Net/Open BSD */ error = EINVAL; break; } error = sctp_inpcb_bind(so, addr_touse, p); break; } /* * No locks required here since bind and mgmt_ep_sa * all do their own locking. If we do something for * the FIX: below we may need to lock in that case. */ if (addrs->sget_assoc_id == 0) { /* add the address */ struct sctp_inpcb *lep; ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport; lep = sctp_pcb_findep(addr_touse, 1, 0); if (lep != NULL) { /* * We must decrement the refcount * since we have the ep already and * are binding. No remove going on * here. */ SCTP_INP_DECR_REF(inp); } if (lep == inp) { /* already bound to it.. ok */ break; } else if (lep == NULL) { ((struct sockaddr_in *)addr_touse)->sin_port = 0; error = sctp_addr_mgmt_ep_sa(inp, addr_touse, SCTP_ADD_IP_ADDRESS); } else { error = EADDRNOTAVAIL; } if (error) break; } else { /* * FIX: decide whether we allow assoc based * bindx */ } } break; case SCTP_BINDX_REM_ADDR: { struct sctp_getaddresses *addrs; struct sockaddr *addr_touse; struct sockaddr_in sin; /* see if we're bound all already! 
*/ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { error = EINVAL; break; } if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) { error = EINVAL; break; } addrs = mtod(m, struct sctp_getaddresses *); addr_touse = addrs->addr; if (addrs->addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr_touse; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin, sin6); addr_touse = (struct sockaddr *)&sin; } } /* * No lock required mgmt_ep_sa does its own locking. * If the FIX: below is ever changed we may need to * lock before calling association level binding. */ if (addrs->sget_assoc_id == 0) { /* delete the address */ sctp_addr_mgmt_ep_sa(inp, addr_touse, SCTP_DEL_IP_ADDRESS); } else { /* * FIX: decide whether we allow assoc based * bindx */ } } break; default: error = ENOPROTOOPT; break; } /* end switch (opt) */ return (error); } extern int sctp_chatty_mbuf; int sctp_ctloutput(struct socket *so, struct sockopt *sopt) { struct mbuf *m = NULL; struct sctp_inpcb *inp; int s, error; inp = (struct sctp_inpcb *)so->so_pcb; s = splnet(); if (inp == 0) { splx(s); /* I made the same as TCP since we are not setup? */ return (ECONNRESET); } if (sopt->sopt_level != IPPROTO_SCTP) { /* wrong proto level... 
send back up to IP */ #ifdef INET6 if (INP_CHECK_SOCKAF(so, AF_INET6)) error = ip6_ctloutput(so, sopt); else #endif /* INET6 */ error = ip_ctloutput(so, sopt); splx(s); return (error); } if (sopt->sopt_valsize) { if (sopt->sopt_valsize < MLEN) { m = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA); } else { m = sctp_get_mbuf_for_msg(sopt->sopt_valsize, 0, M_WAIT, 1, MT_DATA); } if (m == NULL) { sctp_m_freem(m); splx(s); return (ENOBUFS); } if (sopt->sopt_valsize > M_TRAILINGSPACE(m)) { /* Limit to actual size gotten */ sopt->sopt_valsize = M_TRAILINGSPACE(m); } error = sooptcopyin(sopt, mtod(m, caddr_t), sopt->sopt_valsize, sopt->sopt_valsize); if (error) { (void)sctp_m_free(m); goto out; } m->m_len = sopt->sopt_valsize; } if (sopt->sopt_dir == SOPT_SET) { error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_td); } else if (sopt->sopt_dir == SOPT_GET) { error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_td); } else { error = EINVAL; } if ((error == 0) && (m != NULL)) { error = sooptcopyout(sopt, mtod(m, caddr_t), m->m_len); sctp_m_freem(m); } else if (m != NULL) { sctp_m_freem(m); } out: splx(s); return (error); } static int sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) { int s = splnet(); int error = 0; int create_lock_on = 0; struct sctp_inpcb *inp; struct sctp_tcb *stcb = NULL; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) { splx(s); /* I made the same as TCP since we are not setup? */ return (ECONNRESET); } SCTP_ASOC_CREATE_LOCK(inp); create_lock_on = 1; SCTP_INP_INCR_REF(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { /* Should I really unlock ? 
*/ error = EFAULT; goto out_now; } #ifdef INET6 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && (addr->sa_family == AF_INET6)) { error = EINVAL; goto out_now; } #endif /* INET6 */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == SCTP_PCB_FLAGS_UNBOUND) { /* Bind a ephemeral port */ error = sctp_inpcb_bind(so, NULL, p); if (error) { goto out_now; } } /* Now do we connect? */ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { error = EINVAL; goto out_now; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ error = EADDRINUSE; goto out_now; } if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); } else { /* * Raise the count a second time, since on sucess * f-a-ep_addr will decrement it. */ SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if (stcb != NULL) { /* Already have or am bring up an association */ error = EALREADY; goto out_now; } /* We are GOOD to go */ stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0); if (stcb == NULL) { /* Gak! 
no memory */ splx(s); return (error); } if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; /* Set the connected flag so we can queue data */ soisconnecting(so); } stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); /* initialize authentication parameters for the assoc */ sctp_initialize_auth_params(inp, stcb); sctp_send_initiate(inp, stcb); out_now: if (create_lock_on) SCTP_ASOC_CREATE_UNLOCK(inp); if (stcb) SCTP_TCB_UNLOCK(stcb); SCTP_INP_DECR_REF(inp); splx(s); return error; } int sctp_listen(struct socket *so, int backlog, struct thread *p) { /* * Note this module depends on the protocol processing being called * AFTER any socket level flags and backlog are applied to the * socket. The traditional way that the socket flags are applied is * AFTER protocol processing. We have made a change to the * sys/kern/uipc_socket.c module to reverse this but this MUST be in * place if the socket API for SCTP is to work properly. */ int s = splnet(); int error = 0; struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) { splx(s); /* I made the same as TCP since we are not setup? */ return (ECONNRESET); } SCTP_INP_RLOCK(inp); #ifdef SCTP_LOCK_LOGGING sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); #endif SOCK_LOCK(so); error = solisten_proto_check(so); if (error) { SOCK_UNLOCK(so); return (error); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ splx(s); SCTP_INP_RUNLOCK(inp); SOCK_UNLOCK(so); return (EADDRINUSE); } if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { /* We must do a bind. */ SCTP_INP_RUNLOCK(inp); if ((error = sctp_inpcb_bind(so, NULL, p))) { /* bind error, probably perm */ SOCK_UNLOCK(so); splx(s); return (error); } } else { SCTP_INP_RUNLOCK(inp); } /* It appears for 7.0 and on, we must always call this. 
*/ solisten_proto(so, backlog); if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { /* remove the ACCEPTCONN flag for one-to-many sockets */ so->so_options &= ~SO_ACCEPTCONN; } if (backlog == 0) { /* turning off listen */ so->so_options &= ~SO_ACCEPTCONN; } SOCK_UNLOCK(so); splx(s); return (error); } static int sctp_defered_wakeup_cnt = 0; int sctp_accept(struct socket *so, struct sockaddr **addr) { int s = splnet(); struct sctp_tcb *stcb; struct sctp_inpcb *inp; union sctp_sockstore store; int error; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) { splx(s); return (ECONNRESET); } SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { return (ENOTSUP); } if (so->so_state & SS_ISDISCONNECTED) { splx(s); SCTP_INP_RUNLOCK(inp); return (ECONNABORTED); } stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { splx(s); SCTP_INP_RUNLOCK(inp); return (ECONNRESET); } SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); store = stcb->asoc.primary_destination->ro._l_addr; SCTP_TCB_UNLOCK(stcb); if (store.sa.sa_family == AF_INET) { struct sockaddr_in *sin; SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_port = ((struct sockaddr_in *)&store)->sin_port; sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr; *addr = (struct sockaddr *)sin; } else { struct sockaddr_in6 *sin6; SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port; sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr; if ((error = sa6_recoverscope(sin6)) != 0) return (error); *addr = (struct sockaddr *)sin6; } /* Wake any delayed sleep action */ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; SOCKBUF_LOCK(&inp->sctp_socket->so_snd); if 
(sowriteable(inp->sctp_socket)) { sowwakeup_locked(inp->sctp_socket); } else { SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); } } if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); if (soreadable(inp->sctp_socket)) { sctp_defered_wakeup_cnt++; sorwakeup_locked(inp->sctp_socket); } else { SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); } } } splx(s); return (0); } int sctp_ingetaddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in *sin; int s; struct sctp_inpcb *inp; /* * Do the malloc first in case it blocks. */ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); s = splnet(); inp = (struct sctp_inpcb *)so->so_pcb; if (!inp) { splx(s); SCTP_FREE_SONAME(sin); return ECONNRESET; } SCTP_INP_RLOCK(inp); sin->sin_port = inp->sctp_lport; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { struct sctp_tcb *stcb; struct sockaddr_in *sin_a; struct sctp_nets *net; int fnd; stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { goto notConn; } fnd = 0; sin_a = NULL; SCTP_TCB_LOCK(stcb); TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a = (struct sockaddr_in *)&net->ro._l_addr; if (sin_a->sin_family == AF_INET) { fnd = 1; break; } } if ((!fnd) || (sin_a == NULL)) { /* punt */ SCTP_TCB_UNLOCK(stcb); goto notConn; } sin->sin_addr = sctp_ipv4_source_address_selection(inp, stcb, (struct route *)&net->ro, net, 0); SCTP_TCB_UNLOCK(stcb); } else { /* For the bound all case you get back 0 */ notConn: sin->sin_addr.s_addr = 0; } } else { /* Take the first IPv4 address in the list */ struct sctp_laddr *laddr; int fnd = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa->ifa_addr->sa_family == AF_INET) { struct sockaddr_in *sin_a; sin_a = (struct sockaddr_in *)laddr->ifa->ifa_addr; sin->sin_addr = sin_a->sin_addr; fnd = 1; break; } } if (!fnd) { 
splx(s); SCTP_FREE_SONAME(sin); SCTP_INP_RUNLOCK(inp); return ENOENT; } } SCTP_INP_RUNLOCK(inp); splx(s); (*addr) = (struct sockaddr *)sin; return (0); } int sctp_peeraddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in *sin = (struct sockaddr_in *)*addr; int s, fnd; struct sockaddr_in *sin_a; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; /* Do the malloc first in case it blocks. */ inp = (struct sctp_inpcb *)so->so_pcb; if ((inp == NULL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { /* UDP type and listeners will drop out here */ return (ENOTCONN); } s = splnet(); SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); /* We must recapture incase we blocked */ inp = (struct sctp_inpcb *)so->so_pcb; if (!inp) { splx(s); SCTP_FREE_SONAME(sin); return ECONNRESET; } SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); if (stcb == NULL) { splx(s); SCTP_FREE_SONAME(sin); return ECONNRESET; } fnd = 0; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a = (struct sockaddr_in *)&net->ro._l_addr; if (sin_a->sin_family == AF_INET) { fnd = 1; sin->sin_port = stcb->rport; sin->sin_addr = sin_a->sin_addr; break; } } SCTP_TCB_UNLOCK(stcb); if (!fnd) { /* No IPv4 address */ splx(s); SCTP_FREE_SONAME(sin); return ENOENT; } splx(s); (*addr) = (struct sockaddr *)sin; return (0); } struct pr_usrreqs sctp_usrreqs = { .pru_abort = sctp_abort, .pru_accept = sctp_accept, .pru_attach = sctp_attach, .pru_bind = sctp_bind, .pru_connect = sctp_connect, .pru_control = in_control, .pru_close = sctp_close, .pru_detach = sctp_close, .pru_sopoll = sopoll_generic, .pru_disconnect = sctp_disconnect, .pru_listen = sctp_listen, .pru_peeraddr = sctp_peeraddr, .pru_send = sctp_sendm, .pru_shutdown = sctp_shutdown, .pru_sockaddr = sctp_ingetaddr, .pru_sosend = sctp_sosend, .pru_soreceive = sctp_soreceive }; diff --git 
a/sys/netinet6/sctp6_usrreq.c b/sys/netinet6/sctp6_usrreq.c index 39d531b82ce9..34836587705b 100644 --- a/sys/netinet6/sctp6_usrreq.c +++ b/sys/netinet6/sctp6_usrreq.c @@ -1,1372 +1,1379 @@ /*- * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* $KAME: sctp6_usrreq.c,v 1.38 2005/08/24 08:08:56 suz Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_inet.h" #include "opt_ipsec.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IPSEC #include #include #endif /* IPSEC */ #if defined(NFAITH) && NFAITH > 0 #include #endif extern struct protosw inetsw[]; #ifndef in6pcb #define in6pcb inpcb #endif #ifndef sotoin6pcb #define sotoin6pcb sotoinpcb #endif #ifdef SCTP_DEBUG extern u_int32_t sctp_debug_on; #endif extern int sctp_no_csum_on_loopback; int sctp6_input(mp, offp, proto) struct mbuf **mp; int *offp; int proto; { struct mbuf *m = *mp; struct ip6_hdr *ip6; struct sctphdr *sh; struct sctp_inpcb *in6p = NULL; struct sctp_nets *net; int refcount_up = 0; u_int32_t check, calc_check; struct inpcb *in6p_ip; struct sctp_chunkhdr *ch; int length, mlen, offset, iphlen; u_int8_t ecn_bits; struct sctp_tcb *stcb = NULL; int off = *offp; int s; ip6 = mtod(m, struct ip6_hdr *); #ifndef PULLDOWN_TEST /* If PULLDOWN_TEST off, must be in a single mbuf. */ IP6_EXTHDR_CHECK(m, off, (int)(sizeof(*sh) + sizeof(*ch)), IPPROTO_DONE); sh = (struct sctphdr *)((caddr_t)ip6 + off); ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); #else /* Ensure that (sctphdr + sctp_chunkhdr) in a row. 
*/ IP6_EXTHDR_GET(sh, struct sctphdr *, m, off, sizeof(*sh) + sizeof(*ch)); if (sh == NULL) { SCTP_STAT_INCR(sctps_hdrops); return IPPROTO_DONE; } ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); #endif iphlen = off; offset = iphlen + sizeof(*sh) + sizeof(*ch); #if defined(NFAITH) && NFAITH > 0 if (faithprefix_p != NULL && (*faithprefix_p) (&ip6->ip6_dst)) { /* XXX send icmp6 host/port unreach? */ goto bad; } #endif /* NFAITH defined and > 0 */ SCTP_STAT_INCR(sctps_recvpackets); SCTP_STAT_INCR_COUNTER64(sctps_inpackets); #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_INPUT1) { printf("V6 input gets a packet iphlen:%d pktlen:%d\n", iphlen, m->m_pkthdr.len); } #endif if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { /* No multi-cast support in SCTP */ goto bad; } /* destination port of 0 is illegal, based on RFC2960. */ if (sh->dest_port == 0) goto bad; if ((sctp_no_csum_on_loopback == 0) || (m->m_pkthdr.rcvif == NULL) || (m->m_pkthdr.rcvif->if_type != IFT_LOOP)) { /* * we do NOT validate things from the loopback if the sysctl * is set to 1. 
*/ check = sh->checksum; /* save incoming checksum */ if ((check == 0) && (sctp_no_csum_on_loopback)) { /* * special hook for where we got a local address * somehow routed across a non IFT_LOOP type * interface */ if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6->ip6_dst)) goto sctp_skip_csum; } sh->checksum = 0; /* prepare for calc */ calc_check = sctp_calculate_sum(m, &mlen, iphlen); if (calc_check != check) { #ifdef SCTP_DEBUG if (sctp_debug_on & SCTP_DEBUG_INPUT1) { printf("Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", calc_check, check, m, mlen, iphlen); } #endif stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), sh, ch, &in6p, &net); /* in6p's ref-count increased && stcb locked */ if ((in6p) && (stcb)) { sctp_send_packet_dropped(stcb, net, m, iphlen, 1); sctp_chunk_output((struct sctp_inpcb *)in6p, stcb, 2); } else if ((in6p != NULL) && (stcb == NULL)) { refcount_up = 1; } SCTP_STAT_INCR(sctps_badsum); SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); goto bad; } sh->checksum = calc_check; } else { sctp_skip_csum: mlen = m->m_pkthdr.len; } net = NULL; /* * Locate pcb and tcb for datagram sctp_findassociation_addr() wants * IP/SCTP/first chunk header... */ stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), sh, ch, &in6p, &net); /* in6p's ref-count increased */ if (in6p == NULL) { struct sctp_init_chunk *init_chk, chunk_buf; SCTP_STAT_INCR(sctps_noport); if (ch->chunk_type == SCTP_INITIATION) { /* * we do a trick here to get the INIT tag, dig in * and get the tag from the INIT and put it in the * common header. */ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, iphlen + sizeof(*sh), sizeof(*init_chk), (u_int8_t *) & chunk_buf); sh->v_tag = init_chk->init.initiate_tag; } sctp_send_abort(m, iphlen, sh, 0, NULL); goto bad; } else if (stcb == NULL) { refcount_up = 1; } in6p_ip = (struct inpcb *)in6p; #ifdef IPSEC /* * Check AH/ESP integrity. 
*/ if (in6p_ip && (ipsec6_in_reject(m, in6p_ip))) { /* XXX */ ipsec6stat.in_polvio++; goto bad; } #endif /* IPSEC */ /* * CONTROL chunk processing */ length = ntohs(ip6->ip6_plen) + iphlen; offset -= sizeof(*ch); ecn_bits = ((ntohl(ip6->ip6_flow) >> 20) & 0x000000ff); s = splnet(); (void)sctp_common_input_processing(&m, iphlen, offset, length, sh, ch, in6p, stcb, net, ecn_bits); /* inp's ref-count reduced && stcb unlocked */ splx(s); /* XXX this stuff below gets moved to appropriate parts later... */ if (m) m_freem(m); if ((in6p) && refcount_up) { /* reduce ref-count */ SCTP_INP_WLOCK(in6p); SCTP_INP_DECR_REF(in6p); SCTP_INP_WUNLOCK(in6p); } return IPPROTO_DONE; bad: if (stcb) SCTP_TCB_UNLOCK(stcb); if ((in6p) && refcount_up) { /* reduce ref-count */ SCTP_INP_WLOCK(in6p); SCTP_INP_DECR_REF(in6p); SCTP_INP_WUNLOCK(in6p); } if (m) m_freem(m); return IPPROTO_DONE; } static void sctp6_notify_mbuf(struct sctp_inpcb *inp, struct icmp6_hdr *icmp6, struct sctphdr *sh, struct sctp_tcb *stcb, struct sctp_nets *net) { u_int32_t nxtsz; if ((inp == NULL) || (stcb == NULL) || (net == NULL) || (icmp6 == NULL) || (sh == NULL)) { goto out; } /* First do we even look at it? */ if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) goto out; if (icmp6->icmp6_type != ICMP6_PACKET_TOO_BIG) { /* not PACKET TO BIG */ goto out; } /* * ok we need to look closely. We could even get smarter and look at * anyone that we sent to in case we get a different ICMP that tells * us there is no way to reach a host, but for this impl, all we * care about is MTU discovery. */ nxtsz = ntohl(icmp6->icmp6_mtu); /* Stop any PMTU timer */ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL); /* Adjust destination size limit */ if (net->mtu > nxtsz) { net->mtu = nxtsz; } /* now what about the ep? 
*/ if (stcb->asoc.smallest_mtu > nxtsz) { struct sctp_tmit_chunk *chk; /* Adjust that too */ stcb->asoc.smallest_mtu = nxtsz; /* now off to subtract IP_DF flag if needed */ TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) { if ((u_int32_t) (chk->send_size + IP_HDR_SIZE) > nxtsz) { chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; } } TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { if ((u_int32_t) (chk->send_size + IP_HDR_SIZE) > nxtsz) { /* * For this guy we also mark for immediate * resend since we sent to big of chunk */ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; if (chk->sent != SCTP_DATAGRAM_RESEND) stcb->asoc.sent_queue_retran_cnt++; chk->sent = SCTP_DATAGRAM_RESEND; chk->rec.data.doing_fast_retransmit = 0; chk->sent = SCTP_DATAGRAM_RESEND; /* Clear any time so NO RTT is being done */ chk->sent_rcv_time.tv_sec = 0; chk->sent_rcv_time.tv_usec = 0; stcb->asoc.total_flight -= chk->send_size; net->flight_size -= chk->send_size; } } } sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL); out: if (stcb) SCTP_TCB_UNLOCK(stcb); } void sctp6_ctlinput(cmd, pktdst, d) int cmd; struct sockaddr *pktdst; void *d; { struct sctphdr sh; struct ip6ctlparam *ip6cp = NULL; int s, cm; if (pktdst->sa_family != AF_INET6 || pktdst->sa_len != sizeof(struct sockaddr_in6)) return; if ((unsigned)cmd >= PRC_NCMDS) return; if (PRC_IS_REDIRECT(cmd)) { d = NULL; } else if (inet6ctlerrmap[cmd] == 0) { return; } /* if the parameter is from icmp6, decode it. */ if (d != NULL) { ip6cp = (struct ip6ctlparam *)d; } else { ip6cp = (struct ip6ctlparam *)NULL; } if (ip6cp) { /* * XXX: We assume that when IPV6 is non NULL, M and OFF are * valid. 
*/ /* check if we can safely examine src and dst ports */ struct sctp_inpcb *inp = NULL; struct sctp_tcb *stcb = NULL; struct sctp_nets *net = NULL; struct sockaddr_in6 final; if (ip6cp->ip6c_m == NULL || (size_t)ip6cp->ip6c_m->m_pkthdr.len < (ip6cp->ip6c_off + sizeof(sh))) return; bzero(&sh, sizeof(sh)); bzero(&final, sizeof(final)); inp = NULL; net = NULL; m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(sh), (caddr_t)&sh); ip6cp->ip6c_src->sin6_port = sh.src_port; final.sin6_len = sizeof(final); final.sin6_family = AF_INET6; final.sin6_addr = ((struct sockaddr_in6 *)pktdst)->sin6_addr; final.sin6_port = sh.dest_port; s = splnet(); stcb = sctp_findassociation_addr_sa((struct sockaddr *)ip6cp->ip6c_src, (struct sockaddr *)&final, &inp, &net, 1); /* inp's ref-count increased && stcb locked */ if (stcb != NULL && inp && (inp->sctp_socket != NULL)) { if (cmd == PRC_MSGSIZE) { sctp6_notify_mbuf(inp, ip6cp->ip6c_icmp6, &sh, stcb, net); /* inp's ref-count reduced && stcb unlocked */ } else { if (cmd == PRC_HOSTDEAD) { cm = EHOSTUNREACH; } else { cm = inet6ctlerrmap[cmd]; } sctp_notify(inp, cm, &sh, (struct sockaddr *)&final, stcb, net); /* inp's ref-count reduced && stcb unlocked */ } } else { if (PRC_IS_REDIRECT(cmd) && inp) { in6_rtchange((struct in6pcb *)inp, inet6ctlerrmap[cmd]); } if (inp) { /* reduce inp's ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } if (stcb) SCTP_TCB_UNLOCK(stcb); } splx(s); } } /* * this routine can probably be collasped into the one in sctp_userreq.c * since they do the same thing and now we lookup with a sockaddr */ static int sctp6_getcred(SYSCTL_HANDLER_ARGS) { struct sockaddr_in6 addrs[2]; struct sctp_inpcb *inp; struct sctp_nets *net; struct sctp_tcb *stcb; int error, s; - error = suser(req->td); + /* + * XXXRW: Other instances of getcred use SUSER_ALLOWJAIL, as socket + * visibility is scoped using cr_canseesocket(), which it is not + * here. 
+ */ + error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_RESERVEDPORT, + 0); if (error) return (error); if (req->newlen != sizeof(addrs)) return (EINVAL); if (req->oldlen != sizeof(struct ucred)) return (EINVAL); error = SYSCTL_IN(req, addrs, sizeof(addrs)); if (error) return (error); s = splnet(); stcb = sctp_findassociation_addr_sa(sin6tosa(&addrs[0]), sin6tosa(&addrs[1]), &inp, &net, 1); if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { error = ENOENT; if (inp) { SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } goto out; } error = SYSCTL_OUT(req, inp->sctp_socket->so_cred, sizeof(struct ucred)); SCTP_TCB_UNLOCK(stcb); out: splx(s); return (error); } SYSCTL_PROC(_net_inet6_sctp6, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, 0, 0, sctp6_getcred, "S,ucred", "Get the ucred of a SCTP6 connection"); /* This is the same as the sctp_abort() could be made common */ static void sctp6_abort(struct socket *so) { struct sctp_inpcb *inp; int s; uint32_t flags; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) return; s = splnet(); sctp_must_try_again: flags = inp->sctp_flags; #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 17); #endif if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 16); #endif sctp_inpcb_free(inp, 1, 0); SOCK_LOCK(so); so->so_snd.sb_cc = 0; so->so_snd.sb_mb = NULL; so->so_snd.sb_mbcnt = 0; /* * same for the rcv ones, they are only here for the * accounting/select. */ so->so_rcv.sb_cc = 0; so->so_rcv.sb_mb = NULL; so->so_rcv.sb_mbcnt = 0; /* * Now null out the reference, we are completely detached. 
		 */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		/* lost the cmpset race; retry unless someone else won */
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	splx(s);
	return;
}

/* Allocate and initialize the SCTP PCB for a newly created IPv6 socket. */
static int
sctp6_attach(struct socket *so, int proto, struct thread *p)
{
	struct in6pcb *inp6;
	int s, error;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp != NULL)
		return EINVAL;

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		error = soreserve(so, sctp_sendspace, sctp_recvspace);
		if (error)
			return error;
	}
	s = splnet();
	error = sctp_inpcb_alloc(so);
	splx(s);
	if (error)
		return error;
	inp = (struct sctp_inpcb *)so->so_pcb;
	inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_V6;	/* I'm v6! */
	inp6 = (struct in6pcb *)inp;

	inp6->inp_vflag |= INP_IPV6;
	inp6->in6p_hops = -1;	/* use kernel default */
	inp6->in6p_cksum = -1;	/* just to be sure */
#ifdef INET
	/*
	 * XXX: ugly!! IPv4 TTL initialization is necessary for an IPv6
	 * socket as well, because the socket may be bound to an IPv6
	 * wildcard address, which may match an IPv4-mapped IPv6 address.
	 */
	inp6->inp_ip_ttl = ip_defttl;
#endif
	/*
	 * Hmm what about the IPSEC stuff that is missing here but in
	 * sctp_attach()?
	 */
	return 0;
}

/*
 * Bind the socket; v4, v4-mapped and wildcard addresses get the inp
 * vflag adjusted so lookups see the right address family.
 */
static int
sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
{
	struct sctp_inpcb *inp;
	struct in6pcb *inp6;
	int s, error;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return EINVAL;

	inp6 = (struct in6pcb *)inp;
	inp6->inp_vflag &= ~INP_IPV4;
	inp6->inp_vflag |= INP_IPV6;
	if (addr != NULL && (inp6->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
		if (addr->sa_family == AF_INET) {
			/* binding v4 addr to v6 socket, so reset flags */
			inp6->inp_vflag |= INP_IPV4;
			inp6->inp_vflag &= ~INP_IPV6;
		} else {
			struct sockaddr_in6 *sin6_p;

			sin6_p = (struct sockaddr_in6 *)addr;

			if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr)) {
				/* wildcard may also match v4 traffic */
				inp6->inp_vflag |= INP_IPV4;
			} else if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) {
				struct sockaddr_in sin;

				/* bind as plain v4 instead */
				in6_sin6_2_sin(&sin, sin6_p);
				inp6->inp_vflag |= INP_IPV4;
				inp6->inp_vflag &= ~INP_IPV6;
				s = splnet();
				error = sctp_inpcb_bind(so,
				    (struct sockaddr *)&sin, p);
				splx(s);
				return error;
			}
		}
	} else if (addr != NULL) {
		/* IPV6_V6ONLY socket */
		if (addr->sa_family == AF_INET) {
			/* can't bind v4 addr to v6 only socket! */
			return EINVAL;
		} else {
			struct sockaddr_in6 *sin6_p;

			sin6_p = (struct sockaddr_in6 *)addr;

			if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr))
				/* can't bind v4-mapped addrs either! */
				/* NOTE: we don't support SIIT */
				return EINVAL;
		}
	}
	s = splnet();
	error = sctp_inpcb_bind(so, addr, p);
	splx(s);
	return error;
}

/* Tear down the endpoint on close(2); mirrors sctp6_abort plus lingering. */
static void
sctp6_close(struct socket *so)
{
	struct sctp_inpcb *inp;
	uint32_t flags;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == 0)
		return;

	/*
	 * Inform all the lower layer assoc that we are done.
	 */
sctp_must_try_again:
	flags = inp->sctp_flags;
#ifdef SCTP_LOG_CLOSING
	sctp_log_closing(inp, NULL, 17);
#endif
	/* Atomically mark the PCB gone; retry on a lost cmpset race. */
	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
		    (so->so_rcv.sb_cc > 0)) {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 13);
#endif
			/* abortive close: immediate free */
			sctp_inpcb_free(inp, 1, 1);
		} else {
#ifdef SCTP_LOG_CLOSING
			sctp_log_closing(inp, NULL, 14);
#endif
			/* graceful close */
			sctp_inpcb_free(inp, 0, 1);
		}
		/*
		 * The socket is now detached, no matter what the state of
		 * the SCTP association.
		 */
		SOCK_LOCK(so);
		so->so_snd.sb_cc = 0;
		so->so_snd.sb_mb = NULL;
		so->so_snd.sb_mbcnt = 0;
		/*
		 * same for the rcv ones, they are only here for the
		 * accounting/select.
		 */
		so->so_rcv.sb_cc = 0;
		so->so_rcv.sb_mb = NULL;
		so->so_rcv.sb_mbcnt = 0;
		/*
		 * Now null out the reference, we are completely detached.
		 */
		so->so_pcb = NULL;
		SOCK_UNLOCK(so);
	} else {
		flags = inp->sctp_flags;
		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
			goto sctp_must_try_again;
		}
	}
	return;
}

/*
 * Graceful disconnect for the TCP (one-to-one) model: abort if lingering
 * with unread data, otherwise SHUTDOWN now or mark SHUTDOWN_PENDING.
 */
static int
sctp6_disconnect(struct socket *so)
{
	struct sctp_inpcb *inp;
	int s;

	s = splnet();		/* XXX */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		splx(s);
		return (ENOTCONN);
	}
	SCTP_INP_RLOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* No connection */
			splx(s);
			SCTP_INP_RUNLOCK(inp);
			return (ENOTCONN);
		} else {
			int some_on_streamwheel = 0;
			struct sctp_association *asoc;
			struct sctp_tcb *stcb;

			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			if (stcb == NULL) {
				splx(s);
				SCTP_INP_RUNLOCK(inp);
				return (EINVAL);
			}
			SCTP_TCB_LOCK(stcb);
			asoc = &stcb->asoc;
			if (((so->so_options & SO_LINGER) &&
			    (so->so_linger == 0)) ||
			    (so->so_rcv.sb_cc > 0)) {
				if (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_COOKIE_WAIT) {
					/* Left with Data unread */
					struct mbuf *err;

					err = NULL;
					MGET(err, M_DONTWAIT, MT_DATA);
					if (err) {
						/*
						 * Fill in the user
						 *
						 * initiated abort
						 */
						struct sctp_paramhdr *ph;

						ph = mtod(err,
						    struct sctp_paramhdr *);
						err->m_len =
						    sizeof(struct sctp_paramhdr);
						ph->param_type =
						    htons(SCTP_CAUSE_USER_INITIATED_ABT);
						ph->param_length =
						    htons(err->m_len);
					}
					sctp_send_abort_tcb(stcb, err);
					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
				}
				SCTP_INP_RUNLOCK(inp);
				if ((SCTP_GET_STATE(&stcb->asoc) ==
				    SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(&stcb->asoc) ==
				    SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				sctp_free_assoc(inp, stcb, 0);
				/* No unlock tcb assoc is gone */
				splx(s);
				return (0);
			}
			if (!TAILQ_EMPTY(&asoc->out_wheel)) {
				/* Check to see if some data queued */
				struct sctp_stream_out *outs;

				TAILQ_FOREACH(outs, &asoc->out_wheel,
				    next_spoke) {
					if (!TAILQ_EMPTY(&outs->outqueue)) {
						some_on_streamwheel = 1;
						break;
					}
				}
			}
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (some_on_streamwheel == 0)) {
				/* nothing queued to send, so I'm done... */
				if ((SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					/* only send SHUTDOWN the first time */
					sctp_send_shutdown(stcb,
					    stcb->asoc.primary_destination);
					sctp_chunk_output(stcb->sctp_ep, stcb, 1);
					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			} else {
				/*
				 * we still got (or just got) data to send,
				 * so set SHUTDOWN_PENDING
				 */
				/*
				 * XXX sockets draft says that MSG_EOF
				 * should be sent with no data.
currently, * we will allow user data to be sent first * and move to SHUTDOWN-PENDING */ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; } SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); splx(s); return (0); } } else { /* UDP model does not support this */ SCTP_INP_RUNLOCK(inp); splx(s); return EOPNOTSUPP; } } int sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p); static int sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p) { struct sctp_inpcb *inp; struct inpcb *in_inp; struct in6pcb *inp6; #ifdef INET struct sockaddr_in6 *sin6; #endif /* INET */ /* No SPL needed since sctp_output does this */ inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { if (control) { m_freem(control); control = NULL; } m_freem(m); return EINVAL; } in_inp = (struct inpcb *)inp; inp6 = (struct in6pcb *)inp; /* * For the TCP model we may get a NULL addr, if we are a connected * socket thats ok. */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) && (addr == NULL)) { goto connected_type; } if (addr == NULL) { m_freem(m); if (control) { m_freem(control); control = NULL; } return (EDESTADDRREQ); } #ifdef INET sin6 = (struct sockaddr_in6 *)addr; if ( (inp6->inp_flags & IN6P_IPV6_V6ONLY) ) { /* * if IPV6_V6ONLY flag, we discard datagrams destined to a * v4 addr or v4-mapped addr */ if (addr->sa_family == AF_INET) { return EINVAL; } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { return EINVAL; } } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { if (!ip6_v6only) { struct sockaddr_in sin; /* convert v4-mapped into v4 addr and send */ in6_sin6_2_sin(&sin, sin6); return sctp_sendm(so, flags, m, (struct sockaddr *)&sin, control, p); } else { /* mapped addresses aren't enabled */ return EINVAL; } } #endif /* INET */ connected_type: /* now what about control */ if (control) { if (inp->control) { printf("huh? 
control set?\n"); m_freem(inp->control); inp->control = NULL; } inp->control = control; } /* add it in possibly */ if ((inp->pkt) && (inp->pkt->m_flags & M_PKTHDR)) { struct mbuf *x; int c_len; c_len = 0; /* How big is it */ for (x = m; x; x = x->m_next) { c_len += x->m_len; } inp->pkt->m_pkthdr.len += c_len; } /* Place the data */ if (inp->pkt) { inp->pkt_last->m_next = m; inp->pkt_last = m; } else { inp->pkt_last = inp->pkt = m; } if ( /* FreeBSD and MacOSX uses a flag passed */ ((flags & PRUS_MORETOCOME) == 0) ) { /* * note with the current version this code will only be used * by OpenBSD, NetBSD and FreeBSD have methods for * re-defining sosend() to use sctp_sosend(). One can * optionaly switch back to this code (by changing back the * defininitions but this is not advisable. */ int ret; ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); inp->pkt = NULL; inp->control = NULL; return (ret); } else { return (0); } } static int sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p) { int s = splnet(); int error = 0; struct sctp_inpcb *inp; struct in6pcb *inp6; struct sctp_tcb *stcb; #ifdef INET struct sockaddr_in6 *sin6; struct sockaddr_storage ss; #endif /* INET */ inp6 = (struct in6pcb *)so->so_pcb; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == 0) { splx(s); return (ECONNRESET); /* I made the same as TCP since we are * not setup? 
					 */
	}
	SCTP_ASOC_CREATE_LOCK(inp);
	SCTP_INP_RLOCK(inp);
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
	    SCTP_PCB_FLAGS_UNBOUND) {
		/* Bind a ephemeral port */
		SCTP_INP_RUNLOCK(inp);
		error = sctp6_bind(so, NULL, p);
		if (error) {
			splx(s);
			SCTP_ASOC_CREATE_UNLOCK(inp);
			return (error);
		}
		SCTP_INP_RLOCK(inp);
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		/* We are already connected AND the TCP model */
		splx(s);
		SCTP_INP_RUNLOCK(inp);
		SCTP_ASOC_CREATE_UNLOCK(inp);
		return (EADDRINUSE);
	}
#ifdef INET
	sin6 = (struct sockaddr_in6 *)addr;
	if ((inp6->inp_flags & IN6P_IPV6_V6ONLY)) {
		/*
		 * if IPV6_V6ONLY flag, ignore connections destined to a v4
		 * addr or v4-mapped addr
		 */
		if (addr->sa_family == AF_INET) {
			splx(s);
			SCTP_INP_RUNLOCK(inp);
			SCTP_ASOC_CREATE_UNLOCK(inp);
			return EINVAL;
		}
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			splx(s);
			SCTP_INP_RUNLOCK(inp);
			SCTP_ASOC_CREATE_UNLOCK(inp);
			return EINVAL;
		}
	}
	if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
		if (!ip6_v6only) {
			/* convert v4-mapped into v4 addr */
			in6_sin6_2_sin((struct sockaddr_in *)&ss, sin6);
			addr = (struct sockaddr *)&ss;
		} else {
			/* mapped addresses aren't enabled */
			splx(s);
			SCTP_INP_RUNLOCK(inp);
			SCTP_ASOC_CREATE_UNLOCK(inp);
			return EINVAL;
		}
	} else
#endif				/* INET */
		/* no-op self-assignment keeps the #ifdef else-arm legal */
		addr = addr;	/* for true v6 address case */

	/* Now do we connect?
	 */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb)
			SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_RUNLOCK(inp);
	} else {
		/*
		 * Look up an existing assoc to this peer; the lookup needs
		 * an inp ref which is dropped again if nothing is found.
		 */
		SCTP_INP_RUNLOCK(inp);
		SCTP_INP_WLOCK(inp);
		SCTP_INP_INCR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
	}

	if (stcb != NULL) {
		/* Already have or am bring up an association */
		SCTP_ASOC_CREATE_UNLOCK(inp);
		SCTP_TCB_UNLOCK(stcb);
		splx(s);
		return (EALREADY);
	}
	/* We are GOOD to go */
	stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
	SCTP_ASOC_CREATE_UNLOCK(inp);
	if (stcb == NULL) {
		/* Gak! no memory */
		splx(s);
		return (error);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
		/* Set the connected flag so we can queue data */
		soisconnecting(so);
	}
	stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
	SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);

	/* initialize authentication parameters for the assoc */
	sctp_initialize_auth_params(inp, stcb);

	sctp_send_initiate(inp, stcb);
	SCTP_TCB_UNLOCK(stcb);
	splx(s);
	return error;
}

/*
 * Report the local IPv6 address of the socket: the selected source
 * address when connected and bound-all, zero for an unconnected
 * bound-all socket, otherwise the first IPv6 address bound.
 */
static int
sctp6_getaddr(struct socket *so, struct sockaddr **addr)
{
	struct sockaddr_in6 *sin6;
	struct sctp_inpcb *inp;
	int error;

	/*
	 * Do the malloc first in case it blocks.
*/ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_FREE_SONAME(sin6); return ECONNRESET; } SCTP_INP_RLOCK(inp); sin6->sin6_port = inp->sctp_lport; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* For the bound all case you get back 0 */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { struct sctp_tcb *stcb; struct sockaddr_in6 *sin_a6; struct sctp_nets *net; int fnd; stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { goto notConn6; } fnd = 0; sin_a6 = NULL; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr; if (sin_a6->sin6_family == AF_INET6) { fnd = 1; break; } } if ((!fnd) || (sin_a6 == NULL)) { /* punt */ goto notConn6; } sin6->sin6_addr = sctp_ipv6_source_address_selection( inp, stcb, (struct route *)&net->ro, net, 0); } else { /* For the bound all case you get back 0 */ notConn6: memset(&sin6->sin6_addr, 0, sizeof(sin6->sin6_addr)); } } else { /* Take the first IPv6 address in the list */ struct sctp_laddr *laddr; int fnd = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa->ifa_addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin_a; sin_a = (struct sockaddr_in6 *)laddr->ifa->ifa_addr; sin6->sin6_addr = sin_a->sin6_addr; fnd = 1; break; } } if (!fnd) { SCTP_FREE_SONAME(sin6); SCTP_INP_RUNLOCK(inp); return ENOENT; } } SCTP_INP_RUNLOCK(inp); /* Scoping things for v6 */ if ((error = sa6_recoverscope(sin6)) != 0) return (error); (*addr) = (struct sockaddr *)sin6; return (0); } static int sctp6_peeraddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)*addr; int fnd; struct sockaddr_in6 *sin_a6; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; int error; /* * Do the malloc first in case it blocks. 
*/ inp = (struct sctp_inpcb *)so->so_pcb; if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { /* UDP type and listeners will drop out here */ return (ENOTCONN); } SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); /* We must recapture incase we blocked */ inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_FREE_SONAME(sin6); return ECONNRESET; } SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); if (stcb == NULL) { SCTP_FREE_SONAME(sin6); return ECONNRESET; } fnd = 0; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr; if (sin_a6->sin6_family == AF_INET6) { fnd = 1; sin6->sin6_port = stcb->rport; sin6->sin6_addr = sin_a6->sin6_addr; break; } } SCTP_TCB_UNLOCK(stcb); if (!fnd) { /* No IPv4 address */ SCTP_FREE_SONAME(sin6); return ENOENT; } if ((error = sa6_recoverscope(sin6)) != 0) return (error); *addr = (struct sockaddr *)sin6; return (0); } static int sctp6_in6getaddr(struct socket *so, struct sockaddr **nam) { struct sockaddr *addr; struct in6pcb *inp6 = sotoin6pcb(so); int error, s; if (inp6 == NULL) return EINVAL; s = splnet(); /* allow v6 addresses precedence */ error = sctp6_getaddr(so, nam); if (error) { /* try v4 next if v6 failed */ error = sctp_ingetaddr(so, nam); if (error) { splx(s); return (error); } addr = *nam; /* if I'm V6ONLY, convert it to v4-mapped */ if ( (inp6->inp_flags & IN6P_IPV6_V6ONLY) ) { struct sockaddr_in6 sin6; in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6); memcpy(addr, &sin6, sizeof(struct sockaddr_in6)); } } splx(s); return (error); } static int sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam) { struct sockaddr *addr = *nam; struct in6pcb *inp6 = sotoin6pcb(so); int error, s; if (inp6 == NULL) return EINVAL; s = splnet(); /* allow v6 addresses precedence */ error = sctp6_peeraddr(so, nam); if (error) { /* try v4 
		 * next if v6 failed */
		error = sctp_peeraddr(so, nam);
		if (error) {
			splx(s);
			return (error);
		}
		/* if I'm V6ONLY, convert it to v4-mapped */
		if ((inp6->inp_flags & IN6P_IPV6_V6ONLY)) {
			struct sockaddr_in6 sin6;

			in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6);
			memcpy(addr, &sin6, sizeof(struct sockaddr_in6));
		}
	}
	splx(s);
	return error;
}

/*
 * Protocol user-request switch for SCTP over IPv6.  Note that several
 * entries (accept, listen, shutdown, sosend, soreceive) are shared with
 * the IPv4 implementation, and close doubles as detach.
 */
struct pr_usrreqs sctp6_usrreqs = {
	.pru_abort = sctp6_abort,
	.pru_accept = sctp_accept,
	.pru_attach = sctp6_attach,
	.pru_bind = sctp6_bind,
	.pru_connect = sctp6_connect,
	.pru_control = in6_control,
	.pru_close = sctp6_close,
	.pru_detach = sctp6_close,
	.pru_sopoll = sopoll_generic,
	.pru_disconnect = sctp6_disconnect,
	.pru_listen = sctp_listen,
	.pru_peeraddr = sctp6_getpeeraddr,
	.pru_send = sctp6_send,
	.pru_shutdown = sctp_shutdown,
	.pru_sockaddr = sctp6_in6getaddr,
	.pru_sosend = sctp_sosend,
	.pru_soreceive = sctp_soreceive
};