Index: head/sys/netinet/sctp_asconf.c
===================================================================
--- head/sys/netinet/sctp_asconf.c	(revision 350587)
+++ head/sys/netinet/sctp_asconf.c	(revision 350588)
@@ -1,3483 +1,3482 @@
/*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_lock_bsd.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_timer.h>
/* * debug flags: * SCTP_DEBUG_ASCONF1: protocol info, general info and errors * SCTP_DEBUG_ASCONF2: detailed info */ /* * RFC 5061 * * An ASCONF parameter queue exists per asoc which holds the pending address * operations. Lists are updated upon receipt of ASCONF-ACK. * * A restricted_addrs list exists per assoc to hold local addresses that are * not (yet) usable by the assoc as a source address. These addresses are * either pending an ASCONF operation (and exist on the ASCONF parameter * queue), or they are permanently restricted (the peer has returned an * ERROR indication to an ASCONF(ADD), or the peer does not support ASCONF). * * Deleted addresses are always immediately removed from the lists as they will * (shortly) no longer exist in the kernel. We send ASCONFs as a courtesy, * only if allowed. */ /* * ASCONF parameter processing. * response_required: set if a reply is required (eg. SUCCESS_REPORT). * returns an mbuf to an "error" response parameter or NULL/"success" if ok. * FIX: allocating this many mbufs on the fly is pretty inefficient...
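 *
 * Rough wire layout of each reply parameter built below (this mirrors the
 * structs the code uses; RFC 5061 has the normative format):
 *
 *   struct sctp_asconf_paramhdr {
 *       struct sctp_paramhdr ph;     -- type: SCTP_SUCCESS_REPORT or
 *                                       SCTP_ERROR_CAUSE_IND
 *       uint32_t correlation_id;     -- echoed back from the request
 *   };
 *
 * and, in the error case, a struct sctp_error_cause plus a copy of the
 * offending TLV follow the header.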
*/ static struct mbuf * sctp_asconf_success_response(uint32_t id) { struct mbuf *m_reply = NULL; struct sctp_asconf_paramhdr *aph; m_reply = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_paramhdr), 0, M_NOWAIT, 1, MT_DATA); if (m_reply == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_success_response: couldn't get mbuf!\n"); return (NULL); } aph = mtod(m_reply, struct sctp_asconf_paramhdr *); aph->correlation_id = id; aph->ph.param_type = htons(SCTP_SUCCESS_REPORT); aph->ph.param_length = sizeof(struct sctp_asconf_paramhdr); SCTP_BUF_LEN(m_reply) = aph->ph.param_length; aph->ph.param_length = htons(aph->ph.param_length); return (m_reply); } static struct mbuf * sctp_asconf_error_response(uint32_t id, uint16_t cause, uint8_t *error_tlv, uint16_t tlv_length) { struct mbuf *m_reply = NULL; struct sctp_asconf_paramhdr *aph; struct sctp_error_cause *error; uint8_t *tlv; m_reply = sctp_get_mbuf_for_msg((sizeof(struct sctp_asconf_paramhdr) + tlv_length + sizeof(struct sctp_error_cause)), 0, M_NOWAIT, 1, MT_DATA); if (m_reply == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_error_response: couldn't get mbuf!\n"); return (NULL); } aph = mtod(m_reply, struct sctp_asconf_paramhdr *); error = (struct sctp_error_cause *)(aph + 1); aph->correlation_id = id; aph->ph.param_type = htons(SCTP_ERROR_CAUSE_IND); error->code = htons(cause); error->length = tlv_length + sizeof(struct sctp_error_cause); aph->ph.param_length = error->length + sizeof(struct sctp_asconf_paramhdr); if (aph->ph.param_length > MLEN) { SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_error_response: tlv_length (%xh) too big\n", tlv_length); sctp_m_freem(m_reply); /* discard */ return (NULL); } if (error_tlv != NULL) { tlv = (uint8_t *)(error + 1); memcpy(tlv, error_tlv, tlv_length); } SCTP_BUF_LEN(m_reply) = aph->ph.param_length; error->length = htons(error->length); aph->ph.param_length = htons(aph->ph.param_length); return (m_reply); } static struct mbuf * sctp_process_asconf_add_ip(struct sockaddr *src, struct sctp_asconf_paramhdr *aph, struct sctp_tcb *stcb, int send_hb, int response_required) { struct sctp_nets *net; struct mbuf *m_reply = NULL; union sctp_sockstore store; struct sctp_paramhdr *ph; uint16_t param_type, aparam_length; #if defined(INET) || defined(INET6) uint16_t param_length; #endif struct sockaddr *sa; int zero_address = 0; int bad_address = 0; #ifdef INET struct sockaddr_in *sin; struct sctp_ipv4addr_param *v4addr; #endif #ifdef INET6 struct sockaddr_in6 *sin6; struct sctp_ipv6addr_param *v6addr; #endif aparam_length = ntohs(aph->ph.param_length); ph = (struct sctp_paramhdr *)(aph + 1); param_type = ntohs(ph->param_type); #if defined(INET) || defined(INET6) param_length = ntohs(ph->param_length); #endif sa = &store.sa; switch (param_type) { #ifdef INET case SCTP_IPV4_ADDRESS: if (param_length != sizeof(struct sctp_ipv4addr_param)) { /* invalid param size */ return (NULL); } v4addr = (struct sctp_ipv4addr_param *)ph; sin = &store.sin; memset(sin, 0, sizeof(*sin)); sin->sin_family = AF_INET; sin->sin_len = sizeof(struct sockaddr_in); sin->sin_port = stcb->rport; sin->sin_addr.s_addr = v4addr->addr; if ((sin->sin_addr.s_addr == INADDR_BROADCAST) || IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { bad_address = 1; } if (sin->sin_addr.s_addr == INADDR_ANY) zero_address = 1; SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa); break; #endif #ifdef INET6 case SCTP_IPV6_ADDRESS: if (param_length != sizeof(struct sctp_ipv6addr_param)) { /* invalid param size */ return (NULL); } v6addr = 
(struct sctp_ipv6addr_param *)ph; sin6 = &store.sin6; memset(sin6, 0, sizeof(*sin6)); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(struct sockaddr_in6); sin6->sin6_port = stcb->rport; memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr, sizeof(struct in6_addr)); if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { bad_address = 1; } if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) zero_address = 1; SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa); break; #endif default: m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_INVALID_PARAM, (uint8_t *)aph, aparam_length); return (m_reply); } /* end switch */ /* if 0.0.0.0/::0, add the source address instead */ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) { sa = src; SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: using source addr "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src); } /* add the address */ if (bad_address) { m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_INVALID_PARAM, (uint8_t *)aph, aparam_length); } else if (sctp_add_remote_addr(stcb, sa, &net, stcb->asoc.port, SCTP_DONOT_SETSCOPE, SCTP_ADDR_DYNAMIC_ADDED) != 0) { SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: error adding address\n"); m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_RESOURCE_SHORTAGE, (uint8_t *)aph, aparam_length); } else { /* notify upper layer */ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_ADD_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED); if (response_required) { m_reply = sctp_asconf_success_response(aph->correlation_id); } sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); if (send_hb) { sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); } } return (m_reply); } static int sctp_asconf_del_remote_addrs_except(struct sctp_tcb *stcb, struct sockaddr *src) { struct sctp_nets *src_net, *net; /* make sure the source address exists as a destination net */ src_net = sctp_findnet(stcb, src); if (src_net == NULL) { /* not found */ return (-1); } /* delete all destination addresses except the source */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net != src_net) { /* delete this address */ sctp_remove_net(stcb, net); SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_del_remote_addrs_except: deleting "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, (struct sockaddr *)&net->ro._l_addr); /* notify upper layer */ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0, (struct sockaddr *)&net->ro._l_addr, SCTP_SO_NOT_LOCKED); } } return (0); } static struct mbuf * sctp_process_asconf_delete_ip(struct sockaddr *src, struct sctp_asconf_paramhdr *aph, struct sctp_tcb *stcb, int response_required) { struct mbuf *m_reply = NULL; union sctp_sockstore store; struct sctp_paramhdr *ph; uint16_t param_type, aparam_length; #if defined(INET) || defined(INET6) uint16_t param_length; #endif struct sockaddr *sa; int zero_address = 0; int result; #ifdef INET struct sockaddr_in *sin; struct sctp_ipv4addr_param *v4addr; #endif #ifdef INET6 struct sockaddr_in6 *sin6; struct sctp_ipv6addr_param *v6addr; #endif aparam_length = ntohs(aph->ph.param_length); ph = (struct sctp_paramhdr *)(aph + 1); param_type = ntohs(ph->param_type); #if defined(INET) || defined(INET6) param_length = ntohs(ph->param_length); #endif sa = &store.sa; switch (param_type) { #ifdef INET case SCTP_IPV4_ADDRESS: if (param_length != sizeof(struct sctp_ipv4addr_param)) { /* invalid param size */ return (NULL); } v4addr = (struct sctp_ipv4addr_param 
*)ph; sin = &store.sin; memset(sin, 0, sizeof(*sin)); sin->sin_family = AF_INET; sin->sin_len = sizeof(struct sockaddr_in); sin->sin_port = stcb->rport; sin->sin_addr.s_addr = v4addr->addr; if (sin->sin_addr.s_addr == INADDR_ANY) zero_address = 1; SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: deleting "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa); break; #endif #ifdef INET6 case SCTP_IPV6_ADDRESS: if (param_length != sizeof(struct sctp_ipv6addr_param)) { /* invalid param size */ return (NULL); } v6addr = (struct sctp_ipv6addr_param *)ph; sin6 = &store.sin6; memset(sin6, 0, sizeof(*sin6)); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(struct sockaddr_in6); sin6->sin6_port = stcb->rport; memcpy(&sin6->sin6_addr, v6addr->addr, sizeof(struct in6_addr)); if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) zero_address = 1; SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: deleting "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa); break; #endif default: m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *)aph, aparam_length); return (m_reply); } /* make sure the source address is not being deleted */ if (sctp_cmpaddr(sa, src)) { /* trying to delete the source address! */ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete source addr\n"); m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_DELETING_SRC_ADDR, (uint8_t *)aph, aparam_length); return (m_reply); } /* if deleting 0.0.0.0/::0, delete all addresses except src addr */ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) { result = sctp_asconf_del_remote_addrs_except(stcb, src); if (result) { /* src address did not exist? */ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: src addr does not exist?\n"); /* what error to reply with?? 
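 * (RFC 5061 does not prescribe a cause code for this situation;
 * SCTP_CAUSE_REQUEST_REFUSED below is simply the closest available fit.)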
*/ m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_REQUEST_REFUSED, (uint8_t *)aph, aparam_length); } else if (response_required) { m_reply = sctp_asconf_success_response(aph->correlation_id); } return (m_reply); } /* delete the address */ result = sctp_del_remote_addr(stcb, sa); /* * note if result == -2, the address doesn't exist in the asoc but * since it's being deleted anyways, we just ack the delete -- but * this probably means something has already gone awry */ if (result == -1) { /* only one address in the asoc */ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete last IP addr!\n"); m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_DELETING_LAST_ADDR, (uint8_t *)aph, aparam_length); } else { if (response_required) { m_reply = sctp_asconf_success_response(aph->correlation_id); } /* notify upper layer */ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED); } return (m_reply); } static struct mbuf * sctp_process_asconf_set_primary(struct sockaddr *src, struct sctp_asconf_paramhdr *aph, struct sctp_tcb *stcb, int response_required) { struct mbuf *m_reply = NULL; union sctp_sockstore store; struct sctp_paramhdr *ph; uint16_t param_type, aparam_length; #if defined(INET) || defined(INET6) uint16_t param_length; #endif struct sockaddr *sa; int zero_address = 0; #ifdef INET struct sockaddr_in *sin; struct sctp_ipv4addr_param *v4addr; #endif #ifdef INET6 struct sockaddr_in6 *sin6; struct sctp_ipv6addr_param *v6addr; #endif aparam_length = ntohs(aph->ph.param_length); ph = (struct sctp_paramhdr *)(aph + 1); param_type = ntohs(ph->param_type); #if defined(INET) || defined(INET6) param_length = ntohs(ph->param_length); #endif sa = &store.sa; switch (param_type) { #ifdef INET case SCTP_IPV4_ADDRESS: if (param_length != sizeof(struct sctp_ipv4addr_param)) { /* invalid param size */ return (NULL); } v4addr = (struct sctp_ipv4addr_param *)ph; sin = &store.sin; memset(sin, 0, sizeof(*sin)); sin->sin_family = AF_INET; sin->sin_len = sizeof(struct sockaddr_in); sin->sin_addr.s_addr = v4addr->addr; if (sin->sin_addr.s_addr == INADDR_ANY) zero_address = 1; SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa); break; #endif #ifdef INET6 case SCTP_IPV6_ADDRESS: if (param_length != sizeof(struct sctp_ipv6addr_param)) { /* invalid param size */ return (NULL); } v6addr = (struct sctp_ipv6addr_param *)ph; sin6 = &store.sin6; memset(sin6, 0, sizeof(*sin6)); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(struct sockaddr_in6); memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr, sizeof(struct in6_addr)); if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) zero_address = 1; SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa); break; #endif default: m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *)aph, aparam_length); return (m_reply); } /* if 0.0.0.0/::0, use the source address instead */ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) { sa = src; SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: using source addr "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src); } /* set the primary address */ if (sctp_set_primary_addr(stcb, sa, NULL) == 0) { SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: primary address set\n"); /* notify upper layer */ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_SET_PRIMARY, stcb, 0, sa, SCTP_SO_NOT_LOCKED); if ((stcb->asoc.primary_destination->dest_state & 
SCTP_ADDR_REACHABLE) && (!(stcb->asoc.primary_destination->dest_state & SCTP_ADDR_PF)) && (stcb->asoc.alternate)) { sctp_free_remote_addr(stcb->asoc.alternate); stcb->asoc.alternate = NULL; } if (response_required) { m_reply = sctp_asconf_success_response(aph->correlation_id); } /* * Mobility adaptation. Ideally, when a SET PRIMARY arrives together * with a DELETE IP ADDRESS of the previous primary destination, * unacknowledged DATA is retransmitted immediately to the new primary * destination for seamless handover. If the destination is UNCONFIRMED * and marked REQ_PRIM, the retransmission occurs upon reception of the * HEARTBEAT-ACK. (See sctp_handle_heartbeat_ack in sctp_input.c.) * Also, once the primary destination has changed, all subsequent new * DATA, including DATA already queued, should be transmitted to the * new primary destination. (by micchie) */ if ((sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE) || sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_FASTHANDOFF)) && sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_PRIM_DELETED) && (stcb->asoc.primary_destination->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_1); if (sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_FASTHANDOFF)) { sctp_assoc_immediate_retrans(stcb, stcb->asoc.primary_destination); } if (sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { sctp_move_chunks_from_net(stcb, stcb->asoc.deleted_primary); } sctp_delete_prim_timer(stcb->sctp_ep, stcb, stcb->asoc.deleted_primary); } } else { /* couldn't set the requested primary address! */ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: set primary failed!\n"); /* must have been an invalid address, so report */ m_reply = sctp_asconf_error_response(aph->correlation_id, SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *)aph, aparam_length); } return (m_reply); } /* * handles an ASCONF chunk. * if all parameters are processed ok, send a plain (empty) ASCONF-ACK */ void sctp_handle_asconf(struct mbuf *m, unsigned int offset, struct sockaddr *src, struct sctp_asconf_chunk *cp, struct sctp_tcb *stcb, int first) { struct sctp_association *asoc; uint32_t serial_num; struct mbuf *n, *m_ack, *m_result, *m_tail; struct sctp_asconf_ack_chunk *ack_cp; struct sctp_asconf_paramhdr *aph; struct sctp_ipv6addr_param *p_addr; unsigned int asconf_limit, cnt; int error = 0; /* did an error occur?
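 * (stays 0 until a parameter fails; it is also passed to the
 * per-parameter handlers as response_required, forcing explicit
 * replies for everything processed after the first failure)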
*/ /* asconf param buffer */ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE]; struct sctp_asconf_ack *ack, *ack_next; /* verify minimum length */ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_chunk)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: chunk too small = %xh\n", ntohs(cp->ch.chunk_length)); return; } asoc = &stcb->asoc; serial_num = ntohl(cp->serial_number); if (SCTP_TSN_GE(asoc->asconf_seq_in, serial_num)) { /* got a duplicate ASCONF */ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: got duplicate serial number = %xh\n", serial_num); return; } else if (serial_num != (asoc->asconf_seq_in + 1)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: incorrect serial number = %xh (expected next = %xh)\n", serial_num, asoc->asconf_seq_in + 1); return; } /* it's the expected "next" sequence number, so process it */ asoc->asconf_seq_in = serial_num; /* update sequence */ /* get length of all the param's in the ASCONF */ asconf_limit = offset + ntohs(cp->ch.chunk_length); SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: asconf_limit=%u, sequence=%xh\n", asconf_limit, serial_num); if (first) { /* delete old cache */ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: Now processing first ASCONF. Try to delete old cache\n"); TAILQ_FOREACH_SAFE(ack, &asoc->asconf_ack_sent, next, ack_next) { if (ack->serial_number == serial_num) break; SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: delete old(%u) < first(%u)\n", ack->serial_number, serial_num); TAILQ_REMOVE(&asoc->asconf_ack_sent, ack, next); if (ack->data != NULL) { sctp_m_freem(ack->data); } SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), ack); } } m_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_ack_chunk), 0, M_NOWAIT, 1, MT_DATA); if (m_ack == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: couldn't get mbuf!\n"); return; } m_tail = m_ack; /* current reply chain's tail */ /* fill in ASCONF-ACK header */ ack_cp = mtod(m_ack, struct sctp_asconf_ack_chunk *); ack_cp->ch.chunk_type = SCTP_ASCONF_ACK; ack_cp->ch.chunk_flags = 0; ack_cp->serial_number = htonl(serial_num); /* set initial lengths (eg. just an ASCONF-ACK), ntohx at the end! */ SCTP_BUF_LEN(m_ack) = sizeof(struct sctp_asconf_ack_chunk); ack_cp->ch.chunk_length = sizeof(struct sctp_asconf_ack_chunk); /* skip the lookup address parameter */ offset += sizeof(struct sctp_asconf_chunk); p_addr = (struct sctp_ipv6addr_param *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *)&aparam_buf); if (p_addr == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: couldn't get lookup addr!\n"); /* respond with a missing/invalid mandatory parameter error */ sctp_m_freem(m_ack); return; } /* param_length is already validated in process_control... */ offset += ntohs(p_addr->ph.param_length); /* skip lookup addr */ /* get pointer to first asconf param in ASCONF */ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_asconf_paramhdr), (uint8_t *)&aparam_buf); if (aph == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "Empty ASCONF received?\n"); goto send_reply; } /* process through all parameters */ cnt = 0; while (aph != NULL) { unsigned int param_length, param_type; param_type = ntohs(aph->ph.param_type); param_length = ntohs(aph->ph.param_length); if (offset + param_length > asconf_limit) { /* parameter goes beyond end of chunk! 
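 * In that case the whole ASCONF is dropped without sending an
 * ASCONF-ACK; the peer is expected to retransmit the chunk when its
 * timer fires.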
*/ sctp_m_freem(m_ack); return; } m_result = NULL; if (param_length > sizeof(aparam_buf)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) larger than buffer size!\n", param_length); sctp_m_freem(m_ack); return; } if (param_length <= sizeof(struct sctp_paramhdr)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) too short\n", param_length); sctp_m_freem(m_ack); return; } /* get the entire parameter */ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf); if (aph == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: couldn't get entire param\n"); sctp_m_freem(m_ack); return; } switch (param_type) { case SCTP_ADD_IP_ADDRESS: m_result = sctp_process_asconf_add_ip(src, aph, stcb, (cnt < SCTP_BASE_SYSCTL(sctp_hb_maxburst)), error); cnt++; break; case SCTP_DEL_IP_ADDRESS: m_result = sctp_process_asconf_delete_ip(src, aph, stcb, error); break; case SCTP_ERROR_CAUSE_IND: /* not valid in an ASCONF chunk */ break; case SCTP_SET_PRIM_ADDR: m_result = sctp_process_asconf_set_primary(src, aph, stcb, error); break; case SCTP_NAT_VTAGS: SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: sees a NAT VTAG state parameter\n"); break; case SCTP_SUCCESS_REPORT: /* not valid in an ASCONF chunk */ break; case SCTP_ULP_ADAPTATION: /* FIX */ break; default: if ((param_type & 0x8000) == 0) { /* Been told to STOP at this param */ asconf_limit = offset; /* * FIX FIX - We need to call * sctp_arethere_unrecognized_parameters() * to get an operr and send it for any * param's with the 0x4000 bit set OR do it * here ourselves... note we still must STOP * if the 0x8000 bit is clear. */ } /* unknown/invalid param type */ break; } /* switch */ /* add any (error) result to the reply mbuf chain */ if (m_result != NULL) { SCTP_BUF_NEXT(m_tail) = m_result; m_tail = m_result; /* update lengths, make sure it's aligned too */ SCTP_BUF_LEN(m_result) = SCTP_SIZE32(SCTP_BUF_LEN(m_result)); ack_cp->ch.chunk_length += SCTP_BUF_LEN(m_result); /* set flag to force success reports */ error = 1; } offset += SCTP_SIZE32(param_length); /* update remaining ASCONF message length to process */ if (offset >= asconf_limit) { /* no more data in the mbuf chain */ break; } /* get pointer to next asconf param */ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_asconf_paramhdr), (uint8_t *)&aparam_buf); if (aph == NULL) { /* can't get an asconf paramhdr */ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: can't get asconf param hdr!\n"); /* FIX ME - add error here...
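 * for now we simply stop processing and ACK the parameters handled
 * so far.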
*/ } } send_reply: ack_cp->ch.chunk_length = htons(ack_cp->ch.chunk_length); /* save the ASCONF-ACK reply */ ack = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asconf_ack), struct sctp_asconf_ack); if (ack == NULL) { sctp_m_freem(m_ack); return; } ack->serial_number = serial_num; ack->last_sent_to = NULL; ack->data = m_ack; ack->len = 0; for (n = m_ack; n != NULL; n = SCTP_BUF_NEXT(n)) { ack->len += SCTP_BUF_LEN(n); } TAILQ_INSERT_TAIL(&stcb->asoc.asconf_ack_sent, ack, next); /* see if last_control_chunk_from is set properly (use IP src addr) */ if (stcb->asoc.last_control_chunk_from == NULL) { /* * this could happen if the source address was just newly * added */ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: looking up net for IP source address\n"); SCTPDBG(SCTP_DEBUG_ASCONF1, "Looking for IP source: "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src); /* look up the from address */ stcb->asoc.last_control_chunk_from = sctp_findnet(stcb, src); #ifdef SCTP_DEBUG if (stcb->asoc.last_control_chunk_from == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: IP source address not found?!\n"); } #endif } } /* * does the address match? returns 0 if not, 1 if so */ static uint32_t sctp_asconf_addr_match(struct sctp_asconf_addr *aa, struct sockaddr *sa) { switch (sa->sa_family) { #ifdef INET6 case AF_INET6: { /* XXX scopeid */ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa; if ((aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) && (memcmp(&aa->ap.addrp.addr, &sin6->sin6_addr, sizeof(struct in6_addr)) == 0)) { return (1); } break; } #endif #ifdef INET case AF_INET: { struct sockaddr_in *sin = (struct sockaddr_in *)sa; if ((aa->ap.addrp.ph.param_type == SCTP_IPV4_ADDRESS) && (memcmp(&aa->ap.addrp.addr, &sin->sin_addr, sizeof(struct in_addr)) == 0)) { return (1); } break; } #endif default: break; } return (0); } /* * does the address match? returns 0 if not, 1 if so */ static uint32_t sctp_addr_match(struct sctp_paramhdr *ph, struct sockaddr *sa) { #if defined(INET) || defined(INET6) uint16_t param_type, param_length; param_type = ntohs(ph->param_type); param_length = ntohs(ph->param_length); #endif switch (sa->sa_family) { #ifdef INET6 case AF_INET6: { /* XXX scopeid */ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa; struct sctp_ipv6addr_param *v6addr; v6addr = (struct sctp_ipv6addr_param *)ph; if ((param_type == SCTP_IPV6_ADDRESS) && (param_length == sizeof(struct sctp_ipv6addr_param)) && (memcmp(&v6addr->addr, &sin6->sin6_addr, sizeof(struct in6_addr)) == 0)) { return (1); } break; } #endif #ifdef INET case AF_INET: { struct sockaddr_in *sin = (struct sockaddr_in *)sa; struct sctp_ipv4addr_param *v4addr; v4addr = (struct sctp_ipv4addr_param *)ph; if ((param_type == SCTP_IPV4_ADDRESS) && (param_length == sizeof(struct sctp_ipv4addr_param)) && (memcmp(&v4addr->addr, &sin->sin_addr, sizeof(struct in_addr)) == 0)) { return (1); } break; } #endif default: break; } return (0); } /* * Cleanup for non-responded/OP ERR'd ASCONF */ void sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net) { /* * clear out any existing asconfs going out */ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_2); stcb->asoc.asconf_seq_out_acked = stcb->asoc.asconf_seq_out; /* remove the old ASCONF on our outbound queue */ sctp_toss_old_asconf(stcb); } /* * cleanup any cached source addresses that may be topologically * incorrect after a new address has been added to this interface. 
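 * For example, the new address may cause a route lookup to pick a
 * different egress interface, so a source address cached on the old
 * interface would no longer be topologically correct.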
*/ static void sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn) { struct sctp_nets *net; /* * Ideally, we want to only clear cached routes and source addresses * that are topologically incorrect. But since there is no easy way * to know whether the newly added address on the ifn would cause a * routing change (i.e. a new egress interface would be chosen) * without doing a new routing lookup and source address selection, * we will (for now) just flush any cached route using a different * ifn (and cached source addrs) and let output re-choose them * during the next send on that net. */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { /* * clear any cached route (and cached source address) if the * route's interface is NOT the same as the address change. * If it's the same interface, just clear the cached source * address. */ if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro) && ((ifn == NULL) || (SCTP_GET_IF_INDEX_FROM_ROUTE(&net->ro) != ifn->ifn_index))) { /* clear any cached route */ RTFREE(net->ro.ro_rt); net->ro.ro_rt = NULL; } /* clear any cached source address */ if (net->src_addr_selected) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; } } } void sctp_assoc_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *dstnet) { int error; if (dstnet->dest_state & SCTP_ADDR_UNCONFIRMED) { return; } if (stcb->asoc.deleted_primary == NULL) { return; } if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "assoc_immediate_retrans: Deleted primary is "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa); SCTPDBG(SCTP_DEBUG_ASCONF1, "Current Primary is "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.primary_destination->ro._l_addr.sa); sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, stcb->asoc.deleted_primary, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_3); stcb->asoc.num_send_timers_up--; if (stcb->asoc.num_send_timers_up < 0) { stcb->asoc.num_send_timers_up = 0; } SCTP_TCB_LOCK_ASSERT(stcb); error = sctp_t3rxt_timer(stcb->sctp_ep, stcb, stcb->asoc.deleted_primary); if (error) { SCTP_INP_DECR_REF(stcb->sctp_ep); return; } SCTP_TCB_LOCK_ASSERT(stcb); #ifdef SCTP_AUDITING_ENABLED sctp_auditing(4, stcb->sctp_ep, stcb, stcb->asoc.deleted_primary); #endif sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); if ((stcb->asoc.num_send_timers_up == 0) && (stcb->asoc.sent_queue_cnt > 0)) { struct sctp_tmit_chunk *chk; chk = TAILQ_FIRST(&stcb->asoc.sent_queue); sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); } } return; } static int sctp_asconf_queue_mgmt(struct sctp_tcb *, struct sctp_ifa *, uint16_t); void sctp_net_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *net) { struct sctp_tmit_chunk *chk; SCTPDBG(SCTP_DEBUG_ASCONF1, "net_immediate_retrans: RTO is %d\n", net->RTO); sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_4); stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); net->error_count = 0; TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { if (chk->whoTo == net) { if (chk->sent < SCTP_DATAGRAM_RESEND) { chk->sent = SCTP_DATAGRAM_RESEND; sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); sctp_flight_size_decrease(chk); sctp_total_flight_decrease(stcb, chk); net->marked_retrans++; stcb->asoc.marked_retrans++; } } } if (net->marked_retrans) { sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); } } static void sctp_path_check_and_react(struct 
sctp_tcb *stcb, struct sctp_ifa *newifa) { struct sctp_nets *net; int addrnum, changed; /* * If the number of valid local addresses is 1, that address is * probably the newly added one. If several valid addresses exist in * this association, the source address in use may not change; * additionally, the new address may be configured on the same * interface as an "alias" address. (by micchie) */ addrnum = sctp_local_addr_count(stcb); SCTPDBG(SCTP_DEBUG_ASCONF1, "p_check_react(): %d local addresses\n", addrnum); if (addrnum == 1) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { /* clear any cached route and source address */ if (net->ro.ro_rt) { RTFREE(net->ro.ro_rt); net->ro.ro_rt = NULL; } if (net->src_addr_selected) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; } /* Retransmit unacknowledged DATA chunks immediately */ if (sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_FASTHANDOFF)) { sctp_net_immediate_retrans(stcb, net); } /* also, SET PRIMARY is maybe already sent */ } return; } /* Multiple local addresses exist in the association. */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { /* clear any cached route and source address */ if (net->ro.ro_rt) { RTFREE(net->ro.ro_rt); net->ro.ro_rt = NULL; } if (net->src_addr_selected) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; } /* * Check whether the nexthop corresponds to the new address. * If it does, the path to this destination will change; if it * does not, the path remains unchanged. */ SCTP_RTALLOC((sctp_route_t *)&net->ro, stcb->sctp_ep->def_vrf_id, stcb->sctp_ep->fibnum); if (net->ro.ro_rt == NULL) continue; changed = 0; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: if (sctp_v4src_match_nexthop(newifa, (sctp_route_t *)&net->ro)) { changed = 1; } break; #endif #ifdef INET6 case AF_INET6: if (sctp_v6src_match_nexthop( &newifa->address.sin6, (sctp_route_t *)&net->ro)) { changed = 1; } break; #endif default: break; } /* * if the newly added address does not affect the routing * information for this net, skip it. */ if (changed == 0) continue; /* Retransmit unacknowledged DATA chunks immediately */ if (sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_FASTHANDOFF)) { sctp_net_immediate_retrans(stcb, net); } /* Send SET PRIMARY for this new address */ if (net == stcb->asoc.primary_destination) { (void)sctp_asconf_queue_mgmt(stcb, newifa, SCTP_SET_PRIM_ADDR); } } } /* * process an ADD/DELETE IP ack from peer. * addr: corresponding sctp_ifa to the address being added/deleted. * type: SCTP_ADD_IP_ADDRESS or SCTP_DEL_IP_ADDRESS. * flag: 1=success, 0=failure. */ static void sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr, uint32_t flag) { /* * do the necessary asoc list work- if we get a failure indication, * leave the address on the assoc's restricted list. If we get a * success indication, remove the address from the restricted list. */ /* * Note: this will only occur for ADD_IP_ADDRESS, since * DEL_IP_ADDRESS is never actually added to the list...
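 * (deleted addresses are removed from the lists immediately; see the
 * comment at the top of this file)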
*/ if (flag) { /* success case, so remove from the restricted list */ sctp_del_local_addr_restricted(stcb, addr); if (sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE) || sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_FASTHANDOFF)) { sctp_path_check_and_react(stcb, addr); return; } /* clear any cached/topologically incorrect source addresses */ sctp_asconf_nets_cleanup(stcb, addr->ifn_p); } /* else, leave it on the list */ } /* * add an asconf add/delete/set primary IP address parameter to the queue. * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR. * returns 0 if queued, -1 if not queued/removed. * NOTE: if adding, but a delete for the same address is already scheduled * (and not yet sent out), simply remove it from queue. Same for deleting * an address already scheduled for add. If a duplicate operation is found, * ignore the new one. */ static int sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa, uint16_t type) { struct sctp_asconf_addr *aa, *aa_next; /* make sure the request isn't already in the queue */ TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) { /* address match? */ if (sctp_asconf_addr_match(aa, &ifa->address.sa) == 0) continue; /* * Is the same request already in the queue but not yet sent? * Requests that have already been sent must be skipped here * to resolve the following case: 1. an ADD arrives and is * sent, 2. a DEL arrives (the ADD already in flight cannot * be removed), 3. another ADD arrives and must still be * queued. */ if (aa->ap.aph.ph.param_type == type && aa->sent == 0) { return (-1); } /* is the negative request already in queue, and not sent */ if ((aa->sent == 0) && (type == SCTP_ADD_IP_ADDRESS) && (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS)) { /* add requested, delete already queued */ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next); /* remove the ifa from the restricted list */ sctp_del_local_addr_restricted(stcb, ifa); /* free the asconf param */ SCTP_FREE(aa, SCTP_M_ASC_ADDR); SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: add removes queued entry\n"); return (-1); } if ((aa->sent == 0) && (type == SCTP_DEL_IP_ADDRESS) && (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS)) { /* delete requested, add already queued */ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next); /* remove the aa->ifa from the restricted list */ sctp_del_local_addr_restricted(stcb, aa->ifa); /* free the asconf param */ SCTP_FREE(aa, SCTP_M_ASC_ADDR); SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: delete removes queued entry\n"); return (-1); } } /* for each aa */ /* adding new request to the queue */ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), SCTP_M_ASC_ADDR); if (aa == NULL) { /* didn't get memory */ SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_queue_mgmt: failed to get memory!\n"); return (-1); } aa->special_del = 0; /* fill in asconf address parameter fields */ /* top level elements are "networked" during send */ aa->ap.aph.ph.param_type = type; aa->ifa = ifa; atomic_add_int(&ifa->refcount, 1); /* correlation_id filled in during send routine later...
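 * (correlation ids are assigned sequentially when the parameter is
 * serialized into an ASCONF chunk; see sctp_compose_asconf() in
 * sctp_output.c)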
*/ switch (ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; sin6 = &ifa->address.sin6; aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS; aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param)); aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv6addr_param); memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr, sizeof(struct in6_addr)); break; } #endif #ifdef INET case AF_INET: { struct sockaddr_in *sin; sin = &ifa->address.sin; aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS; aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param)); aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv4addr_param); memcpy(&aa->ap.addrp.addr, &sin->sin_addr, sizeof(struct in_addr)); break; } #endif default: /* invalid family! */ SCTP_FREE(aa, SCTP_M_ASC_ADDR); sctp_free_ifa(ifa); return (-1); } aa->sent = 0; /* clear sent flag */ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); #ifdef SCTP_DEBUG if (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_ASCONF2) { if (type == SCTP_ADD_IP_ADDRESS) { SCTP_PRINTF("asconf_queue_mgmt: inserted asconf ADD_IP_ADDRESS: "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa); } else if (type == SCTP_DEL_IP_ADDRESS) { SCTP_PRINTF("asconf_queue_mgmt: appended asconf DEL_IP_ADDRESS: "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa); } else { SCTP_PRINTF("asconf_queue_mgmt: appended asconf SET_PRIM_ADDR: "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa); } } #endif return (0); } /* * add an asconf operation for the given ifa and type. * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR. * returns 0 if completed, -1 if not completed, 1 if immediate send is * advisable. */ static int sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa, uint16_t type) { uint32_t status; int pending_delete_queued = 0; int last; /* see if peer supports ASCONF */ if (stcb->asoc.asconf_supported == 0) { return (-1); } /* * if this is deleting the last address from the assoc, mark it as * pending. */ if ((type == SCTP_DEL_IP_ADDRESS) && !stcb->asoc.asconf_del_pending) { if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { last = (sctp_local_addr_count(stcb) == 0); } else { last = (sctp_local_addr_count(stcb) == 1); } if (last) { /* set the pending delete info only */ stcb->asoc.asconf_del_pending = 1; stcb->asoc.asconf_addr_del_pending = ifa; atomic_add_int(&ifa->refcount, 1); SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_add: mark delete last address pending\n"); return (-1); } } /* queue an asconf parameter */ status = sctp_asconf_queue_mgmt(stcb, ifa, type); /* * if this is an add, and there is a delete also pending (i.e. the * last local address is being changed), queue the pending delete * too. */ if ((type == SCTP_ADD_IP_ADDRESS) && stcb->asoc.asconf_del_pending && (status == 0)) { /* queue in the pending delete */ if (sctp_asconf_queue_mgmt(stcb, stcb->asoc.asconf_addr_del_pending, SCTP_DEL_IP_ADDRESS) == 0) { SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_add: queuing pending delete\n"); pending_delete_queued = 1; /* clear out the pending delete info */ stcb->asoc.asconf_del_pending = 0; sctp_free_ifa(stcb->asoc.asconf_addr_del_pending); stcb->asoc.asconf_addr_del_pending = NULL; } } if (pending_delete_queued) { struct sctp_nets *net; /* * since we know that the only/last address is now being * changed in this case, reset the cwnd/rto on all nets to * start as a new address and path.
Also clear the error * counts to give the assoc the best chance to complete the * address change. */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); net->RTO = 0; net->error_count = 0; } stcb->asoc.overall_error_count = 0; if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { sctp_misc_ints(SCTP_THRESHOLD_CLEAR, stcb->asoc.overall_error_count, 0, SCTP_FROM_SCTP_ASCONF, __LINE__); } /* queue in an advisory set primary too */ (void)sctp_asconf_queue_mgmt(stcb, ifa, SCTP_SET_PRIM_ADDR); /* let caller know we should send this out immediately */ status = 1; } return (status); } /*- * add an asconf delete IP address parameter to the queue by sockaddr and * possibly with no sctp_ifa available. This is only called by the routine * that checks the addresses in an INIT-ACK against the current address list. * returns 0 if completed, non-zero if not completed. * NOTE: if an add is already scheduled (and not yet sent out), simply * remove it from queue. If a duplicate operation is found, ignore the * new one. */ static int sctp_asconf_queue_sa_delete(struct sctp_tcb *stcb, struct sockaddr *sa) { struct sctp_ifa *ifa; struct sctp_asconf_addr *aa, *aa_next; if (stcb == NULL) { return (-1); } /* see if peer supports ASCONF */ if (stcb->asoc.asconf_supported == 0) { return (-1); } /* make sure the request isn't already in the queue */ TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) { /* address match? */ if (sctp_asconf_addr_match(aa, sa) == 0) continue; /* is the request already in queue (sent or not) */ if (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) { return (-1); } /* is the negative request already in queue, and not sent */ if (aa->sent == 1) continue; if (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS) { /* add already queued, so remove existing entry */ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next); sctp_del_local_addr_restricted(stcb, aa->ifa); /* free the entry */ SCTP_FREE(aa, SCTP_M_ASC_ADDR); return (-1); } } /* for each aa */ /* find any existing ifa-- NOTE ifa CAN be allowed to be NULL */ ifa = sctp_find_ifa_by_addr(sa, stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); /* adding new request to the queue */ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), SCTP_M_ASC_ADDR); if (aa == NULL) { /* didn't get memory */ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_queue_sa_delete: failed to get memory!\n"); return (-1); } aa->special_del = 0; /* fill in asconf address parameter fields */ /* top level elements are "networked" during send */ aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS; aa->ifa = ifa; if (ifa) atomic_add_int(&ifa->refcount, 1); /* correlation_id filled in during send routine later... 
*/ switch (sa->sa_family) { #ifdef INET6 case AF_INET6: { /* IPv6 address */ struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)sa; aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS; aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param)); aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv6addr_param); memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr, sizeof(struct in6_addr)); break; } #endif #ifdef INET case AF_INET: { /* IPv4 address */ struct sockaddr_in *sin = (struct sockaddr_in *)sa; aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS; aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param)); aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv4addr_param); memcpy(&aa->ap.addrp.addr, &sin->sin_addr, sizeof(struct in_addr)); break; } #endif default: /* invalid family! */ SCTP_FREE(aa, SCTP_M_ASC_ADDR); if (ifa) sctp_free_ifa(ifa); return (-1); } aa->sent = 0; /* clear sent flag */ /* delete goes to the back of the queue */ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); /* sa_ignore MEMLEAK {memory is put on the tailq} */ return (0); } /* * find a specific asconf param on our "sent" queue */ static struct sctp_asconf_addr * sctp_asconf_find_param(struct sctp_tcb *stcb, uint32_t correlation_id) { struct sctp_asconf_addr *aa; TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) { if (aa->ap.aph.correlation_id == correlation_id && aa->sent == 1) { /* found it */ return (aa); } } /* didn't find it */ return (NULL); } /* * process an SCTP_ERROR_CAUSE_IND for an ASCONF-ACK parameter and do * notifications based on the error response */ static void sctp_asconf_process_error(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_asconf_paramhdr *aph) { struct sctp_error_cause *eh; struct sctp_paramhdr *ph; uint16_t param_type; uint16_t error_code; eh = (struct sctp_error_cause *)(aph + 1); ph = (struct sctp_paramhdr *)(eh + 1); /* validate lengths */ if (htons(eh->length) + sizeof(struct sctp_error_cause) > htons(aph->ph.param_length)) { /* invalid error cause length */ SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_process_error: cause element too long\n"); return; } if (htons(ph->param_length) + sizeof(struct sctp_paramhdr) > htons(eh->length)) { /* invalid included TLV length */ SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_process_error: included TLV too long\n"); return; } /* which error code? */ error_code = ntohs(eh->code); param_type = ntohs(aph->ph.param_type); /* FIX: this should go back up the REMOTE_ERROR ULP notify */ switch (error_code) { case SCTP_CAUSE_RESOURCE_SHORTAGE: /* we allow ourselves to "try again" for this error */ break; default: /* peer can't handle it... */ switch (param_type) { case SCTP_ADD_IP_ADDRESS: case SCTP_DEL_IP_ADDRESS: case SCTP_SET_PRIM_ADDR: break; default: break; } } } /* * process an asconf queue param. * aparam: parameter to process, will be removed from the queue. * flag: 1=success case, 0=failure case */ static void sctp_asconf_process_param_ack(struct sctp_tcb *stcb, struct sctp_asconf_addr *aparam, uint32_t flag) { uint16_t param_type; /* process this param */ param_type = aparam->ap.aph.ph.param_type; switch (param_type) { case SCTP_ADD_IP_ADDRESS: SCTPDBG(SCTP_DEBUG_ASCONF1, "process_param_ack: added IP address\n"); sctp_asconf_addr_mgmt_ack(stcb, aparam->ifa, flag); break; case SCTP_DEL_IP_ADDRESS: SCTPDBG(SCTP_DEBUG_ASCONF1, "process_param_ack: deleted IP address\n"); /* nothing really to do...
lists already updated */ break; case SCTP_SET_PRIM_ADDR: SCTPDBG(SCTP_DEBUG_ASCONF1, "process_param_ack: set primary IP address\n"); /* nothing to do... peer may start using this addr */ break; default: /* should NEVER happen */ break; } /* remove the param and free it */ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aparam, next); if (aparam->ifa) sctp_free_ifa(aparam->ifa); SCTP_FREE(aparam, SCTP_M_ASC_ADDR); } /* * cleanup from a bad asconf ack parameter */ static void sctp_asconf_ack_clear(struct sctp_tcb *stcb SCTP_UNUSED) { /* assume peer doesn't really know how to do asconfs */ /* XXX we could free the pending queue here */ } void sctp_handle_asconf_ack(struct mbuf *m, int offset, struct sctp_asconf_ack_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock) { struct sctp_association *asoc; uint32_t serial_num; uint16_t ack_length; struct sctp_asconf_paramhdr *aph; struct sctp_asconf_addr *aa, *aa_next; uint32_t last_error_id = 0; /* last error correlation id */ uint32_t id; struct sctp_asconf_addr *ap; /* asconf param buffer */ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE]; /* verify minimum length */ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_ack_chunk)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: chunk too small = %xh\n", ntohs(cp->ch.chunk_length)); return; } asoc = &stcb->asoc; serial_num = ntohl(cp->serial_number); /* * NOTE: we may want to handle this differently- currently, we will * abort when we get an ack for the expected serial number + 1 (eg. * we didn't send it), process an ack normally if it is the expected * serial number, and re-send the previous ack for *ALL* other * serial numbers */ /* * if the serial number is the next expected, but I didn't send it, * abort the asoc, since someone probably just hijacked us... */ if (serial_num == (asoc->asconf_seq_out + 1)) { struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got unexpected next serial number! Aborting asoc!\n"); snprintf(msg, sizeof(msg), "Never sent serial number %8.8x", serial_num); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); *abort_no_unlock = 1; return; } if (serial_num != asoc->asconf_seq_out_acked + 1) { /* got a duplicate/unexpected ASCONF-ACK */ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got duplicate/unexpected serial number = %xh (expected = %xh)\n", serial_num, asoc->asconf_seq_out_acked + 1); return; } if (serial_num == asoc->asconf_seq_out - 1) { /* stop our timer */ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_5); } /* process the ASCONF-ACK contents */ ack_length = ntohs(cp->ch.chunk_length) - sizeof(struct sctp_asconf_ack_chunk); offset += sizeof(struct sctp_asconf_ack_chunk); /* process through all parameters */ while (ack_length >= sizeof(struct sctp_asconf_paramhdr)) { unsigned int param_length, param_type; /* get pointer to next asconf parameter */ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf); if (aph == NULL) { /* can't get an asconf paramhdr */ sctp_asconf_ack_clear(stcb); return; } param_type = ntohs(aph->ph.param_type); param_length = ntohs(aph->ph.param_length); if (param_length > ack_length) { sctp_asconf_ack_clear(stcb); return; } if (param_length < sizeof(struct sctp_paramhdr)) { sctp_asconf_ack_clear(stcb); return; } /* get the complete parameter... 
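 * sctp_m_getptr() copies the parameter into aparam_buf when it is not
 * contiguous in the mbuf chain, so aph below always points at a
 * contiguous TLV.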
*/ if (param_length > sizeof(aparam_buf)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "param length (%u) larger than buffer size!\n", param_length); sctp_asconf_ack_clear(stcb); return; } aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf); if (aph == NULL) { sctp_asconf_ack_clear(stcb); return; } /* correlation_id is transparent to peer, no ntohl needed */ id = aph->correlation_id; switch (param_type) { case SCTP_ERROR_CAUSE_IND: last_error_id = id; /* find the corresponding asconf param in our queue */ ap = sctp_asconf_find_param(stcb, id); if (ap == NULL) { /* hmm... can't find this in our queue! */ break; } /* process the parameter, failed flag */ sctp_asconf_process_param_ack(stcb, ap, 0); /* process the error response */ sctp_asconf_process_error(stcb, aph); break; case SCTP_SUCCESS_REPORT: /* find the corresponding asconf param in our queue */ ap = sctp_asconf_find_param(stcb, id); if (ap == NULL) { /* hmm... can't find this in our queue! */ break; } /* process the parameter, success flag */ sctp_asconf_process_param_ack(stcb, ap, 1); break; default: break; } /* switch */ /* update remaining ASCONF-ACK message length to process */ ack_length -= SCTP_SIZE32(param_length); if (ack_length <= 0) { /* no more data in the mbuf chain */ break; } offset += SCTP_SIZE32(param_length); } /* while */ /* * if there are any "sent" params still on the queue, these are * implicitly "success", or "failed" (if we got an error back) ... * so process these appropriately * * we assume that the correlation_id's are monotonically increasing * beginning from 1 and that we don't have *that* many outstanding * at any given time */ if (last_error_id == 0) last_error_id--; /* set to "max" value */ TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) { if (aa->sent == 1) { /* * implicitly successful or failed if correlation_id * < last_error_id, then success else, failure */ if (aa->ap.aph.correlation_id < last_error_id) sctp_asconf_process_param_ack(stcb, aa, 1); else sctp_asconf_process_param_ack(stcb, aa, 0); } else { /* * since we always process in order (FIFO queue) if * we reach one that hasn't been sent, the rest * should not have been sent either. so, we're * done... 
*/ break; } } /* update the next sequence number to use */ asoc->asconf_seq_out_acked++; /* remove the old ASCONF on our outbound queue */ sctp_toss_old_asconf(stcb); if (!TAILQ_EMPTY(&stcb->asoc.asconf_queue)) { #ifdef SCTP_TIMER_BASED_ASCONF /* we have more params, so restart our timer */ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net); #else /* we have more params, so send out more */ sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); #endif } } #ifdef INET6 static uint32_t sctp_is_scopeid_in_nets(struct sctp_tcb *stcb, struct sockaddr *sa) { struct sockaddr_in6 *sin6, *net6; struct sctp_nets *net; if (sa->sa_family != AF_INET6) { /* wrong family */ return (0); } sin6 = (struct sockaddr_in6 *)sa; if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) == 0) { /* not link local address */ return (0); } /* hunt through our destination nets list for this scope_id */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (((struct sockaddr *)(&net->ro._l_addr))->sa_family != AF_INET6) continue; net6 = (struct sockaddr_in6 *)&net->ro._l_addr; if (IN6_IS_ADDR_LINKLOCAL(&net6->sin6_addr) == 0) continue; if (sctp_is_same_scope(sin6, net6)) { /* found one */ return (1); } } /* didn't find one */ return (0); } #endif /* * address management functions */ static void sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_ifa *ifa, uint16_t type, int addr_locked) { int status; if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0 || sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) { /* subset bound, no ASCONF allowed case, so ignore */ return; } /* * note: we know this is not the subset-bound, no-ASCONF case, eg. * this is boundall or subset bound w/ASCONF allowed */ /* first, make sure that the address is IPv4 or IPv6 and not jailed */ switch (ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &ifa->address.sin6.sin6_addr) != 0) { return; } break; #endif #ifdef INET case AF_INET: if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &ifa->address.sin.sin_addr) != 0) { return; } break; #endif default: return; } #ifdef INET6 /* make sure we're "allowed" to add this type of addr */ if (ifa->address.sa.sa_family == AF_INET6) { /* invalid if we're not a v6 endpoint */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) return; /* is the v6 addr really valid ? */ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { return; } } #endif /* put this address on the "pending/do not use yet" list */ sctp_add_local_addr_restricted(stcb, ifa); /* * check address scope; if the address is out of scope, don't queue * anything... note: this would leave the address on both inp and * asoc lists */ switch (ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; sin6 = &ifa->address.sin6; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* we skip unspecified addresses */ return; } if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { if (stcb->asoc.scope.local_scope == 0) { return; } /* is it the right link local scope?
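 * (a link-local address is only announced if the peer has a
 * link-local destination with a matching scope id; otherwise it is
 * unusable for this association)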
*/ if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) { return; } } if (stcb->asoc.scope.site_scope == 0 && IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { return; } break; } #endif #ifdef INET case AF_INET: { struct sockaddr_in *sin; /* invalid if we are a v6 only endpoint */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
-    SCTP_IPV6_V6ONLY(&inp->ip_inp.inp))
+    SCTP_IPV6_V6ONLY(inp))
return; sin = &ifa->address.sin; if (sin->sin_addr.s_addr == 0) { /* we skip unspecified addresses */ return; } if (stcb->asoc.scope.ipv4_local_scope == 0 && IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { return; } break; } #endif default: /* else, not AF_INET or AF_INET6, so skip */ return; } /* queue an asconf for this address add/delete */ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) { /* does the peer do asconf? */ if (stcb->asoc.asconf_supported) { /* queue an asconf for this addr */ status = sctp_asconf_queue_add(stcb, ifa, type); /* * if queued ok, and in the open state, send out the * ASCONF. If in the non-open state, these will be * sent when the state goes open. */ if (status == 0 && ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED))) { #ifdef SCTP_TIMER_BASED_ASCONF sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, stcb->asoc.primary_destination); #else sctp_send_asconf(stcb, NULL, addr_locked); #endif } } } } int sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED) { struct sctp_asconf_iterator *asc; struct sctp_ifa *ifa; struct sctp_laddr *l; int cnt_invalid = 0; asc = (struct sctp_asconf_iterator *)ptr; LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) { ifa = l->ifa; switch (ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: /* invalid if we're not a v6 endpoint */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { cnt_invalid++; if (asc->cnt == cnt_invalid) return (1); } break; #endif #ifdef INET case AF_INET: { /* invalid if we are a v6 only endpoint */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
-    SCTP_IPV6_V6ONLY(&inp->ip_inp.inp)) {
+    SCTP_IPV6_V6ONLY(inp)) {
cnt_invalid++; if (asc->cnt == cnt_invalid) return (1); } break; } #endif default: /* invalid address family */ cnt_invalid++; if (asc->cnt == cnt_invalid) return (1); } } return (0); } static int sctp_asconf_iterator_ep_end(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED) { struct sctp_ifa *ifa; struct sctp_asconf_iterator *asc; struct sctp_laddr *laddr, *nladdr, *l; /* Only for specific case not bound all */ asc = (struct sctp_asconf_iterator *)ptr; LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) { ifa = l->ifa; if (l->action == SCTP_ADD_IP_ADDRESS) { LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == ifa) { laddr->action = 0; break; } } } else if (l->action == SCTP_DEL_IP_ADDRESS) { LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) { /* remove only after all guys are done */ if (laddr->ifa == ifa) { sctp_del_local_addr_ep(inp, ifa); } } } } return (0); } void sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, uint32_t val SCTP_UNUSED) { struct sctp_asconf_iterator *asc; struct sctp_ifa *ifa; struct sctp_laddr *l; int cnt_invalid = 0; int type, status; int num_queued = 0; asc = (struct sctp_asconf_iterator *)ptr; LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) { ifa = l->ifa; type = l->action; /* address's vrf_id must be the vrf_id of the assoc */ if (ifa->vrf_id != stcb->asoc.vrf_id) { continue; } /* Same checks again for
assoc */ switch (ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: { /* invalid if we're not a v6 endpoint */ struct sockaddr_in6 *sin6; if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { cnt_invalid++; if (asc->cnt == cnt_invalid) return; else continue; } sin6 = &ifa->address.sin6; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* we skip unspecified addresses */ continue; } if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; } if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { if (stcb->asoc.scope.local_scope == 0) { continue; } /* is it the right link local scope? */ if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) { continue; } } break; } #endif #ifdef INET case AF_INET: { /* invalid if we are a v6 only endpoint */ struct sockaddr_in *sin; /* invalid if we are a v6 only endpoint */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && - SCTP_IPV6_V6ONLY(&inp->ip_inp.inp)) + SCTP_IPV6_V6ONLY(inp)) continue; sin = &ifa->address.sin; if (sin->sin_addr.s_addr == 0) { /* we skip unspecified addresses */ continue; } if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; } if (stcb->asoc.scope.ipv4_local_scope == 0 && IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { continue; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && - SCTP_IPV6_V6ONLY(&inp->ip_inp.inp)) { + SCTP_IPV6_V6ONLY(inp)) { cnt_invalid++; if (asc->cnt == cnt_invalid) return; else continue; } break; } #endif default: /* invalid address family */ cnt_invalid++; if (asc->cnt == cnt_invalid) return; else continue; break; } if (type == SCTP_ADD_IP_ADDRESS) { /* prevent this address from being used as a source */ sctp_add_local_addr_restricted(stcb, ifa); } else if (type == SCTP_DEL_IP_ADDRESS) { struct sctp_nets *net; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sctp_rtentry_t *rt; /* delete this address if cached */ if (net->ro._s_addr == ifa) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; rt = net->ro.ro_rt; if (rt) { RTFREE(rt); net->ro.ro_rt = NULL; } /* * Now we deleted our src address, * should we not also now reset the * cwnd/rto to start as if it's a new * address? */ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); net->RTO = 0; } } } else if (type == SCTP_SET_PRIM_ADDR) { if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* must validate the ifa is in the ep */ if (sctp_is_addr_in_ep(stcb->sctp_ep, ifa) == 0) { continue; } } else { /* Need to check scopes for this guy */ if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { continue; } } } /* queue an asconf for this address add/delete */ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF) && stcb->asoc.asconf_supported == 1) { /* queue an asconf for this addr */ status = sctp_asconf_queue_add(stcb, ifa, type); /* * if queued ok, and in the open state, update the * count of queued params. If in the non-open * state, these get sent when the assoc goes open. */ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { if (status >= 0) { num_queued++; } } } } /* * If we have queued params in the open state, send out an ASCONF.
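 *
 * [Illustrative aside, not part of this commit.] On the delete path
 * above, sctp_set_initial_cc_param() re-seeds the path as if it were
 * brand new, and net->RTO = 0 forces a fresh RTT measurement. A hedged
 * sketch of the RFC 4960 initial values involved (constants come from
 * the RFC, not from this file):
 *
 *	static void
 *	example_reset_path(uint32_t mtu, uint32_t peer_rwnd,
 *	    uint32_t *cwnd, uint32_t *ssthresh, uint32_t *rto_msec)
 *	{
 *		*cwnd = MIN(4 * mtu, MAX(2 * mtu, 4380)); // RFC 4960, 7.2.1
 *		*ssthresh = peer_rwnd;  // starts "arbitrarily high"
 *		*rto_msec = 3000;       // RTO.Initial suggested default
 *	}
 *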
*/ if (num_queued > 0) { sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED); } } void sctp_asconf_iterator_end(void *ptr, uint32_t val SCTP_UNUSED) { struct sctp_asconf_iterator *asc; struct sctp_ifa *ifa; struct sctp_laddr *l, *nl; asc = (struct sctp_asconf_iterator *)ptr; LIST_FOREACH_SAFE(l, &asc->list_of_work, sctp_nxt_addr, nl) { ifa = l->ifa; if (l->action == SCTP_ADD_IP_ADDRESS) { /* Clear the defer use flag */ ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE; } sctp_free_ifa(ifa); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), l); SCTP_DECR_LADDR_COUNT(); } SCTP_FREE(asc, SCTP_M_ASC_IT); } /* * sa is the sockaddr to ask the peer to set primary to. * returns: 0 = completed, -1 = error */ int32_t sctp_set_primary_ip_address_sa(struct sctp_tcb *stcb, struct sockaddr *sa) { uint32_t vrf_id; struct sctp_ifa *ifa; /* find the ifa for the desired set primary */ vrf_id = stcb->asoc.vrf_id; ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); if (ifa == NULL) { /* Invalid address */ return (-1); } /* queue an ASCONF:SET_PRIM_ADDR to be sent */ if (!sctp_asconf_queue_add(stcb, ifa, SCTP_SET_PRIM_ADDR)) { /* set primary queuing succeeded */ SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address_sa: queued on tcb=%p, ", (void *)stcb); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa); if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { #ifdef SCTP_TIMER_BASED_ASCONF sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, stcb->asoc.primary_destination); #else sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED); #endif } } else { SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address_sa: failed to add to queue on tcb=%p, ", (void *)stcb); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa); return (-1); } return (0); } int sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa) { struct sctp_tmit_chunk *chk, *nchk; unsigned int offset, asconf_limit; struct sctp_asconf_chunk *acp; struct sctp_asconf_paramhdr *aph; uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE]; struct sctp_paramhdr *ph; int add_cnt, del_cnt; uint16_t last_param_type; add_cnt = del_cnt = 0; last_param_type = 0; TAILQ_FOREACH_SAFE(chk, &stcb->asoc.asconf_send_queue, sctp_next, nchk) { if (chk->data == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: No mbuf data?\n"); continue; } offset = 0; acp = mtod(chk->data, struct sctp_asconf_chunk *); offset += sizeof(struct sctp_asconf_chunk); asconf_limit = ntohs(acp->ch.chunk_length); ph = (struct sctp_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_paramhdr), aparam_buf); if (ph == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get lookup addr!\n"); continue; } offset += ntohs(ph->param_length); aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf); if (aph == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: Empty ASCONF will be sent?\n"); continue; } while (aph != NULL) { unsigned int param_length, param_type; param_type = ntohs(aph->ph.param_type); param_length = ntohs(aph->ph.param_length); if (offset + param_length > asconf_limit) { /* parameter goes beyond end of chunk! 
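 *
 * [Illustrative aside, not part of this commit.] The parameter walk in
 * sctp_is_addr_pending() above, shown over a flat buffer; the kernel
 * uses sctp_m_getptr() because the chunk lives in an mbuf chain. Names
 * are hypothetical; needs <stdint.h>, <string.h>, <arpa/inet.h>:
 *
 *	struct example_tlv {
 *		uint16_t type;
 *		uint16_t length;	// covers this 4-byte header too
 *	};
 *
 *	static void
 *	example_walk_params(const uint8_t *buf, size_t limit)
 *	{
 *		size_t offset = 0;
 *		struct example_tlv tlv;
 *		uint16_t len;
 *
 *		while (offset + sizeof(tlv) <= limit) {
 *			memcpy(&tlv, buf + offset, sizeof(tlv));
 *			len = ntohs(tlv.length);
 *			// reject runts and params that overrun the chunk
 *			if (len <= sizeof(tlv) || offset + len > limit)
 *				break;
 *			// ... inspect ntohs(tlv.type) / payload here ...
 *			offset += (len + 3u) & ~(size_t)3; // 32-bit padding
 *		}
 *	}
 *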
*/ break; } if (param_length > sizeof(aparam_buf)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length (%u) larger than buffer size!\n", param_length); break; } if (param_length <= sizeof(struct sctp_paramhdr)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length(%u) too short\n", param_length); break; } aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, param_length, aparam_buf); if (aph == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get entire param\n"); break; } ph = (struct sctp_paramhdr *)(aph + 1); if (sctp_addr_match(ph, &sctp_ifa->address.sa) != 0) { switch (param_type) { case SCTP_ADD_IP_ADDRESS: add_cnt++; break; case SCTP_DEL_IP_ADDRESS: del_cnt++; break; default: break; } last_param_type = param_type; } offset += SCTP_SIZE32(param_length); if (offset >= asconf_limit) { /* no more data in the mbuf chain */ break; } /* get pointer to next asconf param */ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf); } } /* * we want to find the sequences which consist of ADD -> DEL -> ADD * or DEL -> ADD */ if (add_cnt > del_cnt || (add_cnt == del_cnt && last_param_type == SCTP_ADD_IP_ADDRESS)) { return (1); } return (0); } static struct sockaddr * sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked) { struct sctp_vrf *vrf = NULL; struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa; if (addr_locked == SCTP_ADDR_NOT_LOCKED) SCTP_IPI_ADDR_RLOCK(); vrf = sctp_find_vrf(stcb->asoc.vrf_id); if (vrf == NULL) { if (addr_locked == SCTP_ADDR_NOT_LOCKED) SCTP_IPI_ADDR_RUNLOCK(); return (NULL); } LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { if (stcb->asoc.scope.loopback_scope == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { /* Skip if loopback_scope not set */ continue; } LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { switch (sctp_ifa->address.sa.sa_family) { #ifdef INET case AF_INET: if (stcb->asoc.scope.ipv4_addr_legal) { struct sockaddr_in *sin; sin = &sctp_ifa->address.sin; if (sin->sin_addr.s_addr == 0) { /* skip unspecified addresses */ continue; } if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; } if (stcb->asoc.scope.ipv4_local_scope == 0 && IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) continue; if (sctp_is_addr_restricted(stcb, sctp_ifa) && (!sctp_is_addr_pending(stcb, sctp_ifa))) continue; /* * found a valid local v4 address to * use */ if (addr_locked == SCTP_ADDR_NOT_LOCKED) SCTP_IPI_ADDR_RUNLOCK(); return (&sctp_ifa->address.sa); } break; #endif #ifdef INET6 case AF_INET6: if (stcb->asoc.scope.ipv6_addr_legal) { struct sockaddr_in6 *sin6; if (sctp_ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { continue; } sin6 = &sctp_ifa->address.sin6; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* * we skip unspecified * addresses */ continue; } if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; } if (stcb->asoc.scope.local_scope == 0 && IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) continue; if (stcb->asoc.scope.site_scope == 0 && IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) continue; if (sctp_is_addr_restricted(stcb, sctp_ifa) && (!sctp_is_addr_pending(stcb, sctp_ifa))) continue; /* * found a valid local v6 address to * use */ if (addr_locked == SCTP_ADDR_NOT_LOCKED) SCTP_IPI_ADDR_RUNLOCK(); return (&sctp_ifa->address.sa); } break; #endif default: break; } } } /* no valid addresses found */ if (addr_locked == SCTP_ADDR_NOT_LOCKED) SCTP_IPI_ADDR_RUNLOCK(); return (NULL); } static struct
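/*
 * Illustrative aside, not part of this commit: the ADD/DEL tally above
 * reduces to one rule, the address is still "pending add" when the
 * queued operations end in a net ADD. As a stand-alone predicate
 * (hypothetical name):
 *
 *	static int
 *	example_ends_in_add(int add_cnt, int del_cnt, int last_was_add)
 *	{
 *		return (add_cnt > del_cnt ||
 *		    (add_cnt == del_cnt && last_was_add));
 *	}
 */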
sockaddr * sctp_find_valid_localaddr_ep(struct sctp_tcb *stcb) { struct sctp_laddr *laddr; LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { continue; } /* is the address restricted ? */ if (sctp_is_addr_restricted(stcb, laddr->ifa) && (!sctp_is_addr_pending(stcb, laddr->ifa))) continue; /* found a valid local address to use */ return (&laddr->ifa->address.sa); } /* no valid addresses found */ return (NULL); } /* * builds an ASCONF chunk from queued ASCONF params. * returns NULL on error (no mbuf, no ASCONF params queued, etc). */ struct mbuf * sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked) { struct mbuf *m_asconf, *m_asconf_chk; struct sctp_asconf_addr *aa; struct sctp_asconf_chunk *acp; struct sctp_asconf_paramhdr *aph; struct sctp_asconf_addr_param *aap; uint32_t p_length; uint32_t correlation_id = 1; /* 0 is reserved... */ caddr_t ptr, lookup_ptr; uint8_t lookup_used = 0; /* are there any asconf params to send? */ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) { if (aa->sent == 0) break; } if (aa == NULL) return (NULL); /* * get a chunk header mbuf and a cluster for the asconf params since * it's simpler to fill in the asconf chunk header lookup address on * the fly */ m_asconf_chk = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_chunk), 0, M_NOWAIT, 1, MT_DATA); if (m_asconf_chk == NULL) { /* no mbuf's */ SCTPDBG(SCTP_DEBUG_ASCONF1, "compose_asconf: couldn't get chunk mbuf!\n"); return (NULL); } m_asconf = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); if (m_asconf == NULL) { /* no mbuf's */ SCTPDBG(SCTP_DEBUG_ASCONF1, "compose_asconf: couldn't get mbuf!\n"); sctp_m_freem(m_asconf_chk); return (NULL); } SCTP_BUF_LEN(m_asconf_chk) = sizeof(struct sctp_asconf_chunk); SCTP_BUF_LEN(m_asconf) = 0; acp = mtod(m_asconf_chk, struct sctp_asconf_chunk *); memset(acp, 0, sizeof(struct sctp_asconf_chunk)); /* save pointers to lookup address and asconf params */ lookup_ptr = (caddr_t)(acp + 1); /* after the header */ ptr = mtod(m_asconf, caddr_t); /* beginning of cluster */ /* fill in chunk header info */ acp->ch.chunk_type = SCTP_ASCONF; acp->ch.chunk_flags = 0; acp->serial_number = htonl(stcb->asoc.asconf_seq_out); stcb->asoc.asconf_seq_out++; /* add parameters... up to smallest MTU allowed */ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) { if (aa->sent) continue; /* get the parameter length */ p_length = SCTP_SIZE32(aa->ap.aph.ph.param_length); /* will it fit in current chunk? */ if ((SCTP_BUF_LEN(m_asconf) + p_length > stcb->asoc.smallest_mtu) || (SCTP_BUF_LEN(m_asconf) + p_length > MCLBYTES)) { /* won't fit, so we're done with this chunk */ break; } /* assign (and store) a correlation id */ aa->ap.aph.correlation_id = correlation_id++; /* * fill in address if we're doing a delete this is a simple * way for us to fill in the correlation address, which * should only be used by the peer if we're deleting our * source address and adding a new address (e.g. 
renumbering * case) */ if (lookup_used == 0 && (aa->special_del == 0) && aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) { struct sctp_ipv6addr_param *lookup; uint16_t p_size, addr_size; lookup = (struct sctp_ipv6addr_param *)lookup_ptr; lookup->ph.param_type = htons(aa->ap.addrp.ph.param_type); if (aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) { /* copy IPv6 address */ p_size = sizeof(struct sctp_ipv6addr_param); addr_size = sizeof(struct in6_addr); } else { /* copy IPv4 address */ p_size = sizeof(struct sctp_ipv4addr_param); addr_size = sizeof(struct in_addr); } lookup->ph.param_length = htons(SCTP_SIZE32(p_size)); memcpy(lookup->addr, &aa->ap.addrp.addr, addr_size); SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size); lookup_used = 1; } /* copy into current space */ memcpy(ptr, &aa->ap, p_length); /* network elements and update lengths */ aph = (struct sctp_asconf_paramhdr *)ptr; aap = (struct sctp_asconf_addr_param *)ptr; /* correlation_id is transparent to peer, no htonl needed */ aph->ph.param_type = htons(aph->ph.param_type); aph->ph.param_length = htons(aph->ph.param_length); aap->addrp.ph.param_type = htons(aap->addrp.ph.param_type); aap->addrp.ph.param_length = htons(aap->addrp.ph.param_length); SCTP_BUF_LEN(m_asconf) += SCTP_SIZE32(p_length); ptr += SCTP_SIZE32(p_length); /* * these params are removed off the pending list upon * getting an ASCONF-ACK back from the peer, just set flag */ aa->sent = 1; } /* check to see if the lookup addr has been populated yet */ if (lookup_used == 0) { /* NOTE: if the address param is optional, can skip this... */ /* add any valid (existing) address... */ struct sctp_ipv6addr_param *lookup; uint16_t p_size, addr_size; struct sockaddr *found_addr; caddr_t addr_ptr; if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) found_addr = sctp_find_valid_localaddr(stcb, addr_locked); else found_addr = sctp_find_valid_localaddr_ep(stcb); lookup = (struct sctp_ipv6addr_param *)lookup_ptr; if (found_addr != NULL) { switch (found_addr->sa_family) { #ifdef INET6 case AF_INET6: /* copy IPv6 address */ lookup->ph.param_type = htons(SCTP_IPV6_ADDRESS); p_size = sizeof(struct sctp_ipv6addr_param); addr_size = sizeof(struct in6_addr); addr_ptr = (caddr_t)&((struct sockaddr_in6 *) found_addr)->sin6_addr; break; #endif #ifdef INET case AF_INET: /* copy IPv4 address */ lookup->ph.param_type = htons(SCTP_IPV4_ADDRESS); p_size = sizeof(struct sctp_ipv4addr_param); addr_size = sizeof(struct in_addr); addr_ptr = (caddr_t)&((struct sockaddr_in *) found_addr)->sin_addr; break; #endif default: p_size = 0; addr_size = 0; addr_ptr = NULL; break; } lookup->ph.param_length = htons(SCTP_SIZE32(p_size)); memcpy(lookup->addr, addr_ptr, addr_size); SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size); } else { /* uh oh... don't have any address?? */ SCTPDBG(SCTP_DEBUG_ASCONF1, "compose_asconf: no lookup addr!\n"); /* XXX for now, we send a IPv4 address of 0.0.0.0 */ lookup->ph.param_type = htons(SCTP_IPV4_ADDRESS); lookup->ph.param_length = htons(SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param))); memset(lookup->addr, 0, sizeof(struct in_addr)); SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param)); } } /* chain it all together */ SCTP_BUF_NEXT(m_asconf_chk) = m_asconf; *retlen = SCTP_BUF_LEN(m_asconf_chk) + SCTP_BUF_LEN(m_asconf); acp->ch.chunk_length = htons(*retlen); return (m_asconf_chk); } /* * section to handle address changes before an association is up eg. 
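 *
 * [Illustrative aside, not part of this commit.] Wire shape of the
 * chunk sctp_compose_asconf() assembles above, per RFC 5061, section
 * 4.1.1 (schematic, not quoted from this file):
 *
 *	+------------------------------------------+
 *	| type 0xC1 | flags 0 | chunk length       |  sctp_asconf_chunk,
 *	| Sequence Number (asoc->asconf_seq_out)   |  in m_asconf_chk
 *	+------------------------------------------+
 *	| Address Parameter (the "lookup" address) |  also in m_asconf_chk
 *	+------------------------------------------+
 *	| ASCONF parameter 1: correlation id and   |  sctp_asconf_paramhdr
 *	|   a nested address TLV                   |  plus address param,
 *	+------------------------------------------+  each 32-bit padded,
 *	| ... ASCONF parameter N                   |  in the m_asconf
 *	+------------------------------------------+  cluster
 *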
changes * during INIT/INIT-ACK/COOKIE-ECHO handshake */ /* * processes the (local) addresses in the INIT-ACK chunk */ static void sctp_process_initack_addresses(struct sctp_tcb *stcb, struct mbuf *m, unsigned int offset, unsigned int length) { struct sctp_paramhdr tmp_param, *ph; uint16_t plen, ptype; struct sctp_ifa *sctp_ifa; union sctp_sockstore store; #ifdef INET6 struct sctp_ipv6addr_param addr6_store; #endif #ifdef INET struct sctp_ipv4addr_param addr4_store; #endif SCTPDBG(SCTP_DEBUG_ASCONF2, "processing init-ack addresses\n"); if (stcb == NULL) /* Un-needed check for SA */ return; /* convert to upper bound */ length += offset; if ((offset + sizeof(struct sctp_paramhdr)) > length) { return; } /* go through the addresses in the init-ack */ ph = (struct sctp_paramhdr *) sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param); while (ph != NULL) { ptype = ntohs(ph->param_type); plen = ntohs(ph->param_length); switch (ptype) { #ifdef INET6 case SCTP_IPV6_ADDRESS: { struct sctp_ipv6addr_param *a6p; /* get the entire IPv6 address param */ a6p = (struct sctp_ipv6addr_param *) sctp_m_getptr(m, offset, sizeof(struct sctp_ipv6addr_param), (uint8_t *)&addr6_store); if (plen != sizeof(struct sctp_ipv6addr_param) || a6p == NULL) { return; } memset(&store, 0, sizeof(union sctp_sockstore)); store.sin6.sin6_family = AF_INET6; store.sin6.sin6_len = sizeof(struct sockaddr_in6); store.sin6.sin6_port = stcb->rport; memcpy(&store.sin6.sin6_addr, a6p->addr, sizeof(struct in6_addr)); break; } #endif #ifdef INET case SCTP_IPV4_ADDRESS: { struct sctp_ipv4addr_param *a4p; /* get the entire IPv4 address param */ a4p = (struct sctp_ipv4addr_param *)sctp_m_getptr(m, offset, sizeof(struct sctp_ipv4addr_param), (uint8_t *)&addr4_store); if (plen != sizeof(struct sctp_ipv4addr_param) || a4p == NULL) { return; } memset(&store, 0, sizeof(union sctp_sockstore)); store.sin.sin_family = AF_INET; store.sin.sin_len = sizeof(struct sockaddr_in); store.sin.sin_port = stcb->rport; store.sin.sin_addr.s_addr = a4p->addr; break; } #endif default: goto next_addr; } /* see if this address really (still) exists */ sctp_ifa = sctp_find_ifa_by_addr(&store.sa, stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); if (sctp_ifa == NULL) { /* address doesn't exist anymore */ int status; /* are ASCONFs allowed ? */ if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) && stcb->asoc.asconf_supported) { /* queue an ASCONF DEL_IP_ADDRESS */ status = sctp_asconf_queue_sa_delete(stcb, &store.sa); /* * if queued ok, and in correct state, send * out the ASCONF. */ if (status == 0 && SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { #ifdef SCTP_TIMER_BASED_ASCONF sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, stcb->asoc.primary_destination); #else sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED); #endif } } } next_addr: /* * Sanity check: Make sure the length isn't 0, otherwise * we'll be stuck in this loop for a long time... 
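 *
 * [Illustrative aside, not part of this commit.] SCTP_SIZE32(), used
 * for every offset step here and in the zero-length sanity check just
 * below, is plain 32-bit padding; assuming the usual definition in
 * sctp_constants.h, it is equivalent to:
 *
 *	#define EXAMPLE_SIZE32(x) (((x) + 3) & ~3)  // round up to 4 bytes
 *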
*/ if (SCTP_SIZE32(plen) == 0) { SCTP_PRINTF("process_initack_addrs: bad len (%d) type=%xh\n", plen, ptype); return; } /* get next parameter */ offset += SCTP_SIZE32(plen); if ((offset + sizeof(struct sctp_paramhdr)) > length) return; ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param); } /* while */ } /* FIX ME: need to verify return result for v6 address type if v6 disabled */ /* * checks to see if a specific address is in the initack address list returns * 1 if found, 0 if not */ static uint32_t sctp_addr_in_initack(struct mbuf *m, uint32_t offset, uint32_t length, struct sockaddr *sa) { struct sctp_paramhdr tmp_param, *ph; uint16_t plen, ptype; #ifdef INET struct sockaddr_in *sin; struct sctp_ipv4addr_param *a4p; struct sctp_ipv6addr_param addr4_store; #endif #ifdef INET6 struct sockaddr_in6 *sin6; struct sctp_ipv6addr_param *a6p; struct sctp_ipv6addr_param addr6_store; struct sockaddr_in6 sin6_tmp; #endif switch (sa->sa_family) { #ifdef INET case AF_INET: break; #endif #ifdef INET6 case AF_INET6: break; #endif default: return (0); } SCTPDBG(SCTP_DEBUG_ASCONF2, "find_initack_addr: starting search for "); SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa); /* convert to upper bound */ length += offset; if ((offset + sizeof(struct sctp_paramhdr)) > length) { SCTPDBG(SCTP_DEBUG_ASCONF1, "find_initack_addr: invalid offset?\n"); return (0); } /* go through the addresses in the init-ack */ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param); while (ph != NULL) { ptype = ntohs(ph->param_type); plen = ntohs(ph->param_length); switch (ptype) { #ifdef INET6 case SCTP_IPV6_ADDRESS: if (sa->sa_family == AF_INET6) { /* get the entire IPv6 address param */ if (plen != sizeof(struct sctp_ipv6addr_param)) { break; } /* get the entire IPv6 address param */ a6p = (struct sctp_ipv6addr_param *) sctp_m_getptr(m, offset, sizeof(struct sctp_ipv6addr_param), (uint8_t *)&addr6_store); if (a6p == NULL) { return (0); } sin6 = (struct sockaddr_in6 *)sa; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { /* create a copy and clear scope */ memcpy(&sin6_tmp, sin6, sizeof(struct sockaddr_in6)); sin6 = &sin6_tmp; in6_clearscope(&sin6->sin6_addr); } if (memcmp(&sin6->sin6_addr, a6p->addr, sizeof(struct in6_addr)) == 0) { /* found it */ return (1); } } break; #endif /* INET6 */ #ifdef INET case SCTP_IPV4_ADDRESS: if (sa->sa_family == AF_INET) { if (plen != sizeof(struct sctp_ipv4addr_param)) { break; } /* get the entire IPv4 address param */ a4p = (struct sctp_ipv4addr_param *) sctp_m_getptr(m, offset, sizeof(struct sctp_ipv4addr_param), (uint8_t *)&addr4_store); if (a4p == NULL) { return (0); } sin = (struct sockaddr_in *)sa; if (sin->sin_addr.s_addr == a4p->addr) { /* found it */ return (1); } } break; #endif default: break; } /* get next parameter */ offset += SCTP_SIZE32(plen); if (offset + sizeof(struct sctp_paramhdr) > length) { return (0); } ph = (struct sctp_paramhdr *) sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param); } /* while */ /* not found! */ return (0); } /* * makes sure that the current endpoint local addr list is consistent with * the new association (eg. 
subset bound, asconf allowed) adds addresses as * necessary */ static void sctp_check_address_list_ep(struct sctp_tcb *stcb, struct mbuf *m, int offset, int length, struct sockaddr *init_addr) { struct sctp_laddr *laddr; /* go through the endpoint list */ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) { /* be paranoid and validate the laddr */ if (laddr->ifa == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "check_addr_list_ep: laddr->ifa is NULL"); continue; } if (laddr->ifa == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "check_addr_list_ep: laddr->ifa->ifa_addr is NULL"); continue; } /* do i have it implicitly? */ if (sctp_cmpaddr(&laddr->ifa->address.sa, init_addr)) { continue; } /* check to see if in the init-ack */ if (!sctp_addr_in_initack(m, offset, length, &laddr->ifa->address.sa)) { /* try to add it */ sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb, laddr->ifa, SCTP_ADD_IP_ADDRESS, SCTP_ADDR_NOT_LOCKED); } } } /* * makes sure that the current kernel address list is consistent with the new * association (with all addrs bound) adds addresses as necessary */ static void sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset, int length, struct sockaddr *init_addr, uint16_t local_scope, uint16_t site_scope, uint16_t ipv4_scope, uint16_t loopback_scope) { struct sctp_vrf *vrf = NULL; struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa; uint32_t vrf_id; #ifdef INET struct sockaddr_in *sin; #endif #ifdef INET6 struct sockaddr_in6 *sin6; #endif if (stcb) { vrf_id = stcb->asoc.vrf_id; } else { return; } SCTP_IPI_ADDR_RLOCK(); vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { SCTP_IPI_ADDR_RUNLOCK(); return; } /* go through all our known interfaces */ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { if (loopback_scope == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { /* skip loopback interface */ continue; } /* go through each interface address */ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { /* do i have it implicitly? 
*/ if (sctp_cmpaddr(&sctp_ifa->address.sa, init_addr)) { continue; } switch (sctp_ifa->address.sa.sa_family) { #ifdef INET case AF_INET: sin = &sctp_ifa->address.sin; if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; } if ((ipv4_scope == 0) && (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { /* private address not in scope */ continue; } break; #endif #ifdef INET6 case AF_INET6: sin6 = &sctp_ifa->address.sin6; if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; } if ((local_scope == 0) && (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) { continue; } if ((site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { continue; } break; #endif default: break; } /* check to see if in the init-ack */ if (!sctp_addr_in_initack(m, offset, length, &sctp_ifa->address.sa)) { /* try to add it */ sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb, sctp_ifa, SCTP_ADD_IP_ADDRESS, SCTP_ADDR_LOCKED); } } /* end foreach ifa */ } /* end foreach ifn */ SCTP_IPI_ADDR_RUNLOCK(); } /* * validates an init-ack chunk (from a cookie-echo) with current addresses * adds addresses from the init-ack into our local address list, if needed * queues asconf adds/deletes addresses as needed and makes appropriate list * changes for source address selection m, offset: points to the start of the * address list in an init-ack chunk length: total length of the address * params only init_addr: address where my INIT-ACK was sent from */ void sctp_check_address_list(struct sctp_tcb *stcb, struct mbuf *m, int offset, int length, struct sockaddr *init_addr, uint16_t local_scope, uint16_t site_scope, uint16_t ipv4_scope, uint16_t loopback_scope) { /* process the local addresses in the initack */ sctp_process_initack_addresses(stcb, m, offset, length); if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* bound all case */ sctp_check_address_list_all(stcb, m, offset, length, init_addr, local_scope, site_scope, ipv4_scope, loopback_scope); } else { /* subset bound case */ if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) { /* asconf's allowed */ sctp_check_address_list_ep(stcb, m, offset, length, init_addr); } /* else, no asconfs allowed, so what we sent is what we get */ } } /* * sctp_bindx() support */ uint32_t sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa, uint32_t type, uint32_t vrf_id, struct sctp_ifa *sctp_ifap) { struct sctp_ifa *ifa; struct sctp_laddr *laddr, *nladdr; if (sa->sa_len == 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL); return (EINVAL); } if (sctp_ifap) { ifa = sctp_ifap; } else if (type == SCTP_ADD_IP_ADDRESS) { /* For an add the address MUST be on the system */ ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); } else if (type == SCTP_DEL_IP_ADDRESS) { /* For a delete we need to find it in the inp */ ifa = sctp_find_ifa_in_ep(inp, sa, SCTP_ADDR_NOT_LOCKED); } else { ifa = NULL; } if (ifa != NULL) { if (type == SCTP_ADD_IP_ADDRESS) { sctp_add_local_addr_ep(inp, ifa, type); } else if (type == SCTP_DEL_IP_ADDRESS) { if (inp->laddr_count < 2) { /* can't delete the last local address */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL); return (EINVAL); } LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (ifa == laddr->ifa) { /* Mark in the delete */ laddr->action = type; } } } if (LIST_EMPTY(&inp->sctp_asoc_list)) { /* * There is no need to start the iterator if the inp * has no associations. 
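 *
 * [Illustrative aside, not part of this commit.]
 * sctp_addr_mgmt_ep_sa() is the kernel side of the sctp_bindx(3) API
 * named above; from userland the same operation looks roughly like
 * this (standard RFC 6458 interface):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int
 *	example_add_addr(int sd, struct sockaddr_in *sin)
 *	{
 *		// ends up in sctp_addr_mgmt_ep_sa() as SCTP_ADD_IP_ADDRESS
 *		return (sctp_bindx(sd, (struct sockaddr *)sin, 1,
 *		    SCTP_BINDX_ADD_ADDR));
 *	}
 *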
*/ if (type == SCTP_DEL_IP_ADDRESS) { LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) { if (laddr->ifa == ifa) { sctp_del_local_addr_ep(inp, ifa); } } } } else { struct sctp_asconf_iterator *asc; struct sctp_laddr *wi; int ret; SCTP_MALLOC(asc, struct sctp_asconf_iterator *, sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); if (asc == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM); return (ENOMEM); } wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); if (wi == NULL) { SCTP_FREE(asc, SCTP_M_ASC_IT); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM); return (ENOMEM); } LIST_INIT(&asc->list_of_work); asc->cnt = 1; SCTP_INCR_LADDR_COUNT(); wi->ifa = ifa; wi->action = type; atomic_add_int(&ifa->refcount, 1); LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); ret = sctp_initiate_iterator(sctp_asconf_iterator_ep, sctp_asconf_iterator_stcb, sctp_asconf_iterator_ep_end, SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE, (void *)asc, 0, sctp_asconf_iterator_end, inp, 0); if (ret) { SCTP_PRINTF("Failed to initiate iterator for addr_mgmt_ep_sa\n"); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EFAULT); sctp_asconf_iterator_end(asc, 0); return (EFAULT); } } return (0); } else { /* invalid address! */ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EADDRNOTAVAIL); return (EADDRNOTAVAIL); } } void sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb, struct sctp_nets *net) { struct sctp_asconf_addr *aa; struct sctp_ifa *sctp_ifap; struct sctp_asconf_tag_param *vtag; #ifdef INET struct sockaddr_in *to; #endif #ifdef INET6 struct sockaddr_in6 *to6; #endif if (net == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing net\n"); return; } if (stcb == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing stcb\n"); return; } /* * Need to have in the asconf: - vtagparam(my_vtag/peer_vtag) - * add(0.0.0.0) - del(0.0.0.0) - Any global addresses add(addr) */ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), SCTP_M_ASC_ADDR); if (aa == NULL) { /* didn't get memory */ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: failed to get memory!\n"); return; } aa->special_del = 0; /* fill in asconf address parameter fields */ /* top level elements are "networked" during send */ aa->ifa = NULL; aa->sent = 0; /* clear sent flag */ vtag = (struct sctp_asconf_tag_param *)&aa->ap.aph; vtag->aph.ph.param_type = SCTP_NAT_VTAGS; vtag->aph.ph.param_length = sizeof(struct sctp_asconf_tag_param); vtag->local_vtag = htonl(stcb->asoc.my_vtag); vtag->remote_vtag = htonl(stcb->asoc.peer_vtag); TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), SCTP_M_ASC_ADDR); if (aa == NULL) { /* didn't get memory */ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: failed to get memory!\n"); return; } memset(aa, 0, sizeof(struct sctp_asconf_addr)); /* fill in asconf address parameter fields */ /* ADD(0.0.0.0) */ switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS; aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param); aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS; aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param); /* No need to add an address, we are using 0.0.0.0 */ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); break; #endif #ifdef INET6 case AF_INET6: 
aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS; aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param); aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS; aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param); /* No need to add an address, we are using 0.0.0.0 */ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); break; #endif default: SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: unknown address family\n"); SCTP_FREE(aa, SCTP_M_ASC_ADDR); return; } SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), SCTP_M_ASC_ADDR); if (aa == NULL) { /* didn't get memory */ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: failed to get memory!\n"); return; } memset(aa, 0, sizeof(struct sctp_asconf_addr)); /* fill in asconf address parameter fields */ /* DEL(0.0.0.0) */ switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS; aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param); aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS; aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param); /* No need to add an address, we are using 0.0.0.0 */ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); break; #endif #ifdef INET6 case AF_INET6: aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS; aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param); aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS; aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param); /* No need to add an address, we are using 0.0.0.0 */ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); break; #endif default: SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: unknown address family\n"); SCTP_FREE(aa, SCTP_M_ASC_ADDR); return; } /* Now we must hunt the addresses and add all global addresses */ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { struct sctp_vrf *vrf = NULL; struct sctp_ifn *sctp_ifnp; uint32_t vrf_id; vrf_id = stcb->sctp_ep->def_vrf_id; vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { goto skip_rest; } SCTP_IPI_ADDR_RLOCK(); LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) { LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { switch (sctp_ifap->address.sa.sa_family) { #ifdef INET case AF_INET: to = &sctp_ifap->address.sin; if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &to->sin_addr) != 0) { continue; } if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) { continue; } if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) { continue; } break; #endif #ifdef INET6 case AF_INET6: to6 = &sctp_ifap->address.sin6; if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &to6->sin6_addr) != 0) { continue; } if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) { continue; } if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) { continue; } break; #endif default: continue; } sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS); } } SCTP_IPI_ADDR_RUNLOCK(); } else { struct sctp_laddr *laddr; LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { continue; } if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) /* * Address being deleted by the system, don't * list. */ continue; if (laddr->action == SCTP_DEL_IP_ADDRESS) { /* * Address being deleted on this ep don't * list.
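 *
 * [Illustrative aside, not part of this commit.] Matching the plan
 * comment at the top of this function, the NAT state-update ASCONF
 * queued here carries, in order (schematic):
 *
 *	SCTP_NAT_VTAGS       local_vtag / remote_vtag
 *	SCTP_ADD_IP_ADDRESS  wildcard address (0.0.0.0 or ::)
 *	SCTP_DEL_IP_ADDRESS  wildcard address (0.0.0.0 or ::)
 *	SCTP_ADD_IP_ADDRESS  one per usable global address
 *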
*/ continue; } sctp_ifap = laddr->ifa; switch (sctp_ifap->address.sa.sa_family) { #ifdef INET case AF_INET: to = &sctp_ifap->address.sin; if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) { continue; } if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) { continue; } break; #endif #ifdef INET6 case AF_INET6: to6 = &sctp_ifap->address.sin6; if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) { continue; } if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) { continue; } break; #endif default: continue; } sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS); } } skip_rest: /* Now we must send the asconf into the queue */ sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); } Index: head/sys/netinet/sctp_os_bsd.h =================================================================== --- head/sys/netinet/sctp_os_bsd.h (revision 350587) +++ head/sys/netinet/sctp_os_bsd.h (revision 350588) @@ -1,481 +1,481 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #ifndef _NETINET_SCTP_OS_BSD_H_ #define _NETINET_SCTP_OS_BSD_H_ /* * includes */ #include "opt_inet6.h" #include "opt_inet.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #include #include #include #include #endif /* INET6 */ #include #include #include /* Declare all the malloc names for all the various mallocs */ MALLOC_DECLARE(SCTP_M_MAP); MALLOC_DECLARE(SCTP_M_STRMI); MALLOC_DECLARE(SCTP_M_STRMO); MALLOC_DECLARE(SCTP_M_ASC_ADDR); MALLOC_DECLARE(SCTP_M_ASC_IT); MALLOC_DECLARE(SCTP_M_AUTH_CL); MALLOC_DECLARE(SCTP_M_AUTH_KY); MALLOC_DECLARE(SCTP_M_AUTH_HL); MALLOC_DECLARE(SCTP_M_AUTH_IF); MALLOC_DECLARE(SCTP_M_STRESET); MALLOC_DECLARE(SCTP_M_CMSG); MALLOC_DECLARE(SCTP_M_COPYAL); MALLOC_DECLARE(SCTP_M_VRF); MALLOC_DECLARE(SCTP_M_IFA); MALLOC_DECLARE(SCTP_M_IFN); MALLOC_DECLARE(SCTP_M_TIMW); MALLOC_DECLARE(SCTP_M_MVRF); MALLOC_DECLARE(SCTP_M_ITER); MALLOC_DECLARE(SCTP_M_SOCKOPT); MALLOC_DECLARE(SCTP_M_MCORE); #if defined(SCTP_LOCAL_TRACE_BUF) #define SCTP_GET_CYCLECOUNT get_cyclecount() #define SCTP_CTR6 sctp_log_trace #else #define SCTP_CTR6 CTR6 #endif /* * Macros to expand out globals defined by various modules * to either a real global or a virtualized instance of one, * depending on whether VIMAGE is defined. */ /* then define the macro(s) that hook into the vimage macros */ #define MODULE_GLOBAL(__SYMBOL) V_##__SYMBOL #define V_system_base_info VNET(system_base_info) #define SCTP_BASE_INFO(__m) V_system_base_info.sctppcbinfo.__m #define SCTP_BASE_STATS V_system_base_info.sctpstat #define SCTP_BASE_STAT(__m) V_system_base_info.sctpstat.__m #define SCTP_BASE_SYSCTL(__m) V_system_base_info.sctpsysctl.__m #define SCTP_BASE_VAR(__m) V_system_base_info.__m #define SCTP_PRINTF(params...) printf(params) #if defined(SCTP_DEBUG) #define SCTPDBG(level, params...) \ { \ do { \ if (SCTP_BASE_SYSCTL(sctp_debug_on) & level ) { \ SCTP_PRINTF(params); \ } \ } while (0); \ } #define SCTPDBG_ADDR(level, addr) \ { \ do { \ if (SCTP_BASE_SYSCTL(sctp_debug_on) & level ) { \ sctp_print_address(addr); \ } \ } while (0); \ } #else #define SCTPDBG(level, params...) 
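/*
 * Illustrative aside, not part of this commit: the do { } while (0)
 * inside SCTPDBG() is the usual idiom for letting a multi-statement
 * macro behave as a single statement at call sites such as
 *
 *	if (m_reply == NULL) {
 *		SCTPDBG(SCTP_DEBUG_ASCONF1, "couldn't get mbuf!\n");
 *		return (NULL);
 *	}
 *
 * while the empty definitions in this #else branch compile the calls
 * away entirely when SCTP_DEBUG is not set.
 */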
#define SCTPDBG_ADDR(level, addr) #endif #ifdef SCTP_LTRACE_CHUNKS #define SCTP_LTRACE_CHK(a, b, c, d) if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_CHUNK_ENABLE) SCTP_CTR6(KTR_SUBSYS, "SCTP:%d[%d]:%x-%x-%x-%x", SCTP_LOG_CHUNK_PROC, 0, a, b, c, d) #else #define SCTP_LTRACE_CHK(a, b, c, d) #endif #ifdef SCTP_LTRACE_ERRORS #define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err) \ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_ERROR_ENABLE) \ SCTP_PRINTF("mbuf:%p inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \ m, inp, stcb, net, file, __LINE__, err); #define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err) \ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_ERROR_ENABLE) \ SCTP_PRINTF("inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \ inp, stcb, net, file, __LINE__, err); #else #define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err) #define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err) #endif /* * Local address and interface list handling */ #define SCTP_MAX_VRF_ID 0 #define SCTP_SIZE_OF_VRF_HASH 3 #define SCTP_IFNAMSIZ IFNAMSIZ #define SCTP_DEFAULT_VRFID 0 #define SCTP_VRF_ADDR_HASH_SIZE 16 #define SCTP_VRF_IFN_HASH_SIZE 3 #define SCTP_INIT_VRF_TABLEID(vrf) #define SCTP_IFN_IS_IFT_LOOP(ifn) ((ifn)->ifn_type == IFT_LOOP) #define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifa && (ro)->ro_rt->rt_ifa->ifa_ifp && (ro)->ro_rt->rt_ifa->ifa_ifp->if_type == IFT_LOOP) /* * Access to IFN's to help with src-addr-selection */ /* This could return VOID if the index works but for BSD we provide both. */ #define SCTP_GET_IFN_VOID_FROM_ROUTE(ro) (void *)ro->ro_rt->rt_ifp #define SCTP_GET_IF_INDEX_FROM_ROUTE(ro) (ro)->ro_rt->rt_ifp->if_index #define SCTP_ROUTE_HAS_VALID_IFN(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifp) /* * general memory allocation */ #define SCTP_MALLOC(var, type, size, name) \ do { \ var = (type)malloc(size, name, M_NOWAIT); \ } while (0) #define SCTP_FREE(var, type) free(var, type) #define SCTP_MALLOC_SONAME(var, type, size) \ do { \ var = (type)malloc(size, M_SONAME, M_WAITOK | M_ZERO); \ } while (0) #define SCTP_FREE_SONAME(var) free(var, M_SONAME) #define SCTP_PROCESS_STRUCT struct proc * /* * zone allocation functions */ #include /* SCTP_ZONE_INIT: initialize the zone */ typedef struct uma_zone *sctp_zone_t; #define SCTP_ZONE_INIT(zone, name, size, number) { \ zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,\ 0); \ uma_zone_set_max(zone, number); \ } #define SCTP_ZONE_DESTROY(zone) uma_zdestroy(zone) /* SCTP_ZONE_GET: allocate element from the zone */ #define SCTP_ZONE_GET(zone, type) \ (type *)uma_zalloc(zone, M_NOWAIT); /* SCTP_ZONE_FREE: free element from the zone */ #define SCTP_ZONE_FREE(zone, element) \ uma_zfree(zone, element); #define SCTP_HASH_INIT(size, hashmark) hashinit_flags(size, M_PCB, hashmark, HASH_NOWAIT) #define SCTP_HASH_FREE(table, hashmark) hashdestroy(table, M_PCB, hashmark) #define SCTP_M_COPYM m_copym /* * timers */ #include typedef struct callout sctp_os_timer_t; #define SCTP_OS_TIMER_INIT(tmr) callout_init(tmr, 1) #define SCTP_OS_TIMER_START callout_reset #define SCTP_OS_TIMER_STOP callout_stop #define SCTP_OS_TIMER_STOP_DRAIN callout_drain #define SCTP_OS_TIMER_PENDING callout_pending #define SCTP_OS_TIMER_ACTIVE callout_active #define SCTP_OS_TIMER_DEACTIVATE callout_deactivate #define sctp_get_tick_count() (ticks) #define SCTP_UNUSED __attribute__((unused)) /* * Functions */ /* Mbuf manipulation and access macros */ #define SCTP_BUF_LEN(m) (m->m_len) #define SCTP_BUF_NEXT(m) 
(m->m_next) #define SCTP_BUF_NEXT_PKT(m) (m->m_nextpkt) #define SCTP_BUF_RESV_UF(m, size) m->m_data += size #define SCTP_BUF_AT(m, size) m->m_data + size #define SCTP_BUF_IS_EXTENDED(m) (m->m_flags & M_EXT) #define SCTP_BUF_SIZE M_SIZE #define SCTP_BUF_TYPE(m) (m->m_type) #define SCTP_BUF_RECVIF(m) (m->m_pkthdr.rcvif) #define SCTP_BUF_PREPEND M_PREPEND #define SCTP_ALIGN_TO_END(m, len) M_ALIGN(m, len) /* We make it so if you have up to 4 threads * writing based on the default size of * the packet log 65 k, that would be * 4 16k packets before we would hit * a problem. */ #define SCTP_PKTLOG_WRITERS_NEED_LOCK 3 /*************************/ /* MTU */ /*************************/ #define SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, af) ((struct ifnet *)ifn)->if_mtu #define SCTP_GATHER_MTU_FROM_ROUTE(sctp_ifa, sa, rt) ((uint32_t)((rt != NULL) ? rt->rt_mtu : 0)) #define SCTP_GATHER_MTU_FROM_INTFC(sctp_ifn) ((sctp_ifn->ifn_p != NULL) ? ((struct ifnet *)(sctp_ifn->ifn_p))->if_mtu : 0) #define SCTP_SET_MTU_OF_ROUTE(sa, rt, mtu) do { \ if (rt != NULL) \ rt->rt_mtu = mtu; \ } while(0) /* (de-)register interface event notifications */ #define SCTP_REGISTER_INTERFACE(ifhandle, af) #define SCTP_DEREGISTER_INTERFACE(ifhandle, af) /*************************/ /* These are for logging */ /*************************/ /* return the base ext data pointer */ #define SCTP_BUF_EXTEND_BASE(m) (m->m_ext.ext_buf) /* return the refcnt of the data pointer */ #define SCTP_BUF_EXTEND_REFCNT(m) (*m->m_ext.ext_cnt) /* return any buffer related flags, this is * used beyond logging for apple only. */ #define SCTP_BUF_GET_FLAGS(m) (m->m_flags) /* For BSD this just accesses the M_PKTHDR length * so it operates on an mbuf with hdr flag. Other * O/S's may have separate packet header and mbuf * chain pointers.. thus the macro. */ #define SCTP_HEADER_TO_CHAIN(m) (m) #define SCTP_DETACH_HEADER_FROM_CHAIN(m) #define SCTP_HEADER_LEN(m) ((m)->m_pkthdr.len) #define SCTP_GET_HEADER_FOR_OUTPUT(o_pak) 0 #define SCTP_RELEASE_HEADER(m) #define SCTP_RELEASE_PKT(m) sctp_m_freem(m) #define SCTP_ENABLE_UDP_CSUM(m) do { \ m->m_pkthdr.csum_flags = CSUM_UDP; \ m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); \ } while (0) #define SCTP_GET_PKT_VRFID(m, vrf_id) ((vrf_id = SCTP_DEFAULT_VRFID) != SCTP_DEFAULT_VRFID) /* Attach the chain of data into the sendable packet. */ #define SCTP_ATTACH_CHAIN(pak, m, packet_length) do { \ pak = m; \ pak->m_pkthdr.len = packet_length; \ } while(0) /* Other m_pkthdr type things */ #define SCTP_IS_IT_BROADCAST(dst, m) ((m->m_flags & M_PKTHDR) ? in_broadcast(dst, m->m_pkthdr.rcvif) : 0) #define SCTP_IS_IT_LOOPBACK(m) ((m->m_flags & M_PKTHDR) && ((m->m_pkthdr.rcvif == NULL) || (m->m_pkthdr.rcvif->if_type == IFT_LOOP))) /* This converts any input packet header * into the chain of data holders, for BSD * its a NOP. */ /* get the v6 hop limit */ -#define SCTP_GET_HLIM(inp, ro) in6_selecthlim((struct inpcb *)&inp->ip_inp.inp, (ro ? (ro->ro_rt ? (ro->ro_rt->rt_ifp) : (NULL)) : (NULL))); +#define SCTP_GET_HLIM(inp, ro) in6_selecthlim(&inp->ip_inp.inp, (ro ? (ro->ro_rt ? (ro->ro_rt->rt_ifp) : (NULL)) : (NULL))); /* is the endpoint v6only? */ -#define SCTP_IPV6_V6ONLY(inp) (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY) +#define SCTP_IPV6_V6ONLY(sctp_inpcb) ((sctp_inpcb)->ip_inp.inp.inp_flags & IN6P_IPV6_V6ONLY) /* is the socket non-blocking? 
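 *
 * [Illustrative aside, not part of this commit.] The two redefinitions
 * just above are the heart of r350588: SCTP_IPV6_V6ONLY() now takes
 * the struct sctp_inpcb pointer its callers actually hold, and
 * SCTP_GET_HLIM() drops a needless cast. Before/after at a typical
 * call site (assembled from the hunks in this diff):
 *
 *	// before: callers reached into the embedded inpcb themselves
 *	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
 *	    SCTP_IPV6_V6ONLY(&inp->ip_inp.inp))
 *		return;
 *
 *	// after: the macro dereferences ip_inp.inp internally
 *	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
 *	    SCTP_IPV6_V6ONLY(inp))
 *		return;
 *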
*/ #define SCTP_SO_IS_NBIO(so) ((so)->so_state & SS_NBIO) #define SCTP_SET_SO_NBIO(so) ((so)->so_state |= SS_NBIO) #define SCTP_CLEAR_SO_NBIO(so) ((so)->so_state &= ~SS_NBIO) /* get the socket type */ #define SCTP_SO_TYPE(so) ((so)->so_type) /* Use a macro for renaming sb_cc to sb_acc. * Initially sb_ccc was used, but this broke select() when used * with SCTP sockets. */ #define sb_cc sb_acc /* reserve sb space for a socket */ #define SCTP_SORESERVE(so, send, recv) soreserve(so, send, recv) /* wakeup a socket */ #define SCTP_SOWAKEUP(so) wakeup(&(so)->so_timeo) /* clear the socket buffer state */ #define SCTP_SB_CLEAR(sb) \ (sb).sb_cc = 0; \ (sb).sb_mb = NULL; \ (sb).sb_mbcnt = 0; #define SCTP_SB_LIMIT_RCV(so) (SOLISTENING(so) ? so->sol_sbrcv_hiwat : so->so_rcv.sb_hiwat) #define SCTP_SB_LIMIT_SND(so) (SOLISTENING(so) ? so->sol_sbsnd_hiwat : so->so_snd.sb_hiwat) /* * routes, output, etc. */ typedef struct route sctp_route_t; typedef struct rtentry sctp_rtentry_t; #define SCTP_RTALLOC(ro, vrf_id, fibnum) \ rtalloc_ign_fib((struct route *)ro, 0UL, fibnum) /* * SCTP protocol specific mbuf flags. */ #define M_NOTIFICATION M_PROTO1 /* SCTP notification */ /* * IP output routines */ #define SCTP_IP_OUTPUT(result, o_pak, ro, stcb, vrf_id) \ { \ int o_flgs = IP_RAWOUTPUT; \ struct sctp_tcb *local_stcb = stcb; \ if (local_stcb && \ local_stcb->sctp_ep && \ local_stcb->sctp_ep->sctp_socket) \ o_flgs |= local_stcb->sctp_ep->sctp_socket->so_options & SO_DONTROUTE; \ m_clrprotoflags(o_pak); \ result = ip_output(o_pak, NULL, ro, o_flgs, 0, NULL); \ } #define SCTP_IP6_OUTPUT(result, o_pak, ro, ifp, stcb, vrf_id) \ { \ struct sctp_tcb *local_stcb = stcb; \ m_clrprotoflags(o_pak); \ if (local_stcb && local_stcb->sctp_ep) \ result = ip6_output(o_pak, \ ((struct inpcb *)(local_stcb->sctp_ep))->in6p_outputopts, \ (ro), 0, 0, ifp, NULL); \ else \ result = ip6_output(o_pak, NULL, (ro), 0, 0, ifp, NULL); \ } struct mbuf * sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int allonebuf, int type); /* * SCTP AUTH */ #define SCTP_READ_RANDOM(buf, len) arc4rand(buf, len, 0) /* map standard crypto API names */ #define SCTP_SHA1_CTX SHA1_CTX #define SCTP_SHA1_INIT SHA1Init #define SCTP_SHA1_UPDATE SHA1Update #define SCTP_SHA1_FINAL(x,y) SHA1Final((caddr_t)x, y) #define SCTP_SHA256_CTX SHA256_CTX #define SCTP_SHA256_INIT SHA256_Init #define SCTP_SHA256_UPDATE SHA256_Update #define SCTP_SHA256_FINAL(x,y) SHA256_Final((caddr_t)x, y) #define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1) #if defined(INVARIANTS) #define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \ { \ int32_t oldval; \ oldval = atomic_fetchadd_int(addr, -val); \ if (oldval < val) { \ panic("Counter goes negative"); \ } \ } #else #define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \ { \ int32_t oldval; \ oldval = atomic_fetchadd_int(addr, -val); \ if (oldval < val) { \ *addr = 0; \ } \ } #endif #define SCTP_IS_LISTENING(inp) ((inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) != 0) #endif Index: head/sys/netinet/sctp_pcb.c =================================================================== --- head/sys/netinet/sctp_pcb.c (revision 350587) +++ head/sys/netinet/sctp_pcb.c (revision 350588) @@ -1,7179 +1,7180 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(INET) || defined(INET6) #include #endif #ifdef INET6 #include #endif #include #include #include VNET_DEFINE(struct sctp_base_info, system_base_info); /* FIX: we don't handle multiple link local scopes */ /* "scopeless" replacement IN6_ARE_ADDR_EQUAL */ #ifdef INET6 int SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b) { struct sockaddr_in6 tmp_a, tmp_b; memcpy(&tmp_a, a, sizeof(struct sockaddr_in6)); if (sa6_embedscope(&tmp_a, MODULE_GLOBAL(ip6_use_defzone)) != 0) { return (0); } memcpy(&tmp_b, b, sizeof(struct sockaddr_in6)); if (sa6_embedscope(&tmp_b, MODULE_GLOBAL(ip6_use_defzone)) != 0) { return (0); } return (IN6_ARE_ADDR_EQUAL(&tmp_a.sin6_addr, &tmp_b.sin6_addr)); } #endif void sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb) { /* * We really don't need to lock this, but I will just because it * does not hurt. */ SCTP_INP_INFO_RLOCK(); spcb->ep_count = SCTP_BASE_INFO(ipi_count_ep); spcb->asoc_count = SCTP_BASE_INFO(ipi_count_asoc); spcb->laddr_count = SCTP_BASE_INFO(ipi_count_laddr); spcb->raddr_count = SCTP_BASE_INFO(ipi_count_raddr); spcb->chk_count = SCTP_BASE_INFO(ipi_count_chunk); spcb->readq_count = SCTP_BASE_INFO(ipi_count_readq); spcb->stream_oque = SCTP_BASE_INFO(ipi_count_strmoq); spcb->free_chunks = SCTP_BASE_INFO(ipi_free_chunks); SCTP_INP_INFO_RUNLOCK(); } /*- * Addresses are added to VRF's (Virtual Router's). For BSD we * have only the default VRF 0. We maintain a hash list of * VRF's. Each VRF has its own list of sctp_ifn's. Each of * these has a list of addresses. When we add a new address * to a VRF we lookup the ifn/ifn_index, if the ifn does * not exist we create it and add it to the list of IFN's * within the VRF. Once we have the sctp_ifn, we add the * address to the list. So we look something like: * * hash-vrf-table * vrf-> ifn-> ifn -> ifn * vrf | * ... 
+--ifa-> ifa -> ifa * vrf * * We keep these separate lists since the SCTP subsystem will * point to these from its source address selection nets structure. * When an address is deleted it does not happen right away on * the SCTP side, it gets scheduled. What we do when a * delete happens is immediately remove the address from * the master list and decrement the refcount. As our * addip iterator works through and frees the src address * selection pointing to the sctp_ifa, eventually the refcount * will reach 0 and we will delete it. Note that it is assumed * that any locking on system level ifn/ifa is done at the * caller of these functions and these routines will only * lock the SCTP structures as they add or delete things. * * Other notes on VRF concepts. * - An endpoint can be in multiple VRF's * - An association lives within a VRF and only one VRF. * - Any incoming packet we can deduce the VRF for by * looking at the mbuf/pak inbound (for BSD its VRF=0 :D) * - Any downward send call or connect call must supply the * VRF via ancillary data or via some sort of set default * VRF socket option call (again for BSD no brainer since * the VRF is always 0). * - An endpoint may add multiple VRF's to it. * - Listening sockets can accept associations in any * of the VRF's they are in but the assoc will end up * in only one VRF (gotten from the packet or connect/send). * */ struct sctp_vrf * sctp_allocate_vrf(int vrf_id) { struct sctp_vrf *vrf = NULL; struct sctp_vrflist *bucket; /* First allocate the VRF structure */ vrf = sctp_find_vrf(vrf_id); if (vrf) { /* Already allocated */ return (vrf); } SCTP_MALLOC(vrf, struct sctp_vrf *, sizeof(struct sctp_vrf), SCTP_M_VRF); if (vrf == NULL) { /* No memory */ #ifdef INVARIANTS panic("No memory for VRF:%d", vrf_id); #endif return (NULL); } /* setup the VRF */ memset(vrf, 0, sizeof(struct sctp_vrf)); vrf->vrf_id = vrf_id; LIST_INIT(&vrf->ifnlist); vrf->total_ifa_count = 0; vrf->refcount = 0; /* now also setup table ids */ SCTP_INIT_VRF_TABLEID(vrf); /* Init the HASH of addresses */ vrf->vrf_addr_hash = SCTP_HASH_INIT(SCTP_VRF_ADDR_HASH_SIZE, &vrf->vrf_addr_hashmark); if (vrf->vrf_addr_hash == NULL) { /* No memory */ #ifdef INVARIANTS panic("No memory for VRF:%d", vrf_id); #endif SCTP_FREE(vrf, SCTP_M_VRF); return (NULL); } /* Add it to the hash table */ bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))]; LIST_INSERT_HEAD(bucket, vrf, next_vrf); atomic_add_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1); return (vrf); } struct sctp_ifn * sctp_find_ifn(void *ifn, uint32_t ifn_index) { struct sctp_ifn *sctp_ifnp; struct sctp_ifnlist *hash_ifn_head; /* * We assume the lock is held for the addresses if that's wrong * problems could occur :-) */ hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))]; LIST_FOREACH(sctp_ifnp, hash_ifn_head, next_bucket) { if (sctp_ifnp->ifn_index == ifn_index) { return (sctp_ifnp); } if (sctp_ifnp->ifn_p && ifn && (sctp_ifnp->ifn_p == ifn)) { return (sctp_ifnp); } } return (NULL); } struct sctp_vrf * sctp_find_vrf(uint32_t vrf_id) { struct sctp_vrflist *bucket; struct sctp_vrf *liste; bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))]; LIST_FOREACH(liste, bucket, next_vrf) { if (vrf_id == liste->vrf_id) { return (liste); } } return (NULL); } void sctp_free_vrf(struct sctp_vrf *vrf) { if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&vrf->refcount)) { if (vrf->vrf_addr_hash) { SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark); vrf->vrf_addr_hash = 
NULL; } /* We zero'd the count */ LIST_REMOVE(vrf, next_vrf); SCTP_FREE(vrf, SCTP_M_VRF); atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1); } } void sctp_free_ifn(struct sctp_ifn *sctp_ifnp) { if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifnp->refcount)) { /* We zero'd the count */ if (sctp_ifnp->vrf) { sctp_free_vrf(sctp_ifnp->vrf); } SCTP_FREE(sctp_ifnp, SCTP_M_IFN); atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifns), 1); } } void sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu) { struct sctp_ifn *sctp_ifnp; sctp_ifnp = sctp_find_ifn((void *)NULL, ifn_index); if (sctp_ifnp != NULL) { sctp_ifnp->ifn_mtu = mtu; } } void sctp_free_ifa(struct sctp_ifa *sctp_ifap) { if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifap->refcount)) { /* We zero'd the count */ if (sctp_ifap->ifn_p) { sctp_free_ifn(sctp_ifap->ifn_p); } SCTP_FREE(sctp_ifap, SCTP_M_IFA); atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifas), 1); } } static void sctp_delete_ifn(struct sctp_ifn *sctp_ifnp, int hold_addr_lock) { struct sctp_ifn *found; found = sctp_find_ifn(sctp_ifnp->ifn_p, sctp_ifnp->ifn_index); if (found == NULL) { /* Not in the list.. sorry */ return; } if (hold_addr_lock == 0) SCTP_IPI_ADDR_WLOCK(); LIST_REMOVE(sctp_ifnp, next_bucket); LIST_REMOVE(sctp_ifnp, next_ifn); SCTP_DEREGISTER_INTERFACE(sctp_ifnp->ifn_index, sctp_ifnp->registered_af); if (hold_addr_lock == 0) SCTP_IPI_ADDR_WUNLOCK(); /* Take away the reference, and possibly free it */ sctp_free_ifn(sctp_ifnp); } void sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index) { struct sctp_vrf *vrf; struct sctp_ifa *sctp_ifap; SCTP_IPI_ADDR_RLOCK(); vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); goto out; } sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); if (sctp_ifap == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n"); goto out; } if (sctp_ifap->ifn_p == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unusable\n"); goto out; } if (if_name) { if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) { SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n", sctp_ifap->ifn_p->ifn_name, if_name); goto out; } } else { if (sctp_ifap->ifn_p->ifn_index != ifn_index) { SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n", sctp_ifap->ifn_p->ifn_index, ifn_index); goto out; } } sctp_ifap->localifa_flags &= (~SCTP_ADDR_VALID); sctp_ifap->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE; out: SCTP_IPI_ADDR_RUNLOCK(); } void sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index) { struct sctp_vrf *vrf; struct sctp_ifa *sctp_ifap; SCTP_IPI_ADDR_RLOCK(); vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); goto out; } sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); if (sctp_ifap == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n"); goto out; } if (sctp_ifap->ifn_p == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unusable\n"); goto out; } if (if_name) { if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) { SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n", sctp_ifap->ifn_p->ifn_name, if_name); goto out; } } else { if (sctp_ifap->ifn_p->ifn_index != ifn_index) { SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n", 
sctp_ifap->ifn_p->ifn_index, ifn_index); goto out; } } sctp_ifap->localifa_flags &= (~SCTP_ADDR_IFA_UNUSEABLE); sctp_ifap->localifa_flags |= SCTP_ADDR_VALID; out: SCTP_IPI_ADDR_RUNLOCK(); } /*- * Add an ifa to an ifn. * Register the interface as necessary. * NOTE: ADDR write lock MUST be held. */ static void sctp_add_ifa_to_ifn(struct sctp_ifn *sctp_ifnp, struct sctp_ifa *sctp_ifap) { int ifa_af; LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa); sctp_ifap->ifn_p = sctp_ifnp; atomic_add_int(&sctp_ifap->ifn_p->refcount, 1); /* update address counts */ sctp_ifnp->ifa_count++; ifa_af = sctp_ifap->address.sa.sa_family; switch (ifa_af) { #ifdef INET case AF_INET: sctp_ifnp->num_v4++; break; #endif #ifdef INET6 case AF_INET6: sctp_ifnp->num_v6++; break; #endif default: break; } if (sctp_ifnp->ifa_count == 1) { /* register the new interface */ SCTP_REGISTER_INTERFACE(sctp_ifnp->ifn_index, ifa_af); sctp_ifnp->registered_af = ifa_af; } } /*- * Remove an ifa from its ifn. * If no more addresses exist, remove the ifn too. Otherwise, re-register * the interface based on the remaining address families left. * NOTE: ADDR write lock MUST be held. */ static void sctp_remove_ifa_from_ifn(struct sctp_ifa *sctp_ifap) { LIST_REMOVE(sctp_ifap, next_ifa); if (sctp_ifap->ifn_p) { /* update address counts */ sctp_ifap->ifn_p->ifa_count--; switch (sctp_ifap->address.sa.sa_family) { #ifdef INET case AF_INET: sctp_ifap->ifn_p->num_v4--; break; #endif #ifdef INET6 case AF_INET6: sctp_ifap->ifn_p->num_v6--; break; #endif default: break; } if (LIST_EMPTY(&sctp_ifap->ifn_p->ifalist)) { /* remove the ifn, possibly freeing it */ sctp_delete_ifn(sctp_ifap->ifn_p, SCTP_ADDR_LOCKED); } else { /* re-register address family type, if needed */ if ((sctp_ifap->ifn_p->num_v6 == 0) && (sctp_ifap->ifn_p->registered_af == AF_INET6)) { SCTP_DEREGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET6); SCTP_REGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET); sctp_ifap->ifn_p->registered_af = AF_INET; } else if ((sctp_ifap->ifn_p->num_v4 == 0) && (sctp_ifap->ifn_p->registered_af == AF_INET)) { SCTP_DEREGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET); SCTP_REGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET6); sctp_ifap->ifn_p->registered_af = AF_INET6; } /* free the ifn refcount */ sctp_free_ifn(sctp_ifap->ifn_p); } sctp_ifap->ifn_p = NULL; } } struct sctp_ifa * sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, uint32_t ifn_type, const char *if_name, void *ifa, struct sockaddr *addr, uint32_t ifa_flags, int dynamic_add) { struct sctp_vrf *vrf; struct sctp_ifn *sctp_ifnp = NULL; struct sctp_ifa *sctp_ifap = NULL; struct sctp_ifalist *hash_addr_head; struct sctp_ifnlist *hash_ifn_head; uint32_t hash_of_addr; int new_ifn_af = 0; #ifdef SCTP_DEBUG SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: adding address: ", vrf_id); SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr); #endif SCTP_IPI_ADDR_WLOCK(); sctp_ifnp = sctp_find_ifn(ifn, ifn_index); if (sctp_ifnp) { vrf = sctp_ifnp->vrf; } else { vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { vrf = sctp_allocate_vrf(vrf_id); if (vrf == NULL) { SCTP_IPI_ADDR_WUNLOCK(); return (NULL); } } } if (sctp_ifnp == NULL) { /* * build one and add it, can't hold lock until after malloc * done though. 
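The comment at this point ("can't hold lock until after malloc done though") describes the drop-allocate-relock idiom used throughout this file: the address write lock may not be held across a blocking allocation, so the code releases it, builds the new structure, then re-takes the lock before linking it in. A minimal userland sketch of the same pattern, using pthreads and invented names rather than the kernel's locking macros:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

    struct node {
            int key;
            struct node *next;
    };
    static struct node *table_head;

    /* Find-or-create with the allocation done outside the write lock. */
    struct node *
    node_get_or_add(int key)
    {
            struct node *n;

            pthread_rwlock_wrlock(&table_lock);
            for (n = table_head; n != NULL; n = n->next)
                    if (n->key == key)
                            goto out;
            /* Can't block in the allocator while holding the lock. */
            pthread_rwlock_unlock(&table_lock);
            n = calloc(1, sizeof(*n));
            if (n == NULL)
                    return (NULL);
            n->key = key;
            pthread_rwlock_wrlock(&table_lock);
            /*
             * A racing thread may have inserted the same key while the
             * lock was dropped; the SCTP code copes by re-checking after
             * re-taking the lock and deleting its freshly built entry.
             */
            n->next = table_head;
            table_head = n;
    out:
            pthread_rwlock_unlock(&table_lock);
            return (n);
    }

Note how sctp_add_addr_to_vrf below does exactly this re-check: after re-acquiring the lock it calls sctp_find_ifa_by_addr() and, if the address already exists, tears down the ifn it just created.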
*/ SCTP_IPI_ADDR_WUNLOCK(); SCTP_MALLOC(sctp_ifnp, struct sctp_ifn *, sizeof(struct sctp_ifn), SCTP_M_IFN); if (sctp_ifnp == NULL) { #ifdef INVARIANTS panic("No memory for IFN"); #endif return (NULL); } memset(sctp_ifnp, 0, sizeof(struct sctp_ifn)); sctp_ifnp->ifn_index = ifn_index; sctp_ifnp->ifn_p = ifn; sctp_ifnp->ifn_type = ifn_type; sctp_ifnp->refcount = 0; sctp_ifnp->vrf = vrf; atomic_add_int(&vrf->refcount, 1); sctp_ifnp->ifn_mtu = SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, addr->sa_family); if (if_name != NULL) { snprintf(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", if_name); } else { snprintf(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", "unknown"); } hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))]; LIST_INIT(&sctp_ifnp->ifalist); SCTP_IPI_ADDR_WLOCK(); LIST_INSERT_HEAD(hash_ifn_head, sctp_ifnp, next_bucket); LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn); atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifns), 1); new_ifn_af = 1; } sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); if (sctp_ifap) { /* Hmm, it already exists? */ if ((sctp_ifap->ifn_p) && (sctp_ifap->ifn_p->ifn_index == ifn_index)) { SCTPDBG(SCTP_DEBUG_PCB4, "Using existing ifn %s (0x%x) for ifa %p\n", sctp_ifap->ifn_p->ifn_name, ifn_index, (void *)sctp_ifap); if (new_ifn_af) { /* Remove the created one that we don't want */ sctp_delete_ifn(sctp_ifnp, SCTP_ADDR_LOCKED); } if (sctp_ifap->localifa_flags & SCTP_BEING_DELETED) { /* easy to solve, just switch back to active */ SCTPDBG(SCTP_DEBUG_PCB4, "Clearing deleted ifa flag\n"); sctp_ifap->localifa_flags = SCTP_ADDR_VALID; sctp_ifap->ifn_p = sctp_ifnp; atomic_add_int(&sctp_ifap->ifn_p->refcount, 1); } exit_stage_left: SCTP_IPI_ADDR_WUNLOCK(); return (sctp_ifap); } else { if (sctp_ifap->ifn_p) { /* * The last IFN gets the address, remove the * old one */ SCTPDBG(SCTP_DEBUG_PCB4, "Moving ifa %p from %s (0x%x) to %s (0x%x)\n", (void *)sctp_ifap, sctp_ifap->ifn_p->ifn_name, sctp_ifap->ifn_p->ifn_index, if_name, ifn_index); /* remove the address from the old ifn */ sctp_remove_ifa_from_ifn(sctp_ifap); /* move the address over to the new ifn */ sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap); goto exit_stage_left; } else { /* repair ifnp which was NULL ? */ sctp_ifap->localifa_flags = SCTP_ADDR_VALID; SCTPDBG(SCTP_DEBUG_PCB4, "Repairing ifn %p for ifa %p\n", (void *)sctp_ifnp, (void *)sctp_ifap); sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap); } goto exit_stage_left; } } SCTP_IPI_ADDR_WUNLOCK(); SCTP_MALLOC(sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), SCTP_M_IFA); if (sctp_ifap == NULL) { #ifdef INVARIANTS panic("No memory for IFA"); #endif return (NULL); } memset(sctp_ifap, 0, sizeof(struct sctp_ifa)); sctp_ifap->ifn_p = sctp_ifnp; atomic_add_int(&sctp_ifnp->refcount, 1); sctp_ifap->vrf_id = vrf_id; sctp_ifap->ifa = ifa; memcpy(&sctp_ifap->address, addr, addr->sa_len); sctp_ifap->localifa_flags = SCTP_ADDR_VALID | SCTP_ADDR_DEFER_USE; sctp_ifap->flags = ifa_flags; /* Set scope */ switch (sctp_ifap->address.sa.sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin; sin = &sctp_ifap->address.sin; if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) || (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) { sctp_ifap->src_is_loop = 1; } if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { sctp_ifap->src_is_priv = 1; } sctp_ifnp->num_v4++; if (new_ifn_af) new_ifn_af = AF_INET; break; } #endif #ifdef INET6 case AF_INET6: { /* ok to use deprecated addresses? 
*/ struct sockaddr_in6 *sin6; sin6 = &sctp_ifap->address.sin6; if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) || (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) { sctp_ifap->src_is_loop = 1; } if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { sctp_ifap->src_is_priv = 1; } sctp_ifnp->num_v6++; if (new_ifn_af) new_ifn_af = AF_INET6; break; } #endif default: new_ifn_af = 0; break; } hash_of_addr = sctp_get_ifa_hash_val(&sctp_ifap->address.sa); if ((sctp_ifap->src_is_priv == 0) && (sctp_ifap->src_is_loop == 0)) { sctp_ifap->src_is_glob = 1; } SCTP_IPI_ADDR_WLOCK(); hash_addr_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; LIST_INSERT_HEAD(hash_addr_head, sctp_ifap, next_bucket); sctp_ifap->refcount = 1; LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa); sctp_ifnp->ifa_count++; vrf->total_ifa_count++; atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifas), 1); if (new_ifn_af) { SCTP_REGISTER_INTERFACE(ifn_index, new_ifn_af); sctp_ifnp->registered_af = new_ifn_af; } SCTP_IPI_ADDR_WUNLOCK(); if (dynamic_add) { /* * Bump up the refcount so that when the timer completes it * will drop back down. */ struct sctp_laddr *wi; atomic_add_int(&sctp_ifap->refcount, 1); wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); if (wi == NULL) { /* * Gak, what can we do? We have lost an address * change can you say HOSED? */ SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n"); /* Opps, must decrement the count */ sctp_del_addr_from_vrf(vrf_id, addr, ifn_index, if_name); return (NULL); } SCTP_INCR_LADDR_COUNT(); memset(wi, 0, sizeof(*wi)); (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); wi->ifa = sctp_ifap; wi->action = SCTP_ADD_IP_ADDRESS; SCTP_WQ_ADDR_LOCK(); LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, (struct sctp_inpcb *)NULL, (struct sctp_tcb *)NULL, (struct sctp_nets *)NULL); SCTP_WQ_ADDR_UNLOCK(); } else { /* it's ready for use */ sctp_ifap->localifa_flags &= ~SCTP_ADDR_DEFER_USE; } return (sctp_ifap); } void sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr, uint32_t ifn_index, const char *if_name) { struct sctp_vrf *vrf; struct sctp_ifa *sctp_ifap = NULL; SCTP_IPI_ADDR_WLOCK(); vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); goto out_now; } #ifdef SCTP_DEBUG SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: deleting address:", vrf_id); SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr); #endif sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); if (sctp_ifap) { /* Validate the delete */ if (sctp_ifap->ifn_p) { int valid = 0; /*- * The name has priority over the ifn_index * if its given. We do this especially for * panda who might recycle indexes fast. */ if (if_name) { if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) == 0) { /* They match its a correct delete */ valid = 1; } } if (!valid) { /* last ditch check ifn_index */ if (ifn_index == sctp_ifap->ifn_p->ifn_index) { valid = 1; } } if (!valid) { SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s does not match addresses\n", ifn_index, ((if_name == NULL) ? "NULL" : if_name)); SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s - ignoring delete\n", sctp_ifap->ifn_p->ifn_index, sctp_ifap->ifn_p->ifn_name); SCTP_IPI_ADDR_WUNLOCK(); return; } } SCTPDBG(SCTP_DEBUG_PCB4, "Deleting ifa %p\n", (void *)sctp_ifap); sctp_ifap->localifa_flags &= SCTP_ADDR_VALID; /* * We don't set the flag. This means that the structure will * hang around in EP's that have bound specific to it until * they close. 
This gives us TCP like behavior if someone * removes an address (or for that matter adds it right * back). */ /* sctp_ifap->localifa_flags |= SCTP_BEING_DELETED; */ vrf->total_ifa_count--; LIST_REMOVE(sctp_ifap, next_bucket); sctp_remove_ifa_from_ifn(sctp_ifap); } #ifdef SCTP_DEBUG else { SCTPDBG(SCTP_DEBUG_PCB4, "Del Addr-ifn:%d Could not find address:", ifn_index); SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr); } #endif out_now: SCTP_IPI_ADDR_WUNLOCK(); if (sctp_ifap) { struct sctp_laddr *wi; wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); if (wi == NULL) { /* * Gak, what can we do? We have lost an address * change can you say HOSED? */ SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n"); /* Oops, must decrement the count */ sctp_free_ifa(sctp_ifap); return; } SCTP_INCR_LADDR_COUNT(); memset(wi, 0, sizeof(*wi)); (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); wi->ifa = sctp_ifap; wi->action = SCTP_DEL_IP_ADDRESS; SCTP_WQ_ADDR_LOCK(); /* * Should this really be a tailq? As it is we will process * the newest first :-0 */ LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, (struct sctp_inpcb *)NULL, (struct sctp_tcb *)NULL, (struct sctp_nets *)NULL); SCTP_WQ_ADDR_UNLOCK(); } return; } static int sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to) { int loopback_scope; #if defined(INET) int ipv4_local_scope, ipv4_addr_legal; #endif #if defined(INET6) int local_scope, site_scope, ipv6_addr_legal; #endif struct sctp_vrf *vrf; struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa; loopback_scope = stcb->asoc.scope.loopback_scope; #if defined(INET) ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; #endif #if defined(INET6) local_scope = stcb->asoc.scope.local_scope; site_scope = stcb->asoc.scope.site_scope; ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; #endif SCTP_IPI_ADDR_RLOCK(); vrf = sctp_find_vrf(stcb->asoc.vrf_id); if (vrf == NULL) { /* no vrf, no addresses */ SCTP_IPI_ADDR_RUNLOCK(); return (0); } if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { if ((loopback_scope == 0) && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { continue; } LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { if (sctp_is_addr_restricted(stcb, sctp_ifa) && (!sctp_is_addr_pending(stcb, sctp_ifa))) { /* * We allow pending addresses, where * we have sent an asconf-add to be * considered valid. 
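The comment above asks "Should this really be a tailq? As it is we will process the newest first": LIST_INSERT_HEAD makes the address work queue LIFO. A sketch of the FIFO alternative using the TAILQ macros from <sys/queue.h>; the work_item type here is illustrative, not the kernel's struct sctp_laddr:

    #include <sys/queue.h>

    struct work_item {
            int action;                        /* e.g. add or delete */
            TAILQ_ENTRY(work_item) entries;
    };
    TAILQ_HEAD(work_head, work_item);

    static struct work_head wq = TAILQ_HEAD_INITIALIZER(wq);

    /* Producer: preserve arrival order instead of inserting at the head. */
    void
    wq_enqueue(struct work_item *wi)
    {
            TAILQ_INSERT_TAIL(&wq, wi, entries);
    }

    /* Consumer: oldest request first. */
    struct work_item *
    wq_dequeue(void)
    {
            struct work_item *wi;

            if ((wi = TAILQ_FIRST(&wq)) != NULL)
                    TAILQ_REMOVE(&wq, wi, entries);
            return (wi);
    }

Arrival order matters most when the same address is deleted and immediately re-added, the "TCP like behavior" case the surrounding comment describes.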
*/ continue; } if (sctp_ifa->address.sa.sa_family != to->sa_family) { continue; } switch (sctp_ifa->address.sa.sa_family) { #ifdef INET case AF_INET: if (ipv4_addr_legal) { struct sockaddr_in *sin, *rsin; sin = &sctp_ifa->address.sin; rsin = (struct sockaddr_in *)to; if ((ipv4_local_scope == 0) && IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { continue; } if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; } if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { SCTP_IPI_ADDR_RUNLOCK(); return (1); } } break; #endif #ifdef INET6 case AF_INET6: if (ipv6_addr_legal) { struct sockaddr_in6 *sin6, *rsin6; sin6 = &sctp_ifa->address.sin6; rsin6 = (struct sockaddr_in6 *)to; if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; } if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { if (local_scope == 0) continue; if (sin6->sin6_scope_id == 0) { if (sa6_recoverscope(sin6) != 0) continue; } } if ((site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { continue; } if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) { SCTP_IPI_ADDR_RUNLOCK(); return (1); } } break; #endif default: /* TSNH */ break; } } } } else { struct sctp_laddr *laddr; LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n"); continue; } if (sctp_is_addr_restricted(stcb, laddr->ifa) && (!sctp_is_addr_pending(stcb, laddr->ifa))) { /* * We allow pending addresses, where we have * sent an asconf-add to be considered * valid. */ continue; } if (laddr->ifa->address.sa.sa_family != to->sa_family) { continue; } switch (to->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin, *rsin; sin = &laddr->ifa->address.sin; rsin = (struct sockaddr_in *)to; if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { SCTP_IPI_ADDR_RUNLOCK(); return (1); } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6, *rsin6; sin6 = &laddr->ifa->address.sin6; rsin6 = (struct sockaddr_in6 *)to; if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) { SCTP_IPI_ADDR_RUNLOCK(); return (1); } break; } #endif default: /* TSNH */ break; } } } SCTP_IPI_ADDR_RUNLOCK(); return (0); } static struct sctp_tcb * sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, struct sockaddr *to, struct sctp_nets **netp, uint32_t vrf_id) { /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */ /* * If we support the TCP model, then we must now dig through to see * if we can find our endpoint in the list of tcp ep's. */ uint16_t lport, rport; struct sctppcbhead *ephead; struct sctp_inpcb *inp; struct sctp_laddr *laddr; struct sctp_tcb *stcb; struct sctp_nets *net; if ((to == NULL) || (from == NULL)) { return (NULL); } switch (to->sa_family) { #ifdef INET case AF_INET: if (from->sa_family == AF_INET) { lport = ((struct sockaddr_in *)to)->sin_port; rport = ((struct sockaddr_in *)from)->sin_port; } else { return (NULL); } break; #endif #ifdef INET6 case AF_INET6: if (from->sa_family == AF_INET6) { lport = ((struct sockaddr_in6 *)to)->sin6_port; rport = ((struct sockaddr_in6 *)from)->sin6_port; } else { return (NULL); } break; #endif default: return (NULL); } ephead = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))]; /* * Ok now for each of the guys in this bucket we must look and see: * - Does the remote port match. - Does there single association's * addresses match this address (to). 
If so we update p_ep to point * to this ep and return the tcb from it. */ LIST_FOREACH(inp, ephead, sctp_hash) { SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(inp); continue; } if (lport != inp->sctp_lport) { SCTP_INP_RUNLOCK(inp); continue; } switch (to->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin; sin = (struct sockaddr_in *)to; if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { SCTP_INP_RUNLOCK(inp); continue; } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)to; if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { SCTP_INP_RUNLOCK(inp); continue; } break; } #endif default: SCTP_INP_RUNLOCK(inp); continue; } if (inp->def_vrf_id != vrf_id) { SCTP_INP_RUNLOCK(inp); continue; } /* check to see if the ep has one of the addresses */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* We are NOT bound all, so look further */ int match = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __func__); continue; } if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n"); continue; } if (laddr->ifa->address.sa.sa_family == to->sa_family) { /* see if it matches */ #ifdef INET if (from->sa_family == AF_INET) { struct sockaddr_in *intf_addr, *sin; intf_addr = &laddr->ifa->address.sin; sin = (struct sockaddr_in *)to; if (sin->sin_addr.s_addr == intf_addr->sin_addr.s_addr) { match = 1; break; } } #endif #ifdef INET6 if (from->sa_family == AF_INET6) { struct sockaddr_in6 *intf_addr6; struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *) to; intf_addr6 = &laddr->ifa->address.sin6; if (SCTP6_ARE_ADDR_EQUAL(sin6, intf_addr6)) { match = 1; break; } } #endif } } if (match == 0) { /* This endpoint does not have this address */ SCTP_INP_RUNLOCK(inp); continue; } } /* * Ok if we hit here the ep has the address, does it hold * the tcb? */ /* XXX: Why don't we TAILQ_FOREACH through sctp_asoc_list? */ stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_RUNLOCK(inp); continue; } SCTP_TCB_LOCK(stcb); if (!sctp_does_stcb_own_this_addr(stcb, to)) { SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); continue; } if (stcb->rport != rport) { /* remote port does not match. */ SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); continue; } if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); continue; } if (!sctp_does_stcb_own_this_addr(stcb, to)) { SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); continue; } /* Does this TCB have a matching address? 
*/ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->ro._l_addr.sa.sa_family != from->sa_family) { /* not the same family, can't be a match */ continue; } switch (from->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin, *rsin; sin = (struct sockaddr_in *)&net->ro._l_addr; rsin = (struct sockaddr_in *)from; if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { /* found it */ if (netp != NULL) { *netp = net; } /* * Update the endpoint * pointer */ *inp_p = inp; SCTP_INP_RUNLOCK(inp); return (stcb); } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6, *rsin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; rsin6 = (struct sockaddr_in6 *)from; if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) { /* found it */ if (netp != NULL) { *netp = net; } /* * Update the endpoint * pointer */ *inp_p = inp; SCTP_INP_RUNLOCK(inp); return (stcb); } break; } #endif default: /* TSNH */ break; } } SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); } return (NULL); } /* * rules for use * * 1) If I return a NULL you must decrement any INP ref cnt. 2) If I find an * stcb, both will be locked (locked_tcb and stcb) but decrement will be done * (if locked == NULL). 3) Decrement happens on return ONLY if locked == * NULL. */ struct sctp_tcb * sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote, struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb) { struct sctpasochead *head; struct sctp_inpcb *inp; struct sctp_tcb *stcb = NULL; struct sctp_nets *net; uint16_t rport; inp = *inp_p; switch (remote->sa_family) { #ifdef INET case AF_INET: rport = (((struct sockaddr_in *)remote)->sin_port); break; #endif #ifdef INET6 case AF_INET6: rport = (((struct sockaddr_in6 *)remote)->sin6_port); break; #endif default: return (NULL); } if (locked_tcb) { /* * UN-lock so we can do proper locking here this occurs when * called from load_addresses_from_init. */ atomic_add_int(&locked_tcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(locked_tcb); } SCTP_INP_INFO_RLOCK(); if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /*- * Now either this guy is our listener or it's the * connector. If it is the one that issued the connect, then * it's only chance is to be the first TCB in the list. If * it is the acceptor, then do the special_lookup to hash * and find the real inp. */ if ((inp->sctp_socket) && SCTP_IS_LISTENING(inp)) { /* to is peer addr, from is my addr */ stcb = sctp_tcb_special_locate(inp_p, remote, local, netp, inp->def_vrf_id); if ((stcb != NULL) && (locked_tcb == NULL)) { /* we have a locked tcb, lower refcount */ SCTP_INP_DECR_REF(inp); } if ((locked_tcb != NULL) && (locked_tcb != stcb)) { SCTP_INP_RLOCK(locked_tcb->sctp_ep); SCTP_TCB_LOCK(locked_tcb); atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); SCTP_INP_RUNLOCK(locked_tcb->sctp_ep); } SCTP_INP_INFO_RUNLOCK(); return (stcb); } else { SCTP_INP_WLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { goto null_return; } stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { goto null_return; } SCTP_TCB_LOCK(stcb); if (stcb->rport != rport) { /* remote port does not match. 
*/ SCTP_TCB_UNLOCK(stcb); goto null_return; } if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { SCTP_TCB_UNLOCK(stcb); goto null_return; } if (local && !sctp_does_stcb_own_this_addr(stcb, local)) { SCTP_TCB_UNLOCK(stcb); goto null_return; } /* now look at the list of remote addresses */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { #ifdef INVARIANTS if (net == (TAILQ_NEXT(net, sctp_next))) { panic("Corrupt net list"); } #endif if (net->ro._l_addr.sa.sa_family != remote->sa_family) { /* not the same family */ continue; } switch (remote->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin, *rsin; sin = (struct sockaddr_in *) &net->ro._l_addr; rsin = (struct sockaddr_in *)remote; if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { /* found it */ if (netp != NULL) { *netp = net; } if (locked_tcb == NULL) { SCTP_INP_DECR_REF(inp); } else if (locked_tcb != stcb) { SCTP_TCB_LOCK(locked_tcb); } if (locked_tcb) { atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return (stcb); } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6, *rsin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; rsin6 = (struct sockaddr_in6 *)remote; if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) { /* found it */ if (netp != NULL) { *netp = net; } if (locked_tcb == NULL) { SCTP_INP_DECR_REF(inp); } else if (locked_tcb != stcb) { SCTP_TCB_LOCK(locked_tcb); } if (locked_tcb) { atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return (stcb); } break; } #endif default: /* TSNH */ break; } } SCTP_TCB_UNLOCK(stcb); } } else { SCTP_INP_WLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { goto null_return; } head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport, inp->sctp_hashmark)]; LIST_FOREACH(stcb, head, sctp_tcbhash) { if (stcb->rport != rport) { /* remote port does not match */ continue; } SCTP_TCB_LOCK(stcb); if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { SCTP_TCB_UNLOCK(stcb); continue; } if (local && !sctp_does_stcb_own_this_addr(stcb, local)) { SCTP_TCB_UNLOCK(stcb); continue; } /* now look at the list of remote addresses */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { #ifdef INVARIANTS if (net == (TAILQ_NEXT(net, sctp_next))) { panic("Corrupt net list"); } #endif if (net->ro._l_addr.sa.sa_family != remote->sa_family) { /* not the same family */ continue; } switch (remote->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin, *rsin; sin = (struct sockaddr_in *) &net->ro._l_addr; rsin = (struct sockaddr_in *)remote; if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { /* found it */ if (netp != NULL) { *netp = net; } if (locked_tcb == NULL) { SCTP_INP_DECR_REF(inp); } else if (locked_tcb != stcb) { SCTP_TCB_LOCK(locked_tcb); } if (locked_tcb) { atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return (stcb); } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6, *rsin6; sin6 = (struct sockaddr_in6 *) &net->ro._l_addr; rsin6 = (struct sockaddr_in6 *)remote; if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) { /* found it */ if (netp != NULL) { *netp = net; } if (locked_tcb == NULL) { SCTP_INP_DECR_REF(inp); } else if (locked_tcb != stcb) { SCTP_TCB_LOCK(locked_tcb); } if (locked_tcb) { atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); return (stcb); } break; } #endif default: /* TSNH */ break; } } SCTP_TCB_UNLOCK(stcb); } } null_return: /* clean up 
for returning null */ if (locked_tcb) { SCTP_TCB_LOCK(locked_tcb); atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); } SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_RUNLOCK(); /* not found */ return (NULL); } /* * Find an association for a specific endpoint using the association id given * out in the COMM_UP notification */ struct sctp_tcb * sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock) { /* * Use my the assoc_id to find a endpoint */ struct sctpasochead *head; struct sctp_tcb *stcb; uint32_t id; if (inp == NULL) { SCTP_PRINTF("TSNH ep_associd\n"); return (NULL); } if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_PRINTF("TSNH ep_associd0\n"); return (NULL); } id = (uint32_t)asoc_id; head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)]; if (head == NULL) { /* invalid id TSNH */ SCTP_PRINTF("TSNH ep_associd1\n"); return (NULL); } LIST_FOREACH(stcb, head, sctp_tcbasocidhash) { if (stcb->asoc.assoc_id == id) { if (inp != stcb->sctp_ep) { /* * some other guy has the same id active (id * collision ??). */ SCTP_PRINTF("TSNH ep_associd2\n"); continue; } if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { continue; } if (want_lock) { SCTP_TCB_LOCK(stcb); } return (stcb); } } return (NULL); } struct sctp_tcb * sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock) { struct sctp_tcb *stcb; SCTP_INP_RLOCK(inp); stcb = sctp_findasoc_ep_asocid_locked(inp, asoc_id, want_lock); SCTP_INP_RUNLOCK(inp); return (stcb); } /* * Endpoint probe expects that the INP_INFO is locked. */ static struct sctp_inpcb * sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head, uint16_t lport, uint32_t vrf_id) { struct sctp_inpcb *inp; struct sctp_laddr *laddr; #ifdef INET struct sockaddr_in *sin; #endif #ifdef INET6 struct sockaddr_in6 *sin6; struct sockaddr_in6 *intf_addr6; #endif int fnd; #ifdef INET sin = NULL; #endif #ifdef INET6 sin6 = NULL; #endif switch (nam->sa_family) { #ifdef INET case AF_INET: sin = (struct sockaddr_in *)nam; break; #endif #ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)nam; break; #endif default: /* unsupported family */ return (NULL); } if (head == NULL) return (NULL); LIST_FOREACH(inp, head, sctp_hash) { SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(inp); continue; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) && (inp->sctp_lport == lport)) { /* got it */ switch (nam->sa_family) { #ifdef INET case AF_INET: if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && SCTP_IPV6_V6ONLY(inp)) { /* * IPv4 on a IPv6 socket with ONLY * IPv6 set */ SCTP_INP_RUNLOCK(inp); continue; } if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { SCTP_INP_RUNLOCK(inp); continue; } break; #endif #ifdef INET6 case AF_INET6: /* * A V6 address and the endpoint is NOT * bound V6 */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { SCTP_INP_RUNLOCK(inp); continue; } if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { SCTP_INP_RUNLOCK(inp); continue; } break; #endif default: break; } /* does a VRF id match? 
*/ fnd = 0; if (inp->def_vrf_id == vrf_id) fnd = 1; SCTP_INP_RUNLOCK(inp); if (!fnd) continue; return (inp); } SCTP_INP_RUNLOCK(inp); } switch (nam->sa_family) { #ifdef INET case AF_INET: if (sin->sin_addr.s_addr == INADDR_ANY) { /* Can't hunt for one that has no address specified */ return (NULL); } break; #endif #ifdef INET6 case AF_INET6: if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* Can't hunt for one that has no address specified */ return (NULL); } break; #endif default: break; } /* * ok, not bound to all so see if we can find a EP bound to this * address. */ LIST_FOREACH(inp, head, sctp_hash) { SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(inp); continue; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) { SCTP_INP_RUNLOCK(inp); continue; } /* * Ok this could be a likely candidate, look at all of its * addresses */ if (inp->sctp_lport != lport) { SCTP_INP_RUNLOCK(inp); continue; } /* does a VRF id match? */ fnd = 0; if (inp->def_vrf_id == vrf_id) fnd = 1; if (!fnd) { SCTP_INP_RUNLOCK(inp); continue; } LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __func__); continue; } SCTPDBG(SCTP_DEBUG_PCB1, "Ok laddr->ifa:%p is possible, ", (void *)laddr->ifa); if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { SCTPDBG(SCTP_DEBUG_PCB1, "Huh IFA being deleted\n"); continue; } if (laddr->ifa->address.sa.sa_family == nam->sa_family) { /* possible, see if it matches */ switch (nam->sa_family) { #ifdef INET case AF_INET: if (sin->sin_addr.s_addr == laddr->ifa->address.sin.sin_addr.s_addr) { SCTP_INP_RUNLOCK(inp); return (inp); } break; #endif #ifdef INET6 case AF_INET6: intf_addr6 = &laddr->ifa->address.sin6; if (SCTP6_ARE_ADDR_EQUAL(sin6, intf_addr6)) { SCTP_INP_RUNLOCK(inp); return (inp); } break; #endif } } } SCTP_INP_RUNLOCK(inp); } return (NULL); } static struct sctp_inpcb * sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id) { struct sctppcbhead *head; struct sctp_inpcb *t_inp; int fnd; head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark))]; LIST_FOREACH(t_inp, head, sctp_hash) { if (t_inp->sctp_lport != lport) { continue; } /* is it in the VRF in question */ fnd = 0; if (t_inp->def_vrf_id == vrf_id) fnd = 1; if (!fnd) continue; /* This one is in use. 
*/ /* check the v6/v4 binding issue */ if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && SCTP_IPV6_V6ONLY(t_inp)) { if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { /* collision in V6 space */ return (t_inp); } else { /* inp is BOUND_V4 no conflict */ continue; } } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { /* t_inp is bound v4 and v6, conflict always */ return (t_inp); } else { /* t_inp is bound only V4 */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && SCTP_IPV6_V6ONLY(inp)) { /* no conflict */ continue; } /* else fall through to conflict */ } return (t_inp); } return (NULL); } int sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp) { /* For 1-2-1 with port reuse */ struct sctppcbhead *head; struct sctp_inpcb *tinp, *ninp; if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) { /* only works with port reuse on */ return (-1); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) { return (0); } SCTP_INP_RUNLOCK(inp); SCTP_INP_INFO_WLOCK(); head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))]; /* Kick out all non-listeners to the TCP hash */ LIST_FOREACH_SAFE(tinp, head, sctp_hash, ninp) { if (tinp->sctp_lport != inp->sctp_lport) { continue; } if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { continue; } if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { continue; } if (SCTP_IS_LISTENING(tinp)) { continue; } SCTP_INP_WLOCK(tinp); LIST_REMOVE(tinp, sctp_hash); head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(tinp->sctp_lport, SCTP_BASE_INFO(hashtcpmark))]; tinp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL; LIST_INSERT_HEAD(head, tinp, sctp_hash); SCTP_INP_WUNLOCK(tinp); } SCTP_INP_WLOCK(inp); /* Pull from where he was */ LIST_REMOVE(inp, sctp_hash); inp->sctp_flags &= ~SCTP_PCB_FLAGS_IN_TCPPOOL; head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))]; LIST_INSERT_HEAD(head, inp, sctp_hash); SCTP_INP_WUNLOCK(inp); SCTP_INP_RLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (0); } struct sctp_inpcb * sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock, uint32_t vrf_id) { /* * First we check the hash table to see if someone has this port * bound with just the port. */ struct sctp_inpcb *inp; struct sctppcbhead *head; int lport; unsigned int i; #ifdef INET struct sockaddr_in *sin; #endif #ifdef INET6 struct sockaddr_in6 *sin6; #endif switch (nam->sa_family) { #ifdef INET case AF_INET: sin = (struct sockaddr_in *)nam; lport = sin->sin_port; break; #endif #ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)nam; lport = sin6->sin6_port; break; #endif default: return (NULL); } /* * I could cheat here and just cast to one of the types but we will * do it right. It also provides the check against an Unsupported * type too. */ /* Find the head of the ALLADDR chain */ if (have_lock == 0) { SCTP_INP_INFO_RLOCK(); } head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark))]; inp = sctp_endpoint_probe(nam, head, lport, vrf_id); /* * If the TCP model exists it could be that the main listening * endpoint is gone but there still exists a connected socket for * this guy. If so we can return the first one that we find. This * may NOT be the correct one so the caller should be wary on the * returned INP. Currently the only caller that sets find_tcp_pool * is in bindx where we are verifying that a user CAN bind the * address. He either has bound it already, or someone else has, or * its open to bind, so this is good enough. 
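Throughout these lookup routines a bucket is chosen with expressions like SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark)): the tables are sized to a power of two, so selecting a chain is a mask rather than a modulo. A small sketch of that idiom, with invented names:

    #include <stdint.h>

    #define EP_HASH_SIZE 1024                  /* must be a power of two */
    static const uint32_t ep_hashmark = EP_HASH_SIZE - 1;

    static inline uint32_t
    ep_bucket(uint16_t lport)
    {
            /* equivalent to (lport % EP_HASH_SIZE) for power-of-two sizes */
            return ((uint32_t)lport & ep_hashmark);
    }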
	if (inp == NULL && find_tcp_pool) {
		for (i = 0; i < SCTP_BASE_INFO(hashtcpmark) + 1; i++) {
			head = &SCTP_BASE_INFO(sctp_tcpephash)[i];
			inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
			if (inp) {
				break;
			}
		}
	}
	if (inp) {
		SCTP_INP_INCR_REF(inp);
	}
	if (have_lock == 0) {
		SCTP_INP_INFO_RUNLOCK();
	}
	return (inp);
}

/*
 * Find an association for an endpoint with the pointer to whom you want to
 * send to and the endpoint pointer. The address can be IPv4 or IPv6. We may
 * need to change the *to to some other struct like a mbuf...
 */
struct sctp_tcb *
sctp_findassociation_addr_sa(struct sockaddr *from, struct sockaddr *to,
    struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool,
    uint32_t vrf_id)
{
	struct sctp_inpcb *inp = NULL;
	struct sctp_tcb *stcb;

	SCTP_INP_INFO_RLOCK();
	if (find_tcp_pool) {
		if (inp_p != NULL) {
			stcb = sctp_tcb_special_locate(inp_p, from, to, netp,
			    vrf_id);
		} else {
			stcb = sctp_tcb_special_locate(&inp, from, to, netp,
			    vrf_id);
		}
		if (stcb != NULL) {
			SCTP_INP_INFO_RUNLOCK();
			return (stcb);
		}
	}
	inp = sctp_pcb_findep(to, 0, 1, vrf_id);
	if (inp_p != NULL) {
		*inp_p = inp;
	}
	SCTP_INP_INFO_RUNLOCK();
	if (inp == NULL) {
		return (NULL);
	}
	/*
	 * OK, we have an endpoint, now let's find the assoc for it (if
	 * any). We now place the source address or from in the to of the
	 * find endpoint call, since in reality this chain is used from the
	 * inbound packet side.
	 */
	if (inp_p != NULL) {
		stcb = sctp_findassociation_ep_addr(inp_p, from, netp, to,
		    NULL);
	} else {
		stcb = sctp_findassociation_ep_addr(&inp, from, netp, to,
		    NULL);
	}
	return (stcb);
}

/*
 * This routine will grub through the mbuf that is an INIT or INIT-ACK and
 * find all addresses that the sender has specified in any address list.
 * Each address will be used to look up the TCB and see if one exists.
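The routine that follows walks the chunk's parameter list as a sequence of type-length-value entries, each padded to a 32-bit boundary, and bails out on a zero length. A self-contained sketch of that TLV walk over a flat buffer, assuming the wire layout of struct sctp_paramhdr but using invented names:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    struct param_hdr {                 /* wire format of a parameter header */
            uint16_t type;             /* network byte order */
            uint16_t length;           /* network byte order, includes header */
    };

    #define PAD4(x) (((x) + 3u) & ~3u) /* parameters are 32-bit aligned */

    /* Walk the TLV parameters in buf[0..len), stopping on anything bogus. */
    void
    walk_params(const uint8_t *buf, size_t len)
    {
            size_t off = 0;

            while (off + sizeof(struct param_hdr) <= len) {
                    struct param_hdr ph;
                    uint16_t plen;

                    memcpy(&ph, buf + off, sizeof(ph));
                    plen = ntohs(ph.length);
                    if (plen < sizeof(ph) || off + plen > len)
                            break; /* malformed, like the plen == 0 check */
                    /* ntohs(ph.type) selects SCTP_IPV4_ADDRESS etc. */
                    off += PAD4(plen);
            }
    }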
*/ static struct sctp_tcb * sctp_findassociation_special_addr(struct mbuf *m, int offset, struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp, struct sockaddr *dst) { struct sctp_paramhdr *phdr, param_buf; #if defined(INET) || defined(INET6) struct sctp_tcb *stcb; uint16_t ptype; #endif uint16_t plen; #ifdef INET struct sockaddr_in sin4; #endif #ifdef INET6 struct sockaddr_in6 sin6; #endif #ifdef INET memset(&sin4, 0, sizeof(sin4)); sin4.sin_len = sizeof(sin4); sin4.sin_family = AF_INET; sin4.sin_port = sh->src_port; #endif #ifdef INET6 memset(&sin6, 0, sizeof(sin6)); sin6.sin6_len = sizeof(sin6); sin6.sin6_family = AF_INET6; sin6.sin6_port = sh->src_port; #endif offset += sizeof(struct sctp_init_chunk); phdr = sctp_get_next_param(m, offset, ¶m_buf, sizeof(param_buf)); while (phdr != NULL) { /* now we must see if we want the parameter */ #if defined(INET) || defined(INET6) ptype = ntohs(phdr->param_type); #endif plen = ntohs(phdr->param_length); if (plen == 0) { break; } #ifdef INET if (ptype == SCTP_IPV4_ADDRESS && plen == sizeof(struct sctp_ipv4addr_param)) { /* Get the rest of the address */ struct sctp_ipv4addr_param ip4_param, *p4; phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)&ip4_param, sizeof(ip4_param)); if (phdr == NULL) { return (NULL); } p4 = (struct sctp_ipv4addr_param *)phdr; memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr)); /* look it up */ stcb = sctp_findassociation_ep_addr(inp_p, (struct sockaddr *)&sin4, netp, dst, NULL); if (stcb != NULL) { return (stcb); } } #endif #ifdef INET6 if (ptype == SCTP_IPV6_ADDRESS && plen == sizeof(struct sctp_ipv6addr_param)) { /* Get the rest of the address */ struct sctp_ipv6addr_param ip6_param, *p6; phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)&ip6_param, sizeof(ip6_param)); if (phdr == NULL) { return (NULL); } p6 = (struct sctp_ipv6addr_param *)phdr; memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr)); /* look it up */ stcb = sctp_findassociation_ep_addr(inp_p, (struct sockaddr *)&sin6, netp, dst, NULL); if (stcb != NULL) { return (stcb); } } #endif offset += SCTP_SIZE32(plen); phdr = sctp_get_next_param(m, offset, ¶m_buf, sizeof(param_buf)); } return (NULL); } static struct sctp_tcb * sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag, struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport, uint16_t lport, int skip_src_check, uint32_t vrf_id, uint32_t remote_tag) { /* * Use my vtag to hash. If we find it we then verify the source addr * is in the assoc. If all goes well we save a bit on rec of a * packet. 
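The comment above describes the fast path: hash on the verification tag first, then confirm ports and peer address, so that most inbound packets avoid the full address-based search. A compact sketch of that two-step lookup with hypothetical types, not the kernel's:

    #include <stddef.h>
    #include <stdint.h>

    struct assoc {
            uint32_t my_vtag;
            uint16_t lport, rport;
            struct assoc *next_bucket;
    };

    #define ASOC_HASH_SIZE 1024        /* power of two */
    static struct assoc *asoc_hash[ASOC_HASH_SIZE];

    struct assoc *
    lookup_by_vtag(uint32_t vtag, uint16_t lport, uint16_t rport)
    {
            struct assoc *a;

            for (a = asoc_hash[vtag & (ASOC_HASH_SIZE - 1)]; a != NULL;
                a = a->next_bucket) {
                    /*
                     * The tag only selects the bucket; ports (and, in the
                     * real code, the peer address) still have to match.
                     */
                    if (a->my_vtag == vtag && a->lport == lport &&
                        a->rport == rport)
                            return (a);
            }
            return (NULL);
    }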
*/ struct sctpasochead *head; struct sctp_nets *net; struct sctp_tcb *stcb; SCTP_INP_INFO_RLOCK(); head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(vtag, SCTP_BASE_INFO(hashasocmark))]; LIST_FOREACH(stcb, head, sctp_asocs) { SCTP_INP_RLOCK(stcb->sctp_ep); if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_INP_RUNLOCK(stcb->sctp_ep); continue; } if (stcb->sctp_ep->def_vrf_id != vrf_id) { SCTP_INP_RUNLOCK(stcb->sctp_ep); continue; } SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(stcb->sctp_ep); if (stcb->asoc.my_vtag == vtag) { /* candidate */ if (stcb->rport != rport) { SCTP_TCB_UNLOCK(stcb); continue; } if (stcb->sctp_ep->sctp_lport != lport) { SCTP_TCB_UNLOCK(stcb); continue; } if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { SCTP_TCB_UNLOCK(stcb); continue; } /* RRS:Need toaddr check here */ if (sctp_does_stcb_own_this_addr(stcb, to) == 0) { /* Endpoint does not own this address */ SCTP_TCB_UNLOCK(stcb); continue; } if (remote_tag) { /* * If we have both vtags that's all we match * on */ if (stcb->asoc.peer_vtag == remote_tag) { /* * If both tags match we consider it * conclusive and check NO * source/destination addresses */ goto conclusive; } } if (skip_src_check) { conclusive: if (from) { *netp = sctp_findnet(stcb, from); } else { *netp = NULL; /* unknown */ } if (inp_p) *inp_p = stcb->sctp_ep; SCTP_INP_INFO_RUNLOCK(); return (stcb); } net = sctp_findnet(stcb, from); if (net) { /* yep its him. */ *netp = net; SCTP_STAT_INCR(sctps_vtagexpress); *inp_p = stcb->sctp_ep; SCTP_INP_INFO_RUNLOCK(); return (stcb); } else { /* * not him, this should only happen in rare * cases so I peg it. */ SCTP_STAT_INCR(sctps_vtagbogus); } } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_INFO_RUNLOCK(); return (NULL); } /* * Find an association with the pointer to the inbound IP packet. This can be * a IPv4 or IPv6 packet. */ struct sctp_tcb * sctp_findassociation_addr(struct mbuf *m, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id) { struct sctp_tcb *stcb; struct sctp_inpcb *inp; if (sh->v_tag) { /* we only go down this path if vtag is non-zero */ stcb = sctp_findassoc_by_vtag(src, dst, ntohl(sh->v_tag), inp_p, netp, sh->src_port, sh->dest_port, 0, vrf_id, 0); if (stcb) { return (stcb); } } if (inp_p) { stcb = sctp_findassociation_addr_sa(src, dst, inp_p, netp, 1, vrf_id); inp = *inp_p; } else { stcb = sctp_findassociation_addr_sa(src, dst, &inp, netp, 1, vrf_id); } SCTPDBG(SCTP_DEBUG_PCB1, "stcb:%p inp:%p\n", (void *)stcb, (void *)inp); if (stcb == NULL && inp) { /* Found a EP but not this address */ if ((ch->chunk_type == SCTP_INITIATION) || (ch->chunk_type == SCTP_INITIATION_ACK)) { /*- * special hook, we do NOT return linp or an * association that is linked to an existing * association that is under the TCP pool (i.e. no * listener exists). The endpoint finding routine * will always find a listener before examining the * TCP pool. */ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { if (inp_p) { *inp_p = NULL; } return (NULL); } stcb = sctp_findassociation_special_addr(m, offset, sh, &inp, netp, dst); if (inp_p != NULL) { *inp_p = inp; } } } SCTPDBG(SCTP_DEBUG_PCB1, "stcb is %p\n", (void *)stcb); return (stcb); } /* * lookup an association by an ASCONF lookup address. 
* if the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup */ struct sctp_tcb * sctp_findassociation_ep_asconf(struct mbuf *m, int offset, struct sockaddr *dst, struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id) { struct sctp_tcb *stcb; union sctp_sockstore remote_store; struct sctp_paramhdr param_buf, *phdr; int ptype; int zero_address = 0; #ifdef INET struct sockaddr_in *sin; #endif #ifdef INET6 struct sockaddr_in6 *sin6; #endif memset(&remote_store, 0, sizeof(remote_store)); phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk), ¶m_buf, sizeof(struct sctp_paramhdr)); if (phdr == NULL) { SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf lookup addr\n", __func__); return NULL; } ptype = (int)((uint32_t)ntohs(phdr->param_type)); /* get the correlation address */ switch (ptype) { #ifdef INET6 case SCTP_IPV6_ADDRESS: { /* ipv6 address param */ struct sctp_ipv6addr_param *p6, p6_buf; if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) { return NULL; } p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk), &p6_buf.ph, sizeof(p6_buf)); if (p6 == NULL) { SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v6 lookup addr\n", __func__); return (NULL); } sin6 = &remote_store.sin6; sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); sin6->sin6_port = sh->src_port; memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr)); if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) zero_address = 1; break; } #endif #ifdef INET case SCTP_IPV4_ADDRESS: { /* ipv4 address param */ struct sctp_ipv4addr_param *p4, p4_buf; if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) { return NULL; } p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk), &p4_buf.ph, sizeof(p4_buf)); if (p4 == NULL) { SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v4 lookup addr\n", __func__); return (NULL); } sin = &remote_store.sin; sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_port = sh->src_port; memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr)); if (sin->sin_addr.s_addr == INADDR_ANY) zero_address = 1; break; } #endif default: /* invalid address param type */ return NULL; } if (zero_address) { stcb = sctp_findassoc_by_vtag(NULL, dst, ntohl(sh->v_tag), inp_p, netp, sh->src_port, sh->dest_port, 1, vrf_id, 0); if (stcb != NULL) { SCTP_INP_DECR_REF(*inp_p); } } else { stcb = sctp_findassociation_ep_addr(inp_p, &remote_store.sa, netp, dst, NULL); } return (stcb); } /* * allocate a sctp_inpcb and setup a temporary binding to a port/all * addresses. This way if we don't get a bind we by default pick a ephemeral * port with all addresses bound. */ int sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) { /* * we get called when a new endpoint starts up. We need to allocate * the sctp_inpcb structure from the zone and init it. Mark it as * unbound and find a port that we can use as an ephemeral with * INADDR_ANY. If the user binds later no problem we can then add in * the specific addresses. And setup the default parameters for the * EP. 
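The allocator below branches on the socket type: SOCK_SEQPACKET yields a one-to-many (UDP-style) endpoint and SOCK_STREAM a one-to-one (TCP-style) endpoint. A minimal user-space illustration of requesting each style:

    #include <netinet/in.h>
    #include <sys/socket.h>

    int
    main(void)
    {
            /* one-to-many style: becomes SCTP_PCB_FLAGS_UDPTYPE below */
            int fd_many = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
            /* one-to-one style: becomes SCTP_PCB_FLAGS_TCPTYPE below */
            int fd_one = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

            return (fd_many < 0 || fd_one < 0);
    }

Any other socket type falls into the EOPNOTSUPP branch in the code that follows.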
*/ int i, error; struct sctp_inpcb *inp; struct sctp_pcb *m; struct timeval time; sctp_sharedkey_t *null_key; error = 0; SCTP_INP_INFO_WLOCK(); inp = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_ep), struct sctp_inpcb); if (inp == NULL) { SCTP_PRINTF("Out of SCTP-INPCB structures - no resources\n"); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); return (ENOBUFS); } /* zap it */ memset(inp, 0, sizeof(*inp)); /* bump generations */ /* setup socket pointers */ inp->sctp_socket = so; inp->ip_inp.inp.inp_socket = so; inp->ip_inp.inp.inp_cred = crhold(so->so_cred); #ifdef INET6 if (INP_SOCKAF(so) == AF_INET6) { if (MODULE_GLOBAL(ip6_auto_flowlabel)) { inp->ip_inp.inp.inp_flags |= IN6P_AUTOFLOWLABEL; } if (MODULE_GLOBAL(ip6_v6only)) { inp->ip_inp.inp.inp_flags |= IN6P_IPV6_V6ONLY; } } #endif inp->sctp_associd_counter = 1; inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT; inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; inp->max_cwnd = 0; inp->sctp_cmt_on_off = SCTP_BASE_SYSCTL(sctp_cmt_on_off); inp->ecn_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_ecn_enable); inp->prsctp_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pr_enable); inp->auth_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_auth_enable); inp->asconf_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_asconf_enable); inp->reconfig_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_reconfig_enable); inp->nrsack_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_nrsack_enable); inp->pktdrop_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pktdrop_enable); inp->idata_supported = 0; inp->fibnum = so->so_fibnum; /* init the small hash table we use to track asocid <-> tcb */ inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark); if (inp->sctp_asocidhash == NULL) { crfree(inp->ip_inp.inp.inp_cred); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); SCTP_INP_INFO_WUNLOCK(); return (ENOBUFS); } SCTP_INCR_EP_COUNT(); inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl); SCTP_INP_INFO_WUNLOCK(); so->so_pcb = (caddr_t)inp; if (SCTP_SO_TYPE(so) == SOCK_SEQPACKET) { /* UDP style socket */ inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE | SCTP_PCB_FLAGS_UNBOUND); /* Be sure it is NON-BLOCKING IO for UDP */ /* SCTP_SET_SO_NBIO(so); */ } else if (SCTP_SO_TYPE(so) == SOCK_STREAM) { /* TCP style socket */ inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_UNBOUND); /* Be sure we have blocking IO by default */ SOCK_LOCK(so); SCTP_CLEAR_SO_NBIO(so); SOCK_UNLOCK(so); } else { /* * unsupported socket type (RAW, etc)- in case we missed it * in protosw */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EOPNOTSUPP); so->so_pcb = NULL; crfree(inp->ip_inp.inp.inp_cred); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); return (EOPNOTSUPP); } if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_1) { sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_2) { sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_0) { sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); } inp->sctp_tcbhash = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_pcbtblsize), &inp->sctp_hashmark); if (inp->sctp_tcbhash == NULL) { SCTP_PRINTF("Out of SCTP-INPCB->hashinit - no resources\n"); 
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); so->so_pcb = NULL; crfree(inp->ip_inp.inp.inp_cred); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); return (ENOBUFS); } inp->def_vrf_id = vrf_id; SCTP_INP_INFO_WLOCK(); SCTP_INP_LOCK_INIT(inp); INP_LOCK_INIT(&inp->ip_inp.inp, "inp", "sctpinp"); SCTP_INP_READ_INIT(inp); SCTP_ASOC_CREATE_LOCK_INIT(inp); /* lock the new ep */ SCTP_INP_WLOCK(inp); /* add it to the info area */ LIST_INSERT_HEAD(&SCTP_BASE_INFO(listhead), inp, sctp_list); SCTP_INP_INFO_WUNLOCK(); TAILQ_INIT(&inp->read_queue); LIST_INIT(&inp->sctp_addr_list); LIST_INIT(&inp->sctp_asoc_list); #ifdef SCTP_TRACK_FREED_ASOCS /* TEMP CODE */ LIST_INIT(&inp->sctp_asoc_free_list); #endif /* Init the timer structure for signature change */ SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer); inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE; /* now init the actual endpoint default data */ m = &inp->sctp_ep; /* setup the base timeout information */ m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */ m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */ m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default)); m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default)); m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default)); m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default)); m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default)); /* all max/min max are in ms */ m->sctp_maxrto = SCTP_BASE_SYSCTL(sctp_rto_max_default); m->sctp_minrto = SCTP_BASE_SYSCTL(sctp_rto_min_default); m->initial_rto = SCTP_BASE_SYSCTL(sctp_rto_initial_default); m->initial_init_rto_max = SCTP_BASE_SYSCTL(sctp_init_rto_max_default); m->sctp_sack_freq = SCTP_BASE_SYSCTL(sctp_sack_freq_default); m->max_init_times = SCTP_BASE_SYSCTL(sctp_init_rtx_max_default); m->max_send_times = SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default); m->def_net_failure = SCTP_BASE_SYSCTL(sctp_path_rtx_max_default); m->def_net_pf_threshold = SCTP_BASE_SYSCTL(sctp_path_pf_threshold); m->sctp_sws_sender = SCTP_SWS_SENDER_DEF; m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF; m->max_burst = SCTP_BASE_SYSCTL(sctp_max_burst_default); m->fr_max_burst = SCTP_BASE_SYSCTL(sctp_fr_max_burst_default); m->sctp_default_cc_module = SCTP_BASE_SYSCTL(sctp_default_cc_module); m->sctp_default_ss_module = SCTP_BASE_SYSCTL(sctp_default_ss_module); m->max_open_streams_intome = SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default); /* number of streams to pre-open on a association */ m->pre_open_stream_count = SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default); m->default_mtu = 0; /* Add adaptation cookie */ m->adaptation_layer_indicator = 0; m->adaptation_layer_indicator_provided = 0; /* seed random number generator */ m->random_counter = 1; m->store_at = SCTP_SIGNATURE_SIZE; SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers)); sctp_fill_random_store(m); /* Minimum cookie size */ m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) + sizeof(struct sctp_state_cookie); m->size_of_a_cookie += SCTP_SIGNATURE_SIZE; /* Setup the initial secret */ (void)SCTP_GETTIME_TIMEVAL(&time); m->time_of_secret_change = time.tv_sec; for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { m->secret_key[0][i] = sctp_select_initial_TSN(m); } 
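The random_numbers buffer and store_at cursor initialized above implement a small pre-filled randomness pool that later code draws from and refills. A loose userland sketch of that bookkeeping, assuming arc4random_buf() as the entropy source and invented names:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct rand_store {
            uint8_t bytes[64];
            size_t  at;                /* next unread offset */
    };

    /* Must be called once at setup, like SCTP_READ_RANDOM above. */
    static void
    store_refill(struct rand_store *s)
    {
            arc4random_buf(s->bytes, sizeof(s->bytes));
            s->at = 0;
    }

    /* Hand out 32 random bits, refilling the pool when it runs dry. */
    static uint32_t
    store_draw32(struct rand_store *s)
    {
            uint32_t v;

            if (s->at + sizeof(v) > sizeof(s->bytes))
                    store_refill(s);
            memcpy(&v, s->bytes + s->at, sizeof(v));
            s->at += sizeof(v);
            return (v);
    }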
sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); /* How long is a cookie good for ? */ m->def_cookie_life = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default)); /* * Initialize authentication parameters */ m->local_hmacs = sctp_default_supported_hmaclist(); m->local_auth_chunks = sctp_alloc_chunklist(); if (inp->asconf_supported) { sctp_auth_add_chunk(SCTP_ASCONF, m->local_auth_chunks); sctp_auth_add_chunk(SCTP_ASCONF_ACK, m->local_auth_chunks); } m->default_dscp = 0; #ifdef INET6 m->default_flowlabel = 0; #endif m->port = 0; /* encapsulation disabled by default */ LIST_INIT(&m->shared_keys); /* add default NULL key as key id 0 */ null_key = sctp_alloc_sharedkey(); sctp_insert_sharedkey(&m->shared_keys, null_key); SCTP_INP_WUNLOCK(inp); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 12); #endif return (error); } void sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp, struct sctp_tcb *stcb) { struct sctp_nets *net; uint16_t lport, rport; struct sctppcbhead *head; struct sctp_laddr *laddr, *oladdr; atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(old_inp); SCTP_INP_WLOCK(new_inp); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); new_inp->sctp_ep.time_of_secret_change = old_inp->sctp_ep.time_of_secret_change; memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key, sizeof(old_inp->sctp_ep.secret_key)); new_inp->sctp_ep.current_secret_number = old_inp->sctp_ep.current_secret_number; new_inp->sctp_ep.last_secret_number = old_inp->sctp_ep.last_secret_number; new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie; /* make it so new data pours into the new socket */ stcb->sctp_socket = new_inp->sctp_socket; stcb->sctp_ep = new_inp; /* Copy the port across */ lport = new_inp->sctp_lport = old_inp->sctp_lport; rport = stcb->rport; /* Pull the tcb from the old association */ LIST_REMOVE(stcb, sctp_tcbhash); LIST_REMOVE(stcb, sctp_tcblist); if (stcb->asoc.in_asocid_hash) { LIST_REMOVE(stcb, sctp_tcbasocidhash); } /* Now insert the new_inp into the TCP connected hash */ head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))]; LIST_INSERT_HEAD(head, new_inp, sctp_hash); /* Its safe to access */ new_inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; /* Now move the tcb into the endpoint list */ LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist); /* * Question, do we even need to worry about the ep-hash since we * only have one connection? Probably not :> so lets get rid of it * and not suck up any kernel memory in that. */ if (stcb->asoc.in_asocid_hash) { struct sctpasochead *lhd; lhd = &new_inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id, new_inp->hashasocidmark)]; LIST_INSERT_HEAD(lhd, stcb, sctp_tcbasocidhash); } /* Ok. Let's restart timer. */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp, stcb, net); } SCTP_INP_INFO_WUNLOCK(); if (new_inp->sctp_tcbhash != NULL) { SCTP_HASH_FREE(new_inp->sctp_tcbhash, new_inp->sctp_hashmark); new_inp->sctp_tcbhash = NULL; } if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* Subset bound, so copy in the laddr list from the old_inp */ LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) { laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); if (laddr == NULL) { /* * Gak, what can we do? This assoc is really * HOSED. We probably should send an abort * here. 
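When an association is moved to a new subset-bound endpoint below, the old endpoint's local address list is duplicated entry by entry, taking a reference on each underlying ifa; an allocation failure merely skips an entry rather than killing the association. A sketch of that refcounted list copy with simplified, invented types:

    #include <stdlib.h>

    struct ifa {
            int refcount;
    };
    struct laddr {
            struct ifa *ifa;
            struct laddr *next;
    };

    /*
     * Duplicate src's address list onto dst, taking one reference per
     * entry. Allocation failures just skip the entry, leaving the
     * association degraded rather than dead.
     */
    void
    copy_laddr_list(struct laddr **dst, const struct laddr *src)
    {
            struct laddr *l;

            for (; src != NULL; src = src->next) {
                    if ((l = calloc(1, sizeof(*l))) == NULL)
                            continue;
                    l->ifa = src->ifa;
                    l->ifa->refcount++;        /* kernel: atomic_add_int() */
                    l->next = *dst;
                    *dst = l;
            }
    }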
*/ SCTPDBG(SCTP_DEBUG_PCB1, "Association hosed in TCP model, out of laddr memory\n"); continue; } SCTP_INCR_LADDR_COUNT(); memset(laddr, 0, sizeof(*laddr)); (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time); laddr->ifa = oladdr->ifa; atomic_add_int(&laddr->ifa->refcount, 1); LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr, sctp_nxt_addr); new_inp->laddr_count++; if (oladdr == stcb->asoc.last_used_address) { stcb->asoc.last_used_address = laddr; } } } /* * Now any running timers need to be adjusted since we really don't * care if they are running or not just blast in the new_inp into * all of them. */ stcb->asoc.dack_timer.ep = (void *)new_inp; stcb->asoc.asconf_timer.ep = (void *)new_inp; stcb->asoc.strreset_timer.ep = (void *)new_inp; stcb->asoc.shut_guard_timer.ep = (void *)new_inp; stcb->asoc.autoclose_timer.ep = (void *)new_inp; stcb->asoc.delayed_event_timer.ep = (void *)new_inp; stcb->asoc.delete_prim_timer.ep = (void *)new_inp; /* now what about the nets? */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { net->pmtu_timer.ep = (void *)new_inp; net->hb_timer.ep = (void *)new_inp; net->rxt_timer.ep = (void *)new_inp; } SCTP_INP_WUNLOCK(new_inp); SCTP_INP_WUNLOCK(old_inp); } /* * insert an laddr entry with the given ifa for the desired list */ static int sctp_insert_laddr(struct sctpladdr *list, struct sctp_ifa *ifa, uint32_t act) { struct sctp_laddr *laddr; laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); if (laddr == NULL) { /* out of memory? */ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); return (EINVAL); } SCTP_INCR_LADDR_COUNT(); memset(laddr, 0, sizeof(*laddr)); (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time); laddr->ifa = ifa; laddr->action = act; atomic_add_int(&ifa->refcount, 1); /* insert it */ LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr); return (0); } /* * Remove an laddr entry from the local address list (on an assoc) */ static void sctp_remove_laddr(struct sctp_laddr *laddr) { /* remove from the list */ LIST_REMOVE(laddr, sctp_nxt_addr); sctp_free_ifa(laddr->ifa); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), laddr); SCTP_DECR_LADDR_COUNT(); } /* sctp_ifap is used to bypass normal local address validation checks */ int sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct sctp_ifa *sctp_ifap, struct thread *p) { /* bind a ep to a socket address */ struct sctppcbhead *head; struct sctp_inpcb *inp, *inp_tmp; struct inpcb *ip_inp; int port_reuse_active = 0; int bindall; uint16_t lport; int error; uint32_t vrf_id; lport = 0; bindall = 1; inp = (struct sctp_inpcb *)so->so_pcb; ip_inp = (struct inpcb *)so->so_pcb; #ifdef SCTP_DEBUG if (addr) { SCTPDBG(SCTP_DEBUG_PCB1, "Bind called port: %d\n", ntohs(((struct sockaddr_in *)addr)->sin_port)); SCTPDBG(SCTP_DEBUG_PCB1, "Addr: "); SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr); } #endif if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { /* already did a bind, subsequent binds NOT allowed ! */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); return (EINVAL); } #ifdef INVARIANTS if (p == NULL) panic("null proc/thread"); #endif if (addr != NULL) { switch (addr->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin; /* IPV6_V6ONLY socket? 
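				 * If so, an AF_INET bind is not allowed;
				 * the socket carries IPv6 traffic only.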
*/ - if (SCTP_IPV6_V6ONLY(ip_inp)) { + if (SCTP_IPV6_V6ONLY(inp)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); return (EINVAL); } if (addr->sa_len != sizeof(*sin)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); return (EINVAL); } sin = (struct sockaddr_in *)addr; lport = sin->sin_port; /* * For LOOPBACK the prison_local_ip4() call * will transmute the ip address to the * proper value. */ if (p && (error = prison_local_ip4(p->td_ucred, &sin->sin_addr)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); return (error); } if (sin->sin_addr.s_addr != INADDR_ANY) { bindall = 0; } break; } #endif #ifdef INET6 case AF_INET6: { /* * Only for pure IPv6 Address. (No IPv4 * Mapped!) */ struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (addr->sa_len != sizeof(*sin6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); return (EINVAL); } lport = sin6->sin6_port; /* * For LOOPBACK the prison_local_ip6() call * will transmute the ipv6 address to the * proper value. */ if (p && (error = prison_local_ip6(p->td_ucred, &sin6->sin6_addr, (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); return (error); } if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { bindall = 0; /* KAME hack: embed scopeid */ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); return (EINVAL); } } /* this must be cleared for ifa_ifwithaddr() */ sin6->sin6_scope_id = 0; break; } #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EAFNOSUPPORT); return (EAFNOSUPPORT); } } SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); /* Setup a vrf_id to be the default for the non-bind-all case. */ vrf_id = inp->def_vrf_id; /* increase our count due to the unlock we do */ SCTP_INP_INCR_REF(inp); if (lport) { /* * Did the caller specify a port? if so we must see if an ep * already has this one bound. */ /* got to be root to get at low ports */ if (ntohs(lport) < IPPORT_RESERVED) { if ((p != NULL) && ((error = priv_check(p, PRIV_NETINET_RESERVEDPORT) ) != 0)) { SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (error); } } SCTP_INP_WUNLOCK(inp); if (bindall) { vrf_id = inp->def_vrf_id; inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id); if (inp_tmp != NULL) { /* * lock guy returned and lower count note * that we are not bound so inp_tmp should * NEVER be inp. And it is this inp * (inp_tmp) that gets the reference bump, * so we must lower it. */ SCTP_INP_DECR_REF(inp_tmp); /* unlock info */ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { /* * Ok, must be one-2-one and * allowing port re-use */ port_reuse_active = 1; goto continue_anyway; } SCTP_INP_DECR_REF(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); return (EADDRINUSE); } } else { inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id); if (inp_tmp != NULL) { /* * lock guy returned and lower count note * that we are not bound so inp_tmp should * NEVER be inp. And it is this inp * (inp_tmp) that gets the reference bump, * so we must lower it. 
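				 * If both endpoints enabled
				 * SCTP_PCB_FLAGS_PORTREUSE the collision
				 * is tolerated (one-to-one model below);
				 * otherwise the bind fails with
				 * EADDRINUSE.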
*/ SCTP_INP_DECR_REF(inp_tmp); /* unlock info */ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { /* * Ok, must be one-2-one and * allowing port re-use */ port_reuse_active = 1; goto continue_anyway; } SCTP_INP_DECR_REF(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); return (EADDRINUSE); } } continue_anyway: SCTP_INP_WLOCK(inp); if (bindall) { /* verify that no lport is not used by a singleton */ if ((port_reuse_active == 0) && (inp_tmp = sctp_isport_inuse(inp, lport, vrf_id))) { /* Sorry someone already has this one bound */ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { port_reuse_active = 1; } else { SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); return (EADDRINUSE); } } } } else { uint16_t first, last, candidate; uint16_t count; int done; if (ip_inp->inp_flags & INP_HIGHPORT) { first = MODULE_GLOBAL(ipport_hifirstauto); last = MODULE_GLOBAL(ipport_hilastauto); } else if (ip_inp->inp_flags & INP_LOWPORT) { if (p && (error = priv_check(p, PRIV_NETINET_RESERVEDPORT) )) { SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); return (error); } first = MODULE_GLOBAL(ipport_lowfirstauto); last = MODULE_GLOBAL(ipport_lowlastauto); } else { first = MODULE_GLOBAL(ipport_firstauto); last = MODULE_GLOBAL(ipport_lastauto); } if (first > last) { uint16_t temp; temp = first; first = last; last = temp; } count = last - first + 1; /* number of candidates */ candidate = first + sctp_select_initial_TSN(&inp->sctp_ep) % (count); done = 0; while (!done) { if (sctp_isport_inuse(inp, htons(candidate), inp->def_vrf_id) == NULL) { done = 1; } if (!done) { if (--count == 0) { SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); return (EADDRINUSE); } if (candidate == last) candidate = first; else candidate = candidate + 1; } } lport = htons(candidate); } SCTP_INP_DECR_REF(inp); if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { /* * this really should not happen. The guy did a non-blocking * bind and then did a close at the same time. 
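		 * If it does, the endpoint is already being torn down, so
		 * fail the bind instead of inserting it into a hash table
		 * it is about to leave.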
*/ SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); return (EINVAL); } /* ok we look clear to give out this port, so lets setup the binding */ if (bindall) { /* binding to all addresses, so just set in the proper flags */ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL; /* set the automatic addr changes from kernel flag */ if (SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) { sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF); sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); } else { sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); } if (SCTP_BASE_SYSCTL(sctp_multiple_asconfs) == 0) { sctp_feature_off(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS); } else { sctp_feature_on(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS); } /* * set the automatic mobility_base from kernel flag (by * micchie) */ if (SCTP_BASE_SYSCTL(sctp_mobility_base) == 0) { sctp_mobility_feature_off(inp, SCTP_MOBILITY_BASE); sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); } else { sctp_mobility_feature_on(inp, SCTP_MOBILITY_BASE); sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); } /* * set the automatic mobility_fasthandoff from kernel flag * (by micchie) */ if (SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) == 0) { sctp_mobility_feature_off(inp, SCTP_MOBILITY_FASTHANDOFF); sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); } else { sctp_mobility_feature_on(inp, SCTP_MOBILITY_FASTHANDOFF); sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); } } else { /* * bind specific, make sure flags is off and add a new * address structure to the sctp_addr_list inside the ep * structure. * * We will need to allocate one and insert it at the head. * The socketopt call can just insert new addresses in there * as well. It will also have to do the embed scope kame * hack too (before adding). */ struct sctp_ifa *ifa; union sctp_sockstore store; memset(&store, 0, sizeof(store)); switch (addr->sa_family) { #ifdef INET case AF_INET: memcpy(&store.sin, addr, sizeof(struct sockaddr_in)); store.sin.sin_port = 0; break; #endif #ifdef INET6 case AF_INET6: memcpy(&store.sin6, addr, sizeof(struct sockaddr_in6)); store.sin6.sin6_port = 0; break; #endif default: break; } /* * first find the interface with the bound address need to * zero out the port to find the address! yuck! can't do * this earlier since need port for sctp_pcb_findep() */ if (sctp_ifap != NULL) { ifa = sctp_ifap; } else { /* * Note for BSD we hit here always other O/S's will * pass things in via the sctp_ifap argument * (Panda). */ ifa = sctp_find_ifa_by_addr(&store.sa, vrf_id, SCTP_ADDR_NOT_LOCKED); } if (ifa == NULL) { /* Can't find an interface with that address */ SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRNOTAVAIL); return (EADDRNOTAVAIL); } #ifdef INET6 if (addr->sa_family == AF_INET6) { /* GAK, more FIXME IFA lock? */ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { /* Can't bind a non-existent addr. 
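				 * (SCTP_ADDR_IFA_UNUSEABLE marks an IPv6
				 * address that is not currently usable,
				 * e.g. still tentative or detached.)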
*/ SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); return (EINVAL); } } #endif /* we're not bound all */ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL; /* allow bindx() to send ASCONF's for binding changes */ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); /* clear automatic addr changes from kernel flag */ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); /* add this address to the endpoint list */ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, 0); if (error != 0) { SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (error); } inp->laddr_count++; } /* find the bucket */ if (port_reuse_active) { /* Put it into tcp 1-2-1 hash */ head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashtcpmark))]; inp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL; } else { head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark))]; } /* put it in the bucket */ LIST_INSERT_HEAD(head, inp, sctp_hash); SCTPDBG(SCTP_DEBUG_PCB1, "Main hash to bind at head:%p, bound port:%d - in tcp_pool=%d\n", (void *)head, ntohs(lport), port_reuse_active); /* set in the port */ inp->sctp_lport = lport; /* turn off just the unbound flag */ inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return (0); } static void sctp_iterator_inp_being_freed(struct sctp_inpcb *inp) { struct sctp_iterator *it, *nit; /* * We enter with the only the ITERATOR_LOCK in place and a write * lock on the inp_info stuff. */ it = sctp_it_ctl.cur_it; if (it && (it->vn != curvnet)) { /* Its not looking at our VNET */ return; } if (it && (it->inp == inp)) { /* * This is tricky and we hold the iterator lock, but when it * returns and gets the lock (when we release it) the * iterator will try to operate on inp. We need to stop that * from happening. But of course the iterator has a * reference on the stcb and inp. We can mark it and it will * stop. * * If its a single iterator situation, we set the end * iterator flag. Otherwise we set the iterator to go to the * next inp. * */ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT; } else { sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_INP; } } /* * Now go through and remove any single reference to our inp that * may be still pending on the list */ SCTP_IPI_ITERATOR_WQ_LOCK(); TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { if (it->vn != curvnet) { continue; } if (it->inp == inp) { /* This one points to me is it inp specific? */ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { /* Remove and free this one */ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); if (it->function_atend != NULL) { (*it->function_atend) (it->pointer, it->val); } SCTP_FREE(it, SCTP_M_ITER); } else { it->inp = LIST_NEXT(it->inp, sctp_list); if (it->inp) { SCTP_INP_INCR_REF(it->inp); } } /* * When its put in the refcnt is incremented so decr * it */ SCTP_INP_DECR_REF(inp); } } SCTP_IPI_ITERATOR_WQ_UNLOCK(); } /* release sctp_inpcb unbind the port */ void sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) { /* * Here we free a endpoint. We must find it (if it is in the Hash * table) and remove it from there. Then we must also find it in the * overall list and remove it from there. After all removals are * complete then any timer has to be stopped. Then start the actual * freeing. a) Any local lists. b) Any associations. c) The hash of * all associations. 
d) finally the ep itself. */ struct sctp_tcb *asoc, *nasoc; struct sctp_laddr *laddr, *nladdr; struct inpcb *ip_pcb; struct socket *so; int being_refed = 0; struct sctp_queued_to_read *sq, *nsq; int cnt; sctp_sharedkey_t *shared_key, *nshared_key; #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 0); #endif SCTP_ITERATOR_LOCK(); /* mark any iterators on the list or being processed */ sctp_iterator_inp_being_freed(inp); SCTP_ITERATOR_UNLOCK(); so = inp->sctp_socket; if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { /* been here before.. eeks.. get out of here */ SCTP_PRINTF("This conflict in free SHOULD not be happening! from %d, imm %d\n", from, immediate); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 1); #endif return; } SCTP_ASOC_CREATE_LOCK(inp); SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); if (from == SCTP_CALLED_AFTER_CMPSET_OFCLOSE) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP; /* socket is gone, so no more wakeups allowed */ inp->sctp_flags |= SCTP_PCB_FLAGS_DONT_WAKE; inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; } /* First time through we have the socket lock, after that no more. */ sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL, SCTP_FROM_SCTP_PCB + SCTP_LOC_1); if (inp->control) { sctp_m_freem(inp->control); inp->control = NULL; } if (inp->pkt) { sctp_m_freem(inp->pkt); inp->pkt = NULL; } ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer * here but I will be nice :> (i.e. * ip_pcb = ep;) */ if (immediate == SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) { int cnt_in_sd; cnt_in_sd = 0; LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) { SCTP_TCB_LOCK(asoc); if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { /* Skip guys being freed */ cnt_in_sd++; if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) { /* * Special case - we did not start a * kill timer on the asoc due to it * was not closed. So go ahead and * start it now. */ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_IN_ACCEPT_QUEUE); sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL); } SCTP_TCB_UNLOCK(asoc); continue; } if (((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) && (asoc->asoc.total_output_queue_size == 0)) { /* * If we have data in queue, we don't want * to just free since the app may have done, * send()/close or connect/send/close. And * it wants the data to get across first. 
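				 * Hence the total_output_queue_size == 0
				 * test above: only a front-state
				 * association with nothing queued is freed
				 * right away.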
*/ /* Just abandon things in the front states */ if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_2) == 0) { cnt_in_sd++; } continue; } /* Disconnect the socket please */ asoc->sctp_socket = NULL; SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_CLOSED_SOCKET); if ((asoc->asoc.size_on_reasm_queue > 0) || (asoc->asoc.control_pdapi) || (asoc->asoc.size_on_all_streams > 0) || (so && (so->so_rcv.sb_cc > 0))) { /* Left with Data unread */ struct mbuf *op_err; op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_3; sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED); SCTP_STAT_INCR_COUNTER32(sctps_aborted); if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_4) == 0) { cnt_in_sd++; } continue; } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) && TAILQ_EMPTY(&asoc->asoc.sent_queue) && (asoc->asoc.stream_queue_cnt == 0)) { if ((*asoc->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete) (asoc, &asoc->asoc)) { goto abort_anyway; } if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { struct sctp_nets *netp; /* * there is nothing queued to send, * so I send shutdown */ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); sctp_stop_timers_for_shutdown(asoc); if (asoc->asoc.alternate) { netp = asoc->asoc.alternate; } else { netp = asoc->asoc.primary_destination; } sctp_send_shutdown(asoc, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, asoc->asoc.primary_destination); sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_LOCKED); } } else { /* mark into shutdown pending */ SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, asoc->asoc.primary_destination); if ((*asoc->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete) (asoc, &asoc->asoc)) { SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_PARTIAL_MSG_LEFT); } if (TAILQ_EMPTY(&asoc->asoc.send_queue) && TAILQ_EMPTY(&asoc->asoc.sent_queue) && (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; abort_anyway: op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_5; sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED); SCTP_STAT_INCR_COUNTER32(sctps_aborted); if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_6) == 0) { cnt_in_sd++; } continue; } else { sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); } } cnt_in_sd++; SCTP_TCB_UNLOCK(asoc); } /* now is there some left in our SHUTDOWN state? */ if (cnt_in_sd) { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 2); #endif inp->sctp_socket = NULL; SCTP_INP_WUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return; } } inp->sctp_socket = NULL; if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) != SCTP_PCB_FLAGS_UNBOUND) { /* * ok, this guy has been bound. 
It's port is somewhere in * the SCTP_BASE_INFO(hash table). Remove it! */ LIST_REMOVE(inp, sctp_hash); inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND; } /* * If there is a timer running to kill us, forget it, since it may * have a contest on the INP lock.. which would cause us to die ... */ cnt = 0; LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) { SCTP_TCB_LOCK(asoc); if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) { SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_IN_ACCEPT_QUEUE); sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL); } cnt++; SCTP_TCB_UNLOCK(asoc); continue; } /* Free associations that are NOT killing us */ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) { struct mbuf *op_err; op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_7; sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED); SCTP_STAT_INCR_COUNTER32(sctps_aborted); } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { cnt++; SCTP_TCB_UNLOCK(asoc); continue; } if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_8) == 0) { cnt++; } } if (cnt) { /* Ok we have someone out there that will kill us */ (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 3); #endif SCTP_INP_WUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return; } if (SCTP_INP_LOCK_CONTENDED(inp)) being_refed++; if (SCTP_INP_READ_CONTENDED(inp)) being_refed++; if (SCTP_ASOC_CREATE_LOCK_CONTENDED(inp)) being_refed++; if ((inp->refcount) || (being_refed) || (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) { (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 4); #endif sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL); SCTP_INP_WUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); return; } inp->sctp_ep.signature_change.type = 0; inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE; /* * Remove it from the list .. last thing we need a lock for. */ LIST_REMOVE(inp, sctp_list); SCTP_INP_WUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); /* * Now we release all locks. Since this INP cannot be found anymore * except possibly by the kill timer that might be running. We call * the drain function here. It should hit the case were it sees the * ACTIVE flag cleared and exit out freeing us to proceed and * destroy everything. 
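	 * (When called from the INPKILL timer itself we only stop the
	 * timer below, since a timer callback cannot drain itself.)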
*/ if (from != SCTP_CALLED_FROM_INPKILL_TIMER) { (void)SCTP_OS_TIMER_STOP_DRAIN(&inp->sctp_ep.signature_change.timer); } else { /* Probably un-needed */ (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); } #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 5); #endif if ((inp->sctp_asocidhash) != NULL) { SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark); inp->sctp_asocidhash = NULL; } /* sa_ignore FREED_MEMORY */ TAILQ_FOREACH_SAFE(sq, &inp->read_queue, next, nsq) { /* Its only abandoned if it had data left */ if (sq->length) SCTP_STAT_INCR(sctps_left_abandon); TAILQ_REMOVE(&inp->read_queue, sq, next); sctp_free_remote_addr(sq->whoFrom); if (so) so->so_rcv.sb_cc -= sq->length; if (sq->data) { sctp_m_freem(sq->data); sq->data = NULL; } /* * no need to free the net count, since at this point all * assoc's are gone. */ sctp_free_a_readq(NULL, sq); } /* Now the sctp_pcb things */ /* * free each asoc if it is not already closed/free. we can't use the * macro here since le_next will get freed as part of the * sctp_free_assoc() call. */ if (ip_pcb->inp_options) { (void)sctp_m_free(ip_pcb->inp_options); ip_pcb->inp_options = 0; } #ifdef INET6 - if (ip_pcb->inp_vflag & INP_IPV6) - ip6_freepcbopts(((struct inpcb *)inp)->in6p_outputopts); + if (ip_pcb->inp_vflag & INP_IPV6) { + ip6_freepcbopts(ip_pcb->in6p_outputopts); + } #endif /* INET6 */ ip_pcb->inp_vflag = 0; /* free up authentication fields */ if (inp->sctp_ep.local_auth_chunks != NULL) sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); if (inp->sctp_ep.local_hmacs != NULL) sctp_free_hmaclist(inp->sctp_ep.local_hmacs); LIST_FOREACH_SAFE(shared_key, &inp->sctp_ep.shared_keys, next, nshared_key) { LIST_REMOVE(shared_key, next); sctp_free_sharedkey(shared_key); /* sa_ignore FREED_MEMORY */ } /* * if we have an address list the following will free the list of * ifaddr's that are set into this ep. Again macro limitations here, * since the LIST_FOREACH could be a bad idea. */ LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) { sctp_remove_laddr(laddr); } #ifdef SCTP_TRACK_FREED_ASOCS /* TEMP CODE */ LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_free_list, sctp_tcblist, nasoc) { LIST_REMOVE(asoc, sctp_tcblist); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), asoc); SCTP_DECR_ASOC_COUNT(); } /* *** END TEMP CODE *** */ #endif /* Now lets see about freeing the EP hash table. */ if (inp->sctp_tcbhash != NULL) { SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark); inp->sctp_tcbhash = NULL; } /* Now we must put the ep memory back into the zone pool */ crfree(inp->ip_inp.inp.inp_cred); INP_LOCK_DESTROY(&inp->ip_inp.inp); SCTP_INP_LOCK_DESTROY(inp); SCTP_INP_READ_DESTROY(inp); SCTP_ASOC_CREATE_LOCK_DESTROY(inp); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); SCTP_DECR_EP_COUNT(); } struct sctp_nets * sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr) { struct sctp_nets *net; /* locate the address */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr)) return (net); } return (NULL); } int sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id) { struct sctp_ifa *sctp_ifa; sctp_ifa = sctp_find_ifa_by_addr(addr, vrf_id, SCTP_ADDR_NOT_LOCKED); if (sctp_ifa) { return (1); } else { return (0); } } /* * add's a remote endpoint address, done with the INIT/INIT-ACK as well as * when a ASCONF arrives that adds it. It will also initialize all the cwnd * stats of stuff. 
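 * Returns 0 on success and -1 on an invalid address or unsupported
 * family; on success the new net is linked into the assoc and, if
 * netp is non-NULL, handed back through *netp.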
*/ int sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, struct sctp_nets **netp, uint16_t port, int set_scope, int from) { /* * The following is redundant to the same lines in the * sctp_aloc_assoc() but is needed since others call the add address * function */ struct sctp_nets *net, *netfirst; int addr_inscope; SCTPDBG(SCTP_DEBUG_PCB1, "Adding an address (from:%d) to the peer: ", from); SCTPDBG_ADDR(SCTP_DEBUG_PCB1, newaddr); netfirst = sctp_findnet(stcb, newaddr); if (netfirst) { /* * Lie and return ok, we don't want to make the association * go away for this behavior. It will happen in the TCP * model in a connected socket. It does not reach the hash * table until after the association is built so it can't be * found. Mark as reachable, since the initial creation will * have been cleared and the NOT_IN_ASSOC flag will have * been added... and we don't want to end up removing it * back out. */ if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) { netfirst->dest_state = (SCTP_ADDR_REACHABLE | SCTP_ADDR_UNCONFIRMED); } else { netfirst->dest_state = SCTP_ADDR_REACHABLE; } return (0); } addr_inscope = 1; switch (newaddr->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin; sin = (struct sockaddr_in *)newaddr; if (sin->sin_addr.s_addr == 0) { /* Invalid address */ return (-1); } /* zero out the zero area */ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); /* assure len is set */ sin->sin_len = sizeof(struct sockaddr_in); if (set_scope) { if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { stcb->asoc.scope.ipv4_local_scope = 1; } } else { /* Validate the address is in scope */ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) && (stcb->asoc.scope.ipv4_local_scope == 0)) { addr_inscope = 0; } } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)newaddr; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* Invalid address */ return (-1); } /* assure len is set */ sin6->sin6_len = sizeof(struct sockaddr_in6); if (set_scope) { if (sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id)) { stcb->asoc.scope.loopback_scope = 1; stcb->asoc.scope.local_scope = 0; stcb->asoc.scope.ipv4_local_scope = 1; stcb->asoc.scope.site_scope = 1; } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { /* * If the new destination is a * LINK_LOCAL we must have common * site scope. Don't set the local * scope since we may not share all * links, only loopback can do this. * Links on the local network would * also be on our private network * for v4 too. */ stcb->asoc.scope.ipv4_local_scope = 1; stcb->asoc.scope.site_scope = 1; } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { /* * If the new destination is * SITE_LOCAL then we must have site * scope in common. 
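				 * (IPv6 site-local addresses are
				 * deprecated, but they still factor
				 * into the scope bookkeeping here.)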
*/ stcb->asoc.scope.site_scope = 1; } } else { /* Validate the address is in scope */ if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) && (stcb->asoc.scope.loopback_scope == 0)) { addr_inscope = 0; } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) && (stcb->asoc.scope.local_scope == 0)) { addr_inscope = 0; } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) && (stcb->asoc.scope.site_scope == 0)) { addr_inscope = 0; } } break; } #endif default: /* not supported family type */ return (-1); } net = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_net), struct sctp_nets); if (net == NULL) { return (-1); } SCTP_INCR_RADDR_COUNT(); memset(net, 0, sizeof(struct sctp_nets)); (void)SCTP_GETTIME_TIMEVAL(&net->start_time); memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len); switch (newaddr->sa_family) { #ifdef INET case AF_INET: ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport; break; #endif #ifdef INET6 case AF_INET6: ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport; break; #endif default: break; } net->addr_is_local = sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id); if (net->addr_is_local && ((set_scope || (from == SCTP_ADDR_IS_CONFIRMED)))) { stcb->asoc.scope.loopback_scope = 1; stcb->asoc.scope.ipv4_local_scope = 1; stcb->asoc.scope.local_scope = 0; stcb->asoc.scope.site_scope = 1; addr_inscope = 1; } net->failure_threshold = stcb->asoc.def_net_failure; net->pf_threshold = stcb->asoc.def_net_pf_threshold; if (addr_inscope == 0) { net->dest_state = (SCTP_ADDR_REACHABLE | SCTP_ADDR_OUT_OF_SCOPE); } else { if (from == SCTP_ADDR_IS_CONFIRMED) /* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */ net->dest_state = SCTP_ADDR_REACHABLE; else net->dest_state = SCTP_ADDR_REACHABLE | SCTP_ADDR_UNCONFIRMED; } /* * We set this to 0, the timer code knows that this means its an * initial value */ net->rto_needed = 1; net->RTO = 0; net->RTO_measured = 0; stcb->asoc.numnets++; net->ref_count = 1; net->cwr_window_tsn = net->last_cwr_tsn = stcb->asoc.sending_seq - 1; net->port = port; net->dscp = stcb->asoc.default_dscp; #ifdef INET6 net->flowlabel = stcb->asoc.default_flowlabel; #endif if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { net->dest_state |= SCTP_ADDR_NOHB; } else { net->dest_state &= ~SCTP_ADDR_NOHB; } if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) { net->dest_state |= SCTP_ADDR_NO_PMTUD; } else { net->dest_state &= ~SCTP_ADDR_NO_PMTUD; } net->heart_beat_delay = stcb->asoc.heart_beat_delay; /* Init the timer structure */ SCTP_OS_TIMER_INIT(&net->rxt_timer.timer); SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer); SCTP_OS_TIMER_INIT(&net->hb_timer.timer); /* Now generate a route for this guy */ #ifdef INET6 /* KAME hack: embed scopeid */ if (newaddr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); sin6->sin6_scope_id = 0; } #endif SCTP_RTALLOC((sctp_route_t *)&net->ro, stcb->asoc.vrf_id, stcb->sctp_ep->fibnum); net->src_addr_selected = 0; if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro)) { /* Get source address */ net->ro._s_addr = sctp_source_address_selection(stcb->sctp_ep, stcb, (sctp_route_t *)&net->ro, net, 0, stcb->asoc.vrf_id); if (stcb->asoc.default_mtu > 0) { net->mtu = stcb->asoc.default_mtu; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: net->mtu += SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: net->mtu += SCTP_MIN_OVERHEAD; break; #endif default: break; } #if 
defined(INET) || defined(INET6) if (net->port) { net->mtu += (uint32_t)sizeof(struct udphdr); } #endif } else if (net->ro._s_addr != NULL) { uint32_t imtu, rmtu, hcmtu; net->src_addr_selected = 1; /* Now get the interface MTU */ if (net->ro._s_addr->ifn_p != NULL) { imtu = SCTP_GATHER_MTU_FROM_INTFC(net->ro._s_addr->ifn_p); } else { imtu = 0; } rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt); hcmtu = sctp_hc_get_mtu(&net->ro._l_addr, stcb->sctp_ep->fibnum); net->mtu = sctp_min_mtu(hcmtu, rmtu, imtu); if (rmtu == 0) { /* * Start things off to match mtu of * interface please. */ SCTP_SET_MTU_OF_ROUTE(&net->ro._l_addr.sa, net->ro.ro_rt, net->mtu); } } } if (net->mtu == 0) { if (stcb->asoc.default_mtu > 0) { net->mtu = stcb->asoc.default_mtu; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: net->mtu += SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: net->mtu += SCTP_MIN_OVERHEAD; break; #endif default: break; } #if defined(INET) || defined(INET6) if (net->port) { net->mtu += (uint32_t)sizeof(struct udphdr); } #endif } else { switch (newaddr->sa_family) { #ifdef INET case AF_INET: net->mtu = SCTP_DEFAULT_MTU; break; #endif #ifdef INET6 case AF_INET6: net->mtu = 1280; break; #endif default: break; } } } #if defined(INET) || defined(INET6) if (net->port) { net->mtu -= (uint32_t)sizeof(struct udphdr); } #endif if (from == SCTP_ALLOC_ASOC) { stcb->asoc.smallest_mtu = net->mtu; } if (stcb->asoc.smallest_mtu > net->mtu) { sctp_pathmtu_adjustment(stcb, net->mtu); } #ifdef INET6 if (newaddr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; (void)sa6_recoverscope(sin6); } #endif /* JRS - Use the congestion control given in the CC module */ if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) (*stcb->asoc.cc_functions.sctp_set_initial_cc_param) (stcb, net); /* * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning * of assoc (2005/06/27, iyengar@cis.udel.edu) */ net->find_pseudo_cumack = 1; net->find_rtx_pseudo_cumack = 1; /* Choose an initial flowid. */ net->flowid = stcb->asoc.my_vtag ^ ntohs(stcb->rport) ^ ntohs(stcb->sctp_ep->sctp_lport); net->flowtype = M_HASHTYPE_OPAQUE_HASH; if (netp) { *netp = net; } netfirst = TAILQ_FIRST(&stcb->asoc.nets); if (net->ro.ro_rt == NULL) { /* Since we have no route put it at the back */ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); } else if (netfirst == NULL) { /* We are the first one in the pool. */ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); } else if (netfirst->ro.ro_rt == NULL) { /* * First one has NO route. Place this one ahead of the first * one. */ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) { /* * This one has a different interface than the one at the * top of the list. Place it ahead. */ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); } else { /* * Ok we have the same interface as the first one. Move * forward until we find either a) one with a NULL route... * insert ahead of that b) one with a different ifp.. insert * after that. c) end of the list.. insert at the tail. 
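	 * The net effect is that nets stay grouped by interface and
	 * route-less entries sink toward the back of the list.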
*/ struct sctp_nets *netlook; do { netlook = TAILQ_NEXT(netfirst, sctp_next); if (netlook == NULL) { /* End of the list */ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); break; } else if (netlook->ro.ro_rt == NULL) { /* next one has NO route */ TAILQ_INSERT_BEFORE(netfirst, net, sctp_next); break; } else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp) { TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook, net, sctp_next); break; } /* Shift forward */ netfirst = netlook; } while (netlook != NULL); } /* got to have a primary set */ if (stcb->asoc.primary_destination == 0) { stcb->asoc.primary_destination = net; } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) && (net->ro.ro_rt) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { /* No route to current primary adopt new primary */ stcb->asoc.primary_destination = net; } /* Validate primary is first */ net = TAILQ_FIRST(&stcb->asoc.nets); if ((net != stcb->asoc.primary_destination) && (stcb->asoc.primary_destination)) { /* * first one on the list is NOT the primary sctp_cmpaddr() * is much more efficient if the primary is the first on the * list, make it so. */ TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); } return (0); } static uint32_t sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb) { uint32_t id; struct sctpasochead *head; struct sctp_tcb *lstcb; try_again: if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { /* TSNH */ return (0); } /* * We don't allow assoc id to be one of SCTP_FUTURE_ASSOC, * SCTP_CURRENT_ASSOC and SCTP_ALL_ASSOC. */ if (inp->sctp_associd_counter <= SCTP_ALL_ASSOC) { inp->sctp_associd_counter = SCTP_ALL_ASSOC + 1; } id = inp->sctp_associd_counter; inp->sctp_associd_counter++; lstcb = sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t)id, 0); if (lstcb) { goto try_again; } head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)]; LIST_INSERT_HEAD(head, stcb, sctp_tcbasocidhash); stcb->asoc.in_asocid_hash = 1; return (id); } /* * allocate an association and add it to the endpoint. The caller must be * careful to add all additional addresses once they are know right away or * else the assoc will be may experience a blackout scenario. */ struct sctp_tcb * sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, int *error, uint32_t override_tag, uint32_t vrf_id, uint16_t o_streams, uint16_t port, struct thread *p, int initialize_auth_params) { /* note the p argument is only valid in unbound sockets */ struct sctp_tcb *stcb; struct sctp_association *asoc; struct sctpasochead *head; uint16_t rport; int err; /* * Assumption made here: Caller has done a * sctp_findassociation_ep_addr(ep, addr's); to make sure the * address does not exist already. */ if (SCTP_BASE_INFO(ipi_count_asoc) >= SCTP_MAX_NUM_OF_ASOC) { /* Hit max assoc, sorry no more */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); *error = ENOBUFS; return (NULL); } if (firstaddr == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); } SCTP_INP_RLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) || (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { /* * If its in the TCP pool, its NOT allowed to create an * association. The parent listener needs to call * sctp_aloc_assoc.. or the one-2-many socket. If a peeled * off, or connected one does this.. 
its an error. */ SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) { if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) || (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED)) { SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); } } SCTPDBG(SCTP_DEBUG_PCB3, "Allocate an association for peer:"); #ifdef SCTP_DEBUG if (firstaddr) { SCTPDBG_ADDR(SCTP_DEBUG_PCB3, firstaddr); switch (firstaddr->sa_family) { #ifdef INET case AF_INET: SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n", ntohs(((struct sockaddr_in *)firstaddr)->sin_port)); break; #endif #ifdef INET6 case AF_INET6: SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n", ntohs(((struct sockaddr_in6 *)firstaddr)->sin6_port)); break; #endif default: break; } } else { SCTPDBG(SCTP_DEBUG_PCB3, "None\n"); } #endif /* SCTP_DEBUG */ switch (firstaddr->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin; sin = (struct sockaddr_in *)firstaddr; if ((ntohs(sin->sin_port) == 0) || (sin->sin_addr.s_addr == INADDR_ANY) || (sin->sin_addr.s_addr == INADDR_BROADCAST) || IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { /* Invalid address */ SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); } rport = sin->sin_port; break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)firstaddr; if ((ntohs(sin6->sin6_port) == 0) || IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { /* Invalid address */ SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); } rport = sin6->sin6_port; break; } #endif default: /* not supported family type */ SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); } SCTP_INP_RUNLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { /* * If you have not performed a bind, then we need to do the * ephemeral bind for you. */ if ((err = sctp_inpcb_bind(inp->sctp_socket, (struct sockaddr *)NULL, (struct sctp_ifa *)NULL, p ))) { /* bind error, probably perm */ *error = err; return (NULL); } } stcb = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asoc), struct sctp_tcb); if (stcb == NULL) { /* out of memory? 
*/ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM); *error = ENOMEM; return (NULL); } SCTP_INCR_ASOC_COUNT(); memset(stcb, 0, sizeof(*stcb)); asoc = &stcb->asoc; SCTP_TCB_LOCK_INIT(stcb); SCTP_TCB_SEND_LOCK_INIT(stcb); stcb->rport = rport; /* setup back pointer's */ stcb->sctp_ep = inp; stcb->sctp_socket = inp->sctp_socket; if ((err = sctp_init_asoc(inp, stcb, override_tag, vrf_id, o_streams))) { /* failed */ SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); SCTP_DECR_ASOC_COUNT(); *error = err; return (NULL); } /* and the port */ SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { /* inpcb freed while alloc going on */ SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); SCTP_INP_WUNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); SCTP_DECR_ASOC_COUNT(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); } SCTP_TCB_LOCK(stcb); asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb); /* now that my_vtag is set, add it to the hash */ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; /* put it in the bucket in the vtag hash of assoc's for the system */ LIST_INSERT_HEAD(head, stcb, sctp_asocs); SCTP_INP_INFO_WUNLOCK(); if ((err = sctp_add_remote_addr(stcb, firstaddr, NULL, port, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) { /* failure.. memory error? */ if (asoc->strmout) { SCTP_FREE(asoc->strmout, SCTP_M_STRMO); asoc->strmout = NULL; } if (asoc->mapping_array) { SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); asoc->mapping_array = NULL; } if (asoc->nr_mapping_array) { SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); asoc->nr_mapping_array = NULL; } SCTP_DECR_ASOC_COUNT(); SCTP_TCB_UNLOCK(stcb); SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); LIST_REMOVE(stcb, sctp_tcbasocidhash); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); SCTP_INP_WUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); *error = ENOBUFS; return (NULL); } /* Init all the timers */ SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer); SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer); SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer); SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer); SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer); SCTP_OS_TIMER_INIT(&asoc->delayed_event_timer.timer); SCTP_OS_TIMER_INIT(&asoc->delete_prim_timer.timer); LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist); /* now file the port under the hash as well */ if (inp->sctp_tcbhash != NULL) { head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport, inp->sctp_hashmark)]; LIST_INSERT_HEAD(head, stcb, sctp_tcbhash); } if (initialize_auth_params == SCTP_INITIALIZE_AUTH_PARAMS) { sctp_initialize_auth_params(inp, stcb); } SCTP_INP_WUNLOCK(inp); SCTPDBG(SCTP_DEBUG_PCB1, "Association %p now allocated\n", (void *)stcb); return (stcb); } void sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net) { struct sctp_association *asoc; asoc = &stcb->asoc; asoc->numnets--; TAILQ_REMOVE(&asoc->nets, net, sctp_next); if (net == asoc->primary_destination) { /* Reset primary */ struct sctp_nets *lnet; lnet = TAILQ_FIRST(&asoc->nets); /* * Mobility adaptation Ideally, if deleted destination is * the primary, it becomes a fast retransmission trigger by * the subsequent SET PRIMARY. 
(by micchie) */ if (sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE) || sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_FASTHANDOFF)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: primary dst is deleting\n"); if (asoc->deleted_primary != NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: deleted primary may be already stored\n"); goto out; } asoc->deleted_primary = net; atomic_add_int(&net->ref_count, 1); memset(&net->lastsa, 0, sizeof(net->lastsa)); memset(&net->lastsv, 0, sizeof(net->lastsv)); sctp_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_PRIM_DELETED); sctp_timer_start(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL); } out: /* Try to find a confirmed primary */ asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0); } if (net == asoc->last_data_chunk_from) { /* Reset primary */ asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets); } if (net == asoc->last_control_chunk_from) { /* Clear net */ asoc->last_control_chunk_from = NULL; } if (net == stcb->asoc.alternate) { sctp_free_remote_addr(stcb->asoc.alternate); stcb->asoc.alternate = NULL; } sctp_free_remote_addr(net); } /* * remove a remote endpoint address from an association, it will fail if the * address does not exist. */ int sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr) { /* * Here we need to remove a remote address. This is quite simple, we * first find it in the list of address for the association * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE * on that item. Note we do not allow it to be removed if there are * no other addresses. */ struct sctp_association *asoc; struct sctp_nets *net, *nnet; asoc = &stcb->asoc; /* locate the address */ TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) { if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) { continue; } if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr, remaddr)) { /* we found the guy */ if (asoc->numnets < 2) { /* Must have at LEAST two remote addresses */ return (-1); } else { sctp_remove_net(stcb, net); return (0); } } } /* not found. 
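	 * (A -1 above means the address was found but removing it would
	 * leave the peer with fewer than two addresses; -2 here means it
	 * was never on the list.)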
*/ return (-2); } void sctp_delete_from_timewait(uint32_t tag, uint16_t lport, uint16_t rport) { struct sctpvtaghead *chain; struct sctp_tagblock *twait_block; int found = 0; int i; chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { if ((twait_block->vtag_block[i].v_tag == tag) && (twait_block->vtag_block[i].lport == lport) && (twait_block->vtag_block[i].rport == rport)) { twait_block->vtag_block[i].tv_sec_at_expire = 0; twait_block->vtag_block[i].v_tag = 0; twait_block->vtag_block[i].lport = 0; twait_block->vtag_block[i].rport = 0; found = 1; break; } } if (found) break; } } int sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport) { struct sctpvtaghead *chain; struct sctp_tagblock *twait_block; int found = 0; int i; SCTP_INP_INFO_WLOCK(); chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { if ((twait_block->vtag_block[i].v_tag == tag) && (twait_block->vtag_block[i].lport == lport) && (twait_block->vtag_block[i].rport == rport)) { found = 1; break; } } if (found) break; } SCTP_INP_INFO_WUNLOCK(); return (found); } void sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport) { struct sctpvtaghead *chain; struct sctp_tagblock *twait_block; struct timeval now; int set, i; if (time == 0) { /* Its disabled */ return; } (void)SCTP_GETTIME_TIMEVAL(&now); chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; set = 0; LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { /* Block(s) present, lets find space, and expire on the fly */ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { if ((twait_block->vtag_block[i].v_tag == 0) && !set) { twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time; twait_block->vtag_block[i].v_tag = tag; twait_block->vtag_block[i].lport = lport; twait_block->vtag_block[i].rport = rport; set = 1; } else if ((twait_block->vtag_block[i].v_tag) && ((long)twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) { /* Audit expires this guy */ twait_block->vtag_block[i].tv_sec_at_expire = 0; twait_block->vtag_block[i].v_tag = 0; twait_block->vtag_block[i].lport = 0; twait_block->vtag_block[i].rport = 0; if (set == 0) { /* Reuse it for my new tag */ twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time; twait_block->vtag_block[i].v_tag = tag; twait_block->vtag_block[i].lport = lport; twait_block->vtag_block[i].rport = rport; set = 1; } } } if (set) { /* * We only do up to the block where we can place our * tag for audits */ break; } } /* Need to add a new block to chain */ if (!set) { SCTP_MALLOC(twait_block, struct sctp_tagblock *, sizeof(struct sctp_tagblock), SCTP_M_TIMW); if (twait_block == NULL) { #ifdef INVARIANTS panic("Can not alloc tagblock"); #endif return; } memset(twait_block, 0, sizeof(struct sctp_tagblock)); LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock); twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + time; twait_block->vtag_block[0].v_tag = tag; twait_block->vtag_block[0].lport = lport; twait_block->vtag_block[0].rport = rport; } } void sctp_clean_up_stream(struct sctp_tcb *stcb, struct sctp_readhead *rh) { struct sctp_tmit_chunk *chk, *nchk; struct sctp_queued_to_read *control, *ncontrol; TAILQ_FOREACH_SAFE(control, rh, next_instrm, ncontrol) { TAILQ_REMOVE(rh, control, next_instrm); control->on_strm_q = 0; if 
(control->on_read_q == 0) { sctp_free_remote_addr(control->whoFrom); if (control->data) { sctp_m_freem(control->data); control->data = NULL; } } /* Reassembly free? */ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { TAILQ_REMOVE(&control->reasm, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } if (chk->holds_key_ref) sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); sctp_free_remote_addr(chk->whoTo); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); SCTP_DECR_CHK_COUNT(); /* sa_ignore FREED_MEMORY */ } /* * We don't free the address here since all the net's were * freed above. */ if (control->on_read_q == 0) { sctp_free_a_readq(stcb, control); } } } /*- * Free the association after un-hashing the remote port. This * function ALWAYS returns holding NO LOCK on the stcb. It DOES * expect that the input to this function IS a locked TCB. * It will return 0, if it did NOT destroy the association (instead * it unlocks it. It will return NON-zero if it either destroyed the * association OR the association is already destroyed. */ int sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location) { int i; struct sctp_association *asoc; struct sctp_nets *net, *nnet; struct sctp_laddr *laddr, *naddr; struct sctp_tmit_chunk *chk, *nchk; struct sctp_asconf_addr *aparam, *naparam; struct sctp_asconf_ack *aack, *naack; struct sctp_stream_reset_list *strrst, *nstrrst; struct sctp_queued_to_read *sq, *nsq; struct sctp_stream_queue_pending *sp, *nsp; sctp_sharedkey_t *shared_key, *nshared_key; struct socket *so; /* first, lets purge the entry from the hash table. */ #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 6); #endif if (stcb->asoc.state == 0) { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 7); #endif /* there is no asoc, really TSNH :-0 */ return (1); } if (stcb->asoc.alternate) { sctp_free_remote_addr(stcb->asoc.alternate); stcb->asoc.alternate = NULL; } /* TEMP CODE */ if (stcb->freed_from_where == 0) { /* Only record the first place free happened from */ stcb->freed_from_where = from_location; } /* TEMP CODE */ asoc = &stcb->asoc; if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) /* nothing around */ so = NULL; else so = inp->sctp_socket; /* * We used timer based freeing if a reader or writer is in the way. * So we first check if we are actually being called from a timer, * if so we abort early if a reader or writer is still in the way. */ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && (from_inpcbfree == SCTP_NORMAL_PROC)) { /* * is it the timer driving us? if so are the reader/writers * gone? */ if (stcb->asoc.refcnt) { /* nope, reader or writer in the way */ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); /* no asoc destroyed */ SCTP_TCB_UNLOCK(stcb); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 8); #endif return (0); } } /* now clean up any other timers */ (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); asoc->dack_timer.self = NULL; (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); /*- * For stream reset we don't blast this unless * it is a str-reset timer, it might be the * free-asoc timer which we DON'T want to * disturb. 
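	 * Clearing ->self keeps a late firing of a timer from acting on
	 * an association that is in the middle of being freed.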
*/ if (asoc->strreset_timer.type == SCTP_TIMER_TYPE_STRRESET) asoc->strreset_timer.self = NULL; (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); asoc->asconf_timer.self = NULL; (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); asoc->autoclose_timer.self = NULL; (void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer); asoc->shut_guard_timer.self = NULL; (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); asoc->delayed_event_timer.self = NULL; /* Mobility adaptation */ (void)SCTP_OS_TIMER_STOP(&asoc->delete_prim_timer.timer); asoc->delete_prim_timer.self = NULL; TAILQ_FOREACH(net, &asoc->nets, sctp_next) { (void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer); net->rxt_timer.self = NULL; (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); net->pmtu_timer.self = NULL; (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); net->hb_timer.self = NULL; } /* Now the read queue needs to be cleaned up (only once) */ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_ABOUT_TO_BE_FREED); SCTP_INP_READ_LOCK(inp); TAILQ_FOREACH(sq, &inp->read_queue, next) { if (sq->stcb == stcb) { sq->do_not_ref_stcb = 1; sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn; /* * If there is no end, there never will be * now. */ if (sq->end_added == 0) { /* Held for PD-API clear that. */ sq->pdapi_aborted = 1; sq->held_length = 0; if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT) && (so != NULL)) { /* * Need to add a PD-API * aborted indication. * Setting the control_pdapi * assures that it will be * added right after this * msg. */ uint32_t strseq; stcb->asoc.control_pdapi = sq; strseq = (sq->sinfo_stream << 16) | (sq->mid & 0x0000ffff); sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&strseq, SCTP_SO_LOCKED); stcb->asoc.control_pdapi = NULL; } } /* Add an end to wake them */ sq->end_added = 1; } } SCTP_INP_READ_UNLOCK(inp); if (stcb->block_entry) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PCB, ECONNRESET); stcb->block_entry->error = ECONNRESET; stcb->block_entry = NULL; } } if ((stcb->asoc.refcnt) || (stcb->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE)) { /* * Someone holds a reference OR the socket is unaccepted * yet. */ if ((stcb->asoc.refcnt) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); } SCTP_TCB_UNLOCK(stcb); if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) /* nothing around */ so = NULL; if (so) { /* Wake any reader/writers */ sctp_sorwakeup(inp, so); sctp_sowwakeup(inp, so); } #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 9); #endif /* no asoc destroyed */ return (0); } #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 10); #endif /* * When I reach here, no others want to kill the assoc yet.. and I * own the lock. Now its possible an abort comes in when I do the * lock exchange below to grab all the locks to do the final take * out. to prevent this we increment the count, which will start a * timer and blow out above thus assuring us that we hold exclusive * killing of the asoc. 
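	 * (The reference we take also pins the stcb across the
	 * unlock/relock window below.)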
Note that after getting back the TCB lock we * will go ahead and increment the counter back up and stop any * timer a passing stranger may have started :-S */ if (from_inpcbfree == SCTP_NORMAL_PROC) { atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); SCTP_TCB_LOCK(stcb); } /* Double check the GONE flag */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) /* nothing around */ so = NULL; if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* * For TCP type we need special handling when we are * connected. We also include the peel'ed off ones to. */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED; inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED; if (so) { SOCKBUF_LOCK(&so->so_rcv); so->so_state &= ~(SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING | SS_ISCONNECTED); so->so_state |= SS_ISDISCONNECTED; socantrcvmore_locked(so); socantsendmore(so); sctp_sowwakeup(inp, so); sctp_sorwakeup(inp, so); SCTP_SOWAKEUP(so); } } } /* * Make it invalid too, that way if its about to run it will abort * and return. */ /* re-increment the lock */ if (from_inpcbfree == SCTP_NORMAL_PROC) { atomic_add_int(&stcb->asoc.refcnt, -1); } if (stcb->asoc.refcnt) { SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); if (from_inpcbfree == SCTP_NORMAL_PROC) { SCTP_INP_INFO_WUNLOCK(); SCTP_INP_WUNLOCK(inp); } SCTP_TCB_UNLOCK(stcb); return (0); } asoc->state = 0; if (inp->sctp_tcbhash) { LIST_REMOVE(stcb, sctp_tcbhash); } if (stcb->asoc.in_asocid_hash) { LIST_REMOVE(stcb, sctp_tcbasocidhash); } /* Now lets remove it from the list of ALL associations in the EP */ LIST_REMOVE(stcb, sctp_tcblist); if (from_inpcbfree == SCTP_NORMAL_PROC) { SCTP_INP_INCR_REF(inp); SCTP_INP_WUNLOCK(inp); } /* pull from vtag hash */ LIST_REMOVE(stcb, sctp_asocs); sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_BASE_SYSCTL(sctp_vtag_time_wait), inp->sctp_lport, stcb->rport); /* * Now restop the timers to be sure this is paranoia at is finest! */ (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); (void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer); (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); TAILQ_FOREACH(net, &asoc->nets, sctp_next) { (void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer); (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); } asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE; /* * The chunk lists and such SHOULD be empty but we check them just * in case. 
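*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] Lock
 * ordering forces the code above to drop the TCB lock before taking the
 * global write locks, so it pins the asoc with a reference across the gap
 * and re-validates once everything is held again. A userland model of
 * that drop-and-reacquire pattern (names hypothetical):
 */
#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t demo_info_lock = PTHREAD_MUTEX_INITIALIZER;

struct demo_tcb {
	pthread_mutex_t lock;
	atomic_int refcnt;
	int gone;
};

static int
demo_relock_for_teardown(struct demo_tcb *t)
{
	atomic_fetch_add(&t->refcnt, 1);	/* pin across the unlocked window */
	pthread_mutex_unlock(&t->lock);
	pthread_mutex_lock(&demo_info_lock);	/* global lock first... */
	pthread_mutex_lock(&t->lock);		/* ...then the TCB again */
	atomic_fetch_sub(&t->refcnt, 1);
	/* Anything may have happened while unlocked: re-check the state. */
	return (t->gone == 0);
}

/*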
*/ /* anything on the wheel needs to be removed */ SCTP_TCB_SEND_LOCK(stcb); for (i = 0; i < asoc->streamoutcnt; i++) { struct sctp_stream_out *outs; outs = &asoc->strmout[i]; /* now clean up any chunks here */ TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { atomic_subtract_int(&asoc->stream_queue_cnt, 1); TAILQ_REMOVE(&outs->outqueue, sp, next); stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); sctp_free_spbufspace(stcb, asoc, sp); if (sp->data) { if (so) { /* Still an open socket - report */ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 0, (void *)sp, SCTP_SO_LOCKED); } if (sp->data) { sctp_m_freem(sp->data); sp->data = NULL; sp->tail_mbuf = NULL; sp->length = 0; } } if (sp->net) { sctp_free_remote_addr(sp->net); sp->net = NULL; } sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); } } SCTP_TCB_SEND_UNLOCK(stcb); /* sa_ignore FREED_MEMORY */ TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) { TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp); SCTP_FREE(strrst, SCTP_M_STRESET); } TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) { TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next); if (sq->data) { sctp_m_freem(sq->data); sq->data = NULL; } sctp_free_remote_addr(sq->whoFrom); sq->whoFrom = NULL; sq->stcb = NULL; /* Free the ctl entry */ sctp_free_a_readq(stcb, sq); /* sa_ignore FREED_MEMORY */ } TAILQ_FOREACH_SAFE(chk, &asoc->free_chunks, sctp_next, nchk) { TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } if (chk->holds_key_ref) sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); SCTP_DECR_CHK_COUNT(); atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); asoc->free_chunk_cnt--; /* sa_ignore FREED_MEMORY */ } /* pending send queue SHOULD be empty */ TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { asoc->strmout[chk->rec.data.sid].chunks_on_queues--; #ifdef INVARIANTS } else { panic("No chunks on the queues for sid %u.", chk->rec.data.sid); #endif } TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); if (chk->data) { if (so) { /* Still a socket? */ sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, chk, SCTP_SO_LOCKED); } if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } } if (chk->holds_key_ref) sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); if (chk->whoTo) { sctp_free_remote_addr(chk->whoTo); chk->whoTo = NULL; } SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); SCTP_DECR_CHK_COUNT(); /* sa_ignore FREED_MEMORY */ } /* sent queue SHOULD be empty */ TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { asoc->strmout[chk->rec.data.sid].chunks_on_queues--; #ifdef INVARIANTS } else { panic("No chunks on the queues for sid %u.", chk->rec.data.sid); #endif } } TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); if (chk->data) { if (so) { /* Still a socket? 
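*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The
 * queue drains above decrement the per-stream chunks_on_queues counter
 * and, when built with INVARIANTS, panic on underflow instead of clamping
 * silently. The same checked-decrement shape in miniature (abort() stands
 * in for panic(9); names hypothetical):
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define DEMO_INVARIANTS

static void
demo_checked_decr(uint32_t *counter, unsigned int sid)
{
	if (*counter > 0) {
		(*counter)--;
#ifdef DEMO_INVARIANTS
	} else {
		/* Underflow means the accounting is broken: fail loudly. */
		fprintf(stderr, "No chunks on the queues for sid %u.\n", sid);
		abort();
#endif
	}
}

/*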
*/ sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, chk, SCTP_SO_LOCKED); } if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } } if (chk->holds_key_ref) sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); sctp_free_remote_addr(chk->whoTo); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); SCTP_DECR_CHK_COUNT(); /* sa_ignore FREED_MEMORY */ } #ifdef INVARIANTS for (i = 0; i < stcb->asoc.streamoutcnt; i++) { if (stcb->asoc.strmout[i].chunks_on_queues > 0) { panic("%u chunks left for stream %u.", stcb->asoc.strmout[i].chunks_on_queues, i); } } #endif /* control queue MAY not be empty */ TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } if (chk->holds_key_ref) sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); sctp_free_remote_addr(chk->whoTo); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); SCTP_DECR_CHK_COUNT(); /* sa_ignore FREED_MEMORY */ } /* ASCONF queue MAY not be empty */ TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } if (chk->holds_key_ref) sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); sctp_free_remote_addr(chk->whoTo); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); SCTP_DECR_CHK_COUNT(); /* sa_ignore FREED_MEMORY */ } if (asoc->mapping_array) { SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); asoc->mapping_array = NULL; } if (asoc->nr_mapping_array) { SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); asoc->nr_mapping_array = NULL; } /* the stream outs */ if (asoc->strmout) { SCTP_FREE(asoc->strmout, SCTP_M_STRMO); asoc->strmout = NULL; } asoc->strm_realoutsize = asoc->streamoutcnt = 0; if (asoc->strmin) { for (i = 0; i < asoc->streamincnt; i++) { sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue); sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue); } SCTP_FREE(asoc->strmin, SCTP_M_STRMI); asoc->strmin = NULL; } asoc->streamincnt = 0; TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) { #ifdef INVARIANTS if (SCTP_BASE_INFO(ipi_count_raddr) == 0) { panic("no net's left alloc'ed, or list points to itself"); } #endif TAILQ_REMOVE(&asoc->nets, net, sctp_next); sctp_free_remote_addr(net); } LIST_FOREACH_SAFE(laddr, &asoc->sctp_restricted_addrs, sctp_nxt_addr, naddr) { /* sa_ignore FREED_MEMORY */ sctp_remove_laddr(laddr); } /* pending asconf (address) parameters */ TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) { /* sa_ignore FREED_MEMORY */ TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); SCTP_FREE(aparam, SCTP_M_ASC_ADDR); } TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) { /* sa_ignore FREED_MEMORY */ TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next); if (aack->data != NULL) { sctp_m_freem(aack->data); } SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack); } /* clean up auth stuff */ if (asoc->local_hmacs) sctp_free_hmaclist(asoc->local_hmacs); if (asoc->peer_hmacs) sctp_free_hmaclist(asoc->peer_hmacs); if (asoc->local_auth_chunks) sctp_free_chunklist(asoc->local_auth_chunks); if (asoc->peer_auth_chunks) sctp_free_chunklist(asoc->peer_auth_chunks); sctp_free_authinfo(&asoc->authinfo); LIST_FOREACH_SAFE(shared_key, &asoc->shared_keys, next, nshared_key) { LIST_REMOVE(shared_key, next); sctp_free_sharedkey(shared_key); /* sa_ignore FREED_MEMORY */ } /* Insert new items here :> */ /* Get rid of LOCK */ 
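/*
 * [Editor's sketch -- illustrative only, not part of this commit.] In the
 * teardown that follows, SCTP_TRACK_FREED_ASOCS parks the dead TCB on a
 * per-endpoint free list instead of returning it to the zone -- a common
 * use-after-free debugging aid, since the memory stays allocated and
 * inspectable. A minimal model of that quarantine pattern (names
 * hypothetical):
 */
#include <sys/queue.h>
#include <stdlib.h>

struct demo_obj {
	LIST_ENTRY(demo_obj) link;
};
LIST_HEAD(demo_freelist, demo_obj);

static struct demo_freelist demo_quarantine =
    LIST_HEAD_INITIALIZER(demo_quarantine);
static const int demo_track_freed = 1;	/* a compile-time option in-kernel */

static void
demo_free_obj(struct demo_obj *o)
{
	if (demo_track_freed)
		LIST_INSERT_HEAD(&demo_quarantine, o, link);	/* park it */
	else
		free(o);	/* stands in for SCTP_ZONE_FREE() */
}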
SCTP_TCB_UNLOCK(stcb); SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); if (from_inpcbfree == SCTP_NORMAL_PROC) { SCTP_INP_INFO_WUNLOCK(); SCTP_INP_RLOCK(inp); } #ifdef SCTP_TRACK_FREED_ASOCS if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { /* now clean up the tasoc itself */ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); SCTP_DECR_ASOC_COUNT(); } else { LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist); } #else SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); SCTP_DECR_ASOC_COUNT(); #endif if (from_inpcbfree == SCTP_NORMAL_PROC) { if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { /* * If its NOT the inp_free calling us AND sctp_close * as been called, we call back... */ SCTP_INP_RUNLOCK(inp); /* * This will start the kill timer (if we are the * last one) since we hold an increment yet. But * this is the only safe way to do this since * otherwise if the socket closes at the same time * we are here we might collide in the cleanup. */ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, SCTP_CALLED_DIRECTLY_NOCMPSET); SCTP_INP_DECR_REF(inp); goto out_of; } else { /* The socket is still open. */ SCTP_INP_DECR_REF(inp); } } if (from_inpcbfree == SCTP_NORMAL_PROC) { SCTP_INP_RUNLOCK(inp); } out_of: /* destroyed the asoc */ #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 11); #endif return (1); } /* * determine if a destination is "reachable" based upon the addresses bound * to the current endpoint (e.g. only v4 or v6 currently bound) */ /* * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use * assoc level v4/v6 flags, as the assoc *may* not have the same address * types bound as its endpoint */ int sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr) { struct sctp_inpcb *inp; int answer; /* * No locks here, the TCB, in all cases is already locked and an * assoc is up. There is either a INP lock by the caller applied (in * asconf case when deleting an address) or NOT in the HB case, * however if HB then the INP increment is up and the INP will not * be removed (on top of the fact that we have a TCB lock). So we * only want to read the sctp_flags, which is either bound-all or * not.. no protection needed since once an assoc is up you can't be * changing your binding. */ inp = stcb->sctp_ep; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* if bound all, destination is not restricted */ /* * RRS: Question during lock work: Is this correct? If you * are bound-all you still might need to obey the V4--V6 * flags??? IMO this bound-all stuff needs to be removed! 
*/ return (1); } /* NOTE: all "scope" checks are done when local addresses are added */ switch (destaddr->sa_family) { #ifdef INET6 case AF_INET6: answer = inp->ip_inp.inp.inp_vflag & INP_IPV6; break; #endif #ifdef INET case AF_INET: answer = inp->ip_inp.inp.inp_vflag & INP_IPV4; break; #endif default: /* invalid family, so it's unreachable */ answer = 0; break; } return (answer); } /* * update the inp_vflags on an endpoint */ static void sctp_update_ep_vflag(struct sctp_inpcb *inp) { struct sctp_laddr *laddr; /* first clear the flag */ inp->ip_inp.inp.inp_vflag = 0; /* set the flag based on addresses on the ep list */ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __func__); continue; } if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { continue; } switch (laddr->ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: inp->ip_inp.inp.inp_vflag |= INP_IPV6; break; #endif #ifdef INET case AF_INET: inp->ip_inp.inp.inp_vflag |= INP_IPV4; break; #endif default: break; } } } /* * Add the address to the endpoint local address list There is nothing to be * done if we are bound to all addresses */ void sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t action) { struct sctp_laddr *laddr; struct sctp_tcb *stcb; int fnd, error = 0; fnd = 0; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* You are already bound to all. You have it already */ return; } #ifdef INET6 if (ifa->address.sa.sa_family == AF_INET6) { if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { /* Can't bind a non-useable addr. */ return; } } #endif /* first, is it already present? */ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == ifa) { fnd = 1; break; } } if (fnd == 0) { /* Not in the ep list */ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, action); if (error != 0) return; inp->laddr_count++; /* update inp_vflag flags */ switch (ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: inp->ip_inp.inp.inp_vflag |= INP_IPV6; break; #endif #ifdef INET case AF_INET: inp->ip_inp.inp.inp_vflag |= INP_IPV4; break; #endif default: break; } LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { sctp_add_local_addr_restricted(stcb, ifa); } } return; } /* * select a new (hopefully reachable) destination net (should only be used * when we deleted an ep addr that is the only usable source address to reach * the destination net) */ static void sctp_select_primary_destination(struct sctp_tcb *stcb) { struct sctp_nets *net; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { /* for now, we'll just pick the first reachable one we find */ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) continue; if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr)) { /* found a reachable destination */ stcb->asoc.primary_destination = net; } } /* I can't there from here! ...we're gonna die shortly... */ } /* * Delete the address from the endpoint local address list. There is nothing * to be done if we are bound to all addresses */ void sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) { struct sctp_laddr *laddr; int fnd; fnd = 0; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* You are already bound to all. 
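*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.]
 * Stripped of the locking commentary, sctp_destination_is_reachable()
 * above is a bit test: inp_vflag records which address families the
 * endpoint has bound, and the destination's family picks the bit. A
 * self-contained model (the DEMO_VFLAG_* values are made up; the kernel
 * uses INP_IPV4 and INP_IPV6):
 */
#include <sys/socket.h>

#define DEMO_VFLAG_IPV4	0x1
#define DEMO_VFLAG_IPV6	0x2

static int
demo_is_reachable(int vflag, const struct sockaddr *dst)
{
	switch (dst->sa_family) {
	case AF_INET:
		return ((vflag & DEMO_VFLAG_IPV4) != 0);
	case AF_INET6:
		return ((vflag & DEMO_VFLAG_IPV6) != 0);
	default:
		return (0);	/* unknown family: treat as unreachable */
	}
}

/*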
You have it already */ return; } LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == ifa) { fnd = 1; break; } } if (fnd && (inp->laddr_count < 2)) { /* can't delete unless there are at LEAST 2 addresses */ return; } if (fnd) { /* * clean up any use of this address go through our * associations and clear any last_used_address that match * this one for each assoc, see if a new primary_destination * is needed */ struct sctp_tcb *stcb; /* clean up "next_addr_touse" */ if (inp->next_addr_touse == laddr) /* delete this address */ inp->next_addr_touse = NULL; /* clean up "last_used_address" */ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { struct sctp_nets *net; SCTP_TCB_LOCK(stcb); if (stcb->asoc.last_used_address == laddr) /* delete this address */ stcb->asoc.last_used_address = NULL; /* * Now spin through all the nets and purge any ref * to laddr */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->ro._s_addr == laddr->ifa) { /* Yep, purge src address selected */ sctp_rtentry_t *rt; /* delete this address if cached */ rt = net->ro.ro_rt; if (rt != NULL) { RTFREE(rt); net->ro.ro_rt = NULL; } sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; } } SCTP_TCB_UNLOCK(stcb); } /* for each tcb */ /* remove it from the ep list */ sctp_remove_laddr(laddr); inp->laddr_count--; /* update inp_vflag flags */ sctp_update_ep_vflag(inp); } return; } /* * Add the address to the TCB local address restricted list. * This is a "pending" address list (eg. addresses waiting for an * ASCONF-ACK response) and cannot be used as a valid source address. */ void sctp_add_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) { struct sctp_laddr *laddr; struct sctpladdr *list; /* * Assumes TCB is locked.. and possibly the INP. May need to * confirm/fix that if we need it and is not the case. */ list = &stcb->asoc.sctp_restricted_addrs; #ifdef INET6 if (ifa->address.sa.sa_family == AF_INET6) { if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { /* Can't bind a non-existent addr. */ return; } } #endif /* does the address already exist? */ LIST_FOREACH(laddr, list, sctp_nxt_addr) { if (laddr->ifa == ifa) { return; } } /* add to the list */ (void)sctp_insert_laddr(list, ifa, 0); return; } /* * Remove a local address from the TCB local address restricted list */ void sctp_del_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) { struct sctp_inpcb *inp; struct sctp_laddr *laddr; /* * This is called by asconf work. It is assumed that a) The TCB is * locked and b) The INP is locked. This is true in as much as I can * trace through the entry asconf code where I did these locks. * Again, the ASCONF code is a bit different in that it does lock * the INP during its work often times. This must be since we don't * want other proc's looking up things while what they are looking * up is changing :-D */ inp = stcb->sctp_ep; /* if subset bound and don't allow ASCONF's, can't delete last */ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) { if (stcb->sctp_ep->laddr_count < 2) { /* can't delete last address */ return; } } LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) { /* remove the address if it exists */ if (laddr->ifa == NULL) continue; if (laddr->ifa == ifa) { sctp_remove_laddr(laddr); return; } } /* address not found! 
*/ return; } /* * Temporarily remove for __APPLE__ until we use the Tiger equivalents */ /* sysctl */ static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC; static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR; #if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) struct sctp_mcore_ctrl *sctp_mcore_workers = NULL; int *sctp_cpuarry = NULL; void sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use) { /* Queue a packet to a processor for the specified core */ struct sctp_mcore_queue *qent; struct sctp_mcore_ctrl *wkq; int need_wake = 0; if (sctp_mcore_workers == NULL) { /* Something went way bad during setup */ sctp_input_with_port(m, off, 0); return; } SCTP_MALLOC(qent, struct sctp_mcore_queue *, (sizeof(struct sctp_mcore_queue)), SCTP_M_MCORE); if (qent == NULL) { /* This is trouble */ sctp_input_with_port(m, off, 0); return; } qent->vn = curvnet; qent->m = m; qent->off = off; qent->v6 = 0; wkq = &sctp_mcore_workers[cpu_to_use]; SCTP_MCORE_QLOCK(wkq); TAILQ_INSERT_TAIL(&wkq->que, qent, next); if (wkq->running == 0) { need_wake = 1; } SCTP_MCORE_QUNLOCK(wkq); if (need_wake) { wakeup(&wkq->running); } } static void sctp_mcore_thread(void *arg) { struct sctp_mcore_ctrl *wkq; struct sctp_mcore_queue *qent; wkq = (struct sctp_mcore_ctrl *)arg; struct mbuf *m; int off, v6; /* Wait for first tickle */ SCTP_MCORE_LOCK(wkq); wkq->running = 0; msleep(&wkq->running, &wkq->core_mtx, 0, "wait for pkt", 0); SCTP_MCORE_UNLOCK(wkq); /* Bind to our cpu */ thread_lock(curthread); sched_bind(curthread, wkq->cpuid); thread_unlock(curthread); /* Now lets start working */ SCTP_MCORE_LOCK(wkq); /* Now grab lock and go */ for (;;) { SCTP_MCORE_QLOCK(wkq); skip_sleep: wkq->running = 1; qent = TAILQ_FIRST(&wkq->que); if (qent) { TAILQ_REMOVE(&wkq->que, qent, next); SCTP_MCORE_QUNLOCK(wkq); CURVNET_SET(qent->vn); m = qent->m; off = qent->off; v6 = qent->v6; SCTP_FREE(qent, SCTP_M_MCORE); if (v6 == 0) { sctp_input_with_port(m, off, 0); } else { SCTP_PRINTF("V6 not yet supported\n"); sctp_m_freem(m); } CURVNET_RESTORE(); SCTP_MCORE_QLOCK(wkq); } wkq->running = 0; if (!TAILQ_EMPTY(&wkq->que)) { goto skip_sleep; } SCTP_MCORE_QUNLOCK(wkq); msleep(&wkq->running, &wkq->core_mtx, 0, "wait for pkt", 0); } } static void sctp_startup_mcore_threads(void) { int i, cpu; if (mp_ncpus == 1) return; if (sctp_mcore_workers != NULL) { /* * Already been here in some previous vnet? */ return; } SCTP_MALLOC(sctp_mcore_workers, struct sctp_mcore_ctrl *, ((mp_maxid + 1) * sizeof(struct sctp_mcore_ctrl)), SCTP_M_MCORE); if (sctp_mcore_workers == NULL) { /* TSNH I hope */ return; } memset(sctp_mcore_workers, 0, ((mp_maxid + 1) * sizeof(struct sctp_mcore_ctrl))); /* Init the structures */ for (i = 0; i <= mp_maxid; i++) { TAILQ_INIT(&sctp_mcore_workers[i].que); SCTP_MCORE_LOCK_INIT(&sctp_mcore_workers[i]); SCTP_MCORE_QLOCK_INIT(&sctp_mcore_workers[i]); sctp_mcore_workers[i].cpuid = i; } if (sctp_cpuarry == NULL) { SCTP_MALLOC(sctp_cpuarry, int *, (mp_ncpus * sizeof(int)), SCTP_M_MCORE); i = 0; CPU_FOREACH(cpu) { sctp_cpuarry[i] = cpu; i++; } } /* Now start them all */ CPU_FOREACH(cpu) { (void)kproc_create(sctp_mcore_thread, (void *)&sctp_mcore_workers[cpu], &sctp_mcore_workers[cpu].thread_proc, RFPROC, SCTP_KTHREAD_PAGES, SCTP_MCORE_NAME); } } #endif void sctp_pcb_init() { /* * SCTP initialization for the PCB structures should be called by * the sctp_init() function. 
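*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The
 * SCTP_MCORE machinery above is a per-CPU work queue: producers append
 * under the queue lock and issue a wakeup only when the worker has
 * reported itself idle (wkq->running == 0). A portable pthread model of
 * the producer side (struct and names hypothetical; the worker would
 * cond-wait where the kernel code msleep()s):
 */
#include <pthread.h>
#include <sys/queue.h>

struct demo_item {
	TAILQ_ENTRY(demo_item) next;
	void *pkt;
};

struct demo_wkq {
	pthread_mutex_t qlock;
	pthread_cond_t wake;
	TAILQ_HEAD(, demo_item) que;
	int running;
};

static void
demo_enqueue(struct demo_wkq *wkq, struct demo_item *it)
{
	int need_wake = 0;

	pthread_mutex_lock(&wkq->qlock);
	TAILQ_INSERT_TAIL(&wkq->que, it, next);
	if (wkq->running == 0)
		need_wake = 1;		/* worker is asleep, not polling */
	pthread_mutex_unlock(&wkq->qlock);
	if (need_wake)
		pthread_cond_signal(&wkq->wake);	/* stands in for wakeup() */
}

/*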
*/ int i; struct timeval tv; if (SCTP_BASE_VAR(sctp_pcb_initialized) != 0) { /* error I was called twice */ return; } SCTP_BASE_VAR(sctp_pcb_initialized) = 1; #if defined(SCTP_LOCAL_TRACE_BUF) memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log)); #endif #if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) SCTP_MALLOC(SCTP_BASE_STATS, struct sctpstat *, ((mp_maxid + 1) * sizeof(struct sctpstat)), SCTP_M_MCORE); #endif (void)SCTP_GETTIME_TIMEVAL(&tv); #if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) memset(SCTP_BASE_STATS, 0, sizeof(struct sctpstat) * (mp_maxid + 1)); SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_sec = (uint32_t)tv.tv_sec; SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_usec = (uint32_t)tv.tv_usec; #else memset(&SCTP_BASE_STATS, 0, sizeof(struct sctpstat)); SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec; SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t)tv.tv_usec; #endif /* init the empty list of (All) Endpoints */ LIST_INIT(&SCTP_BASE_INFO(listhead)); /* init the hash table of endpoints */ TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &SCTP_BASE_SYSCTL(sctp_hashtblsize)); TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &SCTP_BASE_SYSCTL(sctp_pcbtblsize)); TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &SCTP_BASE_SYSCTL(sctp_chunkscale)); SCTP_BASE_INFO(sctp_asochash) = SCTP_HASH_INIT((SCTP_BASE_SYSCTL(sctp_hashtblsize) * 31), &SCTP_BASE_INFO(hashasocmark)); SCTP_BASE_INFO(sctp_ephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize), &SCTP_BASE_INFO(hashmark)); SCTP_BASE_INFO(sctp_tcpephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize), &SCTP_BASE_INFO(hashtcpmark)); SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize); SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH, &SCTP_BASE_INFO(hashvrfmark)); SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE, &SCTP_BASE_INFO(vrf_ifn_hashmark)); /* init the zones */ /* * FIX ME: Should check for NULL returns, but if it does fail we are * doomed to panic anyways... add later maybe. 
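*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The
 * SCTP_HASH_INIT calls above wrap hashinit(9), which (if memory serves)
 * sizes the table to a power of two no larger than the requested entry
 * count and hands back a mask, so bucket selection is a single AND.
 * Sketched under that assumption (names hypothetical; assumes
 * elements >= 1):
 */
#include <stddef.h>
#include <stdint.h>

static size_t
demo_hash_size(size_t elements)
{
	size_t hashsize;

	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	return (hashsize >> 1);	/* largest power of two <= elements */
}

/* With mask = size - 1, picking (say) a vtag's bucket costs one AND. */
static size_t
demo_bucket(uint32_t vtag, size_t mask)
{
	return ((size_t)vtag & mask);
}

/*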
*/ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_ep), "sctp_ep", sizeof(struct sctp_inpcb), maxsockets); SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asoc), "sctp_asoc", sizeof(struct sctp_tcb), sctp_max_number_of_assoc); SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_laddr), "sctp_laddr", sizeof(struct sctp_laddr), (sctp_max_number_of_assoc * sctp_scale_up_for_address)); SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_net), "sctp_raddr", sizeof(struct sctp_nets), (sctp_max_number_of_assoc * sctp_scale_up_for_address)); SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_chunk), "sctp_chunk", sizeof(struct sctp_tmit_chunk), (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_readq), "sctp_readq", sizeof(struct sctp_queued_to_read), (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_strmoq), "sctp_stream_msg_out", sizeof(struct sctp_stream_queue_pending), (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf), "sctp_asconf", sizeof(struct sctp_asconf), (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf_ack), "sctp_asconf_ack", sizeof(struct sctp_asconf_ack), (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); /* Master Lock INIT for info structure */ SCTP_INP_INFO_LOCK_INIT(); SCTP_STATLOG_INIT_LOCK(); SCTP_IPI_COUNT_INIT(); SCTP_IPI_ADDR_INIT(); #ifdef SCTP_PACKET_LOGGING SCTP_IP_PKTLOG_INIT(); #endif LIST_INIT(&SCTP_BASE_INFO(addr_wq)); SCTP_WQ_ADDR_INIT(); /* not sure if we need all the counts */ SCTP_BASE_INFO(ipi_count_ep) = 0; /* assoc/tcb zone info */ SCTP_BASE_INFO(ipi_count_asoc) = 0; /* local addrlist zone info */ SCTP_BASE_INFO(ipi_count_laddr) = 0; /* remote addrlist zone info */ SCTP_BASE_INFO(ipi_count_raddr) = 0; /* chunk info */ SCTP_BASE_INFO(ipi_count_chunk) = 0; /* socket queue zone info */ SCTP_BASE_INFO(ipi_count_readq) = 0; /* stream out queue cont */ SCTP_BASE_INFO(ipi_count_strmoq) = 0; SCTP_BASE_INFO(ipi_free_strmoq) = 0; SCTP_BASE_INFO(ipi_free_chunks) = 0; SCTP_OS_TIMER_INIT(&SCTP_BASE_INFO(addr_wq_timer.timer)); /* Init the TIMEWAIT list */ for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) { LIST_INIT(&SCTP_BASE_INFO(vtag_timewait)[i]); } sctp_startup_iterator(); #if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) sctp_startup_mcore_threads(); #endif /* * INIT the default VRF which for BSD is the only one, other O/S's * may have more. But initially they must start with one and then * add the VRF's as addresses are added. */ sctp_init_vrf_list(SCTP_DEFAULT_VRF); } /* * Assumes that the SCTP_BASE_INFO() lock is NOT held. */ void sctp_pcb_finish(void) { struct sctp_vrflist *vrf_bucket; struct sctp_vrf *vrf, *nvrf; struct sctp_ifn *ifn, *nifn; struct sctp_ifa *ifa, *nifa; struct sctpvtaghead *chain; struct sctp_tagblock *twait_block, *prev_twait_block; struct sctp_laddr *wi, *nwi; int i; struct sctp_iterator *it, *nit; if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { SCTP_PRINTF("%s: race condition on teardown.\n", __func__); return; } SCTP_BASE_VAR(sctp_pcb_initialized) = 0; /* * In FreeBSD the iterator thread never exits but we do clean up. * The only way FreeBSD reaches here is if we have VRF's but we * still add the ifdef to make it compile on old versions. */ retry: SCTP_IPI_ITERATOR_WQ_LOCK(); /* * sctp_iterator_worker() might be working on an it entry without * holding the lock. We won't find it on the list either and * continue and free/destroy it. 
While holding the lock, spin, to * avoid the race condition as sctp_iterator_worker() will have to * wait to re-aquire the lock. */ if (sctp_it_ctl.iterator_running != 0 || sctp_it_ctl.cur_it != NULL) { SCTP_IPI_ITERATOR_WQ_UNLOCK(); SCTP_PRINTF("%s: Iterator running while we held the lock. Retry. " "cur_it=%p\n", __func__, sctp_it_ctl.cur_it); DELAY(10); goto retry; } TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { if (it->vn != curvnet) { continue; } TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); if (it->function_atend != NULL) { (*it->function_atend) (it->pointer, it->val); } SCTP_FREE(it, SCTP_M_ITER); } SCTP_IPI_ITERATOR_WQ_UNLOCK(); SCTP_ITERATOR_LOCK(); if ((sctp_it_ctl.cur_it) && (sctp_it_ctl.cur_it->vn == curvnet)) { sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT; } SCTP_ITERATOR_UNLOCK(); SCTP_OS_TIMER_STOP_DRAIN(&SCTP_BASE_INFO(addr_wq_timer.timer)); SCTP_WQ_ADDR_LOCK(); LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { LIST_REMOVE(wi, sctp_nxt_addr); SCTP_DECR_LADDR_COUNT(); if (wi->action == SCTP_DEL_IP_ADDRESS) { SCTP_FREE(wi->ifa, SCTP_M_IFA); } SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), wi); } SCTP_WQ_ADDR_UNLOCK(); /* * free the vrf/ifn/ifa lists and hashes (be sure address monitor is * destroyed first). */ vrf_bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(SCTP_DEFAULT_VRFID & SCTP_BASE_INFO(hashvrfmark))]; LIST_FOREACH_SAFE(vrf, vrf_bucket, next_vrf, nvrf) { LIST_FOREACH_SAFE(ifn, &vrf->ifnlist, next_ifn, nifn) { LIST_FOREACH_SAFE(ifa, &ifn->ifalist, next_ifa, nifa) { /* free the ifa */ LIST_REMOVE(ifa, next_bucket); LIST_REMOVE(ifa, next_ifa); SCTP_FREE(ifa, SCTP_M_IFA); } /* free the ifn */ LIST_REMOVE(ifn, next_bucket); LIST_REMOVE(ifn, next_ifn); SCTP_FREE(ifn, SCTP_M_IFN); } SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark); /* free the vrf */ LIST_REMOVE(vrf, next_vrf); SCTP_FREE(vrf, SCTP_M_VRF); } /* free the vrf hashes */ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_vrfhash), SCTP_BASE_INFO(hashvrfmark)); SCTP_HASH_FREE(SCTP_BASE_INFO(vrf_ifn_hash), SCTP_BASE_INFO(vrf_ifn_hashmark)); /* * free the TIMEWAIT list elements malloc'd in the function * sctp_add_vtag_to_timewait()... */ for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) { chain = &SCTP_BASE_INFO(vtag_timewait)[i]; if (!LIST_EMPTY(chain)) { prev_twait_block = NULL; LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { if (prev_twait_block) { SCTP_FREE(prev_twait_block, SCTP_M_TIMW); } prev_twait_block = twait_block; } SCTP_FREE(prev_twait_block, SCTP_M_TIMW); } } /* free the locks and mutexes */ #ifdef SCTP_PACKET_LOGGING SCTP_IP_PKTLOG_DESTROY(); #endif SCTP_IPI_ADDR_DESTROY(); SCTP_STATLOG_DESTROY(); SCTP_INP_INFO_LOCK_DESTROY(); SCTP_WQ_ADDR_DESTROY(); /* Get rid of other stuff too. 
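*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The
 * TIMEWAIT teardown above iterates with plain LIST_FOREACH but frees one
 * element *behind* the cursor (prev_twait_block), then releases the final
 * element after the loop -- an alternative to the _SAFE iterators when
 * the whole chain is being discarded. The same shape in miniature (names
 * hypothetical):
 */
#include <sys/queue.h>
#include <stdlib.h>

struct demo_block {
	LIST_ENTRY(demo_block) link;
};
LIST_HEAD(demo_chain, demo_block);

static void
demo_free_chain(struct demo_chain *chain)
{
	struct demo_block *blk, *prev = NULL;

	LIST_FOREACH(blk, chain, link) {
		free(prev);	/* free(NULL) is a no-op on the first pass */
		prev = blk;
	}
	free(prev);		/* the last element outlives the loop */
}

/*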
*/ if (SCTP_BASE_INFO(sctp_asochash) != NULL) SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark)); if (SCTP_BASE_INFO(sctp_ephash) != NULL) SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark)); if (SCTP_BASE_INFO(sctp_tcpephash) != NULL) SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_net)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_chunk)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_readq)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_strmoq)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack)); #if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) SCTP_FREE(SCTP_BASE_STATS, SCTP_M_MCORE); #endif } int sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, int offset, int limit, struct sockaddr *src, struct sockaddr *dst, struct sockaddr *altsa, uint16_t port) { /* * grub through the INIT pulling addresses and loading them to the * nets structure in the asoc. The from address in the mbuf should * also be loaded (if it is not already). This routine can be called * with either INIT or INIT-ACK's as long as the m points to the IP * packet and the offset points to the beginning of the parameters. */ struct sctp_inpcb *inp; struct sctp_nets *net, *nnet, *net_tmp; struct sctp_paramhdr *phdr, param_buf; struct sctp_tcb *stcb_tmp; uint16_t ptype, plen; struct sockaddr *sa; uint8_t random_store[SCTP_PARAM_BUFFER_SIZE]; struct sctp_auth_random *p_random = NULL; uint16_t random_len = 0; uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE]; struct sctp_auth_hmac_algo *hmacs = NULL; uint16_t hmacs_len = 0; uint8_t saw_asconf = 0; uint8_t saw_asconf_ack = 0; uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE]; struct sctp_auth_chunk_list *chunks = NULL; uint16_t num_chunks = 0; sctp_key_t *new_key; uint32_t keylen; int got_random = 0, got_hmacs = 0, got_chklist = 0; uint8_t peer_supports_ecn; uint8_t peer_supports_prsctp; uint8_t peer_supports_auth; uint8_t peer_supports_asconf; uint8_t peer_supports_asconf_ack; uint8_t peer_supports_reconfig; uint8_t peer_supports_nrsack; uint8_t peer_supports_pktdrop; uint8_t peer_supports_idata; #ifdef INET struct sockaddr_in sin; #endif #ifdef INET6 struct sockaddr_in6 sin6; #endif /* First get the destination address setup too. */ #ifdef INET memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_len = sizeof(sin); sin.sin_port = stcb->rport; #endif #ifdef INET6 memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = AF_INET6; sin6.sin6_len = sizeof(struct sockaddr_in6); sin6.sin6_port = stcb->rport; #endif if (altsa) { sa = altsa; } else { sa = src; } peer_supports_idata = 0; peer_supports_ecn = 0; peer_supports_prsctp = 0; peer_supports_auth = 0; peer_supports_asconf = 0; peer_supports_reconfig = 0; peer_supports_nrsack = 0; peer_supports_pktdrop = 0; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { /* mark all addresses that we have currently on the list */ net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC; } /* does the source address already exist? 
if so skip it */ inp = stcb->sctp_ep; atomic_add_int(&stcb->asoc.refcnt, 1); stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, dst, stcb); atomic_add_int(&stcb->asoc.refcnt, -1); if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) { /* we must add the source address */ /* no scope set here since we have a tcb already. */ switch (sa->sa_family) { #ifdef INET case AF_INET: if (stcb->asoc.scope.ipv4_addr_legal) { if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) { return (-1); } } break; #endif #ifdef INET6 case AF_INET6: if (stcb->asoc.scope.ipv6_addr_legal) { if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) { return (-2); } } break; #endif default: break; } } else { if (net_tmp != NULL && stcb_tmp == stcb) { net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC; } else if (stcb_tmp != stcb) { /* It belongs to another association? */ if (stcb_tmp) SCTP_TCB_UNLOCK(stcb_tmp); return (-3); } } if (stcb->asoc.state == 0) { /* the assoc was freed? */ return (-4); } /* now we must go through each of the params. */ phdr = sctp_get_next_param(m, offset, ¶m_buf, sizeof(param_buf)); while (phdr) { ptype = ntohs(phdr->param_type); plen = ntohs(phdr->param_length); /* * SCTP_PRINTF("ptype => %0x, plen => %d\n", * (uint32_t)ptype, (int)plen); */ if (offset + plen > limit) { break; } if (plen == 0) { break; } #ifdef INET if (ptype == SCTP_IPV4_ADDRESS) { if (stcb->asoc.scope.ipv4_addr_legal) { struct sctp_ipv4addr_param *p4, p4_buf; /* ok get the v4 address and check/add */ phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); if (plen != sizeof(struct sctp_ipv4addr_param) || phdr == NULL) { return (-5); } p4 = (struct sctp_ipv4addr_param *)phdr; sin.sin_addr.s_addr = p4->addr; if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { /* Skip multi-cast addresses */ goto next_param; } if ((sin.sin_addr.s_addr == INADDR_BROADCAST) || (sin.sin_addr.s_addr == INADDR_ANY)) { goto next_param; } sa = (struct sockaddr *)&sin; inp = stcb->sctp_ep; atomic_add_int(&stcb->asoc.refcnt, 1); stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net, dst, stcb); atomic_add_int(&stcb->asoc.refcnt, -1); if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) { /* we must add the source address */ /* * no scope set since we have a tcb * already */ /* * we must validate the state again * here */ add_it_now: if (stcb->asoc.state == 0) { /* the assoc was freed? */ return (-7); } if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) { return (-8); } } else if (stcb_tmp == stcb) { if (stcb->asoc.state == 0) { /* the assoc was freed? */ return (-10); } if (net != NULL) { /* clear flag */ net->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC; } } else { /* * strange, address is in another * assoc? straighten out locks. */ if (stcb_tmp) { if (SCTP_GET_STATE(stcb_tmp) == SCTP_STATE_COOKIE_WAIT) { struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; /* * in setup state we * abort this guy */ snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); sctp_abort_an_association(stcb_tmp->sctp_ep, stcb_tmp, op_err, SCTP_SO_NOT_LOCKED); goto add_it_now; } SCTP_TCB_UNLOCK(stcb_tmp); } if (stcb->asoc.state == 0) { /* the assoc was freed? 
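*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The
 * parameter walk in progress here is at heart a TLV scan: read a (type,
 * length) header, act on the value, then advance by the length rounded up
 * to a 4-byte boundary (SCTP_SIZE32). A flat-buffer model of that loop
 * (the header layout mirrors sctp_paramhdr; names hypothetical):
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct demo_paramhdr {
	uint16_t param_type;	/* network byte order */
	uint16_t param_length;	/* includes this header, network order */
};

#define DEMO_SIZE32(x)	(((x) + 3u) & ~3u)	/* pad to 32-bit multiple */

static void
demo_walk_params(const uint8_t *buf, size_t limit,
    void (*handle)(uint16_t type, const uint8_t *val, uint16_t len))
{
	struct demo_paramhdr ph;
	size_t offset = 0;
	uint16_t plen;

	while (offset + sizeof(ph) <= limit) {
		memcpy(&ph, buf + offset, sizeof(ph));
		plen = ntohs(ph.param_length);
		if (plen < sizeof(ph) || offset + plen > limit)
			break;	/* malformed or truncated: stop, as above */
		handle(ntohs(ph.param_type), buf + offset + sizeof(ph),
		    (uint16_t)(plen - sizeof(ph)));
		offset += DEMO_SIZE32(plen);
	}
}

/*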
*/ return (-12); } return (-13); } } } else #endif #ifdef INET6 if (ptype == SCTP_IPV6_ADDRESS) { if (stcb->asoc.scope.ipv6_addr_legal) { /* ok get the v6 address and check/add */ struct sctp_ipv6addr_param *p6, p6_buf; phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); if (plen != sizeof(struct sctp_ipv6addr_param) || phdr == NULL) { return (-14); } p6 = (struct sctp_ipv6addr_param *)phdr; memcpy((caddr_t)&sin6.sin6_addr, p6->addr, sizeof(p6->addr)); if (IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) { /* Skip multi-cast addresses */ goto next_param; } if (IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) { /* * Link local make no sense without * scope */ goto next_param; } sa = (struct sockaddr *)&sin6; inp = stcb->sctp_ep; atomic_add_int(&stcb->asoc.refcnt, 1); stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net, dst, stcb); atomic_add_int(&stcb->asoc.refcnt, -1); if (stcb_tmp == NULL && (inp == stcb->sctp_ep || inp == NULL)) { /* * we must validate the state again * here */ add_it_now6: if (stcb->asoc.state == 0) { /* the assoc was freed? */ return (-16); } /* * we must add the address, no scope * set */ if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) { return (-17); } } else if (stcb_tmp == stcb) { /* * we must validate the state again * here */ if (stcb->asoc.state == 0) { /* the assoc was freed? */ return (-19); } if (net != NULL) { /* clear flag */ net->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC; } } else { /* * strange, address is in another * assoc? straighten out locks. */ if (stcb_tmp) { if (SCTP_GET_STATE(stcb_tmp) == SCTP_STATE_COOKIE_WAIT) { struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; /* * in setup state we * abort this guy */ snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); sctp_abort_an_association(stcb_tmp->sctp_ep, stcb_tmp, op_err, SCTP_SO_NOT_LOCKED); goto add_it_now6; } SCTP_TCB_UNLOCK(stcb_tmp); } if (stcb->asoc.state == 0) { /* the assoc was freed? 
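*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The v6
 * branch above screens peer-listed addresses before adding them: multicast
 * is skipped, and link-local is skipped because it is ambiguous without a
 * scope id. That filter isolated (function name hypothetical):
 */
#include <netinet/in.h>

static int
demo_v6_usable(const struct in6_addr *a)
{
	if (IN6_IS_ADDR_MULTICAST(a))
		return (0);
	if (IN6_IS_ADDR_LINKLOCAL(a))
		return (0);	/* no scope id is carried in the parameter */
	return (1);
}

/*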
*/ return (-21); } return (-22); } } } else #endif if (ptype == SCTP_ECN_CAPABLE) { peer_supports_ecn = 1; } else if (ptype == SCTP_ULP_ADAPTATION) { if (stcb->asoc.state != SCTP_STATE_OPEN) { struct sctp_adaptation_layer_indication ai, *aip; phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)&ai, sizeof(ai)); aip = (struct sctp_adaptation_layer_indication *)phdr; if (aip) { stcb->asoc.peers_adaptation = ntohl(aip->indication); stcb->asoc.adaptation_needed = 1; } } } else if (ptype == SCTP_SET_PRIM_ADDR) { struct sctp_asconf_addr_param lstore, *fee; int lptype; struct sockaddr *lsa = NULL; #ifdef INET struct sctp_asconf_addrv4_param *fii; #endif if (stcb->asoc.asconf_supported == 0) { return (-100); } if (plen > sizeof(lstore)) { return (-23); } phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)&lstore, plen); if (phdr == NULL) { return (-24); } fee = (struct sctp_asconf_addr_param *)phdr; lptype = ntohs(fee->addrp.ph.param_type); switch (lptype) { #ifdef INET case SCTP_IPV4_ADDRESS: if (plen != sizeof(struct sctp_asconf_addrv4_param)) { SCTP_PRINTF("Sizeof setprim in init/init ack not %d but %d - ignored\n", (int)sizeof(struct sctp_asconf_addrv4_param), plen); } else { fii = (struct sctp_asconf_addrv4_param *)fee; sin.sin_addr.s_addr = fii->addrp.addr; lsa = (struct sockaddr *)&sin; } break; #endif #ifdef INET6 case SCTP_IPV6_ADDRESS: if (plen != sizeof(struct sctp_asconf_addr_param)) { SCTP_PRINTF("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n", (int)sizeof(struct sctp_asconf_addr_param), plen); } else { memcpy(sin6.sin6_addr.s6_addr, fee->addrp.addr, sizeof(fee->addrp.addr)); lsa = (struct sockaddr *)&sin6; } break; #endif default: break; } if (lsa) { (void)sctp_set_primary_addr(stcb, sa, NULL); } } else if (ptype == SCTP_HAS_NAT_SUPPORT) { stcb->asoc.peer_supports_nat = 1; } else if (ptype == SCTP_PRSCTP_SUPPORTED) { /* Peer supports pr-sctp */ peer_supports_prsctp = 1; } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) { /* A supported extension chunk */ struct sctp_supported_chunk_types_param *pr_supported; uint8_t local_store[SCTP_PARAM_BUFFER_SIZE]; int num_ent, i; if (plen > sizeof(local_store)) { return (-35); } phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)&local_store, plen); if (phdr == NULL) { return (-25); } pr_supported = (struct sctp_supported_chunk_types_param *)phdr; num_ent = plen - sizeof(struct sctp_paramhdr); for (i = 0; i < num_ent; i++) { switch (pr_supported->chunk_types[i]) { case SCTP_ASCONF: peer_supports_asconf = 1; break; case SCTP_ASCONF_ACK: peer_supports_asconf_ack = 1; break; case SCTP_FORWARD_CUM_TSN: peer_supports_prsctp = 1; break; case SCTP_PACKET_DROPPED: peer_supports_pktdrop = 1; break; case SCTP_NR_SELECTIVE_ACK: peer_supports_nrsack = 1; break; case SCTP_STREAM_RESET: peer_supports_reconfig = 1; break; case SCTP_AUTHENTICATION: peer_supports_auth = 1; break; case SCTP_IDATA: peer_supports_idata = 1; break; default: /* one I have not learned yet */ break; } } } else if (ptype == SCTP_RANDOM) { if (plen > sizeof(random_store)) break; if (got_random) { /* already processed a RANDOM */ goto next_param; } phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)random_store, plen); if (phdr == NULL) return (-26); p_random = (struct sctp_auth_random *)phdr; random_len = plen - sizeof(*p_random); /* enforce the random length */ if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) { SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: invalid RANDOM len\n"); return (-27); } got_random = 1; } else if (ptype == 
SCTP_HMAC_LIST) { uint16_t num_hmacs; uint16_t i; if (plen > sizeof(hmacs_store)) break; if (got_hmacs) { /* already processed a HMAC list */ goto next_param; } phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)hmacs_store, plen); if (phdr == NULL) return (-28); hmacs = (struct sctp_auth_hmac_algo *)phdr; hmacs_len = plen - sizeof(*hmacs); num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]); /* validate the hmac list */ if (sctp_verify_hmac_param(hmacs, num_hmacs)) { return (-29); } if (stcb->asoc.peer_hmacs != NULL) sctp_free_hmaclist(stcb->asoc.peer_hmacs); stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs); if (stcb->asoc.peer_hmacs != NULL) { for (i = 0; i < num_hmacs; i++) { (void)sctp_auth_add_hmacid(stcb->asoc.peer_hmacs, ntohs(hmacs->hmac_ids[i])); } } got_hmacs = 1; } else if (ptype == SCTP_CHUNK_LIST) { int i; if (plen > sizeof(chunks_store)) break; if (got_chklist) { /* already processed a Chunks list */ goto next_param; } phdr = sctp_get_next_param(m, offset, (struct sctp_paramhdr *)chunks_store, plen); if (phdr == NULL) return (-30); chunks = (struct sctp_auth_chunk_list *)phdr; num_chunks = plen - sizeof(*chunks); if (stcb->asoc.peer_auth_chunks != NULL) sctp_clear_chunklist(stcb->asoc.peer_auth_chunks); else stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist(); for (i = 0; i < num_chunks; i++) { (void)sctp_auth_add_chunk(chunks->chunk_types[i], stcb->asoc.peer_auth_chunks); /* record asconf/asconf-ack if listed */ if (chunks->chunk_types[i] == SCTP_ASCONF) saw_asconf = 1; if (chunks->chunk_types[i] == SCTP_ASCONF_ACK) saw_asconf_ack = 1; } got_chklist = 1; } else if ((ptype == SCTP_HEARTBEAT_INFO) || (ptype == SCTP_STATE_COOKIE) || (ptype == SCTP_UNRECOG_PARAM) || (ptype == SCTP_COOKIE_PRESERVE) || (ptype == SCTP_SUPPORTED_ADDRTYPE) || (ptype == SCTP_ADD_IP_ADDRESS) || (ptype == SCTP_DEL_IP_ADDRESS) || (ptype == SCTP_ERROR_CAUSE_IND) || (ptype == SCTP_SUCCESS_REPORT)) { /* don't care */ ; } else { if ((ptype & 0x8000) == 0x0000) { /* * must stop processing the rest of the * param's. Any report bits were handled * with the call to * sctp_arethere_unrecognized_parameters() * when the INIT or INIT-ACK was first seen. 
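*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The
 * terminating test above ((ptype & 0x8000) == 0x0000) applies RFC 4960
 * section 3.2.1: the two high bits of an unrecognized parameter type say
 * how to handle it. Bit 15 decides continue-vs-stop (all this loop needs,
 * since reporting was handled when the INIT was first parsed); bit 14
 * decides whether to report. Decoded in full:
 */
#include <stdint.h>

enum demo_action { DEMO_STOP, DEMO_STOP_REPORT, DEMO_SKIP, DEMO_SKIP_REPORT };

static enum demo_action
demo_unrecognized_param_action(uint16_t ptype)
{
	int cont = (ptype & 0x8000) != 0;	/* keep processing others? */
	int report = (ptype & 0x4000) != 0;	/* report the unknown param? */

	if (cont)
		return (report ? DEMO_SKIP_REPORT : DEMO_SKIP);
	return (report ? DEMO_STOP_REPORT : DEMO_STOP);
}

/*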
*/ break; } } next_param: offset += SCTP_SIZE32(plen); if (offset >= limit) { break; } phdr = sctp_get_next_param(m, offset, ¶m_buf, sizeof(param_buf)); } /* Now check to see if we need to purge any addresses */ TAILQ_FOREACH_SAFE(net, &stcb->asoc.nets, sctp_next, nnet) { if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) == SCTP_ADDR_NOT_IN_ASSOC) { /* This address has been removed from the asoc */ /* remove and free it */ stcb->asoc.numnets--; TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next); sctp_free_remote_addr(net); if (net == stcb->asoc.primary_destination) { stcb->asoc.primary_destination = NULL; sctp_select_primary_destination(stcb); } } } if ((stcb->asoc.ecn_supported == 1) && (peer_supports_ecn == 0)) { stcb->asoc.ecn_supported = 0; } if ((stcb->asoc.prsctp_supported == 1) && (peer_supports_prsctp == 0)) { stcb->asoc.prsctp_supported = 0; } if ((stcb->asoc.auth_supported == 1) && ((peer_supports_auth == 0) || (got_random == 0) || (got_hmacs == 0))) { stcb->asoc.auth_supported = 0; } if ((stcb->asoc.asconf_supported == 1) && ((peer_supports_asconf == 0) || (peer_supports_asconf_ack == 0) || (stcb->asoc.auth_supported == 0) || (saw_asconf == 0) || (saw_asconf_ack == 0))) { stcb->asoc.asconf_supported = 0; } if ((stcb->asoc.reconfig_supported == 1) && (peer_supports_reconfig == 0)) { stcb->asoc.reconfig_supported = 0; } if ((stcb->asoc.idata_supported == 1) && (peer_supports_idata == 0)) { stcb->asoc.idata_supported = 0; } if ((stcb->asoc.nrsack_supported == 1) && (peer_supports_nrsack == 0)) { stcb->asoc.nrsack_supported = 0; } if ((stcb->asoc.pktdrop_supported == 1) && (peer_supports_pktdrop == 0)) { stcb->asoc.pktdrop_supported = 0; } /* validate authentication required parameters */ if ((peer_supports_auth == 0) && (got_chklist == 1)) { /* peer does not support auth but sent a chunks list? */ return (-31); } if ((peer_supports_asconf == 1) && (peer_supports_auth == 0)) { /* peer supports asconf but not auth? */ return (-32); } else if ((peer_supports_asconf == 1) && (peer_supports_auth == 1) && ((saw_asconf == 0) || (saw_asconf_ack == 0))) { return (-33); } /* concatenate the full random key */ keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len; if (chunks != NULL) { keylen += sizeof(*chunks) + num_chunks; } new_key = sctp_alloc_key(keylen); if (new_key != NULL) { /* copy in the RANDOM */ if (p_random != NULL) { keylen = sizeof(*p_random) + random_len; memcpy(new_key->key, p_random, keylen); } else { keylen = 0; } /* append in the AUTH chunks */ if (chunks != NULL) { memcpy(new_key->key + keylen, chunks, sizeof(*chunks) + num_chunks); keylen += sizeof(*chunks) + num_chunks; } /* append in the HMACs */ if (hmacs != NULL) { memcpy(new_key->key + keylen, hmacs, sizeof(*hmacs) + hmacs_len); } } else { /* failed to get memory for the key */ return (-34); } if (stcb->asoc.authinfo.peer_random != NULL) sctp_free_key(stcb->asoc.authinfo.peer_random); stcb->asoc.authinfo.peer_random = new_key; sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid); sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid); return (0); } int sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa, struct sctp_nets *net) { /* make sure the requested primary address exists in the assoc */ if (net == NULL && sa) net = sctp_findnet(stcb, sa); if (net == NULL) { /* didn't find the requested primary address! 
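*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The key
 * assembly above follows RFC 4895: the peer's key material is the RANDOM
 * parameter, then the chunk list, then the HMAC-ALGO list, laid back to
 * back in one buffer (the kernel copies the full TLVs, headers included).
 * In miniature (names hypothetical):
 */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static uint8_t *
demo_concat_key(const uint8_t *rnd, size_t rnd_len,
    const uint8_t *chunks, size_t chunks_len,
    const uint8_t *hmacs, size_t hmacs_len, size_t *keylen)
{
	uint8_t *key;

	key = malloc(rnd_len + chunks_len + hmacs_len);
	if (key == NULL)
		return (NULL);	/* mirrors the kernel's return (-34) path */
	memcpy(key, rnd, rnd_len);
	memcpy(key + rnd_len, chunks, chunks_len);
	memcpy(key + rnd_len + chunks_len, hmacs, hmacs_len);
	*keylen = rnd_len + chunks_len + hmacs_len;
	return (key);
}

/*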
*/ return (-1); } else { /* set the primary address */ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { /* Must be confirmed, so queue to set */ net->dest_state |= SCTP_ADDR_REQ_PRIMARY; return (0); } stcb->asoc.primary_destination = net; if (!(net->dest_state & SCTP_ADDR_PF) && (stcb->asoc.alternate)) { sctp_free_remote_addr(stcb->asoc.alternate); stcb->asoc.alternate = NULL; } net = TAILQ_FIRST(&stcb->asoc.nets); if (net != stcb->asoc.primary_destination) { /* * first one on the list is NOT the primary * sctp_cmpaddr() is much more efficient if the * primary is the first on the list, make it so. */ TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); } return (0); } } int sctp_is_vtag_good(uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now) { /* * This function serves two purposes. It will see if a TAG can be * re-used and return 1 for yes it is ok and 0 for don't use that * tag. A secondary function it will do is purge out old tags that * can be removed. */ struct sctpvtaghead *chain; struct sctp_tagblock *twait_block; struct sctpasochead *head; struct sctp_tcb *stcb; int i; SCTP_INP_INFO_RLOCK(); head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, SCTP_BASE_INFO(hashasocmark))]; LIST_FOREACH(stcb, head, sctp_asocs) { /* * We choose not to lock anything here. TCB's can't be * removed since we have the read lock, so they can't be * freed on us, same thing for the INP. I may be wrong with * this assumption, but we will go with it for now :-) */ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { continue; } if (stcb->asoc.my_vtag == tag) { /* candidate */ if (stcb->rport != rport) { continue; } if (stcb->sctp_ep->sctp_lport != lport) { continue; } /* Its a used tag set */ SCTP_INP_INFO_RUNLOCK(); return (0); } } chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; /* Now what about timed wait ? */ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { /* * Block(s) are present, lets see if we have this tag in the * list */ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { if (twait_block->vtag_block[i].v_tag == 0) { /* not used */ continue; } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire < now->tv_sec) { /* Audit expires this guy */ twait_block->vtag_block[i].tv_sec_at_expire = 0; twait_block->vtag_block[i].v_tag = 0; twait_block->vtag_block[i].lport = 0; twait_block->vtag_block[i].rport = 0; } else if ((twait_block->vtag_block[i].v_tag == tag) && (twait_block->vtag_block[i].lport == lport) && (twait_block->vtag_block[i].rport == rport)) { /* Bad tag, sorry :< */ SCTP_INP_INFO_RUNLOCK(); return (0); } } } SCTP_INP_INFO_RUNLOCK(); return (1); } static void sctp_drain_mbufs(struct sctp_tcb *stcb) { /* * We must hunt this association for MBUF's past the cumack (i.e. * out of order data that we can renege on). */ struct sctp_association *asoc; struct sctp_tmit_chunk *chk, *nchk; uint32_t cumulative_tsn_p1; struct sctp_queued_to_read *control, *ncontrol; int cnt, strmat; uint32_t gap, i; int fnd = 0; /* We look for anything larger than the cum-ack + 1 */ asoc = &stcb->asoc; if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) { /* none we can reneg on. */ return; } SCTP_STAT_INCR(sctps_protocol_drains_done); cumulative_tsn_p1 = asoc->cumulative_tsn + 1; cnt = 0; /* Ok that was fun, now we will drain all the inbound streams? 
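*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.]
 * sctp_set_primary_addr() above keeps the primary net first in asoc->nets
 * purely as an optimization: sctp_cmpaddr() callers probe the primary most
 * often, so the list is reordered once rather than searched every time.
 * The remove-and-reinsert move isolated (names hypothetical):
 */
#include <sys/queue.h>

struct demo_net {
	TAILQ_ENTRY(demo_net) next;
};
TAILQ_HEAD(demo_nets, demo_net);

static void
demo_make_primary_first(struct demo_nets *nets, struct demo_net *primary)
{
	if (TAILQ_FIRST(nets) != primary) {
		TAILQ_REMOVE(nets, primary, next);
		TAILQ_INSERT_HEAD(nets, primary, next);
	}
}

/*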
*/ for (strmat = 0; strmat < asoc->streamincnt; strmat++) { TAILQ_FOREACH_SAFE(control, &asoc->strmin[strmat].inqueue, next_instrm, ncontrol) { #ifdef INVARIANTS if (control->on_strm_q != SCTP_ON_ORDERED) { panic("Huh control: %p on_q: %d -- not ordered?", control, control->on_strm_q); } #endif if (SCTP_TSN_GT(control->sinfo_tsn, cumulative_tsn_p1)) { /* Yep it is above cum-ack */ cnt++; SCTP_CALC_TSN_TO_GAP(gap, control->sinfo_tsn, asoc->mapping_array_base_tsn); KASSERT(control->length > 0, ("control has zero length")); if (asoc->size_on_all_streams >= control->length) { asoc->size_on_all_streams -= control->length; } else { #ifdef INVARIANTS panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); #else asoc->size_on_all_streams = 0; #endif } sctp_ucount_decr(asoc->cnt_on_all_streams); SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); if (control->on_read_q) { TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next); control->on_read_q = 0; } TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, control, next_instrm); control->on_strm_q = 0; if (control->data) { sctp_m_freem(control->data); control->data = NULL; } sctp_free_remote_addr(control->whoFrom); /* Now its reasm? */ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { cnt++; SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn); KASSERT(chk->send_size > 0, ("chunk has zero length")); if (asoc->size_on_reasm_queue >= chk->send_size) { asoc->size_on_reasm_queue -= chk->send_size; } else { #ifdef INVARIANTS panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size); #else asoc->size_on_reasm_queue = 0; #endif } sctp_ucount_decr(asoc->cnt_on_reasm_queue); SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); TAILQ_REMOVE(&control->reasm, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); } sctp_free_a_readq(stcb, control); } } TAILQ_FOREACH_SAFE(control, &asoc->strmin[strmat].uno_inqueue, next_instrm, ncontrol) { #ifdef INVARIANTS if (control->on_strm_q != SCTP_ON_UNORDERED) { panic("Huh control: %p on_q: %d -- not unordered?", control, control->on_strm_q); } #endif if (SCTP_TSN_GT(control->sinfo_tsn, cumulative_tsn_p1)) { /* Yep it is above cum-ack */ cnt++; SCTP_CALC_TSN_TO_GAP(gap, control->sinfo_tsn, asoc->mapping_array_base_tsn); KASSERT(control->length > 0, ("control has zero length")); if (asoc->size_on_all_streams >= control->length) { asoc->size_on_all_streams -= control->length; } else { #ifdef INVARIANTS panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); #else asoc->size_on_all_streams = 0; #endif } sctp_ucount_decr(asoc->cnt_on_all_streams); SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); if (control->on_read_q) { TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next); control->on_read_q = 0; } TAILQ_REMOVE(&asoc->strmin[strmat].uno_inqueue, control, next_instrm); control->on_strm_q = 0; if (control->data) { sctp_m_freem(control->data); control->data = NULL; } sctp_free_remote_addr(control->whoFrom); /* Now its reasm? 
*/ TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { cnt++; SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn); KASSERT(chk->send_size > 0, ("chunk has zero length")); if (asoc->size_on_reasm_queue >= chk->send_size) { asoc->size_on_reasm_queue -= chk->send_size; } else { #ifdef INVARIANTS panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size); #else asoc->size_on_reasm_queue = 0; #endif } sctp_ucount_decr(asoc->cnt_on_reasm_queue); SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); TAILQ_REMOVE(&control->reasm, chk, sctp_next); if (chk->data) { sctp_m_freem(chk->data); chk->data = NULL; } sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); } sctp_free_a_readq(stcb, control); } } } if (cnt) { /* We must back down to see what the new highest is */ for (i = asoc->highest_tsn_inside_map; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { asoc->highest_tsn_inside_map = i; fnd = 1; break; } } if (!fnd) { asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; } /* * Question, should we go through the delivery queue? The * only reason things are on here is the app not reading OR * a p-d-api up. An attacker COULD send enough in to * initiate the PD-API and then send a bunch of stuff to * other streams... these would wind up on the delivery * queue.. and then we would not get to them. But in order * to do this I then have to back-track and un-deliver * sequence numbers in streams.. el-yucko. I think for now * we will NOT look at the delivery queue and leave it to be * something to consider later. An alternative would be to * abort the P-D-API with a notification and then deliver * the data.... Or another method might be to keep track of * how many times the situation occurs and if we see a * possible attack underway just abort the association. */ #ifdef SCTP_DEBUG SCTPDBG(SCTP_DEBUG_PCB1, "Freed %d chunks from reneg harvest\n", cnt); #endif /* * Now do we need to find a new * asoc->highest_tsn_inside_map? */ asoc->last_revoke_count = cnt; (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); /* sa_ignore NO_NULL_CHK */ sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_DRAIN, SCTP_SO_NOT_LOCKED); } /* * Another issue, in un-setting the TSN's in the mapping array we * DID NOT adjust the highest_tsn marker. This will cause one of * two things to occur. It may cause us to do extra work in checking * for our mapping array movement. More importantly it may cause us * to SACK every datagram. This may not be a bad thing though since * we will recover once we get our cum-ack above and all this stuff * we dumped recovered. */ } void sctp_drain() { /* * We must walk the PCB lists for ALL associations here. The system * is LOW on MBUF's and needs help. This is where reneging will * occur. We really hope this does NOT happen! 
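*/

/*
 * [Editor's sketch -- illustrative only, not part of this commit.] The
 * reneging logic above leans on SCTP_TSN_GT/SCTP_TSN_GE, which compare
 * 32-bit TSNs in serial-number arithmetic (RFC 1982 style) so ordering
 * survives wraparound. One common formulation, as a sketch -- the in-tree
 * macros may be written differently:
 */
#include <stdint.h>

static int
demo_tsn_gt(uint32_t a, uint32_t b)
{
	/* True when a lies "after" b on the 2^32 circle. */
	return ((int32_t)(a - b) > 0);
}

/*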
*/ VNET_ITERATOR_DECL(vnet_iter); VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); struct sctp_inpcb *inp; struct sctp_tcb *stcb; SCTP_STAT_INCR(sctps_protocol_drain_calls); if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { #ifdef VIMAGE continue; #else return; #endif } SCTP_INP_INFO_RLOCK(); LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) { /* For each endpoint */ SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { /* For each association */ SCTP_TCB_LOCK(stcb); sctp_drain_mbufs(stcb); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } SCTP_INP_INFO_RUNLOCK(); CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); } /* * start a new iterator * iterates through all endpoints and associations based on the pcb_state * flags and asoc_state. "af" (mandatory) is executed for all matching * assocs and "ef" (optional) is executed when the iterator completes. * "inpf" (optional) is executed for each new endpoint as it is being * iterated through. inpe (optional) is called when the inp completes * its way through all the stcbs. */ int sctp_initiate_iterator(inp_func inpf, asoc_func af, inp_func inpe, uint32_t pcb_state, uint32_t pcb_features, uint32_t asoc_state, void *argp, uint32_t argi, end_func ef, struct sctp_inpcb *s_inp, uint8_t chunk_output_off) { struct sctp_iterator *it = NULL; if (af == NULL) { return (-1); } if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { SCTP_PRINTF("%s: abort on initialize being %d\n", __func__, SCTP_BASE_VAR(sctp_pcb_initialized)); return (-1); } SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator), SCTP_M_ITER); if (it == NULL) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM); return (ENOMEM); } memset(it, 0, sizeof(*it)); it->function_assoc = af; it->function_inp = inpf; if (inpf) it->done_current_ep = 0; else it->done_current_ep = 1; it->function_atend = ef; it->pointer = argp; it->val = argi; it->pcb_flags = pcb_state; it->pcb_features = pcb_features; it->asoc_state = asoc_state; it->function_inp_end = inpe; it->no_chunk_output = chunk_output_off; it->vn = curvnet; if (s_inp) { /* Assume lock is held here */ it->inp = s_inp; SCTP_INP_INCR_REF(it->inp); it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP; } else { SCTP_INP_INFO_RLOCK(); it->inp = LIST_FIRST(&SCTP_BASE_INFO(listhead)); if (it->inp) { SCTP_INP_INCR_REF(it->inp); } SCTP_INP_INFO_RUNLOCK(); it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP; } SCTP_IPI_ITERATOR_WQ_LOCK(); if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { SCTP_IPI_ITERATOR_WQ_UNLOCK(); SCTP_PRINTF("%s: rollback on initialize being %d it=%p\n", __func__, SCTP_BASE_VAR(sctp_pcb_initialized), it); SCTP_FREE(it, SCTP_M_ITER); return (-1); } TAILQ_INSERT_TAIL(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); if (sctp_it_ctl.iterator_running == 0) { sctp_wakeup_iterator(); } SCTP_IPI_ITERATOR_WQ_UNLOCK(); /* sa_ignore MEMLEAK {memory is put on the tailq for the iterator} */ return (0); } Index: head/sys/netinet/sctp_usrreq.c =================================================================== --- head/sys/netinet/sctp_usrreq.c (revision 350587) +++ head/sys/netinet/sctp_usrreq.c (revision 350588) @@ -1,7506 +1,7505 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #ifdef INET6 #include #endif #include #include #include #include #include #include #include #include #include #include extern const struct sctp_cc_functions sctp_cc_functions[]; extern const struct sctp_ss_functions sctp_ss_functions[]; void sctp_init(void) { u_long sb_max_adj; /* Initialize and modify the sysctled variables */ sctp_init_sysctls(); if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8); /* * Allow a user to take no more than 1/2 the number of clusters or * the SB_MAX whichever is smaller for the send window. */ sb_max_adj = (u_long)((u_quad_t)(SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj, (((uint32_t)nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT)); /* * Now for the recv window, should we take the same amount? or * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For * now I will just copy. 
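 *
 * To make the clamp above concrete, with the stock values of
 * SB_MAX = 2097152 (2 MB), MSIZE = 256 and MCLBYTES = 2048 (all of
 * these are tunable, so the numbers are an illustration only):
 *
 *   sb_max_adj = 2097152 * 2048 / (256 + 2048)
 *              = 4294967296 / 2304
 *              = 1864135 bytes (~1.78 MB)
 *
 * i.e. the usable send window is SB_MAX scaled down by the per-cluster
 * mbuf header overhead.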
*/ SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace); SCTP_BASE_VAR(first_time) = 0; SCTP_BASE_VAR(sctp_pcb_initialized) = 0; sctp_pcb_init(); #if defined(SCTP_PACKET_LOGGING) SCTP_BASE_VAR(packet_log_writers) = 0; SCTP_BASE_VAR(packet_log_end) = 0; memset(&SCTP_BASE_VAR(packet_log_buffer), 0, SCTP_PACKET_LOG_SIZE); #endif } #ifdef VIMAGE static void sctp_finish(void *unused __unused) { sctp_pcb_finish(); } VNET_SYSUNINIT(sctp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, sctp_finish, NULL); #endif void sctp_pathmtu_adjustment(struct sctp_tcb *stcb, uint16_t nxtsz) { struct sctp_tmit_chunk *chk; uint16_t overhead; /* Adjust that too */ stcb->asoc.smallest_mtu = nxtsz; /* now off to subtract IP_DF flag if needed */ overhead = IP_HDR_SIZE + sizeof(struct sctphdr); if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); } TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) { if ((chk->send_size + overhead) > nxtsz) { chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; } } TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { if ((chk->send_size + overhead) > nxtsz) { /* * For this guy we also mark for immediate resend * since we sent to big of chunk */ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; if (chk->sent < SCTP_DATAGRAM_RESEND) { sctp_flight_size_decrease(chk); sctp_total_flight_decrease(stcb, chk); chk->sent = SCTP_DATAGRAM_RESEND; sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); chk->rec.data.doing_fast_retransmit = 0; if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU, chk->whoTo->flight_size, chk->book_size, (uint32_t)(uintptr_t)chk->whoTo, chk->rec.data.tsn); } /* Clear any time so NO RTT is being done */ chk->do_rtt = 0; } } } } #ifdef INET void sctp_notify(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, uint8_t icmp_type, uint8_t icmp_code, uint16_t ip_len, uint32_t next_mtu) { #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) struct socket *so; #endif int timer_stopped; if (icmp_type != ICMP_UNREACH) { /* We only care about unreachable */ SCTP_TCB_UNLOCK(stcb); return; } if ((icmp_code == ICMP_UNREACH_NET) || (icmp_code == ICMP_UNREACH_HOST) || (icmp_code == ICMP_UNREACH_NET_UNKNOWN) || (icmp_code == ICMP_UNREACH_HOST_UNKNOWN) || (icmp_code == ICMP_UNREACH_ISOLATED) || (icmp_code == ICMP_UNREACH_NET_PROHIB) || (icmp_code == ICMP_UNREACH_HOST_PROHIB) || (icmp_code == ICMP_UNREACH_FILTER_PROHIB)) { /* Mark the net unreachable. */ if (net->dest_state & SCTP_ADDR_REACHABLE) { /* OK, that destination is NOT reachable. */ net->dest_state &= ~SCTP_ADDR_REACHABLE; net->dest_state &= ~SCTP_ADDR_PF; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, (void *)net, SCTP_SO_NOT_LOCKED); } SCTP_TCB_UNLOCK(stcb); } else if ((icmp_code == ICMP_UNREACH_PROTOCOL) || (icmp_code == ICMP_UNREACH_PORT)) { /* Treat it like an ABORT. */ sctp_abort_notification(stcb, 1, 0, NULL, SCTP_SO_NOT_LOCKED); #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) so = SCTP_INP_SO(inp); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) SCTP_SOCKET_UNLOCK(so, 1); /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. 
*/ #endif /* no need to unlock here, since the TCB is gone */ } else if (icmp_code == ICMP_UNREACH_NEEDFRAG) { if (net->dest_state & SCTP_ADDR_NO_PMTUD) { SCTP_TCB_UNLOCK(stcb); return; } /* Find the next (smaller) MTU */ if (next_mtu == 0) { /* * Old type router that does not tell us what the * next MTU is. Rats we will have to guess (in a * educated fashion of course). */ next_mtu = sctp_get_prev_mtu(ip_len); } /* Stop the PMTU timer. */ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { timer_stopped = 1; sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1); } else { timer_stopped = 0; } /* Update the path MTU. */ if (net->port) { next_mtu -= sizeof(struct udphdr); } if (net->mtu > next_mtu) { net->mtu = next_mtu; if (net->port) { sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu + sizeof(struct udphdr)); } else { sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu); } } /* Update the association MTU */ if (stcb->asoc.smallest_mtu > next_mtu) { sctp_pathmtu_adjustment(stcb, next_mtu); } /* Finally, start the PMTU timer if it was running before. */ if (timer_stopped) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } SCTP_TCB_UNLOCK(stcb); } else { SCTP_TCB_UNLOCK(stcb); } } void sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip) { struct ip *outer_ip; struct ip *inner_ip; struct sctphdr *sh; struct icmp *icmp; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; struct sctp_init_chunk *ch; struct sockaddr_in src, dst; if (sa->sa_family != AF_INET || ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) { return; } if (PRC_IS_REDIRECT(cmd)) { vip = NULL; } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) { return; } if (vip != NULL) { inner_ip = (struct ip *)vip; icmp = (struct icmp *)((caddr_t)inner_ip - (sizeof(struct icmp) - sizeof(struct ip))); outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); sh = (struct sctphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); memset(&src, 0, sizeof(struct sockaddr_in)); src.sin_family = AF_INET; src.sin_len = sizeof(struct sockaddr_in); src.sin_port = sh->src_port; src.sin_addr = inner_ip->ip_src; memset(&dst, 0, sizeof(struct sockaddr_in)); dst.sin_family = AF_INET; dst.sin_len = sizeof(struct sockaddr_in); dst.sin_port = sh->dest_port; dst.sin_addr = inner_ip->ip_dst; /* * 'dst' holds the dest of the packet that failed to be * sent. 'src' holds our local endpoint address. Thus we * reverse the dst and the src in the lookup. */ inp = NULL; net = NULL; stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, (struct sockaddr *)&src, &inp, &net, 1, SCTP_DEFAULT_VRFID); if ((stcb != NULL) && (net != NULL) && (inp != NULL)) { /* Check the verification tag */ if (ntohl(sh->v_tag) != 0) { /* * This must be the verification tag used * for sending out packets. We don't * consider packets reflecting the * verification tag. */ if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { SCTP_TCB_UNLOCK(stcb); return; } } else { if (ntohs(outer_ip->ip_len) >= sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + 20) { /* * In this case we can check if we * got an INIT chunk and if the * initiate tag matches. 
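 *
 * The length test guarantees 20 bytes of SCTP data beyond the inner
 * IP header: 12 (struct sctphdr) + 4 (chunk type/flags/length) +
 * 4 (initiate_tag) -- just enough to read ch->ch.chunk_type and
 * ch->init.initiate_tag safely below.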
*/ ch = (struct sctp_init_chunk *)(sh + 1); if ((ch->ch.chunk_type != SCTP_INITIATION) || (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { SCTP_TCB_UNLOCK(stcb); return; } } else { SCTP_TCB_UNLOCK(stcb); return; } } sctp_notify(inp, stcb, net, icmp->icmp_type, icmp->icmp_code, ntohs(inner_ip->ip_len), (uint32_t)ntohs(icmp->icmp_nextmtu)); } else { if ((stcb == NULL) && (inp != NULL)) { /* reduce ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } if (stcb) { SCTP_TCB_UNLOCK(stcb); } } } return; } #endif static int sctp_getcred(SYSCTL_HANDLER_ARGS) { struct xucred xuc; struct sockaddr_in addrs[2]; struct sctp_inpcb *inp; struct sctp_nets *net; struct sctp_tcb *stcb; int error; uint32_t vrf_id; /* FIX, for non-bsd is this right? */ vrf_id = SCTP_DEFAULT_VRFID; error = priv_check(req->td, PRIV_NETINET_GETCRED); if (error) return (error); error = SYSCTL_IN(req, addrs, sizeof(addrs)); if (error) return (error); stcb = sctp_findassociation_addr_sa(sintosa(&addrs[1]), sintosa(&addrs[0]), &inp, &net, 1, vrf_id); if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { if ((inp != NULL) && (stcb == NULL)) { /* reduce ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); goto cred_can_cont; } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; goto out; } SCTP_TCB_UNLOCK(stcb); /* * We use the write lock here, only since in the error leg we need * it. If we used RLOCK, then we would have to * wlock/decr/unlock/rlock. Which in theory could create a hole. * Better to use higher wlock. */ SCTP_INP_WLOCK(inp); cred_can_cont: error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); if (error) { SCTP_INP_WUNLOCK(inp); goto out; } cru2x(inp->sctp_socket->so_cred, &xuc); SCTP_INP_WUNLOCK(inp); error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); out: return (error); } SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); #ifdef INET static void sctp_abort(struct socket *so) { struct sctp_inpcb *inp; uint32_t flags; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { return; } sctp_must_try_again: flags = inp->sctp_flags; #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 17); #endif if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 16); #endif sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); SOCK_LOCK(so); SCTP_SB_CLEAR(so->so_snd); /* * same for the rcv ones, they are only here for the * accounting/select. */ SCTP_SB_CLEAR(so->so_rcv); /* Now null out the reference, we are completely detached. 
*/ so->so_pcb = NULL; SOCK_UNLOCK(so); } else { flags = inp->sctp_flags; if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { goto sctp_must_try_again; } } return; } static int sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED) { struct sctp_inpcb *inp; struct inpcb *ip_inp; int error; uint32_t vrf_id = SCTP_DEFAULT_VRFID; inp = (struct sctp_inpcb *)so->so_pcb; if (inp != NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace)); if (error) { return (error); } } error = sctp_inpcb_alloc(so, vrf_id); if (error) { return (error); } inp = (struct sctp_inpcb *)so->so_pcb; SCTP_INP_WLOCK(inp); inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */ ip_inp = &inp->ip_inp.inp; ip_inp->inp_vflag |= INP_IPV4; ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl); SCTP_INP_WUNLOCK(inp); return (0); } static int sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) { struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if (addr != NULL) { if ((addr->sa_family != AF_INET) || (addr->sa_len != sizeof(struct sockaddr_in))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } } return (sctp_inpcb_bind(so, addr, NULL, p)); } #endif void sctp_close(struct socket *so) { struct sctp_inpcb *inp; uint32_t flags; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) return; /* * Inform all the lower layer assoc that we are done. */ sctp_must_try_again: flags = inp->sctp_flags; #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 17); #endif if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || (so->so_rcv.sb_cc > 0)) { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 13); #endif sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); } else { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 14); #endif sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); } /* * The socket is now detached, no matter what the state of * the SCTP association. */ SOCK_LOCK(so); SCTP_SB_CLEAR(so->so_snd); /* * same for the rcv ones, they are only here for the * accounting/select. */ SCTP_SB_CLEAR(so->so_rcv); /* Now null out the reference, we are completely detached. 
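 *
 * (From user space, the abortive branch above is selected with the
 * standard linger knob -- a sketch:)
 */
#if 0	/* illustrative only */
#include <sys/socket.h>

static int
set_abortive_close(int sd)
{
	struct linger l;

	l.l_onoff = 1;	/* linger enabled ... */
	l.l_linger = 0;	/* ... zero timeout: close() ABORTs the assoc */
	return (setsockopt(sd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)));
}
#endif
/*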
*/ so->so_pcb = NULL; SOCK_UNLOCK(so); } else { flags = inp->sctp_flags; if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { goto sctp_must_try_again; } } return; } int sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p); int sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p) { struct sctp_inpcb *inp; int error; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { if (control) { sctp_m_freem(control); control = NULL; } SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); sctp_m_freem(m); return (EINVAL); } /* Got to have an to address if we are NOT a connected socket */ if ((addr == NULL) && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) { goto connected_type; } else if (addr == NULL) { SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); error = EDESTADDRREQ; sctp_m_freem(m); if (control) { sctp_m_freem(control); control = NULL; } return (error); } #ifdef INET6 if (addr->sa_family != AF_INET) { /* must be a v4 address! */ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); sctp_m_freem(m); if (control) { sctp_m_freem(control); control = NULL; } error = EDESTADDRREQ; return (error); } #endif /* INET6 */ connected_type: /* now what about control */ if (control) { if (inp->control) { sctp_m_freem(inp->control); inp->control = NULL; } inp->control = control; } /* Place the data */ if (inp->pkt) { SCTP_BUF_NEXT(inp->pkt_last) = m; inp->pkt_last = m; } else { inp->pkt_last = inp->pkt = m; } if ( /* FreeBSD uses a flag passed */ ((flags & PRUS_MORETOCOME) == 0) ) { /* * note with the current version this code will only be used * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for * re-defining sosend to use the sctp_sosend. One can * optionally switch back to this code (by changing back the * definitions) but this is not advisable. This code is used * by FreeBSD when sending a file with sendfile() though. 
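 *
 * A user-space sketch of that path (FreeBSD sendfile(2) signature;
 * assumes "fd" is a regular file and "sd" a connected one-to-one SCTP
 * socket):
 */
#if 0	/* illustrative only */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
send_file_over_sctp(int fd, int sd, size_t nbytes)
{
	off_t sbytes;

	return (sendfile(fd, sd, 0, nbytes, NULL, &sbytes, 0));
}
#endif
/*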
*/ int ret; ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); inp->pkt = NULL; inp->control = NULL; return (ret); } else { return (0); } } int sctp_disconnect(struct socket *so) { struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); return (ENOTCONN); } SCTP_INP_RLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { if (LIST_EMPTY(&inp->sctp_asoc_list)) { /* No connection */ SCTP_INP_RUNLOCK(inp); return (0); } else { struct sctp_association *asoc; struct sctp_tcb *stcb; stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } SCTP_TCB_LOCK(stcb); asoc = &stcb->asoc; if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { /* We are about to be freed, out of here */ SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) || (so->so_rcv.sb_cc > 0)) { if (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) { /* Left with Data unread */ struct mbuf *op_err; op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); SCTP_STAT_INCR_COUNTER32(sctps_aborted); } SCTP_INP_RUNLOCK(inp); if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); /* No unlock tcb assoc is gone */ return (0); } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->stream_queue_cnt == 0)) { /* there is nothing queued to send, so done */ if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { goto abort_anyway; } if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { /* only send SHUTDOWN 1st time thru */ struct sctp_nets *netp; if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); sctp_stop_timers_for_shutdown(stcb); if (stcb->asoc.alternate) { netp = stcb->asoc.alternate; } else { netp = stcb->asoc.primary_destination; } sctp_send_shutdown(stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, netp); sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); } } else { /* * we still got (or just got) data to send, * so set SHUTDOWN_PENDING */ /* * XXX sockets draft says that SCTP_EOF * should be sent with no data. 
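 *
 * (Sketch of the RFC 6458 way to do it: a zero-length send carrying
 * the SCTP_EOF flag starts a graceful SHUTDOWN of one association:)
 */
#if 0	/* illustrative only */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static ssize_t
send_eof(int sd, const struct sockaddr *to, socklen_t tolen)
{
	return (sctp_sendmsg(sd, NULL, 0, to, tolen, 0, SCTP_EOF,
	    0, 0, 0));
}
#endif
/*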
currently, * we will allow user data to be sent first * and move to SHUTDOWN-PENDING */ struct sctp_nets *netp; if (stcb->asoc.alternate) { netp = stcb->asoc.alternate; } else { netp = stcb->asoc.primary_destination; } SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, netp); if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; abort_anyway: op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4; sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); SCTP_STAT_INCR_COUNTER32(sctps_aborted); if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { SCTP_STAT_DECR_GAUGE32(sctps_currestab); } SCTP_INP_RUNLOCK(inp); (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); return (0); } else { sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); } } soisdisconnecting(so); SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } /* not reached */ } else { /* UDP model does not support this */ SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); return (EOPNOTSUPP); } } int sctp_flush(struct socket *so, int how) { /* * We will just clear out the values and let subsequent close clear * out the data, if any. Note if the user did a shutdown(SHUT_RD) * they will not be able to read the data, the socket will block * that from happening. */ struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } SCTP_INP_RLOCK(inp); /* For the 1 to many model this does nothing */ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { SCTP_INP_RUNLOCK(inp); return (0); } SCTP_INP_RUNLOCK(inp); if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) { /* * First make sure the sb will be happy, we don't use these * except maybe the count */ SCTP_INP_WLOCK(inp); SCTP_INP_READ_LOCK(inp); inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ; SCTP_INP_READ_UNLOCK(inp); SCTP_INP_WUNLOCK(inp); so->so_rcv.sb_cc = 0; so->so_rcv.sb_mbcnt = 0; so->so_rcv.sb_mb = NULL; } if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) { /* * First make sure the sb will be happy, we don't use these * except maybe the count */ so->so_snd.sb_cc = 0; so->so_snd.sb_mbcnt = 0; so->so_snd.sb_mb = NULL; } return (0); } int sctp_shutdown(struct socket *so) { struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } SCTP_INP_RLOCK(inp); /* For UDP model this is a invalid call */ if (!((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { /* Restore the flags that the soshutdown took away. */ SOCKBUF_LOCK(&so->so_rcv); so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; SOCKBUF_UNLOCK(&so->so_rcv); /* This proc will wakeup for read and do nothing (I hope) */ SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); return (EOPNOTSUPP); } else { /* * Ok, if we reach here its the TCP model and it is either a * SHUT_WR or SHUT_RDWR. 
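 */
#if 0	/* illustrative only: the user-space call that lands us here */
#include <sys/socket.h>

static int
half_close(int sd)
{
	/* on a one-to-one SCTP socket this kicks off SHUTDOWN */
	return (shutdown(sd, SHUT_WR));
}
#endif
/*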
This means we put the shutdown flag * against it. */ struct sctp_tcb *stcb; struct sctp_association *asoc; struct sctp_nets *netp; if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { SCTP_INP_RUNLOCK(inp); return (ENOTCONN); } socantsendmore(so); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { /* * Ok, we hit the case that the shutdown call was * made after an abort or something. Nothing to do * now. */ SCTP_INP_RUNLOCK(inp); return (0); } SCTP_TCB_LOCK(stcb); asoc = &stcb->asoc; if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } if ((SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) && (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_ECHOED) && (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN)) { /* * If we are not in or before ESTABLISHED, there is * no protocol action required. */ SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } if (stcb->asoc.alternate) { netp = stcb->asoc.alternate; } else { netp = stcb->asoc.primary_destination; } if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) && TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->stream_queue_cnt == 0)) { if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { goto abort_anyway; } /* there is nothing queued to send, so I'm done... */ SCTP_STAT_DECR_GAUGE32(sctps_currestab); SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); sctp_stop_timers_for_shutdown(stcb); sctp_send_shutdown(stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp); } else { /* * We still got (or just got) data to send, so set * SHUTDOWN_PENDING. */ SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); } if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { struct mbuf *op_err; abort_anyway: op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; SCTP_INP_RUNLOCK(inp); sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_LOCKED); return (0); } } sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, netp); /* * XXX: Why do this in the case where we have still data * queued? */ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); } } /* * copies a "user" presentable address and removes embedded scope, etc. 
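 * (KAME kernels embed the scope id in bytes 2 and 3 of a link-local
 * address, so the kernel-internal form fe80:4::1 surfaces to the user
 * as fe80::1 with sin6_scope_id = 4 after sctp_recover_scope().)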
* returns 0 on success, 1 on error */ static uint32_t sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) { #ifdef INET6 struct sockaddr_in6 lsa6; sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, &lsa6); #endif memcpy(ss, sa, sa->sa_len); return (0); } /* * NOTE: assumes addr lock is held */ static size_t sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, struct sctp_tcb *stcb, size_t limit, struct sockaddr_storage *sas, uint32_t vrf_id) { struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa; size_t actual; int loopback_scope; #if defined(INET) int ipv4_local_scope, ipv4_addr_legal; #endif #if defined(INET6) int local_scope, site_scope, ipv6_addr_legal; #endif struct sctp_vrf *vrf; actual = 0; if (limit <= 0) return (actual); if (stcb) { /* Turn on all the appropriate scope */ loopback_scope = stcb->asoc.scope.loopback_scope; #if defined(INET) ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; #endif #if defined(INET6) local_scope = stcb->asoc.scope.local_scope; site_scope = stcb->asoc.scope.site_scope; ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; #endif } else { /* Use generic values for endpoints. */ loopback_scope = 1; #if defined(INET) ipv4_local_scope = 1; #endif #if defined(INET6) local_scope = 1; site_scope = 1; #endif if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { #if defined(INET6) ipv6_addr_legal = 1; #endif #if defined(INET) if (SCTP_IPV6_V6ONLY(inp)) { ipv4_addr_legal = 0; } else { ipv4_addr_legal = 1; } #endif } else { #if defined(INET6) ipv6_addr_legal = 0; #endif #if defined(INET) ipv4_addr_legal = 1; #endif } } vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { return (0); } if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { if ((loopback_scope == 0) && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { /* Skip loopback if loopback_scope not set */ continue; } LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { if (stcb) { /* * For the BOUND-ALL case, the list * associated with a TCB is Always * considered a reverse list.. i.e. * it lists addresses that are NOT * part of the association. If this * is one of those we must skip it. 
*/ if (sctp_is_addr_restricted(stcb, sctp_ifa)) { continue; } } switch (sctp_ifa->address.sa.sa_family) { #ifdef INET case AF_INET: if (ipv4_addr_legal) { struct sockaddr_in *sin; sin = &sctp_ifa->address.sin; if (sin->sin_addr.s_addr == 0) { /* * we skip * unspecified * addresses */ continue; } if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; } if ((ipv4_local_scope == 0) && (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { continue; } #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { if (actual + sizeof(struct sockaddr_in6) > limit) { return (actual); } in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); actual += sizeof(struct sockaddr_in6); } else { #endif if (actual + sizeof(struct sockaddr_in) > limit) { return (actual); } memcpy(sas, sin, sizeof(struct sockaddr_in)); ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in)); actual += sizeof(struct sockaddr_in); #ifdef INET6 } #endif } else { continue; } break; #endif #ifdef INET6 case AF_INET6: if (ipv6_addr_legal) { struct sockaddr_in6 *sin6; sin6 = &sctp_ifa->address.sin6; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { /* * we skip * unspecified * addresses */ continue; } if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; } if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { if (local_scope == 0) continue; if (sin6->sin6_scope_id == 0) { if (sa6_recoverscope(sin6) != 0) /* * bad link local address */ continue; } } if ((site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { continue; } if (actual + sizeof(struct sockaddr_in6) > limit) { return (actual); } memcpy(sas, sin6, sizeof(struct sockaddr_in6)); ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); actual += sizeof(struct sockaddr_in6); } else { continue; } break; #endif default: /* TSNH */ break; } } } } else { struct sctp_laddr *laddr; size_t sa_len; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (stcb) { if (sctp_is_addr_restricted(stcb, laddr->ifa)) { continue; } } sa_len = laddr->ifa->address.sa.sa_len; if (actual + sa_len > limit) { return (actual); } if (sctp_fill_user_address(sas, &laddr->ifa->address.sa)) continue; switch (laddr->ifa->address.sa.sa_family) { #ifdef INET case AF_INET: ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; break; #endif #ifdef INET6 case AF_INET6: ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; break; #endif default: /* TSNH */ break; } sas = (struct sockaddr_storage *)((caddr_t)sas + sa_len); actual += sa_len; } } return (actual); } static size_t sctp_fill_up_addresses(struct sctp_inpcb *inp, struct sctp_tcb *stcb, size_t limit, struct sockaddr_storage *sas) { size_t size = 0; SCTP_IPI_ADDR_RLOCK(); /* fill up addresses for the endpoint's default vrf */ size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, inp->def_vrf_id); SCTP_IPI_ADDR_RUNLOCK(); return (size); } /* * NOTE: assumes addr lock is held */ static int sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) { int cnt = 0; struct sctp_vrf *vrf = NULL; /* * In both sub-set bound and bound_all cases we return the MAXIMUM * number of addresses that you COULD get.
In reality the sub-set * bound may have an exclusion list for a given TCB OR in the * bound-all case a TCB may NOT include the loopback or other * addresses as well. */ vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { return (0); } if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa; LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { /* Count them if they are the right type */ switch (sctp_ifa->address.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) cnt += sizeof(struct sockaddr_in6); else cnt += sizeof(struct sockaddr_in); #else cnt += sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6: cnt += sizeof(struct sockaddr_in6); break; #endif default: break; } } } } else { struct sctp_laddr *laddr; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { switch (laddr->ifa->address.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) cnt += sizeof(struct sockaddr_in6); else cnt += sizeof(struct sockaddr_in); #else cnt += sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6: cnt += sizeof(struct sockaddr_in6); break; #endif default: break; } } } return (cnt); } static int sctp_count_max_addresses(struct sctp_inpcb *inp) { int cnt = 0; SCTP_IPI_ADDR_RLOCK(); /* count addresses for the endpoint's default VRF */ cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id); SCTP_IPI_ADDR_RUNLOCK(); return (cnt); } static int sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, size_t optsize, void *p, int delay) { int error; int creat_lock_on = 0; struct sctp_tcb *stcb = NULL; struct sockaddr *sa; unsigned int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr; uint32_t vrf_id; sctp_assoc_t *a_id; SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n"); if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); return (EADDRINUSE); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); SCTP_INP_RUNLOCK(inp); } if (stcb) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); return (EALREADY); } SCTP_INP_INCR_REF(inp); SCTP_ASOC_CREATE_LOCK(inp); creat_lock_on = 1; if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); error = EFAULT; goto out_now; } totaddrp = (unsigned int *)optval; totaddr = *totaddrp; sa = (struct sockaddr *)(totaddrp + 1); error = sctp_connectx_helper_find(inp, sa, totaddr, &num_v4, &num_v6, (unsigned int)(optsize - sizeof(int))); if (error != 0) { /* Already have or am bringing up an association */ SCTP_ASOC_CREATE_UNLOCK(inp); creat_lock_on = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); goto out_now; } #ifdef INET6 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && (num_v6 > 0)) { error = EINVAL; goto out_now; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && (num_v4 > 0)) { - if (SCTP_IPV6_V6ONLY(inp)) { /* * if
IPV6_V6ONLY flag, ignore connections destined * to a v4 addr or v4-mapped addr */ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_now; } } #endif /* INET6 */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == SCTP_PCB_FLAGS_UNBOUND) { /* Bind a ephemeral port */ error = sctp_inpcb_bind(so, NULL, NULL, p); if (error) { goto out_now; } } /* FIX ME: do we want to pass in a vrf on the connect call? */ vrf_id = inp->def_vrf_id; /* We are GOOD to go */ stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id, inp->sctp_ep.pre_open_stream_count, inp->sctp_ep.port, (struct thread *)p, SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Gak! no memory */ goto out_now; } if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; /* Set the connected flag so we can queue data */ soisconnecting(so); } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); /* move to second address */ switch (sa->sa_family) { #ifdef INET case AF_INET: sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in)); break; #endif #ifdef INET6 case AF_INET6: sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6)); break; #endif default: break; } error = 0; sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error); /* Fill in the return id */ if (error) { goto out_now; } a_id = (sctp_assoc_t *)optval; *a_id = sctp_get_associd(stcb); if (delay) { /* doing delayed connection */ stcb->asoc.delayed_connection = 1; sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination); } else { (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); } SCTP_TCB_UNLOCK(stcb); out_now: if (creat_lock_on) { SCTP_ASOC_CREATE_UNLOCK(inp); } SCTP_INP_DECR_REF(inp); return (error); } #define SCTP_FIND_STCB(inp, stcb, assoc_id) { \ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \ SCTP_INP_RLOCK(inp); \ stcb = LIST_FIRST(&inp->sctp_asoc_list); \ if (stcb) { \ SCTP_TCB_LOCK(stcb); \ } \ SCTP_INP_RUNLOCK(inp); \ } else if (assoc_id > SCTP_ALL_ASSOC) { \ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \ if (stcb == NULL) { \ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \ error = ENOENT; \ break; \ } \ } else { \ stcb = NULL; \ } \ } #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\ if (size < sizeof(type)) { \ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \ error = EINVAL; \ break; \ } else { \ destp = (type *)srcp; \ } \ } static int sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, void *p) { struct sctp_inpcb *inp = NULL; int error, val = 0; struct sctp_tcb *stcb = NULL; if (optval == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return EINVAL; } error = 0; switch (optname) { case SCTP_NODELAY: case SCTP_AUTOCLOSE: case SCTP_EXPLICIT_EOR: case SCTP_AUTO_ASCONF: case SCTP_DISABLE_FRAGMENTS: case SCTP_I_WANT_MAPPED_V4_ADDR: case SCTP_USE_EXT_RCVINFO: SCTP_INP_RLOCK(inp); switch (optname) { case SCTP_DISABLE_FRAGMENTS: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT); break; case SCTP_I_WANT_MAPPED_V4_ADDR: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4); break; case SCTP_AUTO_ASCONF: if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* only valid for 
bound all sockets */ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto flags_out; } break; case SCTP_EXPLICIT_EOR: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); break; case SCTP_NODELAY: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY); break; case SCTP_USE_EXT_RCVINFO: val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO); break; case SCTP_AUTOCLOSE: if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); else val = 0; break; default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; } /* end switch (sopt->sopt_name) */ if (*optsize < sizeof(val)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } flags_out: SCTP_INP_RUNLOCK(inp); if (error == 0) { /* return the option value */ *(int *)optval = val; *optsize = sizeof(val); } break; case SCTP_GET_PACKET_LOG: { #ifdef SCTP_PACKET_LOGGING uint8_t *target; int ret; SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize); ret = sctp_copy_out_packet_log(target, (int)*optsize); *optsize = ret; #else SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; #endif break; } case SCTP_REUSE_PORT: { uint32_t *value; if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { /* Can't do this for a 1-m socket */ error = EINVAL; break; } SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE); *optsize = sizeof(uint32_t); break; } case SCTP_PARTIAL_DELIVERY_POINT: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); *value = inp->partial_delivery_point; *optsize = sizeof(uint32_t); break; } case SCTP_FRAGMENT_INTERLEAVE: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) { if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) { *value = SCTP_FRAG_LEVEL_2; } else { *value = SCTP_FRAG_LEVEL_1; } } else { *value = SCTP_FRAG_LEVEL_0; } *optsize = sizeof(uint32_t); break; } case SCTP_INTERLEAVING_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.idata_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); if (inp->idata_supported) { av->assoc_value = 1; } else { av->assoc_value = 0; } SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_CMT_ON_OFF: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.sctp_cmt_on_off; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_cmt_on_off; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case 
SCTP_PLUGGABLE_CC: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.congestion_control_module; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_ep.sctp_default_cc_module; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_CC_OPTION: { struct sctp_cc_option *cc_opt; SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, *optsize); SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id); if (stcb == NULL) { error = EINVAL; } else { if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) { error = ENOTSUP; } else { error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 0, cc_opt); *optsize = sizeof(struct sctp_cc_option); } SCTP_TCB_UNLOCK(stcb); } break; } case SCTP_PLUGGABLE_SS: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.stream_scheduling_module; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_ep.sctp_default_ss_module; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_SS_VALUE: { struct sctp_stream_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { if ((av->stream_id >= stcb->asoc.streamoutcnt) || (stcb->asoc.ss_functions.sctp_ss_get_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], &av->stream_value) < 0)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { *optsize = sizeof(struct sctp_stream_value); } SCTP_TCB_UNLOCK(stcb); } else { /* * Can't get stream value without * association */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_GET_ADDR_LEN: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); error = EINVAL; #ifdef INET if (av->assoc_value == AF_INET) { av->assoc_value = sizeof(struct sockaddr_in); error = 0; } #endif #ifdef INET6 if (av->assoc_value == AF_INET6) { av->assoc_value = sizeof(struct sockaddr_in6); error = 0; } #endif if (error) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_GET_ASSOC_NUMBER: { uint32_t *value, cnt; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); SCTP_INP_RLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* Can't do this for a 1-1 socket */ error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } cnt = 0; LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { cnt++; } SCTP_INP_RUNLOCK(inp); *value = cnt; *optsize = sizeof(uint32_t); break; } case SCTP_GET_ASSOC_ID_LIST: { struct sctp_assoc_ids *ids; uint32_t at; size_t limit; 
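/*
 * Caller-side sketch of this getter pair (user-space; "sd" is an
 * assumed one-to-many SCTP socket; includes and error handling
 * trimmed; racy if associations come or go between the two calls):
 */
#if 0	/* illustrative only */
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static struct sctp_assoc_ids *
fetch_assoc_ids(int sd)
{
	uint32_t n;
	socklen_t len = sizeof(n);
	struct sctp_assoc_ids *ids;

	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &n, &len) < 0)
		return (NULL);
	len = (socklen_t)(sizeof(uint32_t) + n * sizeof(sctp_assoc_t));
	if ((ids = malloc(len)) == NULL)
		return (NULL);
	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &len) < 0) {
		free(ids);
		return (NULL);
	}
	/* ids->gaids_number_of_ids entries in ids->gaids_assoc_id[] */
	return (ids);
}
#endif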
SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize); SCTP_INP_RLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* Can't do this for a 1-1 socket */ error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } at = 0; limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { if (at < limit) { ids->gaids_assoc_id[at++] = sctp_get_associd(stcb); if (at == 0) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } SCTP_INP_RUNLOCK(inp); if (error == 0) { ids->gaids_number_of_ids = at; *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t)); } break; } case SCTP_CONTEXT: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.context; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_context; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_VRF_ID: { uint32_t *default_vrfid; SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize); *default_vrfid = inp->def_vrf_id; *optsize = sizeof(uint32_t); break; } case SCTP_GET_ASOC_VRF: { struct sctp_assoc_value *id; SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, id->assoc_id); if (stcb == NULL) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { id->assoc_value = stcb->asoc.vrf_id; SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_GET_VRF_IDS: { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; break; } case SCTP_GET_NONCE_VALUES: { struct sctp_get_nonce_values *gnv; SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize); SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id); if (stcb) { gnv->gn_peers_tag = stcb->asoc.peer_vtag; gnv->gn_local_tag = stcb->asoc.my_vtag; SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_get_nonce_values); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); error = ENOTCONN; } break; } case SCTP_DELAYED_SACK: { struct sctp_sack_info *sack; SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize); SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); if (stcb) { sack->sack_delay = stcb->asoc.delayed_ack; sack->sack_freq = stcb->asoc.sack_freq; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (sack->sack_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); sack->sack_freq = inp->sctp_ep.sctp_sack_freq; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_sack_info); } break; } case SCTP_GET_SNDBUF_USE: { struct sctp_sockstat *ss; SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize); SCTP_FIND_STCB(inp, stcb, 
ss->ss_assoc_id); if (stcb) { ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size; ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue + stcb->asoc.size_on_all_streams); SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_sockstat); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); error = ENOTCONN; } break; } case SCTP_MAX_BURST: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.max_burst; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->sctp_ep.max_burst; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_MAXSEG: { struct sctp_assoc_value *av; int ovh; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { ovh = SCTP_MED_OVERHEAD; } else { ovh = SCTP_MED_V4_OVERHEAD; } if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT) av->assoc_value = 0; else av->assoc_value = inp->sctp_frag_point - ovh; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_GET_STAT_LOG: error = sctp_fill_stat_log(optval, optsize); break; case SCTP_EVENTS: { struct sctp_event_subscribe *events; SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize); memset(events, 0, sizeof(struct sctp_event_subscribe)); SCTP_INP_RLOCK(inp); if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) events->sctp_data_io_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT)) events->sctp_association_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT)) events->sctp_address_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) events->sctp_send_failure_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR)) events->sctp_peer_error_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) events->sctp_shutdown_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) events->sctp_partial_delivery_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) events->sctp_adaptation_layer_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT)) events->sctp_authentication_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT)) events->sctp_sender_dry_event = 1; if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) events->sctp_stream_reset_event = 1; SCTP_INP_RUNLOCK(inp); *optsize = sizeof(struct sctp_event_subscribe); break; } case SCTP_ADAPTATION_LAYER: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); SCTP_INP_RLOCK(inp); *value = inp->sctp_ep.adaptation_layer_indicator; SCTP_INP_RUNLOCK(inp); *optsize = sizeof(uint32_t); break; } case SCTP_SET_INITIAL_DBG_SEQ: { uint32_t *value; 
SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); SCTP_INP_RLOCK(inp); *value = inp->sctp_ep.initial_sequence_debug; SCTP_INP_RUNLOCK(inp); *optsize = sizeof(uint32_t); break; } case SCTP_GET_LOCAL_ADDR_SIZE: { uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); SCTP_INP_RLOCK(inp); *value = sctp_count_max_addresses(inp); SCTP_INP_RUNLOCK(inp); *optsize = sizeof(uint32_t); break; } case SCTP_GET_REMOTE_ADDR_SIZE: { uint32_t *value; size_t size; struct sctp_nets *net; SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize); /* FIXME MT: change to sctp_assoc_value? */ SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t)*value); if (stcb) { size = 0; /* Count the sizes */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { size += sizeof(struct sockaddr_in6); } else { size += sizeof(struct sockaddr_in); } #else size += sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6: size += sizeof(struct sockaddr_in6); break; #endif default: break; } } SCTP_TCB_UNLOCK(stcb); *value = (uint32_t)size; *optsize = sizeof(uint32_t); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); error = ENOTCONN; } break; } case SCTP_GET_PEER_ADDRESSES: /* * Get the address information, an array is passed in to * fill up we pack it. */ { size_t cpsz, left; struct sockaddr_storage *sas; struct sctp_nets *net; struct sctp_getaddresses *saddr; SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); if (stcb) { left = (*optsize) - sizeof(sctp_assoc_t); *optsize = sizeof(sctp_assoc_t); sas = (struct sockaddr_storage *)&saddr->addr[0]; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { cpsz = sizeof(struct sockaddr_in6); } else { cpsz = sizeof(struct sockaddr_in); } #else cpsz = sizeof(struct sockaddr_in); #endif break; #endif #ifdef INET6 case AF_INET6: cpsz = sizeof(struct sockaddr_in6); break; #endif default: cpsz = 0; break; } if (cpsz == 0) { break; } if (left < cpsz) { /* not enough room. 
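 */
#if 0	/* caller-side sketch (roughly what sctp_getpaddrs(3) does);
	 * "sd" and "assoc_id" are assumed caller variables */
uint32_t sz;
socklen_t len;
struct sctp_getaddresses *addrs;

sz = (uint32_t)assoc_id;	/* association id in ... */
len = sizeof(sz);
if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_REMOTE_ADDR_SIZE, &sz, &len) == 0) {
	/* ... total sockaddr bytes out, so this buffer cannot run short */
	len = (socklen_t)(sizeof(sctp_assoc_t) + sz);
	addrs = malloc(len);
	addrs->sget_assoc_id = assoc_id;
	(void)getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRESSES, addrs, &len);
}
#endif
/*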
						 */
						break;
					}
#if defined(INET) && defined(INET6)
					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
					    (net->ro._l_addr.sa.sa_family == AF_INET)) {
						/* Must map the address */
						in6_sin_2_v4mapsin6(&net->ro._l_addr.sin,
						    (struct sockaddr_in6 *)sas);
					} else {
						memcpy(sas, &net->ro._l_addr, cpsz);
					}
#else
					memcpy(sas, &net->ro._l_addr, cpsz);
#endif
					((struct sockaddr_in *)sas)->sin_port = stcb->rport;
					sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
					left -= cpsz;
					*optsize += cpsz;
				}
				SCTP_TCB_UNLOCK(stcb);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
				error = ENOENT;
			}
			break;
		}
	case SCTP_GET_LOCAL_ADDRESSES:
		{
			size_t limit, actual;
			struct sockaddr_storage *sas;
			struct sctp_getaddresses *saddr;

			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
			sas = (struct sockaddr_storage *)&saddr->addr[0];
			limit = *optsize - sizeof(sctp_assoc_t);
			actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
			if (stcb) {
				SCTP_TCB_UNLOCK(stcb);
			}
			*optsize = sizeof(sctp_assoc_t) + actual;
			break;
		}
	case SCTP_PEER_ADDR_PARAMS:
		{
			struct sctp_paddrparams *paddrp;
			struct sctp_nets *net;
			struct sockaddr *addr;
#if defined(INET) && defined(INET6)
			struct sockaddr_in sin_store;
#endif

			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
#if defined(INET) && defined(INET6)
			if (paddrp->spp_address.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)&paddrp->spp_address;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					in6_sin6_2_sin(&sin_store, sin6);
					addr = (struct sockaddr *)&sin_store;
				} else {
					addr = (struct sockaddr *)&paddrp->spp_address;
				}
			} else {
				addr = (struct sockaddr *)&paddrp->spp_address;
			}
#else
			addr = (struct sockaddr *)&paddrp->spp_address;
#endif
			if (stcb != NULL) {
				net = sctp_findnet(stcb, addr);
			} else {
				/*
				 * We increment here since
				 * sctp_findassociation_ep_addr() will do a
				 * decrement if it finds the stcb as long as
				 * the locked tcb (last argument) is NOT a
				 * TCB, aka NULL.
*/ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (stcb != NULL) { /* Applies to the specific association */ paddrp->spp_flags = 0; if (net != NULL) { paddrp->spp_hbinterval = net->heart_beat_delay; paddrp->spp_pathmaxrxt = net->failure_threshold; paddrp->spp_pathmtu = net->mtu; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: paddrp->spp_pathmtu -= SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: paddrp->spp_pathmtu -= SCTP_MIN_OVERHEAD; break; #endif default: break; } /* get flags for HB */ if (net->dest_state & SCTP_ADDR_NOHB) { paddrp->spp_flags |= SPP_HB_DISABLE; } else { paddrp->spp_flags |= SPP_HB_ENABLE; } /* get flags for PMTU */ if (net->dest_state & SCTP_ADDR_NO_PMTUD) { paddrp->spp_flags |= SPP_PMTUD_DISABLE; } else { paddrp->spp_flags |= SPP_PMTUD_ENABLE; } if (net->dscp & 0x01) { paddrp->spp_dscp = net->dscp & 0xfc; paddrp->spp_flags |= SPP_DSCP; } #ifdef INET6 if ((net->ro._l_addr.sa.sa_family == AF_INET6) && (net->flowlabel & 0x80000000)) { paddrp->spp_ipv6_flowlabel = net->flowlabel & 0x000fffff; paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; } #endif } else { /* * No destination so return default * value */ paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure; paddrp->spp_pathmtu = stcb->asoc.default_mtu; if (stcb->asoc.default_dscp & 0x01) { paddrp->spp_dscp = stcb->asoc.default_dscp & 0xfc; paddrp->spp_flags |= SPP_DSCP; } #ifdef INET6 if (stcb->asoc.default_flowlabel & 0x80000000) { paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel & 0x000fffff; paddrp->spp_flags |= SPP_IPV6_FLOWLABEL; } #endif /* default settings should be these */ if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { paddrp->spp_flags |= SPP_HB_DISABLE; } else { paddrp->spp_flags |= SPP_HB_ENABLE; } if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) { paddrp->spp_flags |= SPP_PMTUD_DISABLE; } else { paddrp->spp_flags |= SPP_PMTUD_ENABLE; } paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay; } paddrp->spp_assoc_id = sctp_get_associd(stcb); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (paddrp->spp_assoc_id == SCTP_FUTURE_ASSOC)) { /* Use endpoint defaults */ SCTP_INP_RLOCK(inp); paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); paddrp->spp_assoc_id = SCTP_FUTURE_ASSOC; /* get inp's default */ if (inp->sctp_ep.default_dscp & 0x01) { paddrp->spp_dscp = inp->sctp_ep.default_dscp & 0xfc; paddrp->spp_flags |= SPP_DSCP; } #ifdef INET6 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && (inp->sctp_ep.default_flowlabel & 0x80000000)) 
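					/*
					 * Bit 0x80000000 in the stored
					 * flowlabel only marks "a value has
					 * been set"; the low 20 bits
					 * (0x000fffff) carry the actual IPv6
					 * flow label reported to the caller.
					 */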
					{
						paddrp->spp_ipv6_flowlabel = inp->sctp_ep.default_flowlabel & 0x000fffff;
						paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
					}
#endif
					paddrp->spp_pathmtu = inp->sctp_ep.default_mtu;
					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
						paddrp->spp_flags |= SPP_HB_ENABLE;
					} else {
						paddrp->spp_flags |= SPP_HB_DISABLE;
					}
					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) {
						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
					} else {
						paddrp->spp_flags |= SPP_PMTUD_DISABLE;
					}
					SCTP_INP_RUNLOCK(inp);
				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
				}
			}
			if (error == 0) {
				*optsize = sizeof(struct sctp_paddrparams);
			}
			break;
		}
	case SCTP_GET_PEER_ADDR_INFO:
		{
			struct sctp_paddrinfo *paddri;
			struct sctp_nets *net;
			struct sockaddr *addr;
#if defined(INET) && defined(INET6)
			struct sockaddr_in sin_store;
#endif

			SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
			SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
#if defined(INET) && defined(INET6)
			if (paddri->spinfo_address.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)&paddri->spinfo_address;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					in6_sin6_2_sin(&sin_store, sin6);
					addr = (struct sockaddr *)&sin_store;
				} else {
					addr = (struct sockaddr *)&paddri->spinfo_address;
				}
			} else {
				addr = (struct sockaddr *)&paddri->spinfo_address;
			}
#else
			addr = (struct sockaddr *)&paddri->spinfo_address;
#endif
			if (stcb != NULL) {
				net = sctp_findnet(stcb, addr);
			} else {
				/*
				 * We increment here since
				 * sctp_findassociation_ep_addr() will do a
				 * decrement if it finds the stcb as long as
				 * the locked tcb (last argument) is NOT a
				 * TCB, aka NULL.
				 */
				net = NULL;
				SCTP_INP_INCR_REF(inp);
				stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
				if (stcb == NULL) {
					SCTP_INP_DECR_REF(inp);
				}
			}
			if ((stcb != NULL) && (net != NULL)) {
				if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
					/* It's unconfirmed */
					paddri->spinfo_state = SCTP_UNCONFIRMED;
				} else if (net->dest_state & SCTP_ADDR_REACHABLE) {
					/* It's active */
					paddri->spinfo_state = SCTP_ACTIVE;
				} else {
					/* It's inactive */
					paddri->spinfo_state = SCTP_INACTIVE;
				}
				paddri->spinfo_cwnd = net->cwnd;
				paddri->spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT;
				paddri->spinfo_rto = net->RTO;
				paddri->spinfo_assoc_id = sctp_get_associd(stcb);
				paddri->spinfo_mtu = net->mtu;
				switch (addr->sa_family) {
#if defined(INET)
				case AF_INET:
					paddri->spinfo_mtu -= SCTP_MIN_V4_OVERHEAD;
					break;
#endif
#if defined(INET6)
				case AF_INET6:
					paddri->spinfo_mtu -= SCTP_MIN_OVERHEAD;
					break;
#endif
				default:
					break;
				}
				SCTP_TCB_UNLOCK(stcb);
				*optsize = sizeof(struct sctp_paddrinfo);
			} else {
				if (stcb != NULL) {
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
				error = ENOENT;
			}
			break;
		}
	case SCTP_PCB_STATUS:
		{
			struct sctp_pcbinfo *spcb;

			SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
			sctp_fill_pcbinfo(spcb);
			*optsize = sizeof(struct sctp_pcbinfo);
			break;
		}
	case SCTP_STATUS:
		{
			struct sctp_nets *net;
			struct sctp_status *sstat;

			SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
			SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
			if (stcb == NULL) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				error = EINVAL;
				break;
			}
			sstat->sstat_state = sctp_map_assoc_state(stcb->asoc.state);
			sstat->sstat_assoc_id = sctp_get_associd(stcb);
			sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
			sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
			/*
			 * We can't include chunks that have been passed to
			 * the
socket layer. Only things in queue. */ sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + stcb->asoc.cnt_on_all_streams); sstat->sstat_instrms = stcb->asoc.streamincnt; sstat->sstat_outstrms = stcb->asoc.streamoutcnt; sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); net = stcb->asoc.primary_destination; if (net != NULL) { memcpy(&sstat->sstat_primary.spinfo_address, &stcb->asoc.primary_destination->ro._l_addr, ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; /* * Again the user can get info from * sctp_constants.h for what the state of * the network is. */ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { /* It's unconfirmed */ sstat->sstat_primary.spinfo_state = SCTP_UNCONFIRMED; } else if (net->dest_state & SCTP_ADDR_REACHABLE) { /* It's active */ sstat->sstat_primary.spinfo_state = SCTP_ACTIVE; } else { /* It's inactive */ sstat->sstat_primary.spinfo_state = SCTP_INACTIVE; } sstat->sstat_primary.spinfo_cwnd = net->cwnd; sstat->sstat_primary.spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT; sstat->sstat_primary.spinfo_rto = net->RTO; sstat->sstat_primary.spinfo_mtu = net->mtu; switch (stcb->asoc.primary_destination->ro._l_addr.sa.sa_family) { #if defined(INET) case AF_INET: sstat->sstat_primary.spinfo_mtu -= SCTP_MIN_V4_OVERHEAD; break; #endif #if defined(INET6) case AF_INET6: sstat->sstat_primary.spinfo_mtu -= SCTP_MIN_OVERHEAD; break; #endif default: break; } } else { memset(&sstat->sstat_primary, 0, sizeof(struct sctp_paddrinfo)); } sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb); SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_status); break; } case SCTP_RTOINFO: { struct sctp_rtoinfo *srto; SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize); SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); if (stcb) { srto->srto_initial = stcb->asoc.initial_rto; srto->srto_max = stcb->asoc.maxrto; srto->srto_min = stcb->asoc.minrto; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (srto->srto_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); srto->srto_initial = inp->sctp_ep.initial_rto; srto->srto_max = inp->sctp_ep.sctp_maxrto; srto->srto_min = inp->sctp_ep.sctp_minrto; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_rtoinfo); } break; } case SCTP_TIMEOUTS: { struct sctp_timeouts *stimo; SCTP_CHECK_AND_CAST(stimo, optval, struct sctp_timeouts, *optsize); SCTP_FIND_STCB(inp, stcb, stimo->stimo_assoc_id); if (stcb) { stimo->stimo_init = stcb->asoc.timoinit; stimo->stimo_data = stcb->asoc.timodata; stimo->stimo_sack = stcb->asoc.timosack; stimo->stimo_shutdown = stcb->asoc.timoshutdown; stimo->stimo_heartbeat = stcb->asoc.timoheartbeat; stimo->stimo_cookie = stcb->asoc.timocookie; stimo->stimo_shutdownack = stcb->asoc.timoshutdownack; SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_timeouts); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_ASSOCINFO: { struct sctp_assocparams *sasoc; SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize); SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); if (stcb) { sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; 
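				/*
				 * A userland caller fills in sasoc_assoc_id
				 * and reads the remaining fields back,
				 * roughly like this (a sketch, not part of
				 * this file; "fd" and "assoc_id" are
				 * hypothetical):
				 *
				 *	struct sctp_assocparams ap;
				 *	socklen_t len = sizeof(ap);
				 *
				 *	memset(&ap, 0, sizeof(ap));
				 *	ap.sasoc_assoc_id = assoc_id;
				 *	(void)getsockopt(fd, IPPROTO_SCTP,
				 *	    SCTP_ASSOCINFO, &ap, &len);
				 */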
sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; sasoc->sasoc_number_peer_destinations = 0; sasoc->sasoc_peer_rwnd = 0; sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assocparams); } break; } case SCTP_DEFAULT_SEND_PARAM: { struct sctp_sndrcvinfo *s_info; SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize); SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); if (stcb) { memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send)); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (s_info->sinfo_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); memcpy(s_info, &inp->def_send, sizeof(inp->def_send)); SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_sndrcvinfo); } break; } case SCTP_INITMSG: { struct sctp_initmsg *sinit; SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize); SCTP_INP_RLOCK(inp); sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count; sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome; sinit->sinit_max_attempts = inp->sctp_ep.max_init_times; sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max; SCTP_INP_RUNLOCK(inp); *optsize = sizeof(struct sctp_initmsg); break; } case SCTP_PRIMARY_ADDR: /* we allow a "get" operation on this */ { struct sctp_setprim *ssp; SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize); SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id); if (stcb) { union sctp_sockstore *addr; addr = &stcb->asoc.primary_destination->ro._l_addr; switch (addr->sa.sa_family) { #ifdef INET case AF_INET: #ifdef INET6 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { in6_sin_2_v4mapsin6(&addr->sin, (struct sockaddr_in6 *)&ssp->ssp_addr); } else { memcpy(&ssp->ssp_addr, &addr->sin, sizeof(struct sockaddr_in)); } #else memcpy(&ssp->ssp_addr, &addr->sin, sizeof(struct sockaddr_in)); #endif break; #endif #ifdef INET6 case AF_INET6: memcpy(&ssp->ssp_addr, &addr->sin6, sizeof(struct sockaddr_in6)); break; #endif default: break; } SCTP_TCB_UNLOCK(stcb); *optsize = sizeof(struct sctp_setprim); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_HMAC_IDENT: { struct sctp_hmacalgo *shmac; sctp_hmaclist_t *hmaclist; uint32_t size; int i; SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); SCTP_INP_RLOCK(inp); hmaclist = inp->sctp_ep.local_hmacs; if (hmaclist == NULL) { /* no HMACs to return */ *optsize = sizeof(*shmac); SCTP_INP_RUNLOCK(inp); break; } /* is there room for all of the hmac ids? 
*/ size = sizeof(*shmac) + (hmaclist->num_algo * sizeof(shmac->shmac_idents[0])); if ((size_t)(*optsize) < size) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_INP_RUNLOCK(inp); break; } /* copy in the list */ shmac->shmac_number_of_idents = hmaclist->num_algo; for (i = 0; i < hmaclist->num_algo; i++) { shmac->shmac_idents[i] = hmaclist->hmac[i]; } SCTP_INP_RUNLOCK(inp); *optsize = size; break; } case SCTP_AUTH_ACTIVE_KEY: { struct sctp_authkeyid *scact; SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize); SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); if (stcb) { /* get the active key on the assoc */ scact->scact_keynumber = stcb->asoc.authinfo.active_keyid; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (scact->scact_assoc_id == SCTP_FUTURE_ASSOC)) { /* get the endpoint active key */ SCTP_INP_RLOCK(inp); scact->scact_keynumber = inp->sctp_ep.default_keyid; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_authkeyid); } break; } case SCTP_LOCAL_AUTH_CHUNKS: { struct sctp_authchunks *sac; sctp_auth_chklist_t *chklist = NULL; size_t size = 0; SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); if (stcb) { /* get off the assoc */ chklist = stcb->asoc.local_auth_chunks; /* is there enough space? */ size = sctp_auth_get_chklist_size(chklist); if (*optsize < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { /* copy in the chunks */ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); sac->gauth_number_of_chunks = (uint32_t)size; *optsize = sizeof(struct sctp_authchunks) + size; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (sac->gauth_assoc_id == SCTP_FUTURE_ASSOC)) { /* get off the endpoint */ SCTP_INP_RLOCK(inp); chklist = inp->sctp_ep.local_auth_chunks; /* is there enough space? */ size = sctp_auth_get_chklist_size(chklist); if (*optsize < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { /* copy in the chunks */ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); sac->gauth_number_of_chunks = (uint32_t)size; *optsize = sizeof(struct sctp_authchunks) + size; } SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_PEER_AUTH_CHUNKS: { struct sctp_authchunks *sac; sctp_auth_chklist_t *chklist = NULL; size_t size = 0; SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize); SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id); if (stcb) { /* get off the assoc */ chklist = stcb->asoc.peer_auth_chunks; /* is there enough space? 
*/ size = sctp_auth_get_chklist_size(chklist); if (*optsize < (sizeof(struct sctp_authchunks) + size)) { error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); } else { /* copy in the chunks */ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks); sac->gauth_number_of_chunks = (uint32_t)size; *optsize = sizeof(struct sctp_authchunks) + size; } SCTP_TCB_UNLOCK(stcb); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; } break; } case SCTP_EVENT: { struct sctp_event *event; uint32_t event_type; SCTP_CHECK_AND_CAST(event, optval, struct sctp_event, *optsize); SCTP_FIND_STCB(inp, stcb, event->se_assoc_id); switch (event->se_type) { case SCTP_ASSOC_CHANGE: event_type = SCTP_PCB_FLAGS_RECVASSOCEVNT; break; case SCTP_PEER_ADDR_CHANGE: event_type = SCTP_PCB_FLAGS_RECVPADDREVNT; break; case SCTP_REMOTE_ERROR: event_type = SCTP_PCB_FLAGS_RECVPEERERR; break; case SCTP_SEND_FAILED: event_type = SCTP_PCB_FLAGS_RECVSENDFAILEVNT; break; case SCTP_SHUTDOWN_EVENT: event_type = SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT; break; case SCTP_ADAPTATION_INDICATION: event_type = SCTP_PCB_FLAGS_ADAPTATIONEVNT; break; case SCTP_PARTIAL_DELIVERY_EVENT: event_type = SCTP_PCB_FLAGS_PDAPIEVNT; break; case SCTP_AUTHENTICATION_EVENT: event_type = SCTP_PCB_FLAGS_AUTHEVNT; break; case SCTP_STREAM_RESET_EVENT: event_type = SCTP_PCB_FLAGS_STREAM_RESETEVNT; break; case SCTP_SENDER_DRY_EVENT: event_type = SCTP_PCB_FLAGS_DRYEVNT; break; case SCTP_NOTIFICATIONS_STOPPED_EVENT: event_type = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP); error = ENOTSUP; break; case SCTP_ASSOC_RESET_EVENT: event_type = SCTP_PCB_FLAGS_ASSOC_RESETEVNT; break; case SCTP_STREAM_CHANGE_EVENT: event_type = SCTP_PCB_FLAGS_STREAM_CHANGEEVNT; break; case SCTP_SEND_FAILED_EVENT: event_type = SCTP_PCB_FLAGS_RECVNSENDFAILEVNT; break; default: event_type = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (event_type > 0) { if (stcb) { event->se_on = sctp_stcb_is_feature_on(inp, stcb, event_type); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (event->se_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); event->se_on = sctp_is_feature_on(inp, event_type); SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } if (error == 0) { *optsize = sizeof(struct sctp_event); } break; } case SCTP_RECVRCVINFO: { int onoff; if (*optsize < sizeof(int)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_RLOCK(inp); onoff = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO); SCTP_INP_RUNLOCK(inp); } if (error == 0) { /* return the option value */ *(int *)optval = onoff; *optsize = sizeof(int); } break; } case SCTP_RECVNXTINFO: { int onoff; if (*optsize < sizeof(int)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_RLOCK(inp); onoff = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO); SCTP_INP_RUNLOCK(inp); } if (error == 0) { /* return the option value */ *(int *)optval = onoff; *optsize = sizeof(int); } break; } case SCTP_DEFAULT_SNDINFO: { struct sctp_sndinfo *info; SCTP_CHECK_AND_CAST(info, optval, struct sctp_sndinfo, *optsize); SCTP_FIND_STCB(inp, stcb, info->snd_assoc_id); if (stcb) { info->snd_sid = stcb->asoc.def_send.sinfo_stream; 
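				/*
				 * The low four bits of sinfo_flags carry the
				 * PR-SCTP policy (cf. PR_SCTP_POLICY() in the
				 * SCTP_DEFAULT_PRINFO case below), so they
				 * are masked off before the flags are
				 * reported.
				 */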
				info->snd_flags = stcb->asoc.def_send.sinfo_flags;
				info->snd_flags &= 0xfff0;
				info->snd_ppid = stcb->asoc.def_send.sinfo_ppid;
				info->snd_context = stcb->asoc.def_send.sinfo_context;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
				    (info->snd_assoc_id == SCTP_FUTURE_ASSOC)) {
					SCTP_INP_RLOCK(inp);
					info->snd_sid = inp->def_send.sinfo_stream;
					info->snd_flags = inp->def_send.sinfo_flags;
					info->snd_flags &= 0xfff0;
					info->snd_ppid = inp->def_send.sinfo_ppid;
					info->snd_context = inp->def_send.sinfo_context;
					SCTP_INP_RUNLOCK(inp);
				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
				}
			}
			if (error == 0) {
				*optsize = sizeof(struct sctp_sndinfo);
			}
			break;
		}
	case SCTP_DEFAULT_PRINFO:
		{
			struct sctp_default_prinfo *info;

			SCTP_CHECK_AND_CAST(info, optval, struct sctp_default_prinfo, *optsize);
			SCTP_FIND_STCB(inp, stcb, info->pr_assoc_id);
			if (stcb) {
				info->pr_policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags);
				info->pr_value = stcb->asoc.def_send.sinfo_timetolive;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
				    (info->pr_assoc_id == SCTP_FUTURE_ASSOC)) {
					SCTP_INP_RLOCK(inp);
					info->pr_policy = PR_SCTP_POLICY(inp->def_send.sinfo_flags);
					info->pr_value = inp->def_send.sinfo_timetolive;
					SCTP_INP_RUNLOCK(inp);
				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
				}
			}
			if (error == 0) {
				*optsize = sizeof(struct sctp_default_prinfo);
			}
			break;
		}
	case SCTP_PEER_ADDR_THLDS:
		{
			struct sctp_paddrthlds *thlds;
			struct sctp_nets *net;
			struct sockaddr *addr;
#if defined(INET) && defined(INET6)
			struct sockaddr_in sin_store;
#endif

			SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, *optsize);
			SCTP_FIND_STCB(inp, stcb, thlds->spt_assoc_id);
#if defined(INET) && defined(INET6)
			if (thlds->spt_address.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)&thlds->spt_address;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					in6_sin6_2_sin(&sin_store, sin6);
					addr = (struct sockaddr *)&sin_store;
				} else {
					addr = (struct sockaddr *)&thlds->spt_address;
				}
			} else {
				addr = (struct sockaddr *)&thlds->spt_address;
			}
#else
			addr = (struct sockaddr *)&thlds->spt_address;
#endif
			if (stcb != NULL) {
				net = sctp_findnet(stcb, addr);
			} else {
				/*
				 * We increment here since
				 * sctp_findassociation_ep_addr() will do a
				 * decrement if it finds the stcb as long as
				 * the locked tcb (last argument) is NOT a
				 * TCB, aka NULL.
				 */
				net = NULL;
				SCTP_INP_INCR_REF(inp);
				stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
				if (stcb == NULL) {
					SCTP_INP_DECR_REF(inp);
				}
			}
			if ((stcb != NULL) && (net == NULL)) {
#ifdef INET
				if (addr->sa_family == AF_INET) {
					struct sockaddr_in *sin;

					sin = (struct sockaddr_in *)addr;
					if (sin->sin_addr.s_addr != INADDR_ANY) {
						error = EINVAL;
						SCTP_TCB_UNLOCK(stcb);
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
						break;
					}
				} else
#endif
#ifdef INET6
				if (addr->sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6;

					sin6 = (struct sockaddr_in6 *)addr;
					if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
						error = EINVAL;
						SCTP_TCB_UNLOCK(stcb);
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
						break;
					}
				} else
#endif
				{
					error = EAFNOSUPPORT;
					SCTP_TCB_UNLOCK(stcb);
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
					break;
				}
			}
			if (stcb != NULL) {
				if (net != NULL) {
					thlds->spt_pathmaxrxt = net->failure_threshold;
					thlds->spt_pathpfthld = net->pf_threshold;
					thlds->spt_pathcpthld = 0xffff;
				} else {
					thlds->spt_pathmaxrxt = stcb->asoc.def_net_failure;
					thlds->spt_pathpfthld = stcb->asoc.def_net_pf_threshold;
					thlds->spt_pathcpthld = 0xffff;
				}
				thlds->spt_assoc_id = sctp_get_associd(stcb);
				SCTP_TCB_UNLOCK(stcb);
			} else {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
				    (thlds->spt_assoc_id == SCTP_FUTURE_ASSOC)) {
					/* Use endpoint defaults */
					SCTP_INP_RLOCK(inp);
					thlds->spt_pathmaxrxt = inp->sctp_ep.def_net_failure;
					thlds->spt_pathpfthld = inp->sctp_ep.def_net_pf_threshold;
					thlds->spt_pathcpthld = 0xffff;
					SCTP_INP_RUNLOCK(inp);
				} else {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
					error = EINVAL;
				}
			}
			if (error == 0) {
				*optsize = sizeof(struct sctp_paddrthlds);
			}
			break;
		}
	case SCTP_REMOTE_UDP_ENCAPS_PORT:
		{
			struct sctp_udpencaps *encaps;
			struct sctp_nets *net;
			struct sockaddr *addr;
#if defined(INET) && defined(INET6)
			struct sockaddr_in sin_store;
#endif

			SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, *optsize);
			SCTP_FIND_STCB(inp, stcb, encaps->sue_assoc_id);
#if defined(INET) && defined(INET6)
			if (encaps->sue_address.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)&encaps->sue_address;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					in6_sin6_2_sin(&sin_store, sin6);
					addr = (struct sockaddr *)&sin_store;
				} else {
					addr = (struct sockaddr *)&encaps->sue_address;
				}
			} else {
				addr = (struct sockaddr *)&encaps->sue_address;
			}
#else
			addr = (struct sockaddr *)&encaps->sue_address;
#endif
			if (stcb) {
				net = sctp_findnet(stcb, addr);
			} else {
				/*
				 * We increment here since
				 * sctp_findassociation_ep_addr() will do a
				 * decrement if it finds the stcb as long as
				 * the locked tcb (last argument) is NOT a
				 * TCB, aka NULL.
*/ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (stcb != NULL) { if (net) { encaps->sue_port = net->port; } else { encaps->sue_port = stcb->asoc.port; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (encaps->sue_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); encaps->sue_port = inp->sctp_ep.port; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_udpencaps); } break; } case SCTP_ECN_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.ecn_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->ecn_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_PR_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.prsctp_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->prsctp_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_AUTH_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.auth_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->auth_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_ASCONF_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { 
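				/*
				 * The *_SUPPORTED options (ECN, PR, AUTH,
				 * ASCONF, RECONFIG, NR-SACK, PKTDROP) all
				 * share this shape: report the per-assoc
				 * value when an stcb is found, otherwise the
				 * endpoint default.  An illustrative probe (a
				 * sketch, not part of this file; "fd" is
				 * hypothetical):
				 *
				 *	struct sctp_assoc_value av;
				 *	socklen_t len = sizeof(av);
				 *
				 *	memset(&av, 0, sizeof(av));
				 *	av.assoc_id = SCTP_FUTURE_ASSOC;
				 *	(void)getsockopt(fd, IPPROTO_SCTP,
				 *	    SCTP_ASCONF_SUPPORTED, &av, &len);
				 */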
av->assoc_value = stcb->asoc.asconf_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->asconf_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_RECONFIG_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.reconfig_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->reconfig_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_NRSACK_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.nrsack_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->nrsack_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_PKTDROP_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.pktdrop_supported; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->pktdrop_supported; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_ENABLE_STREAM_RESET: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = (uint32_t)stcb->asoc.local_strreset_support; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = (uint32_t)inp->local_strreset_support; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } case SCTP_PR_STREAM_STATUS: { struct sctp_prstatus *sprstat; uint16_t sid; uint16_t policy; SCTP_CHECK_AND_CAST(sprstat, optval, struct sctp_prstatus, *optsize); SCTP_FIND_STCB(inp, stcb, sprstat->sprstat_assoc_id); sid = sprstat->sprstat_sid; policy = sprstat->sprstat_policy; #if defined(SCTP_DETAILED_STR_STATS) if ((stcb != NULL) && (sid < stcb->asoc.streamoutcnt) && (policy != SCTP_PR_SCTP_NONE) && ((policy <= 
SCTP_PR_SCTP_MAX) || (policy == SCTP_PR_SCTP_ALL))) { if (policy == SCTP_PR_SCTP_ALL) { sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[0]; sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[0]; } else { sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[policy]; sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[policy]; } #else if ((stcb != NULL) && (sid < stcb->asoc.streamoutcnt) && (policy == SCTP_PR_SCTP_ALL)) { sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[0]; sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[0]; #endif } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } if (error == 0) { *optsize = sizeof(struct sctp_prstatus); } break; } case SCTP_PR_ASSOC_STATUS: { struct sctp_prstatus *sprstat; uint16_t policy; SCTP_CHECK_AND_CAST(sprstat, optval, struct sctp_prstatus, *optsize); SCTP_FIND_STCB(inp, stcb, sprstat->sprstat_assoc_id); policy = sprstat->sprstat_policy; if ((stcb != NULL) && (policy != SCTP_PR_SCTP_NONE) && ((policy <= SCTP_PR_SCTP_MAX) || (policy == SCTP_PR_SCTP_ALL))) { if (policy == SCTP_PR_SCTP_ALL) { sprstat->sprstat_abandoned_unsent = stcb->asoc.abandoned_unsent[0]; sprstat->sprstat_abandoned_sent = stcb->asoc.abandoned_sent[0]; } else { sprstat->sprstat_abandoned_unsent = stcb->asoc.abandoned_unsent[policy]; sprstat->sprstat_abandoned_sent = stcb->asoc.abandoned_sent[policy]; } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } if (error == 0) { *optsize = sizeof(struct sctp_prstatus); } break; } case SCTP_MAX_CWND: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { av->assoc_value = stcb->asoc.max_cwnd; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_RLOCK(inp); av->assoc_value = inp->max_cwnd; SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } break; } default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; break; } /* end switch (sopt->sopt_name) */ if (error) { *optsize = 0; } return (error); } static int sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, void *p) { int error, set_opt; uint32_t *mopt; struct sctp_tcb *stcb = NULL; struct sctp_inpcb *inp = NULL; uint32_t vrf_id; if (optval == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } vrf_id = inp->def_vrf_id; error = 0; switch (optname) { case SCTP_NODELAY: case SCTP_AUTOCLOSE: case SCTP_AUTO_ASCONF: case SCTP_EXPLICIT_EOR: case SCTP_DISABLE_FRAGMENTS: case SCTP_USE_EXT_RCVINFO: case SCTP_I_WANT_MAPPED_V4_ADDR: /* copy in the option value */ SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize); set_opt = 0; if (error) break; switch (optname) { case SCTP_DISABLE_FRAGMENTS: set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT; break; case SCTP_AUTO_ASCONF: /* * NOTE: we 
			 * don't really support this flag
			 */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
				/* only valid for bound-all sockets */
				if ((SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) &&
				    (*mopt != 0)) {
					/* forbidden by admin */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPERM);
					return (EPERM);
				}
				set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				return (EINVAL);
			}
			break;
		case SCTP_EXPLICIT_EOR:
			set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
			break;
		case SCTP_USE_EXT_RCVINFO:
			set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
			break;
		case SCTP_I_WANT_MAPPED_V4_ADDR:
			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
				set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				return (EINVAL);
			}
			break;
		case SCTP_NODELAY:
			set_opt = SCTP_PCB_FLAGS_NODELAY;
			break;
		case SCTP_AUTOCLOSE:
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				return (EINVAL);
			}
			set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
			/*
			 * The value is in ticks. Note this does not affect
			 * old associations, only new ones.
			 */
			inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
			break;
		}
		SCTP_INP_WLOCK(inp);
		if (*mopt != 0) {
			sctp_feature_on(inp, set_opt);
		} else {
			sctp_feature_off(inp, set_opt);
		}
		SCTP_INP_WUNLOCK(inp);
		break;
	case SCTP_REUSE_PORT:
		{
			SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
				/* Can't set it after we are bound */
				error = EINVAL;
				break;
			}
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
				/* Can't do this for a one-to-many socket */
				error = EINVAL;
				break;
			}
			if (optval)
				sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
			else
				sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
			break;
		}
	case SCTP_PARTIAL_DELIVERY_POINT:
		{
			uint32_t *value;

			SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
			if (*value > SCTP_SB_LIMIT_RCV(so)) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				error = EINVAL;
				break;
			}
			inp->partial_delivery_point = *value;
			break;
		}
	case SCTP_FRAGMENT_INTERLEAVE:
		/* not yet until we rewrite sctp_recvmsg() */
		{
			uint32_t *level;

			SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
			if (*level == SCTP_FRAG_LEVEL_2) {
				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
				sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
			} else if (*level == SCTP_FRAG_LEVEL_1) {
				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
			} else if (*level == SCTP_FRAG_LEVEL_0) {
				sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
			} else {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				error = EINVAL;
			}
			break;
		}
	case SCTP_INTERLEAVING_SUPPORTED:
		{
			struct sctp_assoc_value *av;

			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
			if (stcb) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
				error = EINVAL;
				SCTP_TCB_UNLOCK(stcb);
			} else {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
				    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
					SCTP_INP_WLOCK(inp);
					if (av->assoc_value == 0) {
						inp->idata_supported = 0;
					} else {
						if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
						    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS))) {
							inp->idata_supported = 1;
						} else {
							/*
							 * Must have Frag
							 * interleave
and * stream interleave * on */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_CMT_ON_OFF: if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); if (av->assoc_value > SCTP_CMT_MAX) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.sctp_cmt_on_off = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); inp->sctp_cmt_on_off = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.sctp_cmt_on_off = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; } break; case SCTP_PLUGGABLE_CC: { struct sctp_assoc_value *av; struct sctp_nets *net; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); if ((av->assoc_value != SCTP_CC_RFC2581) && (av->assoc_value != SCTP_CC_HSTCP) && (av->assoc_value != SCTP_CC_HTCP) && (av->assoc_value != SCTP_CC_RTCC)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value]; stcb->asoc.congestion_control_module = av->assoc_value; if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); } } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); inp->sctp_ep.sctp_default_cc_module = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value]; stcb->asoc.congestion_control_module = av->assoc_value; if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); } } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_CC_OPTION: { struct sctp_cc_option *cc_opt; SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, optsize); SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id); if (stcb == NULL) { if (cc_opt->aid_value.assoc_id == SCTP_CURRENT_ASSOC) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (stcb->asoc.cc_functions.sctp_cwnd_socket_option) { (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 1, cc_opt); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else { error = EINVAL; } } else { if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) { error = 
ENOTSUP; } else { error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 1, cc_opt); } SCTP_TCB_UNLOCK(stcb); } break; } case SCTP_PLUGGABLE_SS: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); if ((av->assoc_value != SCTP_SS_DEFAULT) && (av->assoc_value != SCTP_SS_ROUND_ROBIN) && (av->assoc_value != SCTP_SS_ROUND_ROBIN_PACKET) && (av->assoc_value != SCTP_SS_PRIORITY) && (av->assoc_value != SCTP_SS_FAIR_BANDWITH) && (av->assoc_value != SCTP_SS_FIRST_COME)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_TCB_SEND_LOCK(stcb); stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1); stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value]; stcb->asoc.stream_scheduling_module = av->assoc_value; stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); SCTP_TCB_SEND_UNLOCK(stcb); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); inp->sctp_ep.sctp_default_ss_module = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); SCTP_TCB_SEND_LOCK(stcb); stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1); stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value]; stcb->asoc.stream_scheduling_module = av->assoc_value; stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); SCTP_TCB_SEND_UNLOCK(stcb); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_SS_VALUE: { struct sctp_stream_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { if ((av->stream_id >= stcb->asoc.streamoutcnt) || (stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], av->stream_value) < 0)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if (av->assoc_id == SCTP_CURRENT_ASSOC) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (av->stream_id < stcb->asoc.streamoutcnt) { stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id], av->stream_value); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else { /* * Can't set stream value without * association */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_CLR_STAT_LOG: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; break; case SCTP_CONTEXT: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.context = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); inp->sctp_context = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, 
sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.context = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_VRF_ID: { uint32_t *default_vrfid; SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize); if (*default_vrfid > SCTP_MAX_VRF_ID) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } inp->def_vrf_id = *default_vrfid; break; } case SCTP_DEL_VRF_ID: { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; break; } case SCTP_ADD_VRF_ID: { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; break; } case SCTP_DELAYED_SACK: { struct sctp_sack_info *sack; SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); if (sack->sack_delay) { if (sack->sack_delay > SCTP_MAX_SACK_DELAY) sack->sack_delay = SCTP_MAX_SACK_DELAY; if (MSEC_TO_TICKS(sack->sack_delay) < 1) { sack->sack_delay = TICKS_TO_MSEC(1); } } if (stcb) { if (sack->sack_delay) { stcb->asoc.delayed_ack = sack->sack_delay; } if (sack->sack_freq) { stcb->asoc.sack_freq = sack->sack_freq; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (sack->sack_assoc_id == SCTP_FUTURE_ASSOC) || (sack->sack_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); if (sack->sack_delay) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); } if (sack->sack_freq) { inp->sctp_ep.sctp_sack_freq = sack->sack_freq; } SCTP_INP_WUNLOCK(inp); } if ((sack->sack_assoc_id == SCTP_CURRENT_ASSOC) || (sack->sack_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (sack->sack_delay) { stcb->asoc.delayed_ack = sack->sack_delay; } if (sack->sack_freq) { stcb->asoc.sack_freq = sack->sack_freq; } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_AUTH_CHUNK: { struct sctp_authchunk *sauth; SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize); SCTP_INP_WLOCK(inp); if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { inp->auth_supported = 1; } SCTP_INP_WUNLOCK(inp); break; } case SCTP_AUTH_KEY: { struct sctp_authkey *sca; struct sctp_keyhead *shared_keys; sctp_sharedkey_t *shared_key; sctp_key_t *key = NULL; size_t size; SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize); if (sca->sca_keylength == 0) { size = optsize - sizeof(struct sctp_authkey); } else { if (sca->sca_keylength + sizeof(struct sctp_authkey) <= optsize) { size = sca->sca_keylength; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } } SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id); if (stcb) { shared_keys = &stcb->asoc.shared_keys; /* clear the cached keys for this key id */ sctp_clear_cachedkeys(stcb, sca->sca_keynumber); /* * create the new shared key and * insert/replace it */ if (size > 0) { key = sctp_set_key(sca->sca_key, (uint32_t)size); if (key == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_TCB_UNLOCK(stcb); break; } } shared_key = sctp_alloc_sharedkey(); if (shared_key == NULL) { sctp_free_key(key); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_TCB_UNLOCK(stcb); break; } 
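				/*
				 * The raw key bytes were copied into a
				 * sctp_key_t above; the sctp_sharedkey_t
				 * wrapper ties them to sca_keynumber and
				 * sctp_insert_sharedkey() replaces any
				 * existing key with that id.  Setting a key
				 * from userland looks roughly like this (a
				 * sketch, not part of this file; "fd",
				 * "keybytes" and "keylen" are hypothetical):
				 *
				 *	struct sctp_authkey *ak;
				 *	size_t len = sizeof(*ak) + keylen;
				 *
				 *	ak = calloc(1, len);
				 *	ak->sca_keynumber = 1;
				 *	ak->sca_keylength = keylen;
				 *	memcpy(ak->sca_key, keybytes, keylen);
				 *	(void)setsockopt(fd, IPPROTO_SCTP,
				 *	    SCTP_AUTH_KEY, ak, (socklen_t)len);
				 */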
shared_key->key = key; shared_key->keyid = sca->sca_keynumber; error = sctp_insert_sharedkey(shared_keys, shared_key); SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (sca->sca_assoc_id == SCTP_FUTURE_ASSOC) || (sca->sca_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); shared_keys = &inp->sctp_ep.shared_keys; /* * clear the cached keys on all * assocs for this key id */ sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber); /* * create the new shared key and * insert/replace it */ if (size > 0) { key = sctp_set_key(sca->sca_key, (uint32_t)size); if (key == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_INP_WUNLOCK(inp); break; } } shared_key = sctp_alloc_sharedkey(); if (shared_key == NULL) { sctp_free_key(key); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_INP_WUNLOCK(inp); break; } shared_key->key = key; shared_key->keyid = sca->sca_keynumber; error = sctp_insert_sharedkey(shared_keys, shared_key); SCTP_INP_WUNLOCK(inp); } if ((sca->sca_assoc_id == SCTP_CURRENT_ASSOC) || (sca->sca_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); shared_keys = &stcb->asoc.shared_keys; /* * clear the cached keys for * this key id */ sctp_clear_cachedkeys(stcb, sca->sca_keynumber); /* * create the new shared key * and insert/replace it */ if (size > 0) { key = sctp_set_key(sca->sca_key, (uint32_t)size); if (key == NULL) { SCTP_TCB_UNLOCK(stcb); continue; } } shared_key = sctp_alloc_sharedkey(); if (shared_key == NULL) { sctp_free_key(key); SCTP_TCB_UNLOCK(stcb); continue; } shared_key->key = key; shared_key->keyid = sca->sca_keynumber; error = sctp_insert_sharedkey(shared_keys, shared_key); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_HMAC_IDENT: { struct sctp_hmacalgo *shmac; sctp_hmaclist_t *hmaclist; uint16_t hmacid; uint32_t i; SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize); if ((optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) || (shmac->shmac_number_of_idents > 0xffff)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } hmaclist = sctp_alloc_hmaclist((uint16_t)shmac->shmac_number_of_idents); if (hmaclist == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; break; } for (i = 0; i < shmac->shmac_number_of_idents; i++) { hmacid = shmac->shmac_idents[i]; if (sctp_auth_add_hmacid(hmaclist, hmacid)) { /* invalid HMACs were found */ ; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; sctp_free_hmaclist(hmaclist); goto sctp_set_hmac_done; } } for (i = 0; i < hmaclist->num_algo; i++) { if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) { /* already in list */ break; } } if (i == hmaclist->num_algo) { /* not found in list */ sctp_free_hmaclist(hmaclist); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } /* set it on the endpoint */ SCTP_INP_WLOCK(inp); if (inp->sctp_ep.local_hmacs) sctp_free_hmaclist(inp->sctp_ep.local_hmacs); inp->sctp_ep.local_hmacs = hmaclist; SCTP_INP_WUNLOCK(inp); sctp_set_hmac_done: break; } case SCTP_AUTH_ACTIVE_KEY: { struct sctp_authkeyid *scact; SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize); SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id); /* set the active key 
on the right place */ if (stcb) { /* set the active key on the assoc */ if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (scact->scact_assoc_id == SCTP_FUTURE_ASSOC) || (scact->scact_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_INP_WUNLOCK(inp); } if ((scact->scact_assoc_id == SCTP_CURRENT_ASSOC) || (scact->scact_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); sctp_auth_setactivekey(stcb, scact->scact_keynumber); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_AUTH_DELETE_KEY: { struct sctp_authkeyid *scdel; SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize); SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id); /* delete the key from the right place */ if (stcb) { if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (scdel->scact_assoc_id == SCTP_FUTURE_ASSOC) || (scdel->scact_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_INP_WUNLOCK(inp); } if ((scdel->scact_assoc_id == SCTP_CURRENT_ASSOC) || (scdel->scact_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); sctp_delete_sharedkey(stcb, scdel->scact_keynumber); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_AUTH_DEACTIVATE_KEY: { struct sctp_authkeyid *keyid; SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, optsize); SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id); /* deactivate the key from the right place */ if (stcb) { if (sctp_deact_sharedkey(stcb, keyid->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (keyid->scact_assoc_id == SCTP_FUTURE_ASSOC) || (keyid->scact_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); if (sctp_deact_sharedkey_ep(inp, keyid->scact_keynumber)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_INP_WUNLOCK(inp); } if ((keyid->scact_assoc_id == SCTP_CURRENT_ASSOC) || (keyid->scact_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); sctp_deact_sharedkey(stcb, keyid->scact_keynumber); SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_ENABLE_STREAM_RESET: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); if (av->assoc_value & (~SCTP_ENABLE_VALUE_MASK)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.local_strreset_support = 
(uint8_t)av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); inp->local_strreset_support = (uint8_t)av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.local_strreset_support = (uint8_t)av->assoc_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_RESET_STREAMS: { struct sctp_reset_streams *strrst; int i, send_out = 0; int send_in = 0; SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_reset_streams, optsize); SCTP_FIND_STCB(inp, stcb, strrst->srs_assoc_id); if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; break; } if (stcb->asoc.reconfig_supported == 0) { /* * Peer does not support the chunk type. */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; SCTP_TCB_UNLOCK(stcb); break; } if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } if (sizeof(struct sctp_reset_streams) + strrst->srs_number_streams * sizeof(uint16_t) > optsize) { error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } if (strrst->srs_flags & SCTP_STREAM_RESET_INCOMING) { send_in = 1; if (stcb->asoc.stream_reset_outstanding) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; SCTP_TCB_UNLOCK(stcb); break; } } if (strrst->srs_flags & SCTP_STREAM_RESET_OUTGOING) { send_out = 1; } if ((strrst->srs_number_streams > SCTP_MAX_STREAMS_AT_ONCE_RESET) && send_in) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM); error = ENOMEM; SCTP_TCB_UNLOCK(stcb); break; } if ((send_in == 0) && (send_out == 0)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } for (i = 0; i < strrst->srs_number_streams; i++) { if ((send_in) && (strrst->srs_stream_list[i] >= stcb->asoc.streamincnt)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if ((send_out) && (strrst->srs_stream_list[i] >= stcb->asoc.streamoutcnt)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } } if (error) { SCTP_TCB_UNLOCK(stcb); break; } if (send_out) { int cnt; uint16_t strm; if (strrst->srs_number_streams) { for (i = 0, cnt = 0; i < strrst->srs_number_streams; i++) { strm = strrst->srs_stream_list[i]; if (stcb->asoc.strmout[strm].state == SCTP_STREAM_OPEN) { stcb->asoc.strmout[strm].state = SCTP_STREAM_RESET_PENDING; cnt++; } } } else { /* It's all of them */ for (i = 0, cnt = 0; i < stcb->asoc.streamoutcnt; i++) { if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN) { stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING; cnt++; } } } } if (send_in) { error = sctp_send_str_reset_req(stcb, strrst->srs_number_streams, strrst->srs_stream_list, send_in, 0, 0, 0, 0, 0); } else { error = sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_LOCKED); } if (error == 0) { sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); } else { /* * For outgoing streams, don't report any * problems in sending the request to the * application.
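* The affected streams stay marked * SCTP_STREAM_RESET_PENDING and are presumably * retried later from the output path.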
XXX: Double check resetting * incoming streams. */ error = 0; } SCTP_TCB_UNLOCK(stcb); break; } case SCTP_ADD_STREAMS: { struct sctp_add_streams *stradd; uint8_t addstream = 0; uint16_t add_o_strmcnt = 0; uint16_t add_i_strmcnt = 0; SCTP_CHECK_AND_CAST(stradd, optval, struct sctp_add_streams, optsize); SCTP_FIND_STCB(inp, stcb, stradd->sas_assoc_id); if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; break; } if (stcb->asoc.reconfig_supported == 0) { /* * Peer does not support the chunk type. */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; SCTP_TCB_UNLOCK(stcb); break; } if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } if (stcb->asoc.stream_reset_outstanding) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; SCTP_TCB_UNLOCK(stcb); break; } if ((stradd->sas_outstrms == 0) && (stradd->sas_instrms == 0)) { error = EINVAL; goto skip_stuff; } if (stradd->sas_outstrms) { addstream = 1; /* We allocate here */ add_o_strmcnt = stradd->sas_outstrms; if ((((int)add_o_strmcnt) + ((int)stcb->asoc.streamoutcnt)) > 0x0000ffff) { /* You can't have more than 64k */ error = EINVAL; goto skip_stuff; } } if (stradd->sas_instrms) { int cnt; addstream |= 2; /* * We allocate inside * sctp_send_str_reset_req() */ add_i_strmcnt = stradd->sas_instrms; cnt = add_i_strmcnt; cnt += stcb->asoc.streamincnt; if (cnt > 0x0000ffff) { /* You can't have more than 64k */ error = EINVAL; goto skip_stuff; } if (cnt > (int)stcb->asoc.max_inbound_streams) { /* More than you are allowed */ error = EINVAL; goto skip_stuff; } } error = sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, addstream, add_o_strmcnt, add_i_strmcnt, 0); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); skip_stuff: SCTP_TCB_UNLOCK(stcb); break; } case SCTP_RESET_ASSOC: { int i; uint32_t *value; SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize); SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t)*value); if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; break; } if (stcb->asoc.reconfig_supported == 0) { /* * Peer does not support the chunk type. */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); error = EOPNOTSUPP; SCTP_TCB_UNLOCK(stcb); break; } if (SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); break; } if (stcb->asoc.stream_reset_outstanding) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; SCTP_TCB_UNLOCK(stcb); break; } /* * Is there any data pending in the send or sent * queues? */ if (!TAILQ_EMPTY(&stcb->asoc.send_queue) || !TAILQ_EMPTY(&stcb->asoc.sent_queue)) { busy_out: error = EBUSY; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); SCTP_TCB_UNLOCK(stcb); break; } /* Do any streams have data queued? 
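* If so, refuse the whole-association reset * with EBUSY (busy_out above).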
*/ for (i = 0; i < stcb->asoc.streamoutcnt; i++) { if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { goto busy_out; } } error = sctp_send_str_reset_req(stcb, 0, NULL, 0, 1, 0, 0, 0, 0); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); break; } case SCTP_CONNECT_X: if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } error = sctp_do_connect_x(so, inp, optval, optsize, p, 0); break; case SCTP_CONNECT_X_DELAYED: if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } error = sctp_do_connect_x(so, inp, optval, optsize, p, 1); break; case SCTP_CONNECT_X_COMPLETE: { struct sockaddr *sa; /* FIXME MT: check correct? */ SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); /* find tcb */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else { /* * We increment here since * sctp_findassociation_ep_addr() will do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. */ SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if (stcb == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); error = ENOENT; break; } if (stcb->asoc.delayed_connection == 1) { stcb->asoc.delayed_connection = 0; (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); } else { /* * already expired or did not use delayed * connectx */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; } SCTP_TCB_UNLOCK(stcb); break; } case SCTP_MAX_BURST: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.max_burst = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); inp->sctp_ep.max_burst = av->assoc_value; SCTP_INP_WUNLOCK(inp); } if ((av->assoc_id == SCTP_CURRENT_ASSOC) || (av->assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.max_burst = av->assoc_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_MAXSEG: { struct sctp_assoc_value *av; int ovh; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { ovh = SCTP_MED_OVERHEAD; } else { ovh = SCTP_MED_V4_OVERHEAD; } if (stcb) { if (av->assoc_value) { stcb->asoc.sctp_frag_point = (av->assoc_value + ovh); } else { stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); /* * FIXME MT: I think this is not in * tune with the API ID */ if (av->assoc_value) {
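/* av->assoc_value excludes protocol overhead; add ovh back to compute the frag point */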
inp->sctp_frag_point = (av->assoc_value + ovh); } else { inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_EVENTS: { struct sctp_event_subscribe *events; SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize); SCTP_INP_WLOCK(inp); if (events->sctp_data_io_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); } if (events->sctp_association_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT); } if (events->sctp_address_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT); } if (events->sctp_send_failure_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } if (events->sctp_peer_error_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR); } if (events->sctp_shutdown_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } if (events->sctp_partial_delivery_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT); } if (events->sctp_adaptation_layer_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } if (events->sctp_authentication_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT); } if (events->sctp_sender_dry_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT); } if (events->sctp_stream_reset_event) { sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } SCTP_INP_WUNLOCK(inp); SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (events->sctp_association_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT); } if (events->sctp_address_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT); } if (events->sctp_send_failure_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT); } if (events->sctp_peer_error_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVPEERERR); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVPEERERR); } if (events->sctp_shutdown_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT); } if (events->sctp_partial_delivery_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT); } if (events->sctp_adaptation_layer_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT); } if (events->sctp_authentication_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_AUTHEVNT); } else { 
sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_AUTHEVNT); } if (events->sctp_sender_dry_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DRYEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DRYEVNT); } if (events->sctp_stream_reset_event) { sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } else { sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT); } SCTP_TCB_UNLOCK(stcb); } /* * Send up the sender dry event only for 1-to-1 * style sockets. */ if (events->sctp_sender_dry_event) { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); if (TAILQ_EMPTY(&stcb->asoc.send_queue) && TAILQ_EMPTY(&stcb->asoc.sent_queue) && (stcb->asoc.stream_queue_cnt == 0)) { sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); } SCTP_TCB_UNLOCK(stcb); } } } SCTP_INP_RUNLOCK(inp); break; } case SCTP_ADAPTATION_LAYER: { struct sctp_setadaptation *adap_bits; SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize); SCTP_INP_WLOCK(inp); inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind; inp->sctp_ep.adaptation_layer_indicator_provided = 1; SCTP_INP_WUNLOCK(inp); break; } #ifdef SCTP_DEBUG case SCTP_SET_INITIAL_DBG_SEQ: { uint32_t *vvv; SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize); SCTP_INP_WLOCK(inp); inp->sctp_ep.initial_sequence_debug = *vvv; SCTP_INP_WUNLOCK(inp); break; } #endif case SCTP_DEFAULT_SEND_PARAM: { struct sctp_sndrcvinfo *s_info; SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize); SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id); if (stcb) { if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) { memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (s_info->sinfo_assoc_id == SCTP_FUTURE_ASSOC) || (s_info->sinfo_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send))); SCTP_INP_WUNLOCK(inp); } if ((s_info->sinfo_assoc_id == SCTP_CURRENT_ASSOC) || (s_info->sinfo_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) { memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send))); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_PEER_ADDR_PARAMS: { struct sctp_paddrparams *paddrp; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize); SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id); #if defined(INET) && defined(INET6) if (paddrp->spp_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&paddrp->spp_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&paddrp->spp_address; } } else { addr = (struct sockaddr *)&paddrp->spp_address; } #else addr = (struct sockaddr *)&paddrp->spp_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * 
sctp_findassociation_ep_addr() will do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. */ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } /* sanity checks */ if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) { if (stcb) SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) { if (stcb) SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } if (stcb != NULL) { /************************TCB SPECIFIC SET ******************/ if (net != NULL) { /************************NET SPECIFIC SET ******************/ if (paddrp->spp_flags & SPP_HB_DISABLE) { if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && !(net->dest_state & SCTP_ADDR_NOHB)) { sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9); } net->dest_state |= SCTP_ADDR_NOHB; } if (paddrp->spp_flags & SPP_HB_ENABLE) { if (paddrp->spp_hbinterval) { net->heart_beat_delay = paddrp->spp_hbinterval; } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { net->heart_beat_delay = 0; } sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); net->dest_state &= ~SCTP_ADDR_NOHB; } if (paddrp->spp_flags & SPP_HB_DEMAND) { if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SOCKOPT, SCTP_SO_LOCKED); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); } } if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); } net->dest_state |= SCTP_ADDR_NO_PMTUD; net->mtu = paddrp->spp_pathmtu; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: net->mtu += SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: net->mtu += SCTP_MIN_OVERHEAD; break; #endif default: break; } if (net->mtu < stcb->asoc.smallest_mtu) { sctp_pathmtu_adjustment(stcb, net->mtu); } } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } net->dest_state &= ~SCTP_ADDR_NO_PMTUD; } if (paddrp->spp_pathmaxrxt) { if (net->dest_state & SCTP_ADDR_PF) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_PF; } } else { if
((net->error_count <= paddrp->spp_pathmaxrxt) && (net->error_count > net->pf_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); } } if (net->dest_state & SCTP_ADDR_REACHABLE) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { if (net->error_count <= paddrp->spp_pathmaxrxt) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } net->failure_threshold = paddrp->spp_pathmaxrxt; } if (paddrp->spp_flags & SPP_DSCP) { net->dscp = paddrp->spp_dscp & 0xfc; net->dscp |= 0x01; } #ifdef INET6 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { if (net->ro._l_addr.sa.sa_family == AF_INET6) { net->flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff; net->flowlabel |= 0x80000000; } } #endif } else { /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ if (paddrp->spp_pathmaxrxt != 0) { stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->dest_state & SCTP_ADDR_PF) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_PF; } } else { if ((net->error_count <= paddrp->spp_pathmaxrxt) && (net->error_count > net->pf_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_13); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); } } if (net->dest_state & SCTP_ADDR_REACHABLE) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { if (net->error_count <= paddrp->spp_pathmaxrxt) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } net->failure_threshold = paddrp->spp_pathmaxrxt; } } if (paddrp->spp_flags & SPP_HB_ENABLE) { if (paddrp->spp_hbinterval != 0) { stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { stcb->asoc.heart_beat_delay = 0; } /* Turn back on the timer */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (paddrp->spp_hbinterval != 0) { net->heart_beat_delay = paddrp->spp_hbinterval; } else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { net->heart_beat_delay = 0; } if (net->dest_state & SCTP_ADDR_NOHB) { net->dest_state &= ~SCTP_ADDR_NOHB; } sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_14); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); } sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } if (paddrp->spp_flags & SPP_HB_DISABLE) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (!(net->dest_state & SCTP_ADDR_NOHB)) { net->dest_state |= SCTP_ADDR_NOHB; if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_15); } } } sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if 
(SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_16); } net->dest_state |= SCTP_ADDR_NO_PMTUD; net->mtu = paddrp->spp_pathmtu; switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: net->mtu += SCTP_MIN_V4_OVERHEAD; break; #endif #ifdef INET6 case AF_INET6: net->mtu += SCTP_MIN_OVERHEAD; break; #endif default: break; } if (net->mtu < stcb->asoc.smallest_mtu) { sctp_pathmtu_adjustment(stcb, net->mtu); } } stcb->asoc.default_mtu = paddrp->spp_pathmtu; sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } net->dest_state &= ~SCTP_ADDR_NO_PMTUD; } stcb->asoc.default_mtu = 0; sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } if (paddrp->spp_flags & SPP_DSCP) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { net->dscp = paddrp->spp_dscp & 0xfc; net->dscp |= 0x01; } stcb->asoc.default_dscp = paddrp->spp_dscp & 0xfc; stcb->asoc.default_dscp |= 0x01; } #ifdef INET6 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->ro._l_addr.sa.sa_family == AF_INET6) { net->flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff; net->flowlabel |= 0x80000000; } } stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff; stcb->asoc.default_flowlabel |= 0x80000000; } #endif } SCTP_TCB_UNLOCK(stcb); } else { /************************NO TCB, SET TO default stuff ******************/ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (paddrp->spp_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); /* * TOS/FLOWLABEL are set via the * corresponding options on the * socket */ if (paddrp->spp_pathmaxrxt != 0) { inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; } if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; else if (paddrp->spp_hbinterval != 0) { if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL; inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); } if (paddrp->spp_flags & SPP_HB_ENABLE) { if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; } else if (paddrp->spp_hbinterval) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); } sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } else if (paddrp->spp_flags & SPP_HB_DISABLE) { sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { inp->sctp_ep.default_mtu = 0; sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } else if (paddrp->spp_flags & SPP_PMTUD_DISABLE) { if (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU) { inp->sctp_ep.default_mtu = paddrp->spp_pathmtu; } sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } if (paddrp->spp_flags & SPP_DSCP) { inp->sctp_ep.default_dscp = paddrp->spp_dscp & 0xfc; inp->sctp_ep.default_dscp |= 0x01; } #ifdef INET6 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) { if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { inp->sctp_ep.default_flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff; inp->sctp_ep.default_flowlabel |= 0x80000000; } } #endif SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp,
NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_RTOINFO: { struct sctp_rtoinfo *srto; uint32_t new_init, new_min, new_max; SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize); SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id); if (stcb) { if (srto->srto_initial) new_init = srto->srto_initial; else new_init = stcb->asoc.initial_rto; if (srto->srto_max) new_max = srto->srto_max; else new_max = stcb->asoc.maxrto; if (srto->srto_min) new_min = srto->srto_min; else new_min = stcb->asoc.minrto; if ((new_min <= new_init) && (new_init <= new_max)) { stcb->asoc.initial_rto = new_init; stcb->asoc.maxrto = new_max; stcb->asoc.minrto = new_min; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (srto->srto_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); if (srto->srto_initial) new_init = srto->srto_initial; else new_init = inp->sctp_ep.initial_rto; if (srto->srto_max) new_max = srto->srto_max; else new_max = inp->sctp_ep.sctp_maxrto; if (srto->srto_min) new_min = srto->srto_min; else new_min = inp->sctp_ep.sctp_minrto; if ((new_min <= new_init) && (new_init <= new_max)) { inp->sctp_ep.initial_rto = new_init; inp->sctp_ep.sctp_maxrto = new_max; inp->sctp_ep.sctp_minrto = new_min; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_ASSOCINFO: { struct sctp_assocparams *sasoc; SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); if (sasoc->sasoc_cookie_life) { /* boundary check the cookie life */ if (sasoc->sasoc_cookie_life < 1000) sasoc->sasoc_cookie_life = 1000; if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; } } if (stcb) { if (sasoc->sasoc_asocmaxrxt) stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; if (sasoc->sasoc_cookie_life) { stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); if (sasoc->sasoc_asocmaxrxt) inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; if (sasoc->sasoc_cookie_life) { inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_INITMSG: { struct sctp_initmsg *sinit; SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize); SCTP_INP_WLOCK(inp); if (sinit->sinit_num_ostreams) inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams; if (sinit->sinit_max_instreams) inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams; if (sinit->sinit_max_attempts) inp->sctp_ep.max_init_times = sinit->sinit_max_attempts; if (sinit->sinit_max_init_timeo) inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo; SCTP_INP_WUNLOCK(inp); break; } case SCTP_PRIMARY_ADDR: { struct sctp_setprim *spa; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, 
optsize); SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id); #if defined(INET) && defined(INET6) if (spa->ssp_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&spa->ssp_addr; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&spa->ssp_addr; } } else { addr = (struct sockaddr *)&spa->ssp_addr; } #else addr = (struct sockaddr *)&spa->ssp_addr; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() will do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. */ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net != NULL)) { if (net != stcb->asoc.primary_destination) { if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { /* Ok we need to set it */ if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) { if ((stcb->asoc.alternate) && (!(net->dest_state & SCTP_ADDR_PF)) && (net->dest_state & SCTP_ADDR_REACHABLE)) { sctp_free_remote_addr(stcb->asoc.alternate); stcb->asoc.alternate = NULL; } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } break; } case SCTP_SET_DYNAMIC_PRIMARY: { union sctp_sockstore *ss; error = priv_check(curthread, PRIV_NETINET_RESERVEDPORT); if (error) break; SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize); /* SUPER USER CHECK?
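* priv_check() above already requires * PRIV_NETINET_RESERVEDPORT.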
*/ error = sctp_dynamic_set_primary(&ss->sa, vrf_id); break; } case SCTP_SET_PEER_PRIMARY_ADDR: { struct sctp_setpeerprim *sspp; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize); SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id); if (stcb != NULL) { struct sctp_ifa *ifa; #if defined(INET) && defined(INET6) if (sspp->sspp_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&sspp->sspp_addr; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&sspp->sspp_addr; } } else { addr = (struct sockaddr *)&sspp->sspp_addr; } #else addr = (struct sockaddr *)&sspp->sspp_addr; #endif ifa = sctp_find_ifa_by_addr(addr, stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED); if (ifa == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* * Must validate the ifa found is in * our ep */ struct sctp_laddr *laddr; int found = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", __func__); continue; } if ((sctp_is_addr_restricted(stcb, laddr->ifa)) && (!sctp_is_addr_pending(stcb, laddr->ifa))) { continue; } if (laddr->ifa == ifa) { found = 1; break; } } if (!found) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } } else { switch (addr->sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } break; } #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_of_it; } } if (sctp_set_primary_ip_address_sa(stcb, addr) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SOCKOPT, SCTP_SO_LOCKED); out_of_it: SCTP_TCB_UNLOCK(stcb); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } break; } case SCTP_BINDX_ADD_ADDR: { struct sctp_getaddresses *addrs; struct thread *td; td = (struct thread *)p; SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); #ifdef INET if (addrs->addr->sa_family == AF_INET) { if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (addrs->addr->sa_family == AF_INET6) { if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if 
(td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; break; } sctp_bindx_add_address(so, inp, addrs->addr, addrs->sget_assoc_id, vrf_id, &error, p); break; } case SCTP_BINDX_REM_ADDR: { struct sctp_getaddresses *addrs; struct thread *td; td = (struct thread *)p; SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); #ifdef INET if (addrs->addr->sa_family == AF_INET) { if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif #ifdef INET6 if (addrs->addr->sa_family == AF_INET6) { if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } else #endif { error = EAFNOSUPPORT; break; } sctp_bindx_delete_address(inp, addrs->addr, addrs->sget_assoc_id, vrf_id, &error); break; } case SCTP_EVENT: { struct sctp_event *event; uint32_t event_type; SCTP_CHECK_AND_CAST(event, optval, struct sctp_event, optsize); SCTP_FIND_STCB(inp, stcb, event->se_assoc_id); switch (event->se_type) { case SCTP_ASSOC_CHANGE: event_type = SCTP_PCB_FLAGS_RECVASSOCEVNT; break; case SCTP_PEER_ADDR_CHANGE: event_type = SCTP_PCB_FLAGS_RECVPADDREVNT; break; case SCTP_REMOTE_ERROR: event_type = SCTP_PCB_FLAGS_RECVPEERERR; break; case SCTP_SEND_FAILED: event_type = SCTP_PCB_FLAGS_RECVSENDFAILEVNT; break; case SCTP_SHUTDOWN_EVENT: event_type = SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT; break; case SCTP_ADAPTATION_INDICATION: event_type = SCTP_PCB_FLAGS_ADAPTATIONEVNT; break; case SCTP_PARTIAL_DELIVERY_EVENT: event_type = SCTP_PCB_FLAGS_PDAPIEVNT; break; case SCTP_AUTHENTICATION_EVENT: event_type = SCTP_PCB_FLAGS_AUTHEVNT; break; case SCTP_STREAM_RESET_EVENT: event_type = SCTP_PCB_FLAGS_STREAM_RESETEVNT; break; case SCTP_SENDER_DRY_EVENT: event_type = SCTP_PCB_FLAGS_DRYEVNT; break; case SCTP_NOTIFICATIONS_STOPPED_EVENT: event_type = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP); error = ENOTSUP; break; case SCTP_ASSOC_RESET_EVENT: event_type = SCTP_PCB_FLAGS_ASSOC_RESETEVNT; break; case SCTP_STREAM_CHANGE_EVENT: event_type = SCTP_PCB_FLAGS_STREAM_CHANGEEVNT; break; case SCTP_SEND_FAILED_EVENT: event_type = SCTP_PCB_FLAGS_RECVNSENDFAILEVNT; break; default: event_type = 0; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (event_type > 0) { if (stcb) { if (event->se_on) { sctp_stcb_feature_on(inp, stcb, event_type); if (event_type == SCTP_PCB_FLAGS_DRYEVNT) { if (TAILQ_EMPTY(&stcb->asoc.send_queue) && TAILQ_EMPTY(&stcb->asoc.sent_queue) && (stcb->asoc.stream_queue_cnt == 0)) { sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED); } } } else { sctp_stcb_feature_off(inp, stcb, event_type); } SCTP_TCB_UNLOCK(stcb); } else { /* * We 
don't want to send up a storm * of events, so return an error for * sender dry events */ if ((event_type == SCTP_PCB_FLAGS_DRYEVNT) && ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) && ((event->se_assoc_id == SCTP_ALL_ASSOC) || (event->se_assoc_id == SCTP_CURRENT_ASSOC))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP); error = ENOTSUP; break; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (event->se_assoc_id == SCTP_FUTURE_ASSOC) || (event->se_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); if (event->se_on) { sctp_feature_on(inp, event_type); } else { sctp_feature_off(inp, event_type); } SCTP_INP_WUNLOCK(inp); } if ((event->se_assoc_id == SCTP_CURRENT_ASSOC) || (event->se_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (event->se_on) { sctp_stcb_feature_on(inp, stcb, event_type); } else { sctp_stcb_feature_off(inp, stcb, event_type); } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } } else { if (stcb) { SCTP_TCB_UNLOCK(stcb); } } break; } case SCTP_RECVRCVINFO: { int *onoff; SCTP_CHECK_AND_CAST(onoff, optval, int, optsize); SCTP_INP_WLOCK(inp); if (*onoff != 0) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO); } SCTP_INP_WUNLOCK(inp); break; } case SCTP_RECVNXTINFO: { int *onoff; SCTP_CHECK_AND_CAST(onoff, optval, int, optsize); SCTP_INP_WLOCK(inp); if (*onoff != 0) { sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO); } else { sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO); } SCTP_INP_WUNLOCK(inp); break; } case SCTP_DEFAULT_SNDINFO: { struct sctp_sndinfo *info; uint16_t policy; SCTP_CHECK_AND_CAST(info, optval, struct sctp_sndinfo, optsize); SCTP_FIND_STCB(inp, stcb, info->snd_assoc_id); if (stcb) { if (info->snd_sid < stcb->asoc.streamoutcnt) { stcb->asoc.def_send.sinfo_stream = info->snd_sid; policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags); stcb->asoc.def_send.sinfo_flags = info->snd_flags; stcb->asoc.def_send.sinfo_flags |= policy; stcb->asoc.def_send.sinfo_ppid = info->snd_ppid; stcb->asoc.def_send.sinfo_context = info->snd_context; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (info->snd_assoc_id == SCTP_FUTURE_ASSOC) || (info->snd_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); inp->def_send.sinfo_stream = info->snd_sid; policy = PR_SCTP_POLICY(inp->def_send.sinfo_flags); inp->def_send.sinfo_flags = info->snd_flags; inp->def_send.sinfo_flags |= policy; inp->def_send.sinfo_ppid = info->snd_ppid; inp->def_send.sinfo_context = info->snd_context; SCTP_INP_WUNLOCK(inp); } if ((info->snd_assoc_id == SCTP_CURRENT_ASSOC) || (info->snd_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); if (info->snd_sid < stcb->asoc.streamoutcnt) { stcb->asoc.def_send.sinfo_stream = info->snd_sid; policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags); stcb->asoc.def_send.sinfo_flags = info->snd_flags; stcb->asoc.def_send.sinfo_flags |= policy; stcb->asoc.def_send.sinfo_ppid = info->snd_ppid; stcb->asoc.def_send.sinfo_context = info->snd_context; } SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_DEFAULT_PRINFO: { struct 
sctp_default_prinfo *info; SCTP_CHECK_AND_CAST(info, optval, struct sctp_default_prinfo, optsize); SCTP_FIND_STCB(inp, stcb, info->pr_assoc_id); if (info->pr_policy > SCTP_PR_SCTP_MAX) { if (stcb) { SCTP_TCB_UNLOCK(stcb); } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } if (stcb) { stcb->asoc.def_send.sinfo_flags &= 0xfff0; stcb->asoc.def_send.sinfo_flags |= info->pr_policy; stcb->asoc.def_send.sinfo_timetolive = info->pr_value; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (info->pr_assoc_id == SCTP_FUTURE_ASSOC) || (info->pr_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_WLOCK(inp); inp->def_send.sinfo_flags &= 0xfff0; inp->def_send.sinfo_flags |= info->pr_policy; inp->def_send.sinfo_timetolive = info->pr_value; SCTP_INP_WUNLOCK(inp); } if ((info->pr_assoc_id == SCTP_CURRENT_ASSOC) || (info->pr_assoc_id == SCTP_ALL_ASSOC)) { SCTP_INP_RLOCK(inp); LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { SCTP_TCB_LOCK(stcb); stcb->asoc.def_send.sinfo_flags &= 0xfff0; stcb->asoc.def_send.sinfo_flags |= info->pr_policy; stcb->asoc.def_send.sinfo_timetolive = info->pr_value; SCTP_TCB_UNLOCK(stcb); } SCTP_INP_RUNLOCK(inp); } } break; } case SCTP_PEER_ADDR_THLDS: /* Applies to the specific association */ { struct sctp_paddrthlds *thlds; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, optsize); SCTP_FIND_STCB(inp, stcb, thlds->spt_assoc_id); #if defined(INET) && defined(INET6) if (thlds->spt_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&thlds->spt_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&thlds->spt_address; } } else { addr = (struct sockaddr *)&thlds->spt_address; } #else addr = (struct sockaddr *)&thlds->spt_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() will do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL.
*/ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (thlds->spt_pathcpthld != 0xffff) { if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } if (stcb != NULL) { if (net != NULL) { net->failure_threshold = thlds->spt_pathmaxrxt; net->pf_threshold = thlds->spt_pathpfthld; if (net->dest_state & SCTP_ADDR_PF) { if ((net->error_count > net->failure_threshold) || (net->error_count <= net->pf_threshold)) { net->dest_state &= ~SCTP_ADDR_PF; } } else { if ((net->error_count > net->pf_threshold) && (net->error_count <= net->failure_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_17); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); } } if (net->dest_state & SCTP_ADDR_REACHABLE) { if (net->error_count > net->failure_threshold) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { if (net->error_count <= net->failure_threshold) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } } else { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { net->failure_threshold = thlds->spt_pathmaxrxt; net->pf_threshold = thlds->spt_pathpfthld; if (net->dest_state & SCTP_ADDR_PF) { if ((net->error_count > net->failure_threshold) || (net->error_count <= net->pf_threshold)) { net->dest_state &= ~SCTP_ADDR_PF; } } else { if ((net->error_count > net->pf_threshold) && (net->error_count <= net->failure_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_18); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); } } if (net->dest_state & SCTP_ADDR_REACHABLE) { if (net->error_count > net->failure_threshold) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { if (net->error_count <= net->failure_threshold) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } } stcb->asoc.def_net_failure = thlds->spt_pathmaxrxt; stcb->asoc.def_net_pf_threshold = thlds->spt_pathpfthld; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (thlds->spt_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); inp->sctp_ep.def_net_failure = thlds->spt_pathmaxrxt; 
inp->sctp_ep.def_net_pf_threshold = thlds->spt_pathpfthld; SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_REMOTE_UDP_ENCAPS_PORT: { struct sctp_udpencaps *encaps; struct sctp_nets *net; struct sockaddr *addr; #if defined(INET) && defined(INET6) struct sockaddr_in sin_store; #endif SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, optsize); SCTP_FIND_STCB(inp, stcb, encaps->sue_assoc_id); #if defined(INET) && defined(INET6) if (encaps->sue_address.ss_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&encaps->sue_address; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin_store, sin6); addr = (struct sockaddr *)&sin_store; } else { addr = (struct sockaddr *)&encaps->sue_address; } } else { addr = (struct sockaddr *)&encaps->sue_address; } #else addr = (struct sockaddr *)&encaps->sue_address; #endif if (stcb != NULL) { net = sctp_findnet(stcb, addr); } else { /* * We increment here since * sctp_findassociation_ep_addr() will do a * decrement if it finds the stcb as long as * the locked tcb (last argument) is NOT a * TCB.. aka NULL. */ net = NULL; SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } } if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif #ifdef INET6 if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); SCTP_TCB_UNLOCK(stcb); error = EINVAL; break; } } else #endif { error = EAFNOSUPPORT; SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } } if (stcb != NULL) { if (net != NULL) { net->port = encaps->sue_port; } else { stcb->asoc.port = encaps->sue_port; } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (encaps->sue_assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); inp->sctp_ep.port = encaps->sue_port; SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_ECN_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->ecn_supported = 0; } else { inp->ecn_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_PR_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags &
SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->prsctp_supported = 0; } else { inp->prsctp_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_AUTH_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { if ((av->assoc_value == 0) && (inp->asconf_supported == 1)) { /* * AUTH is required for * ASCONF */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->auth_supported = 0; } else { inp->auth_supported = 1; } SCTP_INP_WUNLOCK(inp); } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_ASCONF_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { if ((av->assoc_value != 0) && (inp->auth_supported == 0)) { /* * AUTH is required for * ASCONF */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->asconf_supported = 0; sctp_auth_delete_chunk(SCTP_ASCONF, inp->sctp_ep.local_auth_chunks); sctp_auth_delete_chunk(SCTP_ASCONF_ACK, inp->sctp_ep.local_auth_chunks); } else { inp->asconf_supported = 1; sctp_auth_add_chunk(SCTP_ASCONF, inp->sctp_ep.local_auth_chunks); sctp_auth_add_chunk(SCTP_ASCONF_ACK, inp->sctp_ep.local_auth_chunks); } SCTP_INP_WUNLOCK(inp); } } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_RECONFIG_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->reconfig_supported = 0; } else { inp->reconfig_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_NRSACK_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { 
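/* 0 disables, non-zero enables NR-SACK for future assocs */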
inp->nrsack_supported = 0; } else { inp->nrsack_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_PKTDROP_SUPPORTED: { struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); if (av->assoc_value == 0) { inp->pktdrop_supported = 0; } else { inp->pktdrop_supported = 1; } SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } case SCTP_MAX_CWND: { struct sctp_assoc_value *av; struct sctp_nets *net; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); SCTP_FIND_STCB(inp, stcb, av->assoc_id); if (stcb) { stcb->asoc.max_cwnd = av->assoc_value; if (stcb->asoc.max_cwnd > 0) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if ((net->cwnd > stcb->asoc.max_cwnd) && (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) { net->cwnd = stcb->asoc.max_cwnd; if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) { net->cwnd = net->mtu - sizeof(struct sctphdr); } } } } SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || (av->assoc_id == SCTP_FUTURE_ASSOC)) { SCTP_INP_WLOCK(inp); inp->max_cwnd = av->assoc_value; SCTP_INP_WUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } } break; } default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; break; } /* end switch (opt) */ return (error); } int sctp_ctloutput(struct socket *so, struct sockopt *sopt) { void *optval = NULL; size_t optsize = 0; void *p; int error = 0; struct sctp_inpcb *inp; if ((sopt->sopt_level == SOL_SOCKET) && (sopt->sopt_name == SO_SETFIB)) { inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } SCTP_INP_WLOCK(inp); inp->fibnum = so->so_fibnum; SCTP_INP_WUNLOCK(inp); return (0); } if (sopt->sopt_level != IPPROTO_SCTP) { /* wrong proto level...
send back up to IP */ #ifdef INET6 if (INP_CHECK_SOCKAF(so, AF_INET6)) error = ip6_ctloutput(so, sopt); #endif /* INET6 */ #if defined(INET) && defined(INET6) else #endif #ifdef INET error = ip_ctloutput(so, sopt); #endif return (error); } optsize = sopt->sopt_valsize; if (optsize > SCTP_SOCKET_OPTION_LIMIT) { SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); return (ENOBUFS); } if (optsize) { SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT); if (optval == NULL) { SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS); return (ENOBUFS); } error = sooptcopyin(sopt, optval, optsize, optsize); if (error) { SCTP_FREE(optval, SCTP_M_SOCKOPT); goto out; } } p = (void *)sopt->sopt_td; if (sopt->sopt_dir == SOPT_SET) { error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); } else if (sopt->sopt_dir == SOPT_GET) { error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); } else { SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } if ((error == 0) && (optval != NULL)) { error = sooptcopyout(sopt, optval, optsize); SCTP_FREE(optval, SCTP_M_SOCKOPT); } else if (optval != NULL) { SCTP_FREE(optval, SCTP_M_SOCKOPT); } out: return (error); } #ifdef INET static int sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) { int error = 0; int create_lock_on = 0; uint32_t vrf_id; struct sctp_inpcb *inp; struct sctp_tcb *stcb = NULL; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { /* I made the same as TCP since we are not setup? */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } if (addr == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return EINVAL; } switch (addr->sa_family) { #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; if (addr->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } sin6 = (struct sockaddr_in6 *)addr; if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6->sin6_addr)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); return (error); } break; } #endif #ifdef INET case AF_INET: { - struct sockaddr_in *sinp; + struct sockaddr_in *sin; if (addr->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } - sinp = (struct sockaddr_in *)addr; - if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) { + sin = (struct sockaddr_in *)addr; + if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sin->sin_addr)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); return (error); } break; } #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT); return (EAFNOSUPPORT); } SCTP_INP_INCR_REF(inp); SCTP_ASOC_CREATE_LOCK(inp); create_lock_on = 1; if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { /* Should I really unlock ? 
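 *
 * Back on the option handlers above: driving these feature toggles
 * from userland is one setsockopt(2) per option. A minimal sketch
 * (fd is assumed to be an SCTP socket; struct sctp_assoc_value and
 * the constants come from the SCTP headers, <netinet/sctp.h> and
 * <netinet/sctp_uio.h>; order matters, since enabling ASCONF while
 * AUTH is disabled is rejected with EINVAL):
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = SCTP_FUTURE_ASSOC;
 *	av.assoc_value = 1;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_SUPPORTED,
 *	    &av, sizeof(av)) < 0)
 *		err(1, "SCTP_AUTH_SUPPORTED");
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_ASCONF_SUPPORTED,
 *	    &av, sizeof(av)) < 0)
 *		err(1, "SCTP_ASCONF_SUPPORTED");
 *
 * These endpoint defaults must be set before an association exists;
 * when the lookup finds a live stcb the handlers return EINVAL.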
*/ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT); error = EFAULT; goto out_now; } #ifdef INET6 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && (addr->sa_family == AF_INET6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_now; } #endif if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == SCTP_PCB_FLAGS_UNBOUND) { /* Bind an ephemeral port */ error = sctp_inpcb_bind(so, NULL, NULL, p); if (error) { goto out_now; } } /* Now do we connect? */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; goto out_now; } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); error = EADDRINUSE; goto out_now; } if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); SCTP_INP_RUNLOCK(inp); } else { /* * We increment here since sctp_findassociation_ep_addr() * will do a decrement if it finds the stcb as long as the * locked tcb (last argument) is NOT a TCB.. aka NULL. */ SCTP_INP_INCR_REF(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); if (stcb == NULL) { SCTP_INP_DECR_REF(inp); } else { SCTP_TCB_UNLOCK(stcb); } } if (stcb != NULL) { /* Already have or am bringing up an association */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY); error = EALREADY; goto out_now; } vrf_id = inp->def_vrf_id; /* We are GOOD to go */ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, inp->sctp_ep.pre_open_stream_count, inp->sctp_ep.port, p, SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Gak! no memory */ goto out_now; } if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; /* Set the connected flag so we can queue data */ soisconnecting(so); } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); out_now: if (create_lock_on) { SCTP_ASOC_CREATE_UNLOCK(inp); } SCTP_INP_DECR_REF(inp); return (error); } #endif int sctp_listen(struct socket *so, int backlog, struct thread *p) { /* * Note this module depends on the protocol processing being called * AFTER any socket level flags and backlog are applied to the * socket. The traditional way that the socket flags are applied is * AFTER protocol processing. We have made a change to the * sys/kern/uipc_socket.c module to reverse this but this MUST be in * place if the socket API for SCTP is to work properly. */ int error = 0; struct sctp_inpcb *inp; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { /* I made the same as TCP since we are not setup? 
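 *
 * Aside, on the lookup in sctp_connect() above, a sketch of the
 * reference protocol: the caller takes a hold on the inp first,
 * because sctp_findassociation_ep_addr() consumes that hold when it
 * finds an stcb (and the locked-tcb argument is NULL); only on a
 * NULL result does the caller still own the reference, and a found
 * stcb comes back locked:
 *
 *	SCTP_INP_INCR_REF(inp);
 *	stcb = sctp_findassociation_ep_addr(&inp, addr,
 *	    NULL, NULL, NULL);
 *	if (stcb == NULL)
 *		SCTP_INP_DECR_REF(inp);
 *	else
 *		SCTP_TCB_UNLOCK(stcb);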
*/ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) { /* See if we have a listener */ struct sctp_inpcb *tinp; union sctp_sockstore store; if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { /* not bound all */ struct sctp_laddr *laddr; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { memcpy(&store, &laddr->ifa->address, sizeof(store)); switch (store.sa.sa_family) { #ifdef INET case AF_INET: store.sin.sin_port = inp->sctp_lport; break; #endif #ifdef INET6 case AF_INET6: store.sin6.sin6_port = inp->sctp_lport; break; #endif default: break; } tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id); if (tinp && (tinp != inp) && ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && (SCTP_IS_LISTENING(tinp))) { /* * we have a listener already and * it's not this inp. */ SCTP_INP_DECR_REF(tinp); return (EADDRINUSE); } else if (tinp) { SCTP_INP_DECR_REF(tinp); } } } else { /* Set up a local addr bound all */ memset(&store, 0, sizeof(store)); #ifdef INET6 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { store.sa.sa_family = AF_INET6; store.sa.sa_len = sizeof(struct sockaddr_in6); } #endif #ifdef INET if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { store.sa.sa_family = AF_INET; store.sa.sa_len = sizeof(struct sockaddr_in); } #endif switch (store.sa.sa_family) { #ifdef INET case AF_INET: store.sin.sin_port = inp->sctp_lport; break; #endif #ifdef INET6 case AF_INET6: store.sin6.sin6_port = inp->sctp_lport; break; #endif default: break; } tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id); if (tinp && (tinp != inp) && ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) && ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && (SCTP_IS_LISTENING(tinp))) { /* * we have a listener already and it's not * this inp. */ SCTP_INP_DECR_REF(tinp); return (EADDRINUSE); } else if (tinp) { SCTP_INP_DECR_REF(tinp); } } } SCTP_INP_RLOCK(inp); #ifdef SCTP_LOCK_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); } #endif SOCK_LOCK(so); error = solisten_proto_check(so); SOCK_UNLOCK(so); if (error) { SCTP_INP_RUNLOCK(inp); return (error); } if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* * The unlucky case - We are in the tcp pool with this guy. * - Someone else is in the main inp slot. - We must move * this guy (the listener) to the main slot - We must then * move the guy that was listener to the TCP Pool. */ if (sctp_swap_inpcb_for_listen(inp)) { SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); return (EADDRINUSE); } } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); return (EADDRINUSE); } SCTP_INP_RUNLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { /* We must do a bind. 
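 *
 * Stripped of the per-family plumbing, each probe of the port-reuse
 * scan above reduces to the sketch below; sctp_pcb_findep() hands
 * back its result with a reference held, so every non-NULL tinp is
 * released with SCTP_INP_DECR_REF() (the SOCKET_GONE/ALLGONE checks
 * are omitted here):
 *
 *	tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id);
 *	if (tinp != NULL && tinp != inp &&
 *	    SCTP_IS_LISTENING(tinp)) {
 *		SCTP_INP_DECR_REF(tinp);
 *		return (EADDRINUSE);
 *	} else if (tinp != NULL)
 *		SCTP_INP_DECR_REF(tinp);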
*/ if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) { /* bind error, probably perm */ return (error); } } SCTP_INP_WLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) { SOCK_LOCK(so); solisten_proto(so, backlog); SOCK_UNLOCK(so); } if (backlog > 0) { inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING; } else { inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING; } SCTP_INP_WUNLOCK(inp); return (error); } static int sctp_defered_wakeup_cnt = 0; int sctp_accept(struct socket *so, struct sockaddr **addr) { struct sctp_tcb *stcb; struct sctp_inpcb *inp; union sctp_sockstore store; #ifdef INET6 int error; #endif inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } SCTP_INP_RLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP); return (EOPNOTSUPP); } if (so->so_state & SS_ISDISCONNECTED) { SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED); return (ECONNABORTED); } stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } SCTP_TCB_LOCK(stcb); SCTP_INP_RUNLOCK(inp); store = stcb->asoc.primary_destination->ro._l_addr; SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); SCTP_TCB_UNLOCK(stcb); switch (store.sa.sa_family) { #ifdef INET case AF_INET: { struct sockaddr_in *sin; SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); if (sin == NULL) return (ENOMEM); sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_port = store.sin.sin_port; sin->sin_addr = store.sin.sin_addr; *addr = (struct sockaddr *)sin; break; } #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6; SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); if (sin6 == NULL) return (ENOMEM); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); sin6->sin6_port = store.sin6.sin6_port; sin6->sin6_addr = store.sin6.sin6_addr; if ((error = sa6_recoverscope(sin6)) != 0) { SCTP_FREE_SONAME(sin6); return (error); } *addr = (struct sockaddr *)sin6; break; } #endif default: /* TSNH */ break; } /* Wake any delayed sleep action */ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { SCTP_INP_WLOCK(inp); inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE; if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; SCTP_INP_WUNLOCK(inp); SOCKBUF_LOCK(&inp->sctp_socket->so_snd); if (sowriteable(inp->sctp_socket)) { sowwakeup_locked(inp->sctp_socket); } else { SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd); } SCTP_INP_WLOCK(inp); } if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; SCTP_INP_WUNLOCK(inp); SOCKBUF_LOCK(&inp->sctp_socket->so_rcv); if (soreadable(inp->sctp_socket)) { sctp_defered_wakeup_cnt++; sorwakeup_locked(inp->sctp_socket); } else { SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv); } SCTP_INP_WLOCK(inp); } SCTP_INP_WUNLOCK(inp); } if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { SCTP_TCB_LOCK(stcb); sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_19); } return (0); } #ifdef INET int sctp_ingetaddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in *sin; uint32_t vrf_id; struct sctp_inpcb *inp; struct sctp_ifa *sctp_ifa; /* * Do the malloc first in case it blocks. 
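 *
 * The deferred-wakeup replay in sctp_accept() above follows the same
 * shape for both directions; a sketch of the send side, where so is
 * shorthand for inp->sctp_socket (recall that sowwakeup_locked()
 * consumes the sockbuf lock, so only the not-writeable branch
 * unlocks explicitly):
 *
 *	if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
 *		inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
 *		SOCKBUF_LOCK(&so->so_snd);
 *		if (sowriteable(so))
 *			sowwakeup_locked(so);
 *		else
 *			SOCKBUF_UNLOCK(&so->so_snd);
 *	}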
*/ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); if (sin == NULL) return (ENOMEM); sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); inp = (struct sctp_inpcb *)so->so_pcb; if (!inp) { SCTP_FREE_SONAME(sin); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } SCTP_INP_RLOCK(inp); sin->sin_port = inp->sctp_lport; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { struct sctp_tcb *stcb; struct sockaddr_in *sin_a; struct sctp_nets *net; int fnd; stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { goto notConn; } fnd = 0; sin_a = NULL; SCTP_TCB_LOCK(stcb); TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a = (struct sockaddr_in *)&net->ro._l_addr; if (sin_a == NULL) /* this will make coverity happy */ continue; if (sin_a->sin_family == AF_INET) { fnd = 1; break; } } if ((!fnd) || (sin_a == NULL)) { /* punt */ SCTP_TCB_UNLOCK(stcb); goto notConn; } vrf_id = inp->def_vrf_id; sctp_ifa = sctp_source_address_selection(inp, stcb, (sctp_route_t *)&net->ro, net, 0, vrf_id); if (sctp_ifa) { sin->sin_addr = sctp_ifa->address.sin.sin_addr; sctp_free_ifa(sctp_ifa); } SCTP_TCB_UNLOCK(stcb); } else { /* For the bound all case you get back 0 */ notConn: sin->sin_addr.s_addr = 0; } } else { /* Take the first IPv4 address in the list */ struct sctp_laddr *laddr; int fnd = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa->address.sa.sa_family == AF_INET) { struct sockaddr_in *sin_a; sin_a = &laddr->ifa->address.sin; sin->sin_addr = sin_a->sin_addr; fnd = 1; break; } } if (!fnd) { SCTP_FREE_SONAME(sin); SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); return (ENOENT); } } SCTP_INP_RUNLOCK(inp); (*addr) = (struct sockaddr *)sin; return (0); } int sctp_peeraddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in *sin; int fnd; struct sockaddr_in *sin_a; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; /* Do the malloc first in case it blocks. 
*/ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); if (sin == NULL) return (ENOMEM); sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); inp = (struct sctp_inpcb *)so->so_pcb; if ((inp == NULL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { /* UDP type and listeners will drop out here */ SCTP_FREE_SONAME(sin); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); return (ENOTCONN); } SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); if (stcb == NULL) { SCTP_FREE_SONAME(sin); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (ECONNRESET); } fnd = 0; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a = (struct sockaddr_in *)&net->ro._l_addr; if (sin_a->sin_family == AF_INET) { fnd = 1; sin->sin_port = stcb->rport; sin->sin_addr = sin_a->sin_addr; break; } } SCTP_TCB_UNLOCK(stcb); if (!fnd) { /* No IPv4 address */ SCTP_FREE_SONAME(sin); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); return (ENOENT); } (*addr) = (struct sockaddr *)sin; return (0); } struct pr_usrreqs sctp_usrreqs = { .pru_abort = sctp_abort, .pru_accept = sctp_accept, .pru_attach = sctp_attach, .pru_bind = sctp_bind, .pru_connect = sctp_connect, .pru_control = in_control, .pru_close = sctp_close, .pru_detach = sctp_close, .pru_sopoll = sopoll_generic, .pru_flush = sctp_flush, .pru_disconnect = sctp_disconnect, .pru_listen = sctp_listen, .pru_peeraddr = sctp_peeraddr, .pru_send = sctp_sendm, .pru_shutdown = sctp_shutdown, .pru_sockaddr = sctp_ingetaddr, .pru_sosend = sctp_sosend, .pru_soreceive = sctp_soreceive }; #endif Index: head/sys/netinet6/sctp6_usrreq.c =================================================================== --- head/sys/netinet6/sctp6_usrreq.c (revision 350587) +++ head/sys/netinet6/sctp6_usrreq.c (revision 350588) @@ -1,1178 +1,1178 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * a) Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * b) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the distribution. * * c) Neither the name of Cisco Systems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef INET6 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern struct protosw inetsw[]; int sctp6_input_with_port(struct mbuf **i_pak, int *offp, uint16_t port) { struct mbuf *m; int iphlen; uint32_t vrf_id; uint8_t ecn_bits; struct sockaddr_in6 src, dst; struct ip6_hdr *ip6; struct sctphdr *sh; struct sctp_chunkhdr *ch; int length, offset; uint8_t compute_crc; uint32_t mflowid; uint8_t mflowtype; uint16_t fibnum; iphlen = *offp; if (SCTP_GET_PKT_VRFID(*i_pak, vrf_id)) { SCTP_RELEASE_PKT(*i_pak); return (IPPROTO_DONE); } m = SCTP_HEADER_TO_CHAIN(*i_pak); #ifdef SCTP_MBUF_LOGGING /* Log in any input mbufs */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { sctp_log_mbc(m, SCTP_MBUF_INPUT); } #endif #ifdef SCTP_PACKET_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { sctp_packet_log(m); } #endif SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, "sctp6_input(): Packet of length %d received on %s with csum_flags 0x%b.\n", m->m_pkthdr.len, if_name(m->m_pkthdr.rcvif), (int)m->m_pkthdr.csum_flags, CSUM_BITS); mflowid = m->m_pkthdr.flowid; mflowtype = M_HASHTYPE_GET(m); fibnum = M_GETFIB(m); SCTP_STAT_INCR(sctps_recvpackets); SCTP_STAT_INCR_COUNTER64(sctps_inpackets); /* Get IP, SCTP, and first chunk header together in the first mbuf. */ offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); ip6 = mtod(m, struct ip6_hdr *); IP6_EXTHDR_GET(sh, struct sctphdr *, m, iphlen, (int)(sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr))); if (sh == NULL) { SCTP_STAT_INCR(sctps_hdrops); return (IPPROTO_DONE); } ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); offset -= sizeof(struct sctp_chunkhdr); memset(&src, 0, sizeof(struct sockaddr_in6)); src.sin6_family = AF_INET6; src.sin6_len = sizeof(struct sockaddr_in6); src.sin6_port = sh->src_port; src.sin6_addr = ip6->ip6_src; if (in6_setscope(&src.sin6_addr, m->m_pkthdr.rcvif, NULL) != 0) { goto out; } memset(&dst, 0, sizeof(struct sockaddr_in6)); dst.sin6_family = AF_INET6; dst.sin6_len = sizeof(struct sockaddr_in6); dst.sin6_port = sh->dest_port; dst.sin6_addr = ip6->ip6_dst; if (in6_setscope(&dst.sin6_addr, m->m_pkthdr.rcvif, NULL) != 0) { goto out; } length = ntohs(ip6->ip6_plen) + iphlen; /* Validate mbuf chain length with IP payload length. 
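 *
 * And just below, the ecn_bits computation unpacks the first 32 bits
 * of the IPv6 header, which are laid out as version (4 bits), traffic
 * class (8 bits), flow label (20 bits): shifting out the flow label
 * and masking off the version leaves the traffic class, whose low
 * two bits carry the ECN codepoint. As a sketch:
 *
 *	tc  = (ntohl(ip6->ip6_flow) >> 20) & 0x000000ff;
 *	ecn = tc & 0x03;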
*/ if (SCTP_HEADER_LEN(m) != length) { SCTPDBG(SCTP_DEBUG_INPUT1, "sctp6_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m)); SCTP_STAT_INCR(sctps_hdrops); goto out; } if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { goto out; } ecn_bits = ((ntohl(ip6->ip6_flow) >> 20) & 0x000000ff); if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { SCTP_STAT_INCR(sctps_recvhwcrc); compute_crc = 0; } else { SCTP_STAT_INCR(sctps_recvswcrc); compute_crc = 1; } sctp_common_input_processing(&m, iphlen, offset, length, (struct sockaddr *)&src, (struct sockaddr *)&dst, sh, ch, compute_crc, ecn_bits, mflowtype, mflowid, fibnum, vrf_id, port); out: if (m) { sctp_m_freem(m); } return (IPPROTO_DONE); } int sctp6_input(struct mbuf **i_pak, int *offp, int proto SCTP_UNUSED) { return (sctp6_input_with_port(i_pak, offp, 0)); } void sctp6_notify(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, uint8_t icmp6_type, uint8_t icmp6_code, uint32_t next_mtu) { #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) struct socket *so; #endif int timer_stopped; switch (icmp6_type) { case ICMP6_DST_UNREACH: if ((icmp6_code == ICMP6_DST_UNREACH_NOROUTE) || (icmp6_code == ICMP6_DST_UNREACH_ADMIN) || (icmp6_code == ICMP6_DST_UNREACH_BEYONDSCOPE) || (icmp6_code == ICMP6_DST_UNREACH_ADDR)) { /* Mark the net unreachable. */ if (net->dest_state & SCTP_ADDR_REACHABLE) { /* Ok that destination is not reachable */ net->dest_state &= ~SCTP_ADDR_REACHABLE; net->dest_state &= ~SCTP_ADDR_PF; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, (void *)net, SCTP_SO_NOT_LOCKED); } } SCTP_TCB_UNLOCK(stcb); break; case ICMP6_PARAM_PROB: /* Treat it like an ABORT. */ if (icmp6_code == ICMP6_PARAMPROB_NEXTHEADER) { sctp_abort_notification(stcb, 1, 0, NULL, SCTP_SO_NOT_LOCKED); #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) so = SCTP_INP_SO(inp); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) SCTP_SOCKET_UNLOCK(so, 1); #endif } else { SCTP_TCB_UNLOCK(stcb); } break; case ICMP6_PACKET_TOO_BIG: if (net->dest_state & SCTP_ADDR_NO_PMTUD) { SCTP_TCB_UNLOCK(stcb); break; } if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) { timer_stopped = 1; sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1); } else { timer_stopped = 0; } /* Update the path MTU. */ if (net->port) { next_mtu -= sizeof(struct udphdr); } if (net->mtu > next_mtu) { net->mtu = next_mtu; } /* Update the association MTU */ if (stcb->asoc.smallest_mtu > next_mtu) { sctp_pathmtu_adjustment(stcb, next_mtu); } /* Finally, start the PMTU timer if it was running before. */ if (timer_stopped) { sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); } SCTP_TCB_UNLOCK(stcb); break; default: SCTP_TCB_UNLOCK(stcb); break; } } void sctp6_ctlinput(int cmd, struct sockaddr *pktdst, void *d) { struct ip6ctlparam *ip6cp; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; struct sctphdr sh; struct sockaddr_in6 src, dst; if (pktdst->sa_family != AF_INET6 || pktdst->sa_len != sizeof(struct sockaddr_in6)) { return; } if ((unsigned)cmd >= PRC_NCMDS) { return; } if (PRC_IS_REDIRECT(cmd)) { d = NULL; } else if (inet6ctlerrmap[cmd] == 0) { return; } /* If the parameter is from icmp6, decode it. 
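 *
 * Worked example for the ICMP6_PACKET_TOO_BIG leg of sctp6_notify()
 * above: suppose a router reports an MTU of 1400 for a path that is
 * UDP-encapsulated (net->port != 0, RFC 6951). The 8-byte UDP header
 * comes off first, leaving 1392, and the new value is only ever
 * applied downward:
 *
 *	if (net->port != 0)
 *		next_mtu -= sizeof(struct udphdr);
 *	if (net->mtu > next_mtu)
 *		net->mtu = next_mtu;
 *	if (stcb->asoc.smallest_mtu > next_mtu)
 *		sctp_pathmtu_adjustment(stcb, next_mtu);
 */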
*/ if (d != NULL) { ip6cp = (struct ip6ctlparam *)d; } else { ip6cp = (struct ip6ctlparam *)NULL; } if (ip6cp != NULL) { /* * XXX: We assume that when IPV6 is non NULL, M and OFF are * valid. */ if (ip6cp->ip6c_m == NULL) { return; } /* * Check if we can safely examine the ports and the * verification tag of the SCTP common header. */ if (ip6cp->ip6c_m->m_pkthdr.len < (int32_t)(ip6cp->ip6c_off + offsetof(struct sctphdr, checksum))) { return; } /* Copy out the port numbers and the verification tag. */ memset(&sh, 0, sizeof(sh)); m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), (caddr_t)&sh); memset(&src, 0, sizeof(struct sockaddr_in6)); src.sin6_family = AF_INET6; src.sin6_len = sizeof(struct sockaddr_in6); src.sin6_port = sh.src_port; src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { return; } memset(&dst, 0, sizeof(struct sockaddr_in6)); dst.sin6_family = AF_INET6; dst.sin6_len = sizeof(struct sockaddr_in6); dst.sin6_port = sh.dest_port; dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { return; } inp = NULL; net = NULL; stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, (struct sockaddr *)&src, &inp, &net, 1, SCTP_DEFAULT_VRFID); if ((stcb != NULL) && (net != NULL) && (inp != NULL)) { /* Check the verification tag */ if (ntohl(sh.v_tag) != 0) { /* * This must be the verification tag used * for sending out packets. We don't * consider packets reflecting the * verification tag. */ if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { SCTP_TCB_UNLOCK(stcb); return; } } else { if (ip6cp->ip6c_m->m_pkthdr.len >= ip6cp->ip6c_off + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) + offsetof(struct sctp_init, a_rwnd)) { /* * In this case we can check if we * got an INIT chunk and if the * initiate tag matches. 
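 *
 * Summarizing the tag validation here: an ICMPv6 message quotes a
 * packet this host sent, so a sketch of the accept conditions is
 *
 *	quoted v_tag != 0:  ntohl(v_tag) == stcb->asoc.peer_vtag
 *	                    (the tag we place in outgoing packets;
 *	                    reflected or forged tags fail this)
 *	quoted v_tag == 0:  the quoted chunk must be an INIT and its
 *	                    initiate_tag must equal stcb->asoc.my_vtag
 *
 * Anything else unlocks the stcb and drops the ICMP message.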
*/ uint32_t initiate_tag; uint8_t chunk_type; m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off + sizeof(struct sctphdr), sizeof(uint8_t), (caddr_t)&chunk_type); m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr), sizeof(uint32_t), (caddr_t)&initiate_tag); if ((chunk_type != SCTP_INITIATION) || (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { SCTP_TCB_UNLOCK(stcb); return; } } else { SCTP_TCB_UNLOCK(stcb); return; } } sctp6_notify(inp, stcb, net, ip6cp->ip6c_icmp6->icmp6_type, ip6cp->ip6c_icmp6->icmp6_code, ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); } else { if ((stcb == NULL) && (inp != NULL)) { /* reduce inp's ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } if (stcb) { SCTP_TCB_UNLOCK(stcb); } } } } /* * this routine can probably be collapsed into the one in sctp_usrreq.c * since they do the same thing and now we lookup with a sockaddr */ static int sctp6_getcred(SYSCTL_HANDLER_ARGS) { struct xucred xuc; struct sockaddr_in6 addrs[2]; struct sctp_inpcb *inp; struct sctp_nets *net; struct sctp_tcb *stcb; int error; uint32_t vrf_id; vrf_id = SCTP_DEFAULT_VRFID; error = priv_check(req->td, PRIV_NETINET_GETCRED); if (error) return (error); if (req->newlen != sizeof(addrs)) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (req->oldlen != sizeof(struct ucred)) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } error = SYSCTL_IN(req, addrs, sizeof(addrs)); if (error) return (error); stcb = sctp_findassociation_addr_sa(sin6tosa(&addrs[1]), sin6tosa(&addrs[0]), &inp, &net, 1, vrf_id); if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) { if ((inp != NULL) && (stcb == NULL)) { /* reduce ref-count */ SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); goto cred_can_cont; } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); error = ENOENT; goto out; } SCTP_TCB_UNLOCK(stcb); /* * We use the write lock here, only since in the error leg we need * it. If we used RLOCK, then we would have to * wlock/decr/unlock/rlock, which in theory could create a hole. * Better to use higher wlock. */ SCTP_INP_WLOCK(inp); cred_can_cont: error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket); if (error) { SCTP_INP_WUNLOCK(inp); goto out; } cru2x(inp->sctp_socket->so_cred, &xuc); SCTP_INP_WUNLOCK(inp); error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); out: return (error); } SYSCTL_PROC(_net_inet6_sctp6, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, 0, 0, sctp6_getcred, "S,ucred", "Get the ucred of a SCTP6 connection"); /* This is the same as sctp_abort() and could be made common */ static void sctp6_abort(struct socket *so) { struct sctp_inpcb *inp; uint32_t flags; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return; } sctp_must_try_again: flags = inp->sctp_flags; #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 17); #endif if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 16); #endif sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); SOCK_LOCK(so); SCTP_SB_CLEAR(so->so_snd); /* * same for the rcv ones, they are only here for the * accounting/select. */ SCTP_SB_CLEAR(so->so_rcv); /* Now null out the reference, we are completely detached. 
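 *
 * The sctp_must_try_again loop above is a standard lock-free flag
 * transition, restated as a sketch: retry until this thread either
 * wins the race to set SOCKET_GONE (and frees the inp exactly once)
 * or observes that another thread already has:
 *
 *	for (;;) {
 *		flags = inp->sctp_flags;
 *		if (flags & SCTP_PCB_FLAGS_SOCKET_GONE)
 *			break;
 *		if (atomic_cmpset_int(&inp->sctp_flags, flags,
 *		    flags | SCTP_PCB_FLAGS_SOCKET_GONE |
 *		    SCTP_PCB_FLAGS_CLOSE_IP)) {
 *			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
 *			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
 *			break;
 *		}
 *	}
 */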
*/ so->so_pcb = NULL; SOCK_UNLOCK(so); } else { flags = inp->sctp_flags; if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { goto sctp_must_try_again; } } return; } static int sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED) { int error; struct sctp_inpcb *inp; uint32_t vrf_id = SCTP_DEFAULT_VRFID; inp = (struct sctp_inpcb *)so->so_pcb; if (inp != NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace)); if (error) return (error); } error = sctp_inpcb_alloc(so, vrf_id); if (error) return (error); inp = (struct sctp_inpcb *)so->so_pcb; SCTP_INP_WLOCK(inp); inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_V6; /* I'm v6! */ inp->ip_inp.inp.inp_vflag |= INP_IPV6; inp->ip_inp.inp.in6p_hops = -1; /* use kernel default */ inp->ip_inp.inp.in6p_cksum = -1; /* just to be sure */ #ifdef INET /* * XXX: ugly!! IPv4 TTL initialization is necessary for an IPv6 * socket as well, because the socket may be bound to an IPv6 * wildcard address, which may match an IPv4-mapped IPv6 address. */ inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl); #endif SCTP_INP_WUNLOCK(inp); return (0); } static int sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p) { struct sctp_inpcb *inp; int error; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (addr) { switch (addr->sa_family) { #ifdef INET case AF_INET: if (addr->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif #ifdef INET6 case AF_INET6: if (addr->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } } inp->ip_inp.inp.inp_vflag &= ~INP_IPV4; inp->ip_inp.inp.inp_vflag |= INP_IPV6; - if ((addr != NULL) && (SCTP_IPV6_V6ONLY(&inp->ip_inp.inp) == 0)) { + if ((addr != NULL) && (SCTP_IPV6_V6ONLY(inp) == 0)) { switch (addr->sa_family) { #ifdef INET case AF_INET: /* binding v4 addr to v6 socket, so reset flags */ inp->ip_inp.inp.inp_vflag |= INP_IPV4; inp->ip_inp.inp.inp_vflag &= ~INP_IPV6; break; #endif #ifdef INET6 case AF_INET6: { struct sockaddr_in6 *sin6_p; sin6_p = (struct sockaddr_in6 *)addr; if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr)) { inp->ip_inp.inp.inp_vflag |= INP_IPV4; } #ifdef INET if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) { struct sockaddr_in sin; in6_sin6_2_sin(&sin, sin6_p); inp->ip_inp.inp.inp_vflag |= INP_IPV4; inp->ip_inp.inp.inp_vflag &= ~INP_IPV6; error = sctp_inpcb_bind(so, (struct sockaddr *)&sin, NULL, p); return (error); } #endif break; } #endif default: break; } } else if (addr != NULL) { struct sockaddr_in6 *sin6_p; /* IPV6_V6ONLY socket */ #ifdef INET if (addr->sa_family == AF_INET) { /* can't bind v4 addr to v6 only socket! */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } #endif sin6_p = (struct sockaddr_in6 *)addr; if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) { /* can't bind v4-mapped addrs either! 
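 *
 * Userland view of the two bind branches above, as a sketch (fd is
 * assumed to be an AF_INET6 SCTP socket; the port is arbitrary; note
 * FreeBSD defaults to net.inet6.ip6.v6only=1):
 *
 *	int on = 1;
 *	struct sockaddr_in6 sin6;
 *
 *	memset(&sin6, 0, sizeof(sin6));
 *	sin6.sin6_family = AF_INET6;
 *	sin6.sin6_len = sizeof(sin6);
 *	sin6.sin6_port = htons(5001);
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sin6.sin6_addr);
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
 *	bind(fd, (struct sockaddr *)&sin6, sizeof(sin6));
 *
 * The bind fails with EINVAL; with IPV6_V6ONLY off, the same
 * v4-mapped bind is converted by in6_sin6_2_sin() and completes as
 * a plain IPv4 bind.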
*/ /* NOTE: we don't support SIIT */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } } error = sctp_inpcb_bind(so, addr, NULL, p); return (error); } static void sctp6_close(struct socket *so) { sctp_close(so); } /* This could be made common with sctp_detach() since they are identical */ static int sctp6_disconnect(struct socket *so) { return (sctp_disconnect(so)); } int sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p); static int sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p) { struct sctp_inpcb *inp; #ifdef INET struct sockaddr_in6 *sin6; #endif /* INET */ /* No SPL needed since sctp_output does this */ inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } /* * For the TCP model we may get a NULL addr, if we are a connected * socket that's ok. */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) && (addr == NULL)) { goto connected_type; } if (addr == NULL) { SCTP_RELEASE_PKT(m); if (control) { SCTP_RELEASE_PKT(control); control = NULL; } SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EDESTADDRREQ); return (EDESTADDRREQ); } #ifdef INET sin6 = (struct sockaddr_in6 *)addr; if (SCTP_IPV6_V6ONLY(inp)) { /* * if IPV6_V6ONLY flag, we discard datagrams destined to a * v4 addr or v4-mapped addr */ if (addr->sa_family == AF_INET) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { struct sockaddr_in sin; /* convert v4-mapped into v4 addr and send */ in6_sin6_2_sin(&sin, sin6); return (sctp_sendm(so, flags, m, (struct sockaddr *)&sin, control, p)); } #endif /* INET */ connected_type: /* now what about control */ if (control) { if (inp->control) { SCTP_PRINTF("huh? control set?\n"); SCTP_RELEASE_PKT(inp->control); inp->control = NULL; } inp->control = control; } /* Place the data */ if (inp->pkt) { SCTP_BUF_NEXT(inp->pkt_last) = m; inp->pkt_last = m; } else { inp->pkt_last = inp->pkt = m; } if ( /* FreeBSD and MacOSX use a flag passed */ ((flags & PRUS_MORETOCOME) == 0) ) { /* * note with the current version this code will only be used * by OpenBSD; NetBSD and FreeBSD have methods for * re-defining sosend() to use sctp_sosend(). One can * optionally switch back to this code (by changing back the * definitions) but this is not advisable. */ int ret; ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); inp->pkt = NULL; inp->control = NULL; return (ret); } else { return (0); } } static int sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p) { uint32_t vrf_id; int error = 0; struct sctp_inpcb *inp; struct sctp_tcb *stcb; #ifdef INET struct sockaddr_in6 *sin6; union sctp_sockstore store; #endif inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET); return (ECONNRESET); /* I made the same as TCP since we are * not setup? 
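 *
 * What the in6_sin6_2_sin() conversion used in sctp6_send() above
 * amounts to, roughly: a v4-mapped address keeps the IPv4 address in
 * its last four bytes, so ::ffff:192.0.2.1 goes out on the wire as
 * plain 192.0.2.1:
 *
 *	sin.sin_family = AF_INET;
 *	sin.sin_len = sizeof(struct sockaddr_in);
 *	sin.sin_port = sin6->sin6_port;
 *	sin.sin_addr.s_addr = sin6->sin6_addr.s6_addr32[3];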
*/ } if (addr == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } switch (addr->sa_family) { #ifdef INET case AF_INET: if (addr->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif #ifdef INET6 case AF_INET6: if (addr->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } break; #endif default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } vrf_id = inp->def_vrf_id; SCTP_ASOC_CREATE_LOCK(inp); SCTP_INP_RLOCK(inp); if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == SCTP_PCB_FLAGS_UNBOUND) { /* Bind an ephemeral port */ SCTP_INP_RUNLOCK(inp); error = sctp6_bind(so, NULL, p); if (error) { SCTP_ASOC_CREATE_UNLOCK(inp); return (error); } SCTP_INP_RLOCK(inp); } if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { /* We are already connected AND the TCP model */ SCTP_INP_RUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EADDRINUSE); return (EADDRINUSE); } #ifdef INET sin6 = (struct sockaddr_in6 *)addr; if (SCTP_IPV6_V6ONLY(inp)) { /* * if IPV6_V6ONLY flag, ignore connections destined to a v4 * addr or v4-mapped addr */ if (addr->sa_family == AF_INET) { SCTP_INP_RUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { SCTP_INP_RUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { /* convert v4-mapped into v4 addr */ in6_sin6_2_sin(&store.sin, sin6); addr = &store.sa; } #endif /* INET */ /* Now do we connect? */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); } else { SCTP_INP_RUNLOCK(inp); SCTP_INP_WLOCK(inp); SCTP_INP_INCR_REF(inp); SCTP_INP_WUNLOCK(inp); stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL); if (stcb == NULL) { SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); SCTP_INP_WUNLOCK(inp); } } if (stcb != NULL) { /* Already have or am bringing up an association */ SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_TCB_UNLOCK(stcb); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EALREADY); return (EALREADY); } /* We are GOOD to go */ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, inp->sctp_ep.pre_open_stream_count, inp->sctp_ep.port, p, SCTP_INITIALIZE_AUTH_PARAMS); SCTP_ASOC_CREATE_UNLOCK(inp); if (stcb == NULL) { /* Gak! no memory */ return (error); } if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; /* Set the connected flag so we can queue data */ soisconnecting(so); } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); return (error); } static int sctp6_getaddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in6 *sin6; struct sctp_inpcb *inp; uint32_t vrf_id; struct sctp_ifa *sctp_ifa; int error; /* * Do the malloc first in case it blocks. 
*/ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof(*sin6)); if (sin6 == NULL) return (ENOMEM); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET); return (ECONNRESET); } SCTP_INP_RLOCK(inp); sin6->sin6_port = inp->sctp_lport; if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { /* For the bound all case you get back 0 */ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { struct sctp_tcb *stcb; struct sockaddr_in6 *sin_a6; struct sctp_nets *net; int fnd; stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_RUNLOCK(inp); SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); return (ENOENT); } fnd = 0; sin_a6 = NULL; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr; if (sin_a6 == NULL) /* this will make coverity happy */ continue; if (sin_a6->sin6_family == AF_INET6) { fnd = 1; break; } } if ((!fnd) || (sin_a6 == NULL)) { /* punt */ SCTP_INP_RUNLOCK(inp); SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); return (ENOENT); } vrf_id = inp->def_vrf_id; sctp_ifa = sctp_source_address_selection(inp, stcb, (sctp_route_t *)&net->ro, net, 0, vrf_id); if (sctp_ifa) { sin6->sin6_addr = sctp_ifa->address.sin6.sin6_addr; } } else { /* For the bound all case you get back 0 */ memset(&sin6->sin6_addr, 0, sizeof(sin6->sin6_addr)); } } else { /* Take the first IPv6 address in the list */ struct sctp_laddr *laddr; int fnd = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa->address.sa.sa_family == AF_INET6) { struct sockaddr_in6 *sin_a; sin_a = &laddr->ifa->address.sin6; sin6->sin6_addr = sin_a->sin6_addr; fnd = 1; break; } } if (!fnd) { SCTP_FREE_SONAME(sin6); SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); return (ENOENT); } } SCTP_INP_RUNLOCK(inp); /* Scoping things for v6 */ if ((error = sa6_recoverscope(sin6)) != 0) { SCTP_FREE_SONAME(sin6); return (error); } (*addr) = (struct sockaddr *)sin6; return (0); } static int sctp6_peeraddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in6 *sin6; int fnd; struct sockaddr_in6 *sin_a6; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; int error; /* Do the malloc first in case it blocks. 
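 *
 * On the sa6_recoverscope() call in sctp6_getaddr() above: KAME
 * derived stacks embed the zone of a scoped address in the address
 * itself (the second 16-bit word of, e.g., a link-local address), so
 * a kernel-internal form is rewritten before it reaches userland.
 * A sketch of the effect:
 *
 *	kernel form:	fe80:0004::1,	sin6_scope_id == 0
 *	after recover:	fe80::1,	sin6_scope_id == 4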
*/ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); if (sin6 == NULL) return (ENOMEM); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); inp = (struct sctp_inpcb *)so->so_pcb; if ((inp == NULL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { /* UDP type and listeners will drop out here */ SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOTCONN); return (ENOTCONN); } SCTP_INP_RLOCK(inp); stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb) { SCTP_TCB_LOCK(stcb); } SCTP_INP_RUNLOCK(inp); if (stcb == NULL) { SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET); return (ECONNRESET); } fnd = 0; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr; if (sin_a6->sin6_family == AF_INET6) { fnd = 1; sin6->sin6_port = stcb->rport; sin6->sin6_addr = sin_a6->sin6_addr; break; } } SCTP_TCB_UNLOCK(stcb); if (!fnd) { /* No IPv6 address */ SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); return (ENOENT); } if ((error = sa6_recoverscope(sin6)) != 0) { SCTP_FREE_SONAME(sin6); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, error); return (error); } *addr = (struct sockaddr *)sin6; return (0); } static int sctp6_in6getaddr(struct socket *so, struct sockaddr **nam) { struct inpcb *inp = sotoinpcb(so); int error; if (inp == NULL) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } /* allow v6 addresses precedence */ error = sctp6_getaddr(so, nam); #ifdef INET if (error) { struct sockaddr_in6 *sin6; /* try v4 next if v6 failed */ error = sctp_ingetaddr(so, nam); if (error) { return (error); } SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); if (sin6 == NULL) { SCTP_FREE_SONAME(*nam); return (ENOMEM); } in6_sin_2_v4mapsin6((struct sockaddr_in *)*nam, sin6); SCTP_FREE_SONAME(*nam); *nam = (struct sockaddr *)sin6; } #endif return (error); } static int sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam) { struct inpcb *inp = sotoinpcb(so); int error; if (inp == NULL) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } /* allow v6 addresses precedence */ error = sctp6_peeraddr(so, nam); #ifdef INET if (error) { struct sockaddr_in6 *sin6; /* try v4 next if v6 failed */ error = sctp_peeraddr(so, nam); if (error) { return (error); } SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); if (sin6 == NULL) { SCTP_FREE_SONAME(*nam); return (ENOMEM); } in6_sin_2_v4mapsin6((struct sockaddr_in *)*nam, sin6); SCTP_FREE_SONAME(*nam); *nam = (struct sockaddr *)sin6; } #endif return (error); } struct pr_usrreqs sctp6_usrreqs = { .pru_abort = sctp6_abort, .pru_accept = sctp_accept, .pru_attach = sctp6_attach, .pru_bind = sctp6_bind, .pru_connect = sctp6_connect, .pru_control = in6_control, .pru_close = sctp6_close, .pru_detach = sctp6_close, .pru_sopoll = sopoll_generic, .pru_flush = sctp_flush, .pru_disconnect = sctp6_disconnect, .pru_listen = sctp_listen, .pru_peeraddr = sctp6_getpeeraddr, .pru_send = sctp6_send, .pru_shutdown = sctp_shutdown, .pru_sockaddr = sctp6_in6getaddr, .pru_sosend = sctp_sosend, .pru_soreceive = sctp_soreceive }; #endif
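/*
 * Usage note on the v6-first lookups above: sctp6_in6getaddr() and
 * sctp6_getpeeraddr() fall back to the IPv4 entry points and re-wrap
 * the result with in6_sin_2_v4mapsin6(), so a caller on an AF_INET6
 * SCTP socket always gets an AF_INET6 sockaddr back. A minimal
 * userland sketch (fd and error handling assumed):
 *
 *	struct sockaddr_in6 sin6;
 *	socklen_t len = sizeof(sin6);
 *
 *	if (getpeername(fd, (struct sockaddr *)&sin6, &len) == 0 &&
 *	    IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
 *		... the association actually runs over IPv4; the
 *		... peer shows up as, e.g., ::ffff:192.0.2.1
 *	}
 */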